1/* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2018 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <http://www.gnu.org/licenses/>. */
20
21/*
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
28
29* Version ptmalloc2-20011215
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
32
33* Quickstart
34
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
43
44* Why use this malloc?
45
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
50 allocator for malloc-intensive programs.
51
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
61
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
64
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
70
71* Contents, described in more detail in "description of public routines" below.
72
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
76 free(void* p);
77 realloc(void* p, size_t n);
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
82
83 Additional functions:
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
86 pvalloc(size_t n);
87 malloc_trim(size_t pad);
88 malloc_usable_size(void* p);
89 malloc_stats();
90
91* Vital statistics:
92
93 Supported pointer representation: 4 or 8 bytes
94 Supported size_t representation: 4 or 8 bytes
95 Note that size_t is allowed to be 4 bytes even if pointers are 8.
  You can adjust this by defining INTERNAL_SIZE_T.
97
98 Alignment: 2 * sizeof(size_t) (default)
99 (i.e., 8 byte alignment with 4byte size_t). This suffices for
100 nearly all current machines and C compilers. However, you can
101 define MALLOC_ALIGNMENT to be wider than this if necessary.
102
103 Minimum overhead per allocated chunk: 4 or 8 bytes
104 Each malloced chunk has a hidden word of overhead holding size
105 and status information.
106
107 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
                               8-byte ptrs:  24/32 bytes (including 4/8 overhead)
109
110 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
111 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
112 needed; 4 (8) for a trailing size field and 8 (16) bytes for
113 free list pointers. Thus, the minimum allocatable size is
114 16/24/32 bytes.
115
116 Even a request for zero bytes (i.e., malloc(0)) returns a
117 pointer to something of the minimum allocatable size.
118
119 The maximum overhead wastage (i.e., number of extra bytes
  allocated beyond those requested in malloc) is less than or equal
121 to the minimum size, except for requests >= mmap_threshold that
122 are serviced via mmap(), where the worst case wastage is 2 *
123 sizeof(size_t) bytes plus the remainder from a system page (the
124 minimal mmap unit); typically 4096 or 8192 bytes.
125
126 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
127 8-byte size_t: 2^64 minus about two pages
128
129 It is assumed that (possibly signed) size_t values suffice to
130 represent chunk sizes. `Possibly signed' is due to the fact
131 that `size_t' may be defined on a system as either a signed or
132 an unsigned type. The ISO C standard says that it must be
133 unsigned, but a few systems are known not to adhere to this.
134 Additionally, even when size_t is unsigned, sbrk (which is by
135 default used to obtain memory from system) accepts signed
136 arguments, and may not be able to handle size_t-wide arguments
137 with negative sign bit. Generally, values that would
138 appear as negative after accounting for overhead and alignment
139 are supported only via mmap(), which does not have this
140 limitation.
141
142 Requests for sizes outside the allowed range will perform an optional
  failure action and then return null. (Requests may also
  fail because a system is out of memory.)
145
146 Thread-safety: thread-safe
147
  Compliance: I believe it is compliant with the 1997 Single Unix Specification,
       and also SVID/XPG, ANSI C, and probably others as well.
150
151* Synopsis of compile-time options:
152
153 People have reported using previous versions of this malloc on all
154 versions of Unix, sometimes by tweaking some of the defines
155 below. It has been tested most extensively on Solaris and Linux.
156 People also report using it in stand-alone embedded systems.
157
158 The implementation is in straight, hand-tuned ANSI C. It is not
159 at all modular. (Sorry!) It uses a lot of macros. To be at all
160 usable, this code should be compiled using an optimizing compiler
161 (for example gcc -O3) that can simplify expressions and control
162 paths. (FAQ: some macros import variables as arguments rather than
163 declare locals because people reported that some debuggers
164 otherwise get confused.)
165
166 OPTION DEFAULT VALUE
167
168 Compilation Environment options:
169
170 HAVE_MREMAP 0
171
172 Changing default word sizes:
173
174 INTERNAL_SIZE_T size_t
175
176 Configuration and functionality options:
177
178 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
179 USE_MALLOC_LOCK NOT defined
180 MALLOC_DEBUG NOT defined
181 REALLOC_ZERO_BYTES_FREES 1
182 TRIM_FASTBINS 0
183
184 Options for customizing MORECORE:
185
186 MORECORE sbrk
187 MORECORE_FAILURE -1
188 MORECORE_CONTIGUOUS 1
189 MORECORE_CANNOT_TRIM NOT defined
190 MORECORE_CLEARS 1
191 MMAP_AS_MORECORE_SIZE (1024 * 1024)
192
193 Tuning options that are also dynamically changeable via mallopt:
194
195 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
196 DEFAULT_TRIM_THRESHOLD 128 * 1024
197 DEFAULT_TOP_PAD 0
198 DEFAULT_MMAP_THRESHOLD 128 * 1024
199 DEFAULT_MMAP_MAX 65536
200
201 There are several other #defined constants and macros that you
202 probably don't want to touch unless you are extending or adapting malloc. */
203
204/*
205 void* is the pointer type that malloc should say it returns
206*/
207
208#ifndef void
209#define void void
210#endif /*void*/
211
212#include <stddef.h> /* for size_t */
213#include <stdlib.h> /* for getenv(), abort() */
214#include <unistd.h> /* for __libc_enable_secure */
215
216#include <atomic.h>
217#include <_itoa.h>
218#include <bits/wordsize.h>
219#include <sys/sysinfo.h>
220
221#include <ldsodefs.h>
222
223#include <unistd.h>
224#include <stdio.h> /* needed for malloc_stats */
225#include <errno.h>
226#include <assert.h>
227
228#include <shlib-compat.h>
229
230/* For uintptr_t. */
231#include <stdint.h>
232
233/* For va_arg, va_start, va_end. */
234#include <stdarg.h>
235
236/* For MIN, MAX, powerof2. */
237#include <sys/param.h>
238
239/* For ALIGN_UP et. al. */
240#include <libc-pointer-arith.h>
241
242/* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
243#include <libc-diag.h>
244
245#include <malloc/malloc-internal.h>
246
247/* For SINGLE_THREAD_P. */
248#include <sysdep-cancel.h>
249
250/*
251 Debugging:
252
253 Because freed chunks may be overwritten with bookkeeping fields, this
254 malloc will often die when freed memory is overwritten by user
255 programs. This can be very effective (albeit in an annoying way)
256 in helping track down dangling pointers.
257
258 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
259 enabled that will catch more memory errors. You probably won't be
260 able to make much sense of the actual assertion errors, but they
261 should help you locate incorrectly overwritten memory. The checking
262 is fairly extensive, and will slow down execution
263 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
264 will attempt to check every non-mmapped allocated and free chunk in
  the course of computing the summaries. (By nature, mmapped regions
266 cannot be checked very much automatically.)
267
268 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
269 this code. The assertions in the check routines spell out in more
270 detail the assumptions and invariants underlying the algorithms.
271
272 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
273 checking that all accesses to malloced memory stay within their
274 bounds. However, there are several add-ons and adaptations of this
275 or other mallocs available that do this.
276*/
277
278#ifndef MALLOC_DEBUG
279#define MALLOC_DEBUG 0
280#endif
281
282#ifndef NDEBUG
283# define __assert_fail(assertion, file, line, function) \
284 __malloc_assert(assertion, file, line, function)
285
286extern const char *__progname;
287
288static void
289__malloc_assert (const char *assertion, const char *file, unsigned int line,
290 const char *function)
291{
292 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
293 __progname, __progname[0] ? ": " : "",
294 file, line,
295 function ? function : "", function ? ": " : "",
296 assertion);
297 fflush (stderr);
298 abort ();
299}
300#endif
301
302#if USE_TCACHE
303/* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
304# define TCACHE_MAX_BINS 64
305# define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
306
307/* Only used to pre-fill the tunables. */
308# define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
309
310/* When "x" is from chunksize(). */
311# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
312/* When "x" is a user-provided size. */
313# define usize2tidx(x) csize2tidx (request2size (x))
314
315/* With rounding and alignment, the bins are...
316 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
317 idx 1 bytes 25..40 or 13..20
318 idx 2 bytes 41..56 or 21..28
319 etc. */
320
321/* This is another arbitrary limit, which tunables can change. Each
322 tcache bin will hold at most this number of chunks. */
323# define TCACHE_FILL_COUNT 7
324
325/* Maximum chunks in tcache bins for tunables. This value must fit the range
326 of tcache->counts[] entries, else they may overflow. */
327# define MAX_TCACHE_COUNT UINT16_MAX
328#endif
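/*
   Worked example (added for exposition, not in the original source):
   on a typical 64-bit build, SIZE_SZ is 8, MALLOC_ALIGNMENT is 16 and
   MINSIZE is 32, so the macros above give

     tidx2usize (0)   ==  0*16 + 32 - 8  ==  24
     tidx2usize (1)   ==  1*16 + 32 - 8  ==  40
     csize2tidx (32)  ==  (32 - 32 + 15) / 16  ==  0
     csize2tidx (48)  ==  (48 - 32 + 15) / 16  ==  1

   which matches the "idx 0 bytes 0..24, idx 1 bytes 25..40" table above.
*/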
329
330
331/*
332 REALLOC_ZERO_BYTES_FREES should be set if a call to
333 realloc with zero bytes should be the same as a call to free.
334 This is required by the C standard. Otherwise, since this malloc
335 returns a unique pointer for malloc(0), so does realloc(p, 0).
336*/
337
338#ifndef REALLOC_ZERO_BYTES_FREES
339#define REALLOC_ZERO_BYTES_FREES 1
340#endif
341
342/*
343 TRIM_FASTBINS controls whether free() of a very small chunk can
344 immediately lead to trimming. Setting to true (1) can reduce memory
345 footprint, but will almost always slow down programs that use a lot
346 of small chunks.
347
348 Define this only if you are willing to give up some speed to more
349 aggressively reduce system-level memory footprint when releasing
350 memory in programs that use many small chunks. You can get
351 essentially the same effect by setting MXFAST to 0, but this can
352 lead to even greater slowdowns in programs using many small chunks.
353 TRIM_FASTBINS is an in-between compile-time option, that disables
354 only those chunks bordering topmost memory from being placed in
355 fastbins.
356*/
357
358#ifndef TRIM_FASTBINS
359#define TRIM_FASTBINS 0
360#endif
361
362
363/* Definition for getting more memory from the OS. */
364#define MORECORE (*__morecore)
365#define MORECORE_FAILURE 0
366void * __default_morecore (ptrdiff_t);
367void *(*__morecore)(ptrdiff_t) = __default_morecore;
368
369
370#include <string.h>
371
372/*
373 MORECORE-related declarations. By default, rely on sbrk
374*/
375
376
377/*
378 MORECORE is the name of the routine to call to obtain more memory
379 from the system. See below for general guidance on writing
380 alternative MORECORE functions, as well as a version for WIN32 and a
381 sample version for pre-OSX macos.
382*/
383
384#ifndef MORECORE
385#define MORECORE sbrk
386#endif
387
388/*
389 MORECORE_FAILURE is the value returned upon failure of MORECORE
390 as well as mmap. Since it cannot be an otherwise valid memory address,
391 and must reflect values of standard sys calls, you probably ought not
392 try to redefine it.
393*/
394
395#ifndef MORECORE_FAILURE
396#define MORECORE_FAILURE (-1)
397#endif
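/*
   Minimal sketch of an alternative MORECORE, purely for illustration
   (static_pool and static_pool_morecore are made-up names, not used
   elsewhere in this file).  A replacement must return the start of the
   newly extended region for positive increments, behave like sbrk(0)
   for an increment of zero, and return MORECORE_FAILURE when it cannot
   satisfy a request.  If it cannot handle negative increments at all,
   also define MORECORE_CANNOT_TRIM.

     static char static_pool[1024 * 1024];
     static size_t pool_used;

     void *
     static_pool_morecore (ptrdiff_t increment)
     {
       if (increment >= 0
           && (size_t) increment <= sizeof (static_pool) - pool_used)
         {
           void *old_break = static_pool + pool_used;
           pool_used += increment;
           return old_break;
         }
       return (void *) MORECORE_FAILURE;
     }
*/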
398
399/*
400 If MORECORE_CONTIGUOUS is true, take advantage of fact that
401 consecutive calls to MORECORE with positive arguments always return
402 contiguous increasing addresses. This is true of unix sbrk. Even
403 if not defined, when regions happen to be contiguous, malloc will
404 permit allocations spanning regions obtained from different
405 calls. But defining this when applicable enables some stronger
406 consistency checks and space efficiencies.
407*/
408
409#ifndef MORECORE_CONTIGUOUS
410#define MORECORE_CONTIGUOUS 1
411#endif
412
413/*
414 Define MORECORE_CANNOT_TRIM if your version of MORECORE
415 cannot release space back to the system when given negative
416 arguments. This is generally necessary only if you are using
417 a hand-crafted MORECORE function that cannot handle negative arguments.
418*/
419
420/* #define MORECORE_CANNOT_TRIM */
421
422/* MORECORE_CLEARS (default 1)
423 The degree to which the routine mapped to MORECORE zeroes out
424 memory: never (0), only for newly allocated space (1) or always
425 (2). The distinction between (1) and (2) is necessary because on
426 some systems, if the application first decrements and then
427 increments the break value, the contents of the reallocated space
428 are unspecified.
429 */
430
431#ifndef MORECORE_CLEARS
432# define MORECORE_CLEARS 1
433#endif
434
435
436/*
437 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
438 sbrk fails, and mmap is used as a backup. The value must be a
439 multiple of page size. This backup strategy generally applies only
440 when systems have "holes" in address space, so sbrk cannot perform
441 contiguous expansion, but there is still space available on system.
442 On systems for which this is known to be useful (i.e. most linux
443 kernels), this occurs only when programs allocate huge amounts of
444 memory. Between this, and the fact that mmap regions tend to be
445 limited, the size should be large, to avoid too many mmap calls and
446 thus avoid running out of kernel resources. */
447
448#ifndef MMAP_AS_MORECORE_SIZE
449#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
450#endif
451
452/*
453 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
454 large blocks.
455*/
456
457#ifndef HAVE_MREMAP
458#define HAVE_MREMAP 0
459#endif
460
461/* We may need to support __malloc_initialize_hook for backwards
462 compatibility. */
463
464#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
465# define HAVE_MALLOC_INIT_HOOK 1
466#else
467# define HAVE_MALLOC_INIT_HOOK 0
468#endif
469
470
471/*
472 This version of malloc supports the standard SVID/XPG mallinfo
473 routine that returns a struct containing usage properties and
474 statistics. It should work on any SVID/XPG compliant system that has
475 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
476 install such a thing yourself, cut out the preliminary declarations
477 as described above and below and save them in a malloc.h file. But
478 there's no compelling reason to bother to do this.)
479
480 The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
  bunch of fields that are not even meaningful in this version of
  malloc. These fields are instead filled by mallinfo() with
484 other numbers that might be of interest.
485*/
486
487
488/* ---------- description of public routines ------------ */
489
490/*
491 malloc(size_t n)
492 Returns a pointer to a newly allocated chunk of at least n bytes, or null
493 if no space is available. Additionally, on failure, errno is
494 set to ENOMEM on ANSI C systems.
495
  If n is zero, malloc returns a minimum-sized chunk. (The minimum
497 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
498 systems.) On most systems, size_t is an unsigned type, so calls
499 with negative arguments are interpreted as requests for huge amounts
500 of space, which will often fail. The maximum supported value of n
501 differs across systems, but is in all cases less than the maximum
502 representable value of a size_t.
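
  Illustrative usage sketch (added for exposition; handle_oom and nbytes
  are placeholder names, not identifiers from this file): on failure the
  null return coincides with errno being set to ENOMEM, so a caller
  typically does

    void *buf = malloc (nbytes);
    if (buf == NULL)
      handle_oom ();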
503*/
504void* __libc_malloc(size_t);
505libc_hidden_proto (__libc_malloc)
506
507/*
508 free(void* p)
509 Releases the chunk of memory pointed to by p, that had been previously
510 allocated using malloc or a related routine such as realloc.
511 It has no effect if p is null. It can have arbitrary (i.e., bad!)
512 effects if p has already been freed.
513
514 Unless disabled (using mallopt), freeing very large spaces will
515 when possible, automatically trigger operations that give
516 back unused memory to the system, thus reducing program footprint.
517*/
518void __libc_free(void*);
519libc_hidden_proto (__libc_free)
520
521/*
522 calloc(size_t n_elements, size_t element_size);
523 Returns a pointer to n_elements * element_size bytes, with all locations
524 set to zero.
525*/
526void* __libc_calloc(size_t, size_t);
527
528/*
529 realloc(void* p, size_t n)
530 Returns a pointer to a chunk of size n that contains the same data
531 as does chunk p up to the minimum of (n, p's size) bytes, or null
532 if no space is available.
533
534 The returned pointer may or may not be the same as p. The algorithm
535 prefers extending p when possible, otherwise it employs the
536 equivalent of a malloc-copy-free sequence.
537
538 If p is null, realloc is equivalent to malloc.
539
540 If space is not available, realloc returns null, errno is set (if on
541 ANSI) and p is NOT freed.
542
  If n is for fewer bytes than already held by p, the newly unused
544 space is lopped off and freed if possible. Unless the #define
545 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
546 zero (re)allocates a minimum-sized chunk.
547
548 Large chunks that were internally obtained via mmap will always be
549 grown using malloc-copy-free sequences unless the system supports
550 MREMAP (currently only linux).
551
552 The old unix realloc convention of allowing the last-free'd chunk
553 to be used as an argument to realloc is not supported.
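
  Because a failed realloc leaves p allocated (see above), callers that
  must not leak typically go through a temporary.  Illustrative sketch
  (added for exposition; grow_failed and newsize are placeholder names);
  if realloc fails, p itself is still valid and must eventually be freed:

    void *tmp = realloc (p, newsize);
    if (tmp != NULL)
      p = tmp;
    else
      grow_failed ();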
554*/
555void* __libc_realloc(void*, size_t);
556libc_hidden_proto (__libc_realloc)
557
558/*
559 memalign(size_t alignment, size_t n);
560 Returns a pointer to a newly allocated chunk of n bytes, aligned
561 in accord with the alignment argument.
562
563 The alignment argument should be a power of two. If the argument is
564 not a power of two, the nearest greater power is used.
565 8-byte alignment is guaranteed by normal malloc calls, so don't
566 bother calling memalign with an argument of 8 or less.
567
568 Overreliance on memalign is a sure way to fragment space.
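
  Illustrative call (added for exposition): to obtain 4096 bytes whose
  start is aligned to a 64-byte boundary, e.g. for cache-line-sized
  objects:

    void *p = memalign (64, 4096);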
569*/
570void* __libc_memalign(size_t, size_t);
571libc_hidden_proto (__libc_memalign)
572
573/*
574 valloc(size_t n);
575 Equivalent to memalign(pagesize, n), where pagesize is the page
576 size of the system. If the pagesize is unknown, 4096 is used.
577*/
578void* __libc_valloc(size_t);
579
580
581
582/*
583 mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
585 (parameter-number, parameter-value) pair. mallopt then sets the
586 corresponding parameter to the argument value if it can (i.e., so
587 long as the value is meaningful), and returns 1 if successful else
588 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
589 normally defined in malloc.h. Only one of these (M_MXFAST) is used
590 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
591 so setting them has no effect. But this malloc also supports four
592 other options in mallopt. See below for details. Briefly, supported
593 parameters are as follows (listed defaults are for "typical"
594 configurations).
595
596 Symbol param # default allowed param values
597 M_MXFAST 1 64 0-80 (0 disables fastbins)
598 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
599 M_TOP_PAD -2 0 any
600 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
601 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
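
  Illustrative calls (added for exposition): disable fastbins entirely
  and raise the mmap threshold to 1MB:

    mallopt (M_MXFAST, 0);
    mallopt (M_MMAP_THRESHOLD, 1024 * 1024);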
602*/
603int __libc_mallopt(int, int);
604libc_hidden_proto (__libc_mallopt)
605
606
607/*
608 mallinfo()
609 Returns (by copy) a struct containing various summary statistics:
610
611 arena: current total non-mmapped bytes allocated from system
612 ordblks: the number of free chunks
613 smblks: the number of fastbin blocks (i.e., small chunks that
              have been freed but not yet reused or consolidated)
615 hblks: current number of mmapped regions
616 hblkhd: total bytes held in mmapped regions
617 usmblks: always 0
618 fsmblks: total bytes held in fastbin blocks
619 uordblks: current total allocated space (normal or mmapped)
620 fordblks: total free space
621 keepcost: the maximum number of bytes that could ideally be released
622 back to system via malloc_trim. ("ideally" means that
623 it ignores page restrictions etc.)
624
625 Because these fields are ints, but internal bookkeeping may
626 be kept as longs, the reported values may wrap around zero and
627 thus be inaccurate.
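
  Illustrative use (added for exposition, assuming <stdio.h>):

    struct mallinfo mi = mallinfo ();
    printf ("allocated: %d  free: %d  mmapped: %d\n",
            mi.uordblks, mi.fordblks, mi.hblkhd);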
628*/
629struct mallinfo __libc_mallinfo(void);
630
631
632/*
633 pvalloc(size_t n);
634 Equivalent to valloc(minimum-page-that-holds(n)), that is,
635 round up n to nearest pagesize.
636 */
637void* __libc_pvalloc(size_t);
638
639/*
640 malloc_trim(size_t pad);
641
642 If possible, gives memory back to the system (via negative
643 arguments to sbrk) if there is unused memory at the `high' end of
644 the malloc pool. You can call this after freeing large blocks of
645 memory to potentially reduce the system-level memory requirements
646 of a program. However, it cannot guarantee to reduce memory. Under
647 some allocation patterns, some large free blocks of memory will be
648 locked between two used chunks, so they cannot be given back to
649 the system.
650
651 The `pad' argument to malloc_trim represents the amount of free
652 trailing space to leave untrimmed. If this argument is zero,
653 only the minimum amount of memory to maintain internal data
654 structures will be left (one page or less). Non-zero arguments
655 can be supplied to maintain enough trailing space to service
656 future expected allocations without having to re-obtain memory
657 from the system.
658
659 Malloc_trim returns 1 if it actually released any memory, else 0.
660 On systems that do not support "negative sbrks", it will always
661 return 0.
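
  Illustrative use (added for exposition): after freeing a large working
  set, a long-lived program might call

    malloc_trim (0);

  and check the return value to see whether any memory was actually
  handed back to the system.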
662*/
663int __malloc_trim(size_t);
664
665/*
666 malloc_usable_size(void* p);
667
668 Returns the number of bytes you can actually use in
669 an allocated chunk, which may be more than you requested (although
670 often not) due to alignment and minimum size constraints.
671 You can use this many bytes without worrying about
672 overwriting other allocated objects. This is not a particularly great
673 programming practice. malloc_usable_size can be more useful in
674 debugging and assertions, for example:
675
676 p = malloc(n);
677 assert(malloc_usable_size(p) >= 256);
678
679*/
680size_t __malloc_usable_size(void*);
681
682/*
683 malloc_stats();
684 Prints on stderr the amount of space obtained from the system (both
685 via sbrk and mmap), the maximum amount (which may be more than
686 current if malloc_trim and/or munmap got called), and the current
687 number of bytes allocated via malloc (or realloc, etc) but not yet
688 freed. Note that this is the number of bytes allocated, not the
689 number requested. It will be larger than the number requested
690 because of alignment and bookkeeping overhead. Because it includes
691 alignment wastage as being in use, this figure may be greater than
692 zero even when no user-level chunks are allocated.
693
694 The reported current and maximum system memory can be inaccurate if
695 a program makes other calls to system memory allocation functions
696 (normally sbrk) outside of malloc.
697
698 malloc_stats prints only the most commonly interesting statistics.
699 More information can be obtained by calling mallinfo.
700
701*/
702void __malloc_stats(void);
703
704/*
705 posix_memalign(void **memptr, size_t alignment, size_t size);
706
707 POSIX wrapper like memalign(), checking for validity of size.
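
  Illustrative use (added for exposition): note that failure is reported
  through the return value, which is an error number:

    void *p;
    if (posix_memalign (&p, 4096, 8192) != 0)
      p = NULL;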
708*/
709int __posix_memalign(void **, size_t, size_t);
710
711/* mallopt tuning options */
712
713/*
714 M_MXFAST is the maximum request size used for "fastbins", special bins
715 that hold returned chunks without consolidating their spaces. This
716 enables future requests for chunks of the same size to be handled
717 very quickly, but can increase fragmentation, and thus increase the
718 overall memory footprint of a program.
719
720 This malloc manages fastbins very conservatively yet still
721 efficiently, so fragmentation is rarely a problem for values less
722 than or equal to the default. The maximum supported value of MXFAST
723 is 80. You wouldn't want it any higher than this anyway. Fastbins
724 are designed especially for use with many small structs, objects or
725 strings -- the default handles structs/objects/arrays with sizes up
726 to 8 4byte fields, or small strings representing words, tokens,
727 etc. Using fastbins for larger objects normally worsens
728 fragmentation without improving speed.
729
730 M_MXFAST is set in REQUEST size units. It is internally used in
731 chunksize units, which adds padding and alignment. You can reduce
732 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
733 algorithm to be a closer approximation of fifo-best-fit in all cases,
734 not just for larger requests, but will generally cause it to be
735 slower.
736*/
737
738
739/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
740#ifndef M_MXFAST
741#define M_MXFAST 1
742#endif
743
744#ifndef DEFAULT_MXFAST
745#define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
746#endif
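/*
   For exposition: 64 * SIZE_SZ / 4 evaluates to 64 when SIZE_SZ is 4
   (32-bit) and to 128 when SIZE_SZ is 8 (64-bit), matching the
   DEFAULT_MXFAST values listed in the option summary above.
*/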
747
748
749/*
750 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
751 to keep before releasing via malloc_trim in free().
752
753 Automatic trimming is mainly useful in long-lived programs.
754 Because trimming via sbrk can be slow on some systems, and can
755 sometimes be wasteful (in cases where programs immediately
756 afterward allocate more large chunks) the value should be high
757 enough so that your overall system performance would improve by
758 releasing this much memory.
759
760 The trim threshold and the mmap control parameters (see below)
761 can be traded off with one another. Trimming and mmapping are
762 two different ways of releasing unused memory back to the
763 system. Between these two, it is often possible to keep
764 system-level demands of a long-lived program down to a bare
765 minimum. For example, in one test suite of sessions measuring
766 the XF86 X server on Linux, using a trim threshold of 128K and a
767 mmap threshold of 192K led to near-minimal long term resource
768 consumption.
769
770 If you are using this malloc in a long-lived program, it should
771 pay to experiment with these values. As a rough guide, you
772 might set to a value close to the average size of a process
773 (program) running on your system. Releasing this much memory
774 would allow such a process to run in memory. Generally, it's
  worth it to tune for trimming rather than memory mapping when a
776 program undergoes phases where several large chunks are
777 allocated and released in ways that can reuse each other's
778 storage, perhaps mixed with phases where there are no such
779 chunks at all. And in well-behaved long-lived programs,
780 controlling release of large blocks via trimming versus mapping
781 is usually faster.
782
783 However, in most programs, these parameters serve mainly as
784 protection against the system-level effects of carrying around
785 massive amounts of unneeded memory. Since frequent calls to
786 sbrk, mmap, and munmap otherwise degrade performance, the default
787 parameters are set to relatively high values that serve only as
788 safeguards.
789
  The trim value must be greater than page size to have any useful
791 effect. To disable trimming completely, you can set to
792 (unsigned long)(-1)
793
794 Trim settings interact with fastbin (MXFAST) settings: Unless
795 TRIM_FASTBINS is defined, automatic trimming never takes place upon
796 freeing a chunk with size less than or equal to MXFAST. Trimming is
797 instead delayed until subsequent freeing of larger chunks. However,
798 you can still force an attempted trim by calling malloc_trim.
799
800 Also, trimming is not generally possible in cases where
801 the main arena is obtained via mmap.
802
803 Note that the trick some people use of mallocing a huge space and
804 then freeing it at program startup, in an attempt to reserve system
805 memory, doesn't have the intended effect under automatic trimming,
806 since that memory will immediately be returned to the system.
807*/
808
809#define M_TRIM_THRESHOLD -1
810
811#ifndef DEFAULT_TRIM_THRESHOLD
812#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
813#endif
814
815/*
816 M_TOP_PAD is the amount of extra `padding' space to allocate or
817 retain whenever sbrk is called. It is used in two ways internally:
818
819 * When sbrk is called to extend the top of the arena to satisfy
820 a new malloc request, this much padding is added to the sbrk
821 request.
822
823 * When malloc_trim is called automatically from free(),
824 it is used as the `pad' argument.
825
826 In both cases, the actual amount of padding is rounded
827 so that the end of the arena is always a system page boundary.
828
829 The main reason for using padding is to avoid calling sbrk so
830 often. Having even a small pad greatly reduces the likelihood
831 that nearly every malloc request during program start-up (or
832 after trimming) will invoke sbrk, which needlessly wastes
833 time.
834
835 Automatic rounding-up to page-size units is normally sufficient
836 to avoid measurable overhead, so the default is 0. However, in
837 systems where sbrk is relatively slow, it can pay to increase
838 this value, at the expense of carrying around more memory than
839 the program needs.
840*/
841
842#define M_TOP_PAD -2
843
844#ifndef DEFAULT_TOP_PAD
845#define DEFAULT_TOP_PAD (0)
846#endif
847
848/*
849 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
850 adjusted MMAP_THRESHOLD.
851*/
852
853#ifndef DEFAULT_MMAP_THRESHOLD_MIN
854#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
855#endif
856
857#ifndef DEFAULT_MMAP_THRESHOLD_MAX
858 /* For 32-bit platforms we cannot increase the maximum mmap
859 threshold much because it is also the minimum value for the
860 maximum heap size and its alignment. Going above 512k (i.e., 1M
861 for new heaps) wastes too much address space. */
862# if __WORDSIZE == 32
863# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
864# else
865# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
866# endif
867#endif
868
869/*
870 M_MMAP_THRESHOLD is the request size threshold for using mmap()
871 to service a request. Requests of at least this size that cannot
872 be allocated using already-existing space will be serviced via mmap.
873 (If enough normal freed space already exists it is used instead.)
874
875 Using mmap segregates relatively large chunks of memory so that
876 they can be individually obtained and released from the host
877 system. A request serviced through mmap is never reused by any
878 other request (at least not directly; the system may just so
879 happen to remap successive requests to the same locations).
880
881 Segregating space in this way has the benefits that:
882
883 1. Mmapped space can ALWAYS be individually released back
884 to the system, which helps keep the system level memory
885 demands of a long-lived program low.
886 2. Mapped memory can never become `locked' between
887 other chunks, as can happen with normally allocated chunks, which
888 means that even trimming via malloc_trim would not release them.
889 3. On some systems with "holes" in address spaces, mmap can obtain
890 memory that sbrk cannot.
891
892 However, it has the disadvantages that:
893
894 1. The space cannot be reclaimed, consolidated, and then
895 used to service later requests, as happens with normal chunks.
896 2. It can lead to more wastage because of mmap page alignment
897 requirements
898 3. It causes malloc performance to be more dependent on host
899 system memory management support routines which may vary in
900 implementation quality and may impose arbitrary
901 limitations. Generally, servicing a request via normal
902 malloc steps is faster than going through a system's mmap.
903
904 The advantages of mmap nearly always outweigh disadvantages for
905 "large" chunks, but the value of "large" varies across systems. The
906 default is an empirically derived value that works well in most
907 systems.
908
909
910 Update in 2006:
911 The above was written in 2001. Since then the world has changed a lot.
912 Memory got bigger. Applications got bigger. The virtual address space
913 layout in 32 bit linux changed.
914
915 In the new situation, brk() and mmap space is shared and there are no
916 artificial limits on brk size imposed by the kernel. What is more,
917 applications have started using transient allocations larger than the
918 128Kb as was imagined in 2001.
919
920 The price for mmap is also high now; each time glibc mmaps from the
921 kernel, the kernel is forced to zero out the memory it gives to the
922 application. Zeroing memory is expensive and eats a lot of cache and
923 memory bandwidth. This has nothing to do with the efficiency of the
  virtual memory system; by doing mmap the kernel just has no choice but
925 to zero.
926
927 In 2001, the kernel had a maximum size for brk() which was about 800
928 megabytes on 32 bit x86, at that point brk() would hit the first
  mmapped shared libraries and couldn't expand anymore. With current 2.6
930 kernels, the VA space layout is different and brk() and mmap
931 both can span the entire heap at will.
932
933 Rather than using a static threshold for the brk/mmap tradeoff,
934 we are now using a simple dynamic one. The goal is still to avoid
935 fragmentation. The old goals we kept are
936 1) try to get the long lived large allocations to use mmap()
937 2) really large allocations should always use mmap()
938 and we're adding now:
  3) transient allocations should use brk() to avoid forcing the kernel
     to zero memory over and over again
941
942 The implementation works with a sliding threshold, which is by default
  limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
944 out at 128Kb as per the 2001 default.
945
946 This allows us to satisfy requirement 1) under the assumption that long
947 lived allocations are made early in the process' lifespan, before it has
948 started doing dynamic allocations of the same size (which will
949 increase the threshold).
950
951 The upperbound on the threshold satisfies requirement 2)
952
953 The threshold goes up in value when the application frees memory that was
954 allocated with the mmap allocator. The idea is that once the application
955 starts freeing memory of a certain size, it's highly probable that this is
956 a size the application uses for transient allocations. This estimator
957 is there to satisfy the new third requirement.
958
959*/
960
961#define M_MMAP_THRESHOLD -3
962
963#ifndef DEFAULT_MMAP_THRESHOLD
964#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
965#endif
966
967/*
968 M_MMAP_MAX is the maximum number of requests to simultaneously
969 service using mmap. This parameter exists because
970 some systems have a limited number of internal tables for
971 use by mmap, and using more than a few of them may degrade
972 performance.
973
974 The default is set to a value that serves only as a safeguard.
975 Setting to 0 disables use of mmap for servicing large requests.
976*/
977
978#define M_MMAP_MAX -4
979
980#ifndef DEFAULT_MMAP_MAX
981#define DEFAULT_MMAP_MAX (65536)
982#endif
983
984#include <malloc.h>
985
986#ifndef RETURN_ADDRESS
987#define RETURN_ADDRESS(X_) (NULL)
988#endif
989
990/* Forward declarations. */
991struct malloc_chunk;
992typedef struct malloc_chunk* mchunkptr;
993
994/* Internal routines. */
995
996static void* _int_malloc(mstate, size_t);
997static void _int_free(mstate, mchunkptr, int);
998static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
999 INTERNAL_SIZE_T);
1000static void* _int_memalign(mstate, size_t, size_t);
1001static void* _mid_memalign(size_t, size_t, void *);
1002
1003static void malloc_printerr(const char *str) __attribute__ ((noreturn));
1004
1005static void* mem2mem_check(void *p, size_t sz);
1006static void top_check(void);
1007static void munmap_chunk(mchunkptr p);
1008#if HAVE_MREMAP
1009static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
1010#endif
1011
1012static void* malloc_check(size_t sz, const void *caller);
1013static void free_check(void* mem, const void *caller);
1014static void* realloc_check(void* oldmem, size_t bytes,
1015 const void *caller);
1016static void* memalign_check(size_t alignment, size_t bytes,
1017 const void *caller);
1018
1019/* ------------------ MMAP support ------------------ */
1020
1021
1022#include <fcntl.h>
1023#include <sys/mman.h>
1024
1025#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1026# define MAP_ANONYMOUS MAP_ANON
1027#endif
1028
1029#ifndef MAP_NORESERVE
1030# define MAP_NORESERVE 0
1031#endif
1032
1033#define MMAP(addr, size, prot, flags) \
1034 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1035
1036
1037/*
1038 ----------------------- Chunk representations -----------------------
1039*/
1040
1041
1042/*
1043 This struct declaration is misleading (but accurate and necessary).
1044 It declares a "view" into memory allowing access to necessary
1045 fields at known offsets from a given base. See explanation below.
1046*/
1047
1048struct malloc_chunk {
1049
1050 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1051 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1052
1053 struct malloc_chunk* fd; /* double links -- used only if free. */
1054 struct malloc_chunk* bk;
1055
1056 /* Only used for large blocks: pointer to next larger size. */
1057 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1058 struct malloc_chunk* bk_nextsize;
1059};
1060
1061
1062/*
1063 malloc_chunk details:
1064
1065 (The following includes lightly edited explanations by Colin Plumb.)
1066
1067 Chunks of memory are maintained using a `boundary tag' method as
1068 described in e.g., Knuth or Standish. (See the paper by Paul
1069 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1070 survey of such techniques.) Sizes of free chunks are stored both
1071 in the front of each chunk and at the end. This makes
1072 consolidating fragmented chunks into bigger chunks very fast. The
1073 size fields also hold bits representing whether chunks are free or
1074 in use.
1075
1076 An allocated chunk looks like this:
1077
1078
1079 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1080 | Size of previous chunk, if unallocated (P clear) |
1081 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1082 | Size of chunk, in bytes |A|M|P|
1083 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1084 | User data starts here... .
1085 . .
1086 . (malloc_usable_size() bytes) .
1087 . |
1088nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1089 | (size of chunk, but used for application data) |
1090 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1091 | Size of next chunk, in bytes |A|0|1|
1092 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1093
1094 Where "chunk" is the front of the chunk for the purpose of most of
1095 the malloc code, but "mem" is the pointer that is returned to the
1096 user. "Nextchunk" is the beginning of the next contiguous chunk.
1097
1098 Chunks always begin on even word boundaries, so the mem portion
1099 (which is returned to the user) is also on an even word boundary, and
1100 thus at least double-word aligned.
1101
1102 Free chunks are stored in circular doubly-linked lists, and look like this:
1103
1104 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1105 | Size of previous chunk, if unallocated (P clear) |
1106 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1107 `head:' | Size of chunk, in bytes |A|0|P|
1108 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1109 | Forward pointer to next chunk in list |
1110 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1111 | Back pointer to previous chunk in list |
1112 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1113 | Unused space (may be 0 bytes long) .
1114 . .
1115 . |
1116nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1117 `foot:' | Size of chunk, in bytes |
1118 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1119 | Size of next chunk, in bytes |A|0|0|
1120 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1121
1122 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1123 chunk size (which is always a multiple of two words), is an in-use
1124 bit for the *previous* chunk. If that bit is *clear*, then the
1125 word before the current chunk size contains the previous chunk
1126 size, and can be used to find the front of the previous chunk.
1127 The very first chunk allocated always has this bit set,
1128 preventing access to non-existent (or non-owned) memory. If
1129 prev_inuse is set for any given chunk, then you CANNOT determine
1130 the size of the previous chunk, and might even get a memory
1131 addressing fault when trying to do so.
1132
1133 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1134 main arena, described by the main_arena variable. When additional
1135 threads are spawned, each thread receives its own arena (up to a
1136 configurable limit, after which arenas are reused for multiple
1137 threads), and the chunks in these arenas have the A bit set. To
1138 find the arena for a chunk on such a non-main arena, heap_for_ptr
1139 performs a bit mask operation and indirection through the ar_ptr
1140 member of the per-heap header heap_info (see arena.c).
1141
1142 Note that the `foot' of the current chunk is actually represented
1143 as the prev_size of the NEXT chunk. This makes it easier to
1144 deal with alignments etc but can be very confusing when trying
1145 to extend or adapt this code.
1146
1147 The three exceptions to all this are:
1148
1149 1. The special chunk `top' doesn't bother using the
1150 trailing size field since there is no next contiguous chunk
1151 that would have to index off it. After initialization, `top'
1152 is forced to always exist. If it would become less than
1153 MINSIZE bytes long, it is replenished.
1154
1155 2. Chunks allocated via mmap, which have the second-lowest-order
1156 bit M (IS_MMAPPED) set in their size fields. Because they are
1157 allocated one-by-one, each must contain its own trailing size
1158 field. If the M bit is set, the other bits are ignored
1159 (because mmapped chunks are neither in an arena, nor adjacent
1160 to a freed chunk). The M bit is also used for chunks which
1161 originally came from a dumped heap via malloc_set_state in
1162 hooks.c.
1163
1164 3. Chunks in fastbins are treated as allocated chunks from the
1165 point of view of the chunk allocator. They are consolidated
1166 with their neighbors only in bulk, in malloc_consolidate.
1167*/
1168
1169/*
1170 ---------- Size and alignment checks and conversions ----------
1171*/
1172
1173/* conversion from malloc headers to user pointers, and back */
1174
1175#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
1176#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1177
1178/* The smallest possible chunk */
1179#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1180
1181/* The smallest size we can malloc is an aligned minimal chunk */
1182
1183#define MINSIZE \
1184 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1185
1186/* Check if m has acceptable alignment */
1187
1188#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1189
1190#define misaligned_chunk(p) \
1191 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1192 & MALLOC_ALIGN_MASK)
1193
1194
1195/*
1196 Check if a request is so large that it would wrap around zero when
1197 padded and aligned. To simplify some other code, the bound is made
1198 low enough so that adding MINSIZE will also not wrap around zero.
1199 */
1200
1201#define REQUEST_OUT_OF_RANGE(req) \
1202 ((unsigned long) (req) >= \
1203 (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
1204
1205/* pad request bytes into a usable size -- internal version */
1206
1207#define request2size(req) \
1208 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1209 MINSIZE : \
1210 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1211
1212/* Same, except also perform an argument and result check. First, we check
1213 that the padding done by request2size didn't result in an integer
1214 overflow. Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
1215 size isn't so large that a later alignment would lead to another integer
1216 overflow. */
1217#define checked_request2size(req, sz) \
1218({ \
1219 (sz) = request2size (req); \
1220 if (((sz) < (req)) \
1221 || REQUEST_OUT_OF_RANGE (sz)) \
1222 { \
1223 __set_errno (ENOMEM); \
1224 return 0; \
1225 } \
1226})
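/*
   Worked example (added for exposition, assuming a 64-bit build with
   SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, MALLOC_ALIGN_MASK == 15 and
   MINSIZE == 32):

     request2size (24)  == (24 + 8 + 15) & ~15  ==  32
     request2size (25)  == (25 + 8 + 15) & ~15  ==  48
     request2size (100) == (100 + 8 + 15) & ~15 == 112

   i.e. a request is padded by one size word, rounded up to a multiple
   of MALLOC_ALIGNMENT, and never falls below MINSIZE.
*/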
1227
1228/*
1229 --------------- Physical chunk operations ---------------
1230 */
1231
1232
1233/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1234#define PREV_INUSE 0x1
1235
1236/* extract inuse bit of previous chunk */
1237#define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1238
1239
1240/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1241#define IS_MMAPPED 0x2
1242
1243/* check for mmap()'ed chunk */
1244#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1245
1246
1247/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1248 from a non-main arena. This is only set immediately before handing
1249 the chunk to the user, if necessary. */
1250#define NON_MAIN_ARENA 0x4
1251
1252/* Check for chunk from main arena. */
1253#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1254
1255/* Mark a chunk as not being on the main arena. */
1256#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1257
1258
1259/*
1260 Bits to mask off when extracting size
1261
1262 Note: IS_MMAPPED is intentionally not masked off from size field in
1263 macros for which mmapped chunks should never be seen. This should
1264 cause helpful core dumps to occur if it is tried by accident by
1265 people extending or adapting this malloc.
1266 */
1267#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1268
1269/* Get size, ignoring use bits */
1270#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1271
1272/* Like chunksize, but do not mask SIZE_BITS. */
1273#define chunksize_nomask(p) ((p)->mchunk_size)
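/*
   Worked example (added for exposition): for a chunk p whose size field
   holds 0x111,

     chunksize_nomask (p)  ==  0x111
     chunksize (p)         ==  0x110   (flag bits masked off)
     prev_inuse (p)        is nonzero  (PREV_INUSE, 0x1, set)
     chunk_is_mmapped (p)  ==  0       (IS_MMAPPED, 0x2, clear)
     chunk_main_arena (p)  is nonzero  (NON_MAIN_ARENA, 0x4, clear)
*/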
1274
1275/* Ptr to next physical malloc_chunk. */
1276#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1277
1278/* Size of the chunk below P. Only valid if !prev_inuse (P). */
1279#define prev_size(p) ((p)->mchunk_prev_size)
1280
1281/* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1282#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1283
1284/* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
1285#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1286
1287/* Treat space at ptr + offset as a chunk */
1288#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1289
1290/* extract p's inuse bit */
1291#define inuse(p) \
1292 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1293
1294/* set/clear chunk as being inuse without otherwise disturbing */
1295#define set_inuse(p) \
1296 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1297
1298#define clear_inuse(p) \
1299 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1300
1301
1302/* check/set/clear inuse bits in known places */
1303#define inuse_bit_at_offset(p, s) \
1304 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1305
1306#define set_inuse_bit_at_offset(p, s) \
1307 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1308
1309#define clear_inuse_bit_at_offset(p, s) \
1310 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1311
1312
1313/* Set size at head, without disturbing its use bit */
1314#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1315
1316/* Set size/use field */
1317#define set_head(p, s) ((p)->mchunk_size = (s))
1318
1319/* Set size at footer (only when chunk is not in use) */
1320#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
1321
1322
1323#pragma GCC poison mchunk_size
1324#pragma GCC poison mchunk_prev_size
1325
1326/*
1327 -------------------- Internal data structures --------------------
1328
1329 All internal state is held in an instance of malloc_state defined
1330 below. There are no other static variables, except in two optional
1331 cases:
1332 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1333 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1334 for mmap.
1335
1336 Beware of lots of tricks that minimize the total bookkeeping space
1337 requirements. The result is a little over 1K bytes (for 4byte
1338 pointers and size_t.)
1339 */
1340
1341/*
1342 Bins
1343
1344 An array of bin headers for free chunks. Each bin is doubly
1345 linked. The bins are approximately proportionally (log) spaced.
1346 There are a lot of these bins (128). This may look excessive, but
1347 works very well in practice. Most bins hold sizes that are
1348 unusual as malloc request sizes, but are more usual for fragments
1349 and consolidated sets of chunks, which is what these bins hold, so
1350 they can be found quickly. All procedures maintain the invariant
1351 that no consolidated chunk physically borders another one, so each
  chunk in a list is known to be preceded and followed by either
1353 inuse chunks or the ends of memory.
1354
1355 Chunks in bins are kept in size order, with ties going to the
1356 approximately least recently used chunk. Ordering isn't needed
1357 for the small bins, which all contain the same-sized chunks, but
1358 facilitates best-fit allocation for larger chunks. These lists
1359 are just sequential. Keeping them in order almost never requires
1360 enough traversal to warrant using fancier ordered data
1361 structures.
1362
1363 Chunks of the same size are linked with the most
1364 recently freed at the front, and allocations are taken from the
1365 back. This results in LRU (FIFO) allocation order, which tends
1366 to give each chunk an equal opportunity to be consolidated with
1367 adjacent freed chunks, resulting in larger free chunks and less
1368 fragmentation.
1369
1370 To simplify use in double-linked lists, each bin header acts
1371 as a malloc_chunk. This avoids special-casing for headers.
1372 But to conserve space and improve locality, we allocate
1373 only the fd/bk pointers of bins, and then use repositioning tricks
1374 to treat these as the fields of a malloc_chunk*.
1375 */
1376
1377typedef struct malloc_chunk *mbinptr;
1378
1379/* addressing -- note that bin_at(0) does not exist */
1380#define bin_at(m, i) \
1381 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1382 - offsetof (struct malloc_chunk, fd))
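/*
   For exposition (not in the original comments): bin_at yields a "fake"
   chunk positioned so that its fd and bk fields overlay the two bins[]
   slots reserved for bin i, letting ordinary list code treat a bin
   header like a chunk (av here stands for any mstate):

     mbinptr b = bin_at (av, i);
       b->fd  overlays  av->bins[(i - 1) * 2]       (first chunk in bin)
       b->bk  overlays  av->bins[(i - 1) * 2 + 1]   (last chunk in bin)

   The prev_size/size fields of this fake chunk overlap memory that is
   never accessed through it.
*/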
1383
1384/* analog of ++bin */
1385#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1386
1387/* Reminders about list directionality within bins */
1388#define first(b) ((b)->fd)
1389#define last(b) ((b)->bk)
1390
1391/* Take a chunk off a bin list */
1392#define unlink(AV, P, BK, FD) { \
1393 if (__builtin_expect (chunksize(P) != prev_size (next_chunk(P)), 0)) \
1394 malloc_printerr ("corrupted size vs. prev_size"); \
1395 FD = P->fd; \
1396 BK = P->bk; \
1397 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
1398 malloc_printerr ("corrupted double-linked list"); \
1399 else { \
1400 FD->bk = BK; \
1401 BK->fd = FD; \
1402 if (!in_smallbin_range (chunksize_nomask (P)) \
1403 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
1404 if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0) \
1405 || __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0)) \
1406 malloc_printerr ("corrupted double-linked list (not small)"); \
1407 if (FD->fd_nextsize == NULL) { \
1408 if (P->fd_nextsize == P) \
1409 FD->fd_nextsize = FD->bk_nextsize = FD; \
1410 else { \
1411 FD->fd_nextsize = P->fd_nextsize; \
1412 FD->bk_nextsize = P->bk_nextsize; \
1413 P->fd_nextsize->bk_nextsize = FD; \
1414 P->bk_nextsize->fd_nextsize = FD; \
1415 } \
1416 } else { \
1417 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
1418 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
1419 } \
1420 } \
1421 } \
1422}
1423
1424/*
1425 Indexing
1426
1427 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1428 8 bytes apart. Larger bins are approximately logarithmically spaced:
1429
1430 64 bins of size 8
1431 32 bins of size 64
1432 16 bins of size 512
1433 8 bins of size 4096
1434 4 bins of size 32768
1435 2 bins of size 262144
1436 1 bin of size what's left
1437
1438 There is actually a little bit of slop in the numbers in bin_index
1439 for the sake of speed. This makes no difference elsewhere.
1440
1441 The bins top out around 1MB because we expect to service large
1442 requests via mmap.
1443
1444 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1445 a valid chunk size the small bins are bumped up one.
1446 */
1447
1448#define NBINS 128
1449#define NSMALLBINS 64
1450#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1451#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
1452#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1453
1454#define in_smallbin_range(sz) \
1455 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1456
1457#define smallbin_index(sz) \
1458 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1459 + SMALLBIN_CORRECTION)
1460
1461#define largebin_index_32(sz) \
1462 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1463 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1464 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1465 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1466 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1467 126)
1468
1469#define largebin_index_32_big(sz) \
1470 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1471 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1472 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1473 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1474 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1475 126)
1476
1477// XXX It remains to be seen whether it is good to keep the widths of
1478// XXX the buckets the same or whether it should be scaled by a factor
1479// XXX of two as well.
1480#define largebin_index_64(sz) \
1481 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1482 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1483 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1484 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1485 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1486 126)
1487
1488#define largebin_index(sz) \
1489 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1490 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1491 : largebin_index_32 (sz))
1492
1493#define bin_index(sz) \
1494 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
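
/* Worked examples (illustrative only), assuming the common 64-bit
   configuration where SIZE_SZ == 8 and MALLOC_ALIGNMENT == 2 * SIZE_SZ,
   so SMALLBIN_WIDTH == 16, SMALLBIN_CORRECTION == 0 and
   MIN_LARGE_SIZE == 1024:

     bin_index (32)    == smallbin_index (32)       ==  2  (32-byte chunks)
     bin_index (1008)  == smallbin_index (1008)     == 63  (last small bin)
     bin_index (1024)  == largebin_index_64 (1024)  == 48 + (1024 >> 6)   == 64
     bin_index (20480) == largebin_index_64 (20480) == 110 + (20480 >> 12) == 115

   Other configurations shift the small/large boundary and the first
   large-bin indices accordingly.  */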
1495
1496
1497/*
1498 Unsorted chunks
1499
1500 All remainders from chunk splits, as well as all returned chunks,
1501 are first placed in the "unsorted" bin. They are then placed
1502 in regular bins after malloc gives them ONE chance to be used before
1503 binning. So, basically, the unsorted_chunks list acts as a queue,
1504 with chunks being placed on it in free (and malloc_consolidate),
1505 and taken off (to be either used or placed in bins) in malloc.
1506
1507 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1508 does not have to be taken into account in size comparisons.
1509 */
1510
1511/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1512#define unsorted_chunks(M) (bin_at (M, 1))
1513
1514/*
1515 Top
1516
1517 The top-most available chunk (i.e., the one bordering the end of
1518 available memory) is treated specially. It is never included in
1519 any bin, is used only if no other chunk is available, and is
1520 released back to the system if it is very large (see
1521 M_TRIM_THRESHOLD). Because top initially
1522 points to its own bin with initial zero size, thus forcing
1523 extension on the first malloc request, we avoid having any special
1524 code in malloc to check whether it even exists yet. But we still
1525 need to do so when getting memory from system, so we make
1526 initial_top treat the bin as a legal but unusable chunk during the
1527 interval between initialization and the first call to
1528 sysmalloc. (This is somewhat delicate, since it relies on
1529 the 2 preceding words to be zero during this interval as well.)
1530 */
1531
1532/* Conveniently, the unsorted bin can be used as dummy top on first call */
1533#define initial_top(M) (unsorted_chunks (M))
1534
1535/*
1536 Binmap
1537
1538 To help compensate for the large number of bins, a one-level index
1539 structure is used for bin-by-bin searching. `binmap' is a
1540 bitvector recording whether bins are definitely empty so they can
1541  be skipped over during traversals. The bits are NOT always
1542 cleared as soon as bins are empty, but instead only
1543 when they are noticed to be empty during traversal in malloc.
1544 */
1545
1546/* Conservatively use 32 bits per map word, even on a 64-bit system */
1547#define BINMAPSHIFT 5
1548#define BITSPERMAP (1U << BINMAPSHIFT)
1549#define BINMAPSIZE (NBINS / BITSPERMAP)
1550
1551#define idx2block(i) ((i) >> BINMAPSHIFT)
1552#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1553
1554#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1555#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1556#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
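
/* Illustrative example (not part of the implementation): with
   BINMAPSHIFT == 5 the 128 bin indices are spread over
   BINMAPSIZE == 4 32-bit words.  For bin 68:

     idx2block (68) == 68 >> 5         == 2
     idx2bit (68)   == 1U << (68 & 31) == 0x10

   so mark_bin (m, 68) sets bit 4 of m->binmap[2], and get_binmap
   tests exactly that bit when malloc scans for a non-empty bin.  */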
1557
1558/*
1559 Fastbins
1560
1561 An array of lists holding recently freed small chunks. Fastbins
1562 are not doubly linked. It is faster to single-link them, and
1563 since chunks are never removed from the middles of these lists,
1564 double linking is not necessary. Also, unlike regular bins, they
1565 are not even processed in FIFO order (they use faster LIFO) since
1566 ordering doesn't much matter in the transient contexts in which
1567 fastbins are normally used.
1568
1569 Chunks in fastbins keep their inuse bit set, so they cannot
1570 be consolidated with other free chunks. malloc_consolidate
1571 releases all chunks in fastbins and consolidates them with
1572 other free chunks.
1573 */
1574
1575typedef struct malloc_chunk *mfastbinptr;
1576#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1577
1578/* offset 2 to use otherwise unindexable first 2 bins */
1579#define fastbin_index(sz) \
1580 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1581
1582
1583/* The maximum fastbin request size we support */
1584#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1585
1586#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
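
/* Worked example (illustrative only), for a 64-bit build with
   SIZE_SZ == 8, where chunk sizes are multiples of 16:

     fastbin_index (32)  == (32 >> 4) - 2 == 0
     fastbin_index (48)  == 1
     fastbin_index (64)  == 2
     ...
     fastbin_index (160) == 8

   MAX_FAST_SIZE is then 160 request bytes; assuming the usual
   request2size rounding that corresponds to a 176-byte chunk, giving
   fastbin_index (176) == 9 and hence NFASTBINS == 10.  */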
1587
1588/*
1589 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1590 that triggers automatic consolidation of possibly-surrounding
1591 fastbin chunks. This is a heuristic, so the exact value should not
1592 matter too much. It is defined at half the default trim threshold as a
1593 compromise heuristic to only attempt consolidation if it is likely
1594 to lead to trimming. However, it is not dynamically tunable, since
1595 consolidation reduces fragmentation surrounding large chunks even
1596 if trimming is not used.
1597 */
1598
1599#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1600
1601/*
1602 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1603 regions. Otherwise, contiguity is exploited in merging together,
1604 when possible, results from consecutive MORECORE calls.
1605
1606 The initial value comes from MORECORE_CONTIGUOUS, but is
1607 changed dynamically if mmap is ever used as an sbrk substitute.
1608 */
1609
1610#define NONCONTIGUOUS_BIT (2U)
1611
1612#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1613#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1614#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1615#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1616
1617/* Maximum size of memory handled in fastbins. */
1618static INTERNAL_SIZE_T global_max_fast;
1619
1620/*
1621 Set value of max_fast.
1622  Use an impossibly small value if 0.
1623 Precondition: there are no existing fastbin chunks in the main arena.
1624 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1625 before changing max_fast. Note other arenas will leak their fast bin
1626 entries if max_fast is reduced.
1627 */
1628
1629#define set_max_fast(s) \
1630 global_max_fast = (((s) == 0) \
1631 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
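
/* For example (illustrative, assuming typical 64-bit values with
   SIZE_SZ == 8 and MALLOC_ALIGN_MASK == 15): set_max_fast (128) stores
   (128 + 8) & ~15 == 128, so chunks up to 128 bytes are eligible for
   the fastbins, while set_max_fast (0) stores MIN_CHUNK_SIZE / 2, a
   value smaller than any real chunk, effectively disabling them.  */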
1632
1633static inline INTERNAL_SIZE_T
1634get_max_fast (void)
1635{
1636 /* Tell the GCC optimizers that global_max_fast is never larger
1637 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1638 _int_malloc after constant propagation of the size parameter.
1639 (The code never executes because malloc preserves the
1640 global_max_fast invariant, but the optimizers may not recognize
1641 this.) */
1642 if (global_max_fast > MAX_FAST_SIZE)
1643 __builtin_unreachable ();
1644 return global_max_fast;
1645}
1646
1647/*
1648 ----------- Internal state representation and initialization -----------
1649 */
1650
1651/*
1652 have_fastchunks indicates that there are probably some fastbin chunks.
1653 It is set true on entering a chunk into any fastbin, and cleared early in
1654 malloc_consolidate. The value is approximate since it may be set when there
1655 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1656  available. Given that its sole purpose is to reduce the number of redundant calls to
1657 malloc_consolidate, it does not affect correctness. As a result we can safely
1658 use relaxed atomic accesses.
1659 */
1660
1661
1662struct malloc_state
1663{
1664 /* Serialize access. */
1665 __libc_lock_define (, mutex);
1666
1667 /* Flags (formerly in max_fast). */
1668 int flags;
1669
1670 /* Set if the fastbin chunks contain recently inserted free blocks. */
1671 /* Note this is a bool but not all targets support atomics on booleans. */
1672 int have_fastchunks;
1673
1674 /* Fastbins */
1675 mfastbinptr fastbinsY[NFASTBINS];
1676
1677 /* Base of the topmost chunk -- not otherwise kept in a bin */
1678 mchunkptr top;
1679
1680 /* The remainder from the most recent split of a small request */
1681 mchunkptr last_remainder;
1682
1683 /* Normal bins packed as described above */
1684 mchunkptr bins[NBINS * 2 - 2];
1685
1686 /* Bitmap of bins */
1687 unsigned int binmap[BINMAPSIZE];
1688
1689 /* Linked list */
1690 struct malloc_state *next;
1691
1692 /* Linked list for free arenas. Access to this field is serialized
1693 by free_list_lock in arena.c. */
1694 struct malloc_state *next_free;
1695
1696 /* Number of threads attached to this arena. 0 if the arena is on
1697 the free list. Access to this field is serialized by
1698 free_list_lock in arena.c. */
1699 INTERNAL_SIZE_T attached_threads;
1700
1701 /* Memory allocated from the system in this arena. */
1702 INTERNAL_SIZE_T system_mem;
1703 INTERNAL_SIZE_T max_system_mem;
1704};
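
/* Sizing note (illustrative): bins[] stores only an fd/bk pointer pair
   for each of the NBINS - 1 real bins (bin 0 does not exist), hence
   NBINS * 2 - 2 == 254 entries; bin_at() above maps a bin index onto
   the corresponding pair.  */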
1705
1706struct malloc_par
1707{
1708 /* Tunable parameters */
1709 unsigned long trim_threshold;
1710 INTERNAL_SIZE_T top_pad;
1711 INTERNAL_SIZE_T mmap_threshold;
1712 INTERNAL_SIZE_T arena_test;
1713 INTERNAL_SIZE_T arena_max;
1714
1715 /* Memory map support */
1716 int n_mmaps;
1717 int n_mmaps_max;
1718 int max_n_mmaps;
1719  /* The mmap_threshold is dynamic until the user sets
1720 it manually, at which point we need to disable any
1721 dynamic behavior. */
1722 int no_dyn_threshold;
1723
1724 /* Statistics */
1725 INTERNAL_SIZE_T mmapped_mem;
1726 INTERNAL_SIZE_T max_mmapped_mem;
1727
1728 /* First address handed out by MORECORE/sbrk. */
1729 char *sbrk_base;
1730
1731#if USE_TCACHE
1732 /* Maximum number of buckets to use. */
1733 size_t tcache_bins;
1734 size_t tcache_max_bytes;
1735 /* Maximum number of chunks in each bucket. */
1736 size_t tcache_count;
1737 /* Maximum number of chunks to remove from the unsorted list, which
1738 aren't used to prefill the cache. */
1739 size_t tcache_unsorted_limit;
1740#endif
1741};
1742
1743/* There are several instances of this struct ("arenas") in this
1744 malloc. If you are adapting this malloc in a way that does NOT use
1745 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1746 before using. This malloc relies on the property that malloc_state
1747 is initialized to all zeroes (as is true of C statics). */
1748
1749static struct malloc_state main_arena =
1750{
1751 .mutex = _LIBC_LOCK_INITIALIZER,
1752 .next = &main_arena,
1753 .attached_threads = 1
1754};
1755
1756/* These variables are used for undumping support. Chunks are marked
1757 as using mmap, but we leave them alone if they fall into this
1758 range. NB: The chunk size for these chunks only includes the
1759 initial size field (of SIZE_SZ bytes), there is no trailing size
1760 field (unlike with regular mmapped chunks). */
1761static mchunkptr dumped_main_arena_start; /* Inclusive. */
1762static mchunkptr dumped_main_arena_end; /* Exclusive. */
1763
1764/* True if the pointer falls into the dumped arena. Use this after
1765 chunk_is_mmapped indicates a chunk is mmapped. */
1766#define DUMPED_MAIN_ARENA_CHUNK(p) \
1767 ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
1768
1769/* There is only one instance of the malloc parameters. */
1770
1771static struct malloc_par mp_ =
1772{
1773 .top_pad = DEFAULT_TOP_PAD,
1774 .n_mmaps_max = DEFAULT_MMAP_MAX,
1775 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1776 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1777#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1778 .arena_test = NARENAS_FROM_NCORES (1)
1779#if USE_TCACHE
1780 ,
1781 .tcache_count = TCACHE_FILL_COUNT,
1782 .tcache_bins = TCACHE_MAX_BINS,
1783 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1784 .tcache_unsorted_limit = 0 /* No limit. */
1785#endif
1786};
1787
1788/*
1789 Initialize a malloc_state struct.
1790
1791 This is called from ptmalloc_init () or from _int_new_arena ()
1792 when creating a new arena.
1793 */
1794
1795static void
1796malloc_init_state (mstate av)
1797{
1798 int i;
1799 mbinptr bin;
1800
1801 /* Establish circular links for normal bins */
1802 for (i = 1; i < NBINS; ++i)
1803 {
1804 bin = bin_at (av, i);
1805 bin->fd = bin->bk = bin;
1806 }
1807
1808#if MORECORE_CONTIGUOUS
1809 if (av != &main_arena)
1810#endif
1811 set_noncontiguous (av);
1812 if (av == &main_arena)
1813 set_max_fast (DEFAULT_MXFAST);
1814 atomic_store_relaxed (&av->have_fastchunks, false);
1815
1816 av->top = initial_top (av);
1817}
1818
1819/*
1820 Other internal utilities operating on mstates
1821 */
1822
1823static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1824static int systrim (size_t, mstate);
1825static void malloc_consolidate (mstate);
1826
1827
1828/* -------------- Early definitions for debugging hooks ---------------- */
1829
1830/* Define and initialize the hook variables. These weak definitions must
1831 appear before any use of the variables in a function (arena.c uses one). */
1832#ifndef weak_variable
1833/* In GNU libc we want the hook variables to be weak definitions to
1834 avoid a problem with Emacs. */
1835# define weak_variable weak_function
1836#endif
1837
1838/* Forward declarations. */
1839static void *malloc_hook_ini (size_t sz,
1840 const void *caller) __THROW;
1841static void *realloc_hook_ini (void *ptr, size_t sz,
1842 const void *caller) __THROW;
1843static void *memalign_hook_ini (size_t alignment, size_t sz,
1844 const void *caller) __THROW;
1845
1846#if HAVE_MALLOC_INIT_HOOK
1847void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1848compat_symbol (libc, __malloc_initialize_hook,
1849 __malloc_initialize_hook, GLIBC_2_0);
1850#endif
1851
1852void weak_variable (*__free_hook) (void *__ptr,
1853 const void *) = NULL;
1854void *weak_variable (*__malloc_hook)
1855 (size_t __size, const void *) = malloc_hook_ini;
1856void *weak_variable (*__realloc_hook)
1857 (void *__ptr, size_t __size, const void *)
1858 = realloc_hook_ini;
1859void *weak_variable (*__memalign_hook)
1860 (size_t __alignment, size_t __size, const void *)
1861 = memalign_hook_ini;
1862void weak_variable (*__after_morecore_hook) (void) = NULL;
1863
1864/* This function is called from the arena shutdown hook, to free the
1865 thread cache (if it exists). */
1866static void tcache_thread_shutdown (void);
1867
1868/* ------------------ Testing support ----------------------------------*/
1869
1870static int perturb_byte;
1871
1872static void
1873alloc_perturb (char *p, size_t n)
1874{
1875 if (__glibc_unlikely (perturb_byte))
1876 memset (p, perturb_byte ^ 0xff, n);
1877}
1878
1879static void
1880free_perturb (char *p, size_t n)
1881{
1882 if (__glibc_unlikely (perturb_byte))
1883 memset (p, perturb_byte, n);
1884}
1885
1886
1887
1888#include <stap-probe.h>
1889
1890/* ------------------- Support for multiple arenas -------------------- */
1891#include "arena.c"
1892
1893/*
1894 Debugging support
1895
1896 These routines make a number of assertions about the states
1897 of data structures that should be true at all times. If any
1898 are not true, it's very likely that a user program has somehow
1899 trashed memory. (It's also possible that there is a coding error
1900 in malloc. In which case, please report it!)
1901 */
1902
1903#if !MALLOC_DEBUG
1904
1905# define check_chunk(A, P)
1906# define check_free_chunk(A, P)
1907# define check_inuse_chunk(A, P)
1908# define check_remalloced_chunk(A, P, N)
1909# define check_malloced_chunk(A, P, N)
1910# define check_malloc_state(A)
1911
1912#else
1913
1914# define check_chunk(A, P) do_check_chunk (A, P)
1915# define check_free_chunk(A, P) do_check_free_chunk (A, P)
1916# define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
1917# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
1918# define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
1919# define check_malloc_state(A) do_check_malloc_state (A)
1920
1921/*
1922 Properties of all chunks
1923 */
1924
1925static void
1926do_check_chunk (mstate av, mchunkptr p)
1927{
1928 unsigned long sz = chunksize (p);
1929 /* min and max possible addresses assuming contiguous allocation */
1930 char *max_address = (char *) (av->top) + chunksize (av->top);
1931 char *min_address = max_address - av->system_mem;
1932
1933 if (!chunk_is_mmapped (p))
1934 {
1935 /* Has legal address ... */
1936 if (p != av->top)
1937 {
1938 if (contiguous (av))
1939 {
1940 assert (((char *) p) >= min_address);
1941 assert (((char *) p + sz) <= ((char *) (av->top)));
1942 }
1943 }
1944 else
1945 {
1946 /* top size is always at least MINSIZE */
1947 assert ((unsigned long) (sz) >= MINSIZE);
1948 /* top predecessor always marked inuse */
1949 assert (prev_inuse (p));
1950 }
1951 }
1952 else if (!DUMPED_MAIN_ARENA_CHUNK (p))
1953 {
1954 /* address is outside main heap */
1955 if (contiguous (av) && av->top != initial_top (av))
1956 {
1957 assert (((char *) p) < min_address || ((char *) p) >= max_address);
1958 }
1959 /* chunk is page-aligned */
1960 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
1961 /* mem is aligned */
1962 assert (aligned_OK (chunk2mem (p)));
1963 }
1964}
1965
1966/*
1967 Properties of free chunks
1968 */
1969
1970static void
1971do_check_free_chunk (mstate av, mchunkptr p)
1972{
1973 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
1974 mchunkptr next = chunk_at_offset (p, sz);
1975
1976 do_check_chunk (av, p);
1977
1978 /* Chunk must claim to be free ... */
1979 assert (!inuse (p));
1980 assert (!chunk_is_mmapped (p));
1981
1982 /* Unless a special marker, must have OK fields */
1983 if ((unsigned long) (sz) >= MINSIZE)
1984 {
1985 assert ((sz & MALLOC_ALIGN_MASK) == 0);
1986 assert (aligned_OK (chunk2mem (p)));
1987 /* ... matching footer field */
1988 assert (prev_size (next_chunk (p)) == sz);
1989 /* ... and is fully consolidated */
1990 assert (prev_inuse (p));
1991 assert (next == av->top || inuse (next));
1992
1993 /* ... and has minimally sane links */
1994 assert (p->fd->bk == p);
1995 assert (p->bk->fd == p);
1996 }
1997 else /* markers are always of size SIZE_SZ */
1998 assert (sz == SIZE_SZ);
1999}
2000
2001/*
2002 Properties of inuse chunks
2003 */
2004
2005static void
2006do_check_inuse_chunk (mstate av, mchunkptr p)
2007{
2008 mchunkptr next;
2009
2010 do_check_chunk (av, p);
2011
2012 if (chunk_is_mmapped (p))
2013 return; /* mmapped chunks have no next/prev */
2014
2015 /* Check whether it claims to be in use ... */
2016 assert (inuse (p));
2017
2018 next = next_chunk (p);
2019
2020 /* ... and is surrounded by OK chunks.
2021 Since more things can be checked with free chunks than inuse ones,
2022    if an inuse chunk borders them and debug is on, it's worth checking them.
2023 */
2024 if (!prev_inuse (p))
2025 {
2026 /* Note that we cannot even look at prev unless it is not inuse */
2027 mchunkptr prv = prev_chunk (p);
2028 assert (next_chunk (prv) == p);
2029 do_check_free_chunk (av, prv);
2030 }
2031
2032 if (next == av->top)
2033 {
2034 assert (prev_inuse (next));
2035 assert (chunksize (next) >= MINSIZE);
2036 }
2037 else if (!inuse (next))
2038 do_check_free_chunk (av, next);
2039}
2040
2041/*
2042 Properties of chunks recycled from fastbins
2043 */
2044
2045static void
2046do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2047{
2048 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2049
2050 if (!chunk_is_mmapped (p))
2051 {
2052 assert (av == arena_for_chunk (p));
2053 if (chunk_main_arena (p))
2054 assert (av == &main_arena);
2055 else
2056 assert (av != &main_arena);
2057 }
2058
2059 do_check_inuse_chunk (av, p);
2060
2061 /* Legal size ... */
2062 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2063 assert ((unsigned long) (sz) >= MINSIZE);
2064 /* ... and alignment */
2065 assert (aligned_OK (chunk2mem (p)));
2066 /* chunk is less than MINSIZE more than request */
2067 assert ((long) (sz) - (long) (s) >= 0);
2068 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2069}
2070
2071/*
2072 Properties of nonrecycled chunks at the point they are malloced
2073 */
2074
2075static void
2076do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2077{
2078 /* same as recycled case ... */
2079 do_check_remalloced_chunk (av, p, s);
2080
2081 /*
2082 ... plus, must obey implementation invariant that prev_inuse is
2083 always true of any allocated chunk; i.e., that each allocated
2084 chunk borders either a previously allocated and still in-use
2085 chunk, or the base of its memory arena. This is ensured
2086 by making all allocations from the `lowest' part of any found
2087 chunk. This does not necessarily hold however for chunks
2088 recycled via fastbins.
2089 */
2090
2091 assert (prev_inuse (p));
2092}
2093
2094
2095/*
2096 Properties of malloc_state.
2097
2098 This may be useful for debugging malloc, as well as detecting user
2099 programmer errors that somehow write into malloc_state.
2100
2101 If you are extending or experimenting with this malloc, you can
2102 probably figure out how to hack this routine to print out or
2103 display chunk addresses, sizes, bins, and other instrumentation.
2104 */
2105
2106static void
2107do_check_malloc_state (mstate av)
2108{
2109 int i;
2110 mchunkptr p;
2111 mchunkptr q;
2112 mbinptr b;
2113 unsigned int idx;
2114 INTERNAL_SIZE_T size;
2115 unsigned long total = 0;
2116 int max_fast_bin;
2117
2118 /* internal size_t must be no wider than pointer type */
2119 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2120
2121 /* alignment is a power of 2 */
2122 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2123
2124 /* Check the arena is initialized. */
2125 assert (av->top != 0);
2126
2127 /* No memory has been allocated yet, so doing more tests is not possible. */
2128 if (av->top == initial_top (av))
2129 return;
2130
2131 /* pagesize is a power of 2 */
2132 assert (powerof2(GLRO (dl_pagesize)));
2133
2134 /* A contiguous main_arena is consistent with sbrk_base. */
2135 if (av == &main_arena && contiguous (av))
2136 assert ((char *) mp_.sbrk_base + av->system_mem ==
2137 (char *) av->top + chunksize (av->top));
2138
2139 /* properties of fastbins */
2140
2141 /* max_fast is in allowed range */
2142 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2143
2144 max_fast_bin = fastbin_index (get_max_fast ());
2145
2146 for (i = 0; i < NFASTBINS; ++i)
2147 {
2148 p = fastbin (av, i);
2149
2150 /* The following test can only be performed for the main arena.
2151 While mallopt calls malloc_consolidate to get rid of all fast
2152         bins (especially those larger than the new maximum), this only
2153         happens for the main arena. Trying to do this for any
2154         other arena would mean those arenas have to be locked and
2155         malloc_consolidate called for them, which is excessive. And
2156         even if this were acceptable, it still cannot solve
2157 the problem completely since if the arena is locked a
2158 concurrent malloc call might create a new arena which then
2159 could use the newly invalid fast bins. */
2160
2161 /* all bins past max_fast are empty */
2162 if (av == &main_arena && i > max_fast_bin)
2163 assert (p == 0);
2164
2165 while (p != 0)
2166 {
2167 /* each chunk claims to be inuse */
2168 do_check_inuse_chunk (av, p);
2169 total += chunksize (p);
2170 /* chunk belongs in this bin */
2171 assert (fastbin_index (chunksize (p)) == i);
2172 p = p->fd;
2173 }
2174 }
2175
2176 /* check normal bins */
2177 for (i = 1; i < NBINS; ++i)
2178 {
2179 b = bin_at (av, i);
2180
2181 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2182 if (i >= 2)
2183 {
2184 unsigned int binbit = get_binmap (av, i);
2185 int empty = last (b) == b;
2186 if (!binbit)
2187 assert (empty);
2188 else if (!empty)
2189 assert (binbit);
2190 }
2191
2192 for (p = last (b); p != b; p = p->bk)
2193 {
2194 /* each chunk claims to be free */
2195 do_check_free_chunk (av, p);
2196 size = chunksize (p);
2197 total += size;
2198 if (i >= 2)
2199 {
2200 /* chunk belongs in bin */
2201 idx = bin_index (size);
2202 assert (idx == i);
2203 /* lists are sorted */
2204 assert (p->bk == b ||
2205 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2206
2207 if (!in_smallbin_range (size))
2208 {
2209 if (p->fd_nextsize != NULL)
2210 {
2211 if (p->fd_nextsize == p)
2212 assert (p->bk_nextsize == p);
2213 else
2214 {
2215 if (p->fd_nextsize == first (b))
2216 assert (chunksize (p) < chunksize (p->fd_nextsize));
2217 else
2218 assert (chunksize (p) > chunksize (p->fd_nextsize));
2219
2220 if (p == first (b))
2221 assert (chunksize (p) > chunksize (p->bk_nextsize));
2222 else
2223 assert (chunksize (p) < chunksize (p->bk_nextsize));
2224 }
2225 }
2226 else
2227 assert (p->bk_nextsize == NULL);
2228 }
2229 }
2230 else if (!in_smallbin_range (size))
2231 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2232 /* chunk is followed by a legal chain of inuse chunks */
2233 for (q = next_chunk (p);
2234 (q != av->top && inuse (q) &&
2235 (unsigned long) (chunksize (q)) >= MINSIZE);
2236 q = next_chunk (q))
2237 do_check_inuse_chunk (av, q);
2238 }
2239 }
2240
2241 /* top chunk is OK */
2242 check_chunk (av, av->top);
2243}
2244#endif
2245
2246
2247/* ----------------- Support for debugging hooks -------------------- */
2248#include "hooks.c"
2249
2250
2251/* ----------- Routines dealing with system allocation -------------- */
2252
2253/*
2254 sysmalloc handles malloc cases requiring more memory from the system.
2255 On entry, it is assumed that av->top does not have enough
2256 space to service request for nb bytes, thus requiring that av->top
2257 be extended or replaced.
2258 */
2259
2260static void *
2261sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2262{
2263 mchunkptr old_top; /* incoming value of av->top */
2264 INTERNAL_SIZE_T old_size; /* its size */
2265 char *old_end; /* its end address */
2266
2267 long size; /* arg to first MORECORE or mmap call */
2268 char *brk; /* return value from MORECORE */
2269
2270 long correction; /* arg to 2nd MORECORE call */
2271 char *snd_brk; /* 2nd return val */
2272
2273 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2274 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2275 char *aligned_brk; /* aligned offset into brk */
2276
2277 mchunkptr p; /* the allocated/returned chunk */
2278 mchunkptr remainder; /* remainder from allocation */
2279 unsigned long remainder_size; /* its size */
2280
2281
2282 size_t pagesize = GLRO (dl_pagesize);
2283 bool tried_mmap = false;
2284
2285
2286 /*
2287 If have mmap, and the request size meets the mmap threshold, and
2288 the system supports mmap, and there are few enough currently
2289 allocated mmapped regions, try to directly map this request
2290 rather than expanding top.
2291 */
2292
2293 if (av == NULL
2294 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2295 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2296 {
2297 char *mm; /* return value from mmap call*/
2298
2299 try_mmap:
2300 /*
2301 Round up size to nearest page. For mmapped chunks, the overhead
2302 is one SIZE_SZ unit larger than for normal chunks, because there
2303 is no following chunk whose prev_size field could be used.
2304
2305 See the front_misalign handling below, for glibc there is no
2306     need for further alignment unless we have high alignment.
2307 */
2308 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2309 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2310 else
2311 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2312 tried_mmap = true;
2313
2314 /* Don't try if size wraps around 0 */
2315 if ((unsigned long) (size) > (unsigned long) (nb))
2316 {
2317 mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2318
2319 if (mm != MAP_FAILED)
2320 {
2321 /*
2322 The offset to the start of the mmapped region is stored
2323 in the prev_size field of the chunk. This allows us to adjust
2324 returned start address to meet alignment requirements here
2325 and in memalign(), and still be able to compute proper
2326 address argument for later munmap in free() and realloc().
2327 */
2328
2329 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2330 {
2331 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2332 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2333 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2334 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2335 front_misalign = 0;
2336 }
2337 else
2338 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2339 if (front_misalign > 0)
2340 {
2341 correction = MALLOC_ALIGNMENT - front_misalign;
2342 p = (mchunkptr) (mm + correction);
2343 set_prev_size (p, correction);
2344 set_head (p, (size - correction) | IS_MMAPPED);
2345 }
2346 else
2347 {
2348 p = (mchunkptr) mm;
2349 set_prev_size (p, 0);
2350 set_head (p, size | IS_MMAPPED);
2351 }
2352
2353 /* update statistics */
2354
2355 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2356 atomic_max (&mp_.max_n_mmaps, new);
2357
2358 unsigned long sum;
2359 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2360 atomic_max (&mp_.max_mmapped_mem, sum);
2361
2362 check_chunk (av, p);
2363
2364 return chunk2mem (p);
2365 }
2366 }
2367 }
2368
2369 /* There are no usable arenas and mmap also failed. */
2370 if (av == NULL)
2371 return 0;
2372
2373 /* Record incoming configuration of top */
2374
2375 old_top = av->top;
2376 old_size = chunksize (old_top);
2377 old_end = (char *) (chunk_at_offset (old_top, old_size));
2378
2379 brk = snd_brk = (char *) (MORECORE_FAILURE);
2380
2381 /*
2382 If not the first time through, we require old_size to be
2383 at least MINSIZE and to have prev_inuse set.
2384 */
2385
2386 assert ((old_top == initial_top (av) && old_size == 0) ||
2387 ((unsigned long) (old_size) >= MINSIZE &&
2388 prev_inuse (old_top) &&
2389 ((unsigned long) old_end & (pagesize - 1)) == 0));
2390
2391 /* Precondition: not enough current space to satisfy nb request */
2392 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2393
2394
2395 if (av != &main_arena)
2396 {
2397 heap_info *old_heap, *heap;
2398 size_t old_heap_size;
2399
2400 /* First try to extend the current heap. */
2401 old_heap = heap_for_ptr (old_top);
2402 old_heap_size = old_heap->size;
2403 if ((long) (MINSIZE + nb - old_size) > 0
2404 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2405 {
2406 av->system_mem += old_heap->size - old_heap_size;
2407 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2408 | PREV_INUSE);
2409 }
2410 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2411 {
2412 /* Use a newly allocated heap. */
2413 heap->ar_ptr = av;
2414 heap->prev = old_heap;
2415 av->system_mem += heap->size;
2416 /* Set up the new top. */
2417 top (av) = chunk_at_offset (heap, sizeof (*heap));
2418 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2419
2420 /* Setup fencepost and free the old top chunk with a multiple of
2421 MALLOC_ALIGNMENT in size. */
2422 /* The fencepost takes at least MINSIZE bytes, because it might
2423 become the top chunk again later. Note that a footer is set
2424 up, too, although the chunk is marked in use. */
2425 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2426 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
2427 if (old_size >= MINSIZE)
2428 {
2429 set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
2430 set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
2431 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2432 _int_free (av, old_top, 1);
2433 }
2434 else
2435 {
2436 set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
2437 set_foot (old_top, (old_size + 2 * SIZE_SZ));
2438 }
2439 }
2440 else if (!tried_mmap)
2441      /* We can at least try to use mmap to get memory. */
2442 goto try_mmap;
2443 }
2444 else /* av == main_arena */
2445
2446
2447 { /* Request enough space for nb + pad + overhead */
2448 size = nb + mp_.top_pad + MINSIZE;
2449
2450 /*
2451 If contiguous, we can subtract out existing space that we hope to
2452 combine with new space. We add it back later only if
2453 we don't actually get contiguous space.
2454 */
2455
2456 if (contiguous (av))
2457 size -= old_size;
2458
2459 /*
2460 Round to a multiple of page size.
2461 If MORECORE is not contiguous, this ensures that we only call it
2462 with whole-page arguments. And if MORECORE is contiguous and
2463 this is not first time through, this preserves page-alignment of
2464 previous calls. Otherwise, we correct to page-align below.
2465 */
2466
2467 size = ALIGN_UP (size, pagesize);
2468
2469 /*
2470 Don't try to call MORECORE if argument is so big as to appear
2471 negative. Note that since mmap takes size_t arg, it may succeed
2472 below even if we cannot call MORECORE.
2473 */
2474
2475 if (size > 0)
2476 {
2477 brk = (char *) (MORECORE (size));
2478 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2479 }
2480
2481 if (brk != (char *) (MORECORE_FAILURE))
2482 {
2483 /* Call the `morecore' hook if necessary. */
2484 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2485 if (__builtin_expect (hook != NULL, 0))
2486 (*hook)();
2487 }
2488 else
2489 {
2490 /*
2491 If have mmap, try using it as a backup when MORECORE fails or
2492 cannot be used. This is worth doing on systems that have "holes" in
2493 address space, so sbrk cannot extend to give contiguous space, but
2494 space is available elsewhere. Note that we ignore mmap max count
2495 and threshold limits, since the space will not be used as a
2496 segregated mmap region.
2497 */
2498
2499 /* Cannot merge with old top, so add its size back in */
2500 if (contiguous (av))
2501 size = ALIGN_UP (size + old_size, pagesize);
2502
2503 /* If we are relying on mmap as backup, then use larger units */
2504 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2505 size = MMAP_AS_MORECORE_SIZE;
2506
2507 /* Don't try if size wraps around 0 */
2508 if ((unsigned long) (size) > (unsigned long) (nb))
2509 {
2510 char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2511
2512 if (mbrk != MAP_FAILED)
2513 {
2514 /* We do not need, and cannot use, another sbrk call to find end */
2515 brk = mbrk;
2516 snd_brk = brk + size;
2517
2518 /*
2519 Record that we no longer have a contiguous sbrk region.
2520 After the first time mmap is used as backup, we do not
2521 ever rely on contiguous space since this could incorrectly
2522 bridge regions.
2523 */
2524 set_noncontiguous (av);
2525 }
2526 }
2527 }
2528
2529 if (brk != (char *) (MORECORE_FAILURE))
2530 {
2531 if (mp_.sbrk_base == 0)
2532 mp_.sbrk_base = brk;
2533 av->system_mem += size;
2534
2535 /*
2536 If MORECORE extends previous space, we can likewise extend top size.
2537 */
2538
2539 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2540 set_head (old_top, (size + old_size) | PREV_INUSE);
2541
2542 else if (contiguous (av) && old_size && brk < old_end)
2543        /* Oops! Someone else killed our space. Can't touch anything. */
2544 malloc_printerr ("break adjusted to free malloc space");
2545
2546 /*
2547 Otherwise, make adjustments:
2548
2549 * If the first time through or noncontiguous, we need to call sbrk
2550 just to find out where the end of memory lies.
2551
2552 * We need to ensure that all returned chunks from malloc will meet
2553 MALLOC_ALIGNMENT
2554
2555 * If there was an intervening foreign sbrk, we need to adjust sbrk
2556 request size to account for fact that we will not be able to
2557 combine new space with existing space in old_top.
2558
2559 * Almost all systems internally allocate whole pages at a time, in
2560 which case we might as well use the whole last page of request.
2561 So we allocate enough more memory to hit a page boundary now,
2562 which in turn causes future contiguous calls to page-align.
2563 */
2564
2565 else
2566 {
2567 front_misalign = 0;
2568 end_misalign = 0;
2569 correction = 0;
2570 aligned_brk = brk;
2571
2572 /* handle contiguous cases */
2573 if (contiguous (av))
2574 {
2575 /* Count foreign sbrk as system_mem. */
2576 if (old_size)
2577 av->system_mem += brk - old_end;
2578
2579 /* Guarantee alignment of first new chunk made from this space */
2580
2581 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2582 if (front_misalign > 0)
2583 {
2584 /*
2585 Skip over some bytes to arrive at an aligned position.
2586 We don't need to specially mark these wasted front bytes.
2587 They will never be accessed anyway because
2588 prev_inuse of av->top (and any chunk created from its start)
2589 is always true after initialization.
2590 */
2591
2592 correction = MALLOC_ALIGNMENT - front_misalign;
2593 aligned_brk += correction;
2594 }
2595
2596 /*
2597 If this isn't adjacent to existing space, then we will not
2598 be able to merge with old_top space, so must add to 2nd request.
2599 */
2600
2601 correction += old_size;
2602
2603 /* Extend the end address to hit a page boundary */
2604 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2605 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2606
2607 assert (correction >= 0);
2608 snd_brk = (char *) (MORECORE (correction));
2609
2610 /*
2611 If can't allocate correction, try to at least find out current
2612 brk. It might be enough to proceed without failing.
2613
2614 Note that if second sbrk did NOT fail, we assume that space
2615            is contiguous with the first sbrk. This is a safe assumption unless
2616            the program is multithreaded but doesn't use locks and a foreign sbrk
2617 occurred between our first and second calls.
2618 */
2619
2620 if (snd_brk == (char *) (MORECORE_FAILURE))
2621 {
2622 correction = 0;
2623 snd_brk = (char *) (MORECORE (0));
2624 }
2625 else
2626 {
2627 /* Call the `morecore' hook if necessary. */
2628 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2629 if (__builtin_expect (hook != NULL, 0))
2630 (*hook)();
2631 }
2632 }
2633
2634 /* handle non-contiguous cases */
2635 else
2636 {
2637 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2638 /* MORECORE/mmap must correctly align */
2639 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2640 else
2641 {
2642 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2643 if (front_misalign > 0)
2644 {
2645 /*
2646 Skip over some bytes to arrive at an aligned position.
2647 We don't need to specially mark these wasted front bytes.
2648 They will never be accessed anyway because
2649 prev_inuse of av->top (and any chunk created from its start)
2650 is always true after initialization.
2651 */
2652
2653 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2654 }
2655 }
2656
2657 /* Find out current end of memory */
2658 if (snd_brk == (char *) (MORECORE_FAILURE))
2659 {
2660 snd_brk = (char *) (MORECORE (0));
2661 }
2662 }
2663
2664 /* Adjust top based on results of second sbrk */
2665 if (snd_brk != (char *) (MORECORE_FAILURE))
2666 {
2667 av->top = (mchunkptr) aligned_brk;
2668 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2669 av->system_mem += correction;
2670
2671 /*
2672 If not the first time through, we either have a
2673 gap due to foreign sbrk or a non-contiguous region. Insert a
2674 double fencepost at old_top to prevent consolidation with space
2675 we don't own. These fenceposts are artificial chunks that are
2676 marked as inuse and are in any case too small to use. We need
2677 two to make sizes and alignments work out.
2678 */
2679
2680 if (old_size != 0)
2681 {
2682 /*
2683 Shrink old_top to insert fenceposts, keeping size a
2684 multiple of MALLOC_ALIGNMENT. We know there is at least
2685 enough space in old_top to do this.
2686 */
2687 old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2688 set_head (old_top, old_size | PREV_INUSE);
2689
2690 /*
2691 Note that the following assignments completely overwrite
2692 old_top when old_size was previously MINSIZE. This is
2693 intentional. We need the fencepost, even if old_top otherwise gets
2694 lost.
2695 */
2696 set_head (chunk_at_offset (old_top, old_size),
2697 (2 * SIZE_SZ) | PREV_INUSE);
2698 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
2699 (2 * SIZE_SZ) | PREV_INUSE);
2700
2701 /* If possible, release the rest. */
2702 if (old_size >= MINSIZE)
2703 {
2704 _int_free (av, old_top, 1);
2705 }
2706 }
2707 }
2708 }
2709 }
2710 } /* if (av != &main_arena) */
2711
2712 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2713 av->max_system_mem = av->system_mem;
2714 check_malloc_state (av);
2715
2716 /* finally, do the allocation */
2717 p = av->top;
2718 size = chunksize (p);
2719
2720 /* check that one of the above allocation paths succeeded */
2721 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2722 {
2723 remainder_size = size - nb;
2724 remainder = chunk_at_offset (p, nb);
2725 av->top = remainder;
2726 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2727 set_head (remainder, remainder_size | PREV_INUSE);
2728 check_malloced_chunk (av, p, nb);
2729 return chunk2mem (p);
2730 }
2731
2732 /* catch all failure paths */
2733 __set_errno (ENOMEM);
2734 return 0;
2735}
2736
2737
2738/*
2739 systrim is an inverse of sorts to sysmalloc. It gives memory back
2740 to the system (via negative arguments to sbrk) if there is unused
2741 memory at the `high' end of the malloc pool. It is called
2742 automatically by free() when top space exceeds the trim
2743 threshold. It is also called by the public malloc_trim routine. It
2744 returns 1 if it actually released any memory, else 0.
2745 */
2746
2747static int
2748systrim (size_t pad, mstate av)
2749{
2750 long top_size; /* Amount of top-most memory */
2751 long extra; /* Amount to release */
2752 long released; /* Amount actually released */
2753 char *current_brk; /* address returned by pre-check sbrk call */
2754 char *new_brk; /* address returned by post-check sbrk call */
2755 size_t pagesize;
2756 long top_area;
2757
2758 pagesize = GLRO (dl_pagesize);
2759 top_size = chunksize (av->top);
2760
2761 top_area = top_size - MINSIZE - 1;
2762 if (top_area <= pad)
2763 return 0;
2764
2765 /* Release in pagesize units and round down to the nearest page. */
2766 extra = ALIGN_DOWN(top_area - pad, pagesize);
2767
2768 if (extra == 0)
2769 return 0;
2770
2771 /*
2772 Only proceed if end of memory is where we last set it.
2773 This avoids problems if there were foreign sbrk calls.
2774 */
2775 current_brk = (char *) (MORECORE (0));
2776 if (current_brk == (char *) (av->top) + top_size)
2777 {
2778 /*
2779 Attempt to release memory. We ignore MORECORE return value,
2780 and instead call again to find out where new end of memory is.
2781 This avoids problems if first call releases less than we asked,
2782         or if failure somehow altered the brk value. (We could still
2783 encounter problems if it altered brk in some very bad way,
2784 but the only thing we can do is adjust anyway, which will cause
2785 some downstream failure.)
2786 */
2787
2788 MORECORE (-extra);
2789 /* Call the `morecore' hook if necessary. */
2790 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2791 if (__builtin_expect (hook != NULL, 0))
2792 (*hook)();
2793 new_brk = (char *) (MORECORE (0));
2794
2795 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2796
2797 if (new_brk != (char *) MORECORE_FAILURE)
2798 {
2799 released = (long) (current_brk - new_brk);
2800
2801 if (released != 0)
2802 {
2803 /* Success. Adjust top. */
2804 av->system_mem -= released;
2805 set_head (av->top, (top_size - released) | PREV_INUSE);
2806 check_malloc_state (av);
2807 return 1;
2808 }
2809 }
2810 }
2811 return 0;
2812}
2813
2814static void
2815munmap_chunk (mchunkptr p)
2816{
2817 size_t pagesize = GLRO (dl_pagesize);
2818 INTERNAL_SIZE_T size = chunksize (p);
2819
2820 assert (chunk_is_mmapped (p));
2821
2822 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
2823 main arena. We never free this memory. */
2824 if (DUMPED_MAIN_ARENA_CHUNK (p))
2825 return;
2826
2827 uintptr_t mem = (uintptr_t) chunk2mem (p);
2828 uintptr_t block = (uintptr_t) p - prev_size (p);
2829 size_t total_size = prev_size (p) + size;
2830  /* Unfortunately we have to do the compiler's job by hand here. Normally
2831 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2832 page size. But gcc does not recognize the optimization possibility
2833     (at the moment, at least) so we combine the two values into one before
2834 the bit test. */
2835 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
2836 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
2837 malloc_printerr ("munmap_chunk(): invalid pointer");
2838
2839 atomic_decrement (&mp_.n_mmaps);
2840 atomic_add (&mp_.mmapped_mem, -total_size);
2841
2842 /* If munmap failed the process virtual memory address space is in a
2843 bad shape. Just leave the block hanging around, the process will
2844 terminate shortly anyway since not much can be done. */
2845 __munmap ((char *) block, total_size);
2846}
2847
2848#if HAVE_MREMAP
2849
2850static mchunkptr
2851mremap_chunk (mchunkptr p, size_t new_size)
2852{
2853 size_t pagesize = GLRO (dl_pagesize);
2854 INTERNAL_SIZE_T offset = prev_size (p);
2855 INTERNAL_SIZE_T size = chunksize (p);
2856 char *cp;
2857
2858 assert (chunk_is_mmapped (p));
2859
2860 uintptr_t block = (uintptr_t) p - offset;
2861 uintptr_t mem = (uintptr_t) chunk2mem(p);
2862 size_t total_size = offset + size;
2863 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
2864 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
2865 malloc_printerr("mremap_chunk(): invalid pointer");
2866
2867 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2868 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
2869
2870 /* No need to remap if the number of pages does not change. */
2871 if (total_size == new_size)
2872 return p;
2873
2874 cp = (char *) __mremap ((char *) block, total_size, new_size,
2875 MREMAP_MAYMOVE);
2876
2877 if (cp == MAP_FAILED)
2878 return 0;
2879
2880 p = (mchunkptr) (cp + offset);
2881
2882 assert (aligned_OK (chunk2mem (p)));
2883
2884 assert (prev_size (p) == offset);
2885 set_head (p, (new_size - offset) | IS_MMAPPED);
2886
2887 INTERNAL_SIZE_T new;
2888 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
2889 + new_size - size - offset;
2890 atomic_max (&mp_.max_mmapped_mem, new);
2891 return p;
2892}
2893#endif /* HAVE_MREMAP */
2894
2895/*------------------------ Public wrappers. --------------------------------*/
2896
2897#if USE_TCACHE
2898
2899/* We overlay this structure on the user-data portion of a chunk when
2900 the chunk is stored in the per-thread cache. */
2901typedef struct tcache_entry
2902{
2903 struct tcache_entry *next;
2904 /* This field exists to detect double frees. */
2905 struct tcache_perthread_struct *key;
2906} tcache_entry;
2907
2908/* There is one of these for each thread, which contains the
2909 per-thread cache (hence "tcache_perthread_struct"). Keeping
2910 overall size low is mildly important. Note that COUNTS and ENTRIES
2911 are redundant (we could have just counted the linked list each
2912   time); keeping explicit counts is purely for performance. */
2913typedef struct tcache_perthread_struct
2914{
2915 uint16_t counts[TCACHE_MAX_BINS];
2916 tcache_entry *entries[TCACHE_MAX_BINS];
2917} tcache_perthread_struct;
2918
2919static __thread bool tcache_shutting_down = false;
2920static __thread tcache_perthread_struct *tcache = NULL;
2921
2922/* Caller must ensure that we know tc_idx is valid and there's room
2923 for more chunks. */
2924static __always_inline void
2925tcache_put (mchunkptr chunk, size_t tc_idx)
2926{
2927 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
2928
2929 /* Mark this chunk as "in the tcache" so the test in _int_free will
2930 detect a double free. */
2931 e->key = tcache;
2932
2933 e->next = tcache->entries[tc_idx];
2934 tcache->entries[tc_idx] = e;
2935 ++(tcache->counts[tc_idx]);
2936}
2937
2938/* Caller must ensure that we know tc_idx is valid and there's
2939 available chunks to remove. */
2940static __always_inline void *
2941tcache_get (size_t tc_idx)
2942{
2943 tcache_entry *e = tcache->entries[tc_idx];
2944 tcache->entries[tc_idx] = e->next;
2945 --(tcache->counts[tc_idx]);
2946 e->key = NULL;
2947 return (void *) e;
2948}
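
/* Illustrative sketch (not part of the implementation): each per-thread
   bin behaves as a simple LIFO stack of same-sized chunks.  Assuming
   chunk_a and chunk_b both map to the same tc_idx:

     tcache_put (chunk_a, tc_idx);   // counts[tc_idx] == 1, head is a
     tcache_put (chunk_b, tc_idx);   // counts[tc_idx] == 2, head is b
     tcache_get (tc_idx);            // returns b's user data
     tcache_get (tc_idx);            // returns a's user data, bin empty

   The key field is written on put and cleared on get, which is what
   lets _int_free recognize a double free of a chunk that is already
   sitting in the cache.  */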
2949
2950static void
2951tcache_thread_shutdown (void)
2952{
2953 int i;
2954 tcache_perthread_struct *tcache_tmp = tcache;
2955
2956 if (!tcache)
2957 return;
2958
2959 /* Disable the tcache and prevent it from being reinitialized. */
2960 tcache = NULL;
2961 tcache_shutting_down = true;
2962
2963 /* Free all of the entries and the tcache itself back to the arena
2964 heap for coalescing. */
2965 for (i = 0; i < TCACHE_MAX_BINS; ++i)
2966 {
2967 while (tcache_tmp->entries[i])
2968 {
2969 tcache_entry *e = tcache_tmp->entries[i];
2970 tcache_tmp->entries[i] = e->next;
2971 __libc_free (e);
2972 }
2973 }
2974
2975 __libc_free (tcache_tmp);
2976}
2977
2978static void
2979tcache_init(void)
2980{
2981 mstate ar_ptr;
2982 void *victim = 0;
2983 const size_t bytes = sizeof (tcache_perthread_struct);
2984
2985 if (tcache_shutting_down)
2986 return;
2987
2988 arena_get (ar_ptr, bytes);
2989 victim = _int_malloc (ar_ptr, bytes);
2990 if (!victim && ar_ptr != NULL)
2991 {
2992 ar_ptr = arena_get_retry (ar_ptr, bytes);
2993 victim = _int_malloc (ar_ptr, bytes);
2994 }
2995
2996
2997 if (ar_ptr != NULL)
2998 __libc_lock_unlock (ar_ptr->mutex);
2999
3000 /* In a low memory situation, we may not be able to allocate memory
3001 - in which case, we just keep trying later. However, we
3002 typically do this very early, so either there is sufficient
3003 memory, or there isn't enough memory to do non-trivial
3004 allocations anyway. */
3005 if (victim)
3006 {
3007 tcache = (tcache_perthread_struct *) victim;
3008 memset (tcache, 0, sizeof (tcache_perthread_struct));
3009 }
3010
3011}
3012
3013# define MAYBE_INIT_TCACHE() \
3014 if (__glibc_unlikely (tcache == NULL)) \
3015 tcache_init();
3016
3017#else /* !USE_TCACHE */
3018# define MAYBE_INIT_TCACHE()
3019
3020static void
3021tcache_thread_shutdown (void)
3022{
3023 /* Nothing to do if there is no thread cache. */
3024}
3025
3026#endif /* !USE_TCACHE */
3027
3028void *
3029__libc_malloc (size_t bytes)
3030{
3031 mstate ar_ptr;
3032 void *victim;
3033
3034 void *(*hook) (size_t, const void *)
3035 = atomic_forced_read (__malloc_hook);
3036 if (__builtin_expect (hook != NULL, 0))
3037 return (*hook)(bytes, RETURN_ADDRESS (0));
3038#if USE_TCACHE
3039  /* int_free also calls request2size, so be careful not to pad twice. */
3040 size_t tbytes;
3041 checked_request2size (bytes, tbytes);
3042 size_t tc_idx = csize2tidx (tbytes);
3043
3044 MAYBE_INIT_TCACHE ();
3045
3046 DIAG_PUSH_NEEDS_COMMENT;
3047 if (tc_idx < mp_.tcache_bins
3048 && tcache
3049 && tcache->counts[tc_idx] > 0)
3050 {
3051 return tcache_get (tc_idx);
3052 }
3053 DIAG_POP_NEEDS_COMMENT;
3054#endif
3055
3056 if (SINGLE_THREAD_P)
3057 {
3058 victim = _int_malloc (&main_arena, bytes);
3059 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3060 &main_arena == arena_for_chunk (mem2chunk (victim)));
3061 return victim;
3062 }
3063
3064 arena_get (ar_ptr, bytes);
3065
3066 victim = _int_malloc (ar_ptr, bytes);
3067 /* Retry with another arena only if we were able to find a usable arena
3068 before. */
3069 if (!victim && ar_ptr != NULL)
3070 {
3071 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3072 ar_ptr = arena_get_retry (ar_ptr, bytes);
3073 victim = _int_malloc (ar_ptr, bytes);
3074 }
3075
3076 if (ar_ptr != NULL)
3077 __libc_lock_unlock (ar_ptr->mutex);
3078
3079 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3080 ar_ptr == arena_for_chunk (mem2chunk (victim)));
3081 return victim;
3082}
3083libc_hidden_def (__libc_malloc)
3084
3085void
3086__libc_free (void *mem)
3087{
3088 mstate ar_ptr;
3089 mchunkptr p; /* chunk corresponding to mem */
3090
3091 void (*hook) (void *, const void *)
3092 = atomic_forced_read (__free_hook);
3093 if (__builtin_expect (hook != NULL, 0))
3094 {
3095 (*hook)(mem, RETURN_ADDRESS (0));
3096 return;
3097 }
3098
3099 if (mem == 0) /* free(0) has no effect */
3100 return;
3101
3102 p = mem2chunk (mem);
3103
3104 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3105 {
3106 /* See if the dynamic brk/mmap threshold needs adjusting.
3107 Dumped fake mmapped chunks do not affect the threshold. */
3108 if (!mp_.no_dyn_threshold
3109 && chunksize_nomask (p) > mp_.mmap_threshold
3110 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
3111 && !DUMPED_MAIN_ARENA_CHUNK (p))
3112 {
3113 mp_.mmap_threshold = chunksize (p);
3114 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3115 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3116 mp_.mmap_threshold, mp_.trim_threshold);
3117 }
3118 munmap_chunk (p);
3119 return;
3120 }
3121
3122 MAYBE_INIT_TCACHE ();
3123
3124 ar_ptr = arena_for_chunk (p);
3125 _int_free (ar_ptr, p, 0);
3126}
3127libc_hidden_def (__libc_free)
3128
3129void *
3130__libc_realloc (void *oldmem, size_t bytes)
3131{
3132 mstate ar_ptr;
3133 INTERNAL_SIZE_T nb; /* padded request size */
3134
3135 void *newp; /* chunk to return */
3136
3137 void *(*hook) (void *, size_t, const void *) =
3138 atomic_forced_read (__realloc_hook);
3139 if (__builtin_expect (hook != NULL, 0))
3140 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3141
3142#if REALLOC_ZERO_BYTES_FREES
3143 if (bytes == 0 && oldmem != NULL)
3144 {
3145 __libc_free (oldmem); return 0;
3146 }
3147#endif
3148
3149 /* realloc of null is supposed to be same as malloc */
3150 if (oldmem == 0)
3151 return __libc_malloc (bytes);
3152
3153 /* chunk corresponding to oldmem */
3154 const mchunkptr oldp = mem2chunk (oldmem);
3155 /* its size */
3156 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
3157
3158 if (chunk_is_mmapped (oldp))
3159 ar_ptr = NULL;
3160 else
3161 {
3162 MAYBE_INIT_TCACHE ();
3163 ar_ptr = arena_for_chunk (oldp);
3164 }
3165
3166 /* Little security check which won't hurt performance: the allocator
3167     never wraps around at the end of the address space. Therefore
3168 we can exclude some size values which might appear here by
3169 accident or by "design" from some intruder. We need to bypass
3170 this check for dumped fake mmap chunks from the old main arena
3171 because the new malloc may provide additional alignment. */
3172 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3173 || __builtin_expect (misaligned_chunk (oldp), 0))
3174 && !DUMPED_MAIN_ARENA_CHUNK (oldp))
3175 malloc_printerr ("realloc(): invalid pointer");
3176
3177 checked_request2size (bytes, nb);
3178
3179 if (chunk_is_mmapped (oldp))
3180 {
3181 /* If this is a faked mmapped chunk from the dumped main arena,
3182 always make a copy (and do not free the old chunk). */
3183 if (DUMPED_MAIN_ARENA_CHUNK (oldp))
3184 {
3185 /* Must alloc, copy, free. */
3186 void *newmem = __libc_malloc (bytes);
3187 if (newmem == 0)
3188 return NULL;
3189 /* Copy as many bytes as are available from the old chunk
3190 and fit into the new size. NB: The overhead for faked
3191 mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
3192 regular mmapped chunks. */
3193 if (bytes > oldsize - SIZE_SZ)
3194 bytes = oldsize - SIZE_SZ;
3195 memcpy (newmem, oldmem, bytes);
3196 return newmem;
3197 }
3198
3199 void *newmem;
3200
3201#if HAVE_MREMAP
3202 newp = mremap_chunk (oldp, nb);
3203 if (newp)
3204 return chunk2mem (newp);
3205#endif
3206 /* Note the extra SIZE_SZ overhead. */
3207 if (oldsize - SIZE_SZ >= nb)
3208 return oldmem; /* do nothing */
3209
3210 /* Must alloc, copy, free. */
3211 newmem = __libc_malloc (bytes);
3212 if (newmem == 0)
3213 return 0; /* propagate failure */
3214
3215 memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
3216 munmap_chunk (oldp);
3217 return newmem;
3218 }
3219
3220 if (SINGLE_THREAD_P)
3221 {
3222 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3223 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3224 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3225
3226 return newp;
3227 }
3228
3229 __libc_lock_lock (ar_ptr->mutex);
3230
3231 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3232
3233 __libc_lock_unlock (ar_ptr->mutex);
3234 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3235 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3236
3237 if (newp == NULL)
3238 {
3239 /* Try harder to allocate memory in other arenas. */
3240 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3241 newp = __libc_malloc (bytes);
3242 if (newp != NULL)
3243 {
3244 memcpy (newp, oldmem, oldsize - SIZE_SZ);
3245 _int_free (ar_ptr, oldp, 0);
3246 }
3247 }
3248
3249 return newp;
3250}
3251libc_hidden_def (__libc_realloc)
3252
3253void *
3254__libc_memalign (size_t alignment, size_t bytes)
3255{
3256 void *address = RETURN_ADDRESS (0);
3257 return _mid_memalign (alignment, bytes, address);
3258}
3259
3260static void *
3261_mid_memalign (size_t alignment, size_t bytes, void *address)
3262{
3263 mstate ar_ptr;
3264 void *p;
3265
3266 void *(*hook) (size_t, size_t, const void *) =
3267 atomic_forced_read (__memalign_hook);
3268 if (__builtin_expect (hook != NULL, 0))
3269 return (*hook)(alignment, bytes, address);
3270
3271 /* If we need less alignment than we give anyway, just relay to malloc. */
3272 if (alignment <= MALLOC_ALIGNMENT)
3273 return __libc_malloc (bytes);
3274
3275 /* Otherwise, ensure that it is at least a minimum chunk size */
3276 if (alignment < MINSIZE)
3277 alignment = MINSIZE;
3278
3279 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3280 power of 2 and will cause overflow in the check below. */
3281 if (alignment > SIZE_MAX / 2 + 1)
3282 {
3283 __set_errno (EINVAL);
3284 return 0;
3285 }
3286
3287 /* Check for overflow. */
3288 if (bytes > SIZE_MAX - alignment - MINSIZE)
3289 {
3290 __set_errno (ENOMEM);
3291 return 0;
3292 }
3293
3294
3295 /* Make sure alignment is power of 2. */
3296 if (!powerof2 (alignment))
3297 {
3298 size_t a = MALLOC_ALIGNMENT * 2;
3299 while (a < alignment)
3300 a <<= 1;
3301 alignment = a;
3302 }
3303
3304 if (SINGLE_THREAD_P)
3305 {
3306 p = _int_memalign (&main_arena, alignment, bytes);
3307 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3308 &main_arena == arena_for_chunk (mem2chunk (p)));
3309
3310 return p;
3311 }
3312
3313 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3314
3315 p = _int_memalign (ar_ptr, alignment, bytes);
3316 if (!p && ar_ptr != NULL)
3317 {
3318 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3319 ar_ptr = arena_get_retry (ar_ptr, bytes);
3320 p = _int_memalign (ar_ptr, alignment, bytes);
3321 }
3322
3323 if (ar_ptr != NULL)
3324 __libc_lock_unlock (ar_ptr->mutex);
3325
3326 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3327 ar_ptr == arena_for_chunk (mem2chunk (p)));
3328 return p;
3329}
3330/* For ISO C11. */
3331weak_alias (__libc_memalign, aligned_alloc)
3332libc_hidden_def (__libc_memalign)
3333
3334void *
3335__libc_valloc (size_t bytes)
3336{
3337 if (__malloc_initialized < 0)
3338 ptmalloc_init ();
3339
3340 void *address = RETURN_ADDRESS (0);
3341 size_t pagesize = GLRO (dl_pagesize);
3342 return _mid_memalign (pagesize, bytes, address);
3343}
3344
3345void *
3346__libc_pvalloc (size_t bytes)
3347{
3348 if (__malloc_initialized < 0)
3349 ptmalloc_init ();
3350
3351 void *address = RETURN_ADDRESS (0);
3352 size_t pagesize = GLRO (dl_pagesize);
3353 size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
3354
3355 /* Check for overflow. */
3356 if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
3357 {
3358 __set_errno (ENOMEM);
3359 return 0;
3360 }
3361
3362 return _mid_memalign (pagesize, rounded_bytes, address);
3363}
3364
3365void *
3366__libc_calloc (size_t n, size_t elem_size)
3367{
3368 mstate av;
3369 mchunkptr oldtop, p;
3370 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3371 void *mem;
3372 unsigned long clearsize;
3373 unsigned long nclears;
3374 INTERNAL_SIZE_T *d;
3375
3376 /* size_t is unsigned so the behavior on overflow is defined. */
3377 bytes = n * elem_size;
3378#define HALF_INTERNAL_SIZE_T \
3379 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3380 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
3381 {
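      /* At least one of the factors occupies the upper half of the
         value range, so the multiplication above may have wrapped;
         verify with a division.  */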
3382 if (elem_size != 0 && bytes / elem_size != n)
3383 {
3384 __set_errno (ENOMEM);
3385 return 0;
3386 }
3387 }
3388
3389 void *(*hook) (size_t, const void *) =
3390 atomic_forced_read (__malloc_hook);
3391 if (__builtin_expect (hook != NULL, 0))
3392 {
3393 sz = bytes;
3394 mem = (*hook)(sz, RETURN_ADDRESS (0));
3395 if (mem == 0)
3396 return 0;
3397
3398 return memset (mem, 0, sz);
3399 }
3400
3401 sz = bytes;
3402
3403 MAYBE_INIT_TCACHE ();
3404
3405 if (SINGLE_THREAD_P)
3406 av = &main_arena;
3407 else
3408 arena_get (av, sz);
3409
3410 if (av)
3411 {
3412 /* Check if we hand out the top chunk, in which case there may be no
3413 need to clear. */
3414#if MORECORE_CLEARS
3415 oldtop = top (av);
3416 oldtopsize = chunksize (top (av));
3417# if MORECORE_CLEARS < 2
3418 /* Only newly allocated memory is guaranteed to be cleared. */
3419 if (av == &main_arena &&
3420 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3421 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3422# endif
3423 if (av != &main_arena)
3424 {
3425 heap_info *heap = heap_for_ptr (oldtop);
3426 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3427 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3428 }
3429#endif
3430 }
3431 else
3432 {
3433 /* No usable arenas. */
3434 oldtop = 0;
3435 oldtopsize = 0;
3436 }
3437 mem = _int_malloc (av, sz);
3438
3439 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3440 av == arena_for_chunk (mem2chunk (mem)));
3441
3442 if (!SINGLE_THREAD_P)
3443 {
3444 if (mem == 0 && av != NULL)
3445 {
3446 LIBC_PROBE (memory_calloc_retry, 1, sz);
3447 av = arena_get_retry (av, sz);
3448 mem = _int_malloc (av, sz);
3449 }
3450
3451 if (av != NULL)
3452 __libc_lock_unlock (av->mutex);
3453 }
3454
3455 /* Allocation failed even after a retry. */
3456 if (mem == 0)
3457 return 0;
3458
3459 p = mem2chunk (mem);
3460
  /* Two optional cases in which clearing is not necessary */
3462 if (chunk_is_mmapped (p))
3463 {
3464 if (__builtin_expect (perturb_byte, 0))
3465 return memset (mem, 0, sz);
3466
3467 return mem;
3468 }
3469
3470 csz = chunksize (p);
3471
3472#if MORECORE_CLEARS
3473 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3474 {
3475 /* clear only the bytes from non-freshly-sbrked memory */
3476 csz = oldtopsize;
3477 }
3478#endif
3479
  /* Unroll clear of <= 36 bytes (72 if 8-byte sizes).  We know that
     contents have an odd number of INTERNAL_SIZE_T-sized words;
     minimally 3.  */
3483 d = (INTERNAL_SIZE_T *) mem;
3484 clearsize = csz - SIZE_SZ;
3485 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3486 assert (nclears >= 3);
3487
3488 if (nclears > 9)
3489 return memset (d, 0, clearsize);
3490
3491 else
3492 {
3493 *(d + 0) = 0;
3494 *(d + 1) = 0;
3495 *(d + 2) = 0;
3496 if (nclears > 4)
3497 {
3498 *(d + 3) = 0;
3499 *(d + 4) = 0;
3500 if (nclears > 6)
3501 {
3502 *(d + 5) = 0;
3503 *(d + 6) = 0;
3504 if (nclears > 8)
3505 {
3506 *(d + 7) = 0;
3507 *(d + 8) = 0;
3508 }
3509 }
3510 }
3511 }
3512
3513 return mem;
3514}
3515
3516/*
3517 ------------------------------ malloc ------------------------------
3518 */
3519
3520static void *
3521_int_malloc (mstate av, size_t bytes)
3522{
3523 INTERNAL_SIZE_T nb; /* normalized request size */
3524 unsigned int idx; /* associated bin index */
3525 mbinptr bin; /* associated bin */
3526
3527 mchunkptr victim; /* inspected/selected chunk */
3528 INTERNAL_SIZE_T size; /* its size */
3529 int victim_index; /* its bin index */
3530
3531 mchunkptr remainder; /* remainder from a split */
3532 unsigned long remainder_size; /* its size */
3533
3534 unsigned int block; /* bit map traverser */
3535 unsigned int bit; /* bit map traverser */
3536 unsigned int map; /* current word of binmap */
3537
3538 mchunkptr fwd; /* misc temp for linking */
3539 mchunkptr bck; /* misc temp for linking */
3540
3541#if USE_TCACHE
3542 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3543#endif
3544
3545 /*
3546 Convert request size to internal form by adding SIZE_SZ bytes
3547 overhead plus possibly more to obtain necessary alignment and/or
3548 to obtain a size of at least MINSIZE, the smallest allocatable
3549 size. Also, checked_request2size traps (returning 0) request sizes
3550 that are so large that they wrap around zero when padded and
3551 aligned.
3552 */
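  /* As a concrete illustration (assuming a typical 64-bit configuration
     with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16): a request of 24
     bytes is padded to nb == 32, and a request of 0 is raised to
     MINSIZE.  */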
3553
3554 checked_request2size (bytes, nb);
3555
3556 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3557 mmap. */
3558 if (__glibc_unlikely (av == NULL))
3559 {
3560 void *p = sysmalloc (nb, av);
3561 if (p != NULL)
3562 alloc_perturb (p, bytes);
3563 return p;
3564 }
3565
3566 /*
3567 If the size qualifies as a fastbin, first check corresponding bin.
3568 This code is safe to execute even if av is not yet initialized, so we
3569 can try it without checking, which saves some time on this fast path.
3570 */
3571
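/* Atomically pop the chunk at the head of fastbin FB: VICTIM takes the
   value of PP (the last observed head), the loop exits if the bin is
   empty, and otherwise a compare-and-exchange replaces the head with
   VICTIM->fd, retrying with the freshly observed head whenever another
   thread has modified the bin in the meantime.  */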
3572#define REMOVE_FB(fb, victim, pp) \
3573 do \
3574 { \
3575 victim = pp; \
3576 if (victim == NULL) \
3577 break; \
3578 } \
3579 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
3580 != victim); \
3581
3582 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3583 {
3584 idx = fastbin_index (nb);
3585 mfastbinptr *fb = &fastbin (av, idx);
3586 mchunkptr pp;
3587 victim = *fb;
3588
3589 if (victim != NULL)
3590 {
3591 if (SINGLE_THREAD_P)
3592 *fb = victim->fd;
3593 else
3594 REMOVE_FB (fb, pp, victim);
3595 if (__glibc_likely (victim != NULL))
3596 {
3597 size_t victim_idx = fastbin_index (chunksize (victim));
3598 if (__builtin_expect (victim_idx != idx, 0))
3599 malloc_printerr ("malloc(): memory corruption (fast)");
3600 check_remalloced_chunk (av, victim, nb);
3601#if USE_TCACHE
3602 /* While we're here, if we see other chunks of the same size,
3603 stash them in the tcache. */
3604 size_t tc_idx = csize2tidx (nb);
3605 if (tcache && tc_idx < mp_.tcache_bins)
3606 {
3607 mchunkptr tc_victim;
3608
3609 /* While bin not empty and tcache not full, copy chunks. */
3610 while (tcache->counts[tc_idx] < mp_.tcache_count
3611 && (tc_victim = *fb) != NULL)
3612 {
3613 if (SINGLE_THREAD_P)
3614 *fb = tc_victim->fd;
3615 else
3616 {
3617 REMOVE_FB (fb, pp, tc_victim);
3618 if (__glibc_unlikely (tc_victim == NULL))
3619 break;
3620 }
3621 tcache_put (tc_victim, tc_idx);
3622 }
3623 }
3624#endif
3625 void *p = chunk2mem (victim);
3626 alloc_perturb (p, bytes);
3627 return p;
3628 }
3629 }
3630 }
3631
3632 /*
3633 If a small request, check regular bin. Since these "smallbins"
3634 hold one size each, no searching within bins is necessary.
3635 (For a large request, we need to wait until unsorted chunks are
3636 processed to find best fit. But for small ones, fits are exact
3637 anyway, so we can check now, which is faster.)
3638 */
3639
3640 if (in_smallbin_range (nb))
3641 {
3642 idx = smallbin_index (nb);
3643 bin = bin_at (av, idx);
3644
3645 if ((victim = last (bin)) != bin)
3646 {
3647 bck = victim->bk;
3648 if (__glibc_unlikely (bck->fd != victim))
3649 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3650 set_inuse_bit_at_offset (victim, nb);
3651 bin->bk = bck;
3652 bck->fd = bin;
3653
3654 if (av != &main_arena)
3655 set_non_main_arena (victim);
3656 check_malloced_chunk (av, victim, nb);
3657#if USE_TCACHE
3658 /* While we're here, if we see other chunks of the same size,
3659 stash them in the tcache. */
3660 size_t tc_idx = csize2tidx (nb);
3661 if (tcache && tc_idx < mp_.tcache_bins)
3662 {
3663 mchunkptr tc_victim;
3664
3665 /* While bin not empty and tcache not full, copy chunks over. */
3666 while (tcache->counts[tc_idx] < mp_.tcache_count
3667 && (tc_victim = last (bin)) != bin)
3668 {
3669 if (tc_victim != 0)
3670 {
3671 bck = tc_victim->bk;
3672 set_inuse_bit_at_offset (tc_victim, nb);
3673 if (av != &main_arena)
3674 set_non_main_arena (tc_victim);
3675 bin->bk = bck;
3676 bck->fd = bin;
3677
3678 tcache_put (tc_victim, tc_idx);
3679 }
3680 }
3681 }
3682#endif
3683 void *p = chunk2mem (victim);
3684 alloc_perturb (p, bytes);
3685 return p;
3686 }
3687 }
3688
  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs.  And the programs in
     which it is invoked frequently tend to fragment otherwise.
   */
3699
3700 else
3701 {
3702 idx = largebin_index (nb);
3703 if (atomic_load_relaxed (&av->have_fastchunks))
3704 malloc_consolidate (av);
3705 }
3706
  /*
     Process recently freed or remaindered chunks, taking one only if
     it is an exact fit, or, if this is a small request, if the chunk
     is the remainder from the most recent non-exact fit.  Place other
     traversed chunks in bins.  Note that this step is the only place
     in any routine where chunks are placed in bins.
3713
3714 The outer loop here is needed because we might not realize until
3715 near the end of malloc that we should have consolidated, so must
3716 do so and retry. This happens at most once, and only when we would
3717 otherwise need to expand memory to service a "small" request.
3718 */
3719
3720#if USE_TCACHE
3721 INTERNAL_SIZE_T tcache_nb = 0;
3722 size_t tc_idx = csize2tidx (nb);
3723 if (tcache && tc_idx < mp_.tcache_bins)
3724 tcache_nb = nb;
3725 int return_cached = 0;
3726
3727 tcache_unsorted_count = 0;
3728#endif
3729
3730 for (;; )
3731 {
3732 int iters = 0;
3733 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3734 {
3735 bck = victim->bk;
3736 size = chunksize (victim);
3737 mchunkptr next = chunk_at_offset (victim, size);
3738
3739 if (__glibc_unlikely (size <= 2 * SIZE_SZ)
3740 || __glibc_unlikely (size > av->system_mem))
3741 malloc_printerr ("malloc(): invalid size (unsorted)");
3742 if (__glibc_unlikely (chunksize_nomask (next) < 2 * SIZE_SZ)
3743 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
3744 malloc_printerr ("malloc(): invalid next size (unsorted)");
3745 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
3746 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
3747 if (__glibc_unlikely (bck->fd != victim)
3748 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
3749 malloc_printerr ("malloc(): unsorted double linked list corrupted");
3750 if (__glibc_unlikely (prev_inuse (next)))
3751 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
3752
3753 /*
3754 If a small request, try to use last remainder if it is the
3755 only chunk in unsorted bin. This helps promote locality for
3756 runs of consecutive small requests. This is the only
3757 exception to best-fit, and applies only when there is
3758 no exact fit for a small chunk.
3759 */
3760
3761 if (in_smallbin_range (nb) &&
3762 bck == unsorted_chunks (av) &&
3763 victim == av->last_remainder &&
3764 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3765 {
3766 /* split and reattach remainder */
3767 remainder_size = size - nb;
3768 remainder = chunk_at_offset (victim, nb);
3769 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3770 av->last_remainder = remainder;
3771 remainder->bk = remainder->fd = unsorted_chunks (av);
3772 if (!in_smallbin_range (remainder_size))
3773 {
3774 remainder->fd_nextsize = NULL;
3775 remainder->bk_nextsize = NULL;
3776 }
3777
3778 set_head (victim, nb | PREV_INUSE |
3779 (av != &main_arena ? NON_MAIN_ARENA : 0));
3780 set_head (remainder, remainder_size | PREV_INUSE);
3781 set_foot (remainder, remainder_size);
3782
3783 check_malloced_chunk (av, victim, nb);
3784 void *p = chunk2mem (victim);
3785 alloc_perturb (p, bytes);
3786 return p;
3787 }
3788
3789 /* remove from unsorted list */
3790 if (__glibc_unlikely (bck->fd != victim))
3791 malloc_printerr ("malloc(): corrupted unsorted chunks 3");
3792 unsorted_chunks (av)->bk = bck;
3793 bck->fd = unsorted_chunks (av);
3794
3795 /* Take now instead of binning if exact fit */
3796
3797 if (size == nb)
3798 {
3799 set_inuse_bit_at_offset (victim, size);
3800 if (av != &main_arena)
3801 set_non_main_arena (victim);
3802#if USE_TCACHE
3803 /* Fill cache first, return to user only if cache fills.
3804 We may return one of these chunks later. */
3805 if (tcache_nb
3806 && tcache->counts[tc_idx] < mp_.tcache_count)
3807 {
3808 tcache_put (victim, tc_idx);
3809 return_cached = 1;
3810 continue;
3811 }
3812 else
3813 {
3814#endif
3815 check_malloced_chunk (av, victim, nb);
3816 void *p = chunk2mem (victim);
3817 alloc_perturb (p, bytes);
3818 return p;
3819#if USE_TCACHE
3820 }
3821#endif
3822 }
3823
3824 /* place chunk in bin */
3825
3826 if (in_smallbin_range (size))
3827 {
3828 victim_index = smallbin_index (size);
3829 bck = bin_at (av, victim_index);
3830 fwd = bck->fd;
3831 }
3832 else
3833 {
3834 victim_index = largebin_index (size);
3835 bck = bin_at (av, victim_index);
3836 fwd = bck->fd;
3837
3838 /* maintain large bins in sorted order */
3839 if (fwd != bck)
3840 {
3841 /* Or with inuse bit to speed comparisons */
3842 size |= PREV_INUSE;
3843 /* if smaller than smallest, bypass loop below */
3844 assert (chunk_main_arena (bck->bk));
3845 if ((unsigned long) (size)
3846 < (unsigned long) chunksize_nomask (bck->bk))
3847 {
3848 fwd = bck;
3849 bck = bck->bk;
3850
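                      /* VICTIM is smaller than every chunk in the bin:
                         link it into the circular nextsize list between
                         the old smallest chunk and the largest (first)
                         chunk.  */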
3851 victim->fd_nextsize = fwd->fd;
3852 victim->bk_nextsize = fwd->fd->bk_nextsize;
3853 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3854 }
3855 else
3856 {
3857 assert (chunk_main_arena (fwd));
3858 while ((unsigned long) size < chunksize_nomask (fwd))
3859 {
3860 fwd = fwd->fd_nextsize;
3861 assert (chunk_main_arena (fwd));
3862 }
3863
3864 if ((unsigned long) size
3865 == (unsigned long) chunksize_nomask (fwd))
3866 /* Always insert in the second position. */
3867 fwd = fwd->fd;
3868 else
3869 {
3870 victim->fd_nextsize = fwd;
3871 victim->bk_nextsize = fwd->bk_nextsize;
3872 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
3873 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
3874 fwd->bk_nextsize = victim;
3875 victim->bk_nextsize->fd_nextsize = victim;
3876 }
3877 bck = fwd->bk;
3878 if (bck->fd != fwd)
3879 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
3880 }
3881 }
3882 else
3883 victim->fd_nextsize = victim->bk_nextsize = victim;
3884 }
3885
3886 mark_bin (av, victim_index);
3887 victim->bk = bck;
3888 victim->fd = fwd;
3889 fwd->bk = victim;
3890 bck->fd = victim;
3891
3892#if USE_TCACHE
3893 /* If we've processed as many chunks as we're allowed while
3894 filling the cache, return one of the cached ones. */
3895 ++tcache_unsorted_count;
3896 if (return_cached
3897 && mp_.tcache_unsorted_limit > 0
3898 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
3899 {
3900 return tcache_get (tc_idx);
3901 }
3902#endif
3903
3904#define MAX_ITERS 10000
3905 if (++iters >= MAX_ITERS)
3906 break;
3907 }
3908
3909#if USE_TCACHE
3910 /* If all the small chunks we found ended up cached, return one now. */
3911 if (return_cached)
3912 {
3913 return tcache_get (tc_idx);
3914 }
3915#endif
3916
3917 /*
3918 If a large request, scan through the chunks of current bin in
3919 sorted order to find smallest that fits. Use the skip list for this.
3920 */
3921
3922 if (!in_smallbin_range (nb))
3923 {
3924 bin = bin_at (av, idx);
3925
3926 /* skip scan if empty or largest chunk is too small */
3927 if ((victim = first (bin)) != bin
3928 && (unsigned long) chunksize_nomask (victim)
3929 >= (unsigned long) (nb))
3930 {
3931 victim = victim->bk_nextsize;
3932 while (((unsigned long) (size = chunksize (victim)) <
3933 (unsigned long) (nb)))
3934 victim = victim->bk_nextsize;
3935
3936 /* Avoid removing the first entry for a size so that the skip
3937 list does not have to be rerouted. */
3938 if (victim != last (bin)
3939 && chunksize_nomask (victim)
3940 == chunksize_nomask (victim->fd))
3941 victim = victim->fd;
3942
3943 remainder_size = size - nb;
3944 unlink (av, victim, bck, fwd);
3945
3946 /* Exhaust */
3947 if (remainder_size < MINSIZE)
3948 {
3949 set_inuse_bit_at_offset (victim, size);
3950 if (av != &main_arena)
3951 set_non_main_arena (victim);
3952 }
3953 /* Split */
3954 else
3955 {
3956 remainder = chunk_at_offset (victim, nb);
3957 /* We cannot assume the unsorted list is empty and therefore
3958 have to perform a complete insert here. */
3959 bck = unsorted_chunks (av);
3960 fwd = bck->fd;
3961 if (__glibc_unlikely (fwd->bk != bck))
3962 malloc_printerr ("malloc(): corrupted unsorted chunks");
3963 remainder->bk = bck;
3964 remainder->fd = fwd;
3965 bck->fd = remainder;
3966 fwd->bk = remainder;
3967 if (!in_smallbin_range (remainder_size))
3968 {
3969 remainder->fd_nextsize = NULL;
3970 remainder->bk_nextsize = NULL;
3971 }
3972 set_head (victim, nb | PREV_INUSE |
3973 (av != &main_arena ? NON_MAIN_ARENA : 0));
3974 set_head (remainder, remainder_size | PREV_INUSE);
3975 set_foot (remainder, remainder_size);
3976 }
3977 check_malloced_chunk (av, victim, nb);
3978 void *p = chunk2mem (victim);
3979 alloc_perturb (p, bytes);
3980 return p;
3981 }
3982 }
3983
3984 /*
3985 Search for a chunk by scanning bins, starting with next largest
3986 bin. This search is strictly by best-fit; i.e., the smallest
3987 (with ties going to approximately the least recently used) chunk
3988 that fits is selected.
3989
3990 The bitmap avoids needing to check that most blocks are nonempty.
3991 The particular case of skipping all bins during warm-up phases
3992 when no chunks have been returned yet is faster than it might look.
3993 */
3994
3995 ++idx;
3996 bin = bin_at (av, idx);
3997 block = idx2block (idx);
3998 map = av->binmap[block];
3999 bit = idx2bit (idx);
4000
4001 for (;; )
4002 {
4003 /* Skip rest of block if there are no more set bits in this block. */
4004 if (bit > map || bit == 0)
4005 {
4006 do
4007 {
4008 if (++block >= BINMAPSIZE) /* out of bins */
4009 goto use_top;
4010 }
4011 while ((map = av->binmap[block]) == 0);
4012
4013 bin = bin_at (av, (block << BINMAPSHIFT));
4014 bit = 1;
4015 }
4016
4017 /* Advance to bin with set bit. There must be one. */
4018 while ((bit & map) == 0)
4019 {
4020 bin = next_bin (bin);
4021 bit <<= 1;
4022 assert (bit != 0);
4023 }
4024
4025 /* Inspect the bin. It is likely to be non-empty */
4026 victim = last (bin);
4027
4028 /* If a false alarm (empty bin), clear the bit. */
4029 if (victim == bin)
4030 {
4031 av->binmap[block] = map &= ~bit; /* Write through */
4032 bin = next_bin (bin);
4033 bit <<= 1;
4034 }
4035
4036 else
4037 {
4038 size = chunksize (victim);
4039
4040 /* We know the first chunk in this bin is big enough to use. */
4041 assert ((unsigned long) (size) >= (unsigned long) (nb));
4042
4043 remainder_size = size - nb;
4044
4045 /* unlink */
4046 unlink (av, victim, bck, fwd);
4047
4048 /* Exhaust */
4049 if (remainder_size < MINSIZE)
4050 {
4051 set_inuse_bit_at_offset (victim, size);
4052 if (av != &main_arena)
4053 set_non_main_arena (victim);
4054 }
4055
4056 /* Split */
4057 else
4058 {
4059 remainder = chunk_at_offset (victim, nb);
4060
4061 /* We cannot assume the unsorted list is empty and therefore
4062 have to perform a complete insert here. */
4063 bck = unsorted_chunks (av);
4064 fwd = bck->fd;
4065 if (__glibc_unlikely (fwd->bk != bck))
4066 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4067 remainder->bk = bck;
4068 remainder->fd = fwd;
4069 bck->fd = remainder;
4070 fwd->bk = remainder;
4071
4072 /* advertise as last remainder */
4073 if (in_smallbin_range (nb))
4074 av->last_remainder = remainder;
4075 if (!in_smallbin_range (remainder_size))
4076 {
4077 remainder->fd_nextsize = NULL;
4078 remainder->bk_nextsize = NULL;
4079 }
4080 set_head (victim, nb | PREV_INUSE |
4081 (av != &main_arena ? NON_MAIN_ARENA : 0));
4082 set_head (remainder, remainder_size | PREV_INUSE);
4083 set_foot (remainder, remainder_size);
4084 }
4085 check_malloced_chunk (av, victim, nb);
4086 void *p = chunk2mem (victim);
4087 alloc_perturb (p, bytes);
4088 return p;
4089 }
4090 }
4091
4092 use_top:
4093 /*
4094 If large enough, split off the chunk bordering the end of memory
4095 (held in av->top). Note that this is in accord with the best-fit
4096 search rule. In effect, av->top is treated as larger (and thus
4097 less well fitting) than any other available chunk since it can
4098 be extended to be as large as necessary (up to system
4099 limitations).
4100
4101 We require that av->top always exists (i.e., has size >=
4102 MINSIZE) after initialization, so if it would otherwise be
4103 exhausted by current request, it is replenished. (The main
4104 reason for ensuring it exists is that we may need MINSIZE space
4105 to put in fenceposts in sysmalloc.)
4106 */
4107
4108 victim = av->top;
4109 size = chunksize (victim);
4110
4111 if (__glibc_unlikely (size > av->system_mem))
4112 malloc_printerr ("malloc(): corrupted top size");
4113
4114 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4115 {
4116 remainder_size = size - nb;
4117 remainder = chunk_at_offset (victim, nb);
4118 av->top = remainder;
4119 set_head (victim, nb | PREV_INUSE |
4120 (av != &main_arena ? NON_MAIN_ARENA : 0));
4121 set_head (remainder, remainder_size | PREV_INUSE);
4122
4123 check_malloced_chunk (av, victim, nb);
4124 void *p = chunk2mem (victim);
4125 alloc_perturb (p, bytes);
4126 return p;
4127 }
4128
4129 /* When we are using atomic ops to free fast chunks we can get
4130 here for all block sizes. */
4131 else if (atomic_load_relaxed (&av->have_fastchunks))
4132 {
4133 malloc_consolidate (av);
4134 /* restore original bin index */
4135 if (in_smallbin_range (nb))
4136 idx = smallbin_index (nb);
4137 else
4138 idx = largebin_index (nb);
4139 }
4140
4141 /*
4142 Otherwise, relay to handle system-dependent cases
4143 */
4144 else
4145 {
4146 void *p = sysmalloc (nb, av);
4147 if (p != NULL)
4148 alloc_perturb (p, bytes);
4149 return p;
4150 }
4151 }
4152}
4153
4154/*
4155 ------------------------------ free ------------------------------
4156 */
4157
4158static void
4159_int_free (mstate av, mchunkptr p, int have_lock)
4160{
4161 INTERNAL_SIZE_T size; /* its size */
4162 mfastbinptr *fb; /* associated fastbin */
4163 mchunkptr nextchunk; /* next contiguous chunk */
4164 INTERNAL_SIZE_T nextsize; /* its size */
4165 int nextinuse; /* true if nextchunk is used */
4166 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4167 mchunkptr bck; /* misc temp for linking */
4168 mchunkptr fwd; /* misc temp for linking */
4169
4170 size = chunksize (p);
4171
  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
4176 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4177 || __builtin_expect (misaligned_chunk (p), 0))
4178 malloc_printerr ("free(): invalid pointer");
  /* We know that each chunk is at least MINSIZE bytes in size and a
     multiple of MALLOC_ALIGNMENT.  */
4181 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
4182 malloc_printerr ("free(): invalid size");
4183
4184 check_inuse_chunk(av, p);
4185
4186#if USE_TCACHE
4187 {
4188 size_t tc_idx = csize2tidx (size);
4189 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4190 {
4191 /* Check to see if it's already in the tcache. */
4192 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4193
4194 /* This test succeeds on double free. However, we don't 100%
4195 trust it (it also matches random payload data at a 1 in
4196 2^<size_t> chance), so verify it's not an unlikely
4197 coincidence before aborting. */
4198 if (__glibc_unlikely (e->key == tcache))
4199 {
4200 tcache_entry *tmp;
4201 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4202 for (tmp = tcache->entries[tc_idx];
4203 tmp;
4204 tmp = tmp->next)
4205 if (tmp == e)
4206 malloc_printerr ("free(): double free detected in tcache 2");
4207 /* If we get here, it was a coincidence. We've wasted a
4208 few cycles, but don't abort. */
4209 }
4210
4211 if (tcache->counts[tc_idx] < mp_.tcache_count)
4212 {
4213 tcache_put (p, tc_idx);
4214 return;
4215 }
4216 }
4217 }
4218#endif
4219
4220 /*
4221 If eligible, place chunk on a fastbin so it can be found
4222 and used quickly in malloc.
4223 */
4224
4225 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4226
4227#if TRIM_FASTBINS
4228 /*
4229 If TRIM_FASTBINS set, don't place chunks
4230 bordering top into fastbins
4231 */
4232 && (chunk_at_offset(p, size) != av->top)
4233#endif
4234 ) {
4235
4236 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4237 <= 2 * SIZE_SZ, 0)
4238 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4239 >= av->system_mem, 0))
4240 {
4241 bool fail = true;
4242 /* We might not have a lock at this point and concurrent modifications
4243 of system_mem might result in a false positive. Redo the test after
4244 getting the lock. */
4245 if (!have_lock)
4246 {
4247 __libc_lock_lock (av->mutex);
4248 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
4249 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4250 __libc_lock_unlock (av->mutex);
4251 }
4252
4253 if (fail)
4254 malloc_printerr ("free(): invalid next size (fast)");
4255 }
4256
4257 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
4258
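    /* Flag the arena as holding fastbin chunks so that later
       consolidation passes know there is work to do.  */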
4259 atomic_store_relaxed (&av->have_fastchunks, true);
4260 unsigned int idx = fastbin_index(size);
4261 fb = &fastbin (av, idx);
4262
4263 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4264 mchunkptr old = *fb, old2;
4265
4266 if (SINGLE_THREAD_P)
4267 {
4268 /* Check that the top of the bin is not the record we are going to
4269 add (i.e., double free). */
4270 if (__builtin_expect (old == p, 0))
4271 malloc_printerr ("double free or corruption (fasttop)");
4272 p->fd = old;
4273 *fb = p;
4274 }
4275 else
4276 do
4277 {
4278 /* Check that the top of the bin is not the record we are going to
4279 add (i.e., double free). */
4280 if (__builtin_expect (old == p, 0))
4281 malloc_printerr ("double free or corruption (fasttop)");
4282 p->fd = old2 = old;
4283 }
4284 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4285 != old2);
4286
4287 /* Check that size of fastbin chunk at the top is the same as
4288 size of the chunk that we are adding. We can dereference OLD
4289 only if we have the lock, otherwise it might have already been
4290 allocated again. */
4291 if (have_lock && old != NULL
4292 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
4293 malloc_printerr ("invalid fastbin entry (free)");
4294 }
4295
4296 /*
4297 Consolidate other non-mmapped chunks as they arrive.
4298 */
4299
4300 else if (!chunk_is_mmapped(p)) {
4301
4302 /* If we're single-threaded, don't lock the arena. */
4303 if (SINGLE_THREAD_P)
4304 have_lock = true;
4305
4306 if (!have_lock)
4307 __libc_lock_lock (av->mutex);
4308
4309 nextchunk = chunk_at_offset(p, size);
4310
4311 /* Lightweight tests: check whether the block is already the
4312 top block. */
4313 if (__glibc_unlikely (p == av->top))
4314 malloc_printerr ("double free or corruption (top)");
4315 /* Or whether the next chunk is beyond the boundaries of the arena. */
4316 if (__builtin_expect (contiguous (av)
4317 && (char *) nextchunk
4318 >= ((char *) av->top + chunksize(av->top)), 0))
4319 malloc_printerr ("double free or corruption (out)");
4320 /* Or whether the block is actually not marked used. */
4321 if (__glibc_unlikely (!prev_inuse(nextchunk)))
4322 malloc_printerr ("double free or corruption (!prev)");
4323
4324 nextsize = chunksize(nextchunk);
4325 if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
4326 || __builtin_expect (nextsize >= av->system_mem, 0))
4327 malloc_printerr ("free(): invalid next size (normal)");
4328
4329 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
4330
4331 /* consolidate backward */
4332 if (!prev_inuse(p)) {
4333 prevsize = prev_size (p);
4334 size += prevsize;
4335 p = chunk_at_offset(p, -((long) prevsize));
4336 if (__glibc_unlikely (chunksize(p) != prevsize))
4337 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4338 unlink(av, p, bck, fwd);
4339 }
4340
4341 if (nextchunk != av->top) {
4342 /* get and clear inuse bit */
4343 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4344
4345 /* consolidate forward */
4346 if (!nextinuse) {
4347 unlink(av, nextchunk, bck, fwd);
4348 size += nextsize;
4349 } else
4350 clear_inuse_bit_at_offset(nextchunk, 0);
4351
4352 /*
4353 Place the chunk in unsorted chunk list. Chunks are
4354 not placed into regular bins until after they have
4355 been given one chance to be used in malloc.
4356 */
4357
4358 bck = unsorted_chunks(av);
4359 fwd = bck->fd;
4360 if (__glibc_unlikely (fwd->bk != bck))
4361 malloc_printerr ("free(): corrupted unsorted chunks");
4362 p->fd = fwd;
4363 p->bk = bck;
4364 if (!in_smallbin_range(size))
4365 {
4366 p->fd_nextsize = NULL;
4367 p->bk_nextsize = NULL;
4368 }
4369 bck->fd = p;
4370 fwd->bk = p;
4371
4372 set_head(p, size | PREV_INUSE);
4373 set_foot(p, size);
4374
4375 check_free_chunk(av, p);
4376 }
4377
4378 /*
4379 If the chunk borders the current high end of memory,
4380 consolidate into top
4381 */
4382
4383 else {
4384 size += nextsize;
4385 set_head(p, size | PREV_INUSE);
4386 av->top = p;
4387 check_chunk(av, p);
4388 }
4389
4390 /*
4391 If freeing a large space, consolidate possibly-surrounding
4392 chunks. Then, if the total unused topmost memory exceeds trim
4393 threshold, ask malloc_trim to reduce top.
4394
4395 Unless max_fast is 0, we don't know if there are fastbins
4396 bordering top, so we cannot tell for sure whether threshold
4397 has been reached unless fastbins are consolidated. But we
4398 don't want to consolidate on each free. As a compromise,
4399 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4400 is reached.
4401 */
4402
4403 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4404 if (atomic_load_relaxed (&av->have_fastchunks))
4405 malloc_consolidate(av);
4406
4407 if (av == &main_arena) {
4408#ifndef MORECORE_CANNOT_TRIM
4409 if ((unsigned long)(chunksize(av->top)) >=
4410 (unsigned long)(mp_.trim_threshold))
4411 systrim(mp_.top_pad, av);
4412#endif
4413 } else {
4414 /* Always try heap_trim(), even if the top chunk is not
4415 large, because the corresponding heap might go away. */
4416 heap_info *heap = heap_for_ptr(top(av));
4417
4418 assert(heap->ar_ptr == av);
4419 heap_trim(heap, mp_.top_pad);
4420 }
4421 }
4422
4423 if (!have_lock)
4424 __libc_lock_unlock (av->mutex);
4425 }
4426 /*
4427 If the chunk was allocated via mmap, release via munmap().
4428 */
4429
4430 else {
4431 munmap_chunk (p);
4432 }
4433}
4434
4435/*
4436 ------------------------- malloc_consolidate -------------------------
4437
4438 malloc_consolidate is a specialized version of free() that tears
4439 down chunks held in fastbins. Free itself cannot be used for this
4440 purpose since, among other things, it might place chunks back onto
4441 fastbins. So, instead, we need to use a minor variant of the same
4442 code.
4443*/
4444
4445static void malloc_consolidate(mstate av)
4446{
4447 mfastbinptr* fb; /* current fastbin being consolidated */
4448 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4449 mchunkptr p; /* current chunk being consolidated */
4450 mchunkptr nextp; /* next chunk to consolidate */
4451 mchunkptr unsorted_bin; /* bin header */
4452 mchunkptr first_unsorted; /* chunk to link to */
4453
4454 /* These have same use as in free() */
4455 mchunkptr nextchunk;
4456 INTERNAL_SIZE_T size;
4457 INTERNAL_SIZE_T nextsize;
4458 INTERNAL_SIZE_T prevsize;
4459 int nextinuse;
4460 mchunkptr bck;
4461 mchunkptr fwd;
4462
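  /* Clear the flag before draining the bins: any chunk freed into a
     fastbin from now on will set it again, so it is not forgotten.  */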
4463 atomic_store_relaxed (&av->have_fastchunks, false);
4464
4465 unsorted_bin = unsorted_chunks(av);
4466
4467 /*
4468 Remove each chunk from fast bin and consolidate it, placing it
4469 then in unsorted bin. Among other reasons for doing this,
4470 placing in unsorted bin avoids needing to calculate actual bins
4471 until malloc is sure that chunks aren't immediately going to be
4472 reused anyway.
4473 */
4474
4475 maxfb = &fastbin (av, NFASTBINS - 1);
4476 fb = &fastbin (av, 0);
4477 do {
4478 p = atomic_exchange_acq (fb, NULL);
4479 if (p != 0) {
4480 do {
4481 {
4482 unsigned int idx = fastbin_index (chunksize (p));
4483 if ((&fastbin (av, idx)) != fb)
4484 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4485 }
4486
4487 check_inuse_chunk(av, p);
4488 nextp = p->fd;
4489
4490 /* Slightly streamlined version of consolidation code in free() */
4491 size = chunksize (p);
4492 nextchunk = chunk_at_offset(p, size);
4493 nextsize = chunksize(nextchunk);
4494
4495 if (!prev_inuse(p)) {
4496 prevsize = prev_size (p);
4497 size += prevsize;
4498 p = chunk_at_offset(p, -((long) prevsize));
4499 if (__glibc_unlikely (chunksize(p) != prevsize))
4500 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4501 unlink(av, p, bck, fwd);
4502 }
4503
4504 if (nextchunk != av->top) {
4505 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4506
4507 if (!nextinuse) {
4508 size += nextsize;
4509 unlink(av, nextchunk, bck, fwd);
4510 } else
4511 clear_inuse_bit_at_offset(nextchunk, 0);
4512
4513 first_unsorted = unsorted_bin->fd;
4514 unsorted_bin->fd = p;
4515 first_unsorted->bk = p;
4516
4517 if (!in_smallbin_range (size)) {
4518 p->fd_nextsize = NULL;
4519 p->bk_nextsize = NULL;
4520 }
4521
4522 set_head(p, size | PREV_INUSE);
4523 p->bk = unsorted_bin;
4524 p->fd = first_unsorted;
4525 set_foot(p, size);
4526 }
4527
4528 else {
4529 size += nextsize;
4530 set_head(p, size | PREV_INUSE);
4531 av->top = p;
4532 }
4533
4534 } while ( (p = nextp) != 0);
4535
4536 }
4537 } while (fb++ != maxfb);
4538}
4539
4540/*
4541 ------------------------------ realloc ------------------------------
4542*/
4543
4544void*
4545_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4546 INTERNAL_SIZE_T nb)
4547{
4548 mchunkptr newp; /* chunk to return */
4549 INTERNAL_SIZE_T newsize; /* its size */
4550 void* newmem; /* corresponding user mem */
4551
4552 mchunkptr next; /* next contiguous chunk after oldp */
4553
4554 mchunkptr remainder; /* extra space at end of newp */
4555 unsigned long remainder_size; /* its size */
4556
4557 mchunkptr bck; /* misc temp for linking */
4558 mchunkptr fwd; /* misc temp for linking */
4559
4560 /* oldmem size */
4561 if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
4562 || __builtin_expect (oldsize >= av->system_mem, 0))
4563 malloc_printerr ("realloc(): invalid old size");
4564
4565 check_inuse_chunk (av, oldp);
4566
4567 /* All callers already filter out mmap'ed chunks. */
4568 assert (!chunk_is_mmapped (oldp));
4569
4570 next = chunk_at_offset (oldp, oldsize);
4571 INTERNAL_SIZE_T nextsize = chunksize (next);
4572 if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
4573 || __builtin_expect (nextsize >= av->system_mem, 0))
4574 malloc_printerr ("realloc(): invalid next size");
4575
4576 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4577 {
4578 /* already big enough; split below */
4579 newp = oldp;
4580 newsize = oldsize;
4581 }
4582
4583 else
4584 {
4585 /* Try to expand forward into top */
4586 if (next == av->top &&
4587 (unsigned long) (newsize = oldsize + nextsize) >=
4588 (unsigned long) (nb + MINSIZE))
4589 {
4590 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4591 av->top = chunk_at_offset (oldp, nb);
4592 set_head (av->top, (newsize - nb) | PREV_INUSE);
4593 check_inuse_chunk (av, oldp);
4594 return chunk2mem (oldp);
4595 }
4596
4597 /* Try to expand forward into next chunk; split off remainder below */
4598 else if (next != av->top &&
4599 !inuse (next) &&
4600 (unsigned long) (newsize = oldsize + nextsize) >=
4601 (unsigned long) (nb))
4602 {
4603 newp = oldp;
4604 unlink (av, next, bck, fwd);
4605 }
4606
4607 /* allocate, copy, free */
4608 else
4609 {
4610 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4611 if (newmem == 0)
4612 return 0; /* propagate failure */
4613
4614 newp = mem2chunk (newmem);
4615 newsize = chunksize (newp);
4616
4617 /*
4618 Avoid copy if newp is next chunk after oldp.
4619 */
4620 if (newp == next)
4621 {
4622 newsize += oldsize;
4623 newp = oldp;
4624 }
4625 else
4626 {
4627 memcpy (newmem, chunk2mem (oldp), oldsize - SIZE_SZ);
4628 _int_free (av, oldp, 1);
4629 check_inuse_chunk (av, newp);
4630 return chunk2mem (newp);
4631 }
4632 }
4633 }
4634
4635 /* If possible, free extra space in old or extended chunk */
4636
4637 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4638
4639 remainder_size = newsize - nb;
4640
4641 if (remainder_size < MINSIZE) /* not enough extra to split off */
4642 {
4643 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4644 set_inuse_bit_at_offset (newp, newsize);
4645 }
4646 else /* split remainder */
4647 {
4648 remainder = chunk_at_offset (newp, nb);
4649 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4650 set_head (remainder, remainder_size | PREV_INUSE |
4651 (av != &main_arena ? NON_MAIN_ARENA : 0));
4652 /* Mark remainder as inuse so free() won't complain */
4653 set_inuse_bit_at_offset (remainder, remainder_size);
4654 _int_free (av, remainder, 1);
4655 }
4656
4657 check_inuse_chunk (av, newp);
4658 return chunk2mem (newp);
4659}
4660
4661/*
4662 ------------------------------ memalign ------------------------------
4663 */
4664
4665static void *
4666_int_memalign (mstate av, size_t alignment, size_t bytes)
4667{
4668 INTERNAL_SIZE_T nb; /* padded request size */
4669 char *m; /* memory returned by malloc call */
4670 mchunkptr p; /* corresponding chunk */
4671 char *brk; /* alignment point within p */
4672 mchunkptr newp; /* chunk to return */
4673 INTERNAL_SIZE_T newsize; /* its size */
4674 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4675 mchunkptr remainder; /* spare room at end to split off */
4676 unsigned long remainder_size; /* its size */
4677 INTERNAL_SIZE_T size;
4678
4679
4680
4681 checked_request2size (bytes, nb);
4682
  /*
     Strategy: allocate a chunk with enough slack (via the malloc call
     below), find a spot within it that meets the alignment request,
     and then possibly free the leading and trailing space.
   */
4687
4688
4689 /* Check for overflow. */
4690 if (nb > SIZE_MAX - alignment - MINSIZE)
4691 {
4692 __set_errno (ENOMEM);
4693 return 0;
4694 }
4695
4696 /* Call malloc with worst case padding to hit alignment. */
4697
4698 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4699
4700 if (m == 0)
4701 return 0; /* propagate failure */
4702
4703 p = mem2chunk (m);
4704
4705 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4706
    {
      /*
         Find an aligned spot inside chunk.  Since we need to give back
         leading space in a chunk of at least MINSIZE, if the first
         calculation places us at a spot with less than MINSIZE leader,
         we can move to the next aligned spot -- we've allocated enough
         total room so that this is always possible.
       */
4714 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
                                -((signed long) alignment));
4716 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4717 brk += alignment;
4718
4719 newp = (mchunkptr) brk;
4720 leadsize = brk - (char *) (p);
4721 newsize = chunksize (p) - leadsize;
4722
4723 /* For mmapped chunks, just adjust offset */
4724 if (chunk_is_mmapped (p))
4725 {
4726 set_prev_size (newp, prev_size (p) + leadsize);
4727 set_head (newp, newsize | IS_MMAPPED);
4728 return chunk2mem (newp);
4729 }
4730
4731 /* Otherwise, give back leader, use the rest */
4732 set_head (newp, newsize | PREV_INUSE |
4733 (av != &main_arena ? NON_MAIN_ARENA : 0));
4734 set_inuse_bit_at_offset (newp, newsize);
4735 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4736 _int_free (av, p, 1);
4737 p = newp;
4738
4739 assert (newsize >= nb &&
4740 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
4741 }
4742
4743 /* Also give back spare room at the end */
4744 if (!chunk_is_mmapped (p))
4745 {
4746 size = chunksize (p);
4747 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4748 {
4749 remainder_size = size - nb;
4750 remainder = chunk_at_offset (p, nb);
4751 set_head (remainder, remainder_size | PREV_INUSE |
4752 (av != &main_arena ? NON_MAIN_ARENA : 0));
4753 set_head_size (p, nb);
4754 _int_free (av, remainder, 1);
4755 }
4756 }
4757
4758 check_inuse_chunk (av, p);
4759 return chunk2mem (p);
4760}
4761
4762
4763/*
4764 ------------------------------ malloc_trim ------------------------------
4765 */
4766
4767static int
4768mtrim (mstate av, size_t pad)
4769{
4770 /* Ensure all blocks are consolidated. */
4771 malloc_consolidate (av);
4772
4773 const size_t ps = GLRO (dl_pagesize);
4774 int psindex = bin_index (ps);
4775 const size_t psm1 = ps - 1;
4776
4777 int result = 0;
4778 for (int i = 1; i < NBINS; ++i)
4779 if (i == 1 || i >= psindex)
4780 {
4781 mbinptr bin = bin_at (av, i);
4782
4783 for (mchunkptr p = last (bin); p != bin; p = p->bk)
4784 {
4785 INTERNAL_SIZE_T size = chunksize (p);
4786
4787 if (size > psm1 + sizeof (struct malloc_chunk))
4788 {
4789 /* See whether the chunk contains at least one unused page. */
4790 char *paligned_mem = (char *) (((uintptr_t) p
4791 + sizeof (struct malloc_chunk)
4792 + psm1) & ~psm1);
4793
4794 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
4795 assert ((char *) p + size > paligned_mem);
4796
4797 /* This is the size we could potentially free. */
4798 size -= paligned_mem - (char *) p;
4799
4800 if (size > psm1)
4801 {
4802#if MALLOC_DEBUG
4803 /* When debugging we simulate destroying the memory
4804 content. */
4805 memset (paligned_mem, 0x89, size & ~psm1);
4806#endif
4807 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
4808
4809 result = 1;
4810 }
4811 }
4812 }
4813 }
4814
4815#ifndef MORECORE_CANNOT_TRIM
4816 return result | (av == &main_arena ? systrim (pad, av) : 0);
4817
4818#else
4819 return result;
4820#endif
4821}
4822
4823
4824int
4825__malloc_trim (size_t s)
4826{
4827 int result = 0;
4828
4829 if (__malloc_initialized < 0)
4830 ptmalloc_init ();
4831
4832 mstate ar_ptr = &main_arena;
4833 do
4834 {
4835 __libc_lock_lock (ar_ptr->mutex);
4836 result |= mtrim (ar_ptr, s);
4837 __libc_lock_unlock (ar_ptr->mutex);
4838
4839 ar_ptr = ar_ptr->next;
4840 }
4841 while (ar_ptr != &main_arena);
4842
4843 return result;
4844}
4845
4846
4847/*
4848 ------------------------- malloc_usable_size -------------------------
4849 */
4850
4851static size_t
4852musable (void *mem)
4853{
4854 mchunkptr p;
4855 if (mem != 0)
4856 {
4857 p = mem2chunk (mem);
4858
4859 if (__builtin_expect (using_malloc_checking == 1, 0))
4860 return malloc_check_get_size (p);
4861
4862 if (chunk_is_mmapped (p))
4863 {
4864 if (DUMPED_MAIN_ARENA_CHUNK (p))
4865 return chunksize (p) - SIZE_SZ;
4866 else
4867 return chunksize (p) - 2 * SIZE_SZ;
4868 }
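      /* For an in-use, non-mmapped chunk the user area extends into the
         next chunk's prev_size field, so only SIZE_SZ bytes of overhead
         are subtracted.  */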
4869 else if (inuse (p))
4870 return chunksize (p) - SIZE_SZ;
4871 }
4872 return 0;
4873}
4874
4875
4876size_t
4877__malloc_usable_size (void *m)
4878{
4879 size_t result;
4880
4881 result = musable (m);
4882 return result;
4883}
4884
4885/*
4886 ------------------------------ mallinfo ------------------------------
4887 Accumulate malloc statistics for arena AV into M.
4888 */
4889
4890static void
4891int_mallinfo (mstate av, struct mallinfo *m)
4892{
4893 size_t i;
4894 mbinptr b;
4895 mchunkptr p;
4896 INTERNAL_SIZE_T avail;
4897 INTERNAL_SIZE_T fastavail;
4898 int nblocks;
4899 int nfastblocks;
4900
4901 check_malloc_state (av);
4902
4903 /* Account for top */
4904 avail = chunksize (av->top);
4905 nblocks = 1; /* top always exists */
4906
4907 /* traverse fastbins */
4908 nfastblocks = 0;
4909 fastavail = 0;
4910
4911 for (i = 0; i < NFASTBINS; ++i)
4912 {
4913 for (p = fastbin (av, i); p != 0; p = p->fd)
4914 {
4915 ++nfastblocks;
4916 fastavail += chunksize (p);
4917 }
4918 }
4919
4920 avail += fastavail;
4921
4922 /* traverse regular bins */
4923 for (i = 1; i < NBINS; ++i)
4924 {
4925 b = bin_at (av, i);
4926 for (p = last (b); p != b; p = p->bk)
4927 {
4928 ++nblocks;
4929 avail += chunksize (p);
4930 }
4931 }
4932
4933 m->smblks += nfastblocks;
4934 m->ordblks += nblocks;
4935 m->fordblks += avail;
4936 m->uordblks += av->system_mem - avail;
4937 m->arena += av->system_mem;
4938 m->fsmblks += fastavail;
4939 if (av == &main_arena)
4940 {
4941 m->hblks = mp_.n_mmaps;
4942 m->hblkhd = mp_.mmapped_mem;
4943 m->usmblks = 0;
4944 m->keepcost = chunksize (av->top);
4945 }
4946}
4947
4948
4949struct mallinfo
4950__libc_mallinfo (void)
4951{
4952 struct mallinfo m;
4953 mstate ar_ptr;
4954
4955 if (__malloc_initialized < 0)
4956 ptmalloc_init ();
4957
4958 memset (&m, 0, sizeof (m));
4959 ar_ptr = &main_arena;
4960 do
4961 {
4962 __libc_lock_lock (ar_ptr->mutex);
4963 int_mallinfo (ar_ptr, &m);
4964 __libc_lock_unlock (ar_ptr->mutex);
4965
4966 ar_ptr = ar_ptr->next;
4967 }
4968 while (ar_ptr != &main_arena);
4969
4970 return m;
4971}
4972
4973/*
4974 ------------------------------ malloc_stats ------------------------------
4975 */
4976
4977void
4978__malloc_stats (void)
4979{
4980 int i;
4981 mstate ar_ptr;
4982 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
4983
4984 if (__malloc_initialized < 0)
4985 ptmalloc_init ();
4986 _IO_flockfile (stderr);
4987 int old_flags2 = stderr->_flags2;
4988 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
4989 for (i = 0, ar_ptr = &main_arena;; i++)
4990 {
4991 struct mallinfo mi;
4992
4993 memset (&mi, 0, sizeof (mi));
4994 __libc_lock_lock (ar_ptr->mutex);
4995 int_mallinfo (ar_ptr, &mi);
4996 fprintf (stderr, "Arena %d:\n", i);
4997 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
4998 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
4999#if MALLOC_DEBUG > 1
5000 if (i > 0)
5001 dump_heap (heap_for_ptr (top (ar_ptr)));
5002#endif
5003 system_b += mi.arena;
5004 in_use_b += mi.uordblks;
5005 __libc_lock_unlock (ar_ptr->mutex);
5006 ar_ptr = ar_ptr->next;
5007 if (ar_ptr == &main_arena)
5008 break;
5009 }
5010 fprintf (stderr, "Total (incl. mmap):\n");
5011 fprintf (stderr, "system bytes = %10u\n", system_b);
5012 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5013 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5014 fprintf (stderr, "max mmap bytes = %10lu\n",
5015 (unsigned long) mp_.max_mmapped_mem);
5016 stderr->_flags2 = old_flags2;
5017 _IO_funlockfile (stderr);
5018}
5019
5020
5021/*
5022 ------------------------------ mallopt ------------------------------
5023 */
5024static inline int
5025__always_inline
5026do_set_trim_threshold (size_t value)
5027{
5028 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5029 mp_.no_dyn_threshold);
5030 mp_.trim_threshold = value;
5031 mp_.no_dyn_threshold = 1;
5032 return 1;
5033}
5034
5035static inline int
5036__always_inline
5037do_set_top_pad (size_t value)
5038{
5039 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5040 mp_.no_dyn_threshold);
5041 mp_.top_pad = value;
5042 mp_.no_dyn_threshold = 1;
5043 return 1;
5044}
5045
5046static inline int
5047__always_inline
5048do_set_mmap_threshold (size_t value)
5049{
5050 /* Forbid setting the threshold too high. */
5051 if (value <= HEAP_MAX_SIZE / 2)
5052 {
5053 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5054 mp_.no_dyn_threshold);
5055 mp_.mmap_threshold = value;
5056 mp_.no_dyn_threshold = 1;
5057 return 1;
5058 }
5059 return 0;
5060}
5061
5062static inline int
5063__always_inline
5064do_set_mmaps_max (int32_t value)
5065{
5066 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5067 mp_.no_dyn_threshold);
5068 mp_.n_mmaps_max = value;
5069 mp_.no_dyn_threshold = 1;
5070 return 1;
5071}
5072
5073static inline int
5074__always_inline
5075do_set_mallopt_check (int32_t value)
5076{
5077 return 1;
5078}
5079
5080static inline int
5081__always_inline
5082do_set_perturb_byte (int32_t value)
5083{
5084 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5085 perturb_byte = value;
5086 return 1;
5087}
5088
5089static inline int
5090__always_inline
5091do_set_arena_test (size_t value)
5092{
5093 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5094 mp_.arena_test = value;
5095 return 1;
5096}
5097
5098static inline int
5099__always_inline
5100do_set_arena_max (size_t value)
5101{
5102 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5103 mp_.arena_max = value;
5104 return 1;
5105}
5106
5107#if USE_TCACHE
5108static inline int
5109__always_inline
5110do_set_tcache_max (size_t value)
5111{
5112 if (value >= 0 && value <= MAX_TCACHE_SIZE)
5113 {
5114 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5115 mp_.tcache_max_bytes = value;
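      /* Translate the byte limit into the number of tcache bins it
         covers.  */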
5116 mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
5117 }
5118 return 1;
5119}
5120
5121static inline int
5122__always_inline
5123do_set_tcache_count (size_t value)
5124{
5125 if (value <= MAX_TCACHE_COUNT)
5126 {
5127 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5128 mp_.tcache_count = value;
5129 }
5130 return 1;
5131}
5132
5133static inline int
5134__always_inline
5135do_set_tcache_unsorted_limit (size_t value)
5136{
5137 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5138 mp_.tcache_unsorted_limit = value;
5139 return 1;
5140}
5141#endif
5142
5143static inline int
5144__always_inline
5145do_set_mxfast (size_t value)
5146{
5147 if (value >= 0 && value <= MAX_FAST_SIZE)
5148 {
5149 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5150 set_max_fast (value);
5151 return 1;
5152 }
5153 return 0;
5154}
5155
5156int
5157__libc_mallopt (int param_number, int value)
5158{
5159 mstate av = &main_arena;
5160 int res = 1;
5161
5162 if (__malloc_initialized < 0)
5163 ptmalloc_init ();
5164 __libc_lock_lock (av->mutex);
5165
5166 LIBC_PROBE (memory_mallopt, 2, param_number, value);
5167
5168 /* We must consolidate main arena before changing max_fast
5169 (see definition of set_max_fast). */
5170 malloc_consolidate (av);
5171
5172 switch (param_number)
5173 {
5174 case M_MXFAST:
5175 do_set_mxfast (value);
5176 break;
5177
5178 case M_TRIM_THRESHOLD:
5179 do_set_trim_threshold (value);
5180 break;
5181
5182 case M_TOP_PAD:
5183 do_set_top_pad (value);
5184 break;
5185
5186 case M_MMAP_THRESHOLD:
5187 res = do_set_mmap_threshold (value);
5188 break;
5189
5190 case M_MMAP_MAX:
5191 do_set_mmaps_max (value);
5192 break;
5193
5194 case M_CHECK_ACTION:
5195 do_set_mallopt_check (value);
5196 break;
5197
5198 case M_PERTURB:
5199 do_set_perturb_byte (value);
5200 break;
5201
5202 case M_ARENA_TEST:
5203 if (value > 0)
5204 do_set_arena_test (value);
5205 break;
5206
5207 case M_ARENA_MAX:
5208 if (value > 0)
5209 do_set_arena_max (value);
5210 break;
5211 }
5212 __libc_lock_unlock (av->mutex);
5213 return res;
5214}
5215libc_hidden_def (__libc_mallopt)
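
/* Usage sketch (application side; not part of this file): for example,
   a program can disable fastbins with

     mallopt (M_MXFAST, 0);

   or forbid the use of mmap for servicing allocation requests with

     mallopt (M_MMAP_MAX, 0);

   both of which are routed through the switch above.  */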
5216
5217
5218/*
5219 -------------------- Alternative MORECORE functions --------------------
5220 */
5221
5222
5223/*
5224 General Requirements for MORECORE.
5225
5226 The MORECORE function must have the following properties:
5227
5228 If MORECORE_CONTIGUOUS is false:
5229
5230 * MORECORE must allocate in multiples of pagesize. It will
5231 only be called with arguments that are multiples of pagesize.
5232
5233 * MORECORE(0) must return an address that is at least
5234 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5235
5236  else (i.e. if MORECORE_CONTIGUOUS is true):
5237
5238 * Consecutive calls to MORECORE with positive arguments
5239 return increasing addresses, indicating that space has been
5240 contiguously extended.
5241
5242 * MORECORE need not allocate in multiples of pagesize.
5243 Calls to MORECORE need not have args of multiples of pagesize.
5244
5245 * MORECORE need not page-align.
5246
5247 In either case:
5248
5249 * MORECORE may allocate more memory than requested. (Or even less,
5250 but this will generally result in a malloc failure.)
5251
5252 * MORECORE must not allocate memory when given argument zero, but
5253 instead return one past the end address of memory from previous
5254 nonzero call. This malloc does NOT call MORECORE(0)
5255 until at least one call with positive arguments is made, so
5256 the initial value returned is not important.
5257
5258 * Even though consecutive calls to MORECORE need not return contiguous
5259 addresses, it must be OK for malloc'ed chunks to span multiple
5260 regions in those cases where they do happen to be contiguous.
5261
5262 * MORECORE need not handle negative arguments -- it may instead
5263 just return MORECORE_FAILURE when given negative arguments.
5264 Negative arguments are always multiples of pagesize. MORECORE
5265 must not misinterpret negative args as large positive unsigned
5266 args. You can suppress all such calls from even occurring by defining
5267    MORECORE_CANNOT_TRIM.
5268
5269  There is some variation across systems about the type of the
5270  argument to sbrk/MORECORE.  Since size_t is unsigned and sbrk
5271  supports negative args, the argument cannot actually be size_t; it is
5272  normally the signed type of the same width as size_t (sometimes
5273  declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
5274 matter though. Internally, we use "long" as arguments, which should
5275 work across all reasonable possibilities.
5276
5277 Additionally, if MORECORE ever returns failure for a positive
5278 request, then mmap is used as a noncontiguous system allocator. This
5279 is a useful backup strategy for systems with holes in address spaces
5280 -- in this case sbrk cannot contiguously expand the heap, but mmap
5281 may be able to map noncontiguous space.
5282
5283 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5284 a function that always returns MORECORE_FAILURE.
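
  For instance, a minimal sketch (the names here are arbitrary; the argument
  type follows the sbrk/MORECORE type discussion above):

    static void *always_fail_morecore (ptrdiff_t increment)
    {
      return (void *) MORECORE_FAILURE;
    }
    #define MORECORE always_fail_morecore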
5285
5286 If you are using this malloc with something other than sbrk (or its
5287 emulation) to supply memory regions, you probably want to set
5288 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5289 allocator kindly contributed for pre-OSX macOS. It uses virtually
5290 but not necessarily physically contiguous non-paged memory (locked
5291 in, present and won't get swapped out). You can use it by
5292 uncommenting this section, adding some #includes, and setting up the
5293 appropriate defines above:
5294
5295 *#define MORECORE osMoreCore
5296 *#define MORECORE_CONTIGUOUS 0
5297
5298 There is also a shutdown routine that should somehow be called for
5299 cleanup upon program exit.
5300
5301 *#define MAX_POOL_ENTRIES 100
5302 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5303 static int next_os_pool;
5304 void *our_os_pools[MAX_POOL_ENTRIES];
5305
5306 void *osMoreCore(int size)
5307 {
5308 void *ptr = 0;
5309 static void *sbrk_top = 0;
5310
5311 if (size > 0)
5312 {
5313 if (size < MINIMUM_MORECORE_SIZE)
5314 size = MINIMUM_MORECORE_SIZE;
5315 if (CurrentExecutionLevel() == kTaskLevel)
5316 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5317 if (ptr == 0)
5318 {
5319 return (void *) MORECORE_FAILURE;
5320 }
5321 // save ptrs so they can be freed during cleanup
5322 our_os_pools[next_os_pool] = ptr;
5323 next_os_pool++;
5324 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5325 sbrk_top = (char *) ptr + size;
5326 return ptr;
5327 }
5328 else if (size < 0)
5329 {
5330 // we don't currently support shrink behavior
5331 return (void *) MORECORE_FAILURE;
5332 }
5333 else
5334 {
5335 return sbrk_top;
5336 }
5337 }
5338
5339 // cleanup any allocated memory pools
5340 // called as last thing before shutting down driver
5341
5342 void osCleanupMem(void)
5343 {
5344 void **ptr;
5345
5346 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5347 if (*ptr)
5348 {
5349 PoolDeallocate(*ptr);
5350        *ptr = 0;
5351 }
5352 }
5353
5354 */
5355
5356
5357/* Helper code. */
5358
5359extern char **__libc_argv attribute_hidden;
5360
5361static void
5362malloc_printerr (const char *str)
5363{
5364 __libc_message (do_abort, "%s\n", str);
5365 __builtin_unreachable ();
5366}
5367
5368/* We need a wrapper function for one of the additions of POSIX. */
5369int
5370__posix_memalign (void **memptr, size_t alignment, size_t size)
5371{
5372 void *mem;
5373
5374  /* Test whether the ALIGNMENT argument is valid.  It must be a power
5375     of two multiple of sizeof (void *).  */
5376 if (alignment % sizeof (void *) != 0
5377 || !powerof2 (alignment / sizeof (void *))
5378 || alignment == 0)
5379 return EINVAL;
5380
5381
5382 void *address = RETURN_ADDRESS (0);
5383 mem = _mid_memalign (alignment, size, address);
5384
5385 if (mem != NULL)
5386 {
5387 *memptr = mem;
5388 return 0;
5389 }
5390
5391 return ENOMEM;
5392}
5393weak_alias (__posix_memalign, posix_memalign)
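
/* A hedged usage sketch for the wrapper above.  Per POSIX the error is the
   return value (EINVAL or ENOMEM); errno is not set.  The helper name is
   hypothetical; <stdio.h>, <stdlib.h> and <string.h> are assumed:

     #include <stdio.h>
     #include <stdlib.h>
     #include <string.h>

     static void *
     cacheline_buffer (size_t size)
     {
       void *p = NULL;
       // 64 is a power-of-two multiple of sizeof (void *), so the validity
       // check above passes.
       int err = posix_memalign (&p, 64, size);
       if (err != 0)
         {
           fprintf (stderr, "posix_memalign: %s\n", strerror (err));
           return NULL;
         }
       return p;   // release with free ()
     }
*/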
5394
5395
5396int
5397__malloc_info (int options, FILE *fp)
5398{
5399 /* For now, at least. */
5400 if (options != 0)
5401 return EINVAL;
5402
5403 int n = 0;
5404 size_t total_nblocks = 0;
5405 size_t total_nfastblocks = 0;
5406 size_t total_avail = 0;
5407 size_t total_fastavail = 0;
5408 size_t total_system = 0;
5409 size_t total_max_system = 0;
5410 size_t total_aspace = 0;
5411 size_t total_aspace_mprotect = 0;
5412
5413
5414
5415 if (__malloc_initialized < 0)
5416 ptmalloc_init ();
5417
5418 fputs ("<malloc version=\"1\">\n", fp);
5419
5420 /* Iterate over all arenas currently in use. */
5421 mstate ar_ptr = &main_arena;
5422 do
5423 {
5424 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5425
5426 size_t nblocks = 0;
5427 size_t nfastblocks = 0;
5428 size_t avail = 0;
5429 size_t fastavail = 0;
5430 struct
5431 {
5432 size_t from;
5433 size_t to;
5434 size_t total;
5435 size_t count;
5436 } sizes[NFASTBINS + NBINS - 1];
5437#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5438
5439 __libc_lock_lock (ar_ptr->mutex);
5440
5441 /* Account for top chunk. The top-most available chunk is
5442 treated specially and is never in any bin. See "initial_top"
5443 comments. */
5444 avail = chunksize (ar_ptr->top);
5445 nblocks = 1; /* Top always exists. */
5446
5447 for (size_t i = 0; i < NFASTBINS; ++i)
5448 {
5449 mchunkptr p = fastbin (ar_ptr, i);
5450 if (p != NULL)
5451 {
5452 size_t nthissize = 0;
5453 size_t thissize = chunksize (p);
5454
5455 while (p != NULL)
5456 {
5457 ++nthissize;
5458 p = p->fd;
5459 }
5460
5461 fastavail += nthissize * thissize;
5462 nfastblocks += nthissize;
5463 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5464 sizes[i].to = thissize;
5465 sizes[i].count = nthissize;
5466 }
5467 else
5468 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5469
5470 sizes[i].total = sizes[i].count * sizes[i].to;
5471 }
5472
5473
5474 mbinptr bin;
5475 struct malloc_chunk *r;
5476
5477 for (size_t i = 1; i < NBINS; ++i)
5478 {
5479 bin = bin_at (ar_ptr, i);
5480 r = bin->fd;
5481 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5482 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5483 = sizes[NFASTBINS - 1 + i].count = 0;
5484
5485 if (r != NULL)
5486 while (r != bin)
5487 {
5488 size_t r_size = chunksize_nomask (r);
5489 ++sizes[NFASTBINS - 1 + i].count;
5490 sizes[NFASTBINS - 1 + i].total += r_size;
5491 sizes[NFASTBINS - 1 + i].from
5492 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
5493 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5494 r_size);
5495
5496 r = r->fd;
5497 }
5498
5499 if (sizes[NFASTBINS - 1 + i].count == 0)
5500 sizes[NFASTBINS - 1 + i].from = 0;
5501 nblocks += sizes[NFASTBINS - 1 + i].count;
5502 avail += sizes[NFASTBINS - 1 + i].total;
5503 }
5504
5505 size_t heap_size = 0;
5506 size_t heap_mprotect_size = 0;
5507 size_t heap_count = 0;
5508 if (ar_ptr != &main_arena)
5509 {
5510 /* Iterate over the arena heaps from back to front. */
5511 heap_info *heap = heap_for_ptr (top (ar_ptr));
5512 do
5513 {
5514 heap_size += heap->size;
5515 heap_mprotect_size += heap->mprotect_size;
5516 heap = heap->prev;
5517 ++heap_count;
5518 }
5519 while (heap != NULL);
5520 }
5521
5522 __libc_lock_unlock (ar_ptr->mutex);
5523
5524 total_nfastblocks += nfastblocks;
5525 total_fastavail += fastavail;
5526
5527 total_nblocks += nblocks;
5528 total_avail += avail;
5529
5530 for (size_t i = 0; i < nsizes; ++i)
5531 if (sizes[i].count != 0 && i != NFASTBINS)
5532 fprintf (fp, "\
5533 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5534 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5535
5536 if (sizes[NFASTBINS].count != 0)
5537 fprintf (fp, "\
5538 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5539 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5540 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5541
5542 total_system += ar_ptr->system_mem;
5543 total_max_system += ar_ptr->max_system_mem;
5544
5545 fprintf (fp,
5546 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5547 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5548 "<system type=\"current\" size=\"%zu\"/>\n"
5549 "<system type=\"max\" size=\"%zu\"/>\n",
5550 nfastblocks, fastavail, nblocks, avail,
5551 ar_ptr->system_mem, ar_ptr->max_system_mem);
5552
5553 if (ar_ptr != &main_arena)
5554 {
5555 fprintf (fp,
5556 "<aspace type=\"total\" size=\"%zu\"/>\n"
5557 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5558 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5559 heap_size, heap_mprotect_size, heap_count);
5560 total_aspace += heap_size;
5561 total_aspace_mprotect += heap_mprotect_size;
5562 }
5563 else
5564 {
5565 fprintf (fp,
5566 "<aspace type=\"total\" size=\"%zu\"/>\n"
5567 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5568 ar_ptr->system_mem, ar_ptr->system_mem);
5569 total_aspace += ar_ptr->system_mem;
5570 total_aspace_mprotect += ar_ptr->system_mem;
5571 }
5572
5573 fputs ("</heap>\n", fp);
5574 ar_ptr = ar_ptr->next;
5575 }
5576 while (ar_ptr != &main_arena);
5577
5578 fprintf (fp,
5579 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5580 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5581 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
5582 "<system type=\"current\" size=\"%zu\"/>\n"
5583 "<system type=\"max\" size=\"%zu\"/>\n"
5584 "<aspace type=\"total\" size=\"%zu\"/>\n"
5585 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5586 "</malloc>\n",
5587 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
5588 mp_.n_mmaps, mp_.mmapped_mem,
5589 total_system, total_max_system,
5590 total_aspace, total_aspace_mprotect);
5591
5592 return 0;
5593}
5594weak_alias (__malloc_info, malloc_info)
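
/* A hedged usage sketch for the function above.  glibc declares malloc_info
   in <malloc.h>; options must currently be 0 (anything else yields EINVAL,
   see the check at the top of __malloc_info).  The helper name is
   hypothetical:

     #include <malloc.h>
     #include <stdio.h>

     static void
     dump_heap_statistics (void)
     {
       // Emits one <heap> element per arena followed by process-wide
       // totals, all wrapped in <malloc version="1"> ... </malloc>.
       if (malloc_info (0, stdout) != 0)
         fputs ("malloc_info failed\n", stderr);
     }
*/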
5595
5596
5597strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5598strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5599strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5600strong_alias (__libc_memalign, __memalign)
5601weak_alias (__libc_memalign, memalign)
5602strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5603strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5604strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5605strong_alias (__libc_mallinfo, __mallinfo)
5606weak_alias (__libc_mallinfo, mallinfo)
5607strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5608
5609weak_alias (__malloc_stats, malloc_stats)
5610weak_alias (__malloc_usable_size, malloc_usable_size)
5611weak_alias (__malloc_trim, malloc_trim)
5612
5613#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
5614compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
5615#endif
5616
5617/* ------------------------------------------------------------
5618 History:
5619
5620 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5621
5622 */
5623/*
5624 * Local variables:
5625 * c-basic-offset: 2
5626 * End:
5627 */
5628