/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work.  */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* Activate a standard set of debugging hooks.  */
void
__malloc_check_init (void)
{
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
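
/* __malloc_check_init is normally reached from ptmalloc_init when
   malloc checking has been requested (typically via the MALLOC_CHECK_
   environment variable), so the hooks above take effect before the
   first allocation happens.  */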

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code.  */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
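
/* The magic byte depends on the chunk address, so a single fixed byte
   value smeared over the heap by a buggy program is unlikely to match
   the magic byte of every chunk it overwrites.  */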


/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}
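
/* In other words, this is the inverse of mem2mem_check below: it
   follows the chain of block-length bytes from the top of the chunk
   down to the magic byte, whose offset from the user pointer equals
   the originally requested size.  */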

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz.  */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
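
/* As an illustration (values chosen only for the example): with 24
   usable bytes and req_sz == 3, the loop stores the length byte
   m_ptr[23] = 20 and then writes the magic byte at m_ptr[3].
   malloc_check_get_size starts at m_ptr[23], hops down by 20, finds
   the magic byte and recovers the requested size of 3.  A length byte
   that would collide with the magic value is decremented by one, which
   is why magicbyte never returns 1.  */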

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory.  */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first.  */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
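  /* Invalidate the magic byte: a second free () or realloc () of the
     same pointer will then fail the search above, which catches double
     frees.  realloc_check restores the byte through *magic_p when it
     decides to keep the old chunk.  */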
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}

/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}
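
/* The checking wrappers below call top_check before handing a request
   to _int_malloc, _int_realloc or _int_memalign, so an overrun that
   has clobbered the top chunk is reported before the damaged metadata
   is used for a new allocation.  */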

static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;
  size_t nb;

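  /* Reserve one extra byte for the trailing magic byte that
     mem2mem_check will write at the end of the requested size.  */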
  if (__builtin_add_overflow (sz, 1, &nb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, nb);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}

static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}

static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;
  size_t rb;

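  /* As in malloc_check, one extra byte is reserved for the trailing
     magic byte.  */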
  if (__builtin_add_overflow (bytes, 1, &rb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  if (!checked_request2size (rb, &nb))
    goto invert;

  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead.  */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem;      /* do nothing */
        else
          {
            /* Must alloc, copy, free.  */
            top_check ();
            newmem = _int_malloc (&main_arena, rb);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
invert:
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }
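  /* For example, a requested alignment of 24 is rounded up to 32 here
     before the request is handed to _int_memalign.  */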

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
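
/* The layout above mirrors what the historic malloc_get_state wrote
   into dumped Emacs images, so malloc_set_state can read those images
   back; it therefore has to stay binary-compatible with that old
   format.  */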

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high.  */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
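  /* In a typical dumped heap the first non-zero word is the size field
     of the lowest chunk, and the zero word just before it (skipped by
     the scan) is that chunk's prev_size field, so mem2chunk on the
     following word yields the chunk header.  */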
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

#endif  /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */