/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work.  */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled at the request of the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.
   If it turns out that the heap was created with malloc checking and
   the user has requested it, malloc_set_state just calls
   __malloc_check_init again to enable it.  On the other hand, reusing
   such a heap without further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks.  */
void
__malloc_check_init (void)
{
  if (disallow_malloc_check)
    {
      disallow_malloc_check = 0;
      return;
    }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
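
/* These hooks are normally installed because the user set the
   MALLOC_CHECK_ environment variable before the program started (see
   the comment at disallow_malloc_check above).  A sketch of the usual
   invocation from a shell, with a hypothetical program name:

     MALLOC_CHECK_=3 ./myprog

   where the value 3 historically requested both a diagnostic message
   and an abort on the first detected inconsistency.  */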

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code.  */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
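
/* Worked example (a sketch; the address is illustrative): for
   p == 0x555555559010,

     ((uintptr_t) p >> 3)  == 0xaaaaaaab202    (low byte 0x02)
     ((uintptr_t) p >> 11) == 0xaaaaaaab2      (low byte 0xb2)

   so the result is (0x02 ^ 0xb2) & 0xff == 0xb0, which is not 1 and is
   returned unchanged.  */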


/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz.  */

static void *
internal_function
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
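
/* Worked example (a sketch; the sizes are illustrative): with
   max_sz == 600, req_sz == 10 and magic == 0xb0, the loop above stores
   the length-byte chain

     m_ptr[599] = 255,  m_ptr[344] = 255,  m_ptr[89] = 79,

   and finally m_ptr[10] = 0xb0.  malloc_check_get_size walks the same
   chain downwards (599 -> 344 -> 89 -> 10), subtracting each length
   byte until it reaches the magic byte, thereby recovering the
   requested size 10.  A single-byte overrun clobbers either the magic
   byte or a length byte, breaks the walk, and is reported as memory
   corruption.  */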

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  */

static mchunkptr
internal_function
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory.  */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first.  */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
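  /* The pointer has checked out; invert the magic byte, so that a
     second free () of the same pointer fails the walk above and is
     reported as invalid (catching double frees).  realloc_check undoes
     this via *magic_p when the old chunk remains in use.  */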
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}

/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}

static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;

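  /* sz + 1 == 0 if and only if sz == SIZE_MAX; the extra byte requested
     below is where mem2mem_check stores the magic byte, so a request of
     SIZE_MAX can never be satisfied.  */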
  if (sz + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, sz + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}

static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}

static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;

  if (bytes + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  checked_request2size (bytes + 1, nb);
  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead.  */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free.  */
            top_check ();
            newmem = _int_malloc (&main_arena, bytes + 1);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we never
     reach this point with magic_p uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, the old chunk will still be used, so we must
     invert that change here.  */
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is a power of 2: round it up to the next power
     of two that is at least twice MALLOC_ALIGNMENT (e.g. a request for
     alignment 24 becomes 32).  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */
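
/* A sketch of the historical calling pattern (hypothetical application
   code, not part of this file):

     void *state = malloc_get_state ();   // before dumping the heap
     ... write the heap image and state to disk ...
     malloc_set_state (state);            // in the re-executed binary,
                                          // before any malloc call

   With the dummy malloc_get_state below, new state records can no
   longer be created (it fails with ENOSYS); malloc_set_state still
   accepts previously recorded states, as described above.  */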

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high.  */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because __malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

#endif /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */