/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}
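
/* For illustration: the hooks above are one-shot bootstrap hooks.  The
   first allocation call is routed through them, runs ptmalloc_init,
   clears the hook, and delegates to the normal entry point; later
   calls never see them.  A hypothetical application-side hook chained
   in the same (deprecated) style, assuming <stdio.h> and <malloc.h>,
   could look like this; it uninstalls itself before calling malloc to
   avoid recursing, then reinstalls itself:

     static void *(*old_malloc_hook) (size_t, const void *);

     static void *
     my_malloc_hook (size_t sz, const void *caller)
     {
       void *result;
       __malloc_hook = old_malloc_hook;
       result = malloc (sz);
       fprintf (stderr, "malloc (%zu) -> %p (caller %p)\n",
                sz, result, caller);
       old_malloc_hook = __malloc_hook;
       __malloc_hook = my_malloc_hook;
       return result;
     }
*/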

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init (void)
{
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
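
/* Usage note: these checking hooks are normally activated by
   ptmalloc_init when the MALLOC_CHECK_ environment variable (or the
   glibc.malloc.check tunable) is set to a nonzero value before the
   first allocation, e.g.

     $ MALLOC_CHECK_=3 ./myprog

   after which malloc, free, realloc and memalign all run through the
   *_check wrappers below.  */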

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
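
/* Worked example (illustrative address): for a chunk at p = 0x602010,
   (uintptr_t) p >> 3 is 0xC0402 and (uintptr_t) p >> 11 is 0xC04, so
   magic = (0xC0402 ^ 0xC04) & 0xFF = 0x06.  Distinct chunk addresses
   thus tend to get distinct magic bytes, so a stray byte that passes
   the check for one chunk is unlikely to pass it for another.  */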


/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz.  */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
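
/* Worked example (illustrative sizes, 64-bit build with SIZE_SZ = 8):
   for req_sz = 5 in a non-mmapped chunk with chunksize (p) = 32,
   max_sz is 32 - 16 + 8 = 24.  The loop starts at i = 23 and writes
   the single length byte m_ptr[23] = 18 (assuming 18 is not the magic
   byte), which drops i to req_sz and ends the loop; then m_ptr[5] is
   set to the magic byte.  malloc_check_get_size walks the chain in
   the other direction: it reads 18 at the top, steps down 18 bytes,
   finds the magic byte at offset 5, and so recovers the request size.
   An application overrun that clobbers the magic byte or a length
   byte derails this walk, and the bounds checks then report the
   corruption.  */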

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory. */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first. */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}
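
/* Worked example (assuming a 64-bit build, 4096-byte pages and
   MALLOC_ALIGNMENT = 0x10): a chunk carved from an mmap()ed region
   starts at a page boundary, so its user pointer mem = chunk2mem (p)
   sits at page offset 0x10, which the offset whitelist above accepts.
   A forged pointer at, say, page offset 0x18 fails that first test
   and is rejected before any chunk header fields are trusted.  */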

/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}

static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;

  if (sz + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, sz + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}

static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}

static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;

  if (bytes + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  checked_request2size (bytes + 1, nb);
  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead. */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem;      /* do nothing */
        else
          {
            /* Must alloc, copy, free. */
            top_check ();
            newmem = _int_malloc (&main_arena, bytes + 1);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
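
/* Worked example: the current version encodes major 0, minor 5 as
   0x005.  malloc_set_state below compares only the major byte
   (version & ~0xffl), so a dump recorded by a hypothetical version
   0x105 (major 1) is rejected with -2, while 0x006 (major 0, minor 6)
   would still pass the version check.  */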

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high. */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

#endif /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */