/* Repeating a memory blob, with alias mapping optimization.
   Copyright (C) 2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <support/blob_repeat.h>
#include <support/check.h>
#include <support/support.h>
#include <support/test-driver.h>
#include <support/xunistd.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wchar.h>

/* Small allocations should use malloc directly instead of the mmap
   optimization because mappings carry a lot of overhead.  */
static const size_t maximum_small_size = 4 * 1024 * 1024;

/* Set *RESULT to LEFT * RIGHT.  Return true if the multiplication
   overflowed.  See <malloc/malloc-internal.h>.  */
static inline bool
check_mul_overflow_size_t (size_t left, size_t right, size_t *result)
{
#if __GNUC__ >= 5
  return __builtin_mul_overflow (left, right, result);
#else
  /* size_t is unsigned, so the behavior on overflow is defined.  */
  *result = left * right;
  size_t half_size_t = ((size_t) 1) << (8 * sizeof (size_t) / 2);
  if (__glibc_unlikely ((left | right) >= half_size_t))
    {
      if (__glibc_unlikely (right != 0 && *result / right != left))
        return true;
    }
  return false;
#endif
}
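
/* Illustration (hypothetical test code, not part of this file): on a
   64-bit target, half_size_t is 2**32, so small factors skip the
   division check entirely, while (2**33) * (2**33) wraps to 0 and is
   reported as an overflow:

     size_t product;
     TEST_VERIFY (!check_mul_overflow_size_t (3, 7, &product));
     TEST_VERIFY (check_mul_overflow_size_t ((size_t) 1 << 33,
                                             (size_t) 1 << 33,
                                             &product));  */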

/* Internal helper for fill.  */
static void
fill0 (char *target, const char *element, size_t element_size,
       size_t count)
{
  while (count > 0)
    {
      memcpy (target, element, element_size);
      target += element_size;
      --count;
    }
}

/* Fill the buffer at TARGET with COUNT copies of the ELEMENT_SIZE
   bytes starting at ELEMENT.  */
static void
fill (char *target, const char *element, size_t element_size,
      size_t count)
{
  if (element_size == 0 || count == 0)
    return;
  else if (element_size == 1)
    memset (target, element[0], count);
  else if (element_size == sizeof (wchar_t))
    {
      wchar_t wc;
      memcpy (&wc, element, sizeof (wc));
      wmemset ((wchar_t *) target, wc, count);
    }
  else if (element_size < 1024 && count > 4096)
    {
      /* Use larger copies for really small element sizes.  */
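      /* For example (illustrative), element_size == 3 gives
         buffer_count == 8192 / 3 == 2730, so each memcpy below moves
         8190 bytes at a time instead of 3.  */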
      char buffer[8192];
      size_t buffer_count = sizeof (buffer) / element_size;
      fill0 (buffer, element, element_size, buffer_count);
      while (count > 0)
        {
          size_t copy_count = buffer_count;
          if (copy_count > count)
            copy_count = count;
          size_t copy_bytes = copy_count * element_size;
          memcpy (target, buffer, copy_bytes);
          target += copy_bytes;
          count -= copy_count;
        }
    }
  else
    fill0 (target, element, element_size, count);
}

/* Use malloc instead of mmap for small allocations and unusual size
   combinations.  */
static struct support_blob_repeat
allocate_malloc (size_t total_size, const void *element, size_t element_size,
                 size_t count)
{
  void *buffer = malloc (total_size);
  if (buffer == NULL)
    return (struct support_blob_repeat) { 0 };
  fill (buffer, element, element_size, count);
  return (struct support_blob_repeat)
    {
      .start = buffer,
      .size = total_size,
      .use_malloc = true
    };
}

/* Return the least common multiple of PAGE_SIZE and ELEMENT_SIZE,
   avoiding overflow.  This assumes that PAGE_SIZE is a power of
   two.  */
static size_t
minimum_stride_size (size_t page_size, size_t element_size)
{
  TEST_VERIFY_EXIT (page_size > 0);
  TEST_VERIFY_EXIT (element_size > 0);

  /* Compute the number of trailing zeros common to both sizes.  */
  unsigned int common_zeros = __builtin_ctzll (page_size | element_size);

  /* In the product, this power of two appears twice, but in the
     least common multiple, it appears only once.  Therefore, shift
     one factor.  */
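  /* Worked example (for illustration): page_size == 4096 == 2**12
     and element_size == 96 == 2**5 * 3 share five trailing zero
     bits, so the result is (4096 >> 5) * 96 == 12288 == 2**12 * 3,
     their least common multiple.  */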
  size_t multiple;
  if (check_mul_overflow_size_t (page_size >> common_zeros, element_size,
                                 &multiple))
    return 0;
  return multiple;
}

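/* Sketch of the mapping strategy used below (descriptive only): one
   stride of repeated data is written to a temporary file, and the
   file is then mapped MAP_PRIVATE over the reserved region once per
   stride, so all strides share the same backing pages until they are
   written to:

     file:    [ stride of repeated ELEMENT data ]
     memory:  [ stride 0 ][ stride 1 ][ stride 2 ][ tail ]
                   \            |          |        /
                    all private mappings of file offset 0  */
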
/* Allocations larger than maximum_small_size potentially use mmap
   with alias mappings.  */
static struct support_blob_repeat
allocate_big (size_t total_size, const void *element, size_t element_size,
              size_t count)
{
  unsigned long page_size = xsysconf (_SC_PAGESIZE);
  size_t stride_size = minimum_stride_size (page_size, element_size);
  if (stride_size == 0)
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }

  /* Ensure that the stride size is at least maximum_small_size.
     This is necessary to reduce the number of distinct mappings.  */
  if (stride_size < maximum_small_size)
    stride_size
      = ((maximum_small_size + stride_size - 1) / stride_size) * stride_size;
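  /* For instance (illustrative), a 12288-byte stride is rounded up
     to 342 * 12288 == 4202496 bytes, the smallest stride multiple
     that reaches maximum_small_size; it remains a multiple of both
     the page size and the element size.  */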

  if (stride_size > total_size)
    /* The mmap optimization would not save anything.  */
    return allocate_malloc (total_size, element, element_size, count);

  /* Reserve the memory region.  If we cannot create the mapping,
     there is no reason to set up the backing file.  */
  void *target = mmap (NULL, total_size, PROT_NONE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (target == MAP_FAILED)
    return (struct support_blob_repeat) { 0 };

  /* Create the backing file for the repeated mapping.  Call mkstemp
     directly to remove the resources backing the temporary file
     immediately, once support_blob_repeat_free is called.  Using
     create_temp_file would result in a warning during post-test
     cleanup.  */
  int fd;
  {
    char *temppath = xasprintf ("%s/support_blob_repeat-XXXXXX", test_dir);
    fd = mkstemp (temppath);
    if (fd < 0)
      FAIL_EXIT1 ("mkstemp (\"%s\"): %m", temppath);
    xunlink (temppath);
    free (temppath);
  }

  /* Make sure that there is backing storage, so that the fill
     operation will not fault.  */
  if (posix_fallocate (fd, 0, stride_size) != 0)
    FAIL_EXIT1 ("posix_fallocate (%zu): %m", stride_size);

  /* The stride size must still be a multiple of the page size and
     element size.  */
  TEST_VERIFY_EXIT ((stride_size % page_size) == 0);
  TEST_VERIFY_EXIT ((stride_size % element_size) == 0);

  /* Fill the backing store.  */
  {
    void *ptr = mmap (target, stride_size, PROT_READ | PROT_WRITE,
                      MAP_FIXED | MAP_FILE | MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED)
      {
        int saved_errno = errno;
        xmunmap (target, total_size);
        xclose (fd);
        errno = saved_errno;
        return (struct support_blob_repeat) { 0 };
      }
    if (ptr != target)
      FAIL_EXIT1 ("mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);

    /* Write the repeating data.  */
    fill (target, element, element_size, stride_size / element_size);

    /* Return to a PROT_NONE mapping, just to be on the safe side: a
       leftover writable MAP_SHARED mapping would let stores reach
       the backing file and, through it, every alias created
       below.  */
    ptr = mmap (target, stride_size, PROT_NONE,
                MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED)
      FAIL_EXIT1 ("Failed to reinstate PROT_NONE mapping: %m");
    if (ptr != target)
      FAIL_EXIT1 ("PROT_NONE mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);
  }

  /* Create the alias mappings.  */
  {
    size_t remaining_size = total_size;
    char *current = target;
    int flags = MAP_FIXED | MAP_FILE | MAP_PRIVATE;
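    /* If available, MAP_NORESERVE avoids reserving commit charge
       (swap space) for each private alias; pages stay shared with
       the backing file until first written to.  */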
#ifdef MAP_NORESERVE
    flags |= MAP_NORESERVE;
#endif
    while (remaining_size > 0)
      {
        size_t to_map = stride_size;
        if (to_map > remaining_size)
          to_map = remaining_size;
        void *ptr = mmap (current, to_map, PROT_READ | PROT_WRITE,
                          flags, fd, 0);
        if (ptr == MAP_FAILED)
          {
            int saved_errno = errno;
            xmunmap (target, total_size);
            xclose (fd);
            errno = saved_errno;
            return (struct support_blob_repeat) { 0 };
          }
        if (ptr != current)
          FAIL_EXIT1 ("MAP_PRIVATE mapping of %zu bytes moved from %p to %p",
                      to_map, current, ptr);
        remaining_size -= to_map;
        current += to_map;
      }
  }

  xclose (fd);

  return (struct support_blob_repeat)
    {
      .start = target,
      .size = total_size,
      .use_malloc = false
    };
}

struct support_blob_repeat
support_blob_repeat_allocate (const void *element, size_t element_size,
                              size_t count)
{
  size_t total_size;
  if (check_mul_overflow_size_t (element_size, count, &total_size))
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }
  if (total_size <= maximum_small_size)
    return allocate_malloc (total_size, element, element_size, count);
  else
    return allocate_big (total_size, element, element_size, count);
}
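
/* Usage sketch (hypothetical caller, not part of this file):

     struct support_blob_repeat repeat
       = support_blob_repeat_allocate ("abc", 3, 10 * 1000 * 1000);
     if (repeat.start == NULL)
       FAIL_EXIT1 ("repeat allocation failed: %m");
     TEST_VERIFY (memcmp (repeat.start, "abcabc", 6) == 0);
     support_blob_repeat_free (&repeat);

   The blob must be released with support_blob_repeat_free below,
   which picks free or munmap to match how it was allocated.  */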

void
support_blob_repeat_free (struct support_blob_repeat *blob)
{
  if (blob->size > 0)
    {
      /* Preserve errno across the deallocation.  */
      int saved_errno = errno;
      if (blob->use_malloc)
        free (blob->start);
      else
        xmunmap (blob->start, blob->size);
      errno = saved_errno;
    }
  *blob = (struct support_blob_repeat) { 0 };
}