blob_repeat.c 8.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282
  1. /* Repeating a memory blob, with alias mapping optimization.
  2. Copyright (C) 2018-2019 Free Software Foundation, Inc.
  3. This file is part of the GNU C Library.
  4. The GNU C Library is free software; you can redistribute it and/or
  5. modify it under the terms of the GNU Lesser General Public
  6. License as published by the Free Software Foundation; either
  7. version 2.1 of the License, or (at your option) any later version.
  8. The GNU C Library is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. Lesser General Public License for more details.
  12. You should have received a copy of the GNU Lesser General Public
  13. License along with the GNU C Library; if not, see
  14. <http://www.gnu.org/licenses/>. */
  15. #include <errno.h>
  16. #include <fcntl.h>
  17. #include <stdbool.h>
  18. #include <stdlib.h>
  19. #include <string.h>
  20. #include <support/blob_repeat.h>
  21. #include <support/check.h>
  22. #include <support/test-driver.h>
  23. #include <support/support.h>
  24. #include <support/xunistd.h>
  25. #include <sys/mman.h>
  26. #include <unistd.h>
  27. #include <wchar.h>
/* Small allocations should use malloc directly instead of the mmap
   optimization because mappings carry a lot of overhead.  Allocations
   of at most this many bytes take the malloc path.  */
static const size_t maximum_small_size = 4 * 1024 * 1024;
  31. /* Internal helper for fill. */
  32. static void
  33. fill0 (char *target, const char *element, size_t element_size,
  34. size_t count)
  35. {
  36. while (count > 0)
  37. {
  38. memcpy (target, element, element_size);
  39. target += element_size;
  40. --count;
  41. }
  42. }
  43. /* Fill the buffer at TARGET with COUNT copies of the ELEMENT_SIZE
  44. bytes starting at ELEMENT. */
  45. static void
  46. fill (char *target, const char *element, size_t element_size,
  47. size_t count)
  48. {
  49. if (element_size == 0 || count == 0)
  50. return;
  51. else if (element_size == 1)
  52. memset (target, element[0], count);
  53. else if (element_size == sizeof (wchar_t))
  54. {
  55. wchar_t wc;
  56. memcpy (&wc, element, sizeof (wc));
  57. wmemset ((wchar_t *) target, wc, count);
  58. }
  59. else if (element_size < 1024 && count > 4096)
  60. {
  61. /* Use larger copies for really small element sizes. */
  62. char buffer[8192];
  63. size_t buffer_count = sizeof (buffer) / element_size;
  64. fill0 (buffer, element, element_size, buffer_count);
  65. while (count > 0)
  66. {
  67. size_t copy_count = buffer_count;
  68. if (copy_count > count)
  69. copy_count = count;
  70. size_t copy_bytes = copy_count * element_size;
  71. memcpy (target, buffer, copy_bytes);
  72. target += copy_bytes;
  73. count -= copy_count;
  74. }
  75. }
  76. else
  77. fill0 (target, element, element_size, count);
  78. }
  79. /* Use malloc instead of mmap for small allocations and unusual size
  80. combinations. */
  81. static struct support_blob_repeat
  82. allocate_malloc (size_t total_size, const void *element, size_t element_size,
  83. size_t count)
  84. {
  85. void *buffer = malloc (total_size);
  86. if (buffer == NULL)
  87. return (struct support_blob_repeat) { 0 };
  88. fill (buffer, element, element_size, count);
  89. return (struct support_blob_repeat)
  90. {
  91. .start = buffer,
  92. .size = total_size,
  93. .use_malloc = true
  94. };
  95. }
  96. /* Return the least common multiple of PAGE_SIZE and ELEMENT_SIZE,
  97. avoiding overflow. This assumes that PAGE_SIZE is a power of
  98. two. */
  99. static size_t
  100. minimum_stride_size (size_t page_size, size_t element_size)
  101. {
  102. TEST_VERIFY_EXIT (page_size > 0);
  103. TEST_VERIFY_EXIT (element_size > 0);
  104. /* Compute the number of trailing zeros common to both sizes. */
  105. unsigned int common_zeros = __builtin_ctzll (page_size | element_size);
  106. /* In the product, this power of two appears twice, but in the least
  107. common multiple, it appears only once. Therefore, shift one
  108. factor. */
  109. size_t multiple;
  110. if (__builtin_mul_overflow (page_size >> common_zeros, element_size,
  111. &multiple))
  112. return 0;
  113. return multiple;
  114. }
  115. /* Allocations larger than maximum_small_size potentially use mmap
  116. with alias mappings. */
  117. static struct support_blob_repeat
  118. allocate_big (size_t total_size, const void *element, size_t element_size,
  119. size_t count)
  120. {
  121. unsigned long page_size = xsysconf (_SC_PAGESIZE);
  122. size_t stride_size = minimum_stride_size (page_size, element_size);
  123. if (stride_size == 0)
  124. {
  125. errno = EOVERFLOW;
  126. return (struct support_blob_repeat) { 0 };
  127. }
  128. /* Ensure that the stride size is at least maximum_small_size. This
  129. is necessary to reduce the number of distinct mappings. */
  130. if (stride_size < maximum_small_size)
  131. stride_size
  132. = ((maximum_small_size + stride_size - 1) / stride_size) * stride_size;
  133. if (stride_size > total_size)
  134. /* The mmap optimization would not save anything. */
  135. return allocate_malloc (total_size, element, element_size, count);
  136. /* Reserve the memory region. If we cannot create the mapping,
  137. there is no reason to set up the backing file. */
  138. void *target = mmap (NULL, total_size, PROT_NONE,
  139. MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  140. if (target == MAP_FAILED)
  141. return (struct support_blob_repeat) { 0 };
  142. /* Create the backing file for the repeated mapping. Call mkstemp
  143. directly to remove the resources backing the temporary file
  144. immediately, once support_blob_repeat_free is called. Using
  145. create_temp_file would result in a warning during post-test
  146. cleanup. */
  147. int fd;
  148. {
  149. char *temppath = xasprintf ("%s/support_blob_repeat-XXXXXX", test_dir);
  150. fd = mkstemp (temppath);
  151. if (fd < 0)
  152. FAIL_EXIT1 ("mkstemp (\"%s\"): %m", temppath);
  153. xunlink (temppath);
  154. free (temppath);
  155. }
  156. /* Make sure that there is backing storage, so that the fill
  157. operation will not fault. */
  158. if (posix_fallocate (fd, 0, stride_size) != 0)
  159. FAIL_EXIT1 ("posix_fallocate (%zu): %m", stride_size);
  160. /* The stride size must still be a multiple of the page size and
  161. element size. */
  162. TEST_VERIFY_EXIT ((stride_size % page_size) == 0);
  163. TEST_VERIFY_EXIT ((stride_size % element_size) == 0);
  164. /* Fill the backing store. */
  165. {
  166. void *ptr = mmap (target, stride_size, PROT_READ | PROT_WRITE,
  167. MAP_FIXED | MAP_FILE | MAP_SHARED, fd, 0);
  168. if (ptr == MAP_FAILED)
  169. {
  170. int saved_errno = errno;
  171. xmunmap (target, total_size);
  172. xclose (fd);
  173. errno = saved_errno;
  174. return (struct support_blob_repeat) { 0 };
  175. }
  176. if (ptr != target)
  177. FAIL_EXIT1 ("mapping of %zu bytes moved from %p to %p",
  178. stride_size, target, ptr);
  179. /* Write the repeating data. */
  180. fill (target, element, element_size, stride_size / element_size);
  181. /* Return to a PROT_NONE mapping, just to be on the safe side. */
  182. ptr = mmap (target, stride_size, PROT_NONE,
  183. MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  184. if (ptr == MAP_FAILED)
  185. FAIL_EXIT1 ("Failed to reinstate PROT_NONE mapping: %m");
  186. if (ptr != target)
  187. FAIL_EXIT1 ("PROT_NONE mapping of %zu bytes moved from %p to %p",
  188. stride_size, target, ptr);
  189. }
  190. /* Create the alias mappings. */
  191. {
  192. size_t remaining_size = total_size;
  193. char *current = target;
  194. int flags = MAP_FIXED | MAP_FILE | MAP_PRIVATE;
  195. #ifdef MAP_NORESERVE
  196. flags |= MAP_NORESERVE;
  197. #endif
  198. while (remaining_size > 0)
  199. {
  200. size_t to_map = stride_size;
  201. if (to_map > remaining_size)
  202. to_map = remaining_size;
  203. void *ptr = mmap (current, to_map, PROT_READ | PROT_WRITE,
  204. flags, fd, 0);
  205. if (ptr == MAP_FAILED)
  206. {
  207. int saved_errno = errno;
  208. xmunmap (target, total_size);
  209. xclose (fd);
  210. errno = saved_errno;
  211. return (struct support_blob_repeat) { 0 };
  212. }
  213. if (ptr != current)
  214. FAIL_EXIT1 ("MAP_PRIVATE mapping of %zu bytes moved from %p to %p",
  215. to_map, target, ptr);
  216. remaining_size -= to_map;
  217. current += to_map;
  218. }
  219. }
  220. xclose (fd);
  221. return (struct support_blob_repeat)
  222. {
  223. .start = target,
  224. .size = total_size,
  225. .use_malloc = false
  226. };
  227. }
  228. struct support_blob_repeat
  229. support_blob_repeat_allocate (const void *element, size_t element_size,
  230. size_t count)
  231. {
  232. size_t total_size;
  233. if (__builtin_mul_overflow (element_size, count, &total_size))
  234. {
  235. errno = EOVERFLOW;
  236. return (struct support_blob_repeat) { 0 };
  237. }
  238. if (total_size <= maximum_small_size)
  239. return allocate_malloc (total_size, element, element_size, count);
  240. else
  241. return allocate_big (total_size, element, element_size, count);
  242. }
  243. void
  244. support_blob_repeat_free (struct support_blob_repeat *blob)
  245. {
  246. if (blob->size > 0)
  247. {
  248. int saved_errno = errno;
  249. if (blob->use_malloc)
  250. free (blob->start);
  251. else
  252. xmunmap (blob->start, blob->size);
  253. errno = saved_errno;
  254. }
  255. *blob = (struct support_blob_repeat) { 0 };
  256. }