/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */
#include <linux/elf-randomize.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/security.h>
  31. /*
  32. * Top of mmap area (just below the process stack).
  33. *
  34. * Leave at least a ~128 MB hole on 32bit applications.
  35. *
  36. * On 64bit applications we randomise the stack by 1GB so we need to
  37. * space our mmap start address by a further 1GB, otherwise there is a
  38. * chance the mmap area will end up closer to the stack than our ulimit
  39. * requires.
  40. */
  41. #define MIN_GAP32 (128*1024*1024)
  42. #define MIN_GAP64 ((128 + 1024)*1024*1024UL)
  43. #define MIN_GAP ((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
  44. #define MAX_GAP (TASK_SIZE/6*5)
  45. static inline int mmap_is_legacy(void)
  46. {
  47. if (current->personality & ADDR_COMPAT_LAYOUT)
  48. return 1;
  49. if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
  50. return 1;
  51. return sysctl_legacy_va_layout;
  52. }
  53. unsigned long arch_mmap_rnd(void)
  54. {
  55. unsigned long rnd;
  56. /* 8MB for 32bit, 1GB for 64bit */
  57. if (is_32bit_task())
  58. rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
  59. else
  60. rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
  61. return rnd << PAGE_SHIFT;
  62. }
  63. static inline unsigned long mmap_base(unsigned long rnd)
  64. {
  65. unsigned long gap = rlimit(RLIMIT_STACK);
  66. if (gap < MIN_GAP)
  67. gap = MIN_GAP;
  68. else if (gap > MAX_GAP)
  69. gap = MAX_GAP;
  70. return PAGE_ALIGN(TASK_SIZE - gap - rnd);
  71. }
  72. #ifdef CONFIG_PPC_RADIX_MMU
  73. /*
  74. * Same function as generic code used only for radix, because we don't need to overload
  75. * the generic one. But we will have to duplicate, because hash select
  76. * HAVE_ARCH_UNMAPPED_AREA
  77. */
  78. static unsigned long
  79. radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
  80. unsigned long len, unsigned long pgoff,
  81. unsigned long flags)
  82. {
  83. struct mm_struct *mm = current->mm;
  84. struct vm_area_struct *vma;
  85. struct vm_unmapped_area_info info;
  86. if (len > TASK_SIZE - mmap_min_addr)
  87. return -ENOMEM;
  88. if (flags & MAP_FIXED)
  89. return addr;
  90. if (addr) {
  91. addr = PAGE_ALIGN(addr);
  92. vma = find_vma(mm, addr);
  93. if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
  94. (!vma || addr + len <= vm_start_gap(vma)))
  95. return addr;
  96. }
  97. info.flags = 0;
  98. info.length = len;
  99. info.low_limit = mm->mmap_base;
  100. info.high_limit = TASK_SIZE;
  101. info.align_mask = 0;
  102. return vm_unmapped_area(&info);
  103. }
  104. static unsigned long
  105. radix__arch_get_unmapped_area_topdown(struct file *filp,
  106. const unsigned long addr0,
  107. const unsigned long len,
  108. const unsigned long pgoff,
  109. const unsigned long flags)
  110. {
  111. struct vm_area_struct *vma;
  112. struct mm_struct *mm = current->mm;
  113. unsigned long addr = addr0;
  114. struct vm_unmapped_area_info info;
  115. /* requested length too big for entire address space */
  116. if (len > TASK_SIZE - mmap_min_addr)
  117. return -ENOMEM;
  118. if (flags & MAP_FIXED)
  119. return addr;
  120. /* requesting a specific address */
  121. if (addr) {
  122. addr = PAGE_ALIGN(addr);
  123. vma = find_vma(mm, addr);
  124. if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
  125. (!vma || addr + len <= vm_start_gap(vma)))
  126. return addr;
  127. }
  128. info.flags = VM_UNMAPPED_AREA_TOPDOWN;
  129. info.length = len;
  130. info.low_limit = max(PAGE_SIZE, mmap_min_addr);
  131. info.high_limit = mm->mmap_base;
  132. info.align_mask = 0;
  133. addr = vm_unmapped_area(&info);
  134. /*
  135. * A failed mmap() very likely causes application failure,
  136. * so fall back to the bottom-up function here. This scenario
  137. * can happen with large stack limits and large mmap()
  138. * allocations.
  139. */
  140. if (addr & ~PAGE_MASK) {
  141. VM_BUG_ON(addr != -ENOMEM);
  142. info.flags = 0;
  143. info.low_limit = TASK_UNMAPPED_BASE;
  144. info.high_limit = TASK_SIZE;
  145. addr = vm_unmapped_area(&info);
  146. }
  147. return addr;
  148. }
  149. static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
  150. unsigned long random_factor)
  151. {
  152. if (mmap_is_legacy()) {
  153. mm->mmap_base = TASK_UNMAPPED_BASE;
  154. mm->get_unmapped_area = radix__arch_get_unmapped_area;
  155. } else {
  156. mm->mmap_base = mmap_base(random_factor);
  157. mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
  158. }
  159. }
#else
/* dummy */
extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
					 unsigned long random_factor);
#endif
  165. /*
  166. * This function, called very early during the creation of a new
  167. * process VM image, sets up which VM layout function to use:
  168. */
  169. void arch_pick_mmap_layout(struct mm_struct *mm)
  170. {
  171. unsigned long random_factor = 0UL;
  172. if (current->flags & PF_RANDOMIZE)
  173. random_factor = arch_mmap_rnd();
  174. if (radix_enabled())
  175. return radix__arch_pick_mmap_layout(mm, random_factor);
  176. /*
  177. * Fall back to the standard layout if the personality
  178. * bit is set, or if the expected stack growth is unlimited:
  179. */
  180. if (mmap_is_legacy()) {
  181. mm->mmap_base = TASK_UNMAPPED_BASE;
  182. mm->get_unmapped_area = arch_get_unmapped_area;
  183. } else {
  184. mm->mmap_base = mmap_base(random_factor);
  185. mm->get_unmapped_area = arch_get_unmapped_area_topdown;
  186. }
  187. }