/*
 * Copyright (c) 2015 Google, Inc
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef __ALIGNMEM_H
#define __ALIGNMEM_H

/*
 * ARCH_DMA_MINALIGN is defined in asm/cache.h for each architecture. It
 * is used to align DMA buffers.
 */
#ifndef __ASSEMBLY__
#include <asm/cache.h>
#include <malloc.h>

/*
 * The ALLOC_CACHE_ALIGN_BUFFER macro is used to allocate a buffer on the
 * stack that meets the minimum architecture alignment requirements for DMA.
 * Such a buffer is useful for DMA operations where flushing and invalidating
 * the cache before and after a read and/or write operation is required for
 * correct operation.
 *
 * When called, the macro creates an array on the stack that is sized such
 * that:
 *
 * 1) The beginning of the array can be advanced enough to be aligned.
 *
 * 2) The size of the aligned portion of the array is a multiple of the
 *    minimum architecture alignment required for DMA.
 *
 * 3) The aligned portion contains enough space for the original number of
 *    elements requested.
 *
 * The macro then creates a pointer to the aligned portion of this array and
 * assigns to the pointer the address of the first element in the aligned
 * portion of the array.
 *
 * Calling the macro as:
 *
 *	ALLOC_CACHE_ALIGN_BUFFER(uint32_t, buffer, 1024);
 *
 * will result in something similar to:
 *
 *	uint32_t buffer[1024];
 *
 * The following differences exist:
 *
 * 1) The resulting buffer is guaranteed to be aligned to the value of
 *    ARCH_DMA_MINALIGN.
 *
 * 2) The buffer variable created by the macro is a pointer to the specified
 *    type, and NOT an array of the specified type. This can be very important
 *    if you want the address of the buffer, which you probably do, to pass it
 *    to the DMA hardware. The value of &buffer is different in the two cases.
 *    In the macro case it will be the address of the pointer, not the address
 *    of the space reserved for the buffer. However, in the array case it
 *    would be the address of the buffer. So if you are replacing hard-coded
 *    stack buffers with this macro you need to make sure you remove the & from
 *    the locations where you are taking the address of the buffer.
 *
 * Note that the size parameter is the number of array elements to allocate,
 * not the number of bytes.
 *
 * This macro cannot be used outside of function scope, or for the creation
 * of a function-scoped static buffer. It cannot be used to create a
 * cache-line-aligned global buffer.
 */
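
/*
 * PAD_COUNT() gives the number of 'pad'-sized units needed to hold 's' bytes
 * (i.e. 's' divided by 'pad', rounded up), and PAD_SIZE() rounds 's' up to
 * the next multiple of 'pad'. For example, PAD_SIZE(100, 64) is 128.
 */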
#define PAD_COUNT(s, pad)	(((s) - 1) / (pad) + 1)
#define PAD_SIZE(s, pad)	(PAD_COUNT(s, pad) * pad)
#define ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, pad)		\
	char __##name[ROUND(PAD_SIZE((size) * sizeof(type), pad), align) \
		      + (align - 1)];					\
									\
	type *name = (type *)ALIGN((uintptr_t)__##name, align)
#define ALLOC_ALIGN_BUFFER(type, name, size, align)			\
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, 1)
#define ALLOC_CACHE_ALIGN_BUFFER_PAD(type, name, size, pad)		\
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, ARCH_DMA_MINALIGN, pad)
#define ALLOC_CACHE_ALIGN_BUFFER(type, name, size)			\
	ALLOC_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
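
/*
 * Example usage (a minimal sketch; dma_read_sector(), 'dev' and SECTOR_SIZE
 * are hypothetical and stand in for whatever driver call and transfer size
 * is actually used):
 *
 *	int read_sector(void *dev, unsigned long sector)
 *	{
 *		ALLOC_CACHE_ALIGN_BUFFER(uint8_t, buf, SECTOR_SIZE);
 *
 *		// 'buf' is already a pointer, so pass it directly (no '&')
 *		return dma_read_sector(dev, sector, buf);
 *	}
 */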

/*
 * DEFINE_CACHE_ALIGN_BUFFER() is similar to ALLOC_CACHE_ALIGN_BUFFER, but its
 * purpose is to allow allocating aligned buffers outside of function scope.
 * Usage of this macro shall be avoided or used with extreme care!
 */
#define DEFINE_ALIGN_BUFFER(type, name, size, align)			\
	static char __##name[ALIGN(size * sizeof(type), align)]	\
			__aligned(align);				\
									\
	static type *name = (type *)__##name
#define DEFINE_CACHE_ALIGN_BUFFER(type, name, size)			\
	DEFINE_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
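
/*
 * Example (a minimal sketch at file scope; the name and element count are
 * arbitrary):
 *
 *	DEFINE_CACHE_ALIGN_BUFFER(uint32_t, work_buf, 256);
 *
 * This defines a static, cache-aligned backing array plus a static pointer
 * 'work_buf' to it, usable from any function in the file. As with
 * ALLOC_CACHE_ALIGN_BUFFER, 'work_buf' is a pointer, not an array.
 */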

/**
 * malloc_cache_aligned() - allocate a memory region aligned to cache line size
 *
 * This allocates memory at a cache-line boundary. The amount allocated may
 * be larger than requested as it is rounded up to the nearest multiple of the
 * cache-line size. This ensures that subsequent cache operations on this
 * memory (flush, invalidate) will not affect subsequently allocated regions.
 *
 * @size: Minimum number of bytes to allocate
 *
 * @return pointer to new memory region, or NULL if there is no more memory
 *	available
 */
static inline void *malloc_cache_aligned(size_t size)
{
	return memalign(ARCH_DMA_MINALIGN, ALIGN(size, ARCH_DMA_MINALIGN));
}
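
/*
 * Example (a minimal sketch; the length and error handling are illustrative
 * only). Memory returned by malloc_cache_aligned() comes from the normal
 * heap and is released with free():
 *
 *	void *buf = malloc_cache_aligned(600);	// size rounded up to a
 *						// multiple of ARCH_DMA_MINALIGN
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free(buf);
 */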

#endif

#endif /* __ALIGNMEM_H */