cpufeature.h

/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <linux/jump_label.h>

#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

/*
 * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 * in the kernel and for user space to keep track of which optional features
 * are supported by the current system. So let's map feature 'x' to HWCAP_x.
 * Note that HWCAP_x constants are bit fields so we need to take the log.
 */
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
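
/*
 * Illustrative example: with the arm64 HWCAP bits defined in
 * <uapi/asm/hwcap.h>, e.g. HWCAP_FP == (1 << 0) and HWCAP_ASIMD == (1 << 1),
 * the mapping above gives
 *
 *      cpu_feature(FP)    -> ilog2(HWCAP_FP)    == 0
 *      cpu_feature(ASIMD) -> ilog2(HWCAP_ASIMD) == 1
 *
 * i.e. cpu_feature() turns a HWCAP_x bit mask into the bit number that
 * cpu_have_feature() below tests against elf_hwcap.
 */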

#ifndef __ASSEMBLY__

#include <linux/kernel.h>

/* CPU feature register tracking */
enum ftr_type {
        FTR_EXACT,      /* Use a predefined safe value */
        FTR_LOWER_SAFE, /* Smaller value is safe */
        FTR_HIGHER_SAFE,/* Bigger value is safe */
};

#define FTR_STRICT      true    /* SANITY check strict matching required */
#define FTR_NONSTRICT   false   /* SANITY check ignored */
#define FTR_SIGNED      true    /* Value should be treated as signed */
#define FTR_UNSIGNED    false   /* Value should be treated as unsigned */

struct arm64_ftr_bits {
        bool            sign;   /* Value is signed ? */
        bool            strict; /* CPU Sanity check: strict matching required ? */
        enum ftr_type   type;
        u8              shift;
        u8              width;
        s64             safe_val; /* safe value for FTR_EXACT features */
};
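
/*
 * Illustrative sketch: each arm64_ftr_bits entry describes one field of an
 * ID register and how values from different CPUs are combined. A hypothetical
 * 4-bit unsigned field at bit 20, strictly checked, where the lower value is
 * the safe one, could be described as
 *
 *      {
 *              .sign     = FTR_UNSIGNED,
 *              .strict   = FTR_STRICT,
 *              .type     = FTR_LOWER_SAFE,
 *              .shift    = 20,
 *              .width    = 4,
 *              .safe_val = 0,          (safe_val only matters for FTR_EXACT)
 *      }
 *
 * The real per-register tables live in arch/arm64/kernel/cpufeature.c.
 */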

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask         Bits which should match across all CPUs for sanity.
 * @sys_val             Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
        const char                      *name;
        u64                             strict_mask;
        u64                             sys_val;
        const struct arm64_ftr_bits     *ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/* scope of capability check */
enum {
        SCOPE_SYSTEM,
        SCOPE_LOCAL_CPU,
};

struct arm64_cpu_capabilities {
        const char *desc;
        u16 capability;
        int def_scope;                  /* default scope */
        bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
        int (*enable)(void *);          /* Called on all active CPUs */
        union {
                struct {        /* To be used for erratum handling only */
                        u32 midr_model;
                        u32 midr_range_min, midr_range_max;
                };

                struct {        /* Feature register checking */
                        u32 sys_reg;
                        u8 field_pos;
                        u8 min_field_value;
                        u8 hwcap_type;
                        bool sign;
                        unsigned long hwcap;
                };
        };
};
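
/*
 * Illustrative sketch: an entry using the "feature register checking" arm of
 * the union might look like the following, where has_cpuid_feature stands in
 * for whatever matches() callback the capability table actually uses:
 *
 *      {
 *              .desc            = "32-bit EL0 Support",
 *              .capability      = ARM64_HAS_32BIT_EL0,
 *              .def_scope       = SCOPE_SYSTEM,
 *              .matches         = has_cpuid_feature,
 *              .sys_reg         = SYS_ID_AA64PFR0_EL1,
 *              .sign            = FTR_UNSIGNED,
 *              .field_pos       = ID_AA64PFR0_EL0_SHIFT,
 *              .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 *      }
 *
 * Erratum workarounds use the first arm of the union instead, matching on a
 * MIDR model and revision range.
 */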

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];

bool this_cpu_has_cap(unsigned int cap);

static inline bool cpu_have_feature(unsigned int num)
{
        return elf_hwcap & (1UL << num);
}

static inline bool cpus_have_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS)
                return false;
        if (__builtin_constant_p(num))
                return static_branch_unlikely(&cpu_hwcap_keys[num]);
        else
                return test_bit(num, cpu_hwcaps);
}
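
/*
 * Usage note (illustrative): when the capability index is a compile-time
 * constant, e.g.
 *
 *      if (cpus_have_cap(ARM64_HAS_PAN))
 *              ...
 *
 * the check compiles down to a static-key branch that is patched once
 * cpus_set_cap() enables the corresponding cpu_hwcap_keys[] entry; a
 * non-constant index falls back to test_bit() on the cpu_hwcaps bitmap.
 */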

static inline void cpus_set_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS) {
                pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
                        num, ARM64_NCAPS);
        } else {
                __set_bit(num, cpu_hwcaps);
                static_branch_enable(&cpu_hwcap_keys[num]);
        }
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
        return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
        return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
        return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
        return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
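
/*
 * Worked example (illustrative): for field == 4 and width == 4, the unsigned
 * helper above computes
 *
 *      (features << (64 - 4 - 4)) >> (64 - 4)  ==  (features << 56) >> 60
 *
 * which isolates bits [7:4] of the register. The signed variant performs the
 * same shifts on an s64, so the arithmetic right shift sign-extends the top
 * bit of the field (signed ID register fields use 0xf, i.e. -1, to indicate
 * "not implemented").
 */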

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
        return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}
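
/*
 * Illustrative example: for a descriptor with .shift == 20 and .width == 4,
 * arm64_ftr_mask() returns GENMASK(23, 20) == 0x0000000000f00000, i.e. the
 * mask covering exactly that ID register field.
 */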

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
        return (sign) ?
                cpuid_feature_extract_signed_field(features, field) :
                cpuid_feature_extract_unsigned_field(features, field);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
        return (s64)cpuid_feature_extract_field(val, ftrp->shift, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
        return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
                cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

        return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

void __init setup_cpu_features(void);
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                             const char *info);
void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_capabilities(void);

void update_cpu_errata_workarounds(void);
void __init enable_errata_workarounds(void);
void verify_local_cpu_errata_workarounds(void);

u64 read_system_reg(u32 id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool system_supports_32bit_el0(void)
{
        return cpus_have_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
}
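
/*
 * Note (illustrative): the cpu_* helper above reads the local CPU's copy of
 * the ID register via read_cpuid(), whereas the system_* helpers go through
 * read_system_reg()/cpus_have_cap() and therefore reflect the sanitised,
 * system-wide view established by the cpufeature code.
 */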

static inline bool system_uses_ttbr0_pan(void)
{
        return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
                !cpus_have_cap(ARM64_HAS_PAN);
}

#endif /* __ASSEMBLY__ */

#endif