/* arch/arm64/kernel/cpu_errata.c */
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
  18. #include <linux/types.h>
  19. #include <asm/cpu.h>
  20. #include <asm/cputype.h>
  21. #include <asm/cpufeature.h>
  22. static bool __maybe_unused
  23. is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
  24. {
  25. WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
  26. return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
  27. entry->midr_range_min,
  28. entry->midr_range_max);
  29. }
  30. static bool
  31. has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
  32. int scope)
  33. {
  34. WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
  35. return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
  36. (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
  37. }
  38. static int cpu_enable_trap_ctr_access(void *__unused)
  39. {
  40. /* Clear SCTLR_EL1.UCT */
  41. config_sctlr_el1(SCTLR_EL1_UCT, 0);
  42. return 0;
  43. }
  44. #define MIDR_RANGE(model, min, max) \
  45. .def_scope = SCOPE_LOCAL_CPU, \
  46. .matches = is_affected_midr_range, \
  47. .midr_model = model, \
  48. .midr_range_min = min, \
  49. .midr_range_max = max
  50. const struct arm64_cpu_capabilities arm64_errata[] = {
  51. #if defined(CONFIG_ARM64_ERRATUM_826319) || \
  52. defined(CONFIG_ARM64_ERRATUM_827319) || \
  53. defined(CONFIG_ARM64_ERRATUM_824069)
  54. {
  55. /* Cortex-A53 r0p[012] */
  56. .desc = "ARM errata 826319, 827319, 824069",
  57. .capability = ARM64_WORKAROUND_CLEAN_CACHE,
  58. MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
  59. .enable = cpu_enable_cache_maint_trap,
  60. },
  61. #endif
  62. #ifdef CONFIG_ARM64_ERRATUM_819472
  63. {
  64. /* Cortex-A53 r0p[01] */
  65. .desc = "ARM errata 819472",
  66. .capability = ARM64_WORKAROUND_CLEAN_CACHE,
  67. MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
  68. .enable = cpu_enable_cache_maint_trap,
  69. },
  70. #endif
  71. #ifdef CONFIG_ARM64_ERRATUM_832075
  72. {
  73. /* Cortex-A57 r0p0 - r1p2 */
  74. .desc = "ARM erratum 832075",
  75. .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
  76. MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
  77. (1 << MIDR_VARIANT_SHIFT) | 2),
  78. },
  79. #endif
  80. #ifdef CONFIG_ARM64_ERRATUM_834220
  81. {
  82. /* Cortex-A57 r0p0 - r1p2 */
  83. .desc = "ARM erratum 834220",
  84. .capability = ARM64_WORKAROUND_834220,
  85. MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
  86. (1 << MIDR_VARIANT_SHIFT) | 2),
  87. },
  88. #endif
  89. #ifdef CONFIG_ARM64_ERRATUM_845719
  90. {
  91. /* Cortex-A53 r0p[01234] */
  92. .desc = "ARM erratum 845719",
  93. .capability = ARM64_WORKAROUND_845719,
  94. MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
  95. },
  96. #endif
  97. #ifdef CONFIG_CAVIUM_ERRATUM_23154
  98. {
  99. /* Cavium ThunderX, pass 1.x */
  100. .desc = "Cavium erratum 23154",
  101. .capability = ARM64_WORKAROUND_CAVIUM_23154,
  102. MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
  103. },
  104. #endif
  105. #ifdef CONFIG_CAVIUM_ERRATUM_27456
  106. {
  107. /* Cavium ThunderX, T88 pass 1.x - 2.1 */
  108. .desc = "Cavium erratum 27456",
  109. .capability = ARM64_WORKAROUND_CAVIUM_27456,
  110. MIDR_RANGE(MIDR_THUNDERX, 0x00,
  111. (1 << MIDR_VARIANT_SHIFT) | 1),
  112. },
  113. {
  114. /* Cavium ThunderX, T81 pass 1.0 */
  115. .desc = "Cavium erratum 27456",
  116. .capability = ARM64_WORKAROUND_CAVIUM_27456,
  117. MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
  118. },
  119. #endif
  120. {
  121. .desc = "Mismatched cache line size",
  122. .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
  123. .matches = has_mismatched_cache_line_size,
  124. .def_scope = SCOPE_LOCAL_CPU,
  125. .enable = cpu_enable_trap_ctr_access,
  126. },
  127. {
  128. }
  129. };
  130. /*
  131. * The CPU Errata work arounds are detected and applied at boot time
  132. * and the related information is freed soon after. If the new CPU requires
  133. * an errata not detected at boot, fail this CPU.
  134. */
  135. void verify_local_cpu_errata_workarounds(void)
  136. {
  137. const struct arm64_cpu_capabilities *caps = arm64_errata;
  138. for (; caps->matches; caps++)
  139. if (!cpus_have_cap(caps->capability) &&
  140. caps->matches(caps, SCOPE_LOCAL_CPU)) {
  141. pr_crit("CPU%d: Requires work around for %s, not detected"
  142. " at boot time\n",
  143. smp_processor_id(),
  144. caps->desc ? : "an erratum");
  145. cpu_die_early();
  146. }
  147. }
  148. void update_cpu_errata_workarounds(void)
  149. {
  150. update_cpu_capabilities(arm64_errata, "enabling workaround for");
  151. }
  152. void __init enable_errata_workarounds(void)
  153. {
  154. enable_cpu_capabilities(arm64_errata);
  155. }