/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

/*
 * Values for secondary_data.status: a secondary CPU reports its boot
 * progress/outcome back to the primary CPU through these codes.
 */
#define CPU_MMU_OFF		(-1)
#define CPU_BOOT_SUCCESS	(0)
/* The cpu invoked ops->cpu_die, synchronise it with cpu_kill */
#define CPU_KILL_ME		(1)
/* The cpu couldn't die gracefully and is looping in the kernel */
#define CPU_STUCK_IN_KERNEL	(2)
/* Fatal system error detected by secondary CPU, crash the system */
#define CPU_PANIC_KERNEL	(3)
#ifndef __ASSEMBLY__

#include <asm/percpu.h>

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>

/* Per-CPU logical CPU number; read by raw_smp_processor_id() below. */
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
/*
 * We don't use this_cpu_read(cpu_number) as that has implicit writes to
 * preempt_count, and associated (compiler) barriers, that we'd like to avoid
 * the expense of. If we're preemptible, the value can be stale at use anyway.
 * And we can't use this_cpu_ptr() either, as that winds up recursing back
 * here under CONFIG_DEBUG_PREEMPT=y.
 */
#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
struct seq_file;

/*
 * generate IPI list text
 */
extern void show_ipi_list(struct seq_file *p, int prec);

/*
 * Called from C code, this handles an IPI.
 */
extern void handle_IPI(int ipinr, struct pt_regs *regs);

/*
 * Discover the set of possible CPUs and determine their
 * SMP operations.
 */
extern void smp_init_cpus(void);

/*
 * Provide a function to raise an IPI cross call on CPUs in callmap.
 */
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);

/*
 * Called from the secondary holding pen, this is the secondary CPU entry point.
 */
asmlinkage void secondary_start_kernel(void);
/*
 * Initial data for bringing up a secondary CPU.
 * @stack  - sp for the secondary CPU
 * @task   - NOTE(review): undocumented in the original comment; presumably the
 *           secondary CPU's initial task pointer — confirm against the code
 *           that populates secondary_data before cpu_up.
 * @status - Result passed back from the secondary CPU to
 *           indicate failure.
 */
struct secondary_data {
	void *stack;
	struct task_struct *task;
	long status;
};

extern struct secondary_data secondary_data;
extern long __early_cpu_boot_status;
extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
#else
static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	/*
	 * Wakeup IPIs only exist with the ACPI parking protocol; any caller
	 * reachable in this configuration is a build-time error.
	 */
	BUILD_BUG();
}
#endif

/* CPU hotplug: tear-down entry points implemented in smp.c / cpu ops. */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);
extern void cpu_die_early(void);
  92. static inline void cpu_park_loop(void)
  93. {
  94. for (;;) {
  95. wfe();
  96. wfi();
  97. }
  98. }
/*
 * Publish @val (one of the CPU_* status codes above) in
 * secondary_data.status so another CPU can observe it.
 */
static inline void update_cpu_boot_status(int val)
{
	WRITE_ONCE(secondary_data.status, val);
	/* Ensure the visibility of the status update */
	dsb(ishst);
}
/*
 * The calling secondary CPU has detected serious configuration mismatch,
 * which calls for a kernel panic. Update the boot status and park the calling
 * CPU.
 */
static inline void cpu_panic_kernel(void)
{
	update_cpu_boot_status(CPU_PANIC_KERNEL);
	cpu_park_loop();	/* never returns */
}
/*
 * If a secondary CPU enters the kernel but fails to come online,
 * (e.g. due to mismatched features), and cannot exit the kernel,
 * we increment cpus_stuck_in_kernel and leave the CPU in a
 * quiescent loop within the kernel text. The memory containing
 * this loop must not be re-used for anything else as the 'stuck'
 * core is executing it.
 *
 * This function is used to inhibit features like kexec and hibernate.
 */
bool cpus_are_stuck_in_kernel(void);

extern void smp_send_crash_stop(void);
extern bool smp_crash_stop_failed(void);

#endif /* ifndef __ASSEMBLY__ */

#endif /* ifndef __ASM_SMP_H */