/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE   = 0,
        PERF_TYPE_SOFTWARE   = 1,
        PERF_TYPE_TRACEPOINT = 2,
        PERF_TYPE_HW_CACHE   = 3,
        PERF_TYPE_RAW        = 4,
        PERF_TYPE_BREAKPOINT = 5,

        PERF_TYPE_MAX,       /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES              = 0,
        PERF_COUNT_HW_INSTRUCTIONS            = 1,
        PERF_COUNT_HW_CACHE_REFERENCES        = 2,
        PERF_COUNT_HW_CACHE_MISSES            = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS     = 4,
        PERF_COUNT_HW_BRANCH_MISSES           = 5,
        PERF_COUNT_HW_BUS_CYCLES              = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND  = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES          = 9,

        PERF_COUNT_HW_MAX,                    /* non-ABI */
};
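
/*
 * Example (illustrative, not part of the ABI): the generalized events
 * above pair with PERF_TYPE_HARDWARE, e.g.
 *
 *      attr.type   = PERF_TYPE_HARDWARE;
 *      attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *
 * and are opened via the perf_event_open() syscall, which has no glibc
 * wrapper:
 *
 *      int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *      // pid == 0, cpu == -1: measure this thread on any CPU
 */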
/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D  = 0,
        PERF_COUNT_HW_CACHE_L1I  = 1,
        PERF_COUNT_HW_CACHE_LL   = 2,
        PERF_COUNT_HW_CACHE_DTLB = 3,
        PERF_COUNT_HW_CACHE_ITLB = 4,
        PERF_COUNT_HW_CACHE_BPU  = 5,
        PERF_COUNT_HW_CACHE_NODE = 6,

        PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ     = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE    = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,     /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS   = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,   /* non-ABI */
};
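
/*
 * Example (illustrative): a PERF_TYPE_HW_CACHE event's attr.config is
 * composed from the three enums above as
 *
 *      config = (perf_hw_cache_id) |
 *               (perf_hw_cache_op_id << 8) |
 *               (perf_hw_cache_op_result_id << 16);
 *
 * e.g. to count L1 data cache read misses:
 *
 *      attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */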
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling
 * them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK        = 0,
        PERF_COUNT_SW_TASK_CLOCK       = 1,
        PERF_COUNT_SW_PAGE_FAULTS      = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS   = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN  = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ  = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
        PERF_COUNT_SW_EMULATION_FAULTS = 8,
        PERF_COUNT_SW_DUMMY            = 9,
        PERF_COUNT_SW_BPF_OUTPUT       = 10,

        PERF_COUNT_SW_MAX,             /* non-ABI */
};
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP           = 1U << 0,
        PERF_SAMPLE_TID          = 1U << 1,
        PERF_SAMPLE_TIME         = 1U << 2,
        PERF_SAMPLE_ADDR         = 1U << 3,
        PERF_SAMPLE_READ         = 1U << 4,
        PERF_SAMPLE_CALLCHAIN    = 1U << 5,
        PERF_SAMPLE_ID           = 1U << 6,
        PERF_SAMPLE_CPU          = 1U << 7,
        PERF_SAMPLE_PERIOD       = 1U << 8,
        PERF_SAMPLE_STREAM_ID    = 1U << 9,
        PERF_SAMPLE_RAW          = 1U << 10,
        PERF_SAMPLE_BRANCH_STACK = 1U << 11,
        PERF_SAMPLE_REGS_USER    = 1U << 12,
        PERF_SAMPLE_STACK_USER   = 1U << 13,
        PERF_SAMPLE_WEIGHT       = 1U << 14,
        PERF_SAMPLE_DATA_SRC     = 1U << 15,
        PERF_SAMPLE_IDENTIFIER   = 1U << 16,
        PERF_SAMPLE_TRANSACTION  = 1U << 17,
        PERF_SAMPLE_REGS_INTR    = 1U << 18,

        PERF_SAMPLE_MAX          = 1U << 19, /* non-ABI */
};
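
/*
 * Example (illustrative): a sampling event that records the IP, the
 * pid/tid, a timestamp and the sample period once every 1000 events:
 *
 *      attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *                           PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 *      attr.sample_period = 1000;
 */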
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
        PERF_SAMPLE_BRANCH_USER_SHIFT       = 0,  /* user branches */
        PERF_SAMPLE_BRANCH_KERNEL_SHIFT     = 1,  /* kernel branches */
        PERF_SAMPLE_BRANCH_HV_SHIFT         = 2,  /* hypervisor branches */

        PERF_SAMPLE_BRANCH_ANY_SHIFT        = 3,  /* any branch types */
        PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT   = 4,  /* any call branch */
        PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5,  /* any return branch */
        PERF_SAMPLE_BRANCH_IND_CALL_SHIFT   = 6,  /* indirect calls */
        PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT   = 7,  /* transaction aborts */
        PERF_SAMPLE_BRANCH_IN_TX_SHIFT      = 8,  /* in transaction */
        PERF_SAMPLE_BRANCH_NO_TX_SHIFT      = 9,  /* not in transaction */
        PERF_SAMPLE_BRANCH_COND_SHIFT       = 10, /* conditional branches */

        PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
        PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT   = 12, /* indirect jumps */
        PERF_SAMPLE_BRANCH_CALL_SHIFT       = 13, /* direct call */

        PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT   = 14, /* no flags */
        PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT  = 15, /* no cycles */

        PERF_SAMPLE_BRANCH_MAX_SHIFT        /* non-ABI */
};
enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_USER       = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
        PERF_SAMPLE_BRANCH_KERNEL     = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
        PERF_SAMPLE_BRANCH_HV         = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

        PERF_SAMPLE_BRANCH_ANY        = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_CALL   = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
        PERF_SAMPLE_BRANCH_IND_CALL   = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ABORT_TX   = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
        PERF_SAMPLE_BRANCH_IN_TX      = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
        PERF_SAMPLE_BRANCH_NO_TX      = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
        PERF_SAMPLE_BRANCH_COND       = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

        PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
        PERF_SAMPLE_BRANCH_IND_JUMP   = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
        PERF_SAMPLE_BRANCH_CALL       = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

        PERF_SAMPLE_BRANCH_NO_FLAGS   = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
        PERF_SAMPLE_BRANCH_NO_CYCLES  = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

        PERF_SAMPLE_BRANCH_MAX        = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER |\
         PERF_SAMPLE_BRANCH_KERNEL |\
         PERF_SAMPLE_BRANCH_HV)
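
/*
 * Example (illustrative): sample all taken branches executed in user
 * mode. PERF_SAMPLE_BRANCH_STACK must also be set in attr.sample_type
 * for the branch records to be emitted:
 *
 *      attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *      attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *                                 PERF_SAMPLE_BRANCH_ANY;
 */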
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
        PERF_SAMPLE_REGS_ABI_NONE = 0,
        PERF_SAMPLE_REGS_ABI_32   = 1,
        PERF_SAMPLE_REGS_ABI_64   = 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
        PERF_TXN_ELISION        = (1 << 0), /* From elision */
        PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
        PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
        PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
        PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
        PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
        PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
        PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

        PERF_TXN_MAX            = (1 << 8), /* non-ABI */

        /* bits 32..63 are reserved for the abort code */
        PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
        PERF_TXN_ABORT_SHIFT = 32,
};
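
/*
 * Example (illustrative): extracting the architecture-specific abort
 * code from a PERF_SAMPLE_TRANSACTION value (txn is assumed to hold the
 * sampled u64):
 *
 *      __u32 code = (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 */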
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64 value;
 *        { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64 id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64 nr;
 *        { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64 value;
 *          { u64 id;         } && PERF_FORMAT_ID
 *        } cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
        PERF_FORMAT_ID                 = 1U << 2,
        PERF_FORMAT_GROUP              = 1U << 3,

        PERF_FORMAT_MAX                = 1U << 4, /* non-ABI */
};
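
/*
 * Example (illustrative): reading an event group whose leader was opened
 * with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID. The local
 * helper struct below mirrors the PERF_FORMAT_GROUP branch of the
 * pseudo-struct above (MAX_COUNTERS is an assumed upper bound, not ABI):
 *
 *      struct group_read {
 *              __u64 nr;
 *              struct { __u64 value, id; } cntr[MAX_COUNTERS];
 *      } buf;
 *
 *      if (read(group_fd, &buf, sizeof(buf)) > 0) {
 *              for (__u64 i = 0; i < buf.nr; i++)
 *                      printf("id %llu: %llu\n",
 *                             buf.cntr[i].id, buf.cntr[i].value);
 *      }
 */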
#define PERF_ATTR_SIZE_VER0 64  /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1 72  /* add: config2 */
#define PERF_ATTR_SIZE_VER2 80  /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3 96  /* add: sample_regs_user */
                                /* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *                    should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32 type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32 size;

        /*
         * Type specific configuration information.
         */
        __u64 config;

        union {
                __u64 sample_period;
                __u64 sample_freq;
        };

        __u64 sample_type;
        __u64 read_format;

        __u64 disabled       : 1, /* off by default */
              inherit        : 1, /* children inherit it */
              pinned         : 1, /* must always be on PMU */
              exclusive      : 1, /* only group on PMU */
              exclude_user   : 1, /* don't count user */
              exclude_kernel : 1, /* ditto kernel */
              exclude_hv     : 1, /* ditto hypervisor */
              exclude_idle   : 1, /* don't count when idle */
              mmap           : 1, /* include mmap data */
              comm           : 1, /* include comm data */
              freq           : 1, /* use freq, not period */
              inherit_stat   : 1, /* per task counts */
              enable_on_exec : 1, /* next exec enables */
              task           : 1, /* trace fork/exit */
              watermark      : 1, /* wakeup_watermark */
              /*
               * precise_ip:
               *
               *  0 - SAMPLE_IP can have arbitrary skid
               *  1 - SAMPLE_IP must have constant skid
               *  2 - SAMPLE_IP requested to have 0 skid
               *  3 - SAMPLE_IP must have 0 skid
               *
               * See also PERF_RECORD_MISC_EXACT_IP
               */
              precise_ip     : 2, /* skid constraint */
              mmap_data      : 1, /* non-exec mmap data */
              sample_id_all  : 1, /* sample_type all events */

              exclude_host   : 1, /* don't count in host */
              exclude_guest  : 1, /* don't count in guest */

              exclude_callchain_kernel : 1, /* exclude kernel callchains */
              exclude_callchain_user   : 1, /* exclude user callchains */
              mmap2          : 1, /* include mmap with inode data */
              comm_exec      : 1, /* flag comm events that are due to an exec */
              use_clockid    : 1, /* use @clockid for time fields */
              context_switch : 1, /* context switch data */
              write_backward : 1, /* Write ring buffer from end to beginning */
              __reserved_1   : 36;

        union {
                __u32 wakeup_events;    /* wakeup every n events */
                __u32 wakeup_watermark; /* bytes before wakeup */
        };

        __u32 bp_type;
        union {
                __u64 bp_addr;
                __u64 config1; /* extension of config */
        };
        union {
                __u64 bp_len;
                __u64 config2; /* extension of config1 */
        };
        __u64 branch_sample_type; /* enum perf_branch_sample_type */

        /*
         * Defines set of user regs to dump on samples.
         * See asm/perf_regs.h for details.
         */
        __u64 sample_regs_user;

        /*
         * Defines size of the user stack to dump on samples.
         */
        __u32 sample_stack_user;

        __s32 clockid;

        /*
         * Defines set of regs to dump for each sample
         * state captured on:
         *  - precise = 0: PMU interrupt
         *  - precise > 0: sampled instruction
         *
         * See asm/perf_regs.h for details.
         */
        __u64 sample_regs_intr;

        /*
         * Wakeup watermark for AUX area
         */
        __u32 aux_watermark;
        __u16 sample_max_stack;
        __u16 __reserved_2; /* align to __u64 */
};
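
/*
 * Example (illustrative): a minimal sketch of opening a counting event
 * with this structure, in the style of the perf_event_open(2) man page:
 *
 *      struct perf_event_attr attr;
 *      long long count;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type           = PERF_TYPE_HARDWARE;
 *      attr.size           = sizeof(attr);      // for fwd/bwd compat
 *      attr.config         = PERF_COUNT_HW_CPU_CYCLES;
 *      attr.disabled       = 1;                 // start disabled
 *      attr.exclude_kernel = 1;                 // user space only
 *      attr.exclude_hv     = 1;
 *
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *      // ... enable via ioctl, run workload, disable ...
 *      read(fd, &count, sizeof(count));
 */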
#define perf_flags(attr) (*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE       _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE      _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH      _IO ('$', 2)
#define PERF_EVENT_IOC_RESET        _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD       _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT   _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER   _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID           _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF      _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP = 1U << 0,
};
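
/*
 * Example (illustrative): driving a disabled counter around a region of
 * interest. PERF_IOC_FLAG_GROUP applies the operation to the whole
 * event group rather than a single event:
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *      run_workload();                 // assumed code under measurement
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */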
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32 version;        /* version number of this structure */
        __u32 compat_version; /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq, time_mult, time_shift, index, width;
         *   u64 count, enabled, running;
         *   u64 cyc, time_offset;
         *   s64 pmc = 0;
         *
         *   do {
         *     seq = pc->lock;
         *     barrier()
         *
         *     enabled = pc->time_enabled;
         *     running = pc->time_running;
         *
         *     if (pc->cap_usr_time && enabled != running) {
         *       cyc = rdtsc();
         *       time_offset = pc->time_offset;
         *       time_mult   = pc->time_mult;
         *       time_shift  = pc->time_shift;
         *     }
         *
         *     index = pc->index;
         *     count = pc->offset;
         *     if (pc->cap_user_rdpmc && index) {
         *       width = pc->pmc_width;
         *       pmc = rdpmc(index - 1);
         *     }
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reason this only works on self-monitoring
         *       processes.
         */
        __u32 lock;         /* seqlock for synchronization */
        __u32 index;        /* hardware event identifier */
        __s64 offset;       /* add to hardware event value */
        __u64 time_enabled; /* time event active */
        __u64 time_running; /* time event on cpu */
        union {
                __u64 capabilities;
                struct {
                        __u64 cap_bit0               : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
                              cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */

                              cap_user_rdpmc         : 1, /* The RDPMC instruction can be used to read counts */
                              cap_user_time          : 1, /* The time_* fields are used */
                              cap_user_time_zero     : 1, /* The time_zero field is used */
                              cap_____res            : 59;
                };
        };
        /*
         * If cap_user_rdpmc this field provides the bit-width of the value
         * read using the rdpmc() or equivalent instruction. This can be used
         * to sign extend the result like:
         *
         *   pmc <<= 64 - width;
         *   pmc >>= 64 - width; // signed shift right
         *   count += pmc;
         */
        __u16 pmc_width;

        /*
         * If cap_usr_time the below fields can be used to compute the time
         * delta since time_enabled (in ns) using rdtsc or similar.
         *
         *   u64 quot, rem;
         *   u64 delta;
         *
         *   quot  = (cyc >> time_shift);
         *   rem   = cyc & (((u64)1 << time_shift) - 1);
         *   delta = time_offset + quot * time_mult +
         *           ((rem * time_mult) >> time_shift);
         *
         * Where time_offset, time_mult, time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
         * enabled and possibly running (if index), improving the scaling:
         *
         *   enabled += delta;
         *   if (index)
         *     running += delta;
         *
         *   quot  = count / running;
         *   rem   = count % running;
         *   count = quot * enabled + (rem * enabled) / running;
         */
        __u16 time_shift;
        __u32 time_mult;
        __u64 time_offset;
        /*
         * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
         * from sample timestamps.
         *
         *   time = timestamp - time_zero;
         *   quot = time / time_mult;
         *   rem  = time % time_mult;
         *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
         *
         * And vice versa:
         *
         *   quot      = cyc >> time_shift;
         *   rem       = cyc & (((u64)1 << time_shift) - 1);
         *   timestamp = time_zero + quot * time_mult +
         *               ((rem * time_mult) >> time_shift);
         */
        __u64 time_zero;
        __u32 size; /* Header size up to __reserved[] fields. */

        /*
         * Hole for extension of the self monitor capabilities
         */
        __u8 __reserved[118*8+4]; /* align to 1k. */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an smp_rmb(),
         * after reading this value.
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data, after issuing
         * an smp_mb() to separate the data read from the ->data_tail store.
         * In this case the kernel will not over-write unread data.
         *
         * See perf_output_put_handle() for the data ordering.
         *
         * data_{offset,size} indicate the location and size of the perf record
         * buffer within the mmapped area.
         */
        __u64 data_head;   /* head in the data section */
        __u64 data_tail;   /* user-space written tail */
        __u64 data_offset; /* where the buffer starts */
        __u64 data_size;   /* data buffer size */

        /*
         * AUX area is defined by aux_{offset,size} fields that should be set
         * by the userspace, so that
         *
         *   aux_offset >= data_offset + data_size
         *
         * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
         *
         * Ring buffer pointers aux_{head,tail} have the same semantics as
         * data_{head,tail} and the same ordering rules apply.
         */
        __u64 aux_head;
        __u64 aux_tail;
        __u64 aux_offset;
        __u64 aux_size;
};
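
/*
 * Example (illustrative): mapping this control page plus a data buffer.
 * The mapping must be 1 + 2^n pages, the first page being this
 * control/header page (n = 8 is an arbitrary choice here):
 *
 *      int n = 8;
 *      size_t len = (1 + (1 << n)) * sysconf(_SC_PAGESIZE);
 *      struct perf_event_mmap_page *pc =
 *              mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */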
#define PERF_RECORD_MISC_CPUMODE_MASK    (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL          (1 << 0)
#define PERF_RECORD_MISC_USER            (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR      (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL    (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER      (5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events, so they can reuse the same bit position.
 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
 */
#define PERF_RECORD_MISC_MMAP_DATA  (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC  (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)

struct perf_event_header {
        __u32 type;
        __u16 misc;
        __u16 size;
};
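
/*
 * Example (illustrative): walking records in the mmap()ed data buffer.
 * Records are contiguous perf_event_header-prefixed blobs; pc is the
 * perf_event_mmap_page mapped above:
 *
 *      __u64 head = pc->data_head;
 *      __atomic_thread_fence(__ATOMIC_ACQUIRE);   // the smp_rmb() above
 *
 *      char *base = (char *)pc + pc->data_offset;
 *      __u64 tail = pc->data_tail;
 *      while (tail < head) {
 *              struct perf_event_header *eh =
 *                      (void *)(base + (tail & (pc->data_size - 1)));
 *              // eh->type is one of enum perf_event_type below;
 *              // a robust reader must also handle records that wrap
 *              // around the end of the buffer
 *              tail += eh->size;
 *      }
 *      __atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
 */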
enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
         * IDENTIFIER) described in PERF_RECORD_SAMPLE below. These fields
         * are stashed just after the perf_event_header and the fields already
         * present for the existing records, i.e. at the end of the payload.
         * That way a newer perf.data file will be supported by older perf
         * tools, with these new optional fields being ignored.
         *
         * struct sample_id {
         *      { u32 pid, tid;  } && PERF_SAMPLE_TID
         *      { u64 time;      } && PERF_SAMPLE_TIME
         *      { u64 id;        } && PERF_SAMPLE_ID
         *      { u64 stream_id; } && PERF_SAMPLE_STREAM_ID
         *      { u32 cpu, res;  } && PERF_SAMPLE_CPU
         *      { u64 id;        } && PERF_SAMPLE_IDENTIFIER
         * } && perf_event_attr::sample_id_all
         *
         * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
         * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
         * relative to header.size.
         */
        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      u64 addr;
         *      u64 len;
         *      u64 pgoff;
         *      char filename[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_MMAP = 1,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u64 id;
         *      u64 lost;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_LOST = 2,

        /*
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      char comm[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_COMM = 3,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, ppid;
         *      u32 tid, ptid;
         *      u64 time;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_EXIT = 4,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u64 time;
         *      u64 id;
         *      u64 stream_id;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_THROTTLE   = 5,
        PERF_RECORD_UNTHROTTLE = 6,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, ppid;
         *      u32 tid, ptid;
         *      u64 time;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_FORK = 7,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, tid;
         *
         *      struct read_format values;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_READ = 8,
        /*
         * struct {
         *      struct perf_event_header header;
         *
         *      #
         *      # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
         *      # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
         *      # is fixed relative to header.
         *      #
         *
         *      { u64 id;        } && PERF_SAMPLE_IDENTIFIER
         *      { u64 ip;        } && PERF_SAMPLE_IP
         *      { u32 pid, tid;  } && PERF_SAMPLE_TID
         *      { u64 time;      } && PERF_SAMPLE_TIME
         *      { u64 addr;      } && PERF_SAMPLE_ADDR
         *      { u64 id;        } && PERF_SAMPLE_ID
         *      { u64 stream_id; } && PERF_SAMPLE_STREAM_ID
         *      { u32 cpu, res;  } && PERF_SAMPLE_CPU
         *      { u64 period;    } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format values; } && PERF_SAMPLE_READ
         *
         *      { u64 nr,
         *        u64 ips[nr];   } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32 size;
         *        char data[size]; } && PERF_SAMPLE_RAW
         *
         *      { u64 nr;
         *        { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64 abi; # enum perf_sample_regs_abi
         *        u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
         *
         *      { u64 size;
         *        char data[size];
         *        u64 dyn_size;  } && PERF_SAMPLE_STACK_USER
         *
         *      { u64 weight;      } && PERF_SAMPLE_WEIGHT
         *      { u64 data_src;    } && PERF_SAMPLE_DATA_SRC
         *      { u64 transaction; } && PERF_SAMPLE_TRANSACTION
         *      { u64 abi; # enum perf_sample_regs_abi
         *        u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
         * };
         */
        PERF_RECORD_SAMPLE = 9,
        /*
         * The MMAP2 records are an augmented version of MMAP; they add
         * maj, min and ino numbers to uniquely identify each mapping.
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      u64 addr;
         *      u64 len;
         *      u64 pgoff;
         *      u32 maj;
         *      u32 min;
         *      u64 ino;
         *      u64 ino_generation;
         *      u32 prot, flags;
         *      char filename[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_MMAP2 = 10,
        /*
         * Records that new data landed in the AUX buffer part.
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u64 aux_offset;
         *      u64 aux_size;
         *      u64 flags;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_AUX = 11,

        /*
         * Indicates that instruction trace has started.
         *
         * struct {
         *      struct perf_event_header header;
         *      u32 pid;
         *      u32 tid;
         * };
         */
        PERF_RECORD_ITRACE_START = 12,

        /*
         * Records the dropped/lost sample number.
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u64 lost;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_LOST_SAMPLES = 13,

        /*
         * Records a context switch in or out (flagged by
         * PERF_RECORD_MISC_SWITCH_OUT). See also
         * PERF_RECORD_SWITCH_CPU_WIDE.
         *
         * struct {
         *      struct perf_event_header header;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_SWITCH = 14,

        /*
         * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
         * next_prev_tid that are the next (switching out) or previous
         * (switching in) pid/tid.
         *
         * struct {
         *      struct perf_event_header header;
         *      u32 next_prev_pid;
         *      u32 next_prev_tid;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_SWITCH_CPU_WIDE = 15,

        PERF_RECORD_MAX, /* non-ABI */
};
#define PERF_MAX_STACK_DEPTH        127
#define PERF_MAX_CONTEXTS_PER_STACK   8

enum perf_callchain_context {
        PERF_CONTEXT_HV           = (__u64)-32,
        PERF_CONTEXT_KERNEL       = (__u64)-128,
        PERF_CONTEXT_USER         = (__u64)-512,

        PERF_CONTEXT_GUEST        = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER   = (__u64)-2560,

        PERF_CONTEXT_MAX          = (__u64)-4095,
};
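
/*
 * Example (illustrative): the values above are huge u64s embedded in the
 * PERF_SAMPLE_CALLCHAIN ips[] array to mark a context change within the
 * chain; anything >= PERF_CONTEXT_MAX is a marker, not an address (nr
 * and ips are assumed to come from a parsed sample):
 *
 *      for (__u64 i = 0; i < nr; i++) {
 *              if (ips[i] >= PERF_CONTEXT_MAX) {
 *                      // context marker, e.g. PERF_CONTEXT_KERNEL:
 *                      // subsequent entries are from that context
 *              } else {
 *                      // an ordinary instruction pointer
 *              }
 *      }
 */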
/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */

#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT   (1UL << 1)
#define PERF_FLAG_PID_CGROUP  (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC  (1UL << 3) /* O_CLOEXEC */

union perf_mem_data_src {
        __u64 val;
        struct {
                __u64 mem_op:5,    /* type of opcode */
                      mem_lvl:14,  /* memory hierarchy level */
                      mem_snoop:5, /* snoop mode */
                      mem_lock:2,  /* lock instr */
                      mem_dtlb:7,  /* tlb access */
                      mem_rsvd:31;
        };
};
/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA     0x01 /* not available */
#define PERF_MEM_OP_LOAD   0x02 /* load instruction */
#define PERF_MEM_OP_STORE  0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
#define PERF_MEM_OP_EXEC   0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT  0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA       0x01   /* not available */
#define PERF_MEM_LVL_HIT      0x02   /* hit level */
#define PERF_MEM_LVL_MISS     0x04   /* miss level */
#define PERF_MEM_LVL_L1       0x08   /* L1 */
#define PERF_MEM_LVL_LFB      0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2       0x20   /* L2 */
#define PERF_MEM_LVL_L3       0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM  0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1 0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2 0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1 0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2 0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO       0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC      0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT    5

/* snoop mode */
#define PERF_MEM_SNOOP_NA    0x01 /* not available */
#define PERF_MEM_SNOOP_NONE  0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT   0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS  0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM  0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT 19

/* locked instruction */
#define PERF_MEM_LOCK_NA     0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT  24

/* TLB access */
#define PERF_MEM_TLB_NA    0x01 /* not available */
#define PERF_MEM_TLB_HIT   0x02 /* hit level */
#define PERF_MEM_TLB_MISS  0x04 /* miss level */
#define PERF_MEM_TLB_L1    0x08 /* L1 */
#define PERF_MEM_TLB_L2    0x10 /* L2 */
#define PERF_MEM_TLB_WK    0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS    0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT 26

#define PERF_MEM_S(a, s) \
        (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
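
/*
 * Example (illustrative): PERF_MEM_S() shifts one of the field values
 * above into its slot in a data_src word; decoding goes through the
 * union defined earlier:
 *
 *      __u64 src = PERF_MEM_S(OP, LOAD) |
 *                  PERF_MEM_S(LVL, MISS) | PERF_MEM_S(LVL, L1) |
 *                  PERF_MEM_S(SNOOP, NA) |
 *                  PERF_MEM_S(LOCK, NA) |
 *                  PERF_MEM_S(TLB, NA);
 *
 *      union perf_mem_data_src dsrc = { .val = src };
 *      if ((dsrc.mem_op & PERF_MEM_OP_LOAD) &&
 *          (dsrc.mem_lvl & PERF_MEM_LVL_MISS)) {
 *              // an L1-missing load
 *      }
 */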
/*
 * Layout of a single taken branch record:
 *
 *  from:      source instruction (may not always be a branch insn)
 *  to:        branch target
 *  mispred:   branch target was mispredicted
 *  predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional. If it is not supported,
 * mispred = predicted = 0.
 *
 *  in_tx:  running in a hardware transaction
 *  abort:  aborting a hardware transaction
 *  cycles: cycles from last branch (or 0 if not supported)
 */
struct perf_branch_entry {
        __u64 from;
        __u64 to;
        __u64 mispred:1,   /* target mispredicted */
              predicted:1, /* target predicted */
              in_tx:1,     /* in transaction */
              abort:1,     /* transaction abort */
              cycles:16,   /* cycle count to last branch */
              reserved:44;
};
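
/*
 * Example (illustrative): consuming the PERF_SAMPLE_BRANCH_STACK part of
 * a sample, which is a u64 entry count followed by that many records
 * (p is assumed to point at the nr field inside a parsed sample payload):
 *
 *      __u64 nr = *(__u64 *)p;
 *      struct perf_branch_entry *br = (void *)(p + sizeof(__u64));
 *
 *      for (__u64 i = 0; i < nr; i++)
 *              printf("%#llx -> %#llx %s\n", br[i].from, br[i].to,
 *                     br[i].mispred ? "(mispredicted)" : "");
 */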
#endif /* _UAPI_LINUX_PERF_EVENT_H */