trace_events_trigger.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606
  1. /*
  2. * trace_events_trigger - trace event triggers
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  19. */
  20. #include <linux/module.h>
  21. #include <linux/ctype.h>
  22. #include <linux/mutex.h>
  23. #include <linux/slab.h>
  24. #include "trace.h"
/* List of all registered trigger commands (e.g. traceon, snapshot, ...) */
static LIST_HEAD(trigger_commands);
/* Protects trigger_commands against concurrent register/unregister/lookup */
static DEFINE_MUTEX(trigger_cmd_mutex);
/*
 * trigger_data_free - Free an event trigger's data after it is unlinked
 * @data: The trigger data to free
 *
 * Drops the trigger's filter first, then waits for all preempt-disabled
 * (tracepoint) readers to finish before freeing, since triggers are
 * invoked under rcu_read_lock_sched().
 */
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			/* No record: invoke unconditionally */
			data->ops->func(data, rec);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			/* Defer to event_triggers_post_call() */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 * @rec: The trace entry for the event
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt,
			 void *rec)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, rec);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
/* Sentinel seq iterator value: show the "Available triggers" banner instead */
#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)

/* seq_file ->next: advance through the event's trigger list */
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	/* The banner is a single pseudo-entry; nothing follows it */
	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}
/*
 * seq_file ->start: take event_mutex for the whole read pass and return
 * the first iterator position.  The mutex is dropped in trigger_stop().
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	/* No triggers set: show the available-triggers banner once */
	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}
/* seq_file ->stop: release the mutex taken in trigger_start() */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
/*
 * seq_file ->show: print either the available-triggers banner (when the
 * event has no triggers) or one registered trigger via its ->print op.
 */
static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		/* Reverse order so output matches registration order */
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}
/* seq_file operations backing an event's 'trigger' file reads */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
/*
 * Open handler for an event's 'trigger' file.  Opening with O_TRUNC for
 * write removes all existing triggers (via each command's unreg_all);
 * opening for read sets up the seq_file iteration.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	/* The event may have been removed while the file was open-able */
	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			/* trigger_start() needs the struct file to find the event */
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
  188. static int trigger_process_regex(struct trace_event_file *file, char *buff)
  189. {
  190. char *command, *next = buff;
  191. struct event_command *p;
  192. int ret = -EINVAL;
  193. command = strsep(&next, ": \t");
  194. command = (command[0] != '!') ? command : command + 1;
  195. mutex_lock(&trigger_cmd_mutex);
  196. list_for_each_entry(p, &trigger_commands, list) {
  197. if (strcmp(p->name, command) == 0) {
  198. ret = p->func(p, file, buff, command, next);
  199. goto out_unlock;
  200. }
  201. }
  202. out_unlock:
  203. mutex_unlock(&trigger_cmd_mutex);
  204. return ret;
  205. }
/*
 * Write handler for an event's 'trigger' file: copy the user's command
 * line, trim it, and hand it to trigger_process_regex() under event_mutex.
 *
 * Returns the number of bytes consumed on success, negative errno on
 * failure.
 */
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	/* One page is the maximum accepted command length */
	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		/* Event vanished between open and write */
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}
/* Release handler: tear down the seq_file state set up at open time */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
/* Thin wrapper used as the 'trigger' file's ->write op */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
/* Thin wrapper used as the 'trigger' file's ->open op */
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}
/* Thin wrapper used as the 'trigger' file's ->release op */
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
/* File operations for each event's 'trigger' control file */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
  269. /*
  270. * Currently we only register event commands from __init, so mark this
  271. * __init too.
  272. */
  273. __init int register_event_command(struct event_command *cmd)
  274. {
  275. struct event_command *p;
  276. int ret = 0;
  277. mutex_lock(&trigger_cmd_mutex);
  278. list_for_each_entry(p, &trigger_commands, list) {
  279. if (strcmp(cmd->name, p->name) == 0) {
  280. ret = -EBUSY;
  281. goto out_unlock;
  282. }
  283. }
  284. list_add(&cmd->list, &trigger_commands);
  285. out_unlock:
  286. mutex_unlock(&trigger_cmd_mutex);
  287. return ret;
  288. }
  289. /*
  290. * Currently we only unregister event commands from __init, so mark
  291. * this __init too.
  292. */
  293. __init int unregister_event_command(struct event_command *cmd)
  294. {
  295. struct event_command *p, *n;
  296. int ret = -ENODEV;
  297. mutex_lock(&trigger_cmd_mutex);
  298. list_for_each_entry_safe(p, n, &trigger_commands, list) {
  299. if (strcmp(cmd->name, p->name) == 0) {
  300. ret = 0;
  301. list_del_init(&p->list);
  302. goto out_unlock;
  303. }
  304. }
  305. out_unlock:
  306. mutex_unlock(&trigger_cmd_mutex);
  307. return ret;
  308. }
  309. /**
  310. * event_trigger_print - Generic event_trigger_ops @print implementation
  311. * @name: The name of the event trigger
  312. * @m: The seq_file being printed to
  313. * @data: Trigger-specific data
  314. * @filter_str: filter_str to print, if present
  315. *
  316. * Common implementation for event triggers to print themselves.
  317. *
  318. * Usually wrapped by a function that simply sets the @name of the
  319. * trigger command and then invokes this.
  320. *
  321. * Return: 0 on success, errno otherwise
  322. */
  323. static int
  324. event_trigger_print(const char *name, struct seq_file *m,
  325. void *data, char *filter_str)
  326. {
  327. long count = (long)data;
  328. seq_puts(m, name);
  329. if (count == -1)
  330. seq_puts(m, ":unlimited");
  331. else
  332. seq_printf(m, ":count=%ld", count);
  333. if (filter_str)
  334. seq_printf(m, " if %s\n", filter_str);
  335. else
  336. seq_putc(m, '\n');
  337. return 0;
  338. }
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.  Simply
 * takes a reference on the trigger data; event_trigger_free() drops
 * it.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}
/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.  Drops
 * the reference taken by event_trigger_init() and frees the trigger
 * data when the last reference goes away.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	/* Refcount underflow indicates an init/free imbalance */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}
/*
 * trace_event_trigger_enable_disable - Enable/disable an event for triggers
 * @file: The trace_event_file for the triggering event
 * @trigger_enable: nonzero to enable, zero to disable
 *
 * Keeps a per-file trigger-mode refcount (tm_ref) so the event is
 * enabled while at least one trigger is attached, and disabled when
 * the last trigger is removed.  Only the first enable / last disable
 * actually toggles TRIGGER_MODE and the event itself.
 *
 * Return: the result of trace_event_enable_disable(), or 0 when only
 * the refcount changed.
 */
int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		/* Already in trigger mode: just account the reference */
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		/* Other triggers remain: keep the event enabled */
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
  394. /**
  395. * clear_event_triggers - Clear all triggers associated with a trace array
  396. * @tr: The trace array to clear
  397. *
  398. * For each trigger, the triggering event has its tm_ref decremented
  399. * via trace_event_trigger_enable_disable(), and any associated event
  400. * (in the case of enable/disable_event triggers) will have its sm_ref
  401. * decremented via free()->trace_event_enable_disable(). That
  402. * combination effectively reverses the soft-mode/trigger state added
  403. * by trigger registration.
  404. *
  405. * Must be called with event_mutex held.
  406. */
  407. void
  408. clear_event_triggers(struct trace_array *tr)
  409. {
  410. struct trace_event_file *file;
  411. list_for_each_entry(file, &tr->events, list) {
  412. struct event_trigger_data *data;
  413. list_for_each_entry_rcu(data, &file->triggers, list) {
  414. trace_event_trigger_enable_disable(file, 0);
  415. if (data->ops->free)
  416. data->ops->free(data->ops, data);
  417. }
  418. }
  419. }
/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		/* Any conditional trigger forces deferred invocation */
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered (0 or 1) on success,
 * errno otherwise.  Callers treat 0 as failure (nothing registered).
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	/* Only one trigger of a given type may be attached to an event */
	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++; /* ret == 1: one trigger registered */

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/* Enabling the event failed: roll back the registration */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.  Matches by
 * trigger_type, unlinks the trigger, and drops its reference via the
 * trigger's @free op.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			struct event_trigger_data *test,
			struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* 'data' is only valid here when the loop exited via break */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.  Handles both registration and (when @glob starts
 * with '!') unregistration, plus optional ":count" and "if <filter>"
 * clauses.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1; /* -1 == unlimited invocations */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		/* Removal request: unregister the matching trigger */
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	/* Drop any filter installed above before freeing the data */
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	/* The string must start with "if" followed by the predicate */
	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	/*
	 * Publish the new filter via RCU, then wait for in-flight
	 * tracepoint handlers before freeing the old one.
	 */
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		/* Keep a copy of the string for later display */
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			/* Out of memory: drop the filter entirely */
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
/* Global list of all named triggers, linked via named_list */
static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		/* Entries with named_data set are followers, not owners */
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}
  688. /**
  689. * is_named_trigger - determine if a given trigger is a named trigger
  690. * @test: The trigger data to test
  691. *
  692. * Return: true if 'test' is a named trigger, false otherwise.
  693. */
  694. bool is_named_trigger(struct event_trigger_data *test)
  695. {
  696. struct event_trigger_data *data;
  697. list_for_each_entry(data, &named_triggers, named_list) {
  698. if (test == data)
  699. return true;
  700. }
  701. return false;
  702. }
/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Duplicates @name into @data and links @data onto the global
 * named-trigger list.
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}
/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 *
 * Frees the name allocated by save_named_trigger() and unlinks @data
 * from the global named-trigger list.
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}
  728. static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
  729. {
  730. struct event_trigger_data *test;
  731. list_for_each_entry(test, &named_triggers, named_list) {
  732. if (strcmp(test->name, data->name) == 0) {
  733. if (pause) {
  734. test->paused_tmp = test->paused;
  735. test->paused = true;
  736. } else {
  737. test->paused = test->paused_tmp;
  738. }
  739. }
  740. }
  741. }
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}
/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common trigger data owned by the first named trigger
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data. The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference. This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
  784. static void
  785. traceon_trigger(struct event_trigger_data *data, void *rec)
  786. {
  787. if (tracing_is_on())
  788. return;
  789. tracing_on();
  790. }
  791. static void
  792. traceon_count_trigger(struct event_trigger_data *data, void *rec)
  793. {
  794. if (tracing_is_on())
  795. return;
  796. if (!data->count)
  797. return;
  798. if (data->count != -1)
  799. (data->count)--;
  800. tracing_on();
  801. }
  802. static void
  803. traceoff_trigger(struct event_trigger_data *data, void *rec)
  804. {
  805. if (!tracing_is_on())
  806. return;
  807. tracing_off();
  808. }
  809. static void
  810. traceoff_count_trigger(struct event_trigger_data *data, void *rec)
  811. {
  812. if (!tracing_is_on())
  813. return;
  814. if (!data->count)
  815. return;
  816. if (data->count != -1)
  817. (data->count)--;
  818. tracing_off();
  819. }
/* Show a registered traceon trigger ("traceon[:count] [if filter]") */
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	/* count is smuggled through the void * arg of the generic printer */
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}
/* Show a registered traceoff trigger ("traceoff[:count] [if filter]") */
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	/* count is smuggled through the void * arg of the generic printer */
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}
/*
 * Ops tables for traceon/traceoff triggers: one unconditional variant
 * and one count-limited variant each. All share the generic refcounted
 * init/free helpers.
 */
static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
  858. static struct event_trigger_ops *
  859. onoff_get_trigger_ops(char *cmd, char *param)
  860. {
  861. struct event_trigger_ops *ops;
  862. /* we register both traceon and traceoff to this callback */
  863. if (strcmp(cmd, "traceon") == 0)
  864. ops = param ? &traceon_count_trigger_ops :
  865. &traceon_trigger_ops;
  866. else
  867. ops = param ? &traceoff_count_trigger_ops :
  868. &traceoff_trigger_ops;
  869. return ops;
  870. }
/*
 * Command descriptors for "traceon" and "traceoff". traceoff runs as a
 * POST_TRIGGER so the triggering event itself is still recorded before
 * the ring buffer is disabled.
 */
static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
  890. #ifdef CONFIG_TRACER_SNAPSHOT
/* Trigger callback: take a snapshot of the current tracing buffer */
static void
snapshot_trigger(struct event_trigger_data *data, void *rec)
{
	tracing_snapshot();
}
  896. static void
  897. snapshot_count_trigger(struct event_trigger_data *data, void *rec)
  898. {
  899. if (!data->count)
  900. return;
  901. if (data->count != -1)
  902. (data->count)--;
  903. snapshot_trigger(data, rec);
  904. }
  905. static int
  906. register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
  907. struct event_trigger_data *data,
  908. struct trace_event_file *file)
  909. {
  910. int ret = register_trigger(glob, ops, data, file);
  911. if (ret > 0 && tracing_alloc_snapshot() != 0) {
  912. unregister_trigger(glob, ops, data, file);
  913. ret = 0;
  914. }
  915. return ret;
  916. }
/* Show a registered snapshot trigger ("snapshot[:count] [if filter]") */
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	/* count is smuggled through the void * arg of the generic printer */
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}
/* Ops tables for the snapshot trigger: unconditional and count-limited */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
  936. static struct event_trigger_ops *
  937. snapshot_get_trigger_ops(char *cmd, char *param)
  938. {
  939. return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
  940. }
/* Command descriptor for the "snapshot" trigger */
static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
  950. static __init int register_trigger_snapshot_cmd(void)
  951. {
  952. int ret;
  953. ret = register_event_command(&trigger_snapshot_cmd);
  954. WARN_ON(ret < 0);
  955. return ret;
  956. }
#else
/* Snapshot support compiled out: registration is a successful no-op */
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
  960. #ifdef CONFIG_STACKTRACE
/*
 * Skip 3:
 * stacktrace_trigger()
 * event_triggers_post_call()
 * trace_event_raw_event_xxx()
 */
#define STACK_SKIP 3

/*
 * Trigger callback: dump the current stack trace into the ring buffer,
 * skipping the trigger-internal frames above.
 */
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec)
{
	trace_dump_stack(STACK_SKIP);
}
  973. static void
  974. stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
  975. {
  976. if (!data->count)
  977. return;
  978. if (data->count != -1)
  979. (data->count)--;
  980. stacktrace_trigger(data, rec);
  981. }
/* Show a registered stacktrace trigger ("stacktrace[:count] [if filter]") */
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	/* count is smuggled through the void * arg of the generic printer */
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}
/* Ops tables for the stacktrace trigger: unconditional and count-limited */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
  1001. static struct event_trigger_ops *
  1002. stacktrace_get_trigger_ops(char *cmd, char *param)
  1003. {
  1004. return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
  1005. }
/*
 * Command descriptor for the "stacktrace" trigger. Runs as a
 * POST_TRIGGER so the stack dump lands after the triggering event.
 */
static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
  1016. static __init int register_trigger_stacktrace_cmd(void)
  1017. {
  1018. int ret;
  1019. ret = register_event_command(&trigger_stacktrace_cmd);
  1020. WARN_ON(ret < 0);
  1021. return ret;
  1022. }
#else
/* Stacktrace support compiled out: registration is a successful no-op */
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */
/* Undo register_trigger_traceon_traceoff_cmds() (boot-time error path) */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
  1031. static void
  1032. event_enable_trigger(struct event_trigger_data *data, void *rec)
  1033. {
  1034. struct enable_trigger_data *enable_data = data->private_data;
  1035. if (enable_data->enable)
  1036. clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
  1037. else
  1038. set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
  1039. }
  1040. static void
  1041. event_enable_count_trigger(struct event_trigger_data *data, void *rec)
  1042. {
  1043. struct enable_trigger_data *enable_data = data->private_data;
  1044. if (!data->count)
  1045. return;
  1046. /* Skip if the event is in a state we want to switch to */
  1047. if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
  1048. return;
  1049. if (data->count != -1)
  1050. (data->count)--;
  1051. event_enable_trigger(data, rec);
  1052. }
  1053. int event_enable_trigger_print(struct seq_file *m,
  1054. struct event_trigger_ops *ops,
  1055. struct event_trigger_data *data)
  1056. {
  1057. struct enable_trigger_data *enable_data = data->private_data;
  1058. seq_printf(m, "%s:%s:%s",
  1059. enable_data->hist ?
  1060. (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
  1061. (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
  1062. enable_data->file->event_call->class->system,
  1063. trace_event_name(enable_data->file->event_call));
  1064. if (data->count == -1)
  1065. seq_puts(m, ":unlimited");
  1066. else
  1067. seq_printf(m, ":count=%ld", data->count);
  1068. if (data->filter_str)
  1069. seq_printf(m, " if %s\n", data->filter_str);
  1070. else
  1071. seq_putc(m, '\n');
  1072. return 0;
  1073. }
/*
 * Drop a reference on an enable/disable trigger. When the last
 * reference goes away, tear down everything the registration set up:
 * drop the target event's SOFT_MODE registration, release the module
 * reference pinning the target event, and free both the trigger data
 * and its private enable_data.
 */
void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	/* Guard against refcount underflow (would mean a double free) */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
/*
 * Ops tables for enable_event/disable_event triggers. All four share
 * the same callbacks; polarity lives in enable_data->enable, which is
 * set by event_enable_trigger_func() based on the command name, so the
 * enable and disable tables are structurally identical.
 */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
/*
 * Parse and install (or remove) an enable_event/disable_event trigger.
 *
 * @cmd_ops: the event_command descriptor for this trigger type
 * @file:    the event file the trigger is attached to (the source event)
 * @glob:    the raw command; a leading '!' means "remove this trigger"
 * @cmd:     the command name (enable_event, disable_event, hist variants)
 * @param:   "<system>:<event>[:count] [if <filter>]" - identifies the
 *           TARGET event to be soft-enabled/disabled when @file fires
 *
 * Return: 0 on success, negative error otherwise.
 */
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	/* split "system:event[:count]" - trigger keeps the optional count */
	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	/* -1 means "unlimited"; overridden below if a count was given */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	/*
	 * Removal: the freshly built trigger_data is only used as a key
	 * to find the registered instance, then both temporaries freed.
	 */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* optional ":count" suffix after the target event name */
	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Put the target event into SOFT_MODE so the bit flip takes effect */
	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}
  1233. int event_enable_register_trigger(char *glob,
  1234. struct event_trigger_ops *ops,
  1235. struct event_trigger_data *data,
  1236. struct trace_event_file *file)
  1237. {
  1238. struct enable_trigger_data *enable_data = data->private_data;
  1239. struct enable_trigger_data *test_enable_data;
  1240. struct event_trigger_data *test;
  1241. int ret = 0;
  1242. list_for_each_entry_rcu(test, &file->triggers, list) {
  1243. test_enable_data = test->private_data;
  1244. if (test_enable_data &&
  1245. (test->cmd_ops->trigger_type ==
  1246. data->cmd_ops->trigger_type) &&
  1247. (test_enable_data->file == enable_data->file)) {
  1248. ret = -EEXIST;
  1249. goto out;
  1250. }
  1251. }
  1252. if (data->ops->init) {
  1253. ret = data->ops->init(data->ops, data);
  1254. if (ret < 0)
  1255. goto out;
  1256. }
  1257. list_add_rcu(&data->list, &file->triggers);
  1258. ret++;
  1259. update_cond_flag(file);
  1260. if (trace_event_trigger_enable_disable(file, 1) < 0) {
  1261. list_del_rcu(&data->list);
  1262. update_cond_flag(file);
  1263. ret--;
  1264. }
  1265. out:
  1266. return ret;
  1267. }
  1268. void event_enable_unregister_trigger(char *glob,
  1269. struct event_trigger_ops *ops,
  1270. struct event_trigger_data *test,
  1271. struct trace_event_file *file)
  1272. {
  1273. struct enable_trigger_data *test_enable_data = test->private_data;
  1274. struct enable_trigger_data *enable_data;
  1275. struct event_trigger_data *data;
  1276. bool unregistered = false;
  1277. list_for_each_entry_rcu(data, &file->triggers, list) {
  1278. enable_data = data->private_data;
  1279. if (enable_data &&
  1280. (data->cmd_ops->trigger_type ==
  1281. test->cmd_ops->trigger_type) &&
  1282. (enable_data->file == test_enable_data->file)) {
  1283. unregistered = true;
  1284. list_del_rcu(&data->list);
  1285. trace_event_trigger_enable_disable(file, 0);
  1286. update_cond_flag(file);
  1287. break;
  1288. }
  1289. }
  1290. if (unregistered && data->ops->free)
  1291. data->ops->free(data->ops, data);
  1292. }
  1293. static struct event_trigger_ops *
  1294. event_enable_get_trigger_ops(char *cmd, char *param)
  1295. {
  1296. struct event_trigger_ops *ops;
  1297. bool enable;
  1298. #ifdef CONFIG_HIST_TRIGGERS
  1299. enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
  1300. (strcmp(cmd, ENABLE_HIST_STR) == 0));
  1301. #else
  1302. enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
  1303. #endif
  1304. if (enable)
  1305. ops = param ? &event_enable_count_trigger_ops :
  1306. &event_enable_trigger_ops;
  1307. else
  1308. ops = param ? &event_disable_count_trigger_ops :
  1309. &event_disable_trigger_ops;
  1310. return ops;
  1311. }
/*
 * Command descriptors for enable_event/disable_event. Both share the
 * same callbacks; polarity is derived from the command name inside
 * event_enable_trigger_func() and event_enable_get_trigger_ops().
 */
static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
/* Undo register_trigger_enable_disable_cmds() (boot-time error path) */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
  1335. static __init int register_trigger_enable_disable_cmds(void)
  1336. {
  1337. int ret;
  1338. ret = register_event_command(&trigger_enable_cmd);
  1339. if (WARN_ON(ret < 0))
  1340. return ret;
  1341. ret = register_event_command(&trigger_disable_cmd);
  1342. if (WARN_ON(ret < 0))
  1343. unregister_trigger_enable_disable_cmds();
  1344. return ret;
  1345. }
  1346. static __init int register_trigger_traceon_traceoff_cmds(void)
  1347. {
  1348. int ret;
  1349. ret = register_event_command(&trigger_traceon_cmd);
  1350. if (WARN_ON(ret < 0))
  1351. return ret;
  1352. ret = register_event_command(&trigger_traceoff_cmd);
  1353. if (WARN_ON(ret < 0))
  1354. unregister_trigger_traceon_traceoff_cmds();
  1355. return ret;
  1356. }
/*
 * Register all built-in trigger commands at boot. Individual failures
 * are reported via WARN_ON inside each helper; registration of the
 * remaining commands continues regardless, so this always returns 0.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}