/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *      Nishanth Menon
 *      Romit Dasgupta
 *      Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "opp.h"

static struct opp_table *_managed_opp(const struct device_node *np)
{
        struct opp_table *opp_table;

        list_for_each_entry_rcu(opp_table, &opp_tables, node) {
                if (opp_table->np == np) {
                        /*
                         * Multiple devices can point to the same OPP table and
                         * so will have the same node pointer, np.
                         *
                         * But the OPPs will be considered as shared only if the
                         * OPP table contains an "opp-shared" property.
                         */
                        if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
                                return opp_table;

                        return NULL;
                }
        }

        return NULL;
}

void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
{
        struct device_node *np;

        /*
         * Only required for backward compatibility with v1 bindings, but isn't
         * harmful for other cases. And so we do it unconditionally.
         */
        np = of_node_get(dev->of_node);
        if (np) {
                u32 val;

                if (!of_property_read_u32(np, "clock-latency", &val))
                        opp_table->clock_latency_ns_max = val;
                of_property_read_u32(np, "voltage-tolerance",
                                     &opp_table->voltage_tolerance_v1);
                of_node_put(np);
        }
}

static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
                              struct device_node *np)
{
        unsigned int count = opp_table->supported_hw_count;
        u32 version;
        int ret;

        if (!opp_table->supported_hw) {
                /*
                 * In the case that no supported_hw has been set by the
                 * platform but there is an opp-supported-hw value set for
                 * an OPP then the OPP should not be enabled as there is
                 * no way to see if the hardware supports it.
                 */
                if (of_find_property(np, "opp-supported-hw", NULL))
                        return false;
                else
                        return true;
        }

        while (count--) {
                ret = of_property_read_u32_index(np, "opp-supported-hw", count,
                                                 &version);
                if (ret) {
                        dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
                                 __func__, count, ret);
                        return false;
                }

                /* Both of these are bitwise masks of the versions */
                if (!(version & opp_table->supported_hw[count]))
                        return false;
        }

        return true;
}

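/*
 * Illustration of the matching done in _opp_is_supported() above (the values
 * are made up): if the platform registered supported_hw = { 0x8, 0x2 } via
 * dev_pm_opp_set_supported_hw(), an OPP node carrying
 *
 *      opp-supported-hw = <0xF 0x3>;
 *
 * is accepted, because each level ANDs to a non-zero value (0xF & 0x8 and
 * 0x3 & 0x2). An OPP carrying <0x1 0x3> would be rejected at the first level.
 */
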
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
                              struct opp_table *opp_table)
{
        u32 *microvolt, *microamp = NULL;
        int supplies, vcount, icount, ret, i, j;
        struct property *prop = NULL;
        char name[NAME_MAX];

        supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;

        /* Search for "opp-microvolt-<name>" */
        if (opp_table->prop_name) {
                snprintf(name, sizeof(name), "opp-microvolt-%s",
                         opp_table->prop_name);
                prop = of_find_property(opp->np, name, NULL);
        }

        if (!prop) {
                /* Search for "opp-microvolt" */
                sprintf(name, "opp-microvolt");
                prop = of_find_property(opp->np, name, NULL);

                /* Missing property isn't a problem, but an invalid entry is */
                if (!prop)
                        return 0;
        }

        vcount = of_property_count_u32_elems(opp->np, name);
        if (vcount < 0) {
                dev_err(dev, "%s: Invalid %s property (%d)\n",
                        __func__, name, vcount);
                return vcount;
        }

        /* There can be one or three elements per supply */
        if (vcount != supplies && vcount != supplies * 3) {
                dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
                        __func__, name, vcount, supplies);
                return -EINVAL;
        }

        microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
        if (!microvolt)
                return -ENOMEM;

        ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
        if (ret) {
                dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
                ret = -EINVAL;
                goto free_microvolt;
        }

        /* Search for "opp-microamp-<name>" */
        prop = NULL;
        if (opp_table->prop_name) {
                snprintf(name, sizeof(name), "opp-microamp-%s",
                         opp_table->prop_name);
                prop = of_find_property(opp->np, name, NULL);
        }

        if (!prop) {
                /* Search for "opp-microamp" */
                sprintf(name, "opp-microamp");
                prop = of_find_property(opp->np, name, NULL);
        }

        if (prop) {
                icount = of_property_count_u32_elems(opp->np, name);
                if (icount < 0) {
                        dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
                                name, icount);
                        ret = icount;
                        goto free_microvolt;
                }

                if (icount != supplies) {
                        dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
                                __func__, name, icount, supplies);
                        ret = -EINVAL;
                        goto free_microvolt;
                }

                microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
                if (!microamp) {
                        ret = -EINVAL;
                        goto free_microvolt;
                }

                ret = of_property_read_u32_array(opp->np, name, microamp,
                                                 icount);
                if (ret) {
                        dev_err(dev, "%s: error parsing %s: %d\n", __func__,
                                name, ret);
                        ret = -EINVAL;
                        goto free_microamp;
                }
        }

        for (i = 0, j = 0; i < supplies; i++) {
                opp->supplies[i].u_volt = microvolt[j++];

                if (vcount == supplies) {
                        opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
                        opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
                } else {
                        opp->supplies[i].u_volt_min = microvolt[j++];
                        opp->supplies[i].u_volt_max = microvolt[j++];
                }

                if (microamp)
                        opp->supplies[i].u_amp = microamp[i];
        }

free_microamp:
        kfree(microamp);
free_microvolt:
        kfree(microvolt);

        return ret;
}

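/*
 * Illustration of the supply parsing above (values are made up): with a
 * single regulator, a triplet entry such as
 *
 *      opp-microvolt = <975000 970000 985000>;
 *
 * fills supplies[0].u_volt = 975000, u_volt_min = 970000 and
 * u_volt_max = 985000, while a single-value entry is copied into all three
 * fields. An optional opp-microamp = <70000>; would set supplies[0].u_amp.
 */
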
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *                                entries
 * @dev:        device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
        _dev_pm_opp_remove_table(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
        /*
         * TODO: Support for multiple OPP tables.
         *
         * There should be only ONE phandle present in "operating-points-v2"
         * property.
         */

        return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);

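/*
 * Example (illustrative DT snippet, node names are made up) of what this
 * helper resolves: a device points at its OPP table through a single phandle,
 *
 *      cpu0: cpu@0 {
 *              operating-points-v2 = <&cpu0_opp_table>;
 *      };
 *
 * and dev_pm_opp_of_get_opp_desc_node() returns the &cpu0_opp_table node with
 * a reference held, which the caller drops with of_node_put().
 */
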
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:        device for which we do this operation
 * @np:         device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0            On success OR
 *              Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST      Freq are same and volt are different OR
 *              Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM      Memory allocation failure
 * -EINVAL      Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
        struct opp_table *opp_table;
        struct dev_pm_opp *new_opp;
        u64 rate;
        u32 val;
        int ret;

        /* Hold our table modification lock here */
        mutex_lock(&opp_table_lock);

        new_opp = _allocate_opp(dev, &opp_table);
        if (!new_opp) {
                ret = -ENOMEM;
                goto unlock;
        }

        ret = of_property_read_u64(np, "opp-hz", &rate);
        if (ret < 0) {
                dev_err(dev, "%s: opp-hz not found\n", __func__);
                goto free_opp;
        }

        /* Check if the OPP supports hardware's hierarchy of versions or not */
        if (!_opp_is_supported(dev, opp_table, np)) {
                dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
                goto free_opp;
        }

        /*
         * Rate is defined as an unsigned long in clk API, and so casting
         * explicitly to its type. Must be fixed once rate is 64 bit
         * guaranteed in clk API.
         */
        new_opp->rate = (unsigned long)rate;
        new_opp->turbo = of_property_read_bool(np, "turbo-mode");
        new_opp->np = np;
        new_opp->dynamic = false;
        new_opp->available = true;

        if (!of_property_read_u32(np, "clock-latency-ns", &val))
                new_opp->clock_latency_ns = val;

        ret = opp_parse_supplies(new_opp, dev, opp_table);
        if (ret)
                goto free_opp;

        ret = _opp_add(dev, new_opp, opp_table);
        if (ret)
                goto free_opp;

        /* OPP to select on device suspend */
        if (of_property_read_bool(np, "opp-suspend")) {
                if (opp_table->suspend_opp) {
                        dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
                                 __func__, opp_table->suspend_opp->rate,
                                 new_opp->rate);
                } else {
                        new_opp->suspend = true;
                        opp_table->suspend_opp = new_opp;
                }
        }

        if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
                opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

        mutex_unlock(&opp_table_lock);

        pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
                 __func__, new_opp->turbo, new_opp->rate,
                 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
                 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);

        /*
         * Notify the changes in the availability of the operable
         * frequency/voltage list.
         */
        srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
        return 0;

free_opp:
        _opp_remove(opp_table, new_opp, false);
unlock:
        mutex_unlock(&opp_table_lock);
        return ret;
}

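/*
 * Example (illustrative only) of a v2 OPP entry consumed by
 * _opp_add_static_v2(); every property below maps to one of the reads above:
 *
 *      opp@1000000000 {
 *              opp-hz = /bits/ 64 <1000000000>;
 *              opp-microvolt = <975000 970000 985000>;
 *              clock-latency-ns = <300000>;
 *              turbo-mode;
 *              opp-suspend;
 *      };
 */
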
/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
        struct device_node *np;
        struct opp_table *opp_table;
        int ret = 0, count = 0;

        mutex_lock(&opp_table_lock);

        opp_table = _managed_opp(opp_np);
        if (opp_table) {
                /* OPPs are already managed */
                if (!_add_opp_dev(dev, opp_table))
                        ret = -ENOMEM;
                mutex_unlock(&opp_table_lock);
                return ret;
        }
        mutex_unlock(&opp_table_lock);

        /* We have opp-table node now, iterate over it and add OPPs */
        for_each_available_child_of_node(opp_np, np) {
                count++;

                ret = _opp_add_static_v2(dev, np);
                if (ret) {
                        dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
                                ret);
                        goto free_table;
                }
        }

        /* There should be one or more OPPs defined */
        if (WARN_ON(!count))
                return -ENOENT;

        mutex_lock(&opp_table_lock);

        opp_table = _find_opp_table(dev);
        if (WARN_ON(IS_ERR(opp_table))) {
                ret = PTR_ERR(opp_table);
                mutex_unlock(&opp_table_lock);
                goto free_table;
        }

        opp_table->np = opp_np;
        if (of_property_read_bool(opp_np, "opp-shared"))
                opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
        else
                opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

        mutex_unlock(&opp_table_lock);

        return 0;

free_table:
        dev_pm_opp_of_remove_table(dev);

        return ret;
}

/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
        const struct property *prop;
        const __be32 *val;
        int nr;

        prop = of_find_property(dev->of_node, "operating-points", NULL);
        if (!prop)
                return -ENODEV;
        if (!prop->value)
                return -ENODATA;

        /*
         * Each OPP is a set of tuples consisting of frequency and
         * voltage like <freq-kHz vol-uV>.
         */
        nr = prop->length / sizeof(u32);
        if (nr % 2) {
                dev_err(dev, "%s: Invalid OPP table\n", __func__);
                return -EINVAL;
        }

        val = prop->value;
        while (nr) {
                unsigned long freq = be32_to_cpup(val++) * 1000;
                unsigned long volt = be32_to_cpup(val++);

                if (_opp_add_v1(dev, freq, volt, false))
                        dev_warn(dev, "%s: Failed to add OPP %ld\n",
                                 __func__, freq);
                nr -= 2;
        }

        return 0;
}

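/*
 * Example (illustrative only) of the deprecated v1 property parsed above;
 * each tuple is <frequency-kHz voltage-uV>:
 *
 *      operating-points = <998400 1200000>, <1094400 1225000>;
 */
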
/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:        device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0            On success OR
 *              Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST      Freq are same and volt are different OR
 *              Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM      Memory allocation failure
 * -ENODEV      when 'operating-points' property is not found or is invalid data
 *              in device node.
 * -ENODATA     when empty 'operating-points' property is found
 * -EINVAL      when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
        struct device_node *opp_np;
        int ret;

        /*
         * OPPs have two versions of bindings now. The older one is deprecated,
         * try for the new binding first.
         */
        opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
        if (!opp_np) {
                /*
                 * Try old-deprecated bindings for backward compatibility with
                 * older dtbs.
                 */
                return _of_add_opp_table_v1(dev);
        }

        ret = _of_add_opp_table_v2(dev, opp_np);
        of_node_put(opp_np);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);

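/*
 * Sketch of typical driver usage (error handling trimmed, not taken from any
 * specific driver):
 *
 *      ret = dev_pm_opp_of_add_table(dev);
 *      if (ret)
 *              return ret;
 *      ...
 *      dev_pm_opp_of_remove_table(dev);        (on remove or error unwind)
 */
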
/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:    cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
        _dev_pm_opp_cpumask_remove_table(cpumask, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:    cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
        struct device *cpu_dev;
        int cpu, ret = 0;

        WARN_ON(cpumask_empty(cpumask));

        for_each_cpu(cpu, cpumask) {
                cpu_dev = get_cpu_device(cpu);
                if (!cpu_dev) {
                        pr_err("%s: failed to get cpu%d device\n", __func__,
                               cpu);
                        continue;
                }

                ret = dev_pm_opp_of_add_table(cpu_dev);
                if (ret) {
                        pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
                               __func__, cpu, ret);

                        /* Free all other OPPs */
                        dev_pm_opp_of_cpumask_remove_table(cpumask);
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);

/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *                                    @cpu_dev using operating-points-v2
 *                                    bindings.
 *
 * @cpu_dev:    CPU device for which we do this operation
 * @cpumask:    cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
                                   struct cpumask *cpumask)
{
        struct device_node *np, *tmp_np;
        struct device *tcpu_dev;
        int cpu, ret = 0;

        /* Get OPP descriptor node */
        np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
        if (!np) {
                dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
                return -ENOENT;
        }

        cpumask_set_cpu(cpu_dev->id, cpumask);

        /* OPPs are shared ? */
        if (!of_property_read_bool(np, "opp-shared"))
                goto put_cpu_node;

        for_each_possible_cpu(cpu) {
                if (cpu == cpu_dev->id)
                        continue;

                tcpu_dev = get_cpu_device(cpu);
                if (!tcpu_dev) {
                        dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
                                __func__, cpu);
                        ret = -ENODEV;
                        goto put_cpu_node;
                }

                /* Get OPP descriptor node */
                tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev);
                if (!tmp_np) {
                        dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
                                __func__);
                        ret = -ENOENT;
                        goto put_cpu_node;
                }

                /* CPUs are sharing opp node */
                if (np == tmp_np)
                        cpumask_set_cpu(cpu, cpumask);

                of_node_put(tmp_np);
        }

put_cpu_node:
        of_node_put(np);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);

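/*
 * Sketch of how a cpufreq driver might combine the CPU helpers above
 * (illustrative only; "policy_cpus" is a hypothetical cpumask):
 *
 *      cpumask_clear(policy_cpus);
 *      ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy_cpus);
 *      if (!ret)
 *              ret = dev_pm_opp_of_cpumask_add_table(policy_cpus);
 */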