rpmsg_rpc.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430
  1. /*
  2. * Remote Processor Procedure Call Driver
  3. *
  4. * Copyright (C) 2012-2017 Texas Instruments Incorporated - http://www.ti.com/
  5. *
  6. * Erik Rainey <erik.rainey@ti.com>
  7. * Suman Anna <s-anna@ti.com>
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * version 2 as published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. */
  18. #define pr_fmt(fmt) "%s: " fmt, __func__
  19. #include <linux/kernel.h>
  20. #include <linux/module.h>
  21. #include <linux/slab.h>
  22. #include <linux/idr.h>
  23. #include <linux/poll.h>
  24. #include <linux/mutex.h>
  25. #include <linux/sched.h>
  26. #include <linux/fdtable.h>
  27. #include <linux/remoteproc.h>
  28. #include <linux/rpmsg.h>
  29. #include <linux/rpmsg_rpc.h>
  30. #include <linux/rpmsg/virtio_rpmsg.h>
  31. #include "rpmsg_rpc_internal.h"
  32. #define RPPC_MAX_DEVICES (8)
  33. #define RPPC_MAX_REG_FDS (10)
  34. #define RPPC_SIG_NUM_PARAM(sig) ((sig).num_param - 1)
  35. /* TODO: remove these fields */
  36. #define RPPC_JOBID_DISCRETE (0)
  37. #define RPPC_POOLID_DEFAULT (0x8000)
  38. static struct class *rppc_class;
  39. static dev_t rppc_dev;
  40. /* store all remote rpc connection services (usually one per remoteproc) */
  41. static DEFINE_IDR(rppc_devices);
  42. static DEFINE_MUTEX(rppc_devices_lock);
  43. /*
  44. * Retrieve the rproc instance so that it can be used for performing
  45. * address translations
  46. */
  47. static struct rproc *rpdev_to_rproc(struct rpmsg_device *rpdev)
  48. {
  49. struct virtio_device *vdev;
  50. vdev = virtio_rpmsg_get_vdev(rpdev);
  51. if (!vdev)
  52. return NULL;
  53. return rproc_vdev_to_rproc_safe(vdev);
  54. }
  55. /*
  56. * A wrapper function to translate local physical addresses to the remote core
  57. * device addresses (virtual addresses that a code on remote processor can use
  58. * directly.
  59. *
  60. * XXX: Fix this to return negative values on errors to follow normal kernel
  61. * conventions, and since 0 can also be a valid remote processor address
  62. *
  63. * Returns a remote processor device address on success, 0 otherwise
  64. */
  65. dev_addr_t rppc_local_to_remote_da(struct rppc_instance *rpc, phys_addr_t pa)
  66. {
  67. int ret;
  68. struct rproc *rproc;
  69. u64 da = 0;
  70. dev_addr_t rda;
  71. struct device *dev = rpc->dev;
  72. if (mutex_lock_interruptible(&rpc->rppcdev->lock))
  73. return 0;
  74. rproc = rpdev_to_rproc(rpc->rppcdev->rpdev);
  75. if (!rproc) {
  76. dev_err(dev, "error getting rproc for rpdev 0x%x\n",
  77. (u32)rpc->rppcdev->rpdev);
  78. } else {
  79. ret = rproc_pa_to_da(rproc, pa, &da);
  80. if (ret) {
  81. dev_err(dev, "error from rproc_pa_to_da, rproc = %p, pa = %pa ret = %d\n",
  82. rproc, &pa, ret);
  83. }
  84. }
  85. rda = (dev_addr_t)da;
  86. mutex_unlock(&rpc->rppcdev->lock);
  87. return rda;
  88. }
/*
 * Dump a decoded view of an RPC message to the debug log.
 *
 * @rpc:    instance the message belongs to (supplies the struct device)
 * @prefix: tag prepended to every log line (e.g. "RX:")
 * @buffer: raw message bytes, starting with a struct rppc_msg_header
 *
 * Debug-only helper: all output goes through dev_dbg(), so nothing is
 * emitted unless dynamic debug is enabled for this device.
 */
static void rppc_print_msg(struct rppc_instance *rpc, char *prefix,
			   char buffer[512])
{
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)buffer;
	struct rppc_instance_handle *hdl = NULL;
	struct rppc_query_function *info = NULL;
	struct rppc_packet *packet = NULL;
	struct rppc_param_data *param = NULL;
	struct device *dev = rpc->dev;
	u32 i = 0, paramsz = sizeof(*param);

	dev_dbg(dev, "%s HDR: msg_type = %d msg_len = %d\n",
		prefix, hdr->msg_type, hdr->msg_len);

	switch (hdr->msg_type) {
	case RPPC_MSGTYPE_CREATE_RESP:
	case RPPC_MSGTYPE_DELETE_RESP:
		/* payload is an instance handle: endpoint address + status */
		hdl = RPPC_PAYLOAD(buffer, rppc_instance_handle);
		dev_dbg(dev, "%s endpoint = %d status = %d\n",
			prefix, hdl->endpoint_address, hdl->status);
		break;
	case RPPC_MSGTYPE_FUNCTION_INFO:
		info = RPPC_PAYLOAD(buffer, rppc_query_function);
		dev_dbg(dev, "%s (info not yet implemented)\n", prefix);
		break;
	case RPPC_MSGTYPE_FUNCTION_CALL:
		/* packet header followed by a flat array of parameters */
		packet = RPPC_PAYLOAD(buffer, rppc_packet);
		dev_dbg(dev, "%s PACKET: desc = %04x msg_id = %04x flags = %08x func = 0x%08x result = %d size = %u\n",
			prefix, packet->desc, packet->msg_id,
			packet->flags, packet->fxn_id,
			packet->result, packet->data_size);
		param = (struct rppc_param_data *)packet->data;
		for (i = 0; i < (packet->data_size / paramsz); i++) {
			dev_dbg(dev, "%s param[%u] size = %zu data = %zu (0x%08x)",
				prefix, i, param[i].size, param[i].data,
				param[i].data);
		}
		break;
	default:
		/* unknown types already had their header printed above */
		break;
	}
}
  129. /* free any outstanding function calls */
  130. static void rppc_delete_fxns(struct rppc_instance *rpc)
  131. {
  132. struct rppc_function_list *pos, *n;
  133. if (!list_empty(&rpc->fxn_list)) {
  134. mutex_lock(&rpc->lock);
  135. list_for_each_entry_safe(pos, n, &rpc->fxn_list, list) {
  136. list_del(&pos->list);
  137. kfree(pos->function);
  138. kfree(pos);
  139. }
  140. mutex_unlock(&rpc->lock);
  141. }
  142. }
  143. static
  144. struct rppc_function *rppc_find_fxn(struct rppc_instance *rpc, u16 msg_id)
  145. {
  146. struct rppc_function *function = NULL;
  147. struct rppc_function_list *pos, *n;
  148. struct device *dev = rpc->dev;
  149. mutex_lock(&rpc->lock);
  150. list_for_each_entry_safe(pos, n, &rpc->fxn_list, list) {
  151. dev_dbg(dev, "looking for msg %u, found msg %u\n",
  152. msg_id, pos->msg_id);
  153. if (pos->msg_id == msg_id) {
  154. function = pos->function;
  155. list_del(&pos->list);
  156. kfree(pos);
  157. break;
  158. }
  159. }
  160. mutex_unlock(&rpc->lock);
  161. return function;
  162. }
  163. static int rppc_add_fxn(struct rppc_instance *rpc,
  164. struct rppc_function *function, u16 msg_id)
  165. {
  166. struct rppc_function_list *fxn = NULL;
  167. struct device *dev = rpc->dev;
  168. fxn = kzalloc(sizeof(*fxn), GFP_KERNEL);
  169. if (!fxn)
  170. return -ENOMEM;
  171. fxn->function = function;
  172. fxn->msg_id = msg_id;
  173. mutex_lock(&rpc->lock);
  174. list_add(&fxn->list, &rpc->fxn_list);
  175. mutex_unlock(&rpc->lock);
  176. dev_dbg(dev, "added msg id %u to list", msg_id);
  177. return 0;
  178. }
/*
 * Process a RPPC_MSGTYPE_CREATE_RESP message: on success record the
 * remote endpoint address and mark the instance connected, otherwise
 * mark it stale. Wakes the rppc_connect() waiter via reply_arrived.
 */
static
void rppc_handle_create_resp(struct rppc_instance *rpc, char *data, int len)
{
	struct device *dev = rpc->dev;
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)data;
	struct rppc_instance_handle *hdl;
	u32 exp_len = sizeof(*hdl) + sizeof(*hdr);

	/* overall length must match header + handle payload exactly */
	if (len != exp_len) {
		dev_err(dev, "invalid response message length %d (expected %d bytes)",
			len, exp_len);
		/*
		 * NOTE(review): state is written without rpc->lock here and
		 * reply_arrived is not completed, so rppc_connect() runs to
		 * its 5s timeout on this path — confirm this is intended.
		 */
		rpc->state = RPPC_STATE_STALE;
		return;
	}

	hdl = RPPC_PAYLOAD(data, rppc_instance_handle);
	mutex_lock(&rpc->lock);
	/* only transition to connected from a non-stale state */
	if (rpc->state != RPPC_STATE_STALE && hdl->status == 0) {
		rpc->dst = hdl->endpoint_address;
		rpc->state = RPPC_STATE_CONNECTED;
	} else {
		rpc->state = RPPC_STATE_STALE;
	}
	rpc->in_transition = 0;
	dev_dbg(dev, "creation response: status %d addr 0x%x\n",
		hdl->status, hdl->endpoint_address);
	/* wake the connect waiter while still holding the lock */
	complete(&rpc->reply_arrived);
	mutex_unlock(&rpc->lock);
}
/*
 * Process a RPPC_MSGTYPE_DELETE_RESP message: clear the destination
 * address, mark the instance disconnected and wake the rppc_disconnect()
 * waiter. Malformed messages only mark the instance stale.
 */
static
void rppc_handle_delete_resp(struct rppc_instance *rpc, char *data, int len)
{
	struct device *dev = rpc->dev;
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)data;
	struct rppc_instance_handle *hdl;
	u32 exp_len = sizeof(*hdl) + sizeof(*hdr);

	/* overall length must match header + handle payload exactly */
	if (len != exp_len) {
		dev_err(dev, "invalid response message length %d (expected %d bytes)",
			len, exp_len);
		rpc->state = RPPC_STATE_STALE;
		return;
	}
	/* the header-declared payload length must agree as well */
	if (hdr->msg_len != sizeof(*hdl)) {
		dev_err(dev, "disconnect message was incorrect size!\n");
		rpc->state = RPPC_STATE_STALE;
		return;
	}

	hdl = RPPC_PAYLOAD(data, rppc_instance_handle);
	dev_dbg(dev, "deletion response: status %d addr 0x%x\n",
		hdl->status, hdl->endpoint_address);
	mutex_lock(&rpc->lock);
	rpc->dst = 0;
	rpc->state = RPPC_STATE_DISCONNECTED;
	rpc->in_transition = 0;
	/* wake the disconnect waiter while still holding the lock */
	complete(&rpc->reply_arrived);
	mutex_unlock(&rpc->lock);
}
  234. /*
  235. * store the received message and wake up any blocking processes,
  236. * waiting for new data. The allocated buffer would be freed after
  237. * the user-space reads the packet.
  238. */
  239. static void rppc_handle_fxn_resp(struct rppc_instance *rpc, char *data, int len)
  240. {
  241. struct rppc_msg_header *hdr = (struct rppc_msg_header *)data;
  242. struct sk_buff *skb;
  243. char *skbdata;
  244. /* TODO: need to check the response length? */
  245. skb = alloc_skb(hdr->msg_len, GFP_KERNEL);
  246. if (!skb)
  247. return;
  248. skbdata = skb_put(skb, hdr->msg_len);
  249. memcpy(skbdata, hdr->msg_data, hdr->msg_len);
  250. mutex_lock(&rpc->lock);
  251. skb_queue_tail(&rpc->queue, skb);
  252. mutex_unlock(&rpc->lock);
  253. wake_up_interruptible(&rpc->readq);
  254. }
  255. /*
  256. * callback function for processing the different responses
  257. * from the remote processor on a particular rpmsg channel
  258. * instance.
  259. */
  260. static int rppc_cb(struct rpmsg_device *rpdev,
  261. void *data, int len, void *priv, u32 src)
  262. {
  263. struct rppc_msg_header *hdr = data;
  264. struct rppc_instance *rpc = priv;
  265. struct device *dev = rpc->dev;
  266. char *buf = (char *)data;
  267. dev_dbg(dev, "<== incoming msg src %d len %d msg_type %d msg_len %d\n",
  268. src, len, hdr->msg_type, hdr->msg_len);
  269. rppc_print_msg(rpc, "RX:", buf);
  270. if (len <= sizeof(*hdr)) {
  271. dev_err(dev, "message truncated\n");
  272. rpc->state = RPPC_STATE_STALE;
  273. return -EINVAL;
  274. }
  275. switch (hdr->msg_type) {
  276. case RPPC_MSGTYPE_CREATE_RESP:
  277. rppc_handle_create_resp(rpc, data, len);
  278. break;
  279. case RPPC_MSGTYPE_DELETE_RESP:
  280. rppc_handle_delete_resp(rpc, data, len);
  281. break;
  282. case RPPC_MSGTYPE_FUNCTION_CALL:
  283. case RPPC_MSGTYPE_FUNCTION_RET:
  284. rppc_handle_fxn_resp(rpc, data, len);
  285. break;
  286. default:
  287. dev_warn(dev, "unexpected msg type: %d\n", hdr->msg_type);
  288. break;
  289. }
  290. return 0;
  291. }
  292. /*
  293. * send a connection request to the remote rpc connection service. Use
  294. * the new local address created during .open for this instance as the
  295. * source address to complete the connection.
  296. */
  297. static int rppc_connect(struct rppc_instance *rpc,
  298. struct rppc_create_instance *connect)
  299. {
  300. int ret = 0;
  301. u32 len = 0;
  302. char kbuf[512];
  303. struct rppc_device *rppcdev = rpc->rppcdev;
  304. struct rppc_msg_header *hdr = (struct rppc_msg_header *)&kbuf[0];
  305. if (rpc->state == RPPC_STATE_CONNECTED) {
  306. dev_dbg(rpc->dev, "endpoint already connected\n");
  307. return -EISCONN;
  308. }
  309. hdr->msg_type = RPPC_MSGTYPE_CREATE_REQ;
  310. hdr->msg_len = sizeof(*connect);
  311. memcpy(hdr->msg_data, connect, hdr->msg_len);
  312. len = sizeof(struct rppc_msg_header) + hdr->msg_len;
  313. init_completion(&rpc->reply_arrived);
  314. rpc->in_transition = 1;
  315. ret = rpmsg_send_offchannel(rppcdev->rpdev->ept, rpc->ept->addr,
  316. rppcdev->rpdev->dst, (char *)kbuf, len);
  317. if (ret > 0) {
  318. dev_err(rpc->dev, "rpmsg_send failed: %d\n", ret);
  319. return ret;
  320. }
  321. ret = wait_for_completion_interruptible_timeout(&rpc->reply_arrived,
  322. msecs_to_jiffies(5000));
  323. if (rpc->state == RPPC_STATE_CONNECTED)
  324. return 0;
  325. if (rpc->state == RPPC_STATE_STALE)
  326. return -ENXIO;
  327. if (ret > 0) {
  328. dev_err(rpc->dev, "premature wakeup: %d\n", ret);
  329. return -EIO;
  330. }
  331. return -ETIMEDOUT;
  332. }
/*
 * Send a delete request for this instance's remote endpoint and wait for
 * the remote side to acknowledge. No-op unless the instance is connected.
 */
static void rppc_disconnect(struct rppc_instance *rpc)
{
	int ret;
	size_t len;
	char kbuf[512];
	struct rppc_device *rppcdev = rpc->rppcdev;
	struct rppc_msg_header *hdr = (struct rppc_msg_header *)&kbuf[0];
	struct rppc_instance_handle *handle =
			RPPC_PAYLOAD(kbuf, rppc_instance_handle);

	/* only a connected instance has a remote endpoint to tear down */
	if (rpc->state != RPPC_STATE_CONNECTED)
		return;

	hdr->msg_type = RPPC_MSGTYPE_DELETE_REQ;
	/* payload is just the endpoint address word of the handle */
	hdr->msg_len = sizeof(u32);
	handle->endpoint_address = rpc->dst;
	handle->status = 0;
	len = sizeof(struct rppc_msg_header) + hdr->msg_len;

	dev_dbg(rpc->dev, "disconnecting from RPC service at %d\n",
		rpc->dst);
	ret = rpmsg_send_offchannel(rppcdev->rpdev->ept, rpc->ept->addr,
				    rppcdev->rpdev->dst, kbuf, len);
	if (ret)
		dev_err(rpc->dev, "rpmsg_send failed: %d\n", ret);

	/*
	 * Block (interruptibly) until rppc_handle_delete_resp() completes
	 * reply_arrived, so the endpoint is not destroyed underneath an
	 * in-flight response. (The historical comment here claimed we do
	 * not wait; the code does.)
	 */
	wait_for_completion_interruptible(&rpc->reply_arrived);
}
/*
 * RPPC_IOC_BUFREGISTER handler: import a user-supplied array of buffer
 * fds and register each one with this rpc instance for later address
 * translation. All-or-nothing: if any fd is invalid, already registered,
 * or fails to import, every buffer registered by this call is released
 * again and the error is returned.
 *
 * @arg: user pointer to a struct rppc_buf_fds { num, fds }.
 */
static int rppc_register_buffers(struct rppc_instance *rpc,
				 unsigned long arg)
{
	struct rppc_buf_fds data;
	int *fds = NULL;
	struct rppc_dma_buf **bufs = NULL;
	struct rppc_dma_buf *tmp;
	int i = 0, ret = 0;

	if (copy_from_user(&data, (char __user *)arg, sizeof(data)))
		return -EFAULT;

	/* impose a maximum number of buffers for now */
	if (data.num > RPPC_MAX_REG_FDS)
		return -EINVAL;

	fds = kcalloc(data.num, sizeof(*fds), GFP_KERNEL);
	if (!fds)
		return -ENOMEM;
	if (copy_from_user(fds, (char __user *)data.fds,
			   sizeof(*fds) * data.num)) {
		ret = -EFAULT;
		goto free_fds;
	}

	/* pre-check: every fd must be open and not yet registered */
	for (i = 0; i < data.num; i++) {
		rcu_read_lock();
		if (!fcheck(fds[i])) {
			rcu_read_unlock();
			ret = -EBADF;
			goto free_fds;
		}
		rcu_read_unlock();
		tmp = rppc_find_dmabuf(rpc, fds[i]);
		if (!IS_ERR_OR_NULL(tmp)) {
			ret = -EEXIST;
			goto free_fds;
		}
	}

	bufs = kcalloc(data.num, sizeof(*bufs), GFP_KERNEL);
	if (!bufs) {
		ret = -ENOMEM;
		goto free_fds;
	}

	for (i = 0; i < data.num; i++) {
		bufs[i] = rppc_alloc_dmabuf(rpc, fds[i], false);
		if (IS_ERR(bufs[i])) {
			ret = PTR_ERR(bufs[i]);
			break;
		}
	}
	/* all registered: keep the buffers, free only the scratch arrays */
	if (i == data.num)
		goto free_bufs;

	/* partial failure: unwind the buffers registered so far */
	for (i -= 1; i >= 0; i--)
		rppc_free_dmabuf(bufs[i]->id, bufs[i], rpc);

free_bufs:
	kfree(bufs);
free_fds:
	kfree(fds);
	return ret;
}
  418. static int rppc_unregister_buffers(struct rppc_instance *rpc,
  419. unsigned long arg)
  420. {
  421. struct rppc_buf_fds data;
  422. int *fds = NULL;
  423. struct rppc_dma_buf **bufs = NULL;
  424. int i = 0, ret = 0;
  425. if (copy_from_user(&data, (char __user *)arg, sizeof(data)))
  426. return -EFAULT;
  427. /* impose a maximum number of buffers for now */
  428. if (data.num > RPPC_MAX_REG_FDS)
  429. return -EINVAL;
  430. fds = kcalloc(data.num, sizeof(*fds), GFP_KERNEL);
  431. if (!fds)
  432. return -ENOMEM;
  433. if (copy_from_user(fds, (char __user *)data.fds,
  434. sizeof(*fds) * data.num)) {
  435. ret = -EFAULT;
  436. goto free_fds;
  437. }
  438. bufs = kcalloc(data.num, sizeof(*bufs), GFP_KERNEL);
  439. if (!bufs) {
  440. ret = -ENOMEM;
  441. goto free_fds;
  442. }
  443. for (i = 0; i < data.num; i++) {
  444. rcu_read_lock();
  445. if (!fcheck(fds[i])) {
  446. rcu_read_unlock();
  447. ret = -EBADF;
  448. goto free_bufs;
  449. }
  450. rcu_read_unlock();
  451. bufs[i] = rppc_find_dmabuf(rpc, fds[i]);
  452. if (IS_ERR_OR_NULL(bufs[i])) {
  453. ret = -EEXIST;
  454. goto free_bufs;
  455. }
  456. }
  457. for (i = 0; i < data.num; i++)
  458. rppc_free_dmabuf(bufs[i]->id, bufs[i], rpc);
  459. free_bufs:
  460. kfree(bufs);
  461. free_fds:
  462. kfree(fds);
  463. return ret;
  464. }
/*
 * create a new rpc instance that a user-space client can use to invoke
 * remote functions. A new local address would be created and tied with
 * this instance for uniquely identifying the messages communicated by
 * this instance with the remote side.
 *
 * The function is blocking if there is no underlying connection manager
 * channel, unless the device is opened with non-blocking flags specifically.
 */
static int rppc_open(struct inode *inode, struct file *filp)
{
	struct rppc_device *rppcdev;
	struct rppc_instance *rpc;
	struct rpmsg_channel_info chinfo = {};

	rppcdev = container_of(inode->i_cdev, struct rppc_device, cdev);

	/*
	 * no rpmsg channel yet: O_NONBLOCK opens fail immediately, blocking
	 * opens wait on the device's completion (also -EBUSY if interrupted)
	 */
	if (!rppcdev->rpdev)
		if ((filp->f_flags & O_NONBLOCK) ||
		    wait_for_completion_interruptible(&rppcdev->comp))
			return -EBUSY;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return -ENOMEM;

	mutex_init(&rpc->lock);
	skb_queue_head_init(&rpc->queue);
	init_waitqueue_head(&rpc->readq);
	INIT_LIST_HEAD(&rpc->fxn_list);
	idr_init(&rpc->dma_idr);
	rpc->in_transition = 0;
	rpc->msg_id = 0;
	rpc->state = RPPC_STATE_DISCONNECTED;
	rpc->rppcdev = rppcdev;
	rpc->dev = get_device(rppcdev->dev);

	/* private endpoint: its auto-assigned address tags our messages */
	chinfo.src = RPMSG_ADDR_ANY;
	chinfo.dst = RPMSG_ADDR_ANY;
	rpc->ept = rpmsg_create_ept(rppcdev->rpdev, rppc_cb, rpc, chinfo);
	if (!rpc->ept) {
		dev_err(rpc->dev, "create ept failed\n");
		put_device(rpc->dev);
		kfree(rpc);
		return -ENOMEM;
	}
	filp->private_data = rpc;

	mutex_lock(&rppcdev->lock);
	list_add(&rpc->list, &rppcdev->instances);
	mutex_unlock(&rppcdev->lock);

	dev_dbg(rpc->dev, "local addr assigned: 0x%x\n", rpc->ept->addr);
	return 0;
}
  513. /*
  514. * release and free all the resources associated with a particular rpc
  515. * instance. This includes the data structures maintaining the current
  516. * outstanding function invocations, and all the buffers registered for
  517. * use with this instance. Send a disconnect message and cleanup the
  518. * local end-point only if the instance is in a normal state, with the
  519. * remote connection manager functional.
  520. */
  521. static int rppc_release(struct inode *inode, struct file *filp)
  522. {
  523. struct rppc_instance *rpc = filp->private_data;
  524. struct rppc_device *rppcdev = rpc->rppcdev;
  525. struct sk_buff *skb = NULL;
  526. dev_dbg(rpc->dev, "releasing Instance %p, in state %d\n", rpc,
  527. rpc->state);
  528. if (rpc->state != RPPC_STATE_STALE) {
  529. if (rpc->ept) {
  530. rppc_disconnect(rpc);
  531. rpmsg_destroy_ept(rpc->ept);
  532. rpc->ept = NULL;
  533. }
  534. }
  535. rppc_delete_fxns(rpc);
  536. while (!skb_queue_empty(&rpc->queue)) {
  537. skb = skb_dequeue(&rpc->queue);
  538. kfree_skb(skb);
  539. }
  540. mutex_lock(&rpc->lock);
  541. idr_for_each(&rpc->dma_idr, rppc_free_dmabuf, rpc);
  542. idr_destroy(&rpc->dma_idr);
  543. mutex_unlock(&rpc->lock);
  544. mutex_lock(&rppcdev->lock);
  545. list_del(&rpc->list);
  546. mutex_unlock(&rppcdev->lock);
  547. dev_dbg(rpc->dev, "instance %p has been deleted!\n", rpc);
  548. if (list_empty(&rppcdev->instances))
  549. dev_dbg(rpc->dev, "all instances have been removed!\n");
  550. put_device(rpc->dev);
  551. kfree(rpc);
  552. return 0;
  553. }
  554. static long rppc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  555. {
  556. struct rppc_instance *rpc = filp->private_data;
  557. struct rppc_create_instance connect;
  558. int ret = 0;
  559. dev_dbg(rpc->dev, "%s: cmd %d, arg 0x%lx\n", __func__, cmd, arg);
  560. if (_IOC_TYPE(cmd) != RPPC_IOC_MAGIC)
  561. return -ENOTTY;
  562. if (_IOC_NR(cmd) > RPPC_IOC_MAXNR)
  563. return -ENOTTY;
  564. switch (cmd) {
  565. case RPPC_IOC_CREATE:
  566. ret = copy_from_user(&connect, (char __user *)arg,
  567. sizeof(connect));
  568. if (ret) {
  569. dev_err(rpc->dev, "%s: %d: copy_from_user fail: %d\n",
  570. __func__, _IOC_NR(cmd), ret);
  571. ret = -EFAULT;
  572. } else {
  573. connect.name[sizeof(connect.name) - 1] = '\0';
  574. ret = rppc_connect(rpc, &connect);
  575. }
  576. break;
  577. case RPPC_IOC_BUFREGISTER:
  578. ret = rppc_register_buffers(rpc, arg);
  579. break;
  580. case RPPC_IOC_BUFUNREGISTER:
  581. ret = rppc_unregister_buffers(rpc, arg);
  582. break;
  583. default:
  584. dev_err(rpc->dev, "unhandled ioctl cmd: %d\n", cmd);
  585. break;
  586. }
  587. return ret;
  588. }
/*
 * Read one function-return record from this instance's response queue.
 *
 * @len must be exactly sizeof(struct rppc_function_return): larger reads
 * get -EOVERFLOW, smaller -EINVAL. Blocks until a response is queued
 * unless O_NONBLOCK is set (then -EAGAIN). Before copying the result to
 * user-space, the matching outstanding call (if any) has its remote
 * device-address pointers translated back to user-space addresses.
 */
static ssize_t rppc_read(struct file *filp, char __user *buf, size_t len,
			 loff_t *offp)
{
	struct rppc_instance *rpc = filp->private_data;
	struct rppc_packet *packet = NULL;
	struct rppc_param_data *parameters = NULL;
	struct rppc_function *function = NULL;
	struct rppc_function_return returned;
	struct sk_buff *skb = NULL;
	int ret = 0;
	int use = sizeof(returned);
	DEFINE_WAIT(wait);

	if (mutex_lock_interruptible(&rpc->lock))
		return -ERESTARTSYS;

	/* instance is invalid */
	if (rpc->state == RPPC_STATE_STALE) {
		mutex_unlock(&rpc->lock);
		return -ENXIO;
	}

	/* not yet connected to the remote side */
	if (rpc->state == RPPC_STATE_DISCONNECTED) {
		mutex_unlock(&rpc->lock);
		return -ENOTCONN;
	}

	if (len > use) {
		mutex_unlock(&rpc->lock);
		return -EOVERFLOW;
	}
	if (len < use) {
		mutex_unlock(&rpc->lock);
		return -EINVAL;
	}

	/* TODO: Use the much simpler wait_event_interruptible API */
	while (skb_queue_empty(&rpc->queue)) {
		/* drop the lock before sleeping; re-taken after wakeup */
		mutex_unlock(&rpc->lock);
		/* non-blocking requested ? return now */
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		prepare_to_wait_exclusive(&rpc->readq, &wait,
					  TASK_INTERRUPTIBLE);
		/* re-check after queuing on readq to avoid a lost wakeup */
		if (skb_queue_empty(&rpc->queue) &&
		    rpc->state != RPPC_STATE_STALE)
			schedule();
		finish_wait(&rpc->readq, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
		ret = mutex_lock_interruptible(&rpc->lock);
		if (ret < 0)
			return -ERESTARTSYS;
		if (rpc->state == RPPC_STATE_STALE) {
			mutex_unlock(&rpc->lock);
			return -ENXIO;
		}
		/* make sure state is sane while we waited */
		if (rpc->state != RPPC_STATE_CONNECTED) {
			mutex_unlock(&rpc->lock);
			ret = -EIO;
			goto out;
		}
	}

	skb = skb_dequeue(&rpc->queue);
	if (WARN_ON(!skb)) {
		mutex_unlock(&rpc->lock);
		ret = -EIO;
		goto out;
	}
	mutex_unlock(&rpc->lock);

	packet = (struct rppc_packet *)skb->data;
	parameters = (struct rppc_param_data *)packet->data;

	/*
	 * pull the function memory from the list and untranslate
	 * the remote device address pointers in the packet back
	 * to MPU pointers.
	 */
	function = rppc_find_fxn(rpc, packet->msg_id);
	if (function && function->num_translations > 0) {
		ret = rppc_xlate_buffers(rpc, function, RPPC_RPA_TO_UVA);
		if (ret < 0) {
			dev_err(rpc->dev, "failed to translate back pointers from remote core!\n");
			goto failure;
		}
	}

	returned.fxn_id = RPPC_FXN_MASK(packet->fxn_id);
	returned.status = packet->result;

	if (copy_to_user(buf, &returned, use)) {
		dev_err(rpc->dev, "%s: copy_to_user fail\n", __func__);
		ret = -EFAULT;
	} else {
		ret = use;
	}

failure:
	/* function ownership came from rppc_find_fxn(); may be NULL */
	kfree(function);
	kfree_skb(skb);
out:
	return ret;
}
  685. static ssize_t rppc_write(struct file *filp, const char __user *ubuf,
  686. size_t len, loff_t *offp)
  687. {
  688. struct rppc_instance *rpc = filp->private_data;
  689. struct rppc_device *rppcdev = rpc->rppcdev;
  690. struct device *dev = rpc->dev;
  691. struct rppc_msg_header *hdr = NULL;
  692. struct rppc_function *function = NULL;
  693. struct rppc_packet *packet = NULL;
  694. struct rppc_param_data *parameters = NULL;
  695. char kbuf[512];
  696. int use = 0, ret = 0, param = 0;
  697. u32 sig_idx = 0;
  698. u32 sig_prm = 0;
  699. static u32 rppc_atomic_size[RPPC_PARAM_ATOMIC_MAX] = {
  700. 0, /* RPPC_PARAM_VOID */
  701. 1, /* RPPC_PARAM_S08 */
  702. 1, /* RPPC_PARAM_U08 */
  703. 2, /* RPPC_PARAM_S16 */
  704. 2, /* RPPC_PARAM_U16 */
  705. 4, /* RPPC_PARAM_S32 */
  706. 4, /* RPPC_PARAM_U32 */
  707. 8, /* RPPC_PARAM_S64 */
  708. 8 /* RPPC_PARAM_U64 */
  709. };
  710. if (len < sizeof(*function)) {
  711. ret = -ENOTSUPP;
  712. goto failure;
  713. }
  714. if (len > (sizeof(*function) + RPPC_MAX_TRANSLATIONS *
  715. sizeof(struct rppc_param_translation))) {
  716. ret = -ENOTSUPP;
  717. goto failure;
  718. }
  719. if (rpc->state != RPPC_STATE_CONNECTED) {
  720. ret = -ENOTCONN;
  721. goto failure;
  722. }
  723. function = kzalloc(len, GFP_KERNEL);
  724. if (!function) {
  725. ret = -ENOMEM;
  726. goto failure;
  727. }
  728. if (copy_from_user(function, ubuf, len)) {
  729. ret = -EMSGSIZE;
  730. goto failure;
  731. }
  732. /* increment the message id and wrap if needed */
  733. rpc->msg_id = (rpc->msg_id + 1) & 0xFFFF;
  734. memset(kbuf, 0, sizeof(kbuf));
  735. sig_idx = function->fxn_id + 1;
  736. hdr = (struct rppc_msg_header *)kbuf;
  737. hdr->msg_type = RPPC_MSGTYPE_FUNCTION_CALL;
  738. hdr->msg_len = sizeof(*packet);
  739. packet = RPPC_PAYLOAD(kbuf, rppc_packet);
  740. packet->desc = RPPC_DESC_EXEC_SYNC;
  741. packet->msg_id = rpc->msg_id;
  742. packet->flags = (RPPC_JOBID_DISCRETE << 16) | RPPC_POOLID_DEFAULT;
  743. packet->fxn_id = RPPC_SET_FXN_IDX(function->fxn_id);
  744. packet->result = 0;
  745. packet->data_size = sizeof(*parameters) * function->num_params;
  746. /* check the signatures against what were published */
  747. if (RPPC_SIG_NUM_PARAM(rppcdev->signatures[sig_idx]) !=
  748. function->num_params) {
  749. dev_err(dev, "number of parameters mismatch! params = %u expected = %u\n",
  750. function->num_params,
  751. RPPC_SIG_NUM_PARAM(rppcdev->signatures[sig_idx]));
  752. ret = -EINVAL;
  753. goto failure;
  754. }
  755. /*
  756. * compute the parameter pointer changes last since this will cause the
  757. * cache operations
  758. */
  759. parameters = (struct rppc_param_data *)packet->data;
  760. for (param = 0; param < function->num_params; param++) {
  761. sig_prm = param + 1;
  762. /*
  763. * check to make sure the parameter description matches the
  764. * signature published from the other side.
  765. */
  766. if (function->params[param].type == RPPC_PARAM_TYPE_PTR &&
  767. !RPPC_IS_PTR(
  768. rppcdev->signatures[sig_idx].params[sig_prm].type)) {
  769. dev_err(dev, "parameter %u Pointer Type Mismatch sig type:%x func %u\n",
  770. param, rppcdev->signatures[sig_idx].
  771. params[sig_prm].type, sig_idx);
  772. ret = -EINVAL;
  773. goto failure;
  774. } else if (param > 0 && function->params[param].type ==
  775. RPPC_PARAM_TYPE_ATOMIC) {
  776. if (!RPPC_IS_ATOMIC(
  777. rppcdev->signatures[sig_idx].params[sig_prm].type)) {
  778. dev_err(dev, "parameter Atomic Type Mismatch\n");
  779. ret = -EINVAL;
  780. goto failure;
  781. } else {
  782. u32 t = rppcdev->signatures[sig_idx].
  783. params[sig_prm].type;
  784. if (rppc_atomic_size[t] !=
  785. function->params[param].size) {
  786. dev_err(dev, "size mismatch! u:%u sig:%u\n",
  787. function->params[param].size,
  788. rppc_atomic_size[t]);
  789. ret = -EINVAL;
  790. goto failure;
  791. }
  792. }
  793. }
  794. parameters[param].size = function->params[param].size;
  795. /* check the type and lookup if it's a pointer */
  796. if (function->params[param].type == RPPC_PARAM_TYPE_PTR) {
  797. /*
  798. * internally the buffer translations takes care of the
  799. * offsets.
  800. */
  801. int fd = function->params[param].fd;
  802. parameters[param].data = (size_t)rppc_buffer_lookup(
  803. rpc,
  804. (virt_addr_t)
  805. function->
  806. params[param].data,
  807. (virt_addr_t)
  808. function->
  809. params[param].base,
  810. fd);
  811. } else if (function->params[param].type ==
  812. RPPC_PARAM_TYPE_ATOMIC) {
  813. parameters[param].data = function->params[param].data;
  814. } else {
  815. ret = -ENOTSUPP;
  816. goto failure;
  817. }
  818. }
  819. /* compute the size of the rpmsg packet */
  820. use = sizeof(*hdr) + hdr->msg_len + packet->data_size;
  821. /* failed to provide the translation data */
  822. if (function->num_translations > 0 &&
  823. len < (sizeof(*function) + (function->num_translations *
  824. sizeof(struct rppc_param_translation)))) {
  825. ret = -EINVAL;
  826. goto failure;
  827. }
  828. /*
  829. * if there are pointers to translate for the user, do so now.
  830. * alter our copy of function and the user's parameters so that
  831. * the proper pointers can be sent to remote cores
  832. */
  833. if (function->num_translations > 0) {
  834. ret = rppc_xlate_buffers(rpc, function, RPPC_UVA_TO_RPA);
  835. if (ret < 0) {
  836. dev_err(dev, "failed to translate all pointers for remote core!\n");
  837. goto failure;
  838. }
  839. }
  840. ret = rppc_add_fxn(rpc, function, rpc->msg_id);
  841. if (ret < 0) {
  842. rppc_xlate_buffers(rpc, function, RPPC_RPA_TO_UVA);
  843. goto failure;
  844. }
  845. rppc_print_msg(rpc, "TX:", kbuf);
  846. ret = rpmsg_send_offchannel(rppcdev->rpdev->ept, rpc->ept->addr,
  847. rpc->dst, kbuf, use);
  848. if (ret) {
  849. dev_err(dev, "rpmsg_send failed: %d\n", ret);
  850. rppc_find_fxn(rpc, rpc->msg_id);
  851. rppc_xlate_buffers(rpc, function, RPPC_RPA_TO_UVA);
  852. goto failure;
  853. }
  854. dev_dbg(dev, "==> sent msg to remote endpoint %u\n", rpc->dst);
  855. failure:
  856. if (ret >= 0)
  857. ret = len;
  858. else
  859. kfree(function);
  860. return ret;
  861. }
  862. static unsigned int rppc_poll(struct file *filp, struct poll_table_struct *wait)
  863. {
  864. struct rppc_instance *rpc = filp->private_data;
  865. unsigned int mask = 0;
  866. if (mutex_lock_interruptible(&rpc->lock))
  867. return -ERESTARTSYS;
  868. poll_wait(filp, &rpc->readq, wait);
  869. if (rpc->state == RPPC_STATE_STALE) {
  870. mask = POLLERR;
  871. goto out;
  872. }
  873. /* if the queue is not empty set the poll bit correctly */
  874. if (!skb_queue_empty(&rpc->queue))
  875. mask |= (POLLIN | POLLRDNORM);
  876. /* TODO: writes are deemed to be successful always, fix this later */
  877. if (true)
  878. mask |= POLLOUT | POLLWRNORM;
  879. out:
  880. mutex_unlock(&rpc->lock);
  881. return mask;
  882. }
/* character-device file operations exposed for each rpmsg-rpc service */
static const struct file_operations rppc_fops = {
	.owner = THIS_MODULE,
	.open = rppc_open,
	.release = rppc_release,
	.unlocked_ioctl = rppc_ioctl,
	.read = rppc_read,
	.write = rppc_write,
	.poll = rppc_poll,
};
  892. /*
  893. * send a function query message, the sysfs entry will be created
  894. * during the processing of the response message
  895. */
  896. static int rppc_query_function(struct rpmsg_device *rpdev)
  897. {
  898. int ret = 0;
  899. u32 len = 0;
  900. char kbuf[512];
  901. struct rppc_device *rppcdev = dev_get_drvdata(&rpdev->dev);
  902. struct rppc_msg_header *hdr = (struct rppc_msg_header *)&kbuf[0];
  903. struct rppc_query_function *fxn_info =
  904. (struct rppc_query_function *)hdr->msg_data;
  905. if (rppcdev->cur_func >= rppcdev->num_funcs)
  906. return -EINVAL;
  907. hdr->msg_type = RPPC_MSGTYPE_FUNCTION_QUERY;
  908. hdr->msg_len = sizeof(*fxn_info);
  909. len = sizeof(*hdr) + hdr->msg_len;
  910. fxn_info->info_type = RPPC_INFOTYPE_FUNC_SIGNATURE;
  911. fxn_info->fxn_id = rppcdev->cur_func++;
  912. dev_dbg(&rpdev->dev, "sending function query type %u for function %u\n",
  913. fxn_info->info_type, fxn_info->fxn_id);
  914. ret = rpmsg_send(rpdev->ept, (char *)kbuf, len);
  915. if (ret) {
  916. dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
  917. return ret;
  918. }
  919. return 0;
  920. }
  921. static void
  922. rppc_handle_devinfo_resp(struct rpmsg_device *rpdev, char *data, int len)
  923. {
  924. struct rppc_device *rppcdev = dev_get_drvdata(&rpdev->dev);
  925. struct rppc_device_info *info;
  926. u32 exp_len = sizeof(*info) + sizeof(struct rppc_msg_header);
  927. if (len != exp_len) {
  928. dev_err(&rpdev->dev, "invalid message length %d (expected %d bytes)",
  929. len, exp_len);
  930. return;
  931. }
  932. info = RPPC_PAYLOAD(data, rppc_device_info);
  933. if (info->num_funcs > RPPC_MAX_NUM_FUNCS) {
  934. rppcdev->num_funcs = 0;
  935. dev_err(&rpdev->dev, "number of functions (%d) exceeds the limit supported(%d)\n",
  936. info->num_funcs, RPPC_MAX_NUM_FUNCS);
  937. return;
  938. }
  939. rppcdev->num_funcs = info->num_funcs;
  940. rppcdev->signatures = kcalloc(rppcdev->num_funcs,
  941. sizeof(struct rppc_func_signature),
  942. GFP_KERNEL);
  943. if (!rppcdev->signatures)
  944. return;
  945. dev_info(&rpdev->dev, "published functions = %u\n", info->num_funcs);
  946. /* send the function query for first function */
  947. if (rppc_query_function(rpdev) == -EINVAL)
  948. dev_err(&rpdev->dev, "failed to get a reasonable number of functions!\n");
  949. }
  950. static void
  951. rppc_handle_fxninfo_resp(struct rpmsg_device *rpdev, char *data, int len)
  952. {
  953. struct rppc_device *rppcdev = dev_get_drvdata(&rpdev->dev);
  954. struct rppc_query_function *fxn_info;
  955. struct rppc_func_signature *signature;
  956. u32 exp_len = sizeof(*fxn_info) + sizeof(struct rppc_msg_header);
  957. int i;
  958. if (len != exp_len) {
  959. dev_err(&rpdev->dev, "invalid message length %d (expected %d bytes)",
  960. len, exp_len);
  961. return;
  962. }
  963. fxn_info = RPPC_PAYLOAD(data, rppc_query_function);
  964. dev_dbg(&rpdev->dev, "response for function query of type %u\n",
  965. fxn_info->info_type);
  966. switch (fxn_info->info_type) {
  967. case RPPC_INFOTYPE_FUNC_SIGNATURE:
  968. if (fxn_info->fxn_id >= rppcdev->num_funcs) {
  969. dev_err(&rpdev->dev, "function(%d) is out of range!\n",
  970. fxn_info->fxn_id);
  971. break;
  972. }
  973. memcpy(&rppcdev->signatures[fxn_info->fxn_id],
  974. &fxn_info->info.signature, sizeof(*signature));
  975. /* TODO: delete these debug prints later */
  976. dev_dbg(&rpdev->dev, "received info for func(%d); name = %s #params = %u\n",
  977. fxn_info->fxn_id, fxn_info->info.signature.name,
  978. fxn_info->info.signature.num_param);
  979. signature = &rppcdev->signatures[fxn_info->fxn_id];
  980. for (i = 0; i < signature->num_param; i++) {
  981. dev_dbg(&rpdev->dev, "param[%u] type = %x dir = %u\n",
  982. i, signature->params[i].type,
  983. signature->params[i].direction);
  984. }
  985. /* query again until we've hit our limit */
  986. if (rppc_query_function(rpdev) == -EINVAL) {
  987. dev_dbg(&rpdev->dev, "reached end of function list!\n");
  988. rppc_create_sysfs(rppcdev);
  989. }
  990. break;
  991. default:
  992. dev_err(&rpdev->dev, "unrecognized fxn query response %u\n",
  993. fxn_info->info_type);
  994. break;
  995. }
  996. }
  997. static int rppc_driver_cb(struct rpmsg_device *rpdev, void *data, int len,
  998. void *priv, u32 src)
  999. {
  1000. struct rppc_msg_header *hdr = data;
  1001. char *buf = (char *)data;
  1002. dev_dbg(&rpdev->dev, "<== incoming drv msg src %d len %d msg_type %d msg_len %d\n",
  1003. src, len, hdr->msg_type, hdr->msg_len);
  1004. if (len <= sizeof(*hdr)) {
  1005. dev_err(&rpdev->dev, "message truncated\n");
  1006. return -EINVAL;
  1007. }
  1008. switch (hdr->msg_type) {
  1009. case RPPC_MSGTYPE_DEVINFO_RESP:
  1010. rppc_handle_devinfo_resp(rpdev, buf, len);
  1011. break;
  1012. case RPPC_MSGTYPE_FUNCTION_INFO:
  1013. rppc_handle_fxninfo_resp(rpdev, buf, len);
  1014. break;
  1015. default:
  1016. dev_err(&rpdev->dev, "unrecognized message type %u\n",
  1017. hdr->msg_type);
  1018. break;
  1019. }
  1020. return 0;
  1021. }
/*
 * idr_for_each() match callback: compares the stored device description
 * against the channel name passed in 'data'. A non-zero return stops the
 * iteration and is propagated back to the idr_for_each() caller, which
 * casts it back to a struct rppc_device pointer.
 *
 * NOTE(review): smuggling the pointer through the 'int' return value
 * truncates it on 64-bit kernels; this only works on 32-bit targets —
 * verify before building for a 64-bit platform.
 */
static int find_rpccdev_by_name(int id, void *p, void *data)
{
	struct rppc_device *rppcdev = p;

	return strcmp(rppcdev->desc, data) ? 0 : (int)p;
}
  1027. /*
  1028. * send a device info query message, the device will be created
  1029. * during the processing of the response message
  1030. */
  1031. static int rppc_device_create(struct rpmsg_device *rpdev)
  1032. {
  1033. int ret;
  1034. u32 len;
  1035. char kbuf[512];
  1036. struct rppc_msg_header *hdr = (struct rppc_msg_header *)&kbuf[0];
  1037. hdr->msg_type = RPPC_MSGTYPE_DEVINFO_REQ;
  1038. hdr->msg_len = 0;
  1039. len = sizeof(*hdr);
  1040. ret = rpmsg_send(rpdev->ept, (char *)kbuf, len);
  1041. if (ret) {
  1042. dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
  1043. return ret;
  1044. }
  1045. return 0;
  1046. }
  1047. static int rppc_probe(struct rpmsg_device *rpdev)
  1048. {
  1049. int ret, minor;
  1050. int major = MAJOR(rppc_dev);
  1051. struct rppc_device *rppcdev = NULL;
  1052. dev_t dev;
  1053. char namedesc[RPMSG_NAME_SIZE];
  1054. dev_info(&rpdev->dev, "probing service %s with src %u dst %u\n",
  1055. rpdev->desc, rpdev->src, rpdev->dst);
  1056. mutex_lock(&rppc_devices_lock);
  1057. snprintf(namedesc, sizeof(namedesc), "%s", rpdev->desc);
  1058. rppcdev = (struct rppc_device *)idr_for_each(&rppc_devices,
  1059. find_rpccdev_by_name, namedesc);
  1060. if (rppcdev) {
  1061. rppcdev->rpdev = rpdev;
  1062. dev_set_drvdata(&rpdev->dev, rppcdev);
  1063. goto serv_up;
  1064. }
  1065. rppcdev = kzalloc(sizeof(*rppcdev), GFP_KERNEL);
  1066. if (!rppcdev) {
  1067. ret = -ENOMEM;
  1068. goto exit;
  1069. }
  1070. minor = idr_alloc(&rppc_devices, rppcdev, 0, 0, GFP_KERNEL);
  1071. if (minor < 0) {
  1072. ret = minor;
  1073. dev_err(&rpdev->dev, "failed to get a minor number: %d\n", ret);
  1074. goto free_rppcdev;
  1075. }
  1076. INIT_LIST_HEAD(&rppcdev->instances);
  1077. mutex_init(&rppcdev->lock);
  1078. init_completion(&rppcdev->comp);
  1079. rppcdev->minor = minor;
  1080. rppcdev->rpdev = rpdev;
  1081. strncpy(rppcdev->desc, namedesc, RPMSG_NAME_SIZE);
  1082. dev_set_drvdata(&rpdev->dev, rppcdev);
  1083. cdev_init(&rppcdev->cdev, &rppc_fops);
  1084. rppcdev->cdev.owner = THIS_MODULE;
  1085. dev = MKDEV(major, minor);
  1086. ret = cdev_add(&rppcdev->cdev, dev, 1);
  1087. if (ret) {
  1088. dev_err(&rpdev->dev, "cdev_add failed: %d\n", ret);
  1089. goto free_id;
  1090. }
  1091. serv_up:
  1092. rppcdev->dev = device_create(rppc_class, &rpdev->dev,
  1093. MKDEV(major, rppcdev->minor), NULL,
  1094. namedesc);
  1095. if (IS_ERR(rppcdev->dev)) {
  1096. int ret = PTR_ERR(rppcdev->dev);
  1097. dev_err(&rpdev->dev, "device_create failed: %d\n", ret);
  1098. goto free_cdev;
  1099. }
  1100. dev_set_drvdata(rppcdev->dev, rppcdev);
  1101. ret = rppc_device_create(rpdev);
  1102. if (ret) {
  1103. dev_err(&rpdev->dev, "failed to query channel info: %d\n", ret);
  1104. dev = MKDEV(MAJOR(rppc_dev), rppcdev->minor);
  1105. goto free_dev;
  1106. }
  1107. complete_all(&rppcdev->comp);
  1108. dev_dbg(&rpdev->dev, "new RPPC connection srv channel: %u -> %u!\n",
  1109. rpdev->src, rpdev->dst);
  1110. mutex_unlock(&rppc_devices_lock);
  1111. return 0;
  1112. free_dev:
  1113. device_destroy(rppc_class, dev);
  1114. free_cdev:
  1115. cdev_del(&rppcdev->cdev);
  1116. free_id:
  1117. idr_remove(&rppc_devices, rppcdev->minor);
  1118. free_rppcdev:
  1119. kfree(rppcdev);
  1120. exit:
  1121. mutex_unlock(&rppc_devices_lock);
  1122. return ret;
  1123. }
/*
 * rpmsg device removal callback. When no instances are open the whole
 * device (sysfs entries, device node, cdev, idr entry, rppcdev) is torn
 * down. When instances exist this is treated as remote-core recovery:
 * the rppcdev is retained for reuse, every open instance is marked
 * stale, its endpoint is destroyed, and blocked readers/callers are
 * woken so they can observe the stale state.
 */
static void rppc_remove(struct rpmsg_device *rpdev)
{
	struct rppc_device *rppcdev = dev_get_drvdata(&rpdev->dev);
	struct rppc_instance *rpc = NULL;
	int major = MAJOR(rppc_dev);

	dev_dbg(&rpdev->dev, "removing rpmsg-rpc device %u.%u\n",
		major, rppcdev->minor);

	mutex_lock(&rppc_devices_lock);

	/* drop the published function signatures and their sysfs entries */
	rppc_remove_sysfs(rppcdev);
	rppcdev->cur_func = 0;
	kfree(rppcdev->signatures);

	/* if there are no instances in the list, just teardown */
	if (list_empty(&rppcdev->instances)) {
		dev_dbg(&rpdev->dev, "no instances, removing device!\n");
		device_destroy(rppc_class, MKDEV(major, rppcdev->minor));
		cdev_del(&rppcdev->cdev);
		idr_remove(&rppc_devices, rppcdev->minor);
		kfree(rppcdev);
		mutex_unlock(&rppc_devices_lock);
		return;
	}

	/*
	 * if there are rpc instances that means that this is a recovery
	 * operation. Don't clean the rppcdev, and retain it for reuse.
	 * mark each instance as invalid, and complete any on-going transactions
	 */
	init_completion(&rppcdev->comp);
	mutex_lock(&rppcdev->lock);
	list_for_each_entry(rpc, &rppcdev->instances, list) {
		dev_dbg(&rpdev->dev, "instance %p in state %d\n",
			rpc, rpc->state);
		/* release a caller still blocked waiting for its RPC reply */
		if ((rpc->state == RPPC_STATE_CONNECTED) && rpc->in_transition)
			complete_all(&rpc->reply_arrived);
		rpc->state = RPPC_STATE_STALE;
		if (rpc->ept) {
			rpmsg_destroy_ept(rpc->ept);
			rpc->ept = NULL;
		}
		/* wake readers so they see the STALE state */
		wake_up_interruptible(&rpc->readq);
	}
	device_destroy(rppc_class, MKDEV(major, rppcdev->minor));
	rppcdev->dev = NULL;
	rppcdev->rpdev = NULL;
	mutex_unlock(&rppcdev->lock);
	mutex_unlock(&rppc_devices_lock);

	dev_dbg(&rpdev->dev, "removed rpmsg rpmsg-rpc service %s\n",
		rpdev->desc);
}
/* rpmsg channel names serviced by this driver */
static struct rpmsg_device_id rppc_id_table[] = {
	{.name = "rpmsg-rpc"},
	{},
};

/* rpmsg driver hookup: per-channel probe/remove plus the rx callback */
static struct rpmsg_driver rppc_driver = {
	.drv.name = KBUILD_MODNAME,
	.id_table = rppc_id_table,
	.probe = rppc_probe,
	.remove = rppc_remove,
	.callback = rppc_driver_cb,
};
  1183. static int __init rppc_init(void)
  1184. {
  1185. int ret;
  1186. ret = alloc_chrdev_region(&rppc_dev, 0, RPPC_MAX_DEVICES,
  1187. KBUILD_MODNAME);
  1188. if (ret) {
  1189. pr_err("alloc_chrdev_region failed: %d\n", ret);
  1190. goto out;
  1191. }
  1192. rppc_class = class_create(THIS_MODULE, KBUILD_MODNAME);
  1193. if (IS_ERR(rppc_class)) {
  1194. ret = PTR_ERR(rppc_class);
  1195. pr_err("class_create failed: %d\n", ret);
  1196. goto unreg_region;
  1197. }
  1198. ret = register_rpmsg_driver(&rppc_driver);
  1199. if (ret) {
  1200. pr_err("register_rpmsg_driver failed: %d\n", ret);
  1201. goto destroy_class;
  1202. }
  1203. return 0;
  1204. destroy_class:
  1205. class_destroy(rppc_class);
  1206. unreg_region:
  1207. unregister_chrdev_region(rppc_dev, RPPC_MAX_DEVICES);
  1208. out:
  1209. return ret;
  1210. }
/* module exit: tear down in the reverse order of rppc_init() */
static void __exit rppc_exit(void)
{
	unregister_rpmsg_driver(&rppc_driver);
	class_destroy(rppc_class);
	unregister_chrdev_region(rppc_dev, RPPC_MAX_DEVICES);
}
module_init(rppc_init);
module_exit(rppc_exit);

/* enables module autoloading when a matching rpmsg channel is announced */
MODULE_DEVICE_TABLE(rpmsg, rppc_id_table);
MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
MODULE_AUTHOR("Erik Rainey <erik.rainey@ti.com>");
MODULE_DESCRIPTION("Remote Processor Procedure Call Driver");
MODULE_ALIAS("rpmsg:rpmsg-rpc");
MODULE_LICENSE("GPL v2");