rpmsg_rpc_dmabuf.c

/*
 * Remote Processor Procedure Call Driver
 *
 * Copyright (C) 2012-2017 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Erik Rainey <erik.rainey@ti.com>
 * Suman Anna <s-anna@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dma-buf.h>
#include <linux/rpmsg_rpc.h>

#include "rpmsg_rpc_internal.h"
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)

/*
 * TODO: Remove tiler_stride_from_region & rppc_recalc_off from here, and
 *	 rely on OMAPDRM/TILER code for OMAP dependencies
 */

/**
 * tiler_stride_from_region() - calculate stride value for OMAP TILER
 * @localphys: local physical address
 *
 * Returns the stride value as seen by remote processors based on the local
 * address given to the function. This stride value is calculated from the
 * actual bus address, and it is assumed that the TILER regions are mapped
 * in a linear fashion.
 *
 * The physical address range decoding of local addresses is as follows:
 *
 * 0x60000000 - 0x67FFFFFF : 8-bit region (Stride is 16K bytes)
 * 0x68000000 - 0x6FFFFFFF : 16-bit region (Stride is 32K bytes)
 * 0x70000000 - 0x77FFFFFF : 32-bit region (Stride is 32K bytes)
 * 0x78000000 - 0x7FFFFFFF : Page mode region (Stride is 0 bytes)
 *
 * Return: stride value
 */
static long tiler_stride_from_region(phys_addr_t localphys)
{
	switch (localphys & 0xf8000000) {
	case 0x60000000:
		return 0x4000;
	case 0x68000000:
	case 0x70000000:
		return 0x8000;
	default:
		return 0;
	}
}

/**
 * rppc_recalc_off() - recalculate the unsigned offset in a buffer due to
 *		       its location in the TILER
 * @lpa: local physical address
 * @uoff: unsigned offset
 *
 * Return: adjusted offset accounting for the TILER region
 */
static long rppc_recalc_off(phys_addr_t lpa, long uoff)
{
	long stride = tiler_stride_from_region(lpa);

	return (stride != 0) ? (stride * (uoff / PAGE_SIZE)) +
			       (uoff & (PAGE_SIZE - 1)) : uoff;
}
#else
static inline long rppc_recalc_off(phys_addr_t lpa, long uoff)
{
	return uoff;
}
#endif
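
/*
 * Illustrative example (not part of the driver, assumes PAGE_SIZE == 4096):
 * for a buffer in the TILER 8-bit region, e.g. lpa = 0x60400000, the stride
 * is 0x4000, so a user offset of uoff = 0x2100 (two full pages plus 0x100)
 * is recalculated as
 *
 *	(0x4000 * (0x2100 / 0x1000)) + (0x2100 & 0xfff)
 *		= (0x4000 * 2) + 0x100 = 0x8100
 *
 * i.e. each whole page of user offset advances the remote view by one TILER
 * stride, while the intra-page offset is preserved. For page-mode or
 * non-TILER addresses the stride is 0 and the offset is returned unchanged.
 */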

/**
 * rppc_alloc_dmabuf - import a buffer and store it in a rppc buffer descriptor
 * @rpc: rppc instance handle
 * @fd: dma-buf file descriptor
 * @autoreg: flag indicating the mode of creation
 *
 * This function primarily imports a buffer into the driver and holds
 * a reference to the buffer on behalf of the remote processor. The
 * buffer to be imported is represented by a dma-buf file descriptor,
 * and as such is agnostic of the buffer allocator and/or exporter.
 * The buffer is imported using the dma-buf api, and a driver specific
 * buffer descriptor is used to store the imported buffer properties.
 * The imported buffers are all stored in a rppc instance specific
 * idr, to be used for looking up and cleaning up the driver buffer
 * descriptors.
 *
 * The @autoreg field is used to dictate the manner in which the buffer
 * is imported. The user-side can pre-register the buffers with the driver
 * (which will import the buffers) if the application is going to use
 * these repeatedly in consecutive function invocations. The buffers
 * are auto-imported if the user-side has not registered them previously,
 * and are un-imported once the remote function call returns.
 *
 * This function is to be called only after checking that the buffer has
 * not been imported already (see rppc_find_dmabuf).
 *
 * Return: allocated rppc_dma_buf or error
 */
struct rppc_dma_buf *rppc_alloc_dmabuf(struct rppc_instance *rpc, int fd,
				       bool autoreg)
{
	struct rppc_dma_buf *dma;
	void *ret;
	int id;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return ERR_PTR(-ENOMEM);

	dma->fd = fd;
	dma->autoreg = !!autoreg;
	dma->buf = dma_buf_get(dma->fd);
	if (IS_ERR(dma->buf)) {
		ret = dma->buf;
		goto free_dma;
	}

	dma->attach = dma_buf_attach(dma->buf, rpc->dev);
	if (IS_ERR(dma->attach)) {
		ret = dma->attach;
		goto put_buf;
	}

	dma->sgt = dma_buf_map_attachment(dma->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(dma->sgt)) {
		ret = dma->sgt;
		goto detach_buf;
	}

	dma->pa = sg_dma_address(dma->sgt->sgl);
	mutex_lock(&rpc->lock);
	id = idr_alloc(&rpc->dma_idr, dma, 0, 0, GFP_KERNEL);
	dma->id = id;
	mutex_unlock(&rpc->lock);
	if (id < 0) {
		ret = ERR_PTR(id);
		goto unmap_buf;
	}

	return dma;

unmap_buf:
	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
detach_buf:
	dma_buf_detach(dma->buf, dma->attach);
put_buf:
	dma_buf_put(dma->buf);
free_dma:
	kfree(dma);
	return ret;
}
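
/*
 * Illustrative usage sketch (not part of this file): a manual registration
 * path is expected to look up the descriptor first and import only on a
 * miss, roughly as
 *
 *	struct rppc_dma_buf *buf;
 *
 *	buf = rppc_find_dmabuf(rpc, fd);
 *	if (!buf) {
 *		buf = rppc_alloc_dmabuf(rpc, fd, false);
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *	}
 *
 * This hypothetical snippet assumes manual registration (autoreg == false);
 * the auto-import path in rppc_buffer_lookup() below passes true instead.
 */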

/**
 * rppc_free_dmabuf - release the imported buffer
 * @id: idr index of the imported buffer descriptor
 * @p: imported buffer descriptor allocated during rppc_alloc_dmabuf
 * @data: rpc instance handle
 *
 * This function is used to release a buffer that has been previously
 * imported through a rppc_alloc_dmabuf call. The function can be used
 * either individually for releasing a specific buffer, as a loop iterator
 * for releasing all the buffers associated with a remote function call, or
 * during cleanup of the rpc instance.
 *
 * Return: 0 on success, and -ENOENT if invalid pointers are passed in
 */
int rppc_free_dmabuf(int id, void *p, void *data)
{
	struct rppc_dma_buf *dma = p;
	struct rppc_instance *rpc = data;

	if (!dma || !rpc)
		return -ENOENT;

	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dma->buf, dma->attach);
	dma_buf_put(dma->buf);

	WARN_ON(id != dma->id);
	idr_remove(&rpc->dma_idr, id);
	kfree(dma);

	return 0;
}

/**
 * rppc_free_auto_dmabuf - release an auto-registered imported buffer
 * @id: idr index of the imported buffer descriptor
 * @p: imported buffer descriptor allocated during rppc_alloc_dmabuf
 * @data: rpc instance handle
 *
 * This function is used to release a buffer that has been previously
 * imported automatically in the remote function invocation path (i.e.
 * rppc_alloc_dmabuf invocations with @autoreg set to true). The function
 * is used as a loop iterator for releasing all such buffers associated
 * with a remote function call, and is called after processing the
 * translations while handling the return message of an executed function
 * call.
 *
 * Return: 0 on success or if the buffer is not auto-imported, and -ENOENT
 * if invalid pointers are passed in
 */
static int rppc_free_auto_dmabuf(int id, void *p, void *data)
{
	struct rppc_dma_buf *dma = p;
	struct rppc_instance *rpc = data;

	if (WARN_ON(!dma || !rpc))
		return -ENOENT;

	if (!dma->autoreg)
		return 0;

	rppc_free_dmabuf(id, p, data);
	return 0;
}

/**
 * find_dma_by_fd - find the allocated buffer descriptor
 * @id: idr loop index
 * @p: imported buffer descriptor associated with each idr index @id
 * @data: dma-buf file descriptor of the buffer
 *
 * This is an idr iterator helper function, used for checking if a buffer
 * has been imported before and is present within the rpc instance's idr.
 *
 * Return: rppc buffer descriptor if the file descriptor matches, and 0
 * otherwise
 */
static int find_dma_by_fd(int id, void *p, void *data)
{
	struct rppc_dma_buf *dma = p;
	int fd = (int)data;

	/*
	 * a non-zero return value stops the idr_for_each() walk and is
	 * propagated back to rppc_find_dmabuf(); the pointer/int casts
	 * rely on the 32-bit OMAP platforms this driver targets
	 */
	if (dma->fd == fd)
		return (int)p;

	return 0;
}

/**
 * rppc_find_dmabuf - find and return the rppc buffer descriptor of an imported
 *		      buffer
 * @rpc: rpc instance
 * @fd: dma-buf file descriptor of the buffer
 *
 * This function is used to find and return the rppc buffer descriptor of an
 * imported buffer. The function is used to check if a buffer has already
 * been imported (during manual registration, to return an error), and to
 * return the rppc buffer descriptor to be used for freeing (during manual
 * deregistration). It is also used during auto-registration to see if the
 * buffer needs to be imported through rppc_alloc_dmabuf if not found.
 *
 * Return: rppc buffer descriptor of the buffer if it has already been
 * imported, or NULL otherwise.
 */
struct rppc_dma_buf *rppc_find_dmabuf(struct rppc_instance *rpc, int fd)
{
	struct rppc_dma_buf *node = NULL;
	void *data = (void *)fd;

	dev_dbg(rpc->dev, "looking for fd %u\n", fd);

	mutex_lock(&rpc->lock);
	node = (struct rppc_dma_buf *)
			idr_for_each(&rpc->dma_idr, find_dma_by_fd, data);
	mutex_unlock(&rpc->lock);

	dev_dbg(rpc->dev, "returning node %p for fd %u\n", node, fd);

	return node;
}

/**
 * rppc_map_page - import a dma_buf and map one of its kernel pages
 * @rpc: rppc instance handle
 * @fd: file descriptor of the dma_buf to import
 * @offset: offset of the translate location within the buffer
 * @base_ptr: pointer for returning the mapped kernel address
 * @dmabuf: pointer for returning the imported dma_buf
 *
 * A helper function to import the dma_buf buffer and map into the kernel
 * the page containing the offset within the buffer. The function is
 * called by rppc_xlate_buffers and returns the pointers to the kernel
 * mapped address and the imported dma_buf handle in arguments. The
 * mapping is used for performing in-place translation of the user
 * provided pointer at location @offset within the buffer.
 *
 * The mapping is achieved through the appropriate dma_buf ops, and
 * the page will be unmapped after performing the translation. See
 * also rppc_unmap_page.
 *
 * Return: 0 on success, or an appropriate failure code otherwise
 */
static int rppc_map_page(struct rppc_instance *rpc, int fd, u32 offset,
			 u8 **base_ptr, struct dma_buf **dmabuf)
{
	int ret = 0;
	u8 *ptr = NULL;
	struct dma_buf *dbuf = NULL;
	u32 pg_offset;
	unsigned long pg_num;
	size_t begin, end = PAGE_SIZE;
	struct device *dev = rpc->dev;

	if (!base_ptr || !dmabuf)
		return -EINVAL;

	pg_offset = (offset & (PAGE_SIZE - 1));
	begin = offset & PAGE_MASK;
	pg_num = offset >> PAGE_SHIFT;

	dbuf = dma_buf_get(fd);
	if (IS_ERR(dbuf)) {
		ret = PTR_ERR(dbuf);
		dev_err(dev, "invalid dma_buf file descriptor passed! fd = %d ret = %d\n",
			fd, ret);
		goto out;
	}

	ret = dma_buf_begin_cpu_access(dbuf, DMA_BIDIRECTIONAL);
	if (ret < 0) {
		dev_err(dev, "failed to acquire cpu access to the dma buf fd = %d offset = 0x%x, ret = %d\n",
			fd, offset, ret);
		goto put_dmabuf;
	}

	ptr = dma_buf_kmap(dbuf, pg_num);
	if (!ptr) {
		ret = -ENOBUFS;
		dev_err(dev, "failed to map the page containing the translation into kernel fd = %d offset = 0x%x\n",
			fd, offset);
		goto end_cpuaccess;
	}

	*base_ptr = ptr;
	*dmabuf = dbuf;
	dev_dbg(dev, "kmap'd base_ptr = %p buf = %p into kernel from %zu for %zu bytes, pg_offset = 0x%x\n",
		ptr, dbuf, begin, end, pg_offset);

	return 0;

end_cpuaccess:
	dma_buf_end_cpu_access(dbuf, DMA_BIDIRECTIONAL);
put_dmabuf:
	dma_buf_put(dbuf);
out:
	return ret;
}
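
/*
 * Illustrative example (not part of the driver, assumes PAGE_SIZE == 4096):
 * a translation at offset = 0x5010 within the dma-buf selects page
 * pg_num = 0x5010 >> 12 = 5 with pg_offset = 0x5010 & 0xfff = 0x10, so the
 * pointer to be patched lives at base_ptr[0x10] of the kmap'd page.
 */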

/**
 * rppc_unmap_page - unmap and release a previously mapped page
 * @rpc: rppc instance handle
 * @offset: offset of the translate location within the buffer
 * @base_ptr: kernel mapped address of the page to be unmapped
 * @dmabuf: imported dma_buf to be released
 *
 * This function is called by rppc_xlate_buffers to unmap the
 * page and release the imported buffer. It essentially undoes
 * the functionality of rppc_map_page.
 */
static void rppc_unmap_page(struct rppc_instance *rpc, u32 offset,
			    u8 *base_ptr, struct dma_buf *dmabuf)
{
	u32 pg_offset;
	unsigned long pg_num;
	size_t begin, end = PAGE_SIZE;
	struct device *dev = rpc->dev;

	if (!base_ptr || !dmabuf)
		return;

	pg_offset = (offset & (PAGE_SIZE - 1));
	begin = offset & PAGE_MASK;
	pg_num = offset >> PAGE_SHIFT;

	dev_dbg(dev, "unkmapping base_ptr = %p of buf = %p from %zu to %zu bytes\n",
		base_ptr, dmabuf, begin, end);

	dma_buf_kunmap(dmabuf, pg_num, base_ptr);
	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
}

/**
 * rppc_buffer_lookup - convert a buffer pointer to a remote processor pointer
 * @rpc: rpc instance
 * @uva: buffer pointer that needs to be translated
 * @buva: base pointer of the allocated buffer
 * @fd: dma-buf file descriptor of the allocated buffer
 *
 * This function is used for converting a pointer value in the function
 * arguments to its appropriate remote processor device address value.
 * The @uva and @buva are used for identifying the offset of the function
 * argument pointer in an original allocation. This supports the cases where
 * an offset pointer (eg: alignment, packed buffers etc) needs to be passed
 * as the argument rather than the actual allocated pointer.
 *
 * The remote processor device address is obtained by retrieving the base
 * physical address of the buffer (importing the buffer if needed) and
 * converting it to the remote processor device address using a remoteproc
 * api, with adjustments to the offset.
 *
 * The offset is specifically adjusted for OMAP TILER to account for the stride
 * and mapping onto the remote processor.
 *
 * Return: remote processor device address, 0 on failure (implies invalid
 * arguments)
 */
dev_addr_t rppc_buffer_lookup(struct rppc_instance *rpc, virt_addr_t uva,
			      virt_addr_t buva, int fd)
{
	phys_addr_t lpa = 0;
	dev_addr_t rda = 0;
	long uoff = uva - buva;
	struct device *dev = rpc->dev;
	struct rppc_dma_buf *buf;

	dev_dbg(dev, "buva = %p uva = %p offset = %ld [0x%016lx] fd = %d\n",
		(void *)buva, (void *)uva, uoff, (ulong)uoff, fd);

	if (uoff < 0) {
		dev_err(dev, "invalid pointer values for uva = %p from buva = %p\n",
			(void *)uva, (void *)buva);
		return rda;
	}

	buf = rppc_find_dmabuf(rpc, fd);
	if (IS_ERR_OR_NULL(buf)) {
		buf = rppc_alloc_dmabuf(rpc, fd, true);
		if (IS_ERR(buf))
			goto out;
	}

	lpa = buf->pa;
	WARN_ON(lpa != sg_dma_address(buf->sgt->sgl));
	uoff = rppc_recalc_off(lpa, uoff);
	lpa += uoff;
	rda = rppc_local_to_remote_da(rpc, lpa);

out:
	dev_dbg(dev, "host uva %p == host pa %pa => remote da %p (fd %d)\n",
		(void *)uva, &lpa, (void *)rda, fd);
	return rda;
}
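
/*
 * Illustrative walk-through (hypothetical values, not part of the driver):
 * with buva = 0x40001000 and uva = 0x40003040 the argument offset is
 * uoff = 0x2040. For a non-TILER buffer whose imported base address is
 * lpa = 0x9c200000, rppc_recalc_off() leaves uoff unchanged, so the value
 * returned is rppc_local_to_remote_da(rpc, 0x9c202040), i.e. the same
 * intra-buffer offset applied to the remote processor's view of the buffer.
 */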

/**
 * rppc_xlate_buffers - translate argument pointers in the marshalled packet
 * @rpc: rppc instance
 * @func: rppc function packet being acted upon
 * @direction: direction of translation
 *
 * This function translates all the pointers within the function call packet
 * structure, based on the translation descriptor structures. Each pointer is
 * replaced with the appropriate value for the given direction: the function
 * is invoked when preparing the packet to be sent to the remote processor,
 * replacing the pointers with remote processor device addresses, and when
 * processing the packet back after executing the function, replacing the
 * remote processor device addresses with the original pointers.
 *
 * Return: 0 on success, or an appropriate failure code otherwise
 */
int rppc_xlate_buffers(struct rppc_instance *rpc, struct rppc_function *func,
		       int direction)
{
	u8 *base_ptr = NULL;
	struct dma_buf *dbuf = NULL;
	struct device *dev = rpc->dev;
	u32 ptr_idx, pri_offset, sec_offset, offset, pg_offset, size;
	int i, limit, inc = 1;
	virt_addr_t kva, uva, buva;
	dev_addr_t rda;
	int ret = 0, final_ret = 0;
	int xlate_fd;

	limit = func->num_translations;
	if (WARN_ON(!limit))
		return 0;

	dev_dbg(dev, "operating on %d pointers\n", func->num_translations);

	/* sanity check the translation elements */
	for (i = 0; i < limit; i++) {
		ptr_idx = func->translations[i].index;

		/* validate the index before using it to access the params */
		if (ptr_idx >= RPPC_MAX_PARAMETERS) {
			dev_err(dev, "xlate[%d] - invalid parameter pointer index %u\n",
				i, ptr_idx);
			return -EINVAL;
		}

		sec_offset = func->translations[i].offset;
		size = func->params[ptr_idx].size;

		if (func->params[ptr_idx].type != RPPC_PARAM_TYPE_PTR) {
			dev_err(dev, "xlate[%d] - parameter index %u is not a pointer (type %u)\n",
				i, ptr_idx, func->params[ptr_idx].type);
			return -EINVAL;
		}

		if (func->params[ptr_idx].data == 0) {
			dev_err(dev, "xlate[%d] - supplied user pointer is NULL!\n",
				i);
			return -EINVAL;
		}

		if (sec_offset > (size - sizeof(virt_addr_t))) {
			dev_err(dev, "xlate[%d] offset is larger than data area! (sec_offset = %u size = %u)\n",
				i, sec_offset, size);
			return -ENOSPC;
		}
	}

	/*
	 * we may have a failure during translation, in which case use the same
	 * loop to unwind the whole operation
	 */
	for (i = 0; i != limit; i += inc) {
		dev_dbg(dev, "starting translation %d of %d by %d\n",
			i, limit, inc);

		ptr_idx = func->translations[i].index;
		pri_offset = func->params[ptr_idx].data -
						func->params[ptr_idx].base;
		sec_offset = func->translations[i].offset;
		offset = pri_offset + sec_offset;
		pg_offset = (offset & (PAGE_SIZE - 1));

		/*
		 * map into kernel the page containing the offset, where the
		 * pointer needs to be translated.
		 */
		ret = rppc_map_page(rpc, func->params[ptr_idx].fd, offset,
				    &base_ptr, &dbuf);
		if (ret) {
			dev_err(dev, "rppc_map_page failed, translation = %d param_index = %d fd = %d ret = %d\n",
				i, ptr_idx, func->params[ptr_idx].fd, ret);
			goto unwind;
		}

		/*
		 * perform the actual translation as per the direction.
		 */
		if (direction == RPPC_UVA_TO_RPA) {
			kva = (virt_addr_t)&base_ptr[pg_offset];
			if (kva & 0x3) {
				dev_err(dev, "kernel virtual address %p is not aligned for translation = %d\n",
					(void *)kva, i);
				ret = -EADDRNOTAVAIL;
				goto unmap;
			}

			uva = *(virt_addr_t *)kva;
			if (!uva) {
				dev_err(dev, "user pointer in the translated offset location is NULL for translation = %d\n",
					i);
				print_hex_dump(KERN_DEBUG, "KMAP: ",
					       DUMP_PREFIX_NONE, 16, 1,
					       base_ptr, PAGE_SIZE, true);
				ret = -EADDRNOTAVAIL;
				goto unmap;
			}

			buva = (virt_addr_t)func->translations[i].base;
			xlate_fd = func->translations[i].fd;

			dev_dbg(dev, "replacing UVA %p at KVA %p ptr_idx = %u pg_offset = 0x%x fd = %d\n",
				(void *)uva, (void *)kva, ptr_idx,
				pg_offset, xlate_fd);

			/* compute the corresponding remote device address */
			rda = rppc_buffer_lookup(rpc, uva, buva, xlate_fd);
			if (!rda) {
				ret = -ENODATA;
				goto unmap;
			}

			/*
			 * replace the pointer, save the old value for replacing
			 * it back on the function return path
			 */
			func->translations[i].fd = (int32_t)uva;
			*(virt_addr_t *)kva = rda;

			dev_dbg(dev, "replaced UVA %p with RDA %p at KVA %p\n",
				(void *)uva, (void *)rda, (void *)kva);
		} else if (direction == RPPC_RPA_TO_UVA) {
			kva = (virt_addr_t)&base_ptr[pg_offset];
			if (kva & 0x3) {
				ret = -EADDRNOTAVAIL;
				goto unmap;
			}

			rda = *(virt_addr_t *)kva;
			uva = (virt_addr_t)func->translations[i].fd;
			WARN_ON(!uva);
			*(virt_addr_t *)kva = uva;

			dev_dbg(dev, "replaced RDA %p with UVA %p at KVA %p\n",
				(void *)rda, (void *)uva, (void *)kva);
		}

unmap:
		/*
		 * unmap the page containing the translation from kernel, the
		 * next translation acting on the same fd might be in a
		 * different page altogether from the current one
		 */
		rppc_unmap_page(rpc, offset, base_ptr, dbuf);
		dbuf = NULL;
		base_ptr = NULL;

		if (!ret)
			continue;

unwind:
		/*
		 * unwind all the previous translations if the failure occurs
		 * while sending a message to the remote-side. There's nothing
		 * to do but to continue if the failure occurs during the
		 * processing of a function response.
		 */
		if (direction == RPPC_UVA_TO_RPA) {
			dev_err(dev, "unwinding UVA to RDA translations! translation = %d\n",
				i);
			direction = RPPC_RPA_TO_UVA;
			inc = -1;
			limit = -1;
		} else if (direction == RPPC_RPA_TO_UVA) {
			dev_err(dev, "error during RDA to UVA translations!! current translation = %d\n",
				i);
		}

		/*
		 * store away the return value to return back to caller
		 * in case of an error, record only the first error
		 */
		if (!final_ret)
			final_ret = ret;
	}

	/*
	 * all the in-place pointer replacements are done, release all the
	 * imported buffers during the remote function return path
	 */
	if (direction == RPPC_RPA_TO_UVA) {
		mutex_lock(&rpc->lock);
		idr_for_each(&rpc->dma_idr, rppc_free_auto_dmabuf, rpc);
		mutex_unlock(&rpc->lock);
	}

	return final_ret;
}
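
/*
 * Illustrative caller sketch (hypothetical, not part of this file): the send
 * path is expected to translate user pointers to remote device addresses
 * before queueing the message, and the response path to restore them,
 * roughly along the lines of
 *
 *	ret = rppc_xlate_buffers(rpc, func, RPPC_UVA_TO_RPA);
 *	if (ret)
 *		return ret;
 *	ret = rpmsg_send(ept, msg, len);
 *	if (ret)
 *		rppc_xlate_buffers(rpc, func, RPPC_RPA_TO_UVA);
 *	...
 *	// on the response callback:
 *	rppc_xlate_buffers(rpc, func, RPPC_RPA_TO_UVA);
 *
 * where ept, msg and len stand in for the actual rpmsg endpoint and
 * marshalled message used by the driver.
 */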