rmi_spi.c

/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT		32

enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};

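/*
 * Per-device transport state: the generic RMI transport device, the
 * underlying spi_device, the attention IRQ, the currently selected register
 * page (guarded by page_mutex), and the DMA-capable rx/tx byte buffers plus
 * the spi_transfer pools used to build messages.
 */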
struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	int irq;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};

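/*
 * (Re)allocate the DMA-capable rx/tx byte buffers and the pools of
 * spi_transfer structures. The buffer size doubles until it covers the
 * requested length, capped at RMI_SPI_XFER_SIZE_LIMIT. One spi_transfer per
 * byte is reserved when the platform data requests per-byte read or write
 * delays; otherwise a single transfer per direction is enough.
 */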
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kzalloc(&spi->dev, buf_size * 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kzalloc(&spi->dev,
		(rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
		* sizeof(struct spi_transfer), GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}

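/*
 * Execute one SPI command: build the 2-byte (RMI) or 4-byte (RMI V2)
 * command header, copy any payload into the tx buffer, queue either a
 * single transfer or one transfer per byte (when inter-byte delays are
 * configured), run the message synchronously and copy any received data
 * back into the caller's buffer.
 */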
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}

/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xff of every page, which lets us reliably
 * address the full register space 256 registers at a time.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	/* Only cache the new page if the write actually succeeded. */
	if (!ret)
		rmi_spi->page = page;

	return ret;
}

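/*
 * Transport .write_block callback: switch to the page containing @addr if
 * necessary, then write @len bytes from @buf starting at @addr. Serialized
 * against other accesses by page_mutex.
 */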
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

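/*
 * Transport .read_block callback: switch pages if necessary, then read
 * @len bytes starting at @addr into @buf.
 */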
static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};

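/*
 * Threaded interrupt handler: hand the attention interrupt off to the RMI
 * core, which reads and dispatches the pending function interrupts.
 */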
static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
{
	struct rmi_spi_xport *rmi_spi = dev_id;
	struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
	int ret;

	ret = rmi_process_interrupt_requests(rmi_dev);
	if (ret)
		rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
			"Failed to process interrupt request: %d\n", ret);

	return IRQ_HANDLED;
}

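/*
 * Request the attention IRQ as a threaded, one-shot handler. If no trigger
 * type was provided by the firmware description, default to level-low.
 */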
static int rmi_spi_init_irq(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
	int ret;

	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
			rmi_spi_irq, irq_flags | IRQF_ONESHOT,
			dev_name(&spi->dev), rmi_spi);
	if (ret < 0) {
		dev_warn(&spi->dev, "Failed to register interrupt %d\n",
			rmi_spi->irq);
		return ret;
	}

	return 0;
}

#ifdef CONFIG_OF
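/*
 * Read the optional spi-rx-delay-us and spi-tx-delay-us properties
 * (per-byte delays in microseconds) from the device tree into the
 * platform data.
 */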
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}

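/*
 * Probe: gather platform data (from DT or legacy platform_data), configure
 * the SPI link, allocate the transfer buffers, verify the device responds
 * by selecting page 0, then register the RMI transport and the attention
 * interrupt.
 */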
static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int retval;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		retval = rmi_spi_of_probe(spi, pdata);
		if (retval)
			return retval;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	retval = spi_setup(spi);
	if (retval < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return retval;
	}

	if (spi->irq > 0)
		rmi_spi->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	retval = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (retval)
		return retval;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	retval = rmi_set_page(rmi_spi, 0);
	if (retval) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return retval;
	}

	retval = rmi_register_transport_device(&rmi_spi->xport);
	if (retval) {
		dev_err(&spi->dev, "failed to register transport.\n");
		return retval;
	}

	retval = devm_add_action_or_reset(&spi->dev,
					  rmi_spi_unregister_transport,
					  rmi_spi);
	if (retval)
		return retval;

	retval = rmi_spi_init_irq(spi);
	if (retval < 0)
		return retval;

	dev_info(&spi->dev, "registered RMI SPI driver\n");
	return 0;
}

#ifdef CONFIG_PM_SLEEP
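/*
 * System sleep: quiesce the RMI core, then mask the attention interrupt
 * and, if the device may wake the system, arm it as a wakeup source.
 * Resume reverses the sequence.
 */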
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	disable_irq(rmi_spi->irq);
	if (device_may_wakeup(&spi->dev)) {
		ret = enable_irq_wake(rmi_spi->irq);
		if (ret)
			dev_warn(dev, "Failed to enable irq for wake: %d\n",
				ret);
	}

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	enable_irq(rmi_spi->irq);
	if (device_may_wakeup(&spi->dev)) {
		ret = disable_irq_wake(rmi_spi->irq);
		if (ret)
			dev_warn(dev, "Failed to disable irq for wake: %d\n",
				ret);
	}

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif

#ifdef CONFIG_PM
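/*
 * Runtime PM: suspend quiesces the RMI core and masks the interrupt;
 * resume re-enables the interrupt and wakes the core. Errors are only
 * logged, so the callbacks always return success.
 */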
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	disable_irq(rmi_spi->irq);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	enable_irq(rmi_spi->irq);

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};
module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(RMI_DRIVER_VERSION);