omap-aes-gcm.c

/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);

static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;

	dd->in_sg = NULL;
	dd->out_sg = NULL;

	req->base.complete(&req->base, ret);
}
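
/*
 * Post-DMA completion: sync and unmap the DMA scatterlists, copy any
 * bounce-buffered output back into the caller's destination, free the
 * bounce pages, and (on decryption) check the computed authentication
 * tag. auth_tag has already been XORed against the received tag in the
 * DMA callback, so any non-zero byte means the tags did not match.
 */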
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	void *buf;
	u8 *tag;
	int pages, alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	if (dd->sgs_copied & AES_OUT_DATA_COPIED) {
		buf = sg_virt(&dd->out_sgl);
		scatterwalk_map_and_copy(buf, dd->orig_out,
					 dd->aead_req->assoclen, dd->total, 1);

		pages = get_order(clen);
		free_pages((unsigned long)buf, pages);
	}

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	if (dd->sgs_copied & AES_ASSOC_DATA_COPIED) {
		buf = sg_virt(&dd->in_sgl[0]);
		pages = get_order(alen);
		free_pages((unsigned long)buf, pages);
	}

	if (dd->sgs_copied & AES_IN_DATA_COPIED) {
		buf = sg_virt(&dd->in_sgl[nsg]);
		pages = get_order(clen);
		free_pages((unsigned long)buf, pages);
	}

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: tag mismatch\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}
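
/*
 * Build DMA-friendly scatterlists for the request. Associated data,
 * input data and output data that the hardware cannot access directly
 * are bounced into freshly allocated, block-aligned pages, tracked via
 * dd->sgs_copied so they can be copied back and freed on completion.
 */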
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	void *buf_in;
	int pages, alen, clen, cryptlen, nsg, assoclen;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	u32 dec = !(dd->flags & FLAGS_ENCRYPT);
	struct scatterlist *input, *assoc, tmp[2];

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	cryptlen = req->cryptlen - (dec * authlen);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	dd->sgs_copied = 0;

	nsg = !!(assoclen && req->cryptlen);

	assoc = &req->src[0];
	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		if (omap_aes_copy_needed(assoc, assoclen)) {
			dd->sgs_copied |= AES_ASSOC_DATA_COPIED;
			pages = get_order(alen);
			buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
			if (!buf_in) {
				pr_err("Couldn't allocate for unaligned case.\n");
				return -ENOMEM;
			}

			scatterwalk_map_and_copy(buf_in, assoc, 0,
						 assoclen, 0);
			memset(buf_in + assoclen, 0, alen - assoclen);
		} else {
			buf_in = sg_virt(assoc);
		}
		sg_set_buf(dd->in_sgl, buf_in, alen);
	}

	if (req->cryptlen) {
		input = scatterwalk_ffwd(tmp, req->src, req->assoclen);

		if (omap_aes_copy_needed(input, req->cryptlen)) {
			dd->sgs_copied |= AES_IN_DATA_COPIED;
			pages = get_order(clen);
			buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
			if (!buf_in) {
				pr_err("Couldn't allocate for unaligned case.\n");
				return -ENOMEM;
			}

			scatterwalk_map_and_copy(buf_in, input, 0, cryptlen, 0);
			memset(buf_in + cryptlen, 0, clen - cryptlen);
		} else {
			buf_in = sg_virt(input);
		}
		sg_set_buf(&dd->in_sgl[nsg], buf_in, clen);
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	if (omap_aes_copy_needed(req->dst, cryptlen + assoclen)) {
		pages = get_order(clen);

		buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
		if (!buf_in) {
			pr_err("Couldn't allocate for unaligned case.\n");
			return -ENOMEM;
		}

		sg_init_one(&dd->out_sgl, buf_in, clen);
		dd->out_sg = &dd->out_sgl;
		dd->orig_out = req->dst;
		dd->sgs_copied |= AES_OUT_DATA_COPIED;
	} else {
		dd->out_sg = scatterwalk_ffwd(tmp, req->dst, req->assoclen);
	}

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}

static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
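
/*
 * Encrypt the initial counter block J0 = IV || 0^31 || 1 with the AES
 * key, using the fallback CTR skcipher (ctx->ctr). GCM defines the
 * final tag as GHASH(H, A, C) XOR E(K, J0); E(K, J0) is precomputed
 * here into rctx->auth_tag and folded in with the hardware tag
 * registers by the DMA completion callback.
 */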
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	if (ret) {
		pr_err("skcipher: Failed to set key for IV encryption\n");
		goto out;
	}
	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

out:
	skcipher_request_free(sk_req);
	return ret;
}
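
/*
 * DMA completion callback: fold the hardware tag registers into
 * rctx->auth_tag. On encryption this yields the tag to append; on
 * decryption the received tag is XORed in as well, so a correct tag
 * leaves auth_tag all zero for omap_aes_gcm_done_task() to verify.
 */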
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);

	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}
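
/*
 * Enqueue a request (if any) and, unless the engine is already busy,
 * dequeue the next one and kick off DMA processing. Called with a NULL
 * request from the completion path to drain the queue.
 */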
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}
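
/*
 * Common entry point for all GCM variants: set the 32-bit counter in
 * the last word of J0, precompute E(K, J0), and short-circuit the
 * empty-message case (GHASH of empty input is zero, so the tag is just
 * E(K, J0)) before queueing the request for the hardware.
 */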
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + 12, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}
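
/*
 * AEAD entry points. Plain GCM uses the caller's 96-bit IV directly;
 * the RFC 4106 variants prepend the 4-byte nonce salt stored at setkey
 * time and take the remaining 8 IV bytes from the request.
 */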
int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, 12);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, 12);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	/* RFC 4106 keys carry a 4-byte nonce salt after the AES key */
	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}