ablkcipher.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434
  1. /*
  2. * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/device.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/types.h>
  16. #include <crypto/aes.h>
  17. #include <crypto/des.h>
  18. #include <crypto/internal/skcipher.h>
  19. #include "cipher.h"
/* Templates registered with the crypto core; walked on unregister. */
static LIST_HEAD(ablkcipher_algs);
  21. static void qce_ablkcipher_done(void *data)
  22. {
  23. struct crypto_async_request *async_req = data;
  24. struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
  25. struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
  26. struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
  27. struct qce_device *qce = tmpl->qce;
  28. enum dma_data_direction dir_src, dir_dst;
  29. u32 status;
  30. int error;
  31. bool diff_dst;
  32. diff_dst = (req->src != req->dst) ? true : false;
  33. dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
  34. dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
  35. error = qce_dma_terminate_all(&qce->dma);
  36. if (error)
  37. dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
  38. error);
  39. if (diff_dst)
  40. dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
  41. dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
  42. sg_free_table(&rctx->dst_tbl);
  43. error = qce_check_status(qce, &status);
  44. if (error < 0)
  45. dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
  46. qce->async_req_done(tmpl->qce, error);
  47. }
  48. static int
  49. qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
  50. {
  51. struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
  52. struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
  53. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  54. struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
  55. struct qce_device *qce = tmpl->qce;
  56. enum dma_data_direction dir_src, dir_dst;
  57. struct scatterlist *sg;
  58. bool diff_dst;
  59. gfp_t gfp;
  60. int ret;
  61. rctx->iv = req->info;
  62. rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  63. rctx->cryptlen = req->nbytes;
  64. diff_dst = (req->src != req->dst) ? true : false;
  65. dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
  66. dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
  67. rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
  68. if (diff_dst)
  69. rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
  70. else
  71. rctx->dst_nents = rctx->src_nents;
  72. if (rctx->src_nents < 0) {
  73. dev_err(qce->dev, "Invalid numbers of src SG.\n");
  74. return rctx->src_nents;
  75. }
  76. if (rctx->dst_nents < 0) {
  77. dev_err(qce->dev, "Invalid numbers of dst SG.\n");
  78. return -rctx->dst_nents;
  79. }
  80. rctx->dst_nents += 1;
  81. gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  82. GFP_KERNEL : GFP_ATOMIC;
  83. ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
  84. if (ret)
  85. return ret;
  86. sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
  87. sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
  88. if (IS_ERR(sg)) {
  89. ret = PTR_ERR(sg);
  90. goto error_free;
  91. }
  92. sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
  93. if (IS_ERR(sg)) {
  94. ret = PTR_ERR(sg);
  95. goto error_free;
  96. }
  97. sg_mark_end(sg);
  98. rctx->dst_sg = rctx->dst_tbl.sgl;
  99. ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
  100. if (ret < 0)
  101. goto error_free;
  102. if (diff_dst) {
  103. ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
  104. if (ret < 0)
  105. goto error_unmap_dst;
  106. rctx->src_sg = req->src;
  107. } else {
  108. rctx->src_sg = rctx->dst_sg;
  109. }
  110. ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
  111. rctx->dst_sg, rctx->dst_nents,
  112. qce_ablkcipher_done, async_req);
  113. if (ret)
  114. goto error_unmap_src;
  115. qce_dma_issue_pending(&qce->dma);
  116. ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
  117. if (ret)
  118. goto error_terminate;
  119. return 0;
  120. error_terminate:
  121. qce_dma_terminate_all(&qce->dma);
  122. error_unmap_src:
  123. if (diff_dst)
  124. dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
  125. error_unmap_dst:
  126. dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
  127. error_free:
  128. sg_free_table(&rctx->dst_tbl);
  129. return ret;
  130. }
  131. static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
  132. unsigned int keylen)
  133. {
  134. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
  135. struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
  136. unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
  137. int ret;
  138. if (!key || !keylen)
  139. return -EINVAL;
  140. if (IS_AES(flags)) {
  141. switch (keylen) {
  142. case AES_KEYSIZE_128:
  143. case AES_KEYSIZE_256:
  144. break;
  145. default:
  146. goto fallback;
  147. }
  148. } else if (IS_DES(flags)) {
  149. u32 tmp[DES_EXPKEY_WORDS];
  150. ret = des_ekey(tmp, key);
  151. if (!ret && crypto_ablkcipher_get_flags(ablk) &
  152. CRYPTO_TFM_REQ_WEAK_KEY)
  153. goto weakkey;
  154. }
  155. ctx->enc_keylen = keylen;
  156. memcpy(ctx->enc_key, key, keylen);
  157. return 0;
  158. fallback:
  159. ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
  160. if (!ret)
  161. ctx->enc_keylen = keylen;
  162. return ret;
  163. weakkey:
  164. crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
  165. return -EINVAL;
  166. }
  167. static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
  168. {
  169. struct crypto_tfm *tfm =
  170. crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
  171. struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
  172. struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
  173. struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
  174. int ret;
  175. rctx->flags = tmpl->alg_flags;
  176. rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
  177. if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
  178. ctx->enc_keylen != AES_KEYSIZE_256) {
  179. SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
  180. skcipher_request_set_tfm(subreq, ctx->fallback);
  181. skcipher_request_set_callback(subreq, req->base.flags,
  182. NULL, NULL);
  183. skcipher_request_set_crypt(subreq, req->src, req->dst,
  184. req->nbytes, req->info);
  185. ret = encrypt ? crypto_skcipher_encrypt(subreq) :
  186. crypto_skcipher_decrypt(subreq);
  187. skcipher_request_zero(subreq);
  188. return ret;
  189. }
  190. return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
  191. }
/* Encrypt entry point: delegate to the common crypt path. */
static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}
/* Decrypt entry point: delegate to the common crypt path. */
static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}
  200. static int qce_ablkcipher_init(struct crypto_tfm *tfm)
  201. {
  202. struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
  203. memset(ctx, 0, sizeof(*ctx));
  204. tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
  205. ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
  206. CRYPTO_ALG_ASYNC |
  207. CRYPTO_ALG_NEED_FALLBACK);
  208. if (IS_ERR(ctx->fallback))
  209. return PTR_ERR(ctx->fallback);
  210. return 0;
  211. }
/* Transform destructor: release the software fallback cipher. */
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
/* Static description of one ablkcipher algorithm supported by the engine. */
struct qce_ablkcipher_def {
	unsigned long flags;		/* QCE_ALG_* | QCE_MODE_* bits */
	const char *name;		/* crypto API algorithm name */
	const char *drv_name;		/* driver-unique implementation name */
	unsigned int blocksize;		/* cipher block size in bytes */
	unsigned int ivsize;		/* IV size in bytes */
	unsigned int min_keysize;	/* smallest supported key, in bytes */
	unsigned int max_keysize;	/* largest supported key, in bytes */
};
  226. static const struct qce_ablkcipher_def ablkcipher_def[] = {
  227. {
  228. .flags = QCE_ALG_AES | QCE_MODE_ECB,
  229. .name = "ecb(aes)",
  230. .drv_name = "ecb-aes-qce",
  231. .blocksize = AES_BLOCK_SIZE,
  232. .ivsize = AES_BLOCK_SIZE,
  233. .min_keysize = AES_MIN_KEY_SIZE,
  234. .max_keysize = AES_MAX_KEY_SIZE,
  235. },
  236. {
  237. .flags = QCE_ALG_AES | QCE_MODE_CBC,
  238. .name = "cbc(aes)",
  239. .drv_name = "cbc-aes-qce",
  240. .blocksize = AES_BLOCK_SIZE,
  241. .ivsize = AES_BLOCK_SIZE,
  242. .min_keysize = AES_MIN_KEY_SIZE,
  243. .max_keysize = AES_MAX_KEY_SIZE,
  244. },
  245. {
  246. .flags = QCE_ALG_AES | QCE_MODE_CTR,
  247. .name = "ctr(aes)",
  248. .drv_name = "ctr-aes-qce",
  249. .blocksize = AES_BLOCK_SIZE,
  250. .ivsize = AES_BLOCK_SIZE,
  251. .min_keysize = AES_MIN_KEY_SIZE,
  252. .max_keysize = AES_MAX_KEY_SIZE,
  253. },
  254. {
  255. .flags = QCE_ALG_AES | QCE_MODE_XTS,
  256. .name = "xts(aes)",
  257. .drv_name = "xts-aes-qce",
  258. .blocksize = AES_BLOCK_SIZE,
  259. .ivsize = AES_BLOCK_SIZE,
  260. .min_keysize = AES_MIN_KEY_SIZE,
  261. .max_keysize = AES_MAX_KEY_SIZE,
  262. },
  263. {
  264. .flags = QCE_ALG_DES | QCE_MODE_ECB,
  265. .name = "ecb(des)",
  266. .drv_name = "ecb-des-qce",
  267. .blocksize = DES_BLOCK_SIZE,
  268. .ivsize = 0,
  269. .min_keysize = DES_KEY_SIZE,
  270. .max_keysize = DES_KEY_SIZE,
  271. },
  272. {
  273. .flags = QCE_ALG_DES | QCE_MODE_CBC,
  274. .name = "cbc(des)",
  275. .drv_name = "cbc-des-qce",
  276. .blocksize = DES_BLOCK_SIZE,
  277. .ivsize = DES_BLOCK_SIZE,
  278. .min_keysize = DES_KEY_SIZE,
  279. .max_keysize = DES_KEY_SIZE,
  280. },
  281. {
  282. .flags = QCE_ALG_3DES | QCE_MODE_ECB,
  283. .name = "ecb(des3_ede)",
  284. .drv_name = "ecb-3des-qce",
  285. .blocksize = DES3_EDE_BLOCK_SIZE,
  286. .ivsize = 0,
  287. .min_keysize = DES3_EDE_KEY_SIZE,
  288. .max_keysize = DES3_EDE_KEY_SIZE,
  289. },
  290. {
  291. .flags = QCE_ALG_3DES | QCE_MODE_CBC,
  292. .name = "cbc(des3_ede)",
  293. .drv_name = "cbc-3des-qce",
  294. .blocksize = DES3_EDE_BLOCK_SIZE,
  295. .ivsize = DES3_EDE_BLOCK_SIZE,
  296. .min_keysize = DES3_EDE_KEY_SIZE,
  297. .max_keysize = DES3_EDE_KEY_SIZE,
  298. },
  299. };
  300. static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
  301. struct qce_device *qce)
  302. {
  303. struct qce_alg_template *tmpl;
  304. struct crypto_alg *alg;
  305. int ret;
  306. tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
  307. if (!tmpl)
  308. return -ENOMEM;
  309. alg = &tmpl->alg.crypto;
  310. snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
  311. snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  312. def->drv_name);
  313. alg->cra_blocksize = def->blocksize;
  314. alg->cra_ablkcipher.ivsize = def->ivsize;
  315. alg->cra_ablkcipher.min_keysize = def->min_keysize;
  316. alg->cra_ablkcipher.max_keysize = def->max_keysize;
  317. alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
  318. alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
  319. alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
  320. alg->cra_priority = 300;
  321. alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
  322. CRYPTO_ALG_NEED_FALLBACK;
  323. alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
  324. alg->cra_alignmask = 0;
  325. alg->cra_type = &crypto_ablkcipher_type;
  326. alg->cra_module = THIS_MODULE;
  327. alg->cra_init = qce_ablkcipher_init;
  328. alg->cra_exit = qce_ablkcipher_exit;
  329. INIT_LIST_HEAD(&alg->cra_list);
  330. INIT_LIST_HEAD(&tmpl->entry);
  331. tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
  332. tmpl->alg_flags = def->flags;
  333. tmpl->qce = qce;
  334. ret = crypto_register_alg(alg);
  335. if (ret) {
  336. kfree(tmpl);
  337. dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
  338. return ret;
  339. }
  340. list_add_tail(&tmpl->entry, &ablkcipher_algs);
  341. dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
  342. return 0;
  343. }
  344. static void qce_ablkcipher_unregister(struct qce_device *qce)
  345. {
  346. struct qce_alg_template *tmpl, *n;
  347. list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
  348. crypto_unregister_alg(&tmpl->alg.crypto);
  349. list_del(&tmpl->entry);
  350. kfree(tmpl);
  351. }
  352. }
  353. static int qce_ablkcipher_register(struct qce_device *qce)
  354. {
  355. int ret, i;
  356. for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
  357. ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
  358. if (ret)
  359. goto err;
  360. }
  361. return 0;
  362. err:
  363. qce_ablkcipher_unregister(qce);
  364. return ret;
  365. }
/* Hooks through which the qce core drives the ablkcipher algorithm class. */
const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};