keystone-sa-utils.c

  1. /*
  2. * Keystone crypto accelerator driver
  3. *
  4. * Copyright (C) 2015, 2016 Texas Instruments Incorporated - http://www.ti.com
  5. *
  6. * Authors: Sandeep Nair
  7. * Vitaly Andrianov
  8. *
  9. * Contributors: Tinku Mannan
  10. * Hao Zhang
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * version 2 as published by the Free Software Foundation.
  15. *
  16. * This program is distributed in the hope that it will be useful, but
  17. * WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19. * General Public License for more details.
  20. */
  21. #include <linux/interrupt.h>
  22. #include <linux/dmapool.h>
  23. #include <linux/delay.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/soc/ti/knav_dma.h>
  26. #include <linux/soc/ti/knav_qmss.h>
  27. #include <linux/crypto.h>
  28. #include <crypto/algapi.h>
  29. #include <crypto/aead.h>
  30. #include <crypto/internal/aead.h>
  31. #include <crypto/authenc.h>
  32. #include <crypto/des.h>
  33. #include <crypto/sha.h>
  34. #include <crypto/scatterwalk.h>
  35. #include "keystone-sa.h"
  36. #include "keystone-sa-hlp.h"
  37. #define SA_SW0_EVICT_FL_SHIFT 16
  38. #define SA_SW0_TEAR_FL_SHIFT 17
  39. #define SA_SW0_NOPAYLD_FL_SHIFT 18
  40. #define SA_SW0_CMDL_INFO_SHIFT 20
  41. #define SA_SW0_ENG_ID_SHIFT 25
  42. #define SA_SW0_CPPI_DST_INFO_PRESENT BIT(30)
  43. #define SA_CMDL_PRESENT BIT(4)
  44. #define SA_SW2_EGRESS_CPPI_FLOW_ID_SHIFT 16
  45. #define SA_SW2_EGRESS_CPPI_STATUS_LEN_SHIFT 24
  46. #define SA_CMDL_UPD_ENC 0x0001
  47. #define SA_CMDL_UPD_AUTH 0x0002
  48. #define SA_CMDL_UPD_ENC_IV 0x0004
  49. #define SA_CMDL_UPD_AUTH_IV 0x0008
  50. #define SA_CMDL_UPD_AUX_KEY 0x0010
  51. /* Command label parameters for GCM */
  52. #define SA_CMDL_UPD_ENC_SIZE 0x0080
  53. #define SA_CMDL_UPD_AAD 0x0010
  54. /* size of SCCTL structure in bytes */
  55. #define SA_SCCTL_SZ 8
  56. /* Tear down the Security Context */
  57. #define SA_SC_TEAR_RETRIES 5
  58. #define SA_SC_TEAR_DELAY 20 /* msecs */
  59. /* Algorithm interface functions & templates */
  60. struct sa_alg_tmpl {
  61. u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
  62. union {
  63. struct crypto_alg crypto;
  64. struct aead_alg aead;
  65. } alg;
  66. bool registered;
  67. };
  68. /* Number of elements in scatterlist */
  69. static int sg_count(struct scatterlist *sg, int len)
  70. {
  71. int sg_nents = 0;
  72. while (sg && (len > 0)) {
  73. sg_nents++;
  74. len -= sg->length;
  75. sg = sg_next(sg);
  76. }
  77. return sg_nents;
  78. }
  79. /* buffer capacity of scatterlist */
  80. static int sg_len(struct scatterlist *sg)
  81. {
  82. int len = 0;
  83. while (sg) {
  84. len += sg->length;
  85. sg = sg_next(sg);
  86. }
  87. return len;
  88. }
  89. /* Copy buffer contents from a list of hwdescs to the destination SG list */
  90. static int sa_hwdesc2sg_copy(struct knav_dma_desc **hwdesc,
  91. struct scatterlist *dst,
  92. unsigned int src_offset, unsigned int dst_offset,
  93. size_t len, int num)
  94. {
  95. struct scatter_walk walk;
  96. int sglen, cplen;
  97. int j = 0;
  98. sglen = hwdesc[0]->desc_info & KNAV_DMA_DESC_PKT_LEN_MASK;
  99. if (unlikely(len + src_offset > sglen)) {
  100. pr_err("[%s] src len(%d) less than (%d)\n", __func__,
  101. sglen, len + src_offset);
  102. return -EINVAL;
  103. }
  104. sglen = sg_len(dst);
  105. if (unlikely(len + dst_offset > sglen)) {
  106. pr_err("[%s] dst len(%d) less than (%d)\n", __func__,
  107. sglen, len + dst_offset);
  108. return -EINVAL;
  109. }
  110. scatterwalk_start(&walk, dst);
  111. scatterwalk_advance(&walk, dst_offset);
  112. while ((j < num) && (len > 0)) {
  113. cplen = min((int)len, (int)(hwdesc[j]->buff_len - src_offset));
  114. if (likely(cplen)) {
  115. scatterwalk_copychunks(((char *)hwdesc[j]->sw_data[0] +
  116. src_offset),
  117. &walk, cplen, 1);
  118. }
  119. len -= cplen;
  120. j++;
  121. src_offset = 0;
  122. }
  123. return 0;
  124. }
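/*
 * Copy nbytes between a linear buffer and an SG list, starting at byte
 * offset 'start' within the list: out != 0 writes buf into the list,
 * out == 0 reads from the list into buf.
 */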
  125. static void scatterwalk_copy(void *buf, struct scatterlist *sg,
  126. unsigned int start, unsigned int nbytes, int out)
  127. {
  128. struct scatter_walk walk;
  129. unsigned int offset = 0;
  130. if (!nbytes)
  131. return;
  132. for (;;) {
  133. scatterwalk_start(&walk, sg);
  134. if (start < offset + sg->length)
  135. break;
  136. offset += sg->length;
  137. sg = sg_next(sg);
  138. }
  139. scatterwalk_advance(&walk, start - offset);
  140. scatterwalk_copychunks(buf, &walk, nbytes, out);
  141. }
  142. /* Command Label Definitions and utility functions */
  143. struct sa_cmdl_cfg {
  144. int enc1st;
  145. int aalg;
  146. u8 enc_eng_id;
  147. u8 auth_eng_id;
  148. u8 iv_size;
  149. const u8 *akey;
  150. u16 akey_len;
  151. u32 salt;
  152. };
  153. /* Format general command label */
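/* Returns the command label size in bytes (a multiple of 8), or a negative error code */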
  154. static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
  155. struct sa_cmdl_upd_info *upd_info)
  156. {
  157. u8 offset = 0;
  158. u32 *word_ptr = (u32 *)cmdl;
  159. int i;
  160. int ret = 0;
  161. /* Clear the command label */
  162. memset(cmdl, 0, (SA_MAX_CMDL_WORDS * sizeof(u32)));
  163. /* Initialize the command update structure */
  164. memset(upd_info, 0, sizeof(*upd_info));
  165. upd_info->enc_size.offset = 2;
  166. upd_info->enc_size.size = 2;
  167. upd_info->enc_offset.size = 1;
  168. upd_info->enc_size2.size = 4;
  169. upd_info->auth_size.offset = 2;
  170. upd_info->auth_size.size = 2;
  171. upd_info->auth_offset.size = 1;
  172. if (cfg->aalg == SA_AALG_ID_AES_XCBC) {
  173. /* Derive K2/K3 subkeys */
  174. ret = sa_aes_xcbc_subkey(NULL, (u8 *)&upd_info->aux_key[0],
  175. (u8 *)&upd_info->aux_key[AES_BLOCK_SIZE
  176. / sizeof(u32)],
  177. cfg->akey, cfg->akey_len);
  178. if (ret)
  179. return ret;
  180. /*
  181. * Format the key into 32bit CPU words
  182. * from a big-endian stream
  183. */
  184. for (i = 0; i < SA_MAX_AUX_DATA_WORDS; i++)
  185. upd_info->aux_key[i] =
  186. be32_to_cpu(upd_info->aux_key[i]);
  187. }
  188. if (cfg->enc1st) {
  189. if (cfg->enc_eng_id != SA_ENG_ID_NONE) {
  190. upd_info->flags |= SA_CMDL_UPD_ENC;
  191. upd_info->enc_size.index = 0;
  192. upd_info->enc_offset.index = 1;
  193. if ((cfg->enc_eng_id == SA_ENG_ID_EM1) &&
  194. (cfg->auth_eng_id == SA_ENG_ID_EM1))
  195. cfg->auth_eng_id = SA_ENG_ID_EM2;
  196. /* Encryption command label */
  197. if (cfg->auth_eng_id != SA_ENG_ID_NONE)
  198. cmdl[SA_CMDL_OFFSET_NESC] = cfg->auth_eng_id;
  199. else
  200. cmdl[SA_CMDL_OFFSET_NESC] = SA_ENG_ID_OUTPORT2;
  201. /* Encryption modes requiring IV */
  202. if (cfg->iv_size) {
  203. upd_info->flags |= SA_CMDL_UPD_ENC_IV;
  204. upd_info->enc_iv.index =
  205. SA_CMDL_HEADER_SIZE_BYTES >> 2;
  206. upd_info->enc_iv.size = cfg->iv_size;
  207. cmdl[SA_CMDL_OFFSET_LABEL_LEN] =
  208. SA_CMDL_HEADER_SIZE_BYTES +
  209. cfg->iv_size;
  210. cmdl[SA_CMDL_OFFSET_OPTION_CTRL1] =
  211. (SA_CTX_ENC_AUX2_OFFSET |
  212. (cfg->iv_size >> 3));
  213. offset = SA_CMDL_HEADER_SIZE_BYTES +
  214. cfg->iv_size;
  215. } else {
  216. cmdl[SA_CMDL_OFFSET_LABEL_LEN] =
  217. SA_CMDL_HEADER_SIZE_BYTES;
  218. offset = SA_CMDL_HEADER_SIZE_BYTES;
  219. }
  220. }
  221. if (cfg->auth_eng_id != SA_ENG_ID_NONE) {
  222. upd_info->flags |= SA_CMDL_UPD_AUTH;
  223. upd_info->auth_size.index = offset >> 2;
  224. upd_info->auth_offset.index =
  225. upd_info->auth_size.index + 1;
  226. cmdl[offset + SA_CMDL_OFFSET_NESC] = SA_ENG_ID_OUTPORT2;
  227. /* Algorithm with subkeys */
  228. if ((cfg->aalg == SA_AALG_ID_AES_XCBC) ||
  229. (cfg->aalg == SA_AALG_ID_CMAC)) {
  230. upd_info->flags |= SA_CMDL_UPD_AUX_KEY;
  231. upd_info->aux_key_info.index =
  232. (offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
  233. cmdl[offset + SA_CMDL_OFFSET_LABEL_LEN] =
  234. SA_CMDL_HEADER_SIZE_BYTES + 16;
  235. cmdl[offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
  236. (SA_CTX_ENC_AUX1_OFFSET | (16 >> 3));
  237. offset += SA_CMDL_HEADER_SIZE_BYTES + 16;
  238. } else {
  239. cmdl[offset + SA_CMDL_OFFSET_LABEL_LEN] =
  240. SA_CMDL_HEADER_SIZE_BYTES;
  241. offset += SA_CMDL_HEADER_SIZE_BYTES;
  242. }
  243. }
  244. } else {
  245. /* Auth first */
  246. if (cfg->auth_eng_id != SA_ENG_ID_NONE) {
  247. upd_info->flags |= SA_CMDL_UPD_AUTH;
  248. upd_info->auth_size.index = 0;
  249. upd_info->auth_offset.index = 1;
  250. if ((cfg->auth_eng_id == SA_ENG_ID_EM1) &&
  251. (cfg->enc_eng_id == SA_ENG_ID_EM1))
  252. cfg->enc_eng_id = SA_ENG_ID_EM2;
  253. /* Authentication command label */
  254. if (cfg->enc_eng_id != SA_ENG_ID_NONE)
  255. cmdl[SA_CMDL_OFFSET_NESC] = cfg->enc_eng_id;
  256. else
  257. cmdl[SA_CMDL_OFFSET_NESC] = SA_ENG_ID_OUTPORT2;
  258. /* Algorithm with subkeys */
  259. if ((cfg->aalg == SA_AALG_ID_AES_XCBC) ||
  260. (cfg->aalg == SA_AALG_ID_CMAC)) {
  261. upd_info->flags |= SA_CMDL_UPD_AUX_KEY;
  262. upd_info->aux_key_info.index =
  263. (SA_CMDL_HEADER_SIZE_BYTES) >> 2;
  264. cmdl[SA_CMDL_OFFSET_LABEL_LEN] =
  265. SA_CMDL_HEADER_SIZE_BYTES + 16;
  266. cmdl[offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
  267. (SA_CTX_ENC_AUX1_OFFSET | (16 >> 3));
  268. offset = SA_CMDL_HEADER_SIZE_BYTES + 16;
  269. } else {
  270. cmdl[SA_CMDL_OFFSET_LABEL_LEN] =
  271. SA_CMDL_HEADER_SIZE_BYTES;
  272. offset = SA_CMDL_HEADER_SIZE_BYTES;
  273. }
  274. }
  275. if (cfg->enc_eng_id != SA_ENG_ID_NONE) {
  276. upd_info->flags |= SA_CMDL_UPD_ENC;
  277. upd_info->enc_size.index = offset >> 2;
  278. upd_info->enc_offset.index =
  279. upd_info->enc_size.index + 1;
  280. cmdl[offset + SA_CMDL_OFFSET_NESC] = SA_ENG_ID_OUTPORT2;
  281. /* Encryption modes requiring IV */
  282. if (cfg->iv_size) {
  283. upd_info->flags |= SA_CMDL_UPD_ENC_IV;
  284. upd_info->enc_iv.index =
  285. (offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
  286. upd_info->enc_iv.size = cfg->iv_size;
  287. cmdl[offset + SA_CMDL_OFFSET_LABEL_LEN] =
  288. SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
  289. cmdl[offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
  290. (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
  291. offset += SA_CMDL_HEADER_SIZE_BYTES +
  292. cfg->iv_size;
  293. } else {
  294. cmdl[offset + SA_CMDL_OFFSET_LABEL_LEN] =
  295. SA_CMDL_HEADER_SIZE_BYTES;
  296. offset += SA_CMDL_HEADER_SIZE_BYTES;
  297. }
  298. }
  299. }
  300. offset = roundup(offset, 8);
  301. for (i = 0; i < offset / 4; i++)
  302. word_ptr[i] = be32_to_cpu(word_ptr[i]);
  303. return offset;
  304. }
  305. /*
  306. * Format GCM command label
  307. *
  308. * 1-Command Header (4 Bytes)
  309. * - NESC (1 byte)
  310. * - Cmdl Len (1 byte)
  311. * - Payload Size (2 bytes)
  312. *
  313. * 2 - Control information (4 bytes)
  314. * - Offset (1 byte)
  315. * - Opt Ctrl1 (1 byte)
  316. * - Opt Ctrl2 (1 byte)
  317. * - Opt Ctrl3 (1 byte)
  318. *
  319. * 3 - Option 1 - Total Encryption Length (8 bytes)
  320. *
  321. * 4 - Option 2: AAD (16 bytes)
  322. *
  323. * 5 - Option 3: AES-CTR IV (salt (4 bytes) | IV (8 bytes) | 1)
  324. */
  325. static int sa_format_cmdl_gcm(struct sa_cmdl_cfg *cfg, u8 *cmdl,
  326. struct sa_cmdl_upd_info *upd_info)
  327. {
  328. u8 offset = 0;
  329. u32 *word_ptr = (u32 *)cmdl;
  330. int i;
  331. /* Clear the command label */
  332. memset(cmdl, 0, (SA_MAX_CMDL_WORDS * sizeof(u32)));
  333. if (upd_info->submode == SA_MODE_GCM) {
  334. /* Construct Command label header */
  335. cmdl[SA_CMDL_OFFSET_NESC] = SA_ENG_ID_FINAL;
  336. cmdl[SA_CMDL_OFFSET_LABEL_LEN] = SA_GCM_SIZE;
  337. cmdl[SA_CMDL_OFFSET_OPTION_CTRL1] = SA_GCM_OPT1;
  338. cmdl[SA_CMDL_OFFSET_OPTION_CTRL2] = SA_GCM_OPT2;
  339. cmdl[SA_CMDL_OFFSET_OPTION_CTRL3] = SA_GCM_OPT3;
  340. /* Option 1: Total Encryption Length (8 bytes) */
  341. /* Option 2: AAD (16 bytes) */
  342. /* Option 3: AES-CTR IV (salt (4 bytes) | IV (8 bytes) | 0x1) */
  343. /* Fill in the Salt Value */
  344. word_ptr[8] = cfg->salt;
  345. /*
  346. * Format the Command label into 32bit CPU words
  347. * from a big-endian stream
  348. */
  349. offset = roundup(SA_GCM_SIZE, 8);
  350. for (i = 0; i < offset/4; i++)
  351. word_ptr[i] = be32_to_cpu(word_ptr[i]);
  352. word_ptr[11] = 1;
  353. return offset;
  354. } else if (upd_info->submode == SA_MODE_GMAC) {
  355. /* Construct Command label header */
  356. cmdl[SA_CMDL_OFFSET_NESC] = SA_ENG_ID_FINAL;
  357. cmdl[SA_CMDL_OFFSET_LABEL_LEN] = SA_GMAC_SIZE;
  358. cmdl[SA_CMDL_OFFSET_OPTION_CTRL1] = SA_GMAC_OPT1;
  359. cmdl[SA_CMDL_OFFSET_OPTION_CTRL2] = SA_GMAC_OPT2;
  360. cmdl[SA_CMDL_OFFSET_OPTION_CTRL3] = SA_GMAC_OPT3;
  361. /* Option 1: Total Authentication + Payload Length (8 bytes) */
  362. /* Option 2: AAD | Payload (16 bytes) */
  363. /* Option 3: AES-CTR IV (salt (4 bytes) | IV (8 bytes) | 0x1) */
  364. /* Fill in the Salt Value */
  365. word_ptr[8] = cfg->salt;
  366. /*
  367. * Format the Command label into 32bit CPU words
  368. * from a big-endian stream
  369. */
  370. offset = roundup(SA_GMAC_SIZE, 8);
  371. for (i = 0; i < offset/4; i++)
  372. word_ptr[i] = be32_to_cpu(word_ptr[i]);
  373. word_ptr[11] = 1;
  374. return offset;
  375. }
  376. dev_err(sa_ks2_dev, "(%s): Unsupported mode\n", __func__);
  377. return -1;
  378. }
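/* Copy a 64- or 128-bit IV into the command label as big-endian 32-bit words */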
  379. static inline void sa_copy_iv(u32 *out, const u8 *iv, bool size16)
  380. {
  381. int j;
  382. for (j = 0; j < ((size16) ? 4 : 2); j++) {
  383. *out = cpu_to_be32(*((u32 *)iv));
  384. iv += 4;
  385. out++;
  386. }
  387. }
  388. /* Update Command label */
  389. static inline void
  390. sa_update_cmdl(struct device *dev, u8 enc_offset, u16 enc_size, u8 *enc_iv,
  391. u16 auth_size, u8 *auth_iv, u8 aad_size,
  392. u8 *aad, struct sa_cmdl_upd_info *upd_info, u32 *cmdl)
  393. {
  394. switch (upd_info->submode) {
  395. case SA_MODE_GEN:
  396. if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
  397. cmdl[upd_info->enc_size.index] &= 0xffff0000;
  398. cmdl[upd_info->enc_size.index] |= enc_size;
  399. cmdl[upd_info->enc_offset.index] &= 0x00ffffff;
  400. cmdl[upd_info->enc_offset.index] |=
  401. ((u32)enc_offset << 24);
  402. if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
  403. sa_copy_iv(&cmdl[upd_info->enc_iv.index],
  404. enc_iv,
  405. (upd_info->enc_iv.size > 8));
  406. }
  407. }
  408. if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
  409. cmdl[upd_info->auth_size.index] &= 0xffff0000;
  410. cmdl[upd_info->auth_size.index] |= auth_size;
  411. cmdl[upd_info->auth_offset.index] &= 0x00ffffff;
  412. cmdl[upd_info->auth_offset.index] |= 0;
  413. if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
  414. sa_copy_iv(&cmdl[upd_info->auth_iv.index],
  415. auth_iv,
  416. (upd_info->auth_iv.size > 8));
  417. }
  418. if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
  419. int offset = (auth_size & 0xF) ? 4 : 0;
  420. memcpy(&cmdl[upd_info->aux_key_info.index],
  421. &upd_info->aux_key[offset], 16);
  422. }
  423. }
  424. break;
  425. case SA_MODE_GCM:
  426. /* Update Command label header (8 bytes) */
  427. cmdl[0] |= enc_size;
  428. cmdl[1] |= (enc_offset << 24);
  429. /* Option 1: Store encryption length (8 byte) */
  430. cmdl[3] |= (enc_size << 3);
  431. /* Option 2: Store AAD with zero padding (16 bytes) */
  432. cmdl[4] = SA_MK_U32(aad[0], aad[1], aad[2], aad[3]);
  433. cmdl[5] = SA_MK_U32(aad[4], aad[5], aad[6], aad[7]);
  434. /* ESN */
  435. if (aad_size == 12) {
  436. cmdl[6] =
  437. SA_MK_U32(aad[8], aad[9], aad[10], aad[11]);
  438. }
  439. /* Option 3: AES CTR IV (salt|IV|1) */
  440. cmdl[9] = SA_MK_U32(enc_iv[0], enc_iv[1], enc_iv[2], enc_iv[3]);
  441. cmdl[10] = SA_MK_U32(enc_iv[4], enc_iv[5], enc_iv[6], enc_iv[7]);
  442. break;
  443. case SA_MODE_GMAC:
  444. /* Update Command label header (8 bytes) */
  445. /* Auth offset - 16 bytes */
  446. cmdl[1] |= (16 << 24);
  447. /* Option 1: Store Authentication length (8 byte) */
  448. cmdl[3] |= (auth_size << 3);/* Payload Length + AAD + IV */
  449. /* Option 2: Store AAD with zero padding (16 bytes) */
  450. cmdl[4] = SA_MK_U32(aad[0], aad[1], aad[2], aad[3]);
  451. cmdl[5] = SA_MK_U32(aad[4], aad[5], aad[6], aad[7]);
  452. /* ESN */
  453. if (aad_size == 12) {
  454. /* Payload Length + Remaining IV Size */
  455. cmdl[0] |= enc_size + 4;
  456. cmdl[6] = SA_MK_U32(aad[8], aad[9], aad[10], aad[11]);
  457. cmdl[7] = SA_MK_U32(enc_iv[0], enc_iv[1],
  458. enc_iv[2], enc_iv[3]);
  459. } else {
  460. /* Payload Length */
  461. cmdl[0] |= enc_size;
  462. /* Append IV */
  463. cmdl[6] = SA_MK_U32(enc_iv[0], enc_iv[1],
  464. enc_iv[2], enc_iv[3]);
  465. cmdl[7] = SA_MK_U32(enc_iv[4], enc_iv[5],
  466. enc_iv[6], enc_iv[7]);
  467. }
  468. /* Option 3: AES CTR IV (salt|IV|1) */
  469. cmdl[9] = SA_MK_U32(enc_iv[0], enc_iv[1],
  470. enc_iv[2], enc_iv[3]);
  471. cmdl[10] = SA_MK_U32(enc_iv[4], enc_iv[5],
  472. enc_iv[6], enc_iv[7]);
  473. break;
  474. case SA_MODE_CCM:
  475. default:
  476. dev_err(dev, "unsupported mode(%d)\n", upd_info->submode);
  477. break;
  478. }
  479. }
  480. /* Format SWINFO words to be sent to SA */
  481. static
  482. void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
  483. u8 cmdl_present, u8 cmdl_offset, u8 flags, u16 queue_id,
  484. u8 flow_id, u8 hash_size, u32 *swinfo)
  485. {
  486. swinfo[0] = sc_id;
  487. swinfo[0] |= (flags << SA_SW0_EVICT_FL_SHIFT);
  488. if (likely(cmdl_present))
  489. swinfo[0] |= ((cmdl_offset | SA_CMDL_PRESENT) <<
  490. SA_SW0_CMDL_INFO_SHIFT);
  491. swinfo[0] |= (eng_id << SA_SW0_ENG_ID_SHIFT);
  492. swinfo[0] |= SA_SW0_CPPI_DST_INFO_PRESENT;
  493. swinfo[1] = sc_phys;
  494. swinfo[2] = (queue_id | (flow_id << SA_SW2_EGRESS_CPPI_FLOW_ID_SHIFT) |
  495. (hash_size << SA_SW2_EGRESS_CPPI_STATUS_LEN_SHIFT));
  496. }
  497. /* Security context creation functions */
  498. /* Dump the security context */
  499. static void sa_dump_sc(u8 *buf, u32 dma_addr)
  500. {
  501. #ifdef DEBUG
  502. dev_info(sa_ks2_dev, "Security context dump for %p:\n",
  503. (void *)dma_addr);
  504. print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
  505. 16, 1, buf, SA_CTX_MAX_SZ, false);
  506. #endif
  507. }
  508. /* Initialize Security context */
  509. static
  510. int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
  511. u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
  512. const char *cra_name, u8 enc,
  513. u32 *swinfo)
  514. {
  515. struct sa_eng_info *enc_eng, *auth_eng;
  516. int ealg_id, aalg_id, use_enc = 0;
  517. int enc_sc_offset, auth_sc_offset;
  518. u8 php_f, php_e, eng0_f, eng1_f;
  519. u8 *sc_buf = ctx->sc;
  520. u16 sc_id = ctx->sc_id;
  521. u16 aad_len = 0; /* Currently not supporting AEAD algo */
  522. u8 first_engine;
  523. u8 hash_size;
  524. int ret = 0;
  525. memset(sc_buf, 0, SA_CTX_MAX_SZ);
  526. sa_conv_calg_to_salg(cra_name, &ealg_id, &aalg_id);
  527. enc_eng = sa_get_engine_info(ealg_id);
  528. auth_eng = sa_get_engine_info(aalg_id);
  529. if (!enc_eng->sc_size && !auth_eng->sc_size)
  530. return -EINVAL;
  531. if (auth_eng->eng_id <= SA_ENG_ID_EM2)
  532. use_enc = 1;
  533. /* Determine the order of encryption & Authentication contexts */
  534. if (enc || !use_enc) {
  535. if (aalg_id == SA_AALG_ID_GMAC) {
  536. eng0_f = SA_CTX_SIZE_TO_DMA_SIZE(auth_eng->sc_size);
  537. eng1_f = SA_CTX_SIZE_TO_DMA_SIZE(enc_eng->sc_size);
  538. } else {
  539. eng0_f = SA_CTX_SIZE_TO_DMA_SIZE(enc_eng->sc_size);
  540. eng1_f = SA_CTX_SIZE_TO_DMA_SIZE(auth_eng->sc_size);
  541. }
  542. enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  543. auth_sc_offset = enc_sc_offset + enc_eng->sc_size;
  544. } else {
  545. eng0_f = SA_CTX_SIZE_TO_DMA_SIZE(auth_eng->sc_size);
  546. eng1_f = SA_CTX_SIZE_TO_DMA_SIZE(enc_eng->sc_size);
  547. auth_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  548. enc_sc_offset = auth_sc_offset + auth_eng->sc_size;
  549. }
  550. php_f = SA_CTX_DMA_SIZE_64;
  551. php_e = SA_CTX_DMA_SIZE_64;
  552. /* SCCTL Owner info: 0=host, 1=CP_ACE */
  553. sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
  554. /* SCCTL F/E control */
  555. sc_buf[1] = SA_CTX_SCCTL_MK_DMA_INFO(php_f, eng0_f, eng1_f, php_e);
  556. memcpy(&sc_buf[2], &sc_id, 2);
  557. memcpy(&sc_buf[4], &ctx->sc_phys, 4);
  558. /* Initialize the rest of PHP context */
  559. memset(sc_buf + SA_SCCTL_SZ, 0, SA_CTX_PHP_PE_CTX_SZ - SA_SCCTL_SZ);
  560. /* Prepare context for encryption engine */
  561. if (enc_eng->sc_size) {
  562. ret = sa_set_sc_enc(ealg_id, enc_key, enc_key_sz, aad_len,
  563. enc, &sc_buf[enc_sc_offset]);
  564. if (ret)
  565. return ret;
  566. }
  567. /* Prepare context for authentication engine */
  568. if (auth_eng->sc_size) {
  569. if (use_enc) {
  570. if (sa_set_sc_enc(aalg_id, auth_key, auth_key_sz,
  571. aad_len, 0, &sc_buf[auth_sc_offset]))
  572. return -1;
  573. } else
  574. sa_set_sc_auth(aalg_id, auth_key, auth_key_sz,
  575. &sc_buf[auth_sc_offset]);
  576. }
  577. /* Set the ownership of context to CP_ACE */
  578. sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
  579. /* swizzle the security context */
  580. sa_swiz_128(sc_buf, sc_buf, SA_CTX_MAX_SZ);
  581. /* Setup SWINFO */
  582. if (ealg_id == SA_EALG_ID_GCM) {
  583. /* For GCM enc and dec performed by same engine */
  584. first_engine = enc_eng->eng_id;
  585. } else if ((ealg_id == SA_EALG_ID_NULL) ||
  586. (ealg_id == SA_EALG_ID_NONE))
  587. first_engine = auth_eng->eng_id;
  588. else
  589. first_engine = enc ? enc_eng->eng_id : auth_eng->eng_id;
  590. hash_size = AES_BLOCK_SIZE;
  591. if (aalg_id != SA_AALG_ID_NONE) {
  592. hash_size = sa_get_hash_size(aalg_id);
  593. if (!hash_size)
  594. return -EINVAL;
  595. }
  596. /* Round up the tag size to multiple of 8 */
  597. hash_size = roundup(hash_size, 8);
  598. sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
  599. /*
  600. * For run-time self tests in the cryptographic
  601. * algorithm manager framework the EVICT flag is required.
  602. * EVICT is also required if the key gets changed for the context.
  603. */
  604. SA_SW_INFO_FLAG_EVICT,
  605. ctx->rx_compl_qid, ctx->rx_flow, hash_size, swinfo);
  606. sa_dump_sc(sc_buf, ctx->sc_phys);
  607. return 0;
  608. }
  609. static int sa_tear_sc(struct sa_ctx_info *ctx,
  610. struct keystone_crypto_data *pdata)
  611. {
  612. struct device *dev = &pdata->pdev->dev;
  613. int own_off, cnt = SA_SC_TEAR_RETRIES;
  614. struct knav_dma_desc *hwdesc;
  615. struct sa_dma_req_ctx *dma_ctx;
  616. int ret = 0;
  617. u32 packet_info;
  618. int j;
  619. dma_addr_t dma_addr;
  620. u32 dma_sz;
  621. dma_ctx = kmem_cache_alloc(pdata->dma_req_ctx_cache, GFP_KERNEL);
  622. if (!dma_ctx) {
  623. ret = -ENOMEM;
  624. goto err;
  625. }
  626. dma_ctx->dev_data = pdata;
  627. dma_ctx->pkt = false;
  628. sa_set_swinfo(SA_ENG_ID_OUTPORT2, ctx->sc_id, ctx->sc_phys, 0, 0,
  629. (SA_SW_INFO_FLAG_TEAR | SA_SW_INFO_FLAG_EVICT |
  630. SA_SW_INFO_FLAG_NOPD),
  631. ctx->rx_compl_qid, ctx->rx_flow, 0, &ctx->epib[1]);
  632. ctx->epib[0] = 0;
  633. /* map the packet */
  634. packet_info = KNAV_DMA_DESC_HAS_EPIB |
  635. (pdata->tx_compl_qid << KNAV_DMA_DESC_RETQ_SHIFT);
  636. hwdesc = knav_pool_desc_get(pdata->tx_pool);
  637. if (IS_ERR_OR_NULL(hwdesc)) {
  638. dev_dbg(dev, "out of tx pool desc\n");
  639. ret = -ENOBUFS;
  640. goto err;
  641. }
  642. memset(hwdesc, 0, sizeof(struct knav_dma_desc));
  643. for (j = 0; j < 4; j++)
  644. hwdesc->epib[j] = ctx->epib[j];
  645. hwdesc->packet_info = packet_info;
  646. knav_pool_desc_map(pdata->tx_pool, hwdesc, sizeof(hwdesc),
  647. &dma_addr, &dma_sz);
  648. hwdesc->sw_data[0] = (u32)dma_addr;
  649. hwdesc->sw_data[1] = dma_sz;
  650. hwdesc->sw_data[2] = (u32)dma_ctx;
  651. knav_queue_push(pdata->tx_submit_q, dma_addr,
  652. sizeof(struct knav_dma_desc), 0);
  653. /*
  654. * Check that CP_ACE has released the context
  655. * by making sure that the owner bit is 0
  656. */
  657. /*
  658. * Security context had been swizzled by 128 bits
  659. * before handing to CP_ACE
  660. */
  661. own_off = ((SA_CTX_SCCTL_OWNER_OFFSET / 16) * 16)
  662. + (15 - (SA_CTX_SCCTL_OWNER_OFFSET % 16));
  663. while (__raw_readb(&ctx->sc[own_off])) {
  664. if (!--cnt)
  665. return -EAGAIN;
  666. msleep_interruptible(SA_SC_TEAR_DELAY);
  667. }
  668. return 0;
  669. err:
  670. atomic_inc(&pdata->stats.sc_tear_dropped);
  671. if (dma_ctx)
  672. kmem_cache_free(pdata->dma_req_ctx_cache, dma_ctx);
  673. return ret;
  674. }
  675. /* Free the per direction context memory */
  676. static int sa_free_ctx_info(struct sa_ctx_info *ctx,
  677. struct keystone_crypto_data *data)
  678. {
  679. unsigned long bn;
  680. int ret = 0;
  681. ret = sa_tear_sc(ctx, data);
  682. if (ret) {
  683. dev_err(sa_ks2_dev,
  684. "Failed to tear down context id(%x)\n", ctx->sc_id);
  685. return ret;
  686. }
  687. bn = ctx->sc_id - data->sc_id_start;
  688. spin_lock(&data->scid_lock);
  689. __clear_bit(bn, data->ctx_bm);
  690. data->sc_id--;
  691. spin_unlock(&data->scid_lock);
  692. if (ctx->sc) {
  693. dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
  694. ctx->sc = NULL;
  695. }
  696. return 0;
  697. }
  698. /* Initialize the per direction context memory */
  699. static int sa_init_ctx_info(struct sa_ctx_info *ctx,
  700. struct keystone_crypto_data *data)
  701. {
  702. unsigned long bn;
  703. int err;
  704. spin_lock(&data->scid_lock);
  705. if (data->sc_id > data->sc_id_end) {
  706. spin_unlock(&data->scid_lock);
  707. dev_err(&data->pdev->dev, "Out of SC IDs\n");
  708. return -ENOMEM;
  709. }
  710. bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
  711. __set_bit(bn, data->ctx_bm);
  712. data->sc_id++;
  713. spin_unlock(&data->scid_lock);
  714. ctx->sc_id = (u16)(data->sc_id_start + bn);
  715. ctx->rx_flow = knav_dma_get_flow(data->rx_chan);
  716. ctx->rx_compl_qid = data->rx_compl_qid;
  717. ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
  718. if (!ctx->sc) {
  719. dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
  720. err = -ENOMEM;
  721. goto scid_rollback;
  722. }
  723. return 0;
  724. scid_rollback:
  725. spin_lock(&data->scid_lock);
  726. __clear_bit(bn, data->ctx_bm);
  727. data->sc_id--;
  728. spin_unlock(&data->scid_lock);
  729. return err;
  730. }
  731. /* Initialize TFM context */
  732. static int sa_init_tfm(struct crypto_tfm *tfm)
  733. {
  734. struct crypto_alg *alg = tfm->__crt_alg;
  735. struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  736. struct keystone_crypto_data *data = dev_get_drvdata(sa_ks2_dev);
  737. int ret;
  738. if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AEAD) {
  739. memset(ctx, 0, sizeof(*ctx));
  740. ctx->dev_data = data;
  741. ret = sa_init_ctx_info(&ctx->enc, data);
  742. if (ret)
  743. return ret;
  744. ret = sa_init_ctx_info(&ctx->dec, data);
  745. if (ret) {
  746. sa_free_ctx_info(&ctx->enc, data);
  747. return ret;
  748. }
  749. }
  750. dev_dbg(sa_ks2_dev, "%s(0x%p) sc-ids(0x%x(0x%x), 0x%x(0x%x))\n",
  751. __func__, tfm, ctx->enc.sc_id, ctx->enc.sc_phys,
  752. ctx->dec.sc_id, ctx->dec.sc_phys);
  753. return 0;
  754. }
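/* Copy the AAD (assoclen minus the IV size) from the head of the source SG list */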
  755. static int sa_gcm_get_aad(struct aead_request *req, u8 *aad, u8 *aad_len)
  756. {
  757. struct scatter_walk walk;
  758. int ret = 0;
  759. *aad_len = req->assoclen - crypto_aead_ivsize(crypto_aead_reqtfm(req));
  760. scatterwalk_start(&walk, req->src);
  761. scatterwalk_copychunks(aad, &walk, *aad_len, 0);
  762. scatterwalk_done(&walk, 0, 0);
  763. return ret;
  764. }
  765. /* Algorithm init */
  766. static int sa_cra_init_aead(struct crypto_aead *tfm)
  767. {
  768. return sa_init_tfm(crypto_aead_tfm(tfm));
  769. }
  770. /* Algorithm context teardown */
  771. static void sa_exit_tfm(struct crypto_tfm *tfm)
  772. {
  773. struct crypto_alg *alg = tfm->__crt_alg;
  774. struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  775. struct keystone_crypto_data *data = dev_get_drvdata(sa_ks2_dev);
  776. dev_dbg(sa_ks2_dev, "%s(0x%p) sc-ids(0x%x(0x%x), 0x%x(0x%x))\n",
  777. __func__, tfm, ctx->enc.sc_id, ctx->enc.sc_phys,
  778. ctx->dec.sc_id, ctx->dec.sc_phys);
  779. if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK)
  780. == CRYPTO_ALG_TYPE_AEAD) {
  781. sa_free_ctx_info(&ctx->enc, data);
  782. sa_free_ctx_info(&ctx->dec, data);
  783. }
  784. }
  785. static void sa_exit_tfm_aead(struct crypto_aead *tfm)
  786. {
  787. return sa_exit_tfm(crypto_aead_tfm(tfm));
  788. }
  789. /* AEAD algorithm configuration interface function */
  790. static int sa_aead_setkey(struct crypto_aead *authenc,
  791. const u8 *key, unsigned int keylen)
  792. {
  793. struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
  794. struct crypto_authenc_keys keys;
  795. const char *cra_name;
  796. struct sa_eng_info *enc_eng, *auth_eng;
  797. int ealg_id, aalg_id, cmdl_len;
  798. struct sa_cmdl_cfg cfg;
  799. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  800. goto badkey;
  801. cra_name = crypto_tfm_alg_name(crypto_aead_tfm(authenc));
  802. sa_conv_calg_to_salg(cra_name, &ealg_id, &aalg_id);
  803. enc_eng = sa_get_engine_info(ealg_id);
  804. auth_eng = sa_get_engine_info(aalg_id);
  805. memset(&cfg, 0, sizeof(cfg));
  806. cfg.enc1st = 1;
  807. cfg.aalg = aalg_id;
  808. cfg.enc_eng_id = enc_eng->eng_id;
  809. cfg.auth_eng_id = auth_eng->eng_id;
  810. cfg.iv_size = crypto_aead_ivsize(authenc);
  811. cfg.akey = keys.authkey;
  812. cfg.akey_len = keys.authkeylen;
  813. /* Setup Encryption Security Context & Command label template */
  814. if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
  815. keys.authkey, keys.authkeylen,
  816. cra_name, 1, &ctx->enc.epib[1]))
  817. goto badkey;
  818. cmdl_len = sa_format_cmdl_gen(&cfg,
  819. (u8 *)ctx->enc.cmdl,
  820. &ctx->enc.cmdl_upd_info);
  821. if ((cmdl_len <= 0) || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  822. goto badkey;
  823. ctx->enc.cmdl_size = cmdl_len;
  824. /* Setup Decryption Security Context & Command label template */
  825. if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
  826. keys.authkey, keys.authkeylen,
  827. cra_name, 0, &ctx->dec.epib[1]))
  828. goto badkey;
  829. cfg.enc1st = 0;
  830. cfg.enc_eng_id = enc_eng->eng_id;
  831. cfg.auth_eng_id = auth_eng->eng_id;
  832. cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
  833. &ctx->dec.cmdl_upd_info);
  834. if ((cmdl_len <= 0) || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  835. goto badkey;
  836. ctx->dec.cmdl_size = cmdl_len;
  837. return 0;
  838. badkey:
  839. dev_err(sa_ks2_dev, "%s: badkey\n", __func__);
  840. crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  841. return -EINVAL;
  842. }
  843. /**
  844. * sa_prepare_tx_desc() - prepare a chain of tx descriptors
  845. * @pdata: struct keystone_crypto_data pointer
  846. * @_sg: struct scatterlist source list
  847. * @num_sg: number of buffers in the _sg list
  848. * @pslen: length of protocol specific data
  849. * @psdata: pointer to the protocol specific data
  850. * @epiblen: EPIB length
  851. * @epib: pointer to EPIB (extended packet info block)
  852. * @ctx: struct sa_dma_req_ctx pointer
  853. *
  854. * For each buffer in the source _sg list, the function gets a hardware
  855. * descriptor from tx_pool, fills in the buffer descriptor fields and maps the
  856. * descriptor. For the first descriptor (the packet descriptor) it also sets
  857. * the psinfo and epib fields.
  858. *
  859. * Return: DMA address of the first descriptor on success, 0 otherwise.
  860. */
  861. static dma_addr_t
  862. sa_prepare_tx_desc(struct keystone_crypto_data *pdata, struct scatterlist *_sg,
  863. int num_sg, u32 pslen, u32 *psdata,
  864. u32 epiblen, u32 *epib, struct sa_dma_req_ctx *ctx)
  865. {
  866. struct device *dev = &pdata->pdev->dev;
  867. struct knav_dma_desc *hwdesc = NULL;
  868. struct scatterlist *sg = _sg;
  869. u32 packet_len = 0;
  870. u32 nsg;
  871. u32 next_desc = 0;
  872. u32 packet_info;
  873. packet_info = KNAV_DMA_DESC_HAS_EPIB |
  874. ((pslen / sizeof(u32)) << KNAV_DMA_DESC_PSLEN_SHIFT) |
  875. (pdata->tx_compl_qid << KNAV_DMA_DESC_RETQ_SHIFT);
  876. for (sg += num_sg - 1, nsg = num_sg; nsg > 0; sg--, nsg--) {
  877. u32 buflen, orig_len;
  878. int i;
  879. dma_addr_t dma_addr;
  880. u32 dma_sz;
  881. u32 *out, *in;
  882. hwdesc = knav_pool_desc_get(pdata->tx_pool);
  883. if (IS_ERR_OR_NULL(hwdesc)) {
  884. dev_dbg(dev, "out of tx pool desc\n");
  885. return 0;
  886. }
  887. buflen = sg_dma_len(sg) & KNAV_DMA_DESC_PKT_LEN_MASK;
  888. orig_len = buflen;
  889. packet_len += buflen;
  890. if (nsg == 1) { /* extra fields for the packet descriptor */
  891. for (out = hwdesc->epib, in = epib, i = 0;
  892. i < epiblen / sizeof(u32); i++)
  893. *out++ = *in++;
  894. for (out = hwdesc->psdata, in = psdata, i = 0;
  895. i < pslen / sizeof(u32); i++)
  896. *out++ = *in++;
  897. }
  898. hwdesc->desc_info = packet_len;
  899. hwdesc->tag_info = 0;
  900. hwdesc->packet_info = packet_info;
  901. hwdesc->buff_len = buflen;
  902. hwdesc->buff = sg_dma_address(sg);
  903. hwdesc->next_desc = next_desc;
  904. hwdesc->orig_len = orig_len;
  905. hwdesc->orig_buff = sg_dma_address(sg);
  906. knav_pool_desc_map(pdata->tx_pool, hwdesc, sizeof(hwdesc),
  907. &dma_addr, &dma_sz);
  908. hwdesc->sw_data[0] = (u32)dma_addr;
  909. hwdesc->sw_data[1] = dma_sz;
  910. hwdesc->sw_data[2] = (u32)ctx;
  911. next_desc = (u32)dma_addr;
  912. }
  913. return (unlikely(!hwdesc)) ? 0 : hwdesc->sw_data[0];
  914. }
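/*
 * Pop completed descriptor chains from the TX completion queue, return the
 * descriptors to tx_pool, unmap the source SG list, update the DMA
 * descriptor and packet counters and free the request context.
 */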
  915. void sa_tx_completion_process(struct keystone_crypto_data *dev_data)
  916. {
  917. struct knav_dma_desc *hwdesc = NULL;
  918. dma_addr_t dma;
  919. struct sa_dma_req_ctx *ctx = NULL;
  920. u32 pkt_len;
  921. u32 calc_pkt_len;
  922. for (;;) {
  923. dma = knav_queue_pop(dev_data->tx_compl_q, NULL);
  924. if (!dma) {
  925. dev_dbg(sa_ks2_dev, "no desc in the queue %d\n",
  926. dev_data->tx_compl_qid);
  927. break;
  928. }
  929. ctx = NULL;
  930. pkt_len = 0;
  931. calc_pkt_len = 0;
  932. do {
  933. hwdesc = knav_pool_desc_unmap(dev_data->tx_pool, dma,
  934. sizeof(hwdesc));
  935. if (!hwdesc) {
  936. pr_err("failed to unmap descriptor 0x%08x\n",
  937. dma);
  938. break;
  939. }
  940. /* take the req_ctx from the first descriptor */
  941. if (!ctx) {
  942. ctx = (struct sa_dma_req_ctx
  943. *)hwdesc->sw_data[2];
  944. pkt_len = hwdesc->desc_info &
  945. KNAV_DMA_DESC_PKT_LEN_MASK;
  946. }
  947. calc_pkt_len += hwdesc->buff_len;
  948. dma = hwdesc->next_desc;
  949. knav_pool_desc_put(dev_data->tx_pool, hwdesc);
  950. } while (dma);
  951. #ifdef DEBUG
  952. if (pkt_len != calc_pkt_len)
  953. pr_err("[%s] calculated packet length doesn't match %d/%d\n",
  954. __func__, calc_pkt_len, pkt_len);
  955. #endif
  956. if ((pkt_len > 0) && ctx) {
  957. dma_unmap_sg(&ctx->dev_data->pdev->dev, ctx->src,
  958. ctx->src_nents, DMA_TO_DEVICE);
  959. if (likely(ctx->pkt)) {
  960. atomic_add(ctx->src_nents,
  961. &ctx->dev_data->tx_dma_desc_cnt);
  962. atomic_inc(&ctx->dev_data->stats.tx_pkts);
  963. }
  964. }
  965. if (ctx)
  966. kmem_cache_free(ctx->dev_data->dma_req_ctx_cache, ctx);
  967. }
  968. }
  969. /**
  970. * sa_rx_desc_process() - process descriptors related
  971. * to one transformation received from SA
  972. *
  973. * @dev_data: struct keystone_crypto_data pointer
  974. * @hwdesc: array of pointers to descriptors
  975. * @num: number of descriptors in the array
  976. *
  977. * From the first descriptor, which is a packet descriptor, the function
  978. * retrieves all algorithm parameters, including the pointer to the original
  979. * request. If the transformation was an encryption, it copies the calculated
  980. * authentication tag to the destination list; otherwise it compares the
  981. * received tag with the calculated one.
  982. *
  983. * After that it copies all buffers from the hw descriptors to the destination
  984. * list and calls the aead_request_complete() callback.
  985. * At the end the function frees all buffers.
  986. */
  987. static
  988. void sa_rx_desc_process(struct keystone_crypto_data *dev_data,
  989. struct knav_dma_desc **hwdesc, int num)
  990. {
  991. int j;
  992. unsigned int alg_type;
  993. u32 req_sub_type;
  994. alg_type = hwdesc[0]->psdata[0] & CRYPTO_ALG_TYPE_MASK;
  995. req_sub_type = hwdesc[0]->psdata[0] >> SA_REQ_SUBTYPE_SHIFT;
  996. if (likely(alg_type == CRYPTO_ALG_TYPE_AEAD)) {
  997. int auth_words, auth_size, enc_len, enc_offset, i;
  998. struct aead_request *req;
  999. struct crypto_aead *tfm;
  1000. int enc, err = 0;
  1001. unsigned int ivsize;
  1002. req = (struct aead_request *)hwdesc[0]->psdata[1];
  1003. tfm = crypto_aead_reqtfm(req);
  1004. auth_size = crypto_aead_authsize(tfm);
  1005. ivsize = crypto_aead_ivsize(tfm);
  1006. if (req_sub_type == SA_REQ_SUBTYPE_ENC) {
  1007. enc_offset = req->assoclen;
  1008. enc_len = req->cryptlen;
  1009. enc = 1;
  1010. } else if (req_sub_type == SA_REQ_SUBTYPE_DEC) {
  1011. enc_offset = req->assoclen;
  1012. enc_len = req->cryptlen - auth_size;
  1013. enc = 0;
  1014. } else {
  1015. err = -EBADMSG;
  1016. goto aead_err;
  1017. }
  1018. /* NOTE: We receive the tag as host endian 32bit words */
  1019. auth_words = auth_size / sizeof(u32);
  1020. for (i = 2; i < (auth_words + SA_PSDATA_CTX_WORDS); i++)
  1021. hwdesc[0]->psdata[i] = htonl(hwdesc[0]->psdata[i]);
  1022. /* if encryption, copy the authentication tag */
  1023. if (enc) {
  1024. scatterwalk_copy(
  1025. &hwdesc[0]->psdata[SA_PSDATA_CTX_WORDS],
  1026. req->dst, enc_offset + enc_len, auth_size, 1);
  1027. } else {
  1028. /* Verify the authentication tag */
  1029. u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
  1030. scatterwalk_copy(auth_tag, req->src,
  1031. enc_len + req->assoclen,
  1032. auth_size, 0);
  1033. err = memcmp(&hwdesc[0]->psdata[SA_PSDATA_CTX_WORDS],
  1034. auth_tag, auth_size) ? -EBADMSG : 0;
  1035. if (unlikely(err))
  1036. goto aead_err;
  1037. }
  1038. /* Copy the encrypted/decrypted data */
  1039. if (unlikely(sa_hwdesc2sg_copy(hwdesc, req->dst, enc_offset,
  1040. enc_offset, enc_len, num)))
  1041. err = -EBADMSG;
  1042. aead_err:
  1043. aead_request_complete(req, err);
  1044. }
  1045. /* free buffers here */
  1046. for (j = 0; j < num; j++) {
  1047. if (hwdesc[j]->orig_len == PAGE_SIZE) {
  1048. __free_page((struct page *)hwdesc[j]->sw_data[1]);
  1049. atomic_dec(&dev_data->rx_dma_page_cnt);
  1050. } else {
  1051. kfree((void *)hwdesc[j]->sw_data[0]);
  1052. }
  1053. }
  1054. atomic_inc(&dev_data->stats.rx_pkts);
  1055. }
  1056. /**
  1057. * sa_rx_completion_process() - processes buffers received from SA
  1058. *
  1059. * @dev_data: struct keystone_crypto_data pointer
  1060. *
  1061. * The function is called from the rx tasklet. It retrieves one or more
  1062. * chained hw descriptors and calls sa_rx_desc_process(). After that it
  1063. * returns all descriptors into the rx_pool.
  1064. */
  1065. void sa_rx_completion_process(struct keystone_crypto_data *dev_data)
  1066. {
  1067. struct knav_dma_desc *hwdesc[MAX_SKB_FRAGS];
  1068. int j, desc_num;
  1069. dma_addr_t dma;
  1070. u32 pkt_len;
  1071. u32 calc_pkt_len;
  1072. int wait4pkt = 1;
  1073. for (;;) {
  1074. dma = knav_queue_pop(dev_data->rx_compl_q, NULL);
  1075. if (!dma) {
  1076. dev_dbg(sa_ks2_dev, "no desc in the queue %d\n",
  1077. dev_data->rx_compl_qid);
  1078. break;
  1079. }
  1080. pkt_len = 0;
  1081. calc_pkt_len = 0;
  1082. wait4pkt = 1;
  1083. desc_num = 0;
  1084. do {
  1085. hwdesc[desc_num] =
  1086. knav_pool_desc_unmap(dev_data->rx_pool, dma,
  1087. sizeof(hwdesc));
  1088. if (!hwdesc[desc_num]) {
  1089. pr_err("failed to unmap descriptor 0x%08x\n",
  1090. dma);
  1091. break;
  1092. }
  1093. if (hwdesc[desc_num]->orig_len == PAGE_SIZE) {
  1094. dma_unmap_page(sa_ks2_dev,
  1095. hwdesc[desc_num]->orig_buff,
  1096. PAGE_SIZE,
  1097. DMA_FROM_DEVICE);
  1098. } else {
  1099. dma_unmap_single(sa_ks2_dev,
  1100. hwdesc[desc_num]->orig_buff,
  1101. SA_RX_BUF0_SIZE,
  1102. DMA_FROM_DEVICE);
  1103. }
  1104. /* take the req_ctx from the first descriptor */
  1105. if (wait4pkt) {
  1106. pkt_len = hwdesc[desc_num]->desc_info &
  1107. KNAV_DMA_DESC_PKT_LEN_MASK;
  1108. wait4pkt = 0;
  1109. }
  1110. calc_pkt_len += hwdesc[desc_num]->buff_len;
  1111. dma = hwdesc[desc_num]->next_desc;
  1112. desc_num++;
  1113. } while (dma);
  1114. #ifdef DEBUG
  1115. if (pkt_len != calc_pkt_len)
  1116. pr_err("[%s] calculated packet length doesn't match %d/%d\n",
  1117. __func__, calc_pkt_len, pkt_len);
  1118. #endif
  1119. /* retrieve data and copy it to the destination sg list */
  1120. sa_rx_desc_process(dev_data, hwdesc, desc_num);
  1121. /* return descriptor to the pool */
  1122. for (j = 0; j < desc_num; j++)
  1123. knav_pool_desc_put(dev_data->rx_pool, hwdesc[j]);
  1124. }
  1125. }
  1126. /**
  1127. * sa_aead_perform() - perform AEAD transformation
  1128. * @req: struct aead_request pointer
  1129. * @iv: initialization vector
  1130. * @enc: boolean flag, true for encryption, false for decryption
  1131. *
  1132. * This function:
  1133. *
  1134. * 1) checks whether the driver has enough buffers to receive the transformed
  1135. * data,
  1136. * 2) allocates a request context and fills in the appropriate fields,
  1137. * 3) maps the source scatterlist,
  1138. * 4) prepares tx dma descriptors and submits them to the SA queue.
  1139. *
  1140. * Return: -EINPROGRESS on success, an appropriate error code otherwise.
  1141. */
  1142. static int sa_aead_perform(struct aead_request *req, u8 *iv, bool enc)
  1143. {
  1144. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1145. struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
  1146. struct sa_ctx_info *sa_ctx = enc ? &ctx->enc : &ctx->dec;
  1147. dma_addr_t desc_dma_addr;
  1148. struct keystone_crypto_data *pdata = dev_get_drvdata(sa_ks2_dev);
  1149. struct sa_dma_req_ctx *req_ctx = NULL;
  1150. u8 enc_offset;
  1151. int sg_nents;
  1152. int psdata_offset, ret = 0;
  1153. u8 *auth_iv = NULL;
  1154. u8 aad[16];
  1155. u8 aad_len = 0;
  1156. u16 enc_len;
  1157. u16 auth_len;
  1158. u32 req_type;
  1159. int n_bufs;
  1160. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  1161. GFP_KERNEL : GFP_ATOMIC;
  1162. if (enc) {
  1163. iv = req->iv;
  1164. enc_offset = req->assoclen;
  1165. enc_len = req->cryptlen;
  1166. auth_len = req->assoclen + req->cryptlen;
  1167. } else {
  1168. enc_offset = req->assoclen;
  1169. enc_len = req->cryptlen - crypto_aead_authsize(tfm);
  1170. auth_len = req->assoclen + req->cryptlen -
  1171. crypto_aead_authsize(tfm);
  1172. }
  1173. /* Parse out AAD values */
  1174. if (sa_ctx->cmdl_upd_info.submode == SA_MODE_GCM) {
  1175. sa_gcm_get_aad(req, aad, &aad_len);
  1176. /*
  1177. * Set the AAD size to the configured
  1178. * AAD size when first packet is received.
  1179. * AAD size CANNOT be changed after this.
  1180. */
  1181. if (sa_ctx->cmdl_upd_info.aad.index == 0) {
  1182. sa_ctx->cmdl_upd_info.aad.index = 0xFF;
  1183. sa_ctx->cmdl_upd_info.aad.size = aad_len;
  1184. sa_ctx->sc[SA_CTX_PHP_PE_CTX_SZ + 64 + 24] =
  1185. (aad_len << 3);
  1186. }
  1187. if (sa_ctx->cmdl_upd_info.aad.size != aad_len) {
  1188. atomic_inc(&pdata->stats.tx_dropped);
  1189. dev_err(sa_ks2_dev, "ERROR: AAD Size Mismatch (%d, %d)\n",
  1190. aad_len,
  1191. sa_ctx->cmdl_upd_info.aad.size);
  1192. return -EPERM;
  1193. }
  1194. } else if (sa_ctx->cmdl_upd_info.submode == SA_MODE_GMAC) {
  1195. sa_gcm_get_aad(req, aad, &aad_len);
  1196. }
  1197. /* Allocate descriptor & submit packet */
  1198. sg_nents = sg_count(req->src, auth_len);
  1199. if (unlikely(atomic_sub_return(sg_nents, &pdata->tx_dma_desc_cnt)
  1200. < 0)) {
  1201. ret = -EBUSY;
  1202. goto err_0;
  1203. }
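/* Number of extra page-sized RX buffers needed beyond the first SA_RX_BUF0_SIZE bytes */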
  1204. n_bufs = auth_len - SA_RX_BUF0_SIZE;
  1205. n_bufs = (n_bufs <= 0) ? 0 :
  1206. DIV_ROUND_UP(n_bufs, PAGE_SIZE);
  1207. if (unlikely(atomic_read(&pdata->rx_dma_page_cnt) < n_bufs)) {
  1208. ret = -EBUSY;
  1209. goto err_0;
  1210. }
  1211. req_ctx = kmem_cache_alloc(pdata->dma_req_ctx_cache, flags);
  1212. if (unlikely(!req_ctx)) {
  1213. ret = -ENOMEM;
  1214. goto err_0;
  1215. }
  1216. memcpy(req_ctx->cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
  1217. /* Update Command Label */
  1218. sa_update_cmdl(sa_ks2_dev, enc_offset, enc_len,
  1219. iv, auth_len, auth_iv, aad_len, aad,
  1220. &sa_ctx->cmdl_upd_info, req_ctx->cmdl);
  1221. /*
  1222. * Last 2 words in PSDATA will have the crypto alg type &
  1223. * crypto request pointer
  1224. */
  1225. req_type = CRYPTO_ALG_TYPE_AEAD;
  1226. if (enc)
  1227. req_type |= (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
  1228. else
  1229. req_type |= (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
  1230. psdata_offset = sa_ctx->cmdl_size / sizeof(u32);
  1231. req_ctx->cmdl[psdata_offset++] = req_type;
  1232. req_ctx->cmdl[psdata_offset] = (u32)req;
  1233. /* map the packet */
  1234. req_ctx->src = req->src;
  1235. req_ctx->src_nents = dma_map_sg(sa_ks2_dev, req_ctx->src,
  1236. sg_nents, DMA_TO_DEVICE);
  1237. if (unlikely(req_ctx->src_nents != sg_nents)) {
  1238. dev_warn_ratelimited(sa_ks2_dev, "failed to map tx pkt\n");
  1239. ret = -EIO;
  1240. goto err;
  1241. }
  1242. req_ctx->dev_data = pdata;
  1243. req_ctx->pkt = true;
  1244. desc_dma_addr = sa_prepare_tx_desc(pdata, req_ctx->src,
  1245. sg_nents,
  1246. (sa_ctx->cmdl_size +
  1247. (SA_PSDATA_CTX_WORDS *
  1248. sizeof(u32))),
  1249. req_ctx->cmdl,
  1250. sizeof(sa_ctx->epib),
  1251. sa_ctx->epib,
  1252. req_ctx);
  1253. if (desc_dma_addr == 0) {
  1254. ret = -EIO;
  1255. goto err;
  1256. }
  1257. knav_queue_push(pdata->tx_submit_q, desc_dma_addr,
  1258. sizeof(struct knav_dma_desc), 0);
  1259. return -EINPROGRESS;
  1260. err:
  1261. if (req_ctx)
  1262. kmem_cache_free(pdata->dma_req_ctx_cache, req_ctx);
  1263. err_0:
  1264. atomic_add(sg_nents, &pdata->tx_dma_desc_cnt);
  1265. return ret;
  1266. }
  1267. /* AEAD algorithm encrypt interface function */
  1268. static int sa_aead_encrypt(struct aead_request *req)
  1269. {
  1270. return sa_aead_perform(req, req->iv, true);
  1271. }
  1272. /* AEAD algorithm decrypt interface function */
  1273. static int sa_aead_decrypt(struct aead_request *req)
  1274. {
  1275. return sa_aead_perform(req, req->iv, false);
  1276. }
  1277. /* GCM algorithm configuration interface function */
  1278. static int sa_aead_gcm_setkey(struct crypto_aead *authenc,
  1279. const u8 *key, unsigned int keylen)
  1280. {
  1281. struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
  1282. unsigned int enckey_len;
  1283. struct sa_eng_info *enc_eng, *auth_eng;
  1284. int ealg_id, aalg_id, cmdl_len;
  1285. struct sa_cmdl_cfg cfg;
  1286. u8 const *enc_key;
  1287. const char *cra_name;
  1288. u32 *temp_key;
  1289. cra_name = crypto_tfm_alg_name(crypto_aead_tfm(authenc));
  1290. sa_conv_calg_to_salg(cra_name, &ealg_id, &aalg_id);
  1291. if (ealg_id != SA_EALG_ID_NONE) {
  1292. /* GCM */
  1293. enc_eng = sa_get_engine_info(ealg_id);
  1294. enckey_len = keylen - 4;
  1295. enc_key = key;
  1296. memset(&cfg, 0, sizeof(cfg));
  1297. cfg.enc_eng_id = enc_eng->eng_id;
  1298. cfg.iv_size = crypto_aead_ivsize(authenc);
  1299. /* Properties not applicable to GCM */
  1300. cfg.aalg = SA_EALG_ID_NONE;
  1301. cfg.auth_eng_id = SA_ENG_ID_NONE;
  1302. cfg.akey = NULL;
  1303. cfg.akey_len = 0;
  1304. /* Initialize the command update structure */
  1305. memset(&ctx->enc.cmdl_upd_info, 0,
  1306. sizeof(struct sa_cmdl_upd_info));
  1307. ctx->enc.cmdl_upd_info.submode = SA_MODE_GCM;
  1308. /* Default AAD size to 8 */
  1309. ctx->enc.cmdl_upd_info.aad.size = 8;
  1310. ctx->enc.cmdl_upd_info.aad.index = 0;
  1311. memset(&ctx->dec.cmdl_upd_info, 0,
  1312. sizeof(struct sa_cmdl_upd_info));
  1313. ctx->dec.cmdl_upd_info.submode = SA_MODE_GCM;
  1314. /* Default AAD size to 8 */
  1315. ctx->dec.cmdl_upd_info.aad.size = 8;
  1316. ctx->dec.cmdl_upd_info.aad.index = 0;
  1317. } else {
  1318. /* GMAC */
  1319. auth_eng = sa_get_engine_info(aalg_id);
  1320. memset(&cfg, 0, sizeof(cfg));
  1321. cfg.iv_size = crypto_aead_ivsize(authenc);
  1322. cfg.aalg = aalg_id;
  1323. cfg.auth_eng_id = auth_eng->eng_id;
  1324. cfg.akey = key;
  1325. cfg.akey_len = keylen - 4;
  1326. cfg.enc_eng_id = SA_ENG_ID_NONE;
  1327. enckey_len = 0;
  1328. enc_key = NULL;
  1329. /* Initialize the command update structure */
  1330. memset(&ctx->enc.cmdl_upd_info, 0,
  1331. sizeof(struct sa_cmdl_upd_info));
  1332. ctx->enc.cmdl_upd_info.submode = SA_MODE_GMAC;
  1333. memset(&ctx->dec.cmdl_upd_info, 0,
  1334. sizeof(struct sa_cmdl_upd_info));
  1335. ctx->dec.cmdl_upd_info.submode = SA_MODE_GMAC;
  1336. }
  1337. /* Store Salt/NONCE value */
  1338. temp_key = (u32 *) &key[keylen - 4];
  1339. cfg.salt = *temp_key;
  1340. /* Setup Encryption Security Context & Command label template */
  1341. if (sa_init_sc(&ctx->enc, enc_key, enckey_len, cfg.akey,
  1342. cfg.akey_len, cra_name, 1, &ctx->enc.epib[1]))
  1343. goto badkey;
  1344. cmdl_len = sa_format_cmdl_gcm(&cfg,
  1345. (u8 *)ctx->enc.cmdl,
  1346. &ctx->enc.cmdl_upd_info);
  1347. if ((cmdl_len <= 0) || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  1348. goto badkey;
  1349. ctx->enc.cmdl_size = cmdl_len;
  1350. /* Setup Decryption Security Context & Command label template */
  1351. if (sa_init_sc(&ctx->dec, enc_key, enckey_len, cfg.akey,
  1352. cfg.akey_len, cra_name, 0, &ctx->dec.epib[1]))
  1353. goto badkey;
  1354. cmdl_len = sa_format_cmdl_gcm(&cfg,
  1355. (u8 *)ctx->dec.cmdl,
  1356. &ctx->dec.cmdl_upd_info);
  1357. if ((cmdl_len <= 0) || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  1358. goto badkey;
  1359. ctx->dec.cmdl_size = cmdl_len;
  1360. return 0;
  1361. badkey:
  1362. dev_err(sa_ks2_dev, "%s: badkey\n", __func__);
  1363. crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1364. return -EINVAL;
  1365. }
static struct sa_alg_tmpl sa_algs[] = {
	/* AEAD algorithms */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha1),cbc(aes))-keystone-sa",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
				.cra_driver_name =
					"authenc(hmac(sha1),cbc(des3_ede))-keystone-sa",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(xcbc(aes),cbc(aes))",
				.cra_driver_name =
					"authenc(xcbc(aes),cbc(aes))-keystone-sa",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = AES_XCBC_DIGEST_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(xcbc(aes),cbc(des3_ede))",
				.cra_driver_name =
					"authenc(xcbc(aes),cbc(des3_ede))-keystone-sa",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = AES_XCBC_DIGEST_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
				.cra_driver_name =
					"authenc-hmac-sha1-cipher_null-keystone-sa",
				.cra_blocksize = NULL_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name =
					"rfc4106-gcm-aes-keystone-sa",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_gcm_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name =
					"rfc4543-gcm-aes-keystone-sa",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_gcm_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-keystone-sa",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					CRYPTO_ALG_KERN_DRIVER_ONLY |
					CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
};
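/*
 * Usage sketch (illustration only, not part of this driver): once
 * sa_register_algos() has run, any kernel user can obtain one of the
 * transforms above through the regular crypto API, e.g.:
 *
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setkey(tfm, key, keylen); // AES key + 4-byte salt
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, 16);
 *	...
 *	crypto_free_aead(tfm);
 */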
/* Register the algorithms with the crypto framework */
void sa_register_algos(const struct device *dev)
{
	char *alg_name;
	u32 type;
	int i, err, num_algs = ARRAY_SIZE(sa_algs);

	for (i = 0; i < num_algs; i++) {
		type = sa_algs[i].type;
		if (type == CRYPTO_ALG_TYPE_AEAD) {
			alg_name = sa_algs[i].alg.aead.base.cra_name;
			err = crypto_register_aead(&sa_algs[i].alg.aead);
		} else {
			dev_err(dev, "unsupported crypto algorithm (%d)\n",
				sa_algs[i].type);
			continue;
		}

		if (err)
			dev_err(dev, "Failed to register '%s'\n", alg_name);
		else
			sa_algs[i].registered = true;
	}
}
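/*
 * Note (assumption about callers outside this file): sa_register_algos()
 * is expected to be paired with sa_unregister_algos() below, typically
 * from the probe/remove path of the KeyStone SA platform driver.
 */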
/* Unregister the algorithms from the crypto framework */
void sa_unregister_algos(const struct device *dev)
{
	char *alg_name;
	int err = 0, i, num_algs = ARRAY_SIZE(sa_algs);

	for (i = 0; i < num_algs; i++) {
		if (sa_algs[i].registered) {
			if (sa_algs[i].type == CRYPTO_ALG_TYPE_AEAD) {
				alg_name = sa_algs[i].alg.aead.base.cra_name;
				crypto_unregister_aead(&sa_algs[i].alg.aead);
				err = 0;
			} else {
				alg_name = sa_algs[i].alg.crypto.cra_name;
				err = crypto_unregister_alg(&sa_algs[i].alg.crypto);
			}
			sa_algs[i].registered = false;

			if (err)
				dev_err(dev, "Failed to unregister '%s'\n",
					alg_name);
		}
	}
}