ifxmips_async_aes.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137
  1. /******************************************************************************
  2. **
  3. ** FILE NAME : ifxmips_async_aes.c
  4. ** PROJECT : IFX UEIP
  5. ** MODULES : DEU Module
  6. **
  7. ** DATE : October 11, 2010
  8. ** AUTHOR : Mohammad Firdaus
  9. ** DESCRIPTION : Data Encryption Unit Driver for AES Algorithm
  10. ** COPYRIGHT : Copyright (c) 2010
  11. ** Infineon Technologies AG
  12. ** Am Campeon 1-12, 85579 Neubiberg, Germany
  13. **
  14. ** This program is free software; you can redistribute it and/or modify
  15. ** it under the terms of the GNU General Public License as published by
  16. ** the Free Software Foundation; either version 2 of the License, or
  17. ** (at your option) any later version.
  18. **
  19. ** HISTORY
  20. ** $Date $Author $Comment
  21. ** 08,Sept 2009 Mohammad Firdaus Initial UEIP release
  22. ** 11, Oct 2010 Mohammad Firdaus Kernel Port incl. Async. Ablkcipher mode
  23. ** 21,March 2011 Mohammad Firdaus Changes for Kernel 2.6.32 and IPSec integration
  24. *******************************************************************************/
  25. /*!
  26. \defgroup IFX_DEU IFX_DEU_DRIVERS
  27. \ingroup API
  28. \brief ifx DEU driver module
  29. */
  30. /*!
  31. \file ifxmips_async_aes.c
  32. \ingroup IFX_DEU
  33. \brief AES Encryption Driver main file
  34. */
  35. /*!
  36. \defgroup IFX_AES_FUNCTIONS IFX_AES_FUNCTIONS
  37. \ingroup IFX_DEU
  38. \brief IFX AES driver Functions
  39. */
  40. #include <linux/wait.h>
  41. #include <linux/crypto.h>
  42. #include <linux/kernel.h>
  43. #include <linux/kthread.h>
  44. #include <linux/interrupt.h>
  45. #include <linux/spinlock.h>
  46. #include <linux/list.h>
  47. #include <crypto/ctr.h>
  48. #include <crypto/aes.h>
  49. #include <crypto/algapi.h>
  50. #include <crypto/scatterwalk.h>
  51. #include <asm/ifx/ifx_regs.h>
  52. #include <asm/ifx/ifx_types.h>
  53. #include <asm/ifx/common_routines.h>
  54. #include <asm/ifx/irq.h>
  55. #include <asm/ifx/ifx_pmu.h>
  56. #include <asm/ifx/ifx_gpio.h>
  57. #include <asm/kmap_types.h>
  58. #include "ifxmips_deu.h"
  59. #if defined(CONFIG_DANUBE)
  60. #include "ifxmips_deu_danube.h"
  61. extern int ifx_danube_pre_1_4;
  62. #elif defined(CONFIG_AR9)
  63. #include "ifxmips_deu_ar9.h"
  64. #elif defined(CONFIG_VR9) || defined(CONFIG_AR10)
  65. #include "ifxmips_deu_vr9.h"
  66. #else
  67. #error "Unkown platform"
  68. #endif
  69. /* DMA related header and variables */
  70. spinlock_t aes_lock;
  71. #define CRTCL_SECT_INIT spin_lock_init(&aes_lock)
  72. #define CRTCL_SECT_START spin_lock_irqsave(&aes_lock, flag)
  73. #define CRTCL_SECT_END spin_unlock_irqrestore(&aes_lock, flag)
  74. /* Definition of constants */
  75. //#define AES_START IFX_AES_CON
  76. #define AES_MIN_KEY_SIZE 16
  77. #define AES_MAX_KEY_SIZE 32
  78. #define AES_BLOCK_SIZE 16
  79. #define CTR_RFC3686_NONCE_SIZE 4
  80. #define CTR_RFC3686_IV_SIZE 8
  81. #define CTR_RFC3686_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
  82. #ifdef CRYPTO_DEBUG
  83. extern char debug_level;
  84. #define DPRINTF(level, format, args...) if (level < debug_level) printk(KERN_INFO "[%s %s %d]: " format, __FILE__, __func__, __LINE__, ##args);
  85. #else
  86. #define DPRINTF(level, format, args...)
  87. #endif /* CRYPTO_DEBUG */
  88. static int disable_multiblock = 0;
  89. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
  90. module_param(disable_multiblock, int, 0);
  91. #else
  92. MODULE_PARM_DESC(disable_multiblock, "Disable encryption of whole multiblock buffers");
  93. #endif
  94. static int disable_deudma = 1;
  95. /* Function decleration */
  96. int aes_chip_init(void);
  97. u32 endian_swap(u32 input);
  98. u32 input_swap(u32 input);
  99. u32* memory_alignment(const u8 *arg, u32 *buff_alloc, int in_out, int nbytes);
  100. void aes_dma_memory_copy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes);
  101. int aes_memory_allocate(int value);
  102. int des_memory_allocate(int value);
  103. void memory_release(u32 *addr);
/* Per-transform AES key context. */
struct aes_ctx {
int key_length;                   /* key size in bytes: 16, 24 or 32 */
u32 buf[AES_MAX_KEY_SIZE];        /* key material; NOTE(review): array is
                                   * sized in u32 words, i.e. 4x the bytes
                                   * actually needed — confirm intent */
u8 nonce[CTR_RFC3686_NONCE_SIZE]; /* nonce for rfc3686(ctr(aes)) */
};
/* Tracks one in-flight ablkcipher request while it is chunked through
 * the DEU hardware by the queue manager / crypto thread. */
struct aes_container {
u8 *iv;              /* initialization vector (NULL for plain AES) */
u8 *src_buf;         /* kernel virtual address of current source sg entry */
u8 *dst_buf;         /* kernel virtual address of current destination sg entry */
int mode;            /* 0 ECB, 1 CBC, 2 OFB, 3 CFB, 4 CTR */
int encdec;          /* CRYPTO_DIR_ENCRYPT / CRYPTO_DIR_DECRYPT */
int complete;        /* set when the whole request fits in one chunk */
int flag;            /* PROCESS_NEW_PACKET or PROCESS_SCATTER */
u32 bytes_processed; /* bytes still left to process (counts down) */
u32 nbytes;          /* size of the chunk currently on the hardware */
struct ablkcipher_request arequest; /* embedded copy of the request; the
                                     * queue stores &arequest and
                                     * container_of() recovers this struct */
};
  121. aes_priv_t *aes_queue;
  122. extern deu_drv_priv_t deu_dma_priv;
/* Dump len bytes at buf to the kernel log, 16 bytes per line with
 * offset prefixes (debug helper). */
void hexdump(unsigned char *buf, unsigned int len)
{
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
		       16, 1,
		       buf, len, false);
}
  129. /*! \fn void lq_deu_aes_core (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg,
  130. size_t nbytes, int encdec, int mode)
  131. * \ingroup IFX_AES_FUNCTIONS
  132. * \brief main interface to AES hardware
  133. * \param ctx_arg crypto algo context
  134. * \param out_arg output bytestream
  135. * \param in_arg input bytestream
  136. * \param iv_arg initialization vector
  137. * \param nbytes length of bytestream
  138. * \param encdec 1 for encrypt; 0 for decrypt
  139. * \param mode operation mode such as ebc, cbc, ctr
  140. *
  141. */
/* Program the DEU AES engine for one chunk and start the DMA transfer.
 * Returns -EINPROGRESS on success (completion is signalled later via the
 * DMA pseudo interrupt / AES_ASYNC_EVENT), -EINVAL on bad key length or
 * DMA write failure. Runs under the aes_lock critical section while
 * touching hardware registers. */
static int lq_deu_aes_core (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
			    u8 *iv_arg, size_t nbytes, int encdec, int mode)
{
	volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
	struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
	u32 *in_key = ctx->buf;
	unsigned long flag;	/* used by the CRTCL_SECT_* lock macros */
	int key_len = ctx->key_length;
	volatile struct deu_dma_t *dma = (struct deu_dma_t *) IFX_DEU_DMA_CON;
	struct dma_device_info *dma_device = ifx_deu[0].dma_device;
	deu_drv_priv_t *deu_priv = (deu_drv_priv_t *)dma_device->priv;
	int wlen = 0;
	u32 *dword_mem_aligned_in = NULL;

	CRTCL_SECT_START;

	/* 128, 192 or 256 bit key length */
	aes->controlr.K = key_len / 8 - 2;
	if (key_len == 128 / 8) {
		/* load the 4 key words, byte-swapped for the DEU register order */
		aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
		aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
		aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
		aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
	}
	else if (key_len == 192 / 8) {
		aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
		aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
		aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
		aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
		aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
		aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
	}
	else if (key_len == 256 / 8) {
		aes->K7R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
		aes->K6R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
		aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
		aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
		aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
		aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
		aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 6));
		aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 7));
	}
	else {
		printk (KERN_ERR "[%s %s %d]: Invalid key_len : %d\n", __FILE__, __func__, __LINE__, key_len);
		CRTCL_SECT_END;
		return -EINVAL;
	}

	/* let HW pre-process DEcryption key in any case (even if
	   ENcryption is used). Key Valid (KV) bit is then only
	   checked in decryption routine! */
	aes->controlr.PNK = 1;

	while (aes->controlr.BUS) {
		/* this will not take long */
	}

	AES_DMA_MISC_CONFIG();

	aes->controlr.E_D = !encdec;	/* encryption */
	aes->controlr.O = mode;		/* 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */

	if (mode > 0) {
		/* chained modes need the IV loaded (byte-swapped like the key) */
		aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
		aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
		aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
		aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
	};

	/* Prepare Rx buf length used in dma psuedo interrupt */
	deu_priv->deu_rx_buf = (u32 *)out_arg;
	deu_priv->deu_rx_len = nbytes;

	/* memory alignment issue */
	dword_mem_aligned_in = (u32 *) DEU_DWORD_REORDERING(in_arg, aes_buff_in, BUFFER_IN, nbytes);

	dma->controlr.ALGO = 1;		/* AES */
	dma->controlr.BS = 0;
	aes->controlr.DAU = 0;
	dma->controlr.EN = 1;

	while (aes->controlr.BUS) {
		/* wait for AES to be ready */
	};

	deu_priv->outcopy = (u32 *) DEU_DWORD_REORDERING(out_arg, aes_buff_out, BUFFER_OUT, nbytes);
	deu_priv->event_src = AES_ASYNC_EVENT;

	wlen = dma_device_write (dma_device, (u8 *)dword_mem_aligned_in, nbytes, NULL);
	if (wlen != nbytes) {
		dma->controlr.EN = 0;
		CRTCL_SECT_END;
		printk (KERN_ERR "[%s %s %d]: dma_device_write fail!\n", __FILE__, __func__, __LINE__);
		return -EINVAL;
	}

	CRTCL_SECT_END;

	if (mode > 0) {
		/* swap the IV back to memory byte order for the caller */
		*((u32 *) iv_arg) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg));
		*((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
		*((u32 *) iv_arg + 2) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
		*((u32 *) iv_arg + 3) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
	}

	return -EINPROGRESS;	/* completion signalled asynchronously via DMA */
}
  238. /* \fn static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
  239. * \ingroup IFX_AES_FUNCTIONS
  240. * \brief Counts and return the number of scatterlists
  241. * \param *sl Function pointer to the scatterlist
  242. * \param total_bytes The total number of bytes that needs to be encrypted/decrypted
  243. * \return The number of scatterlists
  244. */
  245. static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
  246. {
  247. int i = 0;
  248. do {
  249. total_bytes -= sl[i].length;
  250. i++;
  251. } while (total_bytes > 0);
  252. return i;
  253. }
  254. /* \fn void lq_sg_init(struct scatterlist *src,
  255. * struct scatterlist *dst)
  256. * \ingroup IFX_AES_FUNCTIONS
  257. * \brief Maps the scatterlists into a source/destination page.
  258. * \param *src Pointer to the source scatterlist
  259. * \param *dst Pointer to the destination scatterlist
  260. */
  261. static void lq_sg_init(struct aes_container *aes_con,struct scatterlist *src,
  262. struct scatterlist *dst)
  263. {
  264. struct page *dst_page, *src_page;
  265. src_page = sg_virt(src);
  266. aes_con->src_buf = (char *) src_page;
  267. dst_page = sg_virt(dst);
  268. aes_con->dst_buf = (char *) dst_page;
  269. }
  270. /* \fn static void lq_sg_complete(struct aes_container *aes_con)
  271. * \ingroup IFX_AES_FUNCTIONS
  272. * \brief Free the used up memory after encryt/decrypt.
  273. */
  274. static void lq_sg_complete(struct aes_container *aes_con)
  275. {
  276. unsigned long queue_flag;
  277. spin_lock_irqsave(&aes_queue->lock, queue_flag);
  278. kfree(aes_con);
  279. spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
  280. }
  281. /* \fn static inline struct aes_container *aes_container_cast (
  282. * struct scatterlist *dst)
  283. * \ingroup IFX_AES_FUNCTIONS
  284. * \brief Locate the structure aes_container in memory.
  285. * \param *areq Pointer to memory location where ablkcipher_request is located
  286. * \return *aes_cointainer The function pointer to aes_container
  287. */
/* Recover the enclosing aes_container from the embedded
 * ablkcipher_request that was placed on the queue (the queue stores
 * &aes_con->arequest, see lq_aes_queue_mgr). */
static inline struct aes_container *aes_container_cast (
		struct ablkcipher_request *areq)
{
	return container_of(areq, struct aes_container, arequest);
}
  293. /* \fn static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq,
  294. * \ int state)
  295. * \ingroup IFX_AES_FUNCTIONS
  296. * \brief Process next packet to be encrypt/decrypt
  297. * \param *aes_con AES container structure
  298. * \param *areq Pointer to memory location where ablkcipher_request is located
  299. * \param state The state of the current packet (part of scatterlist or new packet)
  300. * \return -EINVAL: error, -EINPROGRESS: Crypto still running, 1: no more scatterlist
  301. */
/* Prepare and launch the next chunk of a request on the DEU hardware.
 * state selects whether this is the first chunk of a freshly dequeued
 * packet (PROCESS_NEW_PACKET) or a continuation over the scatterlist
 * (PROCESS_SCATTER).
 * Returns 1 when the scatterlist is exhausted, -EINVAL on queue failure,
 * otherwise the result of lq_deu_aes_core() (normally -EINPROGRESS). */
static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq,
			       int state)
{
	u8 *iv;
	int mode, dir, err = -EINVAL;
	unsigned long queue_flag;
	u32 inc, nbytes, remain, chunk_size;
	struct scatterlist *src = NULL;
	struct scatterlist *dst = NULL;
	struct crypto_ablkcipher *cipher;
	struct aes_ctx *ctx;

	spin_lock_irqsave(&aes_queue->lock, queue_flag);

	dir = aes_con->encdec;
	mode = aes_con->mode;
	iv = aes_con->iv;

	if (state & PROCESS_SCATTER) {
		/* NOTE(review): scatterwalk_sg_next() is always applied to the
		 * list head (areq->src/areq->dst), so every PROCESS_SCATTER
		 * pass resolves to the *second* entry instead of walking
		 * forward — confirm behaviour for scatterlists with more than
		 * two entries. */
		src = scatterwalk_sg_next(areq->src);
		dst = scatterwalk_sg_next(areq->dst);

		if (!src || !dst) {
			spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
			return 1;	/* no further scatterlist entries */
		}
	}
	else if (state & PROCESS_NEW_PACKET) {
		src = areq->src;
		dst = areq->dst;
	}

	/* Size this pass: at most DEU_MAX_PACKET_SIZE, at most one
	 * scatterlist entry, never more than what is left to process. */
	remain = aes_con->bytes_processed;
	chunk_size = src->length;

	if (remain > DEU_MAX_PACKET_SIZE)
		inc = DEU_MAX_PACKET_SIZE;
	else if (remain > chunk_size)
		inc = chunk_size;
	else
		inc = remain;

	remain -= inc;	/* NOTE(review): local only; bytes_processed is updated below */
	aes_con->nbytes = inc;

	if (state & PROCESS_SCATTER) {
		/* NOTE(review): these advances are immediately overwritten by
		 * lq_sg_init() below, so they have no effect — verify intent. */
		aes_con->src_buf += aes_con->nbytes;
		aes_con->dst_buf += aes_con->nbytes;
	}

	lq_sg_init(aes_con, src, dst);

	nbytes = aes_con->nbytes;

	cipher = crypto_ablkcipher_reqtfm(areq);
	ctx = crypto_ablkcipher_ctx(cipher);

	if (aes_queue->hw_status == AES_IDLE)
		aes_queue->hw_status = AES_STARTED;

	aes_con->bytes_processed -= aes_con->nbytes;

	/* Re-queue the request so the crypto thread can continue with the
	 * remaining bytes when the DMA completion fires. */
	err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);
	if (err == -EBUSY) {
		spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
		printk("Failed to enqueue request, ln: %d, err: %d\n",
			__LINE__, err);
		return -EINVAL;
	}

	spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

	err = lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, nbytes, dir, mode);
	return err;
}
  363. /* \fn static void process_queue (unsigned long data)
  364. * \ingroup IFX_AES_FUNCTIONS
  365. * \brief tasklet to signal the dequeuing of the next packet to be processed
  366. * \param unsigned long data Not used
  367. * \return void
  368. */
/* Tasklet body: wake the AES crypto thread (aes_crypto_thread) via the
 * shared DEU wait queue so it dequeues and processes the next request.
 * The data argument is unused. */
static void process_queue(unsigned long data)
{
	DEU_WAKEUP_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
			 deu_dma_priv.aes_event_flags);
}
  374. /* \fn static int aes_crypto_thread (void *data)
  375. * \ingroup IFX_AES_FUNCTIONS
  376. * \brief AES thread that handles crypto requests from upper layer & DMA
  377. * \param *data Not used
  378. * \return -EINVAL: DEU failure, -EBUSY: DEU HW busy, 0: exit thread
  379. */
/* Kernel thread that drives the AES request queue: sleeps until the DMA
 * path or the tasklet raises AES_ASYNC_EVENT, then dequeues the next
 * request and feeds its next chunk to the hardware, completing requests
 * as their byte counts reach zero. Returns 0 on thread exit. */
static int aes_crypto_thread (void *data)
{
	struct aes_container *aes_con = NULL;
	struct ablkcipher_request *areq = NULL;
	int err;
	unsigned long queue_flag;

	daemonize("lq_aes_thread");

	printk("AES Queue Manager Starting\n");

	while (1)
	{
		/* Block until the DMA pseudo interrupt / tasklet signals work */
		DEU_WAIT_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
			deu_dma_priv.aes_event_flags);

		spin_lock_irqsave(&aes_queue->lock, queue_flag);

		/* wait to prevent starting a crypto session before
		 * exiting the dma interrupt thread.
		 */
		if (aes_queue->hw_status == AES_STARTED) {
			areq = ablkcipher_dequeue_request(&aes_queue->list);
			aes_con = aes_container_cast(areq);
			aes_queue->hw_status = AES_BUSY;
		}
		else if (aes_queue->hw_status == AES_IDLE) {
			areq = ablkcipher_dequeue_request(&aes_queue->list);
			aes_con = aes_container_cast(areq);
			aes_queue->hw_status = AES_STARTED;
		}
		else if (aes_queue->hw_status == AES_BUSY) {
			areq = ablkcipher_dequeue_request(&aes_queue->list);
			aes_con = aes_container_cast(areq);
		}
		else if (aes_queue->hw_status == AES_COMPLETED) {
			/* NOTE(review): on the first loop iteration aes_con and
			 * areq are still NULL here; areq->base.complete would
			 * then dereference NULL — confirm AES_COMPLETED can
			 * only be observed after a prior dequeue. */
			lq_sg_complete(aes_con);
			aes_queue->hw_status = AES_IDLE;
			areq->base.complete(&areq->base, 0);
			spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
			return 0;
		}

		spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

		if (!aes_con) {
			printk("AES_CON return null\n");
			goto aes_done;
		}
		if (aes_con->bytes_processed == 0) {
			goto aes_done;	/* request fully processed */
		}

		/* Process new packet or the next packet in a scatterlist */
		if (aes_con->flag & PROCESS_NEW_PACKET) {
			aes_con->flag = PROCESS_SCATTER;
			err = process_next_packet(aes_con, areq, PROCESS_NEW_PACKET);
		}
		else
			err = process_next_packet(aes_con, areq, PROCESS_SCATTER);

		if (err == -EINVAL) {
			areq->base.complete(&areq->base, err);
			lq_sg_complete(aes_con);
			printk("src/dst returned -EINVAL in func: %s\n", __func__);
		}
		else if (err > 0) {
			/* scatterlist exhausted: fall through to completion */
			printk("src/dst returned zero in func: %s\n", __func__);
			goto aes_done;
		}

		continue;

aes_done:
		/* Signal completion to the upper layer and free the container */
		areq->base.complete(&areq->base, 0);
		lq_sg_complete(aes_con);

		spin_lock_irqsave(&aes_queue->lock, queue_flag);
		if (aes_queue->list.qlen > 0) {
			spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
			/* more work pending: reschedule ourselves via tasklet */
			tasklet_schedule(&aes_queue->aes_task);
		}
		else {
			aes_queue->hw_status = AES_IDLE;
			spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
		}
	} /* while(1) */

	return 0;
}
  459. /* \fn static int lq_aes_queue_mgr(struct aes_ctx *ctx, struct ablkcipher_request *areq,
  460. u8 *iv, int dir, int mode)
  461. * \ingroup IFX_AES_FUNCTIONS
  462. * \brief starts the process of queuing DEU requests
  463. * \param *ctx crypto algo contax
  464. * \param *areq Pointer to the balkcipher requests
  465. * \param *iv Pointer to intput vector location
  466. * \param dir Encrypt/Decrypt
  467. * \mode The mode AES algo is running
  468. * \return 0 if success
  469. */
/* Entry point for all AES ablkcipher requests: wraps the request in an
 * aes_container, enqueues it, and either defers to the crypto thread
 * (hardware busy) or launches the first chunk immediately.
 * mode 5 means "plain single-block AES" and is remapped to ECB over one
 * AES block. Returns -EINPROGRESS, -ENOMEM, -EBUSY or a core error. */
static int lq_aes_queue_mgr(struct aes_ctx *ctx, struct ablkcipher_request *areq,
			    u8 *iv, int dir, int mode)
{
	int err = -EINVAL;
	unsigned long queue_flag;
	struct scatterlist *src = areq->src;
	struct scatterlist *dst = areq->dst;
	struct aes_container *aes_con = NULL;
	u32 remain, inc, nbytes = areq->nbytes;
	u32 chunk_bytes = src->length;

	aes_con = (struct aes_container *)kmalloc(sizeof(struct aes_container),
			GFP_KERNEL);
	if (!(aes_con)) {
		printk("Cannot allocate memory for AES container, fn %s, ln %d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* AES encrypt/decrypt mode */
	if (mode == 5) {
		/* plain cipher: one AES block in ECB */
		nbytes = AES_BLOCK_SIZE;
		chunk_bytes = AES_BLOCK_SIZE;
		mode = 0;
	}

	aes_con->bytes_processed = nbytes;
	/* NOTE(review): the request is copied by value; the queue and the
	 * later container_of() operate on this embedded copy, not on the
	 * caller's areq. */
	aes_con->arequest = *(areq);
	remain = nbytes;

	/* First-chunk size: bounded by DEU_MAX_PACKET_SIZE and by the
	 * first scatterlist entry. */
	if (remain > DEU_MAX_PACKET_SIZE)
		inc = DEU_MAX_PACKET_SIZE;
	else if (remain > chunk_bytes)
		inc = chunk_bytes;
	else
		inc = remain;

	remain -= inc;
	lq_sg_init(aes_con, src, dst);

	if (remain <= 0)
		aes_con->complete = 1;
	else
		aes_con->complete = 0;

	aes_con->nbytes = inc;
	aes_con->iv = iv;
	aes_con->mode = mode;
	aes_con->encdec = dir;

	spin_lock_irqsave(&aes_queue->lock, queue_flag);

	if (aes_queue->hw_status == AES_STARTED || aes_queue->hw_status == AES_BUSY ||
	    aes_queue->list.qlen > 0) {
		/* hardware busy: queue the request for the crypto thread */
		aes_con->flag = PROCESS_NEW_PACKET;
		err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);

		/* max queue length reached */
		if (err == -EBUSY) {
			/* NOTE(review): aes_con is leaked on this path — the
			 * container is never freed; confirm and fix upstream. */
			spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
			printk("Unable to enqueue request ln: %d, err: %d\n", __LINE__, err);
			return err;
		}

		spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
		return -EINPROGRESS;
	}
	else if (aes_queue->hw_status == AES_IDLE)
		aes_queue->hw_status = AES_STARTED;

	aes_con->flag = PROCESS_SCATTER;
	aes_con->bytes_processed -= aes_con->nbytes;

	/* or enqueue the whole structure so as to get back the info
	 * at the moment that it's queued. nbytes might be different */
	err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);
	if (err == -EBUSY) {
		/* NOTE(review): aes_con is leaked here as well. */
		spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
		printk("Unable to enqueue request ln: %d, err: %d\n", __LINE__, err);
		return err;
	}

	spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

	/* kick off the first chunk on the DEU hardware */
	return lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, inc, dir, mode);
}
  543. /* \fn static int aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
  544. * unsigned int keylen)
  545. * \ingroup IFX_AES_FUNCTIONS
  546. * \brief Sets AES key
  547. * \param *tfm Pointer to the ablkcipher transform
  548. * \param *in_key Pointer to input keys
  549. * \param key_len Length of the AES keys
  550. * \return 0 is success, -EINVAL if bad key length
  551. */
  552. static int aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
  553. unsigned int keylen)
  554. {
  555. struct aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  556. unsigned long *flags = (unsigned long *) &tfm->base.crt_flags;
  557. DPRINTF(2, "set_key in %s\n", __FILE__);
  558. if (keylen != 16 && keylen != 24 && keylen != 32) {
  559. *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  560. return -EINVAL;
  561. }
  562. ctx->key_length = keylen;
  563. DPRINTF(0, "ctx @%p, keylen %d, ctx->key_length %d\n", ctx, keylen, ctx->key_length);
  564. memcpy ((u8 *) (ctx->buf), in_key, keylen);
  565. return 0;
  566. }
  567. /* \fn static int aes_generic_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
  568. * unsigned int keylen)
  569. * \ingroup IFX_AES_FUNCTIONS
  570. * \brief Sets AES key
  571. * \param *tfm Pointer to the ablkcipher transform
  572. * \param *key Pointer to input keys
  573. * \param keylen Length of AES keys
  574. * \return 0 is success, -EINVAL if bad key length
  575. */
/* Thin alias of aes_setkey() used by the generic algorithm
 * registrations; forwards key and length unchanged. */
static int aes_generic_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	return aes_setkey(tfm, key, keylen);
}
  581. /* \fn static int rfc3686_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
  582. * unsigned int keylen)
  583. * \ingroup IFX_AES_FUNCTIONS
  584. * \brief Sets AES key
  585. * \param *tfm Pointer to the ablkcipher transform
  586. * \param *in_key Pointer to input keys
  587. * \param key_len Length of the AES keys
  588. * \return 0 is success, -EINVAL if bad key length
  589. */
  590. static int rfc3686_aes_setkey(struct crypto_ablkcipher *tfm,
  591. const u8 *in_key, unsigned int keylen)
  592. {
  593. struct aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  594. unsigned long *flags = (unsigned long *)&tfm->base.crt_flags;
  595. DPRINTF(2, "ctr_rfc3686_aes_set_key in %s\n", __FILE__);
  596. memcpy(ctx->nonce, in_key + (keylen - CTR_RFC3686_NONCE_SIZE),
  597. CTR_RFC3686_NONCE_SIZE);
  598. keylen -= CTR_RFC3686_NONCE_SIZE; // remove 4 bytes of nonce
  599. if (keylen != 16 && keylen != 24 && keylen != 32) {
  600. *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  601. return -EINVAL;
  602. }
  603. ctx->key_length = keylen;
  604. memcpy ((u8 *) (ctx->buf), in_key, keylen);
  605. return 0;
  606. }
  607. /* \fn static int aes_encrypt(struct ablkcipher_request *areq)
  608. * \ingroup IFX_AES_FUNCTIONS
  609. * \brief Encrypt function for AES algo
  610. * \param *areq Pointer to ablkcipher request in memory
  611. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  612. */
  613. static int aes_encrypt (struct ablkcipher_request *areq)
  614. {
  615. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  616. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  617. return lq_aes_queue_mgr(ctx, areq, NULL, CRYPTO_DIR_ENCRYPT, 5);
  618. }
  619. /* \fn static int aes_decrypt(struct ablkcipher_request *areq)
  620. * \ingroup IFX_AES_FUNCTIONS
  621. * \brief Decrypt function for AES algo
  622. * \param *areq Pointer to ablkcipher request in memory
  623. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  624. */
  625. static int aes_decrypt (struct ablkcipher_request *areq)
  626. {
  627. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  628. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  629. return lq_aes_queue_mgr(ctx, areq, NULL, CRYPTO_DIR_DECRYPT, 5);
  630. }
  631. /* \fn static int ecb_aes_decrypt(struct ablkcipher_request *areq)
  632. * \ingroup IFX_AES_FUNCTIONS
  633. * \brief Encrypt function for AES algo
  634. * \param *areq Pointer to ablkcipher request in memory
  635. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  636. */
  637. static int ecb_aes_encrypt (struct ablkcipher_request *areq)
  638. {
  639. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  640. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  641. return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 0);
  642. }
  643. /* \fn static int ecb_aes_decrypt(struct ablkcipher_request *areq)
  644. * \ingroup IFX_AES_FUNCTIONS
  645. * \brief Decrypt function for AES algo
  646. * \param *areq Pointer to ablkcipher request in memory
  647. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  648. */
  649. static int ecb_aes_decrypt(struct ablkcipher_request *areq)
  650. {
  651. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  652. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  653. return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 0);
  654. }
  655. /* \fn static int cbc_aes_encrypt(struct ablkcipher_request *areq)
  656. * \ingroup IFX_AES_FUNCTIONS
  657. * \brief Encrypt function for AES algo
  658. * \param *areq Pointer to ablkcipher request in memory
  659. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  660. */
  661. static int cbc_aes_encrypt (struct ablkcipher_request *areq)
  662. {
  663. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  664. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  665. return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 1);
  666. }
  667. /* \fn static int cbc_aes_decrypt(struct ablkcipher_request *areq)
  668. * \ingroup IFX_AES_FUNCTIONS
  669. * \brief Decrypt function for AES algo
  670. * \param *areq Pointer to ablkcipher request in memory
  671. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  672. */
  673. static int cbc_aes_decrypt(struct ablkcipher_request *areq)
  674. {
  675. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  676. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  677. return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 1);
  678. }
#if 0	/* OFB (hardware mode 2) and CFB (mode 3) handlers: written but
	 * not registered with the crypto API anywhere in this file, so
	 * they are compiled out. */
static int ofb_aes_encrypt (struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 2);
}
static int ofb_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 2);
}
static int cfb_aes_encrypt (struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 3);
}
static int cfb_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 3);
}
#endif
  705. /* \fn static int ctr_aes_encrypt(struct ablkcipher_request *areq)
  706. * \ingroup IFX_AES_FUNCTIONS
  707. * \brief Encrypt function for AES algo
  708. * \param *areq Pointer to ablkcipher request in memory
  709. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  710. */
  711. static int ctr_aes_encrypt (struct ablkcipher_request *areq)
  712. {
  713. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  714. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  715. return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 4);
  716. }
  717. /* \fn static int ctr_aes_decrypt(struct ablkcipher_request *areq)
  718. * \ingroup IFX_AES_FUNCTIONS
  719. * \brief Decrypt function for AES algo
  720. * \param *areq Pointer to ablkcipher request in memory
  721. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  722. */
  723. static int ctr_aes_decrypt(struct ablkcipher_request *areq)
  724. {
  725. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  726. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  727. return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 4);
  728. }
  729. /* \fn static int rfc3686_aes_encrypt(struct ablkcipher_request *areq)
  730. * \ingroup IFX_AES_FUNCTIONS
  731. * \brief Encrypt function for AES algo
  732. * \param *areq Pointer to ablkcipher request in memory
  733. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  734. */
  735. static int rfc3686_aes_encrypt(struct ablkcipher_request *areq)
  736. {
  737. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  738. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  739. int ret;
  740. u8 *info = areq->info;
  741. u8 rfc3686_iv[16];
  742. memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
  743. memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
  744. /* initialize counter portion of counter block */
  745. *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
  746. cpu_to_be32(1);
  747. areq->info = rfc3686_iv;
  748. ret = lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 4);
  749. areq->info = info;
  750. return ret;
  751. }
  752. /* \fn static int rfc3686_aes_decrypt(struct ablkcipher_request *areq)
  753. * \ingroup IFX_AES_FUNCTIONS
  754. * \brief Decrypt function for AES algo
  755. * \param *areq Pointer to ablkcipher request in memory
  756. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  757. */
  758. static int rfc3686_aes_decrypt(struct ablkcipher_request *areq)
  759. {
  760. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
  761. struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  762. int ret;
  763. u8 *info = areq->info;
  764. u8 rfc3686_iv[16];
  765. /* set up counter block */
  766. memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
  767. memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
  768. /* initialize counter portion of counter block */
  769. *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
  770. cpu_to_be32(1);
  771. areq->info = rfc3686_iv;
  772. ret = lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 4);
  773. areq->info = info;
  774. return ret;
  775. }
/* Thin wrapper around struct crypto_alg used for the driver's algo table. */
struct lq_aes_alg {
struct crypto_alg alg;  /* the crypto API algorithm descriptor */
};
/* AES supported algo array: every entry is registered with the crypto API by
 * lqdeu_async_aes_init(). All entries are async ablkcipher algorithms backed
 * by the DEU hardware, priority 300. */
static struct lq_aes_alg aes_drivers_alg[] = {
{
/* Bare "aes" block cipher. */
.alg = {
.cra_name = "aes",
.cra_driver_name = "ifxdeu-aes",
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = aes_setkey,
.encrypt = aes_encrypt,
.decrypt = aes_decrypt,
.geniv = "eseqiv",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}
}
},{
/* ECB mode.
 * NOTE(review): ECB uses no IV, yet geniv/ivsize are set here — presumably
 * copied from the CBC entry; confirm whether the crypto core tolerates or
 * requires this for registration. */
.alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ifxdeu-ecb(aes)",
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = aes_generic_setkey,
.encrypt = ecb_aes_encrypt,
.decrypt = ecb_aes_decrypt,
.geniv = "eseqiv",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}
}
},{
/* CBC mode. */
.alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "ifxdeu-cbc(aes)",
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = aes_generic_setkey,
.encrypt = cbc_aes_encrypt,
.decrypt = cbc_aes_decrypt,
.geniv = "eseqiv",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}
}
},{
/* CTR mode. */
.alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ifxdeu-ctr(aes)",
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = aes_generic_setkey,
.encrypt = ctr_aes_encrypt,
.decrypt = ctr_aes_decrypt,
.geniv = "eseqiv",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}
}
},{
/* RFC 3686 CTR mode (keyed with AES key + 4-byte nonce, hence the larger
 * max_keysize). */
.alg = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "ifxdeu-rfc3686(ctr(aes))",
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = rfc3686_aes_setkey,
.encrypt = rfc3686_aes_encrypt,
.decrypt = rfc3686_aes_decrypt,
.geniv = "eseqiv",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = CTR_RFC3686_MAX_KEY_SIZE,
/* NOTE(review): RFC 3686 nominally uses CTR_RFC3686_IV_SIZE (8), but the
 * original author reports registration fails with that value, so the full
 * AES block size is advertised instead — confirm against the crypto core. */
//.max_keysize = AES_MAX_KEY_SIZE,
//.ivsize = CTR_RFC3686_IV_SIZE,
.ivsize = AES_BLOCK_SIZE, // else cannot reg
}
}
}
};
  885. /* \fn int __init lqdeu_async_aes_init (void)
  886. * \ingroup IFX_AES_FUNCTIONS
  887. * \brief Initializes the Async. AES driver
  888. * \return 0 is success, -EINPROGRESS if encryting, EINVAL if failure
  889. */
  890. int __init lqdeu_async_aes_init (void)
  891. {
  892. int i, j, ret = -EINVAL;
  893. #define IFX_DEU_DRV_VERSION "2.0.0"
  894. printk(KERN_INFO "Lantiq Technologies DEU Driver version %s\n", IFX_DEU_DRV_VERSION);
  895. for (i = 0; i < ARRAY_SIZE(aes_drivers_alg); i++) {
  896. ret = crypto_register_alg(&aes_drivers_alg[i].alg);
  897. printk("driver: %s\n", aes_drivers_alg[i].alg.cra_name);
  898. if (ret)
  899. goto aes_err;
  900. }
  901. aes_chip_init();
  902. CRTCL_SECT_INIT;
  903. printk (KERN_NOTICE "Lantiq DEU AES initialized %s %s.\n",
  904. disable_multiblock ? "" : " (multiblock)", disable_deudma ? "" : " (DMA)");
  905. return ret;
  906. aes_err:
  907. for (j = 0; j < i; j++)
  908. crypto_unregister_alg(&aes_drivers_alg[j].alg);
  909. printk(KERN_ERR "Lantiq %s driver initialization failed!\n", (char *)&aes_drivers_alg[i].alg.cra_driver_name);
  910. return ret;
  911. ctr_rfc3686_aes_err:
  912. for (i = 0; i < ARRAY_SIZE(aes_drivers_alg); i++) {
  913. if (!strcmp((char *)&aes_drivers_alg[i].alg.cra_name, "rfc3686(ctr(aes))"))
  914. crypto_unregister_alg(&aes_drivers_alg[j].alg);
  915. }
  916. printk (KERN_ERR "Lantiq ctr_rfc3686_aes initialization failed!\n");
  917. return ret;
  918. }
/*! \fn void __exit lqdeu_fini_async_aes (void)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Module teardown: unregisters all AES algorithms, signals the DEU
 *        worker thread to finish, and frees the request queue.
 */
void __exit lqdeu_fini_async_aes (void)
{
int i;
/* Remove every algorithm registered by lqdeu_async_aes_init(). */
for (i = 0; i < ARRAY_SIZE(aes_drivers_alg); i++)
crypto_unregister_alg(&aes_drivers_alg[i].alg);
/* Mark the hardware queue completed and wake the DEU thread so it can
 * observe the state and exit. */
aes_queue->hw_status = AES_COMPLETED;
DEU_WAKEUP_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
deu_dma_priv.aes_event_flags);
/* NOTE(review): aes_queue is freed immediately after the wakeup; this
 * assumes the woken thread no longer dereferences it — confirm there is no
 * use-after-free window, e.g. by joining/stopping the thread first. */
kfree(aes_queue);
}