ixp4xx.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339
  1. /*
  2. * An OCF module that uses Intels IXP CryptACC API to do the crypto.
  3. * This driver requires the IXP400 Access Library that is available
  4. * from Intel in order to operate (or compile).
  5. *
  6. * Written by David McCullough <david_mccullough@mcafee.com>
  7. * Copyright (C) 2006-2011 David McCullough
  8. * Copyright (C) 2004-2005 Intel Corporation.
  9. *
  10. * LICENSE TERMS
  11. *
  12. * The free distribution and use of this software in both source and binary
  13. * form is allowed (with or without changes) provided that:
  14. *
  15. * 1. distributions of this source code include the above copyright
  16. * notice, this list of conditions and the following disclaimer;
  17. *
  18. * 2. distributions in binary form include the above copyright
  19. * notice, this list of conditions and the following disclaimer
  20. * in the documentation and/or other associated materials;
  21. *
  22. * 3. the copyright holder's name is not used to endorse products
  23. * built using this software without specific written permission.
  24. *
  25. * ALTERNATIVELY, provided that this notice is retained in full, this product
  26. * may be distributed under the terms of the GNU General Public License (GPL),
  27. * in which case the provisions of the GPL apply INSTEAD OF those given above.
  28. *
  29. * DISCLAIMER
  30. *
  31. * This software is provided 'as is' with no explicit or implied warranties
  32. * in respect of its properties, including, but not limited to, correctness
  33. * and/or fitness for purpose.
  34. */
  35. #include <linux/version.h>
  36. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
  37. #include <linux/config.h>
  38. #endif
  39. #include <linux/module.h>
  40. #include <linux/init.h>
  41. #include <linux/list.h>
  42. #include <linux/slab.h>
  43. #include <linux/sched.h>
  44. #include <linux/wait.h>
  45. #include <linux/crypto.h>
  46. #include <linux/interrupt.h>
  47. #include <asm/scatterlist.h>
  48. #include <IxTypes.h>
  49. #include <IxOsBuffMgt.h>
  50. #include <IxNpeDl.h>
  51. #include <IxCryptoAcc.h>
  52. #include <IxQMgr.h>
  53. #include <IxOsServices.h>
  54. #include <IxOsCacheMMU.h>
  55. #include <cryptodev.h>
  56. #include <uio.h>
  57. #ifndef IX_MBUF_PRIV
  58. #define IX_MBUF_PRIV(x) ((x)->priv)
  59. #endif
struct ixp_data;

/*
 * One queued crypto request: ties an OCF cryptop to the IX_MBUF and the
 * per-request state needed while it waits for, or is processed by, the NPE.
 */
struct ixp_q {
	struct list_head	 ixp_q_list;	/* entry on ixp_data.ixp_q */
	struct ixp_data		*ixp_q_data;	/* owning session */
	struct cryptop		*ixp_q_crp;	/* the OCF request being serviced */
	struct cryptodesc	*ixp_q_ccrd;	/* cipher descriptor, or NULL */
	struct cryptodesc	*ixp_q_acrd;	/* auth descriptor, or NULL */
	IX_MBUF			 ixp_q_mbuf;	/* buffer handed to the access library */
	UINT8			*ixp_hash_dest;	/* Location for hash in client buffer */
	UINT8			*ixp_hash_src;	/* Location of hash in internal buffer */
	/* scratch IV storage used when the caller did not supply one explicitly */
	unsigned char		 ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
	unsigned char		*ixp_q_iv;	/* points at crd_iv or ixp_q_iv_data */
};
/*
 * Per-session state, one entry per OCF session id (indexed via ixp_sessions).
 */
struct ixp_data {
	int			ixp_registered;	/* is the context registered */
	int			ixp_crd_flags;	/* detect direction changes */

	int			ixp_cipher_alg;	/* CRYPTO_* cipher algorithm, or -1 */
	int			ixp_auth_alg;	/* CRYPTO_* auth algorithm, or -1 */

	UINT32			ixp_ctx_id;	/* access-library context handle, -1 when unregistered */
	UINT32			ixp_hash_key_id; /* used when hashing */
	IxCryptoAccCtx		ixp_ctx;	/* context passed to ixCryptoAccCtxRegister */
	IX_MBUF			ixp_pri_mbuf;	/* primary/secondary registration buffers */
	IX_MBUF			ixp_sec_mbuf;

	struct work_struct	ixp_pending_work;	/* drains ixp_q once registered */
	struct work_struct	ixp_registration_work;	/* performs the deferred registration */
	struct list_head	ixp_q;		/* unprocessed requests */
};
#ifdef __ixp46X
/* Public-key (EAU) support is only compiled for the ixp46x parts. */

#define MAX_IOP_SIZE	64	/* words */
#define MAX_OOP_SIZE	128

#define MAX_PARAMS	3

/* One queued public-key (mod-exp) operation and its operand buffers. */
struct ixp_pkq {
	struct list_head		pkq_list;	/* entry on ixp_pkq wait list */
	struct cryptkop			*pkq_krp;	/* the OCF key request */
	IxCryptoAccPkeEauInOperands	pkq_op;		/* operands handed to the EAU */
	IxCryptoAccPkeEauOpResult	pkq_result;	/* result descriptor */

	UINT32				pkq_ibuf0[MAX_IOP_SIZE];
	UINT32				pkq_ibuf1[MAX_IOP_SIZE];
	UINT32				pkq_ibuf2[MAX_IOP_SIZE];
	UINT32				pkq_obuf[MAX_OOP_SIZE];
};

static LIST_HEAD(ixp_pkq);		/* current PK wait list */
static struct ixp_pkq *ixp_pk_cur;	/* operation currently on the hardware */
static spinlock_t ixp_pkq_lock;		/* protects ixp_pkq and ixp_pk_cur */
#endif /* __ixp46X */
static int ixp_blocked = 0;		/* set when out of h/w tunnels; cleared in freesession */
static int32_t ixp_id = -1;		/* OCF driver id */
static struct ixp_data **ixp_sessions = NULL;	/* session table; slot 0 is never used */
static u_int32_t ixp_sesnum = 0;	/* number of entries in ixp_sessions */

static int ixp_process(device_t, struct cryptop *, int);
static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ixp_freesession(device_t, u_int64_t);
#ifdef __ixp46X
static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
#endif

/* slab cache for struct ixp_q request wrappers */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *qcache;
#else
static struct kmem_cache *qcache;
#endif

#define debug ixp_debug
static int ixp_debug = 0;
module_param(ixp_debug, int, 0644);
MODULE_PARM_DESC(ixp_debug, "Enable debug");

static int ixp_init_crypto = 1;
module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");

static void ixp_process_pending(void *arg);
static void ixp_registration(void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
/* 2.6.20+ work handlers take a work_struct; these wrap the void* versions */
static void ixp_process_pending_wq(struct work_struct *work);
static void ixp_registration_wq(struct work_struct *work);
#endif
/*
 * dummy device structure
 */

static struct {
	softc_device_decl	sc_dev;
} ixpdev;

/* OCF dispatch table: entry points this driver exposes to the framework */
static device_method_t ixp_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
	DEVMETHOD(cryptodev_freesession,ixp_freesession),
	DEVMETHOD(cryptodev_process,	ixp_process),
#ifdef __ixp46X
	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
#endif
};
  148. /*
  149. * Generate a new software session.
  150. */
  151. static int
  152. ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
  153. {
  154. struct ixp_data *ixp;
  155. u_int32_t i;
  156. #define AUTH_LEN(cri, def) \
  157. (cri->cri_mlen ? cri->cri_mlen : (def))
  158. dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
  159. if (sid == NULL || cri == NULL) {
  160. dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
  161. return EINVAL;
  162. }
  163. if (ixp_sessions) {
  164. for (i = 1; i < ixp_sesnum; i++)
  165. if (ixp_sessions[i] == NULL)
  166. break;
  167. } else
  168. i = 1; /* NB: to silence compiler warning */
  169. if (ixp_sessions == NULL || i == ixp_sesnum) {
  170. struct ixp_data **ixpd;
  171. if (ixp_sessions == NULL) {
  172. i = 1; /* We leave ixp_sessions[0] empty */
  173. ixp_sesnum = CRYPTO_SW_SESSIONS;
  174. } else
  175. ixp_sesnum *= 2;
  176. ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
  177. if (ixpd == NULL) {
  178. /* Reset session number */
  179. if (ixp_sesnum == CRYPTO_SW_SESSIONS)
  180. ixp_sesnum = 0;
  181. else
  182. ixp_sesnum /= 2;
  183. dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
  184. return ENOBUFS;
  185. }
  186. memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
  187. /* Copy existing sessions */
  188. if (ixp_sessions) {
  189. memcpy(ixpd, ixp_sessions,
  190. (ixp_sesnum / 2) * sizeof(struct ixp_data *));
  191. kfree(ixp_sessions);
  192. }
  193. ixp_sessions = ixpd;
  194. }
  195. ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
  196. SLAB_ATOMIC);
  197. if (ixp_sessions[i] == NULL) {
  198. ixp_freesession(NULL, i);
  199. dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
  200. return ENOBUFS;
  201. }
  202. *sid = i;
  203. ixp = ixp_sessions[i];
  204. memset(ixp, 0, sizeof(*ixp));
  205. ixp->ixp_cipher_alg = -1;
  206. ixp->ixp_auth_alg = -1;
  207. ixp->ixp_ctx_id = -1;
  208. INIT_LIST_HEAD(&ixp->ixp_q);
  209. ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
  210. while (cri) {
  211. switch (cri->cri_alg) {
  212. case CRYPTO_DES_CBC:
  213. ixp->ixp_cipher_alg = cri->cri_alg;
  214. ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
  215. ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
  216. ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
  217. ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
  218. ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
  219. IX_CRYPTO_ACC_DES_IV_64;
  220. memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
  221. cri->cri_key, (cri->cri_klen + 7) / 8);
  222. break;
  223. case CRYPTO_3DES_CBC:
  224. ixp->ixp_cipher_alg = cri->cri_alg;
  225. ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
  226. ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
  227. ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
  228. ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
  229. ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
  230. IX_CRYPTO_ACC_DES_IV_64;
  231. memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
  232. cri->cri_key, (cri->cri_klen + 7) / 8);
  233. break;
  234. case CRYPTO_RIJNDAEL128_CBC:
  235. ixp->ixp_cipher_alg = cri->cri_alg;
  236. ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
  237. ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
  238. ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
  239. ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
  240. ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
  241. memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
  242. cri->cri_key, (cri->cri_klen + 7) / 8);
  243. break;
  244. case CRYPTO_MD5:
  245. case CRYPTO_MD5_HMAC:
  246. ixp->ixp_auth_alg = cri->cri_alg;
  247. ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
  248. ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
  249. ixp->ixp_ctx.authCtx.aadLen = 0;
  250. /* Only MD5_HMAC needs a key */
  251. if (cri->cri_alg == CRYPTO_MD5_HMAC) {
  252. ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
  253. if (ixp->ixp_ctx.authCtx.authKeyLen >
  254. sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
  255. printk(
  256. "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
  257. cri->cri_klen);
  258. ixp_freesession(NULL, i);
  259. return EINVAL;
  260. }
  261. memcpy(ixp->ixp_ctx.authCtx.key.authKey,
  262. cri->cri_key, (cri->cri_klen + 7) / 8);
  263. }
  264. break;
  265. case CRYPTO_SHA1:
  266. case CRYPTO_SHA1_HMAC:
  267. ixp->ixp_auth_alg = cri->cri_alg;
  268. ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
  269. ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
  270. ixp->ixp_ctx.authCtx.aadLen = 0;
  271. /* Only SHA1_HMAC needs a key */
  272. if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
  273. ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
  274. if (ixp->ixp_ctx.authCtx.authKeyLen >
  275. sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
  276. printk(
  277. "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
  278. cri->cri_klen);
  279. ixp_freesession(NULL, i);
  280. return EINVAL;
  281. }
  282. memcpy(ixp->ixp_ctx.authCtx.key.authKey,
  283. cri->cri_key, (cri->cri_klen + 7) / 8);
  284. }
  285. break;
  286. default:
  287. printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
  288. ixp_freesession(NULL, i);
  289. return EINVAL;
  290. }
  291. cri = cri->cri_next;
  292. }
  293. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
  294. INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
  295. INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
  296. #else
  297. INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
  298. INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
  299. #endif
  300. return 0;
  301. }
  302. /*
  303. * Free a session.
  304. */
  305. static int
  306. ixp_freesession(device_t dev, u_int64_t tid)
  307. {
  308. u_int32_t sid = CRYPTO_SESID2LID(tid);
  309. dprintk("%s()\n", __FUNCTION__);
  310. if (sid > ixp_sesnum || ixp_sessions == NULL ||
  311. ixp_sessions[sid] == NULL) {
  312. dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
  313. return EINVAL;
  314. }
  315. /* Silently accept and return */
  316. if (sid == 0)
  317. return 0;
  318. if (ixp_sessions[sid]) {
  319. if (ixp_sessions[sid]->ixp_ctx_id != -1) {
  320. ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
  321. ixp_sessions[sid]->ixp_ctx_id = -1;
  322. }
  323. kfree(ixp_sessions[sid]);
  324. }
  325. ixp_sessions[sid] = NULL;
  326. if (ixp_blocked) {
  327. ixp_blocked = 0;
  328. crypto_unblock(ixp_id, CRYPTO_SYMQ);
  329. }
  330. return 0;
  331. }
  332. /*
  333. * callback for when hash processing is complete
  334. */
  335. static void
  336. ixp_hash_perform_cb(
  337. UINT32 hash_key_id,
  338. IX_MBUF *bufp,
  339. IxCryptoAccStatus status)
  340. {
  341. struct ixp_q *q;
  342. dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
  343. if (bufp == NULL) {
  344. printk("ixp: NULL buf in %s\n", __FUNCTION__);
  345. return;
  346. }
  347. q = IX_MBUF_PRIV(bufp);
  348. if (q == NULL) {
  349. printk("ixp: NULL priv in %s\n", __FUNCTION__);
  350. return;
  351. }
  352. if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
  353. /* On success, need to copy hash back into original client buffer */
  354. memcpy(q->ixp_hash_dest, q->ixp_hash_src,
  355. (q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
  356. SHA1_HASH_LEN : MD5_HASH_LEN);
  357. }
  358. else {
  359. printk("ixp: hash perform failed status=%d\n", status);
  360. q->ixp_q_crp->crp_etype = EINVAL;
  361. }
  362. /* Free internal buffer used for hashing */
  363. kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
  364. crypto_done(q->ixp_q_crp);
  365. kmem_cache_free(qcache, q);
  366. }
  367. /*
  368. * setup a request and perform it
  369. */
  370. static void
  371. ixp_q_process(struct ixp_q *q)
  372. {
  373. IxCryptoAccStatus status;
  374. struct ixp_data *ixp = q->ixp_q_data;
  375. int auth_off = 0;
  376. int auth_len = 0;
  377. int crypt_off = 0;
  378. int crypt_len = 0;
  379. int icv_off = 0;
  380. char *crypt_func;
  381. dprintk("%s(%p)\n", __FUNCTION__, q);
  382. if (q->ixp_q_ccrd) {
  383. if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
  384. if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
  385. q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
  386. } else {
  387. q->ixp_q_iv = q->ixp_q_iv_data;
  388. read_random(q->ixp_q_iv, ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen);
  389. }
  390. if ((q->ixp_q_ccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
  391. crypto_copyback(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
  392. q->ixp_q_ccrd->crd_inject,
  393. ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
  394. (caddr_t) q->ixp_q_iv);
  395. } else {
  396. if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT)
  397. q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
  398. else {
  399. q->ixp_q_iv = q->ixp_q_iv_data;
  400. crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
  401. q->ixp_q_ccrd->crd_inject,
  402. ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
  403. (caddr_t) q->ixp_q_iv);
  404. }
  405. }
  406. if (q->ixp_q_acrd) {
  407. auth_off = q->ixp_q_acrd->crd_skip;
  408. auth_len = q->ixp_q_acrd->crd_len;
  409. icv_off = q->ixp_q_acrd->crd_inject;
  410. }
  411. crypt_off = q->ixp_q_ccrd->crd_skip;
  412. crypt_len = q->ixp_q_ccrd->crd_len;
  413. } else { /* if (q->ixp_q_acrd) */
  414. auth_off = q->ixp_q_acrd->crd_skip;
  415. auth_len = q->ixp_q_acrd->crd_len;
  416. icv_off = q->ixp_q_acrd->crd_inject;
  417. }
  418. if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
  419. struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
  420. if (skb_shinfo(skb)->nr_frags) {
  421. /*
  422. * DAVIDM fix this limitation one day by using
  423. * a buffer pool and chaining, it is not currently
  424. * needed for current user/kernel space acceleration
  425. */
  426. printk("ixp: Cannot handle fragmented skb's yet !\n");
  427. q->ixp_q_crp->crp_etype = ENOENT;
  428. goto done;
  429. }
  430. IX_MBUF_MLEN(&q->ixp_q_mbuf) =
  431. IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
  432. IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
  433. } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
  434. struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
  435. if (uiop->uio_iovcnt != 1) {
  436. /*
  437. * DAVIDM fix this limitation one day by using
  438. * a buffer pool and chaining, it is not currently
  439. * needed for current user/kernel space acceleration
  440. */
  441. printk("ixp: Cannot handle more than 1 iovec yet !\n");
  442. q->ixp_q_crp->crp_etype = ENOENT;
  443. goto done;
  444. }
  445. IX_MBUF_MLEN(&q->ixp_q_mbuf) =
  446. IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
  447. IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
  448. } else /* contig buffer */ {
  449. IX_MBUF_MLEN(&q->ixp_q_mbuf) =
  450. IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
  451. IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
  452. }
  453. IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
  454. if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
  455. /*
  456. * For SHA1 and MD5 hash, need to create an internal buffer that is big
  457. * enough to hold the original data + the appropriate padding for the
  458. * hash algorithm.
  459. */
  460. UINT8 *tbuf = NULL;
  461. IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
  462. ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
  463. tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
  464. if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
  465. printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
  466. IX_MBUF_MLEN(&q->ixp_q_mbuf));
  467. q->ixp_q_crp->crp_etype = ENOMEM;
  468. goto done;
  469. }
  470. memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
  471. /* Set location in client buffer to copy hash into */
  472. q->ixp_hash_dest =
  473. &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
  474. IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
  475. /* Set location in internal buffer for where hash starts */
  476. q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
  477. crypt_func = "ixCryptoAccHashPerform";
  478. status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
  479. &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
  480. &ixp->ixp_hash_key_id);
  481. }
  482. else {
  483. crypt_func = "ixCryptoAccAuthCryptPerform";
  484. status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
  485. NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
  486. q->ixp_q_iv);
  487. }
  488. if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
  489. return;
  490. if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
  491. q->ixp_q_crp->crp_etype = ENOMEM;
  492. goto done;
  493. }
  494. printk("ixp: %s failed %u\n", crypt_func, status);
  495. q->ixp_q_crp->crp_etype = EINVAL;
  496. done:
  497. crypto_done(q->ixp_q_crp);
  498. kmem_cache_free(qcache, q);
  499. }
  500. /*
  501. * because we cannot process the Q from the Register callback
  502. * we do it here on a task Q.
  503. */
  504. static void
  505. ixp_process_pending(void *arg)
  506. {
  507. struct ixp_data *ixp = arg;
  508. struct ixp_q *q = NULL;
  509. dprintk("%s(%p)\n", __FUNCTION__, arg);
  510. if (!ixp)
  511. return;
  512. while (!list_empty(&ixp->ixp_q)) {
  513. q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
  514. list_del(&q->ixp_q_list);
  515. ixp_q_process(q);
  516. }
  517. }
  518. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
  519. static void
  520. ixp_process_pending_wq(struct work_struct *work)
  521. {
  522. struct ixp_data *ixp = container_of(work, struct ixp_data, ixp_pending_work);
  523. ixp_process_pending(ixp);
  524. }
  525. #endif
  526. /*
  527. * callback for when context registration is complete
  528. */
/*
 * callback for when context registration is complete
 *
 * The access library may invoke this twice per registration: once per
 * registration mbuf with IX_CRYPTO_ACC_STATUS_WAIT, then again with the
 * final status.  Each call frees the mbuf data passed in.  On final
 * success the session is flagged registered and the pending queue is
 * drained from a work queue; on failure every queued request is
 * completed with EINVAL.
 */
static void
ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
{
	int i;
	struct ixp_data *ixp;
	struct ixp_q *q;

	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);

	/*
	 * free any buffer passed in to this routine
	 */
	if (bufp) {
		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
		kfree(IX_MBUF_MDATA(bufp));
		IX_MBUF_MDATA(bufp) = NULL;
	}

	/* find the session that owns this context id */
	for (i = 0; i < ixp_sesnum; i++) {
		ixp = ixp_sessions[i];
		if (ixp && ixp->ixp_ctx_id == ctx_id)
			break;
	}
	if (i >= ixp_sesnum) {
		printk("ixp: invalid context id %d\n", ctx_id);
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
		/* this is normal to free the first of two buffers */
		dprintk("ixp: register not finished yet.\n");
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
		printk("ixp: register failed 0x%x\n", status);
		/* fail every request queued on this session */
		while (!list_empty(&ixp->ixp_q)) {
			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
			list_del(&q->ixp_q_list);
			q->ixp_q_crp->crp_etype = EINVAL;
			crypto_done(q->ixp_q_crp);
			kmem_cache_free(qcache, q);
		}
		return;
	}

	/*
	 * we are now registered, we cannot start processing the Q here
	 * or we get strange errors with AES (DES/3DES seem to be ok).
	 */
	ixp->ixp_registered = 1;
	schedule_work(&ixp->ixp_pending_work);
}
  576. /*
  577. * callback for when data processing is complete
  578. */
  579. static void
  580. ixp_perform_cb(
  581. UINT32 ctx_id,
  582. IX_MBUF *sbufp,
  583. IX_MBUF *dbufp,
  584. IxCryptoAccStatus status)
  585. {
  586. struct ixp_q *q;
  587. dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
  588. dbufp, status);
  589. if (sbufp == NULL) {
  590. printk("ixp: NULL sbuf in ixp_perform_cb\n");
  591. return;
  592. }
  593. q = IX_MBUF_PRIV(sbufp);
  594. if (q == NULL) {
  595. printk("ixp: NULL priv in ixp_perform_cb\n");
  596. return;
  597. }
  598. if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
  599. printk("ixp: perform failed status=%d\n", status);
  600. q->ixp_q_crp->crp_etype = EINVAL;
  601. }
  602. crypto_done(q->ixp_q_crp);
  603. kmem_cache_free(qcache, q);
  604. }
  605. /*
  606. * registration is not callable at IRQ time, so we defer
  607. * to a task queue, this routines completes the registration for us
  608. * when the task queue runs
  609. *
  610. * Unfortunately this means we cannot tell OCF that the driver is blocked,
  611. * we do that on the next request.
  612. */
/*
 * registration is not callable at IRQ time, so we defer
 * to a task queue, this routine completes the registration for us
 * when the task queue runs
 *
 * Unfortunately this means we cannot tell OCF that the driver is blocked,
 * we do that on the next request.
 */
static void
ixp_registration(void *arg)
{
	struct ixp_data *ixp = arg;
	struct ixp_q *q = NULL;
	IX_MBUF *pri = NULL, *sec = NULL;
	int status = IX_CRYPTO_ACC_STATUS_SUCCESS;

	if (!ixp) {
		printk("ixp: ixp_registration with no arg\n");
		return;
	}

	/* re-registration (e.g. direction change): drop the old context first */
	if (ixp->ixp_ctx_id != -1) {
		ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
		ixp->ixp_ctx_id = -1;
	}

	if (list_empty(&ixp->ixp_q)) {
		printk("ixp: ixp_registration with no Q\n");
		return;
	}

	/*
	 * setup the primary and secondary buffers
	 * (ixp_register_cb frees their data again when called)
	 */
	q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
	if (q->ixp_q_acrd) {
		pri = &ixp->ixp_pri_mbuf;
		sec = &ixp->ixp_sec_mbuf;
		/*
		 * NOTE(review): these kmalloc results are not checked for NULL
		 * before registration — confirm the access library tolerates a
		 * NULL MDATA, or add ENOMEM handling here.
		 */
		IX_MBUF_MLEN(pri)  = IX_MBUF_PKT_LEN(pri) = 128;
		IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
		IX_MBUF_MLEN(sec)  = IX_MBUF_PKT_LEN(sec) = 128;
		IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
	}

	/* Only need to register if a crypt op or HMAC op */
	if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
				ixp->ixp_auth_alg == CRYPTO_MD5)) {
		status = ixCryptoAccCtxRegister(
					&ixp->ixp_ctx,
					pri, sec,
					ixp_register_cb,
					ixp_perform_cb,
					&ixp->ixp_ctx_id);
	}
	else {
		/* Otherwise we start processing pending q */
		schedule_work(&ixp->ixp_pending_work);
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		return;

	if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
		printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
		ixp_blocked = 1;
		/* perhaps we should return EGAIN on queued ops ? */
		return;
	}

	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
	ixp->ixp_ctx_id = -1;

	/*
	 * everything waiting is toasted
	 */
	while (!list_empty(&ixp->ixp_q)) {
		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
		list_del(&q->ixp_q_list);
		q->ixp_q_crp->crp_etype = ENOENT;
		crypto_done(q->ixp_q_crp);
		kmem_cache_free(qcache, q);
	}
}
  679. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
  680. static void
  681. ixp_registration_wq(struct work_struct *work)
  682. {
  683. struct ixp_data *ixp = container_of(work, struct ixp_data,
  684. ixp_registration_work);
  685. ixp_registration(ixp);
  686. }
  687. #endif
  688. /*
  689. * Process a request.
  690. */
/*
 * Process a request.
 *
 * Validates the request, wraps it in a struct ixp_q, then either runs it
 * immediately (session already registered with the access library) or
 * queues it and schedules the deferred registration work.  Errors for a
 * specific request are reported via crp_etype + crypto_done(); ERESTART
 * is returned to make OCF re-queue when we are out of h/w tunnels.
 */
static int
ixp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ixp_data *ixp;
	unsigned int lid;
	struct ixp_q *q = NULL;
	int status;

	dprintk("%s()\n", __FUNCTION__);

	/* Sanity check */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	/* out of h/w tunnels: ask OCF to hold requests until we unblock */
	if (ixp_blocked)
		return ERESTART;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	/*
	 * find the session we are using
	 */
	lid = crp->crp_sid & 0xffffffff;
	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
			ixp_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	ixp = ixp_sessions[lid];

	/*
	 * setup a new request ready for queuing
	 */
	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
	if (q == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	/*
	 * save some cycles by only zeroing the important bits
	 */
	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
	q->ixp_q_ccrd = NULL;
	q->ixp_q_acrd = NULL;
	q->ixp_q_crp = crp;
	q->ixp_q_data = ixp;

	/*
	 * point the cipher and auth descriptors appropriately
	 * check that we have something to do
	 */
	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
		q->ixp_q_ccrd = crp->crp_desc;
	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
		q->ixp_q_acrd = crp->crp_desc;
	else {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	if (crp->crp_desc->crd_next) {
		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
			q->ixp_q_ccrd = crp->crp_desc->crd_next;
		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
			q->ixp_q_acrd = crp->crp_desc->crd_next;
		else {
			crp->crp_etype = ENOENT;
			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
			goto done;
		}
	}

	/*
	 * If there is a direction change for this context then we mark it as
	 * unregistered and re-register it for the new direction.  This is not
	 * a very expensive operation and currently only tends to happen when
	 * user-space applications are doing benchmarks
	 *
	 * DM - we should be checking for pending requests before unregistering.
	 */
	if (q->ixp_q_ccrd && ixp->ixp_registered &&
			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
		dprintk("%s - detected direction change on session\n", __FUNCTION__);
		ixp->ixp_registered = 0;
	}

	/*
	 * if we are registered, call straight into the perform code
	 */
	if (ixp->ixp_registered) {
		ixp_q_process(q);
		return 0;
	}

	/*
	 * the only part of the context not set in newsession is the direction
	 * dependent parts
	 */
	if (q->ixp_q_ccrd) {
		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
		} else {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
		}
	} else {
		/* q->ixp_q_acrd must be set if we are here */
		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
	}

	/* only schedule the registration work when the queue was empty */
	status = list_empty(&ixp->ixp_q);
	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
	if (status)
		schedule_work(&ixp->ixp_registration_work);
	return 0;

done:
	if (q)
		kmem_cache_free(qcache, q);
	crypto_done(crp);
	return 0;
}
  812. #ifdef __ixp46X
  813. /*
  814. * key processing support for the ixp465
  815. */
  816. /*
  817. * copy a BN (LE) into a buffer (BE) an fill out the op appropriately
  818. * assume zeroed and only copy bits that are significant
  819. */
  820. static int
  821. ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
  822. {
  823. unsigned char *src = (unsigned char *) p->crp_p;
  824. unsigned char *dst;
  825. int len, bits = p->crp_nbits;
  826. dprintk("%s()\n", __FUNCTION__);
  827. if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
  828. dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
  829. bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
  830. return -1;
  831. }
  832. len = (bits + 31) / 32; /* the number UINT32's needed */
  833. dst = (unsigned char *) &buf[len];
  834. dst--;
  835. while (bits > 0) {
  836. *dst-- = *src++;
  837. bits -= 8;
  838. }
  839. #if 0 /* no need to zero remaining bits as it is done during request alloc */
  840. while (dst > (unsigned char *) buf)
  841. *dst-- = '\0';
  842. #endif
  843. op->pData = buf;
  844. op->dataLen = len;
  845. return 0;
  846. }
  847. /*
  848. * copy out the result, be as forgiving as we can about small output buffers
  849. */
static int
ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
{
	unsigned char *dst = (unsigned char *) p->crp_p;
	unsigned char *src = (unsigned char *) buf;
	int len, z, bits = p->crp_nbits;

	dprintk("%s()\n", __FUNCTION__);

	/* total bytes the PKE engine produced */
	len = op->dataLen * sizeof(UINT32);

	/* skip leading zeroes to be small buffer friendly */
	z = 0;
	while (z < len && src[z] == '\0')
		z++;

	/*
	 * copy the significant bytes out in reverse order (big-endian
	 * engine result -> little-endian bignum), bounded by both the
	 * result size (len) and the caller's buffer size (bits)
	 */
	src += len;
	src--;
	len -= z;

	while (len > 0 && bits > 0) {
		*dst++ = *src--;
		len--;
		bits -= 8;
	}

	/* zero-fill whatever remains of the caller's buffer */
	while (bits > 0) {
		*dst++ = '\0';
		bits -= 8;
	}

	/* significant bytes left over: the output buffer was too small */
	if (len > 0) {
		dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
				__FUNCTION__, len, z, p->crp_nbits / 8);
		return -1;
	}

	return 0;
}
  881. /*
  882. * the parameter offsets for exp_mod
  883. */
  884. #define IXP_PARAM_BASE 0
  885. #define IXP_PARAM_EXP 1
  886. #define IXP_PARAM_MOD 2
  887. #define IXP_PARAM_RES 3
  888. /*
  889. * key processing complete callback, is also used to start processing
  890. * by passing a NULL for pResult
  891. */
static void
ixp_kperform_cb(
	IxCryptoAccPkeEauOperation operation,
	IxCryptoAccPkeEauOpResult *pResult,
	BOOL carryOrBorrow,
	IxCryptoAccStatus status)
{
	struct ixp_pkq *q, *tmp;
	unsigned long flags;

	dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
			carryOrBorrow, status);

	/* handle a completed request (pResult == NULL means we were called
	 * from ixp_kprocess just to start queue processing) */
	if (pResult) {
		if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
			q = ixp_pk_cur;
			if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
				dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
				q->pkq_krp->krp_status = ERANGE; /* could do better */
			} else {
				/* copy out the result */
				if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
						&q->pkq_result, q->pkq_obuf))
					q->pkq_krp->krp_status = ERANGE;
			}
			crypto_kdone(q->pkq_krp);
			kfree(q);
			/* engine is idle again; allows the loop below to submit */
			ixp_pk_cur = NULL;
		} else
			printk("%s - callback with invalid result pointer\n", __FUNCTION__);
	}

	/* nothing more to do if the engine is busy or the queue is empty */
	spin_lock_irqsave(&ixp_pkq_lock, flags);
	if (ixp_pk_cur || list_empty(&ixp_pkq)) {
		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
		return;
	}

	list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {

		list_del(&q->pkq_list);
		ixp_pk_cur = q;

		/* drop the lock around the submit; ixp_pk_cur (set above,
		 * under the lock) marks the engine busy for other callers */
		spin_unlock_irqrestore(&ixp_pkq_lock, flags);

		status = ixCryptoAccPkeEauPerform(
				IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
				&q->pkq_op,
				ixp_kperform_cb,
				&q->pkq_result);

		if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
			dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
			return; /* callback will return here for callback */
		} else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
			printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
		} else {
			printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
					__FUNCTION__, status);
		}
		/* submit failed: complete the request with an error and try
		 * the next queued one */
		q->pkq_krp->krp_status = ERANGE; /* could do better */
		crypto_kdone(q->pkq_krp);
		kfree(q);
		spin_lock_irqsave(&ixp_pkq_lock, flags);
	}
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
}
  952. static int
  953. ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
  954. {
  955. struct ixp_pkq *q;
  956. int rc = 0;
  957. unsigned long flags;
  958. dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
  959. krp->krp_param[IXP_PARAM_BASE].crp_nbits,
  960. krp->krp_param[IXP_PARAM_EXP].crp_nbits,
  961. krp->krp_param[IXP_PARAM_MOD].crp_nbits,
  962. krp->krp_param[IXP_PARAM_RES].crp_nbits);
  963. if (krp->krp_op != CRK_MOD_EXP) {
  964. krp->krp_status = EOPNOTSUPP;
  965. goto err;
  966. }
  967. q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
  968. if (q == NULL) {
  969. krp->krp_status = ENOMEM;
  970. goto err;
  971. }
  972. /*
  973. * The PKE engine does not appear to zero the output buffer
  974. * appropriately, so we need to do it all here.
  975. */
  976. memset(q, 0, sizeof(*q));
  977. q->pkq_krp = krp;
  978. INIT_LIST_HEAD(&q->pkq_list);
  979. if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
  980. q->pkq_ibuf0))
  981. rc = 1;
  982. if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
  983. &q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
  984. rc = 2;
  985. if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
  986. &q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
  987. rc = 3;
  988. if (rc) {
  989. kfree(q);
  990. krp->krp_status = ERANGE;
  991. goto err;
  992. }
  993. q->pkq_result.pData = q->pkq_obuf;
  994. q->pkq_result.dataLen =
  995. (krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
  996. spin_lock_irqsave(&ixp_pkq_lock, flags);
  997. list_add_tail(&q->pkq_list, &ixp_pkq);
  998. spin_unlock_irqrestore(&ixp_pkq_lock, flags);
  999. if (!ixp_pk_cur)
  1000. ixp_kperform_cb(0, NULL, 0, 0);
  1001. return (0);
  1002. err:
  1003. crypto_kdone(krp);
  1004. return (0);
  1005. }
  1006. #ifdef CONFIG_OCF_RANDOMHARVEST
  1007. /*
  1008. * We run the random number generator output through SHA so that it
  1009. * is FIPS compliant.
  1010. */
/* completion flag for the SHA whitening callback: 0 = in progress,
 * 1 = success, negative = negated IxCryptoAccStatus error code */
static volatile int sha_done = 0;
/* digest output buffer handed to (and checked by) ixp_hash_cb */
static unsigned char sha_digest[20];
  1013. static void
  1014. ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
  1015. {
  1016. dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
  1017. if (sha_digest != digest)
  1018. printk("digest error\n");
  1019. if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
  1020. sha_done = 1;
  1021. else
  1022. sha_done = -status;
  1023. }
  1024. static int
  1025. ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
  1026. {
  1027. IxCryptoAccStatus status;
  1028. int i, n, rc;
  1029. dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
  1030. memset(buf, 0, maxwords * sizeof(*buf));
  1031. status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
  1032. if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
  1033. dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
  1034. __FUNCTION__, status);
  1035. return 0;
  1036. }
  1037. /*
  1038. * run the random data through SHA to make it look more random
  1039. */
  1040. n = sizeof(sha_digest); /* process digest bytes at a time */
  1041. rc = 0;
  1042. for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
  1043. if ((maxwords - i) * sizeof(*buf) < n)
  1044. n = (maxwords - i) * sizeof(*buf);
  1045. sha_done = 0;
  1046. status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
  1047. (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
  1048. if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
  1049. dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
  1050. return -EIO;
  1051. }
  1052. while (!sha_done)
  1053. schedule();
  1054. if (sha_done < 0) {
  1055. dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
  1056. return 0;
  1057. }
  1058. memcpy(&buf[i], sha_digest, n);
  1059. rc += n / sizeof(*buf);;
  1060. }
  1061. return rc;
  1062. }
  1063. #endif /* CONFIG_OCF_RANDOMHARVEST */
  1064. #endif /* __ixp46X */
  1065. /*
  1066. * our driver startup and shutdown routines
  1067. */
static int
ixp_init(void)
{
	dprintk("%s(%p)\n", __FUNCTION__, ixp_init);

	/* the access library may already have been initialised by another
	 * user of the crypto unit, so a failure here is only a warning */
	if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
		printk("ixCryptoAccInit failed, assuming already initialised!\n");

	/* slab cache for per-request state (struct ixp_q); older kernels
	 * take an extra destructor argument, hence the version guard */
	qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
				SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
				, NULL
#endif
				);
	if (!qcache) {
		printk("failed to create Qcache\n");
		return -ENOENT;
	}

	memset(&ixpdev, 0, sizeof(ixpdev));
	softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);

	/* register this driver with the OCF framework */
	ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
				CRYPTOCAP_F_HARDWARE);
	if (ixp_id < 0)
		panic("IXP/OCF crypto device cannot initialize!");

#define	REGISTER(alg) \
	crypto_register(ixp_id,alg,0,0)

	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
#endif
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
#undef REGISTER

#ifdef __ixp46X
	/* ixp465 also has a public-key engine: set up mod_exp support */
	spin_lock_init(&ixp_pkq_lock);

	/*
	 * we do not enable the go fast options here as they can potentially
	 * allow timing based attacks
	 *
	 * http://www.openssl.org/news/secadv_20030219.txt
	 */
	ixCryptoAccPkeEauExpConfig(0, 0);

	crypto_kregister(ixp_id, CRK_MOD_EXP, 0);

#ifdef CONFIG_OCF_RANDOMHARVEST
	crypto_rregister(ixp_id, ixp_read_random, NULL);
#endif
#endif

	return 0;
}
static void
ixp_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);

	/* deregister from OCF first so no new requests arrive before the
	 * request cache is torn down */
	crypto_unregister_all(ixp_id);
	ixp_id = -1;

	kmem_cache_destroy(qcache);
	qcache = NULL;
}
/* standard Linux module entry/exit hooks and module metadata */
module_init(ixp_init);
module_exit(ixp_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");