cryptosoft.c

/*
 * An OCF module that uses the linux kernel cryptoapi, based on the
 * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
 * but is mostly unrecognisable.
 *
 * Written by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2004-2011 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
#include <linux/scatterlist.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif
#include <cryptodev.h>
#include <uio.h>
struct {
    softc_device_decl sc_dev;
} swcr_softc;

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

#define SW_TYPE_CIPHER      0x01
#define SW_TYPE_HMAC        0x02
#define SW_TYPE_HASH        0x04
#define SW_TYPE_COMP        0x08
#define SW_TYPE_BLKCIPHER   0x10
#define SW_TYPE_ALG_MASK    0x1f

#define SW_TYPE_ASYNC       0x8000

#define SW_TYPE_INUSE       0x10000000

/* We change some of the above if we have an async interface */
#define SW_TYPE_ALG_AMASK   (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)

#define SW_TYPE_ABLKCIPHER  (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH       (SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC       (SW_TYPE_HMAC | SW_TYPE_ASYNC)

#define SCATTERLIST_MAX 16
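
/*
 * Example: the SW_TYPE_A* values are simply the base algorithm bit with
 * SW_TYPE_ASYNC or'd in, e.g.
 *     SW_TYPE_AHMAC == (SW_TYPE_HMAC | SW_TYPE_ASYNC) == 0x8002
 * so a switch on (sw_type & SW_TYPE_ALG_AMASK) can dispatch the sync and
 * async variants to different cases, while (sw_type & SW_TYPE_ALG_MASK)
 * still identifies the base algorithm type on its own.
 */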
struct swcr_data {
    struct work_struct   workq;
    int                  sw_type;
    int                  sw_alg;
    struct crypto_tfm   *sw_tfm;
    spinlock_t           sw_tfm_lock;
    union {
        struct {
            char *sw_key;
            int   sw_klen;
            int   sw_mlen;
        } hmac;
        void *sw_comp_buf;
    } u;
    struct swcr_data    *sw_next;
};

struct swcr_req {
    struct swcr_data    *sw_head;
    struct swcr_data    *sw;
    struct cryptop      *crp;
    struct cryptodesc   *crd;
    struct scatterlist   sg[SCATTERLIST_MAX];
    unsigned char        iv[EALG_MAX_BLOCK_LEN];
    char                 result[HASH_MAX_LEN];
    void                *crypto_req;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif
#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needs to be passed into the crypto core code.
 */
#define CRYPTO_TFM_MODE_CBC 0
#define CRYPTO_TFM_MODE_ECB 0
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/*
 * Linux 2.6.19 introduced a new Crypto API; set up macros here to map the
 * new API onto the old one.
 *
 * NB: some of these macros deliberately reference the caller's local
 * 'mode' and 'sg_num' variables, so they only work inside functions that
 * define them (as swcr_newsession()/swcr_process_req() do).
 */

/* Symmetric/Block Cipher */
struct blkcipher_desc
{
    struct crypto_tfm *tfm;
    void *info;
};
#define ecb(X)                              #X , CRYPTO_TFM_MODE_ECB
#define cbc(X)                              #X , CRYPTO_TFM_MODE_CBC
#define crypto_has_blkcipher(X, Y, Z)       crypto_alg_available(X, 0)
#define crypto_blkcipher_cast(X)            X
#define crypto_blkcipher_tfm(X)             X
#define crypto_alloc_blkcipher(X, Y, Z)     crypto_alloc_tfm(X, mode)
#define crypto_blkcipher_ivsize(X)          crypto_tfm_alg_ivsize(X)
#define crypto_blkcipher_blocksize(X)       crypto_tfm_alg_blocksize(X)
#define crypto_blkcipher_setkey(X, Y, Z)    crypto_cipher_setkey(X, Y, Z)
#define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
            crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
            crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_set_flags(x, y)    /* nop */
#define crypto_free_blkcipher(x)            crypto_free_tfm(x)
#define crypto_free_comp                    crypto_free_tfm
#define crypto_free_hash                    crypto_free_tfm

/* Hash/HMAC/Digest */
struct hash_desc
{
    struct crypto_tfm *tfm;
};
#define hmac(X)                         #X , 0
#define crypto_has_hash(X, Y, Z)        crypto_alg_available(X, 0)
#define crypto_hash_cast(X)             X
#define crypto_hash_tfm(X)              X
#define crypto_alloc_hash(X, Y, Z)      crypto_alloc_tfm(X, mode)
#define crypto_hash_digestsize(X)       crypto_tfm_alg_digestsize(X)
#define crypto_hash_digest(W, X, Y, Z)  \
            crypto_digest_digest((W)->tfm, X, sg_num, Z)

/* Asymmetric Cipher */
#define crypto_has_cipher(X, Y, Z)      crypto_alg_available(X, 0)

/* Compression */
#define crypto_has_comp(X, Y, Z)        crypto_alg_available(X, 0)
#define crypto_comp_tfm(X)              X
#define crypto_comp_cast(X)             X
#define crypto_alloc_comp(X, Y, Z)      crypto_alloc_tfm(X, mode)
#define plain(X)                        #X , 0
#else
#define ecb(X)   "ecb(" #X ")" , 0
#define cbc(X)   "cbc(" #X ")" , 0
#define hmac(X)  "hmac(" #X ")" , 0
#define plain(X) #X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
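
/*
 * Example: with the 2.6.19+ API, a table entry written as
 *     { cbc(aes), SW_TYPE_BLKCIPHER, }
 * expands to
 *     { "cbc(aes)", 0, SW_TYPE_BLKCIPHER, }
 * i.e. each of ecb()/cbc()/hmac()/plain() supplies both the alg_name and
 * mode initializers of struct crypto_details (defined below).  On a
 * pre-2.6.19 kernel the same entry instead expands to
 *     { "aes", CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER, }
 * matching the old crypto_alloc_tfm(name, mode) style of lookup.
 */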
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels */
#define crypto_alloc_ablkcipher(a,b,c)      (NULL)
#define crypto_ablkcipher_tfm(x)            ((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b)   /* nop */
#define crypto_ablkcipher_setkey(x, y, z)   (-EINVAL)
#define crypto_has_ablkcipher(a,b,c)        (0)
#else
#define HAVE_ABLKCIPHER
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels */
#define crypto_ahash_tfm(x)                 ((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c)           (NULL)
#define crypto_ahash_digestsize(x)          0
#else
#define HAVE_AHASH
#endif
struct crypto_details {
    char *alg_name;
    int mode;
    int sw_type;
};

static struct crypto_details crypto_details[] = {
    [CRYPTO_DES_CBC]        = { cbc(des),           SW_TYPE_BLKCIPHER, },
    [CRYPTO_3DES_CBC]       = { cbc(des3_ede),      SW_TYPE_BLKCIPHER, },
    [CRYPTO_BLF_CBC]        = { cbc(blowfish),      SW_TYPE_BLKCIPHER, },
    [CRYPTO_CAST_CBC]       = { cbc(cast5),         SW_TYPE_BLKCIPHER, },
    [CRYPTO_SKIPJACK_CBC]   = { cbc(skipjack),      SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5_HMAC]       = { hmac(md5),          SW_TYPE_HMAC, },
    [CRYPTO_SHA1_HMAC]      = { hmac(sha1),         SW_TYPE_HMAC, },
    [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160),    SW_TYPE_HMAC, },
    [CRYPTO_MD5_KPDK]       = { plain(md5-kpdk),    SW_TYPE_HASH, },
    [CRYPTO_SHA1_KPDK]      = { plain(sha1-kpdk),   SW_TYPE_HASH, },
    [CRYPTO_AES_CBC]        = { cbc(aes),           SW_TYPE_BLKCIPHER, },
    [CRYPTO_ARC4]           = { ecb(arc4),          SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5]            = { plain(md5),         SW_TYPE_HASH, },
    [CRYPTO_SHA1]           = { plain(sha1),        SW_TYPE_HASH, },
    [CRYPTO_NULL_HMAC]      = { hmac(digest_null),  SW_TYPE_HMAC, },
    [CRYPTO_NULL_CBC]       = { cbc(cipher_null),   SW_TYPE_BLKCIPHER, },
    [CRYPTO_DEFLATE_COMP]   = { plain(deflate),     SW_TYPE_COMP, },
    [CRYPTO_SHA2_256_HMAC]  = { hmac(sha256),       SW_TYPE_HMAC, },
    [CRYPTO_SHA2_384_HMAC]  = { hmac(sha384),       SW_TYPE_HMAC, },
    [CRYPTO_SHA2_512_HMAC]  = { hmac(sha512),       SW_TYPE_HMAC, },
    [CRYPTO_CAMELLIA_CBC]   = { cbc(camellia),      SW_TYPE_BLKCIPHER, },
    [CRYPTO_SHA2_256]       = { plain(sha256),      SW_TYPE_HASH, },
    [CRYPTO_SHA2_384]       = { plain(sha384),      SW_TYPE_HASH, },
    [CRYPTO_SHA2_512]       = { plain(sha512),      SW_TYPE_HASH, },
    [CRYPTO_RIPEMD160]      = { plain(ripemd160),   SW_TYPE_HASH, },
};
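
/*
 * Note: the designated initializers above leave any CRYPTO_* index that is
 * not listed zero-filled, i.e. with a NULL alg_name.  Both swcr_newsession()
 * and cryptosoft_init() rely on this and treat a NULL or empty alg_name as
 * "algorithm not supported".
 */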
int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");

int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
                "Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
                "Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
                "Do not use async blk ciphers even if available");

static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;

static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);

static device_method_t swcr_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession,  swcr_newsession),
    DEVMETHOD(cryptodev_freesession, swcr_freesession),
    DEVMETHOD(cryptodev_process,     swcr_process),
};

#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");

static void swcr_process_req(struct swcr_req *req);
/*
 * Some things just need to be run with user context no matter what;
 * the kernel compression libs use vmalloc/vfree, for example.
 */
typedef struct {
    struct work_struct wq;
    void (*func)(void *arg);
    void *arg;
} execute_later_t;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
doing_it_now(struct work_struct *wq)
{
    execute_later_t *w = container_of(wq, execute_later_t, wq);
    (w->func)(w->arg);
    kfree(w);
}
#else
static void
doing_it_now(void *arg)
{
    execute_later_t *w = (execute_later_t *) arg;
    (w->func)(w->arg);
    kfree(w);
}
#endif

static void
execute_later(void (fn)(void *), void *arg)
{
    execute_later_t *w;

    w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
    if (w) {
        memset(w, '\0', sizeof(*w));
        w->func = fn;
        w->arg = arg;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
        INIT_WORK(&w->wq, doing_it_now);
#else
        INIT_WORK(&w->wq, doing_it_now, w);
#endif
        schedule_work(&w->wq);
    }
}
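
/*
 * Usage sketch: defer work that must not run in interrupt context to the
 * kernel workqueue thread, e.g.
 *
 *     if (in_interrupt())
 *         execute_later((void (*)(void *))crypto_free_comp, (void *)tfm);
 *     else
 *         crypto_free_comp(tfm);
 *
 * This is exactly how swcr_freesession() below frees compression tfms,
 * since (per the comment above) their free path may vfree() and so cannot
 * run in softirq context.  swcr_process_req() uses the same trick to retry
 * a request whose tfm is busy.
 */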
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
    struct swcr_data **swd;
    u_int32_t i;
    int error;
    char *algo;
    int mode;

    dprintk("%s()\n", __FUNCTION__);
    if (sid == NULL || cri == NULL) {
        dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    if (swcr_sessions) {
        for (i = 1; i < swcr_sesnum; i++)
            if (swcr_sessions[i] == NULL)
                break;
    } else
        i = 1;      /* NB: to silence compiler warning */

    if (swcr_sessions == NULL || i == swcr_sesnum) {
        if (swcr_sessions == NULL) {
            i = 1; /* We leave swcr_sessions[0] empty */
            swcr_sesnum = CRYPTO_SW_SESSIONS;
        } else
            swcr_sesnum *= 2;

        swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
        if (swd == NULL) {
            /* Reset session number */
            if (swcr_sesnum == CRYPTO_SW_SESSIONS)
                swcr_sesnum = 0;
            else
                swcr_sesnum /= 2;
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

        /* Copy existing sessions */
        if (swcr_sessions) {
            memcpy(swd, swcr_sessions,
                    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
            kfree(swcr_sessions);
        }

        swcr_sessions = swd;
    }

    swd = &swcr_sessions[i];
    *sid = i;

    while (cri) {
        *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
                SLAB_ATOMIC);
        if (*swd == NULL) {
            swcr_freesession(NULL, i);
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(*swd, 0, sizeof(struct swcr_data));

        if (cri->cri_alg < 0 ||
                cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
            printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        algo = crypto_details[cri->cri_alg].alg_name;
        if (!algo || !*algo) {
            printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        mode = crypto_details[cri->cri_alg].mode;
        (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
        (*swd)->sw_alg = cri->cri_alg;
        spin_lock_init(&(*swd)->sw_tfm_lock);

        /* Algorithm specific configuration */
        switch (cri->cri_alg) {
        case CRYPTO_NULL_CBC:
            cri->cri_klen = 0; /* make it work with crypto API */
            break;
        default:
            break;
        }

        if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
            dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ablk ? NULL :
                    crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
            if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
                dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                (*swd)->sw_tfm = crypto_blkcipher_tfm(
                        crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
                if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
                    dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
            }
            if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
                int err;
                dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
                        algo, mode);
                err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
                (*swd)->sw_tfm = NULL; /* ensure NULL */
                swcr_freesession(NULL, i);
                return err;
            }

            if (debug) {
                int k;
                dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
                        __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
                /* use a separate index here; 'i' is still the session id */
                for (k = 0; k < (cri->cri_klen + 7) / 8; k++)
                    dprintk("%s0x%x", (k % 8) ? " " : "\n    ",
                            cri->cri_key[k] & 0xff);
                dprintk("\n");
            }
            if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                /* OCF doesn't enforce keys */
                crypto_ablkcipher_set_flags(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_ablkcipher_setkey(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            } else {
                /* OCF doesn't enforce keys */
                crypto_blkcipher_set_flags(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_blkcipher_setkey(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            }
            if (error) {
                printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
                        (*swd)->sw_tfm->crt_flags);
                swcr_freesession(NULL, i);
                return error;
            }
        } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
            dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ahash ? NULL :
                    crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
            if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
                dprintk("%s %s hash is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
                (*swd)->sw_tfm = crypto_hash_tfm(
                        crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
            }

            if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
                dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
                        algo, mode);
                (*swd)->sw_tfm = NULL; /* ensure NULL */
                swcr_freesession(NULL, i);
                return EINVAL;
            }

            (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
            (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
                    SLAB_ATOMIC);
            if ((*swd)->u.hmac.sw_key == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
            memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
            if (cri->cri_mlen) {
                (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
            } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
                        __crypto_ahash_cast((*swd)->sw_tfm));
            } else {
                (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
                        crypto_hash_cast((*swd)->sw_tfm));
            }
        } else if ((*swd)->sw_type & SW_TYPE_COMP) {
            (*swd)->sw_tfm = crypto_comp_tfm(
                    crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
            if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
                dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
                        algo, mode);
                (*swd)->sw_tfm = NULL; /* ensure NULL */
                swcr_freesession(NULL, i);
                return EINVAL;
            }
            (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
            if ((*swd)->u.sw_comp_buf == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
        } else {
            printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        cri = cri->cri_next;
        swd = &((*swd)->sw_next);
    }
    return 0;
}
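
/*
 * Illustrative example (hypothetical caller; field names per OCF's
 * cryptodev.h): an IPsec-style session asking for AES-CBC encryption plus
 * SHA1-HMAC authentication passes a two-element cri chain:
 *
 *     struct cryptoini crie, cria;
 *
 *     memset(&crie, 0, sizeof(crie));
 *     crie.cri_alg  = CRYPTO_AES_CBC;
 *     crie.cri_klen = 128;                // key length in bits
 *     crie.cri_key  = enc_key;
 *     crie.cri_next = &cria;
 *
 *     memset(&cria, 0, sizeof(cria));
 *     cria.cri_alg  = CRYPTO_SHA1_HMAC;
 *     cria.cri_klen = 160;
 *     cria.cri_key  = auth_key;
 *
 *     error = crypto_newsession(&sid, &crie, CRYPTOCAP_F_SOFTWARE);
 *
 * swcr_newsession() then allocates one swcr_data per chain element and
 * links them through sw_next under a single session id.
 */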
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
    struct swcr_data *swd;
    u_int32_t sid = CRYPTO_SESID2LID(tid);

    dprintk("%s()\n", __FUNCTION__);
    if (sid >= swcr_sesnum || swcr_sessions == NULL ||
            swcr_sessions[sid] == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return(EINVAL);
    }

    /* Silently accept and return */
    if (sid == 0)
        return(0);

    while ((swd = swcr_sessions[sid]) != NULL) {
        swcr_sessions[sid] = swd->sw_next;
        if (swd->sw_tfm) {
            switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
            case SW_TYPE_AHMAC:
            case SW_TYPE_AHASH:
                crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
                break;
#endif
#ifdef HAVE_ABLKCIPHER
            case SW_TYPE_ABLKCIPHER:
                crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
                break;
#endif
            case SW_TYPE_BLKCIPHER:
                crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
                break;
            case SW_TYPE_HMAC:
            case SW_TYPE_HASH:
                crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
                break;
            case SW_TYPE_COMP:
                if (in_interrupt())
                    execute_later((void (*)(void *))crypto_free_comp,
                            (void *)crypto_comp_cast(swd->sw_tfm));
                else
                    crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
                break;
            default:
                crypto_free_tfm(swd->sw_tfm);
                break;
            }
            swd->sw_tfm = NULL;
        }
        if (swd->sw_type & SW_TYPE_COMP) {
            if (swd->u.sw_comp_buf)
                kfree(swd->u.sw_comp_buf);
        } else {
            if (swd->u.hmac.sw_key)
                kfree(swd->u.hmac.sw_key);
        }
        kfree(swd);
    }
    return 0;
}
static void swcr_process_req_complete(struct swcr_req *req)
{
    dprintk("%s()\n", __FUNCTION__);

    /* req->sw is NULL if no matching context was found for this crd */
    if (req->sw && (req->sw->sw_type & SW_TYPE_INUSE)) {
        unsigned long flags;
        spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
        req->sw->sw_type &= ~SW_TYPE_INUSE;
        spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
    }

    if (req->crp->crp_etype)
        goto done;

    switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
#if defined(HAVE_AHASH)
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
                req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
        ahash_request_free(req->crypto_req);
        break;
#endif
#if defined(HAVE_ABLKCIPHER)
    case SW_TYPE_ABLKCIPHER:
        ablkcipher_request_free(req->crypto_req);
        break;
#endif
    case SW_TYPE_CIPHER:
    case SW_TYPE_HMAC:
    case SW_TYPE_HASH:
    case SW_TYPE_COMP:
    case SW_TYPE_BLKCIPHER:
        break;
    default:
        req->crp->crp_etype = EINVAL;
        goto done;
    }

    req->crd = req->crd->crd_next;
    if (req->crd) {
        swcr_process_req(req);
        return;
    }

done:
    dprintk("%s crypto_done %p\n", __FUNCTION__, req);
    crypto_done(req->crp);
    kmem_cache_free(swcr_req_cache, req);
}
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
    struct swcr_req *req = creq->data;

    dprintk("%s()\n", __FUNCTION__);
    if (err) {
        if (err == -EINPROGRESS)
            return;
        dprintk("%s() fail %d\n", __FUNCTION__, -err);
        req->crp->crp_etype = -err;
    }
    swcr_process_req_complete(req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
static void swcr_process_req(struct swcr_req *req)
{
    struct swcr_data *sw;
    struct cryptop *crp = req->crp;
    struct cryptodesc *crd = req->crd;
    struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
    struct uio *uiop = (struct uio *) crp->crp_buf;
    int sg_num, sg_len, skip;

    dprintk("%s()\n", __FUNCTION__);

    /*
     * Find the crypto context.
     *
     * XXX Note that the logic here prevents us from having
     * XXX the same algorithm multiple times in a session
     * XXX (or rather, we can but it won't give us the right
     * XXX results). To do that, we'd need some way of differentiating
     * XXX between the various instances of an algorithm (so we can
     * XXX locate the correct crypto context).
     */
    for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
        ;

    /* No such context ? */
    if (sw == NULL) {
        crp->crp_etype = EINVAL;
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        goto done;
    }

    /*
     * for some types we need to ensure only one user, as state stored in
     * the tfm during an operation can otherwise get corrupted
     */
    switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
#endif
    case SW_TYPE_HMAC:
    case SW_TYPE_HASH: {
        unsigned long flags;
        spin_lock_irqsave(&sw->sw_tfm_lock, flags);
        if (sw->sw_type & SW_TYPE_INUSE) {
            /* tfm busy: retry this request from user context later */
            spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
            execute_later((void (*)(void *))swcr_process_req, (void *)req);
            return;
        }
        sw->sw_type |= SW_TYPE_INUSE;
        spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
    } break;
    }

    req->sw = sw;
    skip = crd->crd_skip;
    /*
     * set up the SG list, skipping crd_skip bytes from the start of the buffer
     */
    memset(req->sg, 0, sizeof(req->sg));
    sg_init_table(req->sg, SCATTERLIST_MAX);
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        int i, len;

        sg_num = 0;
        sg_len = 0;

        if (skip < skb_headlen(skb)) {
            len = skb_headlen(skb) - skip;
            if (len + sg_len > crd->crd_len)
                len = crd->crd_len - sg_len;
            sg_set_page(&req->sg[sg_num],
                    virt_to_page(skb->data + skip), len,
                    offset_in_page(skb->data + skip));
            sg_len += len;
            sg_num++;
            skip = 0;
        } else
            skip -= skb_headlen(skb);

        for (i = 0; sg_len < crd->crd_len &&
                i < skb_shinfo(skb)->nr_frags &&
                sg_num < SCATTERLIST_MAX; i++) {
            if (skip < skb_shinfo(skb)->frags[i].size) {
                len = skb_shinfo(skb)->frags[i].size - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        skb_frag_page(&skb_shinfo(skb)->frags[i]),
                        len,
                        skb_shinfo(skb)->frags[i].page_offset + skip);
                sg_len += len;
                sg_num++;
                skip = 0;
            } else
                skip -= skb_shinfo(skb)->frags[i].size;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        int len;

        sg_len = 0;
        for (sg_num = 0; sg_len < crd->crd_len &&
                sg_num < uiop->uio_iovcnt &&
                sg_num < SCATTERLIST_MAX; sg_num++) {
            if (skip <= uiop->uio_iov[sg_num].iov_len) {
                len = uiop->uio_iov[sg_num].iov_len - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        virt_to_page(uiop->uio_iov[sg_num].iov_base + skip),
                        len,
                        offset_in_page(uiop->uio_iov[sg_num].iov_base + skip));
                sg_len += len;
                skip = 0;
            } else
                skip -= uiop->uio_iov[sg_num].iov_len;
        }
    } else {
        /* contiguous kernel buffer */
        sg_len = (crp->crp_ilen - skip);
        if (sg_len > crd->crd_len)
            sg_len = crd->crd_len;
        sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
                sg_len, offset_in_page(crp->crp_buf + skip));
        sg_num = 1;
    }
    if (sg_num > 0)
        sg_mark_end(&req->sg[sg_num-1]);
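
    /*
     * At this point req->sg describes crd_len bytes (or as much as fits in
     * SCATTERLIST_MAX entries) of one of OCF's three buffer encodings:
     * CRYPTO_F_SKBUF (linear head plus paged frags of an sk_buff),
     * CRYPTO_F_IOV (a uio iovec array), or a plain contiguous buffer.
     */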
    switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
    {
        int ret;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        req->crypto_req =
                ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm), GFP_ATOMIC);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
            goto done;
        }

        ahash_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        memset(req->result, 0, sizeof(req->result));

        if (sw->sw_type & SW_TYPE_AHMAC)
            crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
                    sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
        ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
        ret = crypto_ahash_digest(req->crypto_req);
        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            /* the async callback will complete the request */
            return;
        default:
        case 0:
            dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
            /* the linux crypto API returns -errno; OCF wants a positive errno */
            crp->crp_etype = ret < 0 ? -ret : ret;
            goto done;
        }
    } break;
#endif /* HAVE_AHASH */
#ifdef HAVE_ABLKCIPHER
    case SW_TYPE_ABLKCIPHER: {
        int ret;
        unsigned char *ivp = req->iv;
        int ivsize =
                crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_ablkcipher_blocksize(
                __crypto_ablkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_ablkcipher_blocksize(
                        __crypto_ablkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(req->iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        req->crypto_req = ablkcipher_request_alloc(
                __crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
                    __FILE__, __LINE__);
            goto done;
        }

        ablkcipher_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_ablkcipher_setkey(
                        __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
                        (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
            }
        }

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                get_random_bytes(ivp, ivsize);
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_encrypt(req->crypto_req);

        } else { /* decrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_decrypt(req->crypto_req);
        }

        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            /* the async callback will complete the request */
            return;
        default:
        case 0:
            dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
            /* the linux crypto API returns -errno; OCF wants a positive errno */
            crp->crp_etype = ret < 0 ? -ret : ret;
            goto done;
        }
    } break;
#endif /* HAVE_ABLKCIPHER */
    case SW_TYPE_BLKCIPHER: {
        unsigned char iv[EALG_MAX_BLOCK_LEN];
        unsigned char *ivp = iv;
        struct blkcipher_desc desc;
        int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_blkcipher_blocksize(
                crypto_blkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_blkcipher_blocksize(
                        crypto_blkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_blkcipher_setkey(
                        crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
                        (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
            }
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else {
                get_random_bytes(ivp, ivsize);
            }
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            desc.info = ivp;
            crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);

        } else { /* decrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else {
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            desc.info = ivp;
            crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
        }
    } break;
    case SW_TYPE_HMAC:
    case SW_TYPE_HASH:
    {
        char result[HASH_MAX_LEN];
        struct hash_desc desc;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_hash_cast(sw->sw_tfm);

        memset(result, 0, sizeof(result));

        if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
            crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
                    req->sg, sg_num, result);
#else
            crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
                    sw->u.hmac.sw_klen);
            crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
        } else { /* SW_TYPE_HASH */
            crypto_hash_digest(&desc, req->sg, sg_len, result);
        }

        crypto_copyback(crp->crp_flags, crp->crp_buf,
                crd->crd_inject, sw->u.hmac.sw_mlen, result);
    }
    break;
    case SW_TYPE_COMP: {
        void *ibuf = NULL;
        void *obuf = sw->u.sw_comp_buf;
        int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
        int ret = 0;

        /*
         * we need to use an additional copy if there is more than one
         * input chunk, since the kernel comp routines do not handle
         * SG yet.  Otherwise we just use the input buffer as is.
         * Rather than allocate another buffer we just split the tmp
         * buffer we already have.
         * Perhaps we should just use zlib directly ?
         */
        if (sg_num > 1) {
            int blk;

            ibuf = obuf;
            for (blk = 0; blk < sg_num; blk++) {
                memcpy(obuf, sg_virt(&req->sg[blk]),
                        req->sg[blk].length);
                obuf += req->sg[blk].length;
            }
            olen -= sg_len;
        } else
            ibuf = sg_virt(&req->sg[0]);

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
            ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && olen > crd->crd_len) {
                dprintk("cryptosoft: ERANGE compress %d into %d\n",
                        crd->crd_len, olen);
                if (swcr_fail_if_compression_grows)
                    ret = ERANGE;
            }
        } else { /* decompress */
            ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
                dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
                        "space for %d, at offset %d\n",
                        crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
                ret = ETOOSMALL;
            }
        }
        if (ret)
            dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

        /*
         * on success copy the result back;
         * the linux crypto API returns -errno, we need to fix that
         */
        crp->crp_etype = ret < 0 ? -ret : ret;
        if (ret == 0) {
            /* copy back the result and return its size */
            crypto_copyback(crp->crp_flags, crp->crp_buf,
                    crd->crd_inject, olen, obuf);
            crp->crp_olen = olen;
        }
    } break;

    default:
        /* Unknown/unsupported algorithm */
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        goto done;
    }

done:
    swcr_process_req_complete(req);
}
/*
 * Process a crypto request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
    struct swcr_req *req = NULL;
    u_int32_t lid;

    dprintk("%s()\n", __FUNCTION__);
    /* Sanity check */
    if (crp == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    crp->crp_etype = 0;

    if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        goto done;
    }

    lid = crp->crp_sid & 0xffffffff;
    if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
            swcr_sessions[lid] == NULL) {
        crp->crp_etype = ENOENT;
        dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
        goto done;
    }

    /*
     * do some error checking outside of the loop for SKB and IOV processing;
     * this leaves us with valid skb or uiop pointers for later
     */
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
        if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
            printk("%s,%d: %d nr_frags >= SCATTERLIST_MAX\n", __FILE__,
                    __LINE__, skb_shinfo(skb)->nr_frags);
            crp->crp_etype = EINVAL;    /* don't report success on a skipped request */
            goto done;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        struct uio *uiop = (struct uio *) crp->crp_buf;
        if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
            printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX\n", __FILE__,
                    __LINE__, uiop->uio_iovcnt);
            crp->crp_etype = EINVAL;
            goto done;
        }
    }

    /*
     * setup a new request ready for queuing
     */
    req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
    if (req == NULL) {
        dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
        crp->crp_etype = ENOMEM;
        goto done;
    }
    memset(req, 0, sizeof(*req));

    req->sw_head = swcr_sessions[lid];
    req->crp = crp;
    req->crd = crp->crp_desc;

    swcr_process_req(req);
    return 0;

done:
    crypto_done(crp);
    if (req)
        kmem_cache_free(swcr_req_cache, req);
    return 0;
}
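
/*
 * Dispatch sketch (hypothetical consumer; names per OCF's cryptodev.h):
 * once a session exists, a caller fills in a cryptop plus a cryptodesc
 * chain and hands it to the OCF core, which routes it to swcr_process():
 *
 *     struct cryptop *crp = crypto_getreq(2);    // cipher + mac descriptors
 *
 *     crp->crp_sid      = sid;                   // from crypto_newsession()
 *     crp->crp_ilen     = skb->len;
 *     crp->crp_flags    = CRYPTO_F_SKBUF | CRYPTO_F_CBIMM;
 *     crp->crp_buf      = (caddr_t) skb;
 *     crp->crp_callback = my_done_cb;            // invoked via crypto_done()
 *     // ... fill crp->crp_desc / crd_next with alg, skip, len, inject ...
 *
 *     crypto_dispatch(crp);
 *
 * crp->crp_etype then carries the per-request errno back to the callback.
 */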
static int
cryptosoft_init(void)
{
    int i, sw_type, mode;
    char *algo;

    dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);

    swcr_req_cache = kmem_cache_create("cryptosoft_req",
            sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
            , NULL
#endif
            );
    if (!swcr_req_cache) {
        printk("cryptosoft: failed to create request cache\n");
        return -ENOENT;
    }

    softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);

    swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
            CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
    if (swcr_id < 0) {
        printk("cryptosoft: Software crypto device cannot initialize!\n");
        kmem_cache_destroy(swcr_req_cache);
        return -ENODEV;
    }

#define REGISTER(alg) \
        crypto_register(swcr_id, alg, 0, 0)

    for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
        int found;

        algo = crypto_details[i].alg_name;
        if (!algo || !*algo) {
            dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
            continue;
        }

        mode = crypto_details[i].mode;
        sw_type = crypto_details[i].sw_type;

        found = 0;
        switch (sw_type & SW_TYPE_ALG_MASK) {
        case SW_TYPE_CIPHER:
            found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_HMAC:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_HASH:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_COMP:
            found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_BLKCIPHER:
            found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
            if (!found && !swcr_no_ablk)
                found = crypto_has_ablkcipher(algo, 0, 0);
            break;
        }
        if (found) {
            REGISTER(i);
        } else {
            dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
                    __FUNCTION__, sw_type, i, algo);
        }
    }
    return 0;
}
static void
cryptosoft_exit(void)
{
    dprintk("%s()\n", __FUNCTION__);
    crypto_unregister_all(swcr_id);
    swcr_id = -1;
    kmem_cache_destroy(swcr_req_cache);
}

late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");