/*
 * Copyright (C) 2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient DMA Crypto Engine
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <asm/scatterlist.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <cryptodev.h>
#include <uio.h>

#include "pasemi_fnu.h"

#define DRV_NAME "pasemi"

#define TIMER_INTERVAL 1000

static void pasemi_dma_remove(struct pci_dev *pdev);
static struct pasdma_status volatile *dma_status;

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable debug");

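/*
 * Descriptor construction helpers: a pasemi_desc collects the 64-bit quads
 * of one hardware descriptor (header quad first) and tracks how many
 * 128-bit ring slots it will occupy once copied into a TX ring.
 */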
static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
{
	desc->postop = 0;
	desc->quad[0] = hdr;
	desc->quad_cnt = 1;
	desc->size = 1;
}

static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
{
	desc->quad[desc->quad_cnt++] = val;
	desc->size = (desc->quad_cnt + 1) / 2;
}

static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
{
	desc->quad[0] |= hdr;
}

static int pasemi_desc_size(struct pasemi_desc *desc)
{
	return desc->size;
}

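/*
 * Copy a completed descriptor into the channel's TX ring, two quads per
 * ring slot, and record its size, post-op flags and owning request so the
 * cleanup path can complete it later.
 */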
static void pasemi_ring_add_desc(struct pasemi_fnu_txring *ring,
				 struct pasemi_desc *desc,
				 struct cryptop *crp)
{
	int i;
	int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE - 1));

	TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
	TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
	TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;

	for (i = 0; i < desc->quad_cnt; i += 2) {
		ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE - 1));
		ring->desc[ring_index] = desc->quad[i];
		ring->desc[ring_index + 1] = desc->quad[i + 1];
		ring->next_to_fill++;
	}

	if (desc->quad_cnt & 1)
		ring->desc[ring_index + 1] = 0;
}

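/* Tell the DMA engine that 'incr' new descriptors are ready on this channel. */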
static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
{
	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
		 incr);
}

/*
 * Generate a new software session.
 */
static int
pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct pasemi_session *ses = NULL, **sespp;
	int sesn, blksz = 0;
	u64 ccmd = 0;
	unsigned long flags;
	struct pasemi_desc init_desc;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);

	if (sidp == NULL || cri == NULL || sc == NULL) {
		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
		return -EINVAL;
	}

	for (c = cri; c != NULL; c = c->cri_next) {
		if (ALG_IS_SIG(c->cri_alg)) {
			if (macini)
				return -EINVAL;
			macini = c;
		} else if (ALG_IS_CIPHER(c->cri_alg)) {
			if (encini)
				return -EINVAL;
			encini = c;
		} else {
			DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
			return -EINVAL;
		}
	}

	if (encini == NULL && macini == NULL)
		return -EINVAL;

	if (encini) {
		/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return -EINVAL;
			ccmd = DMA_CALGO_DES;
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return -EINVAL;
			ccmd = DMA_CALGO_3DES;
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return -EINVAL;
			ccmd = DMA_CALGO_AES;
			break;
		case CRYPTO_ARC4:
			if (encini->cri_klen != 128)
				return -EINVAL;
			ccmd = DMA_CALGO_ARC;
			break;
		default:
			DPRINTF("UNKNOWN encini->cri_alg %d\n",
				encini->cri_alg);
			return -EINVAL;
		}
	}

	if (macini) {
		switch (macini->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_MD5_HMAC:
			blksz = 16;
			break;
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
			blksz = 20;
			break;
		default:
			DPRINTF("UNKNOWN macini->cri_alg %d\n",
				macini->cri_alg);
			return -EINVAL;
		}
		if (((macini->cri_klen + 7) / 8) > blksz) {
			DPRINTF("key length %d bigger than blksize %d not supported\n",
				((macini->cri_klen + 7) / 8), blksz);
			return -EINVAL;
		}
	}

	for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
		if (sc->sc_sessions[sesn] == NULL) {
			sc->sc_sessions[sesn] = (struct pasemi_session *)
				kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
			ses = sc->sc_sessions[sesn];
			break;
		} else if (sc->sc_sessions[sesn]->used == 0) {
			ses = sc->sc_sessions[sesn];
			break;
		}
	}

	if (ses == NULL) {
		sespp = (struct pasemi_session **)
			kzalloc(sc->sc_nsessions * 2 *
				sizeof(struct pasemi_session *), GFP_ATOMIC);
		if (sespp == NULL)
			return -ENOMEM;
		memcpy(sespp, sc->sc_sessions,
		       sc->sc_nsessions * sizeof(struct pasemi_session *));
		kfree(sc->sc_sessions);
		sc->sc_sessions = sespp;
		sesn = sc->sc_nsessions;
		ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
			kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
		if (ses == NULL)
			return -ENOMEM;
		sc->sc_nsessions *= 2;
	}

	ses->used = 1;

	ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
				       sizeof(struct pasemi_session), DMA_TO_DEVICE);

	/* enter the channel scheduler */
	spin_lock_irqsave(&sc->sc_chnlock, flags);

	/* ARC4 has to be processed by the even channel */
	if (encini && (encini->cri_alg == CRYPTO_ARC4))
		ses->chan = sc->sc_lastchn & ~1;
	else
		ses->chan = sc->sc_lastchn;
	sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;

	spin_unlock_irqrestore(&sc->sc_chnlock, flags);

	txring = &sc->tx[ses->chan];

	if (encini) {
		ses->ccmd = ccmd;

		ses->keysz = (encini->cri_klen - 63) / 64;
		memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
	}
	if (macini) {
		if (macini->cri_alg == CRYPTO_MD5_HMAC ||
		    macini->cri_alg == CRYPTO_SHA1_HMAC)
			memcpy(ses->hkey, macini->cri_key, blksz);
		else {
			/* Load initialization constants (RFC 1321, 3174) */
			ses->hiv[0] = 0x67452301efcdab89ULL;
			ses->hiv[1] = 0x98badcfe10325476ULL;
			ses->hiv[2] = 0xc3d2e1f000000000ULL;
		}
		ses->hseq = 0ULL;
	}

	spin_lock_irqsave(&txring->fill_lock, flags);

	/* init_desc is only built (and queued) for cipher sessions */
	if (((txring->next_to_fill +
	      (encini ? pasemi_desc_size(&init_desc) : 0)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		return ERESTART;
	}

	if (encini) {
		pasemi_ring_add_desc(txring, &init_desc, NULL);
		pasemi_ring_incr(sc, ses->chan,
				 pasemi_desc_size(&init_desc));
	}

	txring->sesn = sesn;
	spin_unlock_irqrestore(&txring->fill_lock, flags);

	*sidp = PASEMI_SID(sesn);

	return 0;
}

/*
 * Deallocate a session.
 */
static int
pasemi_freesession(device_t dev, u_int64_t tid)
{
	struct pasemi_softc *sc = device_get_softc(dev);
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	DPRINTF("%s()\n", __FUNCTION__);

	if (sc == NULL)
		return -EINVAL;
	session = PASEMI_SESSION(sid);
	if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
		return -EINVAL;

	pci_unmap_single(sc->dma_pdev,
			 sc->sc_sessions[session]->dma_addr,
			 sizeof(struct pasemi_session), DMA_TO_DEVICE);
	memset(sc->sc_sessions[session], 0,
	       sizeof(struct pasemi_session));

	return 0;
}

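/*
 * Queue one crypto request: build an (optional) key/IV re-init descriptor
 * and a work descriptor pointing the function unit at the request's source
 * and destination buffers, then kick the channel.
 */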
static int
pasemi_process(device_t dev, struct cryptop *crp, int hint)
{
	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t ivp;
	struct pasemi_desc init_desc, work_desc;
	struct pasemi_session *ses;
	struct sk_buff *skb;
	struct uio *uiop;
	unsigned long flags;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
		return -EINVAL;

	crp->crp_etype = 0;
	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
		return -EINVAL;

	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = -EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (ALG_IS_SIG(crd1->crd_alg)) {
		maccrd = crd1;
		if (crd2 == NULL)
			enccrd = NULL;
		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
			enccrd = crd2;
		else
			goto erralg;
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		enccrd = crd1;
		if (crd2 == NULL)
			maccrd = NULL;
		else if (ALG_IS_SIG(crd2->crd_alg) &&
			 (crd1->crd_flags & CRD_F_ENCRYPT))
			maccrd = crd2;
		else
			goto erralg;
	} else
		goto erralg;

	chsel = ses->chan;

	txring = &sc->tx[chsel];

	if (enccrd && !maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x40;
		srclen = crp->crp_ilen;

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
				  | XCT_FUN_FUN(chsel));
		if (enccrd->crd_flags & CRD_F_ENCRYPT)
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
		else
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
	} else if (enccrd && maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x68;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Encrypt -> Authenticate */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
			srclen = maccrd->crd_skip + maccrd->crd_len;
		} else {
			/* Authenticate -> Decrypt */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			work_desc.postop = PASEMI_CHECK_SIG;
			srclen = crp->crp_ilen;
		}

		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
	} else if (!enccrd && maccrd) {
		srclen = maccrd->crd_len;

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
	}

	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_AES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
					XCT_FUN_BCM_CBC);
			ivsize = 2 * sizeof(u64);
			break;
		case CRYPTO_ARC4:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
			ivsize = 0;
			break;
		default:
			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
			       enccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}

		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				read_random(ivp, ivsize);
			/* If IV is not present in the buffer already, it has to be copied there */
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize, ivp);
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				/* IV is provided explicitly in descriptor */
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				/* IV is provided in the packet */
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize,
						ivp);
		}
	}

	if (maccrd) {
		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_MD5_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		default:
			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
			       maccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}
	}

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk(DRV_NAME ": skb frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}
		pasemi_desc_build(&work_desc,
				  XCT_FUN_DST_PTR(skb->len,
						  pci_map_single(sc->dma_pdev, skb->data,
								 skb->len, DMA_TO_DEVICE)));
		pasemi_desc_build(&work_desc,
				  XCT_FUN_SRC_PTR(srclen,
						  pci_map_single(sc->dma_pdev, skb->data,
								 srclen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk(DRV_NAME ": iov frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}

		/* crp_olen is never set; always use crp_ilen */
		pasemi_desc_build(&work_desc,
				  XCT_FUN_DST_PTR(crp->crp_ilen,
						  pci_map_single(sc->dma_pdev,
								 uiop->uio_iov->iov_base,
								 crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));

		pasemi_desc_build(&work_desc,
				  XCT_FUN_SRC_PTR(srclen,
						  pci_map_single(sc->dma_pdev,
								 uiop->uio_iov->iov_base,
								 srclen, DMA_TO_DEVICE)));
	} else {
		/* using contig buffers */
		pasemi_desc_build(&work_desc,
				  XCT_FUN_DST_PTR(crp->crp_ilen,
						  pci_map_single(sc->dma_pdev, crp->crp_buf,
								 crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_build(&work_desc,
				  XCT_FUN_SRC_PTR(srclen,
						  pci_map_single(sc->dma_pdev, crp->crp_buf,
								 srclen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	}

	spin_lock_irqsave(&txring->fill_lock, flags);

	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
		txring->sesn = PASEMI_SESSION(crp->crp_sid);
		reinit = 1;
	}

	if (enccrd) {
		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
	}

	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
	      pasemi_desc_size(&work_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		err = ERESTART;
		goto errout;
	}

	pasemi_ring_add_desc(txring, &init_desc, NULL);
	pasemi_ring_add_desc(txring, &work_desc, crp);

	pasemi_ring_incr(sc, chsel,
			 pasemi_desc_size(&init_desc) +
			 pasemi_desc_size(&work_desc));

	spin_unlock_irqrestore(&txring->fill_lock, flags);

	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);

	return 0;

erralg:
	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
	       crd1->crd_alg, crd2->crd_alg);
	err = -EINVAL;

errout:
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}

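/*
 * Reap completed descriptors from a TX ring: check the signature-compare
 * result for combined verify+decrypt requests, unmap the DMA buffers and
 * hand finished requests back to the OCF core via crypto_done().
 */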
static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
{
	int i, j, ring_idx;
	struct pasemi_fnu_txring *ring = &sc->tx[chan];
	u16 delta_cnt;
	unsigned long flags;
	int loops = 10;
	int desc_size;
	struct cryptop *crp;

	spin_lock_irqsave(&ring->clean_lock, flags);

	while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
			     & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
	       && loops--) {

		for (i = 0; i < delta_cnt; i++) {
			desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
			crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
			if (crp) {
				ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE - 1));
				if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
					/* Need to make sure signature matched,
					 * if not - return error */
					if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
						crp->crp_etype = -EINVAL;
				}
				crypto_done(TX_DESC_INFO(ring,
							 ring->next_to_clean).cf_crp);
				TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
				pci_unmap_single(sc->dma_pdev,
						 XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
						 PCI_DMA_TODEVICE);

				ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;

				ring->next_to_clean++;
				for (j = 1; j < desc_size; j++) {
					ring_idx = 2 *
						(ring->next_to_clean &
						 (TX_RING_SIZE - 1));

					pci_unmap_single(sc->dma_pdev,
							 XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
							 PCI_DMA_TODEVICE);
					if (ring->desc[ring_idx + 1])
						pci_unmap_single(sc->dma_pdev,
								 XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
								 PCI_DMA_TODEVICE);
					ring->desc[ring_idx] =
						ring->desc[ring_idx + 1] = 0;
					ring->next_to_clean++;
				}
			} else {
				for (j = 0; j < desc_size; j++) {
					ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE - 1));
					ring->desc[ring_idx] =
						ring->desc[ring_idx + 1] = 0;
					ring->next_to_clean++;
				}
			}
		}

		ring->total_pktcnt += delta_cnt;
	}
	spin_unlock_irqrestore(&ring->clean_lock, flags);

	return 0;
}

static void sweepup_tx(struct pasemi_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_num_channels; i++)
		pasemi_clean_tx(sc, i);
}

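/*
 * Per-channel interrupt handler: reap completed work and acknowledge the
 * interrupt by writing the updated packet count back to the IOB.
 */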
static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
{
	struct pasemi_softc *sc = arg;
	unsigned int reg;
	int chan = irq - sc->base_irq;
	int chan_index = sc->base_chan + chan;
	u64 stat = dma_status->tx_sta[chan_index];

	DPRINTF("%s()\n", __FUNCTION__);

	if (!(stat & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	pasemi_clean_tx(sc, chan);

	stat = dma_status->tx_sta[chan_index];

	reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
		PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);

	if (stat & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);

	return IRQ_HANDLED;
}

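/*
 * Allocate and program one TX (function unit) channel: descriptor ring,
 * ring base/size registers, channel configuration, interrupt line and the
 * fallback cleanup timer.
 */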
static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
{
	u32 val;
	int chan_index = chan + sc->base_chan;
	int ret;
	struct pasemi_fnu_txring *ring;

	ring = &sc->tx[chan];

	spin_lock_init(&ring->fill_lock);
	spin_lock_init(&ring->clean_lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->desc_info)
		return -ENOMEM;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
					TX_RING_SIZE *
					2 * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);

	ring->total_pktcnt = 0;

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
		 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));

	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
		 PAS_DMA_TXCHAN_CFG_TY_FUNC |
		 PAS_DMA_TXCHAN_CFG_TATTR(chan) |
		 PAS_DMA_TXCHAN_CFG_WT(2));

	/* enable tx channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_EN);

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
		 PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s%d", "crypto", chan);

	ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
	ret = request_irq(ring->irq, (irq_handler_t)
			  pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
	if (ret) {
		printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
		       ring->irq, ret);
		ring->irq = -1;
		return ret;
	}

	setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);

	return 0;
}

static device_method_t pasemi_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession, pasemi_newsession),
	DEVMETHOD(cryptodev_freesession, pasemi_freesession),
	DEVMETHOD(cryptodev_process, pasemi_process),
};

/* Set up the crypto device structure, private data,
 * and anything else we need before we start */
static int
pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct pasemi_softc *sc;
	int ret, i;

	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);

	pci_set_drvdata(pdev, sc);

	spin_lock_init(&sc->sc_chnlock);

	sc->sc_sessions = (struct pasemi_session **)
		kzalloc(PASEMI_INITIAL_SESSIONS *
			sizeof(struct pasemi_session *), GFP_ATOMIC);
	if (sc->sc_sessions == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
	sc->sc_lastchn = 0;
	sc->base_irq = pdev->irq + 6;
	sc->base_chan = 6;
	sc->sc_cid = -1;
	sc->dma_pdev = pdev;

	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!sc->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		ret = -ENODEV;
		goto out;
	}

	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	sc->dma_regs =
		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
	sc->iob_regs =
		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
	if (!sc->dma_regs || !sc->iob_regs) {
		dev_err(&pdev->dev, "Can't map registers\n");
		ret = -ENODEV;
		goto out;
	}

	dma_status = __ioremap(0xfd800000, 0x1000, 0);
	if (!dma_status) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Can't map dmastatus space\n");
		goto out;
	}

	sc->tx = (struct pasemi_fnu_txring *)
		kzalloc(sizeof(struct pasemi_fnu_txring)
			* 8, GFP_KERNEL);
	if (!sc->tx) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the h/w */
	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
		  PAS_DMA_COM_CFG_FWF));
	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
		sc->sc_num_channels++;
		ret = pasemi_dma_setup_tx_resources(sc, i);
		if (ret)
			goto out;
	}

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
					 CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
		ret = -ENXIO;
		goto out;
	}

	/* register algorithms with the framework */
	printk(DRV_NAME ":");

	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);

	return 0;

out:
	pasemi_dma_remove(pdev);
	return ret;
}

#define MAX_RETRIES 5000

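/*
 * Stop, disable and tear down one TX channel: wait (bounded by MAX_RETRIES)
 * for the hardware to go idle, then release the descriptor ring, IRQ and
 * cleanup timer.
 */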
static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
{
	struct pasemi_fnu_txring *ring = &sc->tx[chan];
	int chan_index = chan + sc->base_chan;
	int retries;
	u32 stat;

	/* Stop the channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		stat = in_le32(sc->dma_regs +
			       PAS_DMA_TXCHAN_TCMDSTA(chan_index));
		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
			chan_index);

	/* Disable the channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 0);

	if (ring->desc_info)
		kfree((void *) ring->desc_info);
	if (ring->desc)
		dma_free_coherent(&sc->dma_pdev->dev,
				  TX_RING_SIZE *
				  2 * sizeof(u64),
				  (void *) ring->desc, ring->dma);
	if (ring->irq != -1)
		free_irq(ring->irq, sc);

	del_timer(&ring->crypto_timer);
}

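/* Undo everything pasemi_dma_probe() set up; also used on probe failure. */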
static void pasemi_dma_remove(struct pci_dev *pdev)
{
	struct pasemi_softc *sc = pci_get_drvdata(pdev);
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	if (sc->sc_cid >= 0) {
		crypto_unregister_all(sc->sc_cid);
	}

	if (sc->tx) {
		for (i = 0; i < sc->sc_num_channels; i++)
			pasemi_free_tx_resources(sc, i);

		kfree(sc->tx);
	}

	if (sc->sc_sessions) {
		for (i = 0; i < sc->sc_nsessions; i++)
			kfree(sc->sc_sessions[i]);

		kfree(sc->sc_sessions);
	}

	if (sc->iob_pdev)
		pci_dev_put(sc->iob_pdev);

	if (sc->dma_regs)
		iounmap(sc->dma_regs);

	if (sc->iob_regs)
		iounmap(sc->iob_regs);

	kfree(sc);
}

static struct pci_device_id pasemi_dma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
};
MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);

static struct pci_driver pasemi_dma_driver = {
	.name		= "pasemi_dma",
	.id_table	= pasemi_dma_pci_tbl,
	.probe		= pasemi_dma_probe,
	.remove		= pasemi_dma_remove,
};

static void __exit pasemi_dma_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_dma_driver);
	__iounmap(dma_status);
	dma_status = NULL;
}

int pasemi_dma_init_module(void)
{
	return pci_register_driver(&pasemi_dma_driver);
}

module_init(pasemi_dma_init_module);
module_exit(pasemi_dma_cleanup_module);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");