/*-
 * Linux port done by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2004-2010 David McCullough
 * The license and original author are listed below.
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * __FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
 */

#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <asm/io.h>

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */

#include <cryptodev.h>
#include <uio.h>
#include <safe/safereg.h>
#include <safe/safevar.h>

#if 1
#define	DPRINTF(a)	do { \
				if (debug) { \
					printk("%s: ", sc ? \
						device_get_nameunit(sc->sc_dev) : "safe"); \
					printk a; \
				} \
			} while (0)
#else
#define	DPRINTF(a)
#endif

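/*
 * NB: DPRINTF takes its printf-style arguments in an extra set of
 * parentheses so the whole argument list passes through the single
 * macro parameter, e.g.
 *
 *	DPRINTF(("%s: stat=0x%x\n", __func__, stat));
 *
 * It also expects a softc pointer named `sc' (which may be NULL) to be
 * in scope for the device-name prefix.
 */
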
/*
 * until we find a cleaner way, include the BSD md5/sha1 code
 * here
 */
#define	HMAC_HACK 1
#ifdef HMAC_HACK
#include <safe/hmachack.h>
#include <safe/md5.h>
#include <safe/md5.c>
#include <safe/sha1.h>
#include <safe/sha1.c>
#endif /* HMAC_HACK */

/* add proc entry for this */
struct safe_stats safestats;

#define	debug safe_debug
int safe_debug = 0;
module_param(safe_debug, int, 0644);
MODULE_PARM_DESC(safe_debug, "Enable debug");

static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
static	void safe_rng_init(struct safe_softc *);
int safe_rngbufsize = 8;		/* 32 bytes each read */
module_param(safe_rngbufsize, int, 0644);
MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
int safe_rngmaxalarm = 8;		/* max alarms before reset */
module_param(safe_rngmaxalarm, int, 0644);
MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */

static void safe_totalreset(struct safe_softc *sc);
static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
static int safe_kstart(struct safe_softc *sc);
static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
static void safe_kfeed(struct safe_softc *sc);
static void safe_kpoll(unsigned long arg);
static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
    u_int32_t len, struct crparam *n);
static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
static int safe_freesession(device_t, u_int64_t);
static int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_freesession, safe_freesession),
	DEVMETHOD(cryptodev_process,	safe_process),
	DEVMETHOD(cryptodev_kprocess,	safe_kprocess),
};

#define	READ_REG(sc,r)		readl((sc)->sc_base_addr + (r))
#define	WRITE_REG(sc,r,val)	writel((val), (sc)->sc_base_addr + (r))

#define	SAFE_MAX_CHIPS 8
static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];

/*
 * Split our buffers up into safely DMAable byte fragments to avoid a
 * lockup bug in rev 1.0 of the 1141 hardware.
 */
static int
pci_map_linear(
	struct safe_softc *sc,
	struct safe_operand *buf,
	void *addr,
	int len)
{
	dma_addr_t tmp;
	int chunk, tlen = len;

	tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);

	buf->mapsize += len;
	while (len > 0) {
		chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
		buf->segs[buf->nsegs].ds_addr = tmp;
		buf->segs[buf->nsegs].ds_len = chunk;
		buf->segs[buf->nsegs].ds_tlen = tlen;
		buf->nsegs++;
		tmp += chunk;
		len -= chunk;
		tlen = 0;
	}
	return 0;
}

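/*
 * Illustrative note (not in the original source): with sc_max_dsize
 * set to 256 for rev 1.0 parts, mapping a 600-byte buffer above yields
 * three segments of 256, 256 and 88 bytes that share one PCI mapping.
 * Only the first segment records the total length in ds_tlen (600
 * here, 0 for the rest), which is how pci_unmap_operand() below knows
 * to issue a single pci_unmap_single() for the whole mapping.
 */
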
/*
 * map in a given uio buffer (great on some arches :-)
 */
static int
pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
{
	struct iovec *iov = uio->uio_iov;
	int n;

	DPRINTF(("%s()\n", __FUNCTION__));

	buf->mapsize = 0;
	buf->nsegs = 0;

	for (n = 0; n < uio->uio_iovcnt; n++) {
		pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
		iov++;
	}

	/* identify this buffer by the first segment */
	buf->map = (void *) buf->segs[0].ds_addr;
	return(0);
}

/*
 * map in a given sk_buff
 */
static int
pci_map_skb(struct safe_softc *sc, struct safe_operand *buf, struct sk_buff *skb)
{
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));

	buf->mapsize = 0;
	buf->nsegs = 0;

	pci_map_linear(sc, buf, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		pci_map_linear(sc, buf,
			page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
				skb_shinfo(skb)->frags[i].page_offset,
			skb_shinfo(skb)->frags[i].size);
	}

	/* identify this buffer by the first segment */
	buf->map = (void *) buf->segs[0].ds_addr;
	return(0);
}

#if 0 /* not needed at this time */
static void
pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
{
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));
	for (i = 0; i < buf->nsegs; i++)
		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
}
#endif

static void
pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
{
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));
	for (i = 0; i < buf->nsegs; i++) {
		if (buf->segs[i].ds_tlen) {
			DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
			pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
					buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
			DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
		}
		buf->segs[i].ds_addr = 0;
		buf->segs[i].ds_len = 0;
		buf->segs[i].ds_tlen = 0;
	}
	buf->nsegs = 0;
	buf->mapsize = 0;
	buf->map = 0;
}

/*
 * SafeXcel Interrupt routine
 */
static irqreturn_t
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
safe_intr(int irq, void *arg)
#else
safe_intr(int irq, void *arg, struct pt_regs *regs)
#endif
{
	struct safe_softc *sc = arg;
	int stat;
	unsigned long flags;

	stat = READ_REG(sc, SAFE_HM_STAT);

	DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));

	if (stat == 0)		/* shared irq, not for us */
		return IRQ_NONE;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		spin_lock_irqsave(&sc->sc_ringmtx, flags);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;

#ifdef SAFE_DEBUG
			if (debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
					DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
					break;
				}
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
					DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
					break;
				}
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
				(int)READ_REG(sc, SAFE_PE_DMASTAT));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
			sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}

	return IRQ_HANDLED;
}

/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	DPRINTF(("%s()\n", __FUNCTION__));
#ifdef SAFE_DEBUG
	if (debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define	N(a)	(sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, caddr_t key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen / 8);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}

static void
safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
{
#ifdef HMAC_HACK
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_ipad_buffer,
			SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_opad_buffer,
			SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;

#if 0
	/*
	 * this code prevents SHA working on a BE host,
	 * so it is obviously wrong.  I think the byte
	 * swap setup we do with the chip fixes this for us
	 */

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
#endif
#else /* HMAC_HACK */
	printk("safe: md5/sha not implemented\n");
#endif /* HMAC_HACK */
}
#undef N

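/*
 * Illustrative note (not in the original source): the precompute in
 * safe_setup_mackey() relies on the standard HMAC construction
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * Since (K ^ ipad) and (K ^ opad) each fill exactly one 64-byte block
 * for MD5 and SHA-1, the hash midstates captured after those first
 * blocks (ses_hminner/ses_hmouter) are all the chip needs to finish
 * an HMAC over any message.
 */
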
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct safe_softc *sc = device_get_softc(dev);
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct safe_session *ses = NULL;
	int sesn;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC ||
		    c->cri_alg == CRYPTO_NULL_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC ||
		    c->cri_alg == CRYPTO_NULL_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);
	if (encini) {			/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return (EINVAL);
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return (EINVAL);
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return (EINVAL);
			break;
		}
	}

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct safe_session *)
			kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
		if (ses == NULL)
			return (ENOMEM);
		memset(ses, 0, sizeof(struct safe_session));
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct safe_session *)
				kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
			if (ses == NULL)
				return (ENOMEM);
			memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
			bcopy(sc->sc_sessions, ses, sesn *
				sizeof(struct safe_session));
			bzero(sc->sc_sessions, sesn *
				sizeof(struct safe_session));
			kfree(sc->sc_sessions);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct safe_session));
	ses->ses_used = 1;

	if (encini) {
		ses->ses_klen = encini->cri_klen;
		if (encini->cri_key != NULL)
			safe_setup_enckey(ses, encini->cri_key);
	}

	if (macini) {
		ses->ses_mlen = macini->cri_mlen;
		if (ses->ses_mlen == 0) {
			if (macini->cri_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_HASH_LEN;
			else
				ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (macini->cri_key != NULL) {
			safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
				macini->cri_klen / 8);
		}
	}

	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
}

/*
 * Deallocate a session.
 */
static int
safe_freesession(device_t dev, u_int64_t tid)
{
	struct safe_softc *sc = device_get_softc(dev);
	int session, ret;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (sc == NULL)
		return (EINVAL);

	session = SAFE_SESSION(sid);
	if (session < sc->sc_nsessions) {
		bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
		ret = 0;
	} else
		ret = EINVAL;
	return (ret);
}

static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	int err = 0, i, nicealign, uniform;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int bypass, oplen, ivsize;
	caddr_t iv;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec, rand_iv[4];
	unsigned long flags;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		safestats.st_invalid++;
		return (EINVAL);
	}
	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		safestats.st_badsession++;
		return (EINVAL);
	}

	spin_lock_irqsave(&sc->sc_ringmtx, flags);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;
	re->re_sesn = SAFE_SESSION(crp->crp_sid);

	re->re_src.nsegs = 0;
	re->re_dst.nsegs = 0;

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		re->re_src_skb = (struct sk_buff *)crp->crp_buf;
		re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		re->re_src_io = (struct uio *)crp->crp_buf;
		re->re_dst_io = (struct uio *)crp->crp_buf;
	} else {
		safestats.st_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	sa = &re->re_sa;
	ses = &sc->sc_sessions[re->re_sesn];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		safestats.st_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
	cmd1 = 0;
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
			cmd0 |= SAFE_SA_CMD0_OP_HASH;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) {
			maccrd = NULL;
			enccrd = crd1;
			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
			crd2->crd_alg == CRYPTO_3DES_CBC ||
			crd2->crd_alg == CRYPTO_AES_CBC ||
			crd2->crd_alg == CRYPTO_NULL_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
			crd2->crd_alg == CRYPTO_SHA1_HMAC ||
			crd2->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
	}

	if (enccrd) {
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			safe_setup_enckey(ses, enccrd->crd_key);

		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
			ivsize = 4*sizeof(u_int32_t);
		} else {
			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
			ivsize = 0;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				read_random((iv = (caddr_t) &rand_iv[0]), sizeof(rand_iv));
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
					enccrd->crd_inject, ivsize, iv);
			}
			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
			/* make iv LE */
			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
				re->re_sastate.sa_saved_iv[i] =
					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv,
					re->re_sastate.sa_saved_iv, ivsize);
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
					enccrd->crd_inject, ivsize,
					(caddr_t)re->re_sastate.sa_saved_iv);
			}
			/* make iv LE */
			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
				re->re_sastate.sa_saved_iv[i] =
					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (maccrd) {
		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			safe_setup_mackey(ses, maccrd->crd_alg,
				maccrd->crd_key, maccrd->crd_klen / 8);
		}

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else {
			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
		}
		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
			sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
			sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (enccrd && maccrd) {
		/*
		 * The offset from hash data to the start of
		 * crypt data is the difference in the skips.
		 */
		bypass = maccrd->crd_skip;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		if (coffset < 0) {
			DPRINTF(("%s: hash does not precede crypt; "
				"mac skip %u enc skip %u\n",
				__func__, maccrd->crd_skip, enccrd->crd_skip));
			safestats.st_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		oplen = enccrd->crd_skip + enccrd->crd_len;
		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
				__func__, maccrd->crd_skip + maccrd->crd_len,
				oplen));
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
#ifdef SAFE_DEBUG
		if (debug) {
			printf("mac: skip %d, len %d, inject %d\n",
				maccrd->crd_skip, maccrd->crd_len,
				maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
				enccrd->crd_skip, enccrd->crd_len,
				enccrd->crd_inject);
			printf("bypass %d coffset %d oplen %d\n",
				bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
				__func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
				__func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		if (enccrd) {
			bypass = enccrd->crd_skip;
			oplen = bypass + enccrd->crd_len;
		} else {
			bypass = maccrd->crd_skip;
			oplen = bypass + maccrd->crd_len;
		}
		coffset = 0;
	}

	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = safe_dmamap_aligned(sc, &re->re_src);
	uniform = safe_dmamap_uniform(sc, &re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
		nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
				(pd->pd_flags&3) == SAFE_PD_DONE,
				("bogus source particle descriptor; flags %x",
				pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}

	if (enccrd == NULL && maccrd != NULL) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (uniform != 1) {
				device_printf(sc->sc_dev, "!uniform source\n");
				if (!uniform) {
					/*
					 * There's no way to handle the DMA
					 * requirements with this uio.  We
					 * could create a separate DMA area for
					 * the result and then copy it back,
					 * but for now we just bail and return
					 * an error.  Note that uio requests
					 * > SAFE_MAX_DSIZE are handled because
					 * the DMA map and segment list for the
					 * destination will result in a
					 * destination particle list that does
					 * the necessary scatter DMA.
					 */
					safestats.st_iovnotuniform++;
					err = EINVAL;
					goto errout;
				}
			} else
				re->re_dst = re->re_src;
		} else {
			safestats.st_badflags++;
			err = EINVAL;
			goto errout;
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
				((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
					(pd->pd_flags&3) == SAFE_PD_DONE,
					("bogus dest particle descriptor; flags %x",
						pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		| (coffset << SAFE_SA_CMD1_OFFSET_S)
		| SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		| SAFE_SA_CMD1_SRPCI
		;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (maccrd)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	wmb();
	re->re_desc.d_len = oplen
		| SAFE_PE_LEN_READY
		| (bypass << SAFE_PE_LEN_BYPASS_S)
		;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
	return (0);

errout:
	if (re->re_src.map != re->re_dst.map)
		pci_unmap_operand(sc, &re->re_dst);
	if (re->re_src.map)
		pci_unmap_operand(sc, &re->re_src);
	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct cryptodesc *crd;

	DPRINTF(("%s()\n", __FUNCTION__));

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
			re->re_desc.d_csr,
			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}

	if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
		pci_unmap_operand(sc, &re->re_dst);
	pci_unmap_operand(sc, &re->re_src);

	/*
	 * If the result was written to a different mbuf chain, swap
	 * it in as the return value and reclaim the original.
	 */
	if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
		device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
		/* kfree_skb(skb) */
		/* crp->crp_buf = (caddr_t)re->re_dst_skb */
		return;
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		/* copy out ICV result */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
			    crd->crd_alg == CRYPTO_NULL_HMAC))
				continue;
			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
				/*
				 * SHA-1 ICV's are byte-swapped; fix 'em up
				 * before copying them to their destination.
				 */
				re->re_sastate.sa_saved_indigest[0] =
					cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
				re->re_sastate.sa_saved_indigest[1] =
					cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
				re->re_sastate.sa_saved_indigest[2] =
					cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
			} else {
				re->re_sastate.sa_saved_indigest[0] =
					cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
				re->re_sastate.sa_saved_indigest[1] =
					cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
				re->re_sastate.sa_saved_indigest[2] =
					cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
			}
			crypto_copyback(crp->crp_flags, crp->crp_buf,
				crd->crd_inject,
				sc->sc_sessions[re->re_sesn].ses_mlen,
				(caddr_t)re->re_sastate.sa_saved_indigest);
			break;
		}
	}
	crypto_done(crp);
}

#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine that the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes, read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms - 1000ms in time.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until the data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	DPRINTF(("%s()\n", __FUNCTION__));

	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	DPRINTF(("%s()\n", __FUNCTION__));

	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

static int
safe_read_random(void *arg, u_int32_t *buf, int maxwords)
{
	struct safe_softc *sc = (struct safe_softc *) arg;
	int i, rc;

	DPRINTF(("%s()\n", __FUNCTION__));

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	if (maxwords > safe_rngbufsize)
		maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	/* read as much as we can */
	for (rc = 0; rc < maxwords; rc++) {
		if (READ_REG(sc, SAFE_RNG_STAT) != 0)
			break;
		buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
	}
	if (rc == 0)
		return 0;
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
			(unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	return(rc);
}

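/*
 * NB (assumption, not shown in this section): safe_read_random() is
 * the harvest callback handed to the OCF random framework from the
 * attach path, along the lines of
 *
 *	crypto_rregister(sc->sc_cid, safe_read_random, sc);
 *
 * following the registration pattern used by other OCF drivers.
 */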
#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	DPRINTF(("%s()\n", __FUNCTION__));

	v = READ_REG(sc, SAFE_PE_DMACFG) &~
		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
		 SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
		| SAFE_PE_DMACFG_PERESET
		| SAFE_PE_DMACFG_PDRRESET
		| SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	DPRINTF(("%s()\n", __FUNCTION__));

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ (	SAFE_PE_DMACFG_PEMODE
		| SAFE_PE_DMACFG_FSENA		/* failsafe enable */
		| SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
		| SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
		| SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
		| SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
		| SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
		| SAFE_PE_DMACFG_ESPACKET	/* swap the packet data */
		);
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
		| SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
		| SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
		| SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
		| SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
		| SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
#if 0
		| SAFE_PE_DMACFG_ESPACKET	/* swap the packet data */
#endif
		;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);

#ifdef __BIG_ENDIAN
	/* tell the safenet that we are 4321 and not 1234 */
	WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
#endif

	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
			(unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
			(unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
			(unsigned) SAFE_REV_MIN(sc->sc_chiprev));
		sc->sc_max_dsize = 256;
	} else {
		sc->sc_max_dsize = SAFE_MAX_DSIZE;
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
		("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{
	DPRINTF(("%s()\n", __FUNCTION__));

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	DPRINTF(("%s()\n", __FUNCTION__));

	/*
	 * Free header MCR
	 */
	if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
#ifdef NOTYET
		m_freem(re->re_dst_m);
#else
		printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
#endif

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	DPRINTF(("%s()\n", __FUNCTION__));

	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
{
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

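/*
 * Illustrative note (not in the original source): a two-segment
 * operand { addr 0x1000/len 64, addr 0x1040/len 21 } passes this
 * check (every address is 32-bit aligned and only the last segment
 * has a length that is not a multiple of 4), whereas a first segment
 * of 63 bytes would fail it.
 */
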
/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation.  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
{
	int result = 1;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % sc->sc_max_dsize)
				return (0);
			if (op->segs[i].ds_len != sc->sc_max_dsize)
				result = 2;
		}
	}
	return (result);
}

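/*
 * Illustrative note (not in the original source): with sc_max_dsize
 * of 256, segment lengths {256, 256, 80} yield 1 (all leading
 * segments exactly the particle size), {512, 256, 80} yield 2
 * (leading segments are only multiples of it), and {100, 256, 80}
 * yield 0 (a leading segment is not a multiple of it); the last
 * segment's length is never checked.
 */
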
static int
safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
    struct safe_softc *sc = device_get_softc(dev);
    struct safe_pkq *q;
    unsigned long flags;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (sc == NULL) {
        krp->krp_status = EINVAL;
        goto err;
    }

    if (krp->krp_op != CRK_MOD_EXP) {
        krp->krp_status = EOPNOTSUPP;
        goto err;
    }

    q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
    if (q == NULL) {
        krp->krp_status = ENOMEM;
        goto err;
    }
    memset(q, 0, sizeof(*q));
    q->pkq_krp = krp;
    INIT_LIST_HEAD(&q->pkq_list);

    spin_lock_irqsave(&sc->sc_pkmtx, flags);
    list_add_tail(&q->pkq_list, &sc->sc_pkq);
    safe_kfeed(sc);
    spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
    return (0);

err:
    crypto_kdone(krp);
    return (0);
}
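
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * what a CRK_MOD_EXP request asks the public-key unit to compute, shown
 * for word-sized operands with plain square-and-multiply.  The hardware
 * performs the same operation on multi-precision operands held in its
 * PK RAM.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t
modexp(uint64_t base, uint64_t exp, uint64_t mod)
{
    uint64_t r = 1 % mod;

    base %= mod;
    while (exp) {
        if (exp & 1)
            r = (r * base) % mod;   /* NB: full-range 64-bit moduli
                                     * would need 128-bit products */
        base = (base * base) % mod;
        exp >>= 1;
    }
    return r;
}

int
main(void)
{
    /* 4^13 mod 497 = 445 */
    printf("%llu\n", (unsigned long long) modexp(4, 13, 497));
    return 0;
}
#endif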

#define SAFE_CRK_PARAM_BASE 0
#define SAFE_CRK_PARAM_EXP  1
#define SAFE_CRK_PARAM_MOD  2
static int
safe_kstart(struct safe_softc *sc)
{
    struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
    int exp_bits, mod_bits, base_bits;
    u_int32_t op, a_off, b_off, c_off, d_off;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
        krp->krp_status = EINVAL;
        return (1);
    }

    base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
    if (base_bits > 2048)
        goto too_big;
    if (base_bits <= 0)         /* 5. base not zero */
        goto too_small;

    exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
    if (exp_bits > 2048)
        goto too_big;
    if (exp_bits <= 0)          /* 1. exponent word length > 0 */
        goto too_small;         /* 4. exponent not zero */

    mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
    if (mod_bits > 2048)
        goto too_big;
    if (mod_bits <= 32)         /* 2. modulus word length > 1 */
        goto too_small;         /* 8. MSW of modulus != zero */
    if (mod_bits < exp_bits)    /* 3. modulus len >= exponent len */
        goto too_small;
    if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
        goto bad_domain;        /* 6. modulus is odd */
    if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
        goto too_small;         /* make sure result will fit */

    /* 7. modulus > base */
    if (mod_bits < base_bits)
        goto too_small;
    if (mod_bits == base_bits) {
        u_int8_t *basep, *modp;
        int i;

        basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
            ((base_bits + 7) / 8) - 1;
        modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
            ((mod_bits + 7) / 8) - 1;

        for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
            if (*modp < *basep)
                goto too_small;
            if (*modp > *basep)
                break;
        }
    }

    /* And on the 9th step, he rested. */

    WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
    WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
    if (mod_bits > 1024) {
        op = SAFE_PK_FUNC_EXP4;
        a_off = 0x000;
        b_off = 0x100;
        c_off = 0x200;
        d_off = 0x300;
    } else {
        op = SAFE_PK_FUNC_EXP16;
        a_off = 0x000;
        b_off = 0x080;
        c_off = 0x100;
        d_off = 0x180;
    }
    sc->sc_pk_reslen = b_off - a_off;
    sc->sc_pk_resoff = d_off;

    /* A is exponent, B is modulus, C is base, D is result */
    safe_kload_reg(sc, a_off, b_off - a_off,
        &krp->krp_param[SAFE_CRK_PARAM_EXP]);
    WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
    safe_kload_reg(sc, b_off, b_off - a_off,
        &krp->krp_param[SAFE_CRK_PARAM_MOD]);
    WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
    safe_kload_reg(sc, c_off, b_off - a_off,
        &krp->krp_param[SAFE_CRK_PARAM_BASE]);
    WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
    WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);

    WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
    return (0);

too_big:
    krp->krp_status = E2BIG;
    return (1);
too_small:
    krp->krp_status = ERANGE;
    return (1);
bad_domain:
    krp->krp_status = EDOM;
    return (1);
}
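
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * how the operand sizing above works.  Lengths are programmed in 32-bit
 * words, and operands wider than 1024 bits move from the 0x80-byte
 * SAFE_PK_FUNC_EXP16 windows to the 0x100-byte SAFE_PK_FUNC_EXP4
 * windows.  The values here are only a worked example.
 */
#if 0
#include <stdio.h>

int
main(void)
{
    int mod_bits = 1536;
    int words = (mod_bits + 31) / 32;                   /* 48 words */
    unsigned window = (mod_bits > 1024) ? 0x100 : 0x080;

    printf("B_LEN=%d words, window stride=0x%x bytes\n", words, window);
    return 0;
}
#endif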
static int
safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
{
    u_int plen = (cr->crp_nbits + 7) / 8;
    int i, sig = plen * 8;
    u_int8_t c, *p = cr->crp_p;

    DPRINTF(("%s()\n", __FUNCTION__));

    for (i = plen - 1; i >= 0; i--) {
        c = p[i];
        if (c != 0) {
            while ((c & 0x80) == 0) {
                sig--;
                c <<= 1;
            }
            break;
        }
        sig -= 8;
    }
    return (sig);
}
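
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * safe_ksigbits() on a little-endian parameter buffer.  The value 0x0321
 * stored as {0x21, 0x03} has 10 significant bits: the top nonzero byte
 * 0x03 contributes two, the byte below it eight.
 */
#if 0
#include <stdio.h>

static int
sigbits(const unsigned char *p, unsigned nbits)
{
    unsigned plen = (nbits + 7) / 8;
    int i, sig = plen * 8;
    unsigned char c;

    for (i = plen - 1; i >= 0; i--) {
        c = p[i];
        if (c != 0) {
            while ((c & 0x80) == 0) {   /* shift until MSB is set */
                sig--;
                c <<= 1;
            }
            break;
        }
        sig -= 8;                       /* whole byte of zeros */
    }
    return sig;
}

int
main(void)
{
    unsigned char n[] = { 0x21, 0x03, 0x00 };   /* 0x0321, zero-padded */

    printf("%d\n", sigbits(n, 24));             /* 10 */
    return 0;
}
#endif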
static void
safe_kfeed(struct safe_softc *sc)
{
    struct safe_pkq *q, *tmp;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
        return;
    if (sc->sc_pkq_cur != NULL)
        return;
    list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
        sc->sc_pkq_cur = q;
        list_del(&q->pkq_list);
        if (safe_kstart(sc) != 0) {
            crypto_kdone(q->pkq_krp);
            kfree(q);
            sc->sc_pkq_cur = NULL;
        } else {
            /* op started, start polling */
            mod_timer(&sc->sc_pkto, jiffies + 1);
            break;
        }
    }
}
static void
safe_kpoll(unsigned long arg)
{
    struct safe_softc *sc = NULL;
    struct safe_pkq *q;
    struct crparam *res;
    int i;
    u_int32_t buf[64];
    unsigned long flags;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (arg >= SAFE_MAX_CHIPS)
        return;
    sc = safe_chip_idx[arg];
    if (!sc) {
        DPRINTF(("%s() - bad callback\n", __FUNCTION__));
        return;
    }

    spin_lock_irqsave(&sc->sc_pkmtx, flags);
    if (sc->sc_pkq_cur == NULL)
        goto out;
    if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
        /* still running, check back later */
        mod_timer(&sc->sc_pkto, jiffies + 1);
        goto out;
    }

    q = sc->sc_pkq_cur;
    res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
    bzero(buf, sizeof(buf));
    bzero(res->crp_p, (res->crp_nbits + 7) / 8);
    for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
        buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
            sc->sc_pk_resoff + (i << 2)));
    bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
    /*
     * reduce the bits that need copying if possible
     */
    res->crp_nbits = min(res->crp_nbits, sc->sc_pk_reslen * 8);
    res->crp_nbits = safe_ksigbits(sc, res);

    for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
        WRITE_REG(sc, i, 0);

    crypto_kdone(q->pkq_krp);
    kfree(q);
    sc->sc_pkq_cur = NULL;

    safe_kfeed(sc);
out:
    spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
}
static void
safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
    struct crparam *n)
{
    u_int32_t buf[64], i;

    DPRINTF(("%s()\n", __FUNCTION__));

    bzero(buf, sizeof(buf));
    bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);

    for (i = 0; i < len >> 2; i++)
        WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
            cpu_to_le32(buf[i]));
}
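
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * the byte layout safe_kload_reg() relies on.  Parameters arrive as
 * little-endian byte arrays; copying them into 32-bit words and storing
 * each word little-endian (cpu_to_le32) preserves the value regardless
 * of host endianness.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int
main(void)
{
    unsigned char p[] = { 0x78, 0x56, 0x34, 0x12 }; /* 0x12345678, LE */
    uint32_t w;

    memcpy(&w, p, 4);
    /* On a little-endian host w == 0x12345678 and cpu_to_le32() would
     * leave it unchanged; on a big-endian host cpu_to_le32() would
     * byte-swap it back to the same bus layout. */
    printf("0x%08x\n", (unsigned) w);
    return 0;
}
#endif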
#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
    printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
        , tag
        , READ_REG(sc, SAFE_DMA_ENDIAN)
        , READ_REG(sc, SAFE_DMA_SRCADDR)
        , READ_REG(sc, SAFE_DMA_DSTADDR)
        , READ_REG(sc, SAFE_DMA_STAT)
    );
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
    printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
        , tag
        , READ_REG(sc, SAFE_HI_CFG)
        , READ_REG(sc, SAFE_HI_MASK)
        , READ_REG(sc, SAFE_HI_DESC_CNT)
        , READ_REG(sc, SAFE_HU_STAT)
        , READ_REG(sc, SAFE_HM_STAT)
    );
}
static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
    u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

    /* NB: assume caller has lock on ring */
    printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
        tag,
        estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
        (unsigned long)(sc->sc_back - sc->sc_ring),
        (unsigned long)(sc->sc_front - sc->sc_ring));
}
static void
safe_dump_request(struct safe_softc *sc, const char *tag, struct safe_ringentry *re)
{
    int ix, nsegs;

    ix = re - sc->sc_ring;
    printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
        , tag
        , re, ix
        , re->re_desc.d_csr
        , re->re_desc.d_src
        , re->re_desc.d_dst
        , re->re_desc.d_sa
        , re->re_desc.d_len
    );
    if (re->re_src.nsegs > 1) {
        ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
            sizeof(struct safe_pdesc);
        for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
            printf(" spd[%u] %p: %p size %u flags %x"
                , ix, &sc->sc_spring[ix]
                , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
                , sc->sc_spring[ix].pd_size
                , sc->sc_spring[ix].pd_flags
            );
            if (sc->sc_spring[ix].pd_size == 0)
                printf(" (zero!)");
            printf("\n");
            if (++ix == SAFE_TOTAL_SPART)
                ix = 0;
        }
    }
    if (re->re_dst.nsegs > 1) {
        ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
            sizeof(struct safe_pdesc);
        for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
            printf(" dpd[%u] %p: %p flags %x\n"
                , ix, &sc->sc_dpring[ix]
                , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
                , sc->sc_dpring[ix].pd_flags
            );
            if (++ix == SAFE_TOTAL_DPART)
                ix = 0;
        }
    }
    printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
        re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
    printf("sa: key %x %x %x %x %x %x %x %x\n"
        , re->re_sa.sa_key[0]
        , re->re_sa.sa_key[1]
        , re->re_sa.sa_key[2]
        , re->re_sa.sa_key[3]
        , re->re_sa.sa_key[4]
        , re->re_sa.sa_key[5]
        , re->re_sa.sa_key[6]
        , re->re_sa.sa_key[7]
    );
    printf("sa: indigest %x %x %x %x %x\n"
        , re->re_sa.sa_indigest[0]
        , re->re_sa.sa_indigest[1]
        , re->re_sa.sa_indigest[2]
        , re->re_sa.sa_indigest[3]
        , re->re_sa.sa_indigest[4]
    );
    printf("sa: outdigest %x %x %x %x %x\n"
        , re->re_sa.sa_outdigest[0]
        , re->re_sa.sa_outdigest[1]
        , re->re_sa.sa_outdigest[2]
        , re->re_sa.sa_outdigest[3]
        , re->re_sa.sa_outdigest[4]
    );
    printf("sr: iv %x %x %x %x\n"
        , re->re_sastate.sa_saved_iv[0]
        , re->re_sastate.sa_saved_iv[1]
        , re->re_sastate.sa_saved_iv[2]
        , re->re_sastate.sa_saved_iv[3]
    );
    printf("sr: hashbc %u indigest %x %x %x %x %x\n"
        , re->re_sastate.sa_saved_hashbc
        , re->re_sastate.sa_saved_indigest[0]
        , re->re_sastate.sa_saved_indigest[1]
        , re->re_sastate.sa_saved_indigest[2]
        , re->re_sastate.sa_saved_indigest[3]
        , re->re_sastate.sa_saved_indigest[4]
    );
}
static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
    unsigned long flags;

    spin_lock_irqsave(&sc->sc_ringmtx, flags);
    printf("\nSafeNet Ring State:\n");
    safe_dump_intrstate(sc, tag);
    safe_dump_dmastatus(sc, tag);
    safe_dump_ringstate(sc, tag);
    if (sc->sc_nqchip) {
        struct safe_ringentry *re = sc->sc_back;
        do {
            safe_dump_request(sc, tag, re);
            if (++re == sc->sc_ringtop)
                re = sc->sc_ring;
        } while (re != sc->sc_front);
    }
    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
}
#endif /* SAFE_DEBUG */
static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
    struct safe_softc *sc = NULL;
    u32 mem_start, mem_len, cmd;
    int i, rc, devinfo;
    dma_addr_t raddr;
    static int num_chips = 0;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (pci_enable_device(dev) < 0)
        return(-ENODEV);

    if (!dev->irq) {
        printk("safe: found device with no IRQ assigned. check BIOS settings!\n");
        pci_disable_device(dev);
        return(-ENODEV);
    }

    if (pci_set_mwi(dev)) {
        printk("safe: pci_set_mwi failed!\n");
        return(-ENODEV);
    }

    sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
    if (!sc)
        return(-ENOMEM);
    memset(sc, 0, sizeof(*sc));

    softc_device_init(sc, "safe", num_chips, safe_methods);

    sc->sc_irq = -1;
    sc->sc_cid = -1;
    sc->sc_pcidev = dev;
    if (num_chips < SAFE_MAX_CHIPS) {
        safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
        num_chips++;
    }

    INIT_LIST_HEAD(&sc->sc_pkq);
    spin_lock_init(&sc->sc_pkmtx);

    pci_set_drvdata(sc->sc_pcidev, sc);

    /* we read its hardware registers as memory */
    mem_start = pci_resource_start(sc->sc_pcidev, 0);
    mem_len = pci_resource_len(sc->sc_pcidev, 0);

    sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
    if (!sc->sc_base_addr) {
        device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
            mem_start, mem_start + mem_len - 1);
        goto out;
    }
    /* fix up the bus size */
    if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
        device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
        goto out;
    }
    if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
        device_printf(sc->sc_dev,
            "No usable consistent DMA configuration, aborting.\n");
        goto out;
    }

    pci_set_master(sc->sc_pcidev);

    pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        device_printf(sc->sc_dev, "failed to enable memory mapping\n");
        goto out;
    }

    if (!(cmd & PCI_COMMAND_MASTER)) {
        device_printf(sc->sc_dev, "failed to enable bus mastering\n");
        goto out;
    }

    rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
    if (rc) {
        device_printf(sc->sc_dev, "failed to hook irq %d\n", dev->irq);
        goto out;
    }
    sc->sc_irq = dev->irq;

    sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
        (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

    /*
     * Allocate packet engine descriptors.
     */
    sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
        SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
        &sc->sc_ringalloc.dma_paddr);
    if (!sc->sc_ringalloc.dma_vaddr) {
        device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
        goto out;
    }
    /*
     * Hookup the static portion of all our data structures.
     */
    sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
    sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
    sc->sc_front = sc->sc_ring;
    sc->sc_back = sc->sc_ring;
    raddr = sc->sc_ringalloc.dma_paddr;
    bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
    for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
        struct safe_ringentry *re = &sc->sc_ring[i];

        re->re_desc.d_sa = raddr +
            offsetof(struct safe_ringentry, re_sa);
        re->re_sa.sa_staterec = raddr +
            offsetof(struct safe_ringentry, re_sastate);

        raddr += sizeof (struct safe_ringentry);
    }
    spin_lock_init(&sc->sc_ringmtx);

    /*
     * Allocate scatter and gather particle descriptors.
     */
    sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
        SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
        &sc->sc_spalloc.dma_paddr);
    if (!sc->sc_spalloc.dma_vaddr) {
        device_printf(sc->sc_dev,
            "cannot allocate source particle descriptor ring\n");
        goto out;
    }
    sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
    sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
    sc->sc_spfree = sc->sc_spring;
    bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

    sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
        SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
        &sc->sc_dpalloc.dma_paddr);
    if (!sc->sc_dpalloc.dma_vaddr) {
        device_printf(sc->sc_dev,
            "cannot allocate destination particle descriptor ring\n");
        goto out;
    }
    sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
    sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
    sc->sc_dpfree = sc->sc_dpring;
    bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

    sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
    if (sc->sc_cid < 0) {
        device_printf(sc->sc_dev, "could not get crypto driver id\n");
        goto out;
    }
    printf("%s:", device_get_nameunit(sc->sc_dev));

    devinfo = READ_REG(sc, SAFE_DEVINFO);
    if (devinfo & SAFE_DEVINFO_RNG) {
        sc->sc_flags |= SAFE_FLAGS_RNG;
        printf(" rng");
    }
    if (devinfo & SAFE_DEVINFO_PKEY) {
        printf(" key");
        sc->sc_flags |= SAFE_FLAGS_KEY;
        crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
#if 0
        crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
        init_timer(&sc->sc_pkto);
        sc->sc_pkto.function = safe_kpoll;
        sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
    }
    if (devinfo & SAFE_DEVINFO_DES) {
        printf(" des/3des");
        crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
        crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
    }
    if (devinfo & SAFE_DEVINFO_AES) {
        printf(" aes");
        crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
    }
    if (devinfo & SAFE_DEVINFO_MD5) {
        printf(" md5");
        crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
    }
    if (devinfo & SAFE_DEVINFO_SHA1) {
        printf(" sha1");
        crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
    }
    printf(" null");
    crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
    crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
    /* XXX other supported algorithms */
    printf("\n");

    safe_reset_board(sc);       /* reset h/w */
    safe_init_board(sc);        /* init h/w */

#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
    if (sc->sc_flags & SAFE_FLAGS_RNG) {
        safe_rng_init(sc);
        crypto_rregister(sc->sc_cid, safe_read_random, sc);
    }
#endif /* CONFIG_OCF_RANDOMHARVEST && !SAFE_NO_RNG */

    return (0);
out:
    if (sc->sc_cid >= 0)
        crypto_unregister_all(sc->sc_cid);
    if (sc->sc_irq != -1)
        free_irq(sc->sc_irq, sc);
    if (sc->sc_ringalloc.dma_vaddr)
        pci_free_consistent(sc->sc_pcidev,
            SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
            sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
    if (sc->sc_spalloc.dma_vaddr)
        pci_free_consistent(sc->sc_pcidev,
            SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
            sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
    if (sc->sc_dpalloc.dma_vaddr)
        pci_free_consistent(sc->sc_pcidev,
            SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
            sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
    kfree(sc);
    return(-ENODEV);
}
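
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * the offsetof() pattern used in safe_probe() to pre-link each ring
 * entry's descriptor to the bus address of structures embedded in the
 * same entry.  "struct entry" and the addresses are hypothetical
 * stand-ins for struct safe_ringentry and the real DMA mapping.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct entry {
    unsigned long d_sa;     /* bus address of "sa", patched at init */
    char sa[32];            /* embedded per-entry state */
};

int
main(void)
{
    struct entry ring[4];
    unsigned long paddr = 0x10000;  /* pretend bus address of ring[0] */
    int i;

    /* Walk the ring once at init, as the driver does: each entry's
     * descriptor field points at its own embedded structure. */
    for (i = 0; i < 4; i++) {
        ring[i].d_sa = paddr + offsetof(struct entry, sa);
        paddr += sizeof(struct entry);
    }
    printf("entry1 sa at 0x%lx\n", ring[1].d_sa);
    return 0;
}
#endif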
static void safe_remove(struct pci_dev *dev)
{
    struct safe_softc *sc = pci_get_drvdata(dev);

    DPRINTF(("%s()\n", __FUNCTION__));

    /* XXX wait/abort active ops */

    WRITE_REG(sc, SAFE_HI_MASK, 0);     /* disable interrupts */

    del_timer_sync(&sc->sc_pkto);

    crypto_unregister_all(sc->sc_cid);

    safe_cleanchip(sc);

    if (sc->sc_irq != -1)
        free_irq(sc->sc_irq, sc);
    if (sc->sc_ringalloc.dma_vaddr)
        pci_free_consistent(sc->sc_pcidev,
            SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
            sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
    if (sc->sc_spalloc.dma_vaddr)
        pci_free_consistent(sc->sc_pcidev,
            SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
            sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
    if (sc->sc_dpalloc.dma_vaddr)
        pci_free_consistent(sc->sc_pcidev,
            SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
            sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
    sc->sc_irq = -1;
    sc->sc_ringalloc.dma_vaddr = NULL;
    sc->sc_spalloc.dma_vaddr = NULL;
    sc->sc_dpalloc.dma_vaddr = NULL;
}
static struct pci_device_id safe_pci_tbl[] = {
    { PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
    { },
};
MODULE_DEVICE_TABLE(pci, safe_pci_tbl);

static struct pci_driver safe_driver = {
    .name     = "safe",
    .id_table = safe_pci_tbl,
    .probe    = safe_probe,
    .remove   = safe_remove,
    /* add PM stuff here one day */
};
static int __init safe_init (void)
{
    int rc;

    DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));

    rc = pci_register_driver(&safe_driver);
    pci_register_driver_compat(&safe_driver, rc);

    return rc;
}

static void __exit safe_exit (void)
{
    pci_unregister_driver(&safe_driver);
}

module_init(safe_init);
module_exit(safe_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("OCF driver for SafeNet PCI crypto devices");