This source file includes the following definitions:
- safe_probe
- safe_attach
- safe_process
- safe_reset_board
- safe_init_board
- safe_init_pciregs
- safe_dma_malloc
- safe_dma_free
- safe_rng_init
- safe_rng_read
- safe_rng
- safe_newsession
- safe_freesession
- safe_dmamap_aligned
- safe_cleanchip
- safe_free_entry
- safe_feed
- safe_dmamap_uniform
- safe_mcopy
- safe_callback
- safe_intr
- safe_kfind
- safe_kprocess
- safe_kstart
- safe_ksigbits
- safe_kfeed
- safe_kpoll
- safe_kload_reg
- safe_dump_dmastatus
- safe_dump_intrstate
- safe_dump_ringstate
- safe_dump_request
- safe_dump_ring
32 #include <sys/cdefs.h>
33
34
35
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/proc.h>
40 #include <sys/errno.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/mbuf.h>
44 #include <sys/device.h>
45 #include <sys/timeout.h>
46
47 #include <machine/bus.h>
48
49 #include <crypto/md5.h>
50 #include <crypto/sha1.h>
51 #include <crypto/cryptodev.h>
52 #include <crypto/cryptosoft.h>
53 #include <dev/rndvar.h>
54
55 #include <dev/pci/pcivar.h>
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcidevs.h>
58
59 #include <dev/pci/safereg.h>
60 #include <dev/pci/safevar.h>
61
62 #ifndef bswap32
63 #define bswap32 NTOHL
64 #endif
65
66 #define KASSERT_X(x,y)
67
68
69
70
71 int safe_probe(struct device *, void *, void *);
72 void safe_attach(struct device *, struct device *, void *);
73
74 struct cfattach safe_ca = {
75 sizeof(struct safe_softc), safe_probe, safe_attach
76 };
77
78 struct cfdriver safe_cd = {
79 0, "safe", DV_DULL
80 };
81
82 int safe_intr(void *);
83 int safe_newsession(u_int32_t *, struct cryptoini *);
84 int safe_freesession(u_int64_t);
85 int safe_process(struct cryptop *);
86 int safe_kprocess(struct cryptkop *);
87 int safe_kstart(struct safe_softc *);
88 void safe_kload_reg(struct safe_softc *, u_int32_t, u_int32_t,
89 struct crparam *);
90 struct safe_softc *safe_kfind(struct cryptkop *);
91 void safe_kpoll(void *);
92 void safe_kfeed(struct safe_softc *);
93 int safe_ksigbits(struct crparam *cr);
94 void safe_callback(struct safe_softc *, struct safe_ringentry *);
95 void safe_feed(struct safe_softc *, struct safe_ringentry *);
96 void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
97 void safe_rng_init(struct safe_softc *);
98 void safe_rng(void *);
99 int safe_dma_malloc(struct safe_softc *, bus_size_t,
100 struct safe_dma_alloc *, int);
101 #define safe_dma_sync(_sc, _dma, _flags) \
102 bus_dmamap_sync((_sc)->sc_dmat, (_dma)->dma_map, 0, \
103 (_dma)->dma_map->dm_mapsize, (_flags))
104 void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
105 int safe_dmamap_aligned(const struct safe_operand *);
106 int safe_dmamap_uniform(const struct safe_operand *);
107
108 void safe_reset_board(struct safe_softc *);
109 void safe_init_board(struct safe_softc *);
110 void safe_init_pciregs(struct safe_softc *);
111 void safe_cleanchip(struct safe_softc *);
112 __inline u_int32_t safe_rng_read(struct safe_softc *);
113
114 int safe_free_entry(struct safe_softc *, struct safe_ringentry *);
115
116 #ifdef SAFE_DEBUG
117 int safe_debug;
118 #define DPRINTF(_x) if (safe_debug) printf _x
119
120 void safe_dump_dmastatus(struct safe_softc *, const char *);
121 void safe_dump_intrstate(struct safe_softc *, const char *);
122 void safe_dump_ringstate(struct safe_softc *, const char *);
123 void safe_dump_request(struct safe_softc *, const char *,
124 struct safe_ringentry *);
125 void safe_dump_ring(struct safe_softc *sc, const char *tag);
126 #else
127 #define DPRINTF(_x)
128 #endif
129
130 #define READ_REG(sc,r) \
131 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
132
133 #define WRITE_REG(sc,reg,val) \
134 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)
135
136 struct safe_stats safestats;
137
138 int safe_rnginterval = 1;
139 int safe_rngbufsize = 16;
140 int safe_rngmaxalarm = 8;
141
142 int
143 safe_probe(struct device *parent, void *match, void *aux)
144 {
145 struct pci_attach_args *pa = aux;
146
147 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SAFENET &&
148 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SAFENET_SAFEXCEL)
149 return (1);
150 return (0);
151 }
152
153 void
154 safe_attach(struct device *parent, struct device *self, void *aux)
155 {
156 struct safe_softc *sc = (struct safe_softc *)self;
157 struct pci_attach_args *pa = aux;
158 pci_intr_handle_t ih;
159 const char *intrstr = NULL;
160 bus_size_t iosize;
161 bus_addr_t raddr;
162 u_int32_t devinfo;
163 int algs[CRYPTO_ALGORITHM_MAX + 1], i;
164
165
166
167 SIMPLEQ_INIT(&sc->sc_pkq);
168 sc->sc_dmat = pa->pa_dmat;
169
170
171
172
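/* Map the controller's register window (BAR). */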
173 if (pci_mapreg_map(pa, SAFE_BAR, PCI_MAPREG_TYPE_MEM, 0,
174 &sc->sc_st, &sc->sc_sh, NULL, &iosize, 0)) {
175 printf(": can't map register space\n");
176 goto bad;
177 }
178
179 if (pci_intr_map(pa, &ih)) {
180 printf(": couldn't map interrupt\n");
181 goto bad1;
182 }
183 intrstr = pci_intr_string(pa->pa_pc, ih);
184 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, safe_intr, sc,
185 self->dv_xname);
186 if (sc->sc_ih == NULL) {
187 printf(": couldn't establish interrupt");
188 if (intrstr != NULL)
189 printf(" at %s", intrstr);
190 printf("\n");
191 goto bad2;
192 }
193
194 sc->sc_cid = crypto_get_driverid(0);
195 if (sc->sc_cid < 0) {
196 printf(": could not get crypto driver id\n");
197 goto bad3;
198 }
199
200 sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
201 (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
202
203
204
205
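/*
 * Allocate the packet engine descriptor ring.  Each entry embeds its
 * SA and state record, which are referenced below by bus address.
 */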
206 if (safe_dma_malloc(sc,
207 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
208 &sc->sc_ringalloc, 0)) {
209 printf(": cannot allocate PE descriptor ring\n");
210 goto bad4;
211 }
212
213
214
215 sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
216 sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
217 sc->sc_front = sc->sc_ring;
218 sc->sc_back = sc->sc_ring;
219 raddr = sc->sc_ringalloc.dma_paddr;
220 bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
221 for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
222 struct safe_ringentry *re = &sc->sc_ring[i];
223
224 re->re_desc.d_sa = raddr +
225 offsetof(struct safe_ringentry, re_sa);
226 re->re_sa.sa_staterec = raddr +
227 offsetof(struct safe_ringentry, re_sastate);
228
229 raddr += sizeof (struct safe_ringentry);
230 }
231
232
233
234
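/*
 * Allocate the source (gather) and destination (scatter) particle
 * descriptor rings used for multi-segment DMA.
 */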
235 if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
236 &sc->sc_spalloc, 0)) {
237 printf(": cannot allocate source particle descriptor ring\n");
238 safe_dma_free(sc, &sc->sc_ringalloc);
239 goto bad4;
240 }
241 sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
242 sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
243 sc->sc_spfree = sc->sc_spring;
244 bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
245
246 if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
247 &sc->sc_dpalloc, 0)) {
248 printf(": cannot allocate destination particle "
249 "descriptor ring\n");
250 safe_dma_free(sc, &sc->sc_spalloc);
251 safe_dma_free(sc, &sc->sc_ringalloc);
252 goto bad4;
253 }
254 sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
255 sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
256 sc->sc_dpfree = sc->sc_dpring;
257 bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
258
259 printf(":");
260
261 devinfo = READ_REG(sc, SAFE_DEVINFO);
262 if (devinfo & SAFE_DEVINFO_RNG)
263 printf(" RNG");
264
265 bzero(algs, sizeof(algs));
266 if (devinfo & SAFE_DEVINFO_PKEY) {
267 printf(" PK");
268 algs[CRK_MOD_EXP] = CRYPTO_ALG_FLAG_SUPPORTED;
269 crypto_kregister(sc->sc_cid, algs, safe_kprocess);
270 timeout_set(&sc->sc_pkto, safe_kpoll, sc);
271 }
272
273 bzero(algs, sizeof(algs));
274 if (devinfo & SAFE_DEVINFO_DES) {
275 printf(" 3DES");
276 algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
277 algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
278 }
279 if (devinfo & SAFE_DEVINFO_AES) {
280 printf(" AES");
281 algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
282 }
283 if (devinfo & SAFE_DEVINFO_MD5) {
284 printf(" MD5");
285 algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
286 }
287 if (devinfo & SAFE_DEVINFO_SHA1) {
288 printf(" SHA1");
289 algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
290 }
291 crypto_register(sc->sc_cid, algs, safe_newsession,
292 safe_freesession, safe_process);
293
294
295 printf(", %s\n", intrstr);
296
297 safe_reset_board(sc);
298 safe_init_pciregs(sc);
299 safe_init_board(sc);
300
301 if (devinfo & SAFE_DEVINFO_RNG) {
302 safe_rng_init(sc);
303
304 timeout_set(&sc->sc_rngto, safe_rng, sc);
305 timeout_add(&sc->sc_rngto, hz * safe_rnginterval);
306 }
307 return;
308
309 bad4:
310
311 bad3:
312 pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
313 bad2:
314 ;
315 bad1:
316 bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
317 bad:
318 return;
319 }
320
321 int
322 safe_process(struct cryptop *crp)
323 {
324 int err = 0, i, nicealign, uniform, s;
325 struct safe_softc *sc;
326 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
327 int bypass, oplen, ivsize, card;
328 int16_t coffset;
329 struct safe_session *ses;
330 struct safe_ringentry *re;
331 struct safe_sarec *sa;
332 struct safe_pdesc *pd;
333 u_int32_t cmd0, cmd1, staterec, iv[4];
334
335 s = splnet();
336 if (crp == NULL || crp->crp_callback == NULL) {
337 safestats.st_invalid++;
338 splx(s);
339 return (EINVAL);
340 }
341 card = SAFE_CARD(crp->crp_sid);
342 if (card >= safe_cd.cd_ndevs || safe_cd.cd_devs[card] == NULL) {
343 safestats.st_invalid++;
344 splx(s);
345 return (EINVAL);
346 }
347 sc = safe_cd.cd_devs[card];
348
349 if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
350 safestats.st_badsession++;
351 splx(s);
352 return (EINVAL);
353 }
354
355 if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
356 safestats.st_ringfull++;
357 splx(s);
358 return (ERESTART);
359 }
360 re = sc->sc_front;
361
362 staterec = re->re_sa.sa_staterec;
363
364 bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
365 re->re_sa.sa_staterec = staterec;
366
367 re->re_crp = crp;
368 re->re_sesn = SAFE_SESSION(crp->crp_sid);
369
370 if (crp->crp_flags & CRYPTO_F_IMBUF) {
371 re->re_src_m = (struct mbuf *)crp->crp_buf;
372 re->re_dst_m = (struct mbuf *)crp->crp_buf;
373 } else if (crp->crp_flags & CRYPTO_F_IOV) {
374 re->re_src_io = (struct uio *)crp->crp_buf;
375 re->re_dst_io = (struct uio *)crp->crp_buf;
376 } else {
377 safestats.st_badflags++;
378 err = EINVAL;
379 goto errout;
380 }
381
382 sa = &re->re_sa;
383 ses = &sc->sc_sessions[re->re_sesn];
384
385 crd1 = crp->crp_desc;
386 if (crd1 == NULL) {
387 safestats.st_nodesc++;
388 err = EINVAL;
389 goto errout;
390 }
391 crd2 = crd1->crd_next;
392
393 cmd0 = SAFE_SA_CMD0_BASIC;
394 cmd1 = 0;
395 if (crd2 == NULL) {
396 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
397 crd1->crd_alg == CRYPTO_SHA1_HMAC) {
398 maccrd = crd1;
399 enccrd = NULL;
400 cmd0 |= SAFE_SA_CMD0_OP_HASH;
401 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
402 crd1->crd_alg == CRYPTO_3DES_CBC ||
403 crd1->crd_alg == CRYPTO_AES_CBC) {
404 maccrd = NULL;
405 enccrd = crd1;
406 cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
407 } else {
408 safestats.st_badalg++;
409 err = EINVAL;
410 goto errout;
411 }
412 } else {
413 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
414 crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
415 (crd2->crd_alg == CRYPTO_DES_CBC ||
416 crd2->crd_alg == CRYPTO_3DES_CBC ||
417 crd2->crd_alg == CRYPTO_AES_CBC) &&
418 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
419 maccrd = crd1;
420 enccrd = crd2;
421 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
422 crd1->crd_alg == CRYPTO_3DES_CBC ||
423 crd1->crd_alg == CRYPTO_AES_CBC) &&
424 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
425 crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
426 (crd1->crd_flags & CRD_F_ENCRYPT)) {
427 enccrd = crd1;
428 maccrd = crd2;
429 } else {
430 safestats.st_badalg++;
431 err = EINVAL;
432 goto errout;
433 }
434 cmd0 |= SAFE_SA_CMD0_OP_BOTH;
435 }
436
437 if (enccrd) {
438 if (enccrd->crd_alg == CRYPTO_DES_CBC) {
439 cmd0 |= SAFE_SA_CMD0_DES;
440 cmd1 |= SAFE_SA_CMD1_CBC;
441 ivsize = 2*sizeof(u_int32_t);
442 } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
443 cmd0 |= SAFE_SA_CMD0_3DES;
444 cmd1 |= SAFE_SA_CMD1_CBC;
445 ivsize = 2*sizeof(u_int32_t);
446 } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
447 cmd0 |= SAFE_SA_CMD0_AES;
448 cmd1 |= SAFE_SA_CMD1_CBC;
449 if (ses->ses_klen == 128)
450 cmd1 |= SAFE_SA_CMD1_AES128;
451 else if (ses->ses_klen == 192)
452 cmd1 |= SAFE_SA_CMD1_AES192;
453 else
454 cmd1 |= SAFE_SA_CMD1_AES256;
455 ivsize = 4*sizeof(u_int32_t);
456 } else {
457 cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
458 ivsize = 0;
459 }
460
461
462
463
464
465
466
467
468
469
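/*
 * Set up the IV.  For outbound (encrypt) operations take the IV from
 * the descriptor or the session, write it into the packet if the
 * caller did not, and have the engine save the final IV.  For inbound
 * (decrypt) operations pull the IV out of the packet.
 */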
470 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
471 cmd0 |= SAFE_SA_CMD0_OUTBOUND;
472
473 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
474 bcopy(enccrd->crd_iv, iv, ivsize);
475 else
476 bcopy(ses->ses_iv, iv, ivsize);
477 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
478 if (crp->crp_flags & CRYPTO_F_IMBUF)
479 m_copyback(re->re_src_m,
480 enccrd->crd_inject, ivsize, iv);
481 else if (crp->crp_flags & CRYPTO_F_IOV)
482 cuio_copyback(re->re_src_io,
483 enccrd->crd_inject, ivsize, iv);
484 }
485 for (i = 0; i < ivsize / sizeof(iv[0]); i++)
486 re->re_sastate.sa_saved_iv[i] = htole32(iv[i]);
487 cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
488 re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
489 } else {
490 cmd0 |= SAFE_SA_CMD0_INBOUND;
491
492 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
493 bcopy(enccrd->crd_iv, iv, ivsize);
494 else if (crp->crp_flags & CRYPTO_F_IMBUF)
495 m_copydata(re->re_src_m, enccrd->crd_inject,
496 ivsize, (caddr_t)iv);
497 else if (crp->crp_flags & CRYPTO_F_IOV)
498 cuio_copydata(re->re_src_io, enccrd->crd_inject,
499 ivsize, (caddr_t)iv);
500 for (i = 0; i < ivsize / sizeof(iv[0]); i++)
501 re->re_sastate.sa_saved_iv[i] = htole32(iv[i]);
502 cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
503 }
504
505
506
507
508
509
510
511
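/* Use the zero pad algorithm for block cipher padding. */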
512 cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
513
514
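/* Copy the session key (already in engine byte order) into the SA. */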
515 for (i = 0; i < sizeof(sa->sa_key)/sizeof(sa->sa_key[0]); i++)
516 sa->sa_key[i] = ses->ses_key[i];
517 }
518
519 if (maccrd) {
520 if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
521 cmd0 |= SAFE_SA_CMD0_MD5;
522 cmd1 |= SAFE_SA_CMD1_HMAC;
523 } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
524 cmd0 |= SAFE_SA_CMD0_SHA1;
525 cmd1 |= SAFE_SA_CMD1_HMAC;
526 } else {
527 cmd0 |= SAFE_SA_CMD0_HASH_NULL;
528 }
529
530
531
532
533
534
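/*
 * Load the precomputed inner/outer HMAC state into the SA, have the
 * engine take its hash state from the SA and save the result.
 */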
535 for (i = 0;
536 i < sizeof(sa->sa_outdigest)/sizeof(sa->sa_outdigest[i]);
537 i++) {
538 sa->sa_indigest[i] = ses->ses_hminner[i];
539 sa->sa_outdigest[i] = ses->ses_hmouter[i];
540 }
541
542 cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
543 re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
544 }
545
546 if (enccrd && maccrd) {
547
548
549
550
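/*
 * When hashing and encrypting, the hash region must start at or before
 * the crypt region and both must end at the same offset.  bypass is
 * the data skipped before processing begins; coffset is the crypt
 * offset relative to the hash start, programmed in 32-bit words.
 */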
551 bypass = maccrd->crd_skip;
552 coffset = enccrd->crd_skip - maccrd->crd_skip;
553 if (coffset < 0) {
554 DPRINTF(("%s: hash does not precede crypt; "
555 "mac skip %u enc skip %u\n",
556 __func__, maccrd->crd_skip, enccrd->crd_skip));
557 safestats.st_skipmismatch++;
558 err = EINVAL;
559 goto errout;
560 }
561 oplen = enccrd->crd_skip + enccrd->crd_len;
562 if (maccrd->crd_skip + maccrd->crd_len != oplen) {
563 DPRINTF(("%s: hash amount %u != crypt amount %u\n",
564 __func__, maccrd->crd_skip + maccrd->crd_len,
565 oplen));
566 safestats.st_lenmismatch++;
567 err = EINVAL;
568 goto errout;
569 }
570 #ifdef SAFE_DEBUG
571 if (safe_debug) {
572 printf("mac: skip %d, len %d, inject %d\n",
573 maccrd->crd_skip, maccrd->crd_len,
574 maccrd->crd_inject);
575 printf("enc: skip %d, len %d, inject %d\n",
576 enccrd->crd_skip, enccrd->crd_len,
577 enccrd->crd_inject);
578 printf("bypass %d coffset %d oplen %d\n",
579 bypass, coffset, oplen);
580 }
581 #endif
582 if (coffset & 3) {
583 DPRINTF(("%s: coffset %u misaligned\n",
584 __func__, coffset));
585 safestats.st_coffmisaligned++;
586 err = EINVAL;
587 goto errout;
588 }
589 coffset >>= 2;
590 if (coffset > 255) {
591 DPRINTF(("%s: coffset %u too big\n",
592 __func__, coffset));
593 safestats.st_cofftoobig++;
594 err = EINVAL;
595 goto errout;
596 }
597
598
599
600
601
602
603
604
605
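/*
 * Have the engine copy the header bytes (between the bypass region and
 * the start of the crypt region, typically the inline IV) to the output.
 */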
606 cmd1 |= SAFE_SA_CMD1_HDRCOPY;
607
608
609
610
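/* Disable mutable-bit handling; needed for correct HMAC results. */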
611 cmd1 |= SAFE_SA_CMD1_MUTABLE;
612 } else {
613 if (enccrd) {
614 bypass = enccrd->crd_skip;
615 oplen = bypass + enccrd->crd_len;
616 } else {
617 bypass = maccrd->crd_skip;
618 oplen = bypass + maccrd->crd_len;
619 }
620 coffset = 0;
621 }
622
623 if (bypass > 96) {
624 DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
625 safestats.st_bypasstoobig++;
626 err = EINVAL;
627 goto errout;
628 }
629
630 if (bus_dmamap_create(sc->sc_dmat, SAFE_MAX_DMA, SAFE_MAX_PART,
631 SAFE_MAX_DSIZE, SAFE_MAX_DSIZE, BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
632 &re->re_src_map)) {
633 safestats.st_nomap++;
634 err = ENOMEM;
635 goto errout;
636 }
637 if (crp->crp_flags & CRYPTO_F_IMBUF) {
638 if (bus_dmamap_load_mbuf(sc->sc_dmat, re->re_src_map,
639 re->re_src_m, BUS_DMA_NOWAIT)) {
640 bus_dmamap_destroy(sc->sc_dmat, re->re_src_map);
641 re->re_src_map = NULL;
642 safestats.st_noload++;
643 err = ENOMEM;
644 goto errout;
645 }
646 } else if (crp->crp_flags & CRYPTO_F_IOV) {
647 if (bus_dmamap_load_uio(sc->sc_dmat, re->re_src_map,
648 re->re_src_io, BUS_DMA_NOWAIT) != 0) {
649 bus_dmamap_destroy(sc->sc_dmat, re->re_src_map);
650 re->re_src_map = NULL;
651 safestats.st_noload++;
652 err = ENOMEM;
653 goto errout;
654 }
655 }
656 nicealign = safe_dmamap_aligned(&re->re_src);
657 uniform = safe_dmamap_uniform(&re->re_src);
658
659 DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
660 nicealign, uniform, re->re_src_nsegs));
661 if (re->re_src_nsegs > 1) {
662 re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
663 ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
664 for (i = 0; i < re->re_src_nsegs; i++) {
665
666 pd = sc->sc_spfree;
667 if (++(sc->sc_spfree) == sc->sc_springtop)
668 sc->sc_spfree = sc->sc_spring;
669
670 KASSERT_X((pd->pd_flags&3) == 0 ||
671 (pd->pd_flags&3) == SAFE_PD_DONE,
672 ("bogus source particle descriptor; flags %x",
673 pd->pd_flags));
674 pd->pd_addr = re->re_src_segs[i].ds_addr;
675 pd->pd_ctrl = SAFE_PD_READY |
676 ((re->re_src_segs[i].ds_len << SAFE_PD_LEN_S)
677 & SAFE_PD_LEN_M);
678 }
679 cmd0 |= SAFE_SA_CMD0_IGATHER;
680 } else {
681
682
683
684 re->re_desc.d_src = re->re_src_segs[0].ds_addr;
685 }
686
687 if (enccrd == NULL && maccrd != NULL) {
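/*
 * Hash-only operation: no destination buffer is needed; the digest
 * is returned via the saved SA state.
 */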
688
689
690
691 } else {
692 if (crp->crp_flags & CRYPTO_F_IOV) {
693 if (!nicealign) {
694 safestats.st_iovmisaligned++;
695 err = EINVAL;
696 goto errout;
697 }
698 if (uniform != 1) {
699
700
701
702
703
704
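/*
 * The source map is not uniform; load a separate destination map over
 * the same iovec and check whether that one can be used.
 */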
705 if (bus_dmamap_create(sc->sc_dmat,
706 SAFE_MAX_DMA, SAFE_MAX_PART,
707 SAFE_MAX_DSIZE, SAFE_MAX_DSIZE,
708 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
709 &re->re_dst_map)) {
710 safestats.st_nomap++;
711 err = ENOMEM;
712 goto errout;
713 }
714 if (bus_dmamap_load_uio(sc->sc_dmat,
715 re->re_dst_map, re->re_dst_io,
716 BUS_DMA_NOWAIT) != 0) {
717 bus_dmamap_destroy(sc->sc_dmat,
718 re->re_dst_map);
719 re->re_dst_map = NULL;
720 safestats.st_noload++;
721 err = ENOMEM;
722 goto errout;
723 }
724 uniform = safe_dmamap_uniform(&re->re_dst);
725 if (!uniform) {
726
727
728
729
730
731
732
733
734
735
736
737
738
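/*
 * A non-uniform iovec destination cannot be handled without copying
 * the data; reject the request.
 */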
739 safestats.st_iovnotuniform++;
740 err = EINVAL;
741 goto errout;
742 }
743 } else
744 re->re_dst = re->re_src;
745 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
746 if (nicealign && uniform == 1) {
747
748
749
750
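/* Source mbuf chain is aligned and uniform; operate in place. */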
751 re->re_dst = re->re_src;
752 } else if (nicealign && uniform == 2) {
753
754
755
756
757
758
759
760
761 if (bus_dmamap_create(sc->sc_dmat,
762 SAFE_MAX_DMA, SAFE_MAX_PART,
763 SAFE_MAX_DSIZE, SAFE_MAX_DSIZE,
764 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
765 &re->re_dst_map)) {
766 safestats.st_nomap++;
767 err = ENOMEM;
768 goto errout;
769 }
770 if (bus_dmamap_load_mbuf(sc->sc_dmat,
771 re->re_dst_map, re->re_dst_m,
772 BUS_DMA_NOWAIT) != 0) {
773 bus_dmamap_destroy(sc->sc_dmat,
774 re->re_dst_map);
775 re->re_dst_map = NULL;
776 safestats.st_noload++;
777 err = ENOMEM;
778 goto errout;
779 }
780 } else {
781 int totlen, len;
782 struct mbuf *m, *top, **mp;
783
784
785
786
787
788
789
790
791
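/*
 * The source chain cannot be used as the destination; build a new
 * mbuf chain large enough for the result, then copy over any data
 * beyond the processed region that the engine will not write.
 */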
792 if (!nicealign)
793 safestats.st_unaligned++;
794 if (!uniform)
795 safestats.st_notuniform++;
796 totlen = re->re_src_mapsize;
797 if (re->re_src_m->m_flags & M_PKTHDR) {
798 len = MHLEN;
799 MGETHDR(m, M_DONTWAIT, MT_DATA);
800 } else {
801 len = MLEN;
802 MGET(m, M_DONTWAIT, MT_DATA);
803 }
804 if (m == NULL) {
805 safestats.st_nombuf++;
806 err = sc->sc_nqchip ? ERESTART : ENOMEM;
807 goto errout;
808 }
809 if (len == MHLEN)
810 M_DUP_PKTHDR(m, re->re_src_m);
811 if (totlen >= MINCLSIZE) {
812 MCLGET(m, M_DONTWAIT);
813 if ((m->m_flags & M_EXT) == 0) {
814 m_free(m);
815 safestats.st_nomcl++;
816 err = sc->sc_nqchip ?
817 ERESTART : ENOMEM;
818 goto errout;
819 }
820 len = MCLBYTES;
821 }
822 m->m_len = len;
823 top = NULL;
824 mp = &top;
825
826 while (totlen > 0) {
827 if (top) {
828 MGET(m, M_DONTWAIT, MT_DATA);
829 if (m == NULL) {
830 m_freem(top);
831 safestats.st_nombuf++;
832 err = sc->sc_nqchip ?
833 ERESTART : ENOMEM;
834 goto errout;
835 }
836 len = MLEN;
837 }
838 if (top && totlen >= MINCLSIZE) {
839 MCLGET(m, M_DONTWAIT);
840 if ((m->m_flags & M_EXT) == 0) {
841 *mp = m;
842 m_freem(top);
843 safestats.st_nomcl++;
844 err = sc->sc_nqchip ?
845 ERESTART : ENOMEM;
846 goto errout;
847 }
848 len = MCLBYTES;
849 }
850 m->m_len = len = min(totlen, len);
851 totlen -= len;
852 *mp = m;
853 mp = &m->m_next;
854 }
855 re->re_dst_m = top;
856 if (bus_dmamap_create(sc->sc_dmat,
857 SAFE_MAX_DMA, SAFE_MAX_PART,
858 SAFE_MAX_DSIZE, SAFE_MAX_DSIZE,
859 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
860 &re->re_dst_map) != 0) {
861 safestats.st_nomap++;
862 err = ENOMEM;
863 goto errout;
864 }
865 if (bus_dmamap_load_mbuf(sc->sc_dmat,
866 re->re_dst_map, re->re_dst_m,
867 BUS_DMA_NOWAIT) != 0) {
868 bus_dmamap_destroy(sc->sc_dmat,
869 re->re_dst_map);
870 re->re_dst_map = NULL;
871 safestats.st_noload++;
872 err = ENOMEM;
873 goto errout;
874 }
875 if (re->re_src_mapsize > oplen) {
876
877
878
879
880
881
882
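/*
 * Copy data past the processed region into the new chain, unless it
 * is exactly the 12-byte ICV the engine will write itself.
 */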
883 if (!(maccrd &&
884 (re->re_src_mapsize-oplen) == 12 &&
885 maccrd->crd_inject == oplen))
886 safe_mcopy(re->re_src_m,
887 re->re_dst_m,
888 oplen);
889 else
890 safestats.st_noicvcopy++;
891 }
892 }
893 } else {
894 safestats.st_badflags++;
895 err = EINVAL;
896 goto errout;
897 }
898
899 if (re->re_dst_nsegs > 1) {
900 re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
901 ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
902 for (i = 0; i < re->re_dst_nsegs; i++) {
903 pd = sc->sc_dpfree;
904 KASSERT_X((pd->pd_flags&3) == 0 ||
905 (pd->pd_flags&3) == SAFE_PD_DONE,
906 ("bogus dest particle descriptor; flags %x",
907 pd->pd_flags));
908 if (++(sc->sc_dpfree) == sc->sc_dpringtop)
909 sc->sc_dpfree = sc->sc_dpring;
910 pd->pd_addr = re->re_dst_segs[i].ds_addr;
911 pd->pd_ctrl = SAFE_PD_READY;
912 }
913 cmd0 |= SAFE_SA_CMD0_OSCATTER;
914 } else {
915
916
917
918 re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
919 }
920 }
921
922
923
924
925
926
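/*
 * Finish the SA command words; input and output data are referenced
 * by PCI (bus) addresses.
 */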
927 sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
928 sa->sa_cmd1 = cmd1
929 | (coffset << SAFE_SA_CMD1_OFFSET_S)
930 | SAFE_SA_CMD1_SAREV1
931 | SAFE_SA_CMD1_SRPCI;
932
933
934
935
936
937
938
939
940
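/*
 * Build the descriptor control word: mark it ready and reference the
 * SA via PCI; when a MAC is requested also load the SA state and
 * finalize the hash.
 */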
941 re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
942 if (maccrd)
943 re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
944 re->re_desc.d_len = oplen
945 | SAFE_PE_LEN_READY
946 | (bypass << SAFE_PE_LEN_BYPASS_S)
947 ;
948
949 safestats.st_ipackets++;
950 safestats.st_ibytes += oplen;
951
952 if (++(sc->sc_front) == sc->sc_ringtop)
953 sc->sc_front = sc->sc_ring;
954
955
956 safe_feed(sc, re);
957 splx(s);
958 return (0);
959
960 errout:
961 if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
962 m_freem(re->re_dst_m);
963
964 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
965 bus_dmamap_unload(sc->sc_dmat, re->re_dst_map);
966 bus_dmamap_destroy(sc->sc_dmat, re->re_dst_map);
967 }
968 if (re->re_src_map != NULL) {
969 bus_dmamap_unload(sc->sc_dmat, re->re_src_map);
970 bus_dmamap_destroy(sc->sc_dmat, re->re_src_map);
971 }
972 crp->crp_etype = err;
973 crypto_done(crp);
974 splx(s);
975 return (err);
976 }
977
978
979
980
981
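/*
 * Reset the board: pulse the packet engine, descriptor ring and
 * scatter/gather reset bits, then restore the previous DMA config.
 */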
982 void
983 safe_reset_board(struct safe_softc *sc)
984 {
985 u_int32_t v;
986
987
988
989
990
991 v = READ_REG(sc, SAFE_PE_DMACFG) &
992 ~(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
993 SAFE_PE_DMACFG_SGRESET);
994 WRITE_REG(sc, SAFE_PE_DMACFG, v
995 | SAFE_PE_DMACFG_PERESET
996 | SAFE_PE_DMACFG_PDRRESET
997 | SAFE_PE_DMACFG_SGRESET);
998 WRITE_REG(sc, SAFE_PE_DMACFG, v);
999 }
1000
1001
1002
1003
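/*
 * Initialize the board: DMA and endian configuration, descriptor and
 * particle ring bases and sizes, then enable packet-engine mode and
 * set up interrupt coalescing.
 */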
1004 void
1005 safe_init_board(struct safe_softc *sc)
1006 {
1007 u_int32_t v, dwords;
1008
1009 v = READ_REG(sc, SAFE_PE_DMACFG);
1010 v &= ~(SAFE_PE_DMACFG_PEMODE | SAFE_PE_DMACFG_ESPACKET);
1011 v |= SAFE_PE_DMACFG_FSENA
1012 | SAFE_PE_DMACFG_GPRPCI
1013 | SAFE_PE_DMACFG_SPRPCI
1014 | SAFE_PE_DMACFG_ESDESC
1015 | SAFE_PE_DMACFG_ESPDESC
1016 | SAFE_PE_DMACFG_ESSA
1017 ;
1018 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1019
1020 WRITE_REG(sc, SAFE_CRYPTO_CTRL, SAFE_CRYPTO_CTRL_PKEY |
1021 SAFE_CRYPTO_CTRL_3DES | SAFE_CRYPTO_CTRL_RNG);
1022
1023 #if BYTE_ORDER == LITTLE_ENDIAN
1024 WRITE_REG(sc, SAFE_ENDIAN, SAFE_ENDIAN_TGT_PASS|SAFE_ENDIAN_DMA_PASS);
1025 #elif BYTE_ORDER == BIG_ENDIAN
1026 WRITE_REG(sc, SAFE_ENDIAN, SAFE_ENDIAN_TGT_PASS|SAFE_ENDIAN_DMA_SWAB);
1027 #endif
1028
1029 if (sc->sc_chiprev == SAFE_REV(1,0)) {
1030
1031
1032
1033
1034
1035
1036
1037 WRITE_REG(sc, SAFE_DMA_CFG, 256);
1038 printf("%s: Reduce max DMA size to %u words for rev %u.%u WAR\n",
1039 sc->sc_dev.dv_xname,
1040 (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
1041 SAFE_REV_MAJ(sc->sc_chiprev),
1042 SAFE_REV_MIN(sc->sc_chiprev));
1043 }
1044
1045
1046 WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
1047 WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
1048
1049
1050
1051 KASSERT_X((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
1052 ("PE ring entry not 32-bit aligned!"));
1053 dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
1054 WRITE_REG(sc, SAFE_PE_RINGCFG,
1055 (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
1056 WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);
1057
1058 WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
1059 WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
1060 WRITE_REG(sc, SAFE_PE_PARTSIZE,
1061 (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
1062
1063
1064
1065
1066
1067 WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);
1068
1069 WRITE_REG(sc, SAFE_HI_CLR, SAFE_INT_PE_CDONE | SAFE_INT_PE_DDONE |
1070 SAFE_INT_PE_ERROR | SAFE_INT_PE_ODONE);
1071
1072
1073 WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
1074
1075
1076
1077
1078
1079 DELAY(1000);
1080 WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
1081 DELAY(1000);
1082 WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
1083 DELAY(1000);
1084 WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
1085 DELAY(1000);
1086 }
1087
1088
1089
1090
1091 void
1092 safe_init_pciregs(struct safe_softc *sc)
1093 {
1094 }
1095
1096 int
1097 safe_dma_malloc(struct safe_softc *sc, bus_size_t size,
1098 struct safe_dma_alloc *dma, int mapflags)
1099 {
1100 int r;
1101
1102 if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
1103 &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0)
1104 goto fail_0;
1105
1106 if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
1107 size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
1108 goto fail_1;
1109
1110 if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1111 BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
1112 goto fail_2;
1113
1114 if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
1115 size, NULL, BUS_DMA_NOWAIT)) != 0)
1116 goto fail_3;
1117
1118 dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
1119 dma->dma_size = size;
1120 return (0);
1121
1122 fail_3:
1123 bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
1124 fail_2:
1125 bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
1126 fail_1:
1127 bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
1128 fail_0:
1129 dma->dma_map = NULL;
1130 return (r);
1131 }
1132
1133 void
1134 safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
1135 {
1136 bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
1137 bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
1138 bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
1139 bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
1140 }
1141
1142
1143 #define SAFE_RNG_MAXWAIT 1000
1144
1145 void
1146 safe_rng_init(struct safe_softc *sc)
1147 {
1148 u_int32_t w, v;
1149 int i;
1150
1151 WRITE_REG(sc, SAFE_RNG_CTRL, 0);
1152
1153 WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);
1154 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169 i = 0;
1170 w = READ_REG(sc, SAFE_RNG_OUT);
1171 do {
1172 v = READ_REG(sc, SAFE_RNG_OUT);
1173 if (v != w) {
1174 w = v;
1175 break;
1176 }
1177 DELAY(10);
1178 } while (++i < SAFE_RNG_MAXWAIT);
1179
1180
1181 i = 0;
1182 do {
1183 v = READ_REG(sc, SAFE_RNG_OUT);
1184 if (v != w)
1185 break;
1186 DELAY(10);
1187 } while (++i < SAFE_RNG_MAXWAIT);
1188 }
1189
1190 __inline u_int32_t
1191 safe_rng_read(struct safe_softc *sc)
1192 {
1193 int i;
1194
1195 i = 0;
1196 while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
1197 ;
1198 return (READ_REG(sc, SAFE_RNG_OUT));
1199 }
1200
1201 void
1202 safe_rng(void *arg)
1203 {
1204 struct safe_softc *sc = arg;
1205 u_int32_t buf[SAFE_RNG_MAXBUFSIZ];
1206 u_int maxwords;
1207 int i;
1208
1209 safestats.st_rng++;
1210
1211
1212
1213 maxwords = safe_rngbufsize;
1214 if (maxwords > SAFE_RNG_MAXBUFSIZ)
1215 maxwords = SAFE_RNG_MAXBUFSIZ;
1216 retry:
1217 for (i = 0; i < maxwords; i++)
1218 buf[i] = safe_rng_read(sc);
1219
1220
1221
1222
1223
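/*
 * If the alarm count exceeds our threshold the oscillators may be
 * resonating with an external signal; shorten the oscillator period
 * and step the frequency until the alarms stop, then re-read.
 */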
1224 if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
1225 u_int32_t freq_inc, w;
1226
1227 DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
1228 READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
1229 safestats.st_rngalarm++;
1230 WRITE_REG(sc, SAFE_RNG_CTRL,
1231 READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
1232 freq_inc = 18;
1233 for (i = 0; i < 64; i++) {
1234 w = READ_REG(sc, SAFE_RNG_CNFG);
1235 freq_inc = ((w + freq_inc) & 0x3fL);
1236 w = ((w & ~0x3fL) | freq_inc);
1237 WRITE_REG(sc, SAFE_RNG_CNFG, w);
1238
1239 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1240
1241 (void) safe_rng_read(sc);
1242 DELAY(25);
1243
1244 if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
1245 WRITE_REG(sc, SAFE_RNG_CTRL,
1246 READ_REG(sc, SAFE_RNG_CTRL) &
1247 ~SAFE_RNG_CTRL_SHORTEN);
1248 goto retry;
1249 }
1250 freq_inc = 1;
1251 }
1252 WRITE_REG(sc, SAFE_RNG_CTRL,
1253 READ_REG(sc, SAFE_RNG_CTRL) & ~SAFE_RNG_CTRL_SHORTEN);
1254 } else
1255 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1256
1257 for (i = 0; i < maxwords; i++)
1258 add_true_randomness(buf[i]);
1259
1260 timeout_add(&sc->sc_rngto, hz * safe_rnginterval);
1261 }
1262
1263
1264
1265
1266
1267
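/*
 * Allocate a new crypto session: at most one cipher and one MAC.
 * Precompute the HMAC inner/outer digests and convert key material
 * to the engine's byte order.
 */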
1268 int
1269 safe_newsession(u_int32_t *sidp, struct cryptoini *cri)
1270 {
1271 struct cryptoini *c, *encini = NULL, *macini = NULL;
1272 struct safe_softc *sc = NULL;
1273 struct safe_session *ses = NULL;
1274 MD5_CTX md5ctx;
1275 SHA1_CTX sha1ctx;
1276 int i, sesn;
1277
1278 if (sidp == NULL || cri == NULL)
1279 return (EINVAL);
1280 for (i = 0; i < safe_cd.cd_ndevs; i++) {
1281 sc = safe_cd.cd_devs[i];
1282 if (sc == NULL || sc->sc_cid == (*sidp))
1283 break;
1284 }
1285 if (sc == NULL)
1286 return (EINVAL);
1287
1288 for (c = cri; c != NULL; c = c->cri_next) {
1289 if (c->cri_alg == CRYPTO_MD5_HMAC ||
1290 c->cri_alg == CRYPTO_SHA1_HMAC) {
1291 if (macini)
1292 return (EINVAL);
1293 macini = c;
1294 } else if (c->cri_alg == CRYPTO_DES_CBC ||
1295 c->cri_alg == CRYPTO_3DES_CBC ||
1296 c->cri_alg == CRYPTO_AES_CBC) {
1297 if (encini)
1298 return (EINVAL);
1299 encini = c;
1300 } else
1301 return (EINVAL);
1302 }
1303 if (encini == NULL && macini == NULL)
1304 return (EINVAL);
1305 if (encini) {
1306 switch (encini->cri_alg) {
1307 case CRYPTO_DES_CBC:
1308 if (encini->cri_klen != 64)
1309 return (EINVAL);
1310 break;
1311 case CRYPTO_3DES_CBC:
1312 if (encini->cri_klen != 192)
1313 return (EINVAL);
1314 break;
1315 case CRYPTO_AES_CBC:
1316 if (encini->cri_klen != 128 &&
1317 encini->cri_klen != 192 &&
1318 encini->cri_klen != 256)
1319 return (EINVAL);
1320 break;
1321 }
1322 }
1323
1324 if (sc->sc_sessions == NULL) {
1325 ses = sc->sc_sessions = (struct safe_session *)malloc(
1326 sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
1327 if (ses == NULL)
1328 return (ENOMEM);
1329 sesn = 0;
1330 sc->sc_nsessions = 1;
1331 } else {
1332 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
1333 if (sc->sc_sessions[sesn].ses_used == 0) {
1334 ses = &sc->sc_sessions[sesn];
1335 break;
1336 }
1337 }
1338
1339 if (ses == NULL) {
1340 sesn = sc->sc_nsessions;
1341 ses = (struct safe_session *)malloc((sesn + 1) *
1342 sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
1343 if (ses == NULL)
1344 return (ENOMEM);
1345 bcopy(sc->sc_sessions, ses, sesn *
1346 sizeof(struct safe_session));
1347 bzero(sc->sc_sessions, sesn *
1348 sizeof(struct safe_session));
1349 free(sc->sc_sessions, M_DEVBUF);
1350 sc->sc_sessions = ses;
1351 ses = &sc->sc_sessions[sesn];
1352 sc->sc_nsessions++;
1353 }
1354 }
1355
1356 bzero(ses, sizeof(struct safe_session));
1357 ses->ses_used = 1;
1358
1359 if (encini) {
1360
1361 get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv));
1362
1363 ses->ses_klen = encini->cri_klen;
1364 bcopy(encini->cri_key, ses->ses_key, ses->ses_klen / 8);
1365
1366 for (i = 0;
1367 i < sizeof(ses->ses_key)/sizeof(ses->ses_key[0]); i++)
1368 ses->ses_key[i] = htole32(ses->ses_key[i]);
1369 }
1370
1371 if (macini) {
1372 for (i = 0; i < macini->cri_klen / 8; i++)
1373 macini->cri_key[i] ^= HMAC_IPAD_VAL;
1374
1375 if (macini->cri_alg == CRYPTO_MD5_HMAC) {
1376 MD5Init(&md5ctx);
1377 MD5Update(&md5ctx, macini->cri_key,
1378 macini->cri_klen / 8);
1379 MD5Update(&md5ctx, hmac_ipad_buffer,
1380 HMAC_BLOCK_LEN - (macini->cri_klen / 8));
1381 bcopy(md5ctx.state, ses->ses_hminner,
1382 sizeof(md5ctx.state));
1383 } else {
1384 SHA1Init(&sha1ctx);
1385 SHA1Update(&sha1ctx, macini->cri_key,
1386 macini->cri_klen / 8);
1387 SHA1Update(&sha1ctx, hmac_ipad_buffer,
1388 HMAC_BLOCK_LEN - (macini->cri_klen / 8));
1389 bcopy(sha1ctx.state, ses->ses_hminner,
1390 sizeof(sha1ctx.state));
1391 }
1392
1393 for (i = 0; i < macini->cri_klen / 8; i++)
1394 macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
1395
1396 if (macini->cri_alg == CRYPTO_MD5_HMAC) {
1397 MD5Init(&md5ctx);
1398 MD5Update(&md5ctx, macini->cri_key,
1399 macini->cri_klen / 8);
1400 MD5Update(&md5ctx, hmac_opad_buffer,
1401 HMAC_BLOCK_LEN - (macini->cri_klen / 8));
1402 bcopy(md5ctx.state, ses->ses_hmouter,
1403 sizeof(md5ctx.state));
1404 } else {
1405 SHA1Init(&sha1ctx);
1406 SHA1Update(&sha1ctx, macini->cri_key,
1407 macini->cri_klen / 8);
1408 SHA1Update(&sha1ctx, hmac_opad_buffer,
1409 HMAC_BLOCK_LEN - (macini->cri_klen / 8));
1410 bcopy(sha1ctx.state, ses->ses_hmouter,
1411 sizeof(sha1ctx.state));
1412 }
1413
1414 for (i = 0; i < macini->cri_klen / 8; i++)
1415 macini->cri_key[i] ^= HMAC_OPAD_VAL;
1416
1417
1418 for (i = 0;
1419 i < sizeof(ses->ses_hminner)/sizeof(ses->ses_hminner[0]);
1420 i++) {
1421 ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
1422 ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
1423 }
1424 }
1425
1426 *sidp = SAFE_SID(sc->sc_dev.dv_unit, sesn);
1427 return (0);
1428 }
1429
1430
1431
1432
1433 int
1434 safe_freesession(u_int64_t tid)
1435 {
1436 struct safe_softc *sc;
1437 int session, ret, card;
1438 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1439
1440 card = SAFE_CARD(sid);
1441 if (card >= safe_cd.cd_ndevs || safe_cd.cd_devs[card] == NULL)
1442 return (EINVAL);
1443 sc = safe_cd.cd_devs[card];
1444
1445 if (sc == NULL)
1446 return (EINVAL);
1447
1448 session = SAFE_SESSION(sid);
1449 if (session < sc->sc_nsessions) {
1450 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
1451 ret = 0;
1452 } else
1453 ret = EINVAL;
1454 return (ret);
1455 }
1456
1457
1458
1459
1460
1461
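/*
 * A DMA map is "aligned" when every segment starts on a 32-bit
 * boundary and every segment but the last has a length that is a
 * multiple of 4 bytes.
 */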
1462 int
1463 safe_dmamap_aligned(const struct safe_operand *op)
1464 {
1465 int i;
1466
1467 for (i = 0; i < op->map->dm_nsegs; i++) {
1468 if (op->map->dm_segs[i].ds_addr & 3)
1469 return (0);
1470 if (i != (op->map->dm_nsegs - 1) &&
1471 (op->map->dm_segs[i].ds_len & 3))
1472 return (0);
1473 }
1474 return (1);
1475 }
1476
1477
1478
1479
1480
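/*
 * Clean up after a chip problem: terminate any requests still queued
 * to the engine with an error.
 */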
1481 void
1482 safe_cleanchip(struct safe_softc *sc)
1483 {
1484
1485 if (sc->sc_nqchip != 0) {
1486 struct safe_ringentry *re = sc->sc_back;
1487
1488 while (re != sc->sc_front) {
1489 if (re->re_desc.d_csr != 0)
1490 safe_free_entry(sc, re);
1491 if (++re == sc->sc_ringtop)
1492 re = sc->sc_ring;
1493 }
1494 sc->sc_back = re;
1495 sc->sc_nqchip = 0;
1496 }
1497 }
1498
1499
1500
1501
1502
1503 int
1504 safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
1505 {
1506 struct cryptop *crp;
1507
1508
1509
1510
1511 if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
1512 m_freem(re->re_dst_m);
1513
1514 crp = (struct cryptop *)re->re_crp;
1515
1516 re->re_desc.d_csr = 0;
1517
1518 crp->crp_etype = EFAULT;
1519 crypto_done(crp);
1520 return (0);
1521 }
1522
1523
1524
1525
1526 void
1527 safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
1528 {
1529 bus_dmamap_sync(sc->sc_dmat, re->re_src_map,
1530 0, re->re_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1531 if (re->re_dst_map != NULL)
1532 bus_dmamap_sync(sc->sc_dmat, re->re_dst_map, 0,
1533 re->re_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1534
1535 safe_dma_sync(sc, &sc->sc_ringalloc,
1536 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1537 safe_dma_sync(sc, &sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
1538 safe_dma_sync(sc, &sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);
1539
1540 #ifdef SAFE_DEBUG
1541 if (safe_debug) {
1542 safe_dump_ringstate(sc, __func__);
1543 safe_dump_request(sc, __func__, re);
1544 }
1545 #endif
1546 sc->sc_nqchip++;
1547 if (sc->sc_nqchip > safestats.st_maxqchip)
1548 safestats.st_maxqchip = sc->sc_nqchip;
1549
1550 WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
1551 }
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
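/*
 * Check whether a DMA map is "uniform": returns 1 when every segment
 * but the last is exactly SAFE_MAX_DSIZE bytes, 2 when the non-final
 * segments are larger multiples of SAFE_MAX_DSIZE, and 0 otherwise.
 */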
1562 int
1563 safe_dmamap_uniform(const struct safe_operand *op)
1564 {
1565 int result = 1, i;
1566
1567 if (op->map->dm_nsegs <= 0)
1568 return (result);
1569
1570 for (i = 0; i < op->map->dm_nsegs-1; i++) {
1571 if (op->map->dm_segs[i].ds_len % SAFE_MAX_DSIZE)
1572 return (0);
1573 if (op->map->dm_segs[i].ds_len != SAFE_MAX_DSIZE)
1574 result = 2;
1575 }
1576 return (result);
1577 }
1578
1579
1580
1581
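/*
 * Copy all data in srcm starting at byte 'offset' into dstm; used to
 * carry bytes the engine will not write into a freshly allocated
 * output chain.
 */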
1582 void
1583 safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
1584 {
1585 u_int j, dlen, slen;
1586 caddr_t dptr, sptr;
1587
1588
1589
1590
1591 for (j = offset; srcm->m_len <= j;) {
1592 j -= srcm->m_len;
1593 srcm = srcm->m_next;
1594 if (srcm == NULL)
1595 return;
1596 }
1597 sptr = mtod(srcm, caddr_t) + j;
1598 slen = srcm->m_len - j;
1599
1600 for (j = offset; dstm->m_len <= j;) {
1601 j -= dstm->m_len;
1602 dstm = dstm->m_next;
1603 if (dstm == NULL)
1604 return;
1605 }
1606 dptr = mtod(dstm, caddr_t) + j;
1607 dlen = dstm->m_len - j;
1608
1609
1610
1611
1612 for (;;) {
1613 j = min(slen, dlen);
1614 bcopy(sptr, dptr, j);
1615 if (slen == j) {
1616 srcm = srcm->m_next;
1617 if (srcm == NULL)
1618 return;
1619 sptr = srcm->m_data;
1620 slen = srcm->m_len;
1621 } else
1622 sptr += j, slen -= j;
1623 if (dlen == j) {
1624 dstm = dstm->m_next;
1625 if (dstm == NULL)
1626 return;
1627 dptr = dstm->m_data;
1628 dlen = dstm->m_len;
1629 } else
1630 dptr += j, dlen -= j;
1631 }
1632 }
1633
1634 void
1635 safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
1636 {
1637 struct cryptop *crp = (struct cryptop *)re->re_crp;
1638 struct cryptodesc *crd;
1639
1640 safestats.st_opackets++;
1641 safestats.st_obytes += (re->re_dst_map == NULL) ?
1642 re->re_src_mapsize : re->re_dst_mapsize;
1643
1644 safe_dma_sync(sc, &sc->sc_ringalloc,
1645 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1646 if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
1647 printf("%s: csr 0x%x cmd0 0x%x cmd1 0x%x\n",
1648 sc->sc_dev.dv_xname, re->re_desc.d_csr,
1649 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
1650 safestats.st_peoperr++;
1651 crp->crp_etype = EIO;
1652 }
1653 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1654 bus_dmamap_sync(sc->sc_dmat, re->re_dst_map, 0,
1655 re->re_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1656 bus_dmamap_unload(sc->sc_dmat, re->re_dst_map);
1657 bus_dmamap_destroy(sc->sc_dmat, re->re_dst_map);
1658 }
1659 bus_dmamap_sync(sc->sc_dmat, re->re_src_map, 0,
1660 re->re_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1661 bus_dmamap_unload(sc->sc_dmat, re->re_src_map);
1662 bus_dmamap_destroy(sc->sc_dmat, re->re_src_map);
1663
1664
1665
1666
1667
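/*
 * If a separate output chain was allocated, free the source and hand
 * the new chain back to the caller.
 */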
1668 if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) {
1669 m_freem(re->re_src_m);
1670 crp->crp_buf = (caddr_t)re->re_dst_m;
1671 }
1672
1673 if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
1674
1675 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1676 int ivsize;
1677
1678 if (crd->crd_alg == CRYPTO_DES_CBC ||
1679 crd->crd_alg == CRYPTO_3DES_CBC) {
1680 ivsize = 2*sizeof(u_int32_t);
1681 } else if (crd->crd_alg == CRYPTO_AES_CBC) {
1682 ivsize = 4*sizeof(u_int32_t);
1683 } else
1684 continue;
1685 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1686 m_copydata((struct mbuf *)crp->crp_buf,
1687 crd->crd_skip + crd->crd_len - ivsize,
1688 ivsize,
1689 (caddr_t) sc->sc_sessions[re->re_sesn].ses_iv);
1690 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1691 cuio_copydata((struct uio *)crp->crp_buf,
1692 crd->crd_skip + crd->crd_len - ivsize,
1693 ivsize,
1694 (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
1695 }
1696 break;
1697 }
1698 }
1699
1700 if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
1701
1702 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1703 if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
1704 crd->crd_alg == CRYPTO_SHA1_HMAC))
1705 continue;
1706 if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
1707
1708
1709
1710
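/*
 * SHA-1 ICVs are returned byte swapped; fix up the three words
 * (12 bytes) that get copied out.
 */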
1711 bswap32(re->re_sastate.sa_saved_indigest[0]);
1712 bswap32(re->re_sastate.sa_saved_indigest[1]);
1713 bswap32(re->re_sastate.sa_saved_indigest[2]);
1714 }
1715 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1716 m_copyback((struct mbuf *)crp->crp_buf,
1717 crd->crd_inject, 12,
1718 (caddr_t)re->re_sastate.sa_saved_indigest);
1719 } else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) {
1720 bcopy((caddr_t)re->re_sastate.sa_saved_indigest,
1721 crp->crp_mac, 12);
1722 }
1723 break;
1724 }
1725 }
1726
1727 crypto_done(crp);
1728 }
1729
1730
1731
1732
1733 int
1734 safe_intr(void *arg)
1735 {
1736 struct safe_softc *sc = arg;
1737 volatile u_int32_t stat;
1738
1739 stat = READ_REG(sc, SAFE_HM_STAT);
1740 if (stat == 0)
1741 return (0);
1742
1743 WRITE_REG(sc, SAFE_HI_CLR, stat);
1744
1745 if ((stat & SAFE_INT_PE_DDONE)) {
1746
1747
1748
1749
1750 while (sc->sc_back != sc->sc_front) {
1751 struct safe_ringentry *re = sc->sc_back;
1752 #ifdef SAFE_DEBUG
1753 if (safe_debug) {
1754 safe_dump_ringstate(sc, __func__);
1755 safe_dump_request(sc, __func__, re);
1756 }
1757 #endif
1758
1759
1760
1761
1762
1763
1764
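/*
 * Process completed descriptors in order; stop at the first one the
 * engine has not finished (both the CSR and LEN done bits set).
 */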
1765 if (re->re_desc.d_csr != 0) {
1766 if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
1767 break;
1768 if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
1769 break;
1770 sc->sc_nqchip--;
1771 safe_callback(sc, re);
1772 }
1773 if (++(sc->sc_back) == sc->sc_ringtop)
1774 sc->sc_back = sc->sc_ring;
1775 }
1776 }
1777
1778 return (1);
1779 }
1780
1781 struct safe_softc *
1782 safe_kfind(struct cryptkop *krp)
1783 {
1784 struct safe_softc *sc;
1785 int i;
1786
1787 for (i = 0; i < safe_cd.cd_ndevs; i++) {
1788 sc = safe_cd.cd_devs[i];
1789 if (sc == NULL)
1790 continue;
1791 if (sc->sc_cid == krp->krp_hid)
1792 return (sc);
1793 }
1794 return (NULL);
1795 }
1796
1797 int
1798 safe_kprocess(struct cryptkop *krp)
1799 {
1800 struct safe_softc *sc;
1801 struct safe_pkq *q;
1802 int s;
1803
1804 if ((sc = safe_kfind(krp)) == NULL) {
1805 krp->krp_status = EINVAL;
1806 goto err;
1807 }
1808
1809 if (krp->krp_op != CRK_MOD_EXP) {
1810 krp->krp_status = EOPNOTSUPP;
1811 goto err;
1812 }
1813
1814 q = (struct safe_pkq *)malloc(sizeof(*q), M_DEVBUF, M_NOWAIT);
1815 if (q == NULL) {
1816 krp->krp_status = ENOMEM;
1817 goto err;
1818 }
1819 q->pkq_krp = krp;
1820
1821 s = splnet();
1822 SIMPLEQ_INSERT_TAIL(&sc->sc_pkq, q, pkq_next);
1823 safe_kfeed(sc);
1824 splx(s);
1825 return (0);
1826
1827 err:
1828 crypto_kdone(krp);
1829 return (0);
1830 }
1831
1832 #define SAFE_CRK_PARAM_BASE 0
1833 #define SAFE_CRK_PARAM_EXP 1
1834 #define SAFE_CRK_PARAM_MOD 2
1835
1836 int
1837 safe_kstart(struct safe_softc *sc)
1838 {
1839 struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
1840 int exp_bits, mod_bits, base_bits;
1841 u_int32_t op, a_off, b_off, c_off, d_off;
1842
1843 if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
1844 krp->krp_status = EINVAL;
1845 return (1);
1846 }
1847
1848 base_bits = safe_ksigbits(&krp->krp_param[SAFE_CRK_PARAM_BASE]);
1849 if (base_bits > 2048)
1850 goto too_big;
1851 if (base_bits <= 0)
1852 goto too_small;
1853
1854 exp_bits = safe_ksigbits(&krp->krp_param[SAFE_CRK_PARAM_EXP]);
1855 if (exp_bits > 2048)
1856 goto too_big;
1857 if (exp_bits <= 0)
1858 goto too_small;
1859
1860 mod_bits = safe_ksigbits(&krp->krp_param[SAFE_CRK_PARAM_MOD]);
1861 if (mod_bits > 2048)
1862 goto too_big;
1863 if (mod_bits <= 32)
1864 goto too_small;
1865 if (mod_bits < exp_bits)
1866 goto too_small;
1867 if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
1868 goto bad_domain;
1869 if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
1870 goto too_small;
1871
1872
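/*
 * The base must be no larger than the modulus; when the bit lengths
 * match, compare the operands byte by byte from the most
 * significant end.
 */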
1873 if (mod_bits < base_bits)
1874 goto too_small;
1875 if (mod_bits == base_bits) {
1876 u_int8_t *basep, *modp;
1877 int i;
1878
1879 basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
1880 ((base_bits + 7) / 8) - 1;
1881 modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
1882 ((mod_bits + 7) / 8) - 1;
1883
1884 for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
1885 if (*modp < *basep)
1886 goto too_small;
1887 if (*modp > *basep)
1888 break;
1889 }
1890 }
1891
1892
1893
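/*
 * Program the public-key unit: operand lengths in 32-bit words and
 * PK RAM offsets for exponent (A), modulus (B), base (C) and result
 * (D) (a wider layout is used for moduli over 1024 bits), then load
 * the operands and start the exponentiation.
 */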
1894 WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
1895 WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
1896 if (mod_bits > 1024) {
1897 op = SAFE_PK_FUNC_EXP4;
1898 a_off = 0x000;
1899 b_off = 0x100;
1900 c_off = 0x200;
1901 d_off = 0x300;
1902 } else {
1903 op = SAFE_PK_FUNC_EXP16;
1904 a_off = 0x000;
1905 b_off = 0x080;
1906 c_off = 0x100;
1907 d_off = 0x180;
1908 }
1909 sc->sc_pk_reslen = b_off - a_off;
1910 sc->sc_pk_resoff = d_off;
1911
1912
1913 safe_kload_reg(sc, a_off, b_off - a_off,
1914 &krp->krp_param[SAFE_CRK_PARAM_EXP]);
1915 WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
1916 safe_kload_reg(sc, b_off, b_off - a_off,
1917 &krp->krp_param[SAFE_CRK_PARAM_MOD]);
1918 WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
1919 safe_kload_reg(sc, c_off, b_off - a_off,
1920 &krp->krp_param[SAFE_CRK_PARAM_BASE]);
1921 WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
1922 WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
1923
1924 WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
1925
1926 return (0);
1927
1928 too_big:
1929 krp->krp_status = E2BIG;
1930 return (1);
1931 too_small:
1932 krp->krp_status = ERANGE;
1933 return (1);
1934 bad_domain:
1935 krp->krp_status = EDOM;
1936 return (1);
1937 }
1938
1939 int
1940 safe_ksigbits(struct crparam *cr)
1941 {
1942 u_int plen = (cr->crp_nbits + 7) / 8;
1943 int i, sig = plen * 8;
1944 u_int8_t c, *p = cr->crp_p;
1945
1946 for (i = plen - 1; i >= 0; i--) {
1947 c = p[i];
1948 if (c != 0) {
1949 while ((c & 0x80) == 0) {
1950 sig--;
1951 c <<= 1;
1952 }
1953 break;
1954 }
1955 sig -= 8;
1956 }
1957 return (sig);
1958 }
1959
1960 void
1961 safe_kfeed(struct safe_softc *sc)
1962 {
1963 if (SIMPLEQ_EMPTY(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
1964 return;
1965 if (sc->sc_pkq_cur != NULL)
1966 return;
1967 while (!SIMPLEQ_EMPTY(&sc->sc_pkq)) {
1968 struct safe_pkq *q = SIMPLEQ_FIRST(&sc->sc_pkq);
1969
1970 sc->sc_pkq_cur = q;
1971 SIMPLEQ_REMOVE_HEAD(&sc->sc_pkq, pkq_next);
1972 if (safe_kstart(sc) != 0) {
1973 crypto_kdone(q->pkq_krp);
1974 free(q, M_DEVBUF);
1975 sc->sc_pkq_cur = NULL;
1976 } else {
1977
1978 timeout_add(&sc->sc_pkto, 1);
1979 break;
1980 }
1981 }
1982 }
1983
1984 void
1985 safe_kpoll(void *vsc)
1986 {
1987 struct safe_softc *sc = vsc;
1988 struct safe_pkq *q;
1989 struct crparam *res;
1990 int s, i;
1991 u_int32_t buf[64];
1992
1993 s = splnet();
1994 if (sc->sc_pkq_cur == NULL)
1995 goto out;
1996 if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
1997
1998 timeout_add(&sc->sc_pkto, 1);
1999 goto out;
2000 }
2001
2002 q = sc->sc_pkq_cur;
2003 res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
2004 bzero(buf, sizeof(buf));
2005 bzero(res->crp_p, (res->crp_nbits + 7) / 8);
2006 for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
2007 buf[i] = letoh32(READ_REG(sc, SAFE_PK_RAM_START +
2008 sc->sc_pk_resoff + (i << 2)));
2009 bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
2010 res->crp_nbits = sc->sc_pk_reslen * 8;
2011 res->crp_nbits = safe_ksigbits(res);
2012
2013 for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
2014 WRITE_REG(sc, i, 0);
2015
2016 crypto_kdone(q->pkq_krp);
2017 free(q, M_DEVBUF);
2018 sc->sc_pkq_cur = NULL;
2019
2020 safe_kfeed(sc);
2021 out:
2022 splx(s);
2023 }
2024
2025 void
2026 safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
2027 struct crparam *n)
2028 {
2029 u_int32_t buf[64], i;
2030
2031 bzero(buf, sizeof(buf));
2032 bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
2033
2034 for (i = 0; i < len >> 2; i++)
2035 WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
2036 htole32(buf[i]));
2037 }
2038
2039 #ifdef SAFE_DEBUG
2040
2041 void
2042 safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
2043 {
2044 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n", tag,
2045 READ_REG(sc, SAFE_DMA_ENDIAN), READ_REG(sc, SAFE_DMA_SRCADDR),
2046 READ_REG(sc, SAFE_DMA_DSTADDR), READ_REG(sc, SAFE_DMA_STAT));
2047 }
2048
2049 void
2050 safe_dump_intrstate(struct safe_softc *sc, const char *tag)
2051 {
2052 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n",
2053 tag, READ_REG(sc, SAFE_HI_CFG), READ_REG(sc, SAFE_HI_MASK),
2054 READ_REG(sc, SAFE_HI_DESC_CNT), READ_REG(sc, SAFE_HU_STAT),
2055 READ_REG(sc, SAFE_HM_STAT));
2056 }
2057
2058 void
2059 safe_dump_ringstate(struct safe_softc *sc, const char *tag)
2060 {
2061 u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
2062
2063
2064 printf("%s: ERNGSTAT %x (next %u) back %u front %u\n",
2065 tag, estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
2066 sc->sc_back - sc->sc_ring, sc->sc_front - sc->sc_ring);
2067 }
2068
2069 void
2070 safe_dump_request(struct safe_softc *sc, const char *tag, struct safe_ringentry *re)
2071 {
2072 int ix, nsegs;
2073
2074 ix = re - sc->sc_ring;
2075 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n", tag,
2076 re, ix, re->re_desc.d_csr, re->re_desc.d_src, re->re_desc.d_dst,
2077 re->re_desc.d_sa, re->re_desc.d_len);
2078 if (re->re_src_nsegs > 1) {
2079 ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
2080 sizeof(struct safe_pdesc);
2081 for (nsegs = re->re_src_nsegs; nsegs; nsegs--) {
2082 printf(" spd[%u] %p: %p", ix,
2083 &sc->sc_spring[ix],
2084 (caddr_t)sc->sc_spring[ix].pd_addr);
2085 printf("\n");
2086 if (++ix == SAFE_TOTAL_SPART)
2087 ix = 0;
2088 }
2089 }
2090 if (re->re_dst_nsegs > 1) {
2091 ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
2092 sizeof(struct safe_pdesc);
2093 for (nsegs = re->re_dst_nsegs; nsegs; nsegs--) {
2094 printf(" dpd[%u] %p: %p\n", ix,
2095 &sc->sc_dpring[ix],
2096 (caddr_t) sc->sc_dpring[ix].pd_addr);
2097 if (++ix == SAFE_TOTAL_DPART)
2098 ix = 0;
2099 }
2100 }
2101 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
2102 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
2103 printf("sa: key %x %x %x %x %x %x %x %x\n", re->re_sa.sa_key[0],
2104 re->re_sa.sa_key[1], re->re_sa.sa_key[2], re->re_sa.sa_key[3],
2105 re->re_sa.sa_key[4], re->re_sa.sa_key[5], re->re_sa.sa_key[6],
2106 re->re_sa.sa_key[7]);
2107 printf("sa: indigest %x %x %x %x %x\n", re->re_sa.sa_indigest[0],
2108 re->re_sa.sa_indigest[1], re->re_sa.sa_indigest[2],
2109 re->re_sa.sa_indigest[3], re->re_sa.sa_indigest[4]);
2110 printf("sa: outdigest %x %x %x %x %x\n", re->re_sa.sa_outdigest[0],
2111 re->re_sa.sa_outdigest[1], re->re_sa.sa_outdigest[2],
2112 re->re_sa.sa_outdigest[3], re->re_sa.sa_outdigest[4]);
2113 printf("sr: iv %x %x %x %x\n",
2114 re->re_sastate.sa_saved_iv[0], re->re_sastate.sa_saved_iv[1],
2115 re->re_sastate.sa_saved_iv[2], re->re_sastate.sa_saved_iv[3]);
2116 printf("sr: hashbc %u indigest %x %x %x %x %x\n",
2117 re->re_sastate.sa_saved_hashbc,
2118 re->re_sastate.sa_saved_indigest[0],
2119 re->re_sastate.sa_saved_indigest[1],
2120 re->re_sastate.sa_saved_indigest[2],
2121 re->re_sastate.sa_saved_indigest[3],
2122 re->re_sastate.sa_saved_indigest[4]);
2123 }
2124
2125 void
2126 safe_dump_ring(struct safe_softc *sc, const char *tag)
2127 {
2128 printf("\nSafeNet Ring State:\n");
2129 safe_dump_intrstate(sc, tag);
2130 safe_dump_dmastatus(sc, tag);
2131 safe_dump_ringstate(sc, tag);
2132 if (sc->sc_nqchip) {
2133 struct safe_ringentry *re = sc->sc_back;
2134 do {
2135 safe_dump_request(sc, tag, re);
2136 if (++re == sc->sc_ringtop)
2137 re = sc->sc_ring;
2138 } while (re != sc->sc_front);
2139 }
2140 }
2141
2142 #endif