root/arch/i386/pci/glxsb.c

DEFINITIONS

This source file includes the following definitions:
  1. glxsb_match
  2. glxsb_attach
  3. glxsb_rnd
  4. glxsb_crypto_setup
  5. glxsb_crypto_newsession
  6. glxsb_crypto_freesession
  7. glxsb_aes
  8. glxsb_crypto_swauth
  9. glxsb_crypto_encdec
  10. glxsb_crypto_process
  11. glxsb_dma_alloc
  12. glxsb_dma_pre_op
  13. glxsb_dma_post_op
  14. glxsb_dma_free

    1 /*      $OpenBSD: glxsb.c,v 1.8 2007/08/07 09:48:23 markus Exp $        */
    2 
    3 /*
    4  * Copyright (c) 2006 Tom Cosgrove <tom@openbsd.org>
    5  * Copyright (c) 2003, 2004 Theo de Raadt
    6  * Copyright (c) 2003 Jason Wright
    7  *
    8  * Permission to use, copy, modify, and distribute this software for any
    9  * purpose with or without fee is hereby granted, provided that the above
   10  * copyright notice and this permission notice appear in all copies.
   11  *
   12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   19  */
   20 
   21 /*
   22  * Driver for the security block on the AMD Geode LX processors
   23  * http://www.amd.com/files/connectivitysolutions/geode/geode_lx/33234d_lx_ds.pdf
   24  */
   25 
   26 #include <sys/cdefs.h>
   27 #include <sys/param.h>
   28 #include <sys/systm.h>
   29 #include <sys/device.h>
   30 #include <sys/malloc.h>
   31 #include <sys/mbuf.h>
   32 #include <sys/types.h>
   33 #include <sys/timeout.h>
   34 
   35 #include <machine/bus.h>
   36 #include <machine/pctr.h>
   37 
   38 #include <dev/rndvar.h>
   39 #include <dev/pci/pcivar.h>
   40 #include <dev/pci/pcidevs.h>
   41 
   42 #ifdef CRYPTO
   43 #include <crypto/cryptodev.h>
   44 #include <crypto/rijndael.h>
   45 #include <crypto/xform.h>
   46 #include <crypto/cryptosoft.h>
   47 #endif
   48 
   49 #define SB_GLD_MSR_CAP          0x58002000      /* RO - Capabilities */
   50 #define SB_GLD_MSR_CONFIG       0x58002001      /* RW - Master Config */
   51 #define SB_GLD_MSR_SMI          0x58002002      /* RW - SMI */
   52 #define SB_GLD_MSR_ERROR        0x58002003      /* RW - Error */
   53 #define SB_GLD_MSR_PM           0x58002004      /* RW - Power Mgmt */
   54 #define SB_GLD_MSR_DIAG         0x58002005      /* RW - Diagnostic */
   55 #define SB_GLD_MSR_CTRL         0x58002006      /* RW - Security Block Cntrl */
   56 
   57                                                 /* For GLD_MSR_CTRL: */
   58 #define SB_GMC_DIV0             0x0000          /* AES update divisor values */
   59 #define SB_GMC_DIV1             0x0001
   60 #define SB_GMC_DIV2             0x0002
   61 #define SB_GMC_DIV3             0x0003
   62 #define SB_GMC_DIV_MASK         0x0003
   63 #define SB_GMC_SBI              0x0004          /* AES swap bits */
   64 #define SB_GMC_SBY              0x0008          /* AES swap bytes */
   65 #define SB_GMC_TW               0x0010          /* Time write (EEPROM) */
   66 #define SB_GMC_T_SEL0           0x0000          /* RNG post-proc: none */
   67 #define SB_GMC_T_SEL1           0x0100          /* RNG post-proc: LFSR */
   68 #define SB_GMC_T_SEL2           0x0200          /* RNG post-proc: whitener */
   69 #define SB_GMC_T_SEL3           0x0300          /* RNG LFSR+whitener */
   70 #define SB_GMC_T_SEL_MASK       0x0300
   71 #define SB_GMC_T_NE             0x0400          /* Noise (generator) Enable */
   72 #define SB_GMC_T_TM             0x0800          /* RNG test mode */
   73                                                 /*     (deterministic) */
   74 
   75 /* Security Block configuration/control registers (offsets from base) */
   76 
   77 #define SB_CTL_A                0x0000          /* RW - SB Control A */
   78 #define SB_CTL_B                0x0004          /* RW - SB Control B */
   79 #define SB_AES_INT              0x0008          /* RW - SB AES Interrupt */
   80 #define SB_SOURCE_A             0x0010          /* RW - Source A */
   81 #define SB_DEST_A               0x0014          /* RW - Destination A */
   82 #define SB_LENGTH_A             0x0018          /* RW - Length A */
   83 #define SB_SOURCE_B             0x0020          /* RW - Source B */
   84 #define SB_DEST_B               0x0024          /* RW - Destination B */
   85 #define SB_LENGTH_B             0x0028          /* RW - Length B */
   86 #define SB_WKEY                 0x0030          /* WO - Writable Key 0-3 */
   87 #define SB_WKEY_0               0x0030          /* WO - Writable Key 0 */
   88 #define SB_WKEY_1               0x0034          /* WO - Writable Key 1 */
   89 #define SB_WKEY_2               0x0038          /* WO - Writable Key 2 */
   90 #define SB_WKEY_3               0x003C          /* WO - Writable Key 3 */
   91 #define SB_CBC_IV               0x0040          /* RW - CBC IV 0-3 */
   92 #define SB_CBC_IV_0             0x0040          /* RW - CBC IV 0 */
   93 #define SB_CBC_IV_1             0x0044          /* RW - CBC IV 1 */
   94 #define SB_CBC_IV_2             0x0048          /* RW - CBC IV 2 */
   95 #define SB_CBC_IV_3             0x004C          /* RW - CBC IV 3 */
   96 #define SB_RANDOM_NUM           0x0050          /* RW - Random Number */
   97 #define SB_RANDOM_NUM_STATUS    0x0054          /* RW - Random Number Status */
   98 #define SB_EEPROM_COMM          0x0800          /* RW - EEPROM Command */
   99 #define SB_EEPROM_ADDR          0x0804          /* RW - EEPROM Address */
  100 #define SB_EEPROM_DATA          0x0808          /* RW - EEPROM Data */
  101 #define SB_EEPROM_SEC_STATE     0x080C          /* RW - EEPROM Security State */
  102 
  103                                                 /* For SB_CTL_A and _B */
  104 #define SB_CTL_ST               0x0001          /* Start operation (enc/dec) */
  105 #define SB_CTL_ENC              0x0002          /* Encrypt (0 is decrypt) */
  106 #define SB_CTL_DEC              0x0000          /* Decrypt */
  107 #define SB_CTL_WK               0x0004          /* Use writable key (we set) */
  108 #define SB_CTL_DC               0x0008          /* Destination coherent */
  109 #define SB_CTL_SC               0x0010          /* Source coherent */
  110 #define SB_CTL_CBC              0x0020          /* CBC (0 is ECB) */
  111 
  112                                                 /* For SB_AES_INT */
  113 #define SB_AI_DISABLE_AES_A     0x0001          /* Disable AES A compl int */
  114 #define SB_AI_ENABLE_AES_A      0x0000          /* Enable AES A compl int */
  115 #define SB_AI_DISABLE_AES_B     0x0002          /* Disable AES B compl int */
  116 #define SB_AI_ENABLE_AES_B      0x0000          /* Enable AES B compl int */
  117 #define SB_AI_DISABLE_EEPROM    0x0004          /* Disable EEPROM op comp int */
  118 #define SB_AI_ENABLE_EEPROM     0x0000          /* Enable EEPROM op compl int */
  119 #define SB_AI_AES_A_COMPLETE    0x0100          /* AES A operation complete */
  120 #define SB_AI_AES_B_COMPLETE    0x0200          /* AES B operation complete */
  121 #define SB_AI_EEPROM_COMPLETE   0x0400          /* EEPROM operation complete */
  122 
  123 #define SB_RNS_TRNG_VALID       0x0001          /* in SB_RANDOM_NUM_STATUS */
  124 
  125 #define SB_MEM_SIZE             0x0810          /* Size of memory block */
  126 
  127 #define SB_AES_ALIGN            0x0010          /* Source and dest buffers */
  128                                                 /* must be 16-byte aligned */
  129 #define SB_AES_BLOCK_SIZE       0x0010
  130 
  131 /*
  132  * The Geode LX security block AES acceleration doesn't perform scatter-
  133  * gather: it just takes source and destination addresses.  Therefore the
  134  * plain- and ciphertexts need to be contiguous.  To this end, we allocate
  135  * a buffer for both, and accept the overhead of copying in and out.  If
  136  * the number of bytes in one operation is bigger than allowed for by the
  137  * buffer (buffer is twice the size of the max length, as it has both input
  138  * and output) then we have to perform multiple encryptions/decryptions.
  139  */
  140 #define GLXSB_MAX_AES_LEN       16384
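      /*
       * For example, with this limit a 40960-byte request is handled as
       * two full 16384-byte passes plus one 8192-byte pass, the CBC IV
       * being carried from one pass to the next (see glxsb_crypto_encdec).
       */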
  141 
  142 #ifdef CRYPTO
  143 struct glxsb_dma_map {
  144         bus_dmamap_t            dma_map;
  145         bus_dma_segment_t       dma_seg;
  146         int                     dma_nsegs;
  147         int                     dma_size;
  148         caddr_t                 dma_vaddr;
  149         uint32_t                dma_paddr;
  150 };
  151 struct glxsb_session {
  152         uint32_t        ses_key[4];
  153         uint8_t         ses_iv[SB_AES_BLOCK_SIZE];
  154         int             ses_klen;
  155         int             ses_used;
  156         struct swcr_data *ses_swd;
  157 };
  158 #endif /* CRYPTO */
  159 
  160 struct glxsb_softc {
  161         struct device           sc_dev;
  162         bus_space_tag_t         sc_iot;
  163         bus_space_handle_t      sc_ioh;
  164         struct timeout          sc_to;
  165 
  166 #ifdef CRYPTO
  167         bus_dma_tag_t           sc_dmat;
  168         struct glxsb_dma_map    sc_dma;
  169         int32_t                 sc_cid;
  170         int                     sc_nsessions;
  171         struct glxsb_session    *sc_sessions;
  172 #endif /* CRYPTO */
  173 };
  174 
  175 int     glxsb_match(struct device *, void *, void *);
  176 void    glxsb_attach(struct device *, struct device *, void *);
  177 void    glxsb_rnd(void *);
  178 
  179 struct cfattach glxsb_ca = {
  180         sizeof(struct glxsb_softc), glxsb_match, glxsb_attach
  181 };
  182 
  183 struct cfdriver glxsb_cd = {
  184         NULL, "glxsb", DV_DULL
  185 };
  186 
  187 
  188 #ifdef CRYPTO
  189 
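      /*
       * A session ID packs the crd value into the top four bits and the
       * session index into the low 28 bits; this driver always uses crd 0.
       */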
  190 #define GLXSB_SESSION(sid)              ((sid) & 0x0fffffff)
  191 #define GLXSB_SID(crd,ses)              (((crd) << 28) | ((ses) & 0x0fffffff))
  192 
  193 static struct glxsb_softc *glxsb_sc;
  194 extern int i386_has_xcrypt;
  195 
  196 int glxsb_crypto_setup(struct glxsb_softc *);
  197 int glxsb_crypto_newsession(uint32_t *, struct cryptoini *);
  198 int glxsb_crypto_process(struct cryptop *);
  199 int glxsb_crypto_freesession(uint64_t);
  200 static __inline void glxsb_aes(struct glxsb_softc *, uint32_t, uint32_t,
  201     uint32_t, void *, int, void *);
  202 
  203 int glxsb_dma_alloc(struct glxsb_softc *, int, struct glxsb_dma_map *);
  204 void glxsb_dma_pre_op(struct glxsb_softc *, struct glxsb_dma_map *);
  205 void glxsb_dma_post_op(struct glxsb_softc *, struct glxsb_dma_map *);
  206 void glxsb_dma_free(struct glxsb_softc *, struct glxsb_dma_map *);
  207 
  208 #endif /* CRYPTO */
  209 
  210 
  211 int
  212 glxsb_match(struct device *parent, void *match, void *aux)
  213 {
  214         struct pci_attach_args *pa = aux;
  215 
  216         if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_AMD &&
  217             PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_AMD_GEODE_LX_CRYPTO)
  218                 return (1);
  219 
  220         return (0);
  221 }
  222 
  223 void
  224 glxsb_attach(struct device *parent, struct device *self, void *aux)
  225 {
  226         struct glxsb_softc *sc = (void *) self;
  227         struct pci_attach_args *pa = aux;
  228         bus_addr_t membase;
  229         bus_size_t memsize;
  230         uint64_t msr;
  231 #ifdef CRYPTO
  232         uint32_t intr;
  233 #endif
  234 
  235         msr = rdmsr(SB_GLD_MSR_CAP);
  236         if ((msr & 0xFFFF00) != 0x130400) {
  237                 printf(": unknown ID 0x%x\n", (int) ((msr & 0xFFFF00) >> 16));
  238                 return;
  239         }
  240 
  241         /* printf(": revision %d", (int) (msr & 0xFF)); */
  242 
  243         /* Map in the security block configuration/control registers */
  244         if (pci_mapreg_map(pa, PCI_MAPREG_START,
  245             PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_iot,
  246             &sc->sc_ioh, &membase, &memsize, SB_MEM_SIZE)) {
  247                 printf(": can't find mem space\n");
  248                 return;
  249         }
  250 
  251         /*
  252          * Configure the Security Block.
  253          *
  254          * We want to enable the noise generator (T_NE), and enable the
  255          * linear feedback shift register and whitener post-processing
  256          * (T_SEL = 3).  Also ensure that test mode (deterministic values)
  257          * is disabled.
  258          */
  259         msr = rdmsr(SB_GLD_MSR_CTRL);
  260         msr &= ~(SB_GMC_T_TM | SB_GMC_T_SEL_MASK);
  261         msr |= SB_GMC_T_NE | SB_GMC_T_SEL3;
  262 #if 0
  263         msr |= SB_GMC_SBI | SB_GMC_SBY;         /* for AES, if necessary */
  264 #endif
  265         wrmsr(SB_GLD_MSR_CTRL, msr);
  266 
  267         /* Install a periodic collector for the "true" (AMD's word) RNG */
  268         timeout_set(&sc->sc_to, glxsb_rnd, sc);
  269         glxsb_rnd(sc);
  270         printf(": RNG");
  271 
  272 #ifdef CRYPTO
  273         /* We don't have an interrupt handler, so disable completion INTs */
  274         intr = SB_AI_DISABLE_AES_A | SB_AI_DISABLE_AES_B |
  275             SB_AI_DISABLE_EEPROM | SB_AI_AES_A_COMPLETE |
  276             SB_AI_AES_B_COMPLETE | SB_AI_EEPROM_COMPLETE;
  277         bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_AES_INT, intr);
  278 
  279         sc->sc_dmat = pa->pa_dmat;
  280 
  281         if (glxsb_crypto_setup(sc))
  282                 printf(" AES");
  283 #endif
  284 
  285         printf("\n");
  286 }
  287 
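      /*
       * Timeout handler for the hardware RNG: if the status register
       * reports a valid word, feed it to the kernel entropy pool, then
       * reschedule so that we poll roughly 100 times per second.
       */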
  288 void
  289 glxsb_rnd(void *v)
  290 {
  291         struct glxsb_softc *sc = v;
  292         uint32_t status, value;
  293         extern int hz;
  294 
  295         status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SB_RANDOM_NUM_STATUS);
  296         if (status & SB_RNS_TRNG_VALID) {
  297                 value = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SB_RANDOM_NUM);
  298                 add_true_randomness(value);
  299         }
  300 
  301         timeout_add(&sc->sc_to, (hz > 100) ? (hz / 100) : 1);
  302 }
  303 
  304 #ifdef CRYPTO
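      /*
       * Allocate the contiguous bounce buffer and register AES-CBC
       * (done in hardware) plus the HMAC algorithms (done in software)
       * with the crypto framework.  Returns 1 on success, 0 on failure.
       */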
  305 int
  306 glxsb_crypto_setup(struct glxsb_softc *sc)
  307 {
  308         int algs[CRYPTO_ALGORITHM_MAX + 1];
  309 
  310         /* Allocate a contiguous DMA-able buffer to work in */
  311         if (glxsb_dma_alloc(sc, GLXSB_MAX_AES_LEN * 2, &sc->sc_dma) != 0)
  312                 return 0;
  313 
  314         bzero(algs, sizeof(algs));
  315         algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
  316         algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
  317         algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
  318         algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
  319         algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
  320         algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
  321         algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
  322 
  323         sc->sc_cid = crypto_get_driverid(0);
  324         if (sc->sc_cid < 0)
  325                 return 0;
  326 
  327         crypto_register(sc->sc_cid, algs, glxsb_crypto_newsession,
  328             glxsb_crypto_freesession, glxsb_crypto_process);
  329 
  330         sc->sc_nsessions = 0;
  331 
  332         glxsb_sc = sc;
  333 
  334         return 1;
  335 }
  336 
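      /*
       * Start a new session: reuse a free slot or grow the session
       * array, generate an initial IV and store the 128-bit AES key,
       * and precompute HMAC ipad/opad contexts for any authentication
       * algorithms requested.
       */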
  337 int
  338 glxsb_crypto_newsession(uint32_t *sidp, struct cryptoini *cri)
  339 {
  340         struct glxsb_softc *sc = glxsb_sc;
  341         struct glxsb_session *ses = NULL;
  342         struct auth_hash        *axf;
  343         struct cryptoini        *c;
  344         struct swcr_data        *swd;
  345         int sesn, i;
  346 
  347         if (sc == NULL || sidp == NULL || cri == NULL)
  348                 return (EINVAL);
  349 
  350         for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
  351                 if (sc->sc_sessions[sesn].ses_used == 0) {
  352                         ses = &sc->sc_sessions[sesn];
  353                         break;
  354                 }
  355         }
  356 
  357         if (ses == NULL) {
  358                 sesn = sc->sc_nsessions;
  359                 ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF, M_NOWAIT);
  360                 if (ses == NULL)
  361                         return (ENOMEM);
  362                 if (sesn != 0) {
  363                         bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
  364                         bzero(sc->sc_sessions, sesn * sizeof(*ses));
  365                         free(sc->sc_sessions, M_DEVBUF);
  366                 }
  367                 sc->sc_sessions = ses;
  368                 ses = &sc->sc_sessions[sesn];
  369                 sc->sc_nsessions++;
  370         }
  371 
  372         bzero(ses, sizeof(*ses));
  373         ses->ses_used = 1;
  374 
  375         for (c = cri; c != NULL; c = c->cri_next) {
  376                 switch (c->cri_alg) {
  377                 case CRYPTO_AES_CBC:
  378                         if (c->cri_klen != 128) {
  379                                 glxsb_crypto_freesession(sesn);
  380                                 return (EINVAL);
  381                         }
  382 
  383                         get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv));
  384                         ses->ses_klen = c->cri_klen;
  385 
  386                         /* Copy the key (Geode LX wants the primary key only) */
  387                         bcopy(c->cri_key, ses->ses_key, sizeof(ses->ses_key));
  388                         break;
  389 
  390                 case CRYPTO_MD5_HMAC:
  391                         axf = &auth_hash_hmac_md5_96;
  392                         goto authcommon;
  393                 case CRYPTO_SHA1_HMAC:
  394                         axf = &auth_hash_hmac_sha1_96;
  395                         goto authcommon;
  396                 case CRYPTO_RIPEMD160_HMAC:
  397                         axf = &auth_hash_hmac_ripemd_160_96;
  398                         goto authcommon;
  399                 case CRYPTO_SHA2_256_HMAC:
  400                         axf = &auth_hash_hmac_sha2_256_96;
  401                         goto authcommon;
  402                 case CRYPTO_SHA2_384_HMAC:
  403                         axf = &auth_hash_hmac_sha2_384_96;
  404                         goto authcommon;
  405                 case CRYPTO_SHA2_512_HMAC:
  406                         axf = &auth_hash_hmac_sha2_512_96;
  407                 authcommon:
  408                         MALLOC(swd, struct swcr_data *,
  409                             sizeof(struct swcr_data), M_CRYPTO_DATA,
  410                             M_NOWAIT);
  411                         if (swd == NULL) {
  412                                 glxsb_crypto_freesession(sesn);
  413                                 return (ENOMEM);
  414                         }
  415                         bzero(swd, sizeof(struct swcr_data));
  416                         ses->ses_swd = swd;
  417 
  418                         swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
  419                             M_NOWAIT);
  420                         if (swd->sw_ictx == NULL) {
  421                                 glxsb_crypto_freesession(sesn);
  422                                 return (ENOMEM);
  423                         }
  424 
  425                         swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
  426                             M_NOWAIT);
  427                         if (swd->sw_octx == NULL) {
  428                                 glxsb_crypto_freesession(sesn);
  429                                 return (ENOMEM);
  430                         }
  431 
  432                         for (i = 0; i < c->cri_klen / 8; i++)
  433                                 c->cri_key[i] ^= HMAC_IPAD_VAL;
  434 
  435                         axf->Init(swd->sw_ictx);
  436                         axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
  437                         axf->Update(swd->sw_ictx, hmac_ipad_buffer,
  438                             HMAC_BLOCK_LEN - (c->cri_klen / 8));
  439 
  440                         for (i = 0; i < c->cri_klen / 8; i++)
  441                                 c->cri_key[i] ^= (HMAC_IPAD_VAL ^
  442                                     HMAC_OPAD_VAL);
  443 
  444                         axf->Init(swd->sw_octx);
  445                         axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
  446                         axf->Update(swd->sw_octx, hmac_opad_buffer,
  447                             HMAC_BLOCK_LEN - (c->cri_klen / 8));
  448 
  449                         for (i = 0; i < c->cri_klen / 8; i++)
  450                                 c->cri_key[i] ^= HMAC_OPAD_VAL;
  451 
  452                         swd->sw_axf = axf;
  453                         swd->sw_alg = c->cri_alg;
  454 
  455                         break;
  456                 default:
  457                         glxsb_crypto_freesession(sesn);
  458                         return (EINVAL);
  459                 }
  460         }
  461 
  462         *sidp = GLXSB_SID(0, sesn);
  463         return (0);
  464 }
  465 
  466 int
  467 glxsb_crypto_freesession(uint64_t tid)
  468 {
  469         struct glxsb_softc *sc = glxsb_sc;
  470         struct swcr_data *swd;
  471         struct auth_hash *axf;
  472         int sesn;
  473         uint32_t sid = ((uint32_t)tid) & 0xffffffff;
  474 
  475         if (sc == NULL)
  476                 return (EINVAL);
  477         sesn = GLXSB_SESSION(sid);
  478         if (sesn >= sc->sc_nsessions)
  479                 return (EINVAL);
  480         if (sc->sc_sessions[sesn].ses_swd) {
  481                 swd = sc->sc_sessions[sesn].ses_swd;
  482                 axf = swd->sw_axf;
  483 
  484                 if (swd->sw_ictx) {
  485                         bzero(swd->sw_ictx, axf->ctxsize);
  486                         free(swd->sw_ictx, M_CRYPTO_DATA);
  487                 }
  488                 if (swd->sw_octx) {
  489                         bzero(swd->sw_octx, axf->ctxsize);
  490                         free(swd->sw_octx, M_CRYPTO_DATA);
  491                 }
  492                 FREE(swd, M_CRYPTO_DATA);
  493         }
  494         bzero(&sc->sc_sessions[sesn], sizeof(sc->sc_sessions[sesn]));
  495         return (0);
  496 }
  497 
  498 /*
  499  * Must be called at splnet() or higher
  500  */
  501 static __inline void
  502 glxsb_aes(struct glxsb_softc *sc, uint32_t control, uint32_t psrc,
  503     uint32_t pdst, void *key, int len, void *iv)
  504 {
  505         uint32_t status;
  506         int i;
  507 
  508         if (len & 0xF) {
  509                 printf("%s: len must be a multiple of 16 (not %d)\n",
  510                     sc->sc_dev.dv_xname, len);
  511                 return;
  512         }
  513 
  514         /* Set the source */
  515         bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_SOURCE_A, psrc);
  516 
  517         /* Set the destination address */
  518         bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_DEST_A, pdst);
  519 
  520         /* Set the data length */
  521         bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_LENGTH_A, len);
  522 
  523         /* Set the IV */
  524         if (iv != NULL) {
  525                 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh,
  526                     SB_CBC_IV, iv, 4);
  527                 control |= SB_CTL_CBC;
  528         }
  529 
  530         /* Set the key */
  531         bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, SB_WKEY, key, 4);
  532 
  533         /* Ask the security block to do it */
  534         bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_CTL_A,
  535             control | SB_CTL_WK | SB_CTL_DC | SB_CTL_SC | SB_CTL_ST);
  536 
  537         /*
  538          * Now wait until it is done.
  539          *
  540          * We do a busy wait.  Obviously the number of iterations of
  541          * the loop required to perform the AES operation depends upon
  542          * the number of bytes to process.
  543          *
  544          * On a 500 MHz Geode LX we see
  545          *
  546          *      length (bytes)  typical max iterations
  547          *          16             12
  548          *          64             22
  549          *         256             59
  550          *        1024            212
  551          *        8192          1,537
  552          *
  553          * Since we have a maximum size of operation defined in
  554          * GLXSB_MAX_AES_LEN, we use this constant to decide how long
  555          * to wait.  Allow an order of magnitude longer than it should
  556          * really take, just in case.
  557          */
  558         for (i = 0; i < GLXSB_MAX_AES_LEN * 10; i++) {
  559                 status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SB_CTL_A);
  560 
  561                 if ((status & SB_CTL_ST) == 0)          /* Done */
  562                         return;
  563         }
  564 
  565         printf("%s: operation failed to complete\n", sc->sc_dev.dv_xname);
  566 }
  567 
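      /*
       * The security block has no hash engine, so HMAC descriptors are
       * handed to the software crypto code; the buffer type passed to
       * swcr_authcompute() is derived from the request flags.
       */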
  568 static int
  569 glxsb_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
  570     struct swcr_data *sw, caddr_t buf)
  571 {
  572         int     type;
  573 
  574         if (crp->crp_flags & CRYPTO_F_IMBUF)
  575                 type = CRYPTO_BUF_MBUF;
  576         else
  577                 type = CRYPTO_BUF_IOV;
  578                 
  579         return (swcr_authcompute(crp, crd, sw, buf, type));
  580 }
  581 
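      /*
       * AES-CBC: data is bounced through the contiguous DMA buffer in
       * chunks of at most GLXSB_MAX_AES_LEN bytes.  The last cipher block
       * of each chunk becomes the IV for the next one, and on encryption
       * it is also saved as the session IV for the next request.
       */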
  582 static int
  583 glxsb_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
  584     struct glxsb_session *ses, struct glxsb_softc *sc, caddr_t buf)
  585 {
  586         char *op_src, *op_dst;
  587         uint32_t op_psrc, op_pdst;
  588         uint8_t op_iv[SB_AES_BLOCK_SIZE], *piv;
  589         int err = 0;
  590         int len, tlen, xlen;
  591         int offset;
  592         uint32_t control;
  593 
  594         if (crd == NULL || (crd->crd_len % SB_AES_BLOCK_SIZE) != 0) {
  595                 err = EINVAL;
  596                 goto out;
  597         }
  598 
  599         /* How much of our buffer will we need to use? */
  600         xlen = crd->crd_len > GLXSB_MAX_AES_LEN ?
  601             GLXSB_MAX_AES_LEN : crd->crd_len;
  602 
  603         /*
  604          * XXX Check if we can have input == output on Geode LX.
  605          * XXX In the meantime, use two separate (adjacent) buffers.
  606          */
  607         op_src = sc->sc_dma.dma_vaddr;
  608         op_dst = sc->sc_dma.dma_vaddr + xlen;
  609 
  610         op_psrc = sc->sc_dma.dma_paddr;
  611         op_pdst = sc->sc_dma.dma_paddr + xlen;
  612 
  613         if (crd->crd_flags & CRD_F_ENCRYPT) {
  614                 control = SB_CTL_ENC;
  615                 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
  616                         bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
  617                 else
  618                         bcopy(ses->ses_iv, op_iv, sizeof(op_iv));
  619 
  620                 if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
  621                         if (crp->crp_flags & CRYPTO_F_IMBUF)
  622                                 m_copyback((struct mbuf *)crp->crp_buf,
  623                                     crd->crd_inject, sizeof(op_iv), op_iv);
  624                         else if (crp->crp_flags & CRYPTO_F_IOV)
  625                                 cuio_copyback((struct uio *)crp->crp_buf,
  626                                     crd->crd_inject, sizeof(op_iv), op_iv);
  627                         else
  628                                 bcopy(op_iv,
  629                                     crp->crp_buf + crd->crd_inject, sizeof(op_iv));
  630                 }
  631         } else {
  632                 control = SB_CTL_DEC;
  633                 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
  634                         bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
  635                 else {
  636                         if (crp->crp_flags & CRYPTO_F_IMBUF)
  637                                 m_copydata((struct mbuf *)crp->crp_buf,
  638                                     crd->crd_inject, sizeof(op_iv), op_iv);
  639                         else if (crp->crp_flags & CRYPTO_F_IOV)
  640                                 cuio_copydata((struct uio *)crp->crp_buf,
  641                                     crd->crd_inject, sizeof(op_iv), op_iv);
  642                         else
  643                                 bcopy(crp->crp_buf + crd->crd_inject,
  644                                     op_iv, sizeof(op_iv));
  645                 }
  646         }
  647 
  648         offset = 0;
  649         tlen = crd->crd_len;
  650         piv = op_iv;
  651 
  652         /* Process the data in GLXSB_MAX_AES_LEN chunks */
  653         while (tlen > 0) {
  654                 len = (tlen > GLXSB_MAX_AES_LEN) ? GLXSB_MAX_AES_LEN : tlen;
  655 
  656                 if (crp->crp_flags & CRYPTO_F_IMBUF)
  657                         m_copydata((struct mbuf *)crp->crp_buf,
  658                             crd->crd_skip + offset, len, op_src);
  659                 else if (crp->crp_flags & CRYPTO_F_IOV)
  660                         cuio_copydata((struct uio *)crp->crp_buf,
  661                             crd->crd_skip + offset, len, op_src);
  662                 else
  663                         bcopy(crp->crp_buf + crd->crd_skip + offset, op_src,
  664                             len);
  665 
  666                 glxsb_dma_pre_op(sc, &sc->sc_dma);
  667 
  668                 glxsb_aes(sc, control, op_psrc, op_pdst, ses->ses_key,
  669                     len, op_iv);
  670 
  671                 glxsb_dma_post_op(sc, &sc->sc_dma);
  672 
  673                 if (crp->crp_flags & CRYPTO_F_IMBUF)
  674                         m_copyback((struct mbuf *)crp->crp_buf,
  675                             crd->crd_skip + offset, len, op_dst);
  676                 else if (crp->crp_flags & CRYPTO_F_IOV)
  677                         cuio_copyback((struct uio *)crp->crp_buf,
  678                             crd->crd_skip + offset, len, op_dst);
  679                 else
  680                         bcopy(op_dst, crp->crp_buf + crd->crd_skip + offset,
  681                             len);
  682 
  683                 offset += len;
  684                 tlen -= len;
  685 
  686                 if (tlen <= 0) {        /* Ideally, just == 0 */
  687                         /* Finished - put the IV in session IV */
  688                         piv = ses->ses_iv;
  689                 }
  690 
  691                 /*
  692                  * Copy out last block for use as next iteration/session IV.
  693                  *
  694                  * piv is set to op_iv[] before the loop starts, but is
  695                  * set to ses->ses_iv if we're going to exit the loop this
  696                  * time.
  697                  */
  698                 if (crd->crd_flags & CRD_F_ENCRYPT) {
  699                         bcopy(op_dst + len - sizeof(op_iv), piv, sizeof(op_iv));
  700                 } else {
  701                         /* Decryption, only need this if another iteration */
  702                         if (tlen > 0) {
  703                                 bcopy(op_src + len - sizeof(op_iv), piv,
  704                                     sizeof(op_iv));
  705                         }
  706                 }
  707         }
  708 
  709         /* All AES processing has now been done. */
  710         bzero(sc->sc_dma.dma_vaddr, xlen * 2);
  711 
  712 out:
  713         return (err);
  714 }
  715 
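      /*
       * Dispatch a request at splnet(): AES-CBC descriptors go to the
       * hardware via glxsb_crypto_encdec(), HMAC descriptors to software
       * via glxsb_crypto_swauth(); crypto_done() reports the result.
       */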
  716 int
  717 glxsb_crypto_process(struct cryptop *crp)
  718 {
  719         struct glxsb_softc *sc = glxsb_sc;
  720         struct glxsb_session *ses;
  721         struct cryptodesc *crd;
  722         int sesn,err = 0;
  723         int s;
  724 
  725         s = splnet();
  726 
  727         if (crp == NULL || crp->crp_callback == NULL) {
  728                 err = EINVAL;
  729                 goto out;
  730         }
  731         crd = crp->crp_desc;
  732         if (crd == NULL) {
  733                 err = EINVAL;
  734                 goto out;
  735         }
  736 
  737         sesn = GLXSB_SESSION(crp->crp_sid);
  738         if (sesn >= sc->sc_nsessions) {
  739                 err = EINVAL;
  740                 goto out;
  741         }
  742         ses = &sc->sc_sessions[sesn];
  743 
  744         for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
  745                 switch (crd->crd_alg) {
  746                 case CRYPTO_AES_CBC:
  747                         if ((err = glxsb_crypto_encdec(crp, crd, ses, sc,
  748                             crp->crp_buf)) != 0)
  749                                 goto out;
  750                         break;
  751 
  752                 case CRYPTO_MD5_HMAC:
  753                 case CRYPTO_SHA1_HMAC:
  754                 case CRYPTO_RIPEMD160_HMAC:
  755                 case CRYPTO_SHA2_256_HMAC:
  756                 case CRYPTO_SHA2_384_HMAC:
  757                 case CRYPTO_SHA2_512_HMAC:
  758                         if ((err = glxsb_crypto_swauth(crp, crd, ses->ses_swd,
  759                             crp->crp_buf)) != 0)
  760                                 goto out;
  761                         break;
  762 
  763                 default:
  764                         err = EINVAL;
  765                         goto out;
  766                 }
  767         }
  768 
  769 out:
  770         crp->crp_etype = err;
  771         crypto_done(crp);
  772         splx(s);
  773         return (err);
  774 }
  775 
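      /*
       * Allocate a single contiguous, 16-byte-aligned DMA segment
       * (create the map, allocate and map the memory, load the map)
       * and record its bus address for the security block.
       */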
  776 int
  777 glxsb_dma_alloc(struct glxsb_softc *sc, int size, struct glxsb_dma_map *dma)
  778 {
  779         int rc;
  780 
  781         dma->dma_nsegs = 1;
  782         dma->dma_size = size;
  783 
  784         rc = bus_dmamap_create(sc->sc_dmat, size, dma->dma_nsegs, size,
  785             0, BUS_DMA_NOWAIT, &dma->dma_map);
  786         if (rc != 0) {
  787                 printf("%s: couldn't create DMA map for %d bytes (%d)\n",
  788                     sc->sc_dev.dv_xname, size, rc);
  789 
  790                 goto fail0;
  791         }
  792 
  793         rc = bus_dmamem_alloc(sc->sc_dmat, size, SB_AES_ALIGN, 0,
  794             &dma->dma_seg, dma->dma_nsegs, &dma->dma_nsegs, BUS_DMA_NOWAIT);
  795         if (rc != 0) {
  796                 printf("%s: couldn't allocate DMA memory of %d bytes (%d)\n",
  797                     sc->sc_dev.dv_xname, size, rc);
  798 
  799                 goto fail1;
  800         }
  801 
  802         rc = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, 1, size,
  803             &dma->dma_vaddr, BUS_DMA_NOWAIT);
  804         if (rc != 0) {
  805                 printf("%s: couldn't map DMA memory for %d bytes (%d)\n",
  806                     sc->sc_dev.dv_xname, size, rc);
  807 
  808                 goto fail2;
  809         }
  810 
  811         rc = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
  812             size, NULL, BUS_DMA_NOWAIT);
  813         if (rc != 0) {
  814                 printf("%s: couldn't load DMA memory for %d bytes (%d)\n",
  815                     sc->sc_dev.dv_xname, size, rc);
  816 
  817                 goto fail3;
  818         }
  819 
  820         dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
  821 
  822         return 0;
  823 
  824 fail3:
  825         bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
  826 fail2:
  827         bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nsegs);
  828 fail1:
  829         bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
  830 fail0:
  831         return rc;
  832 }
  833 
  834 void
  835 glxsb_dma_pre_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
  836 {
  837         bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 0, dma->dma_size,
  838             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  839 }
  840 
  841 void
  842 glxsb_dma_post_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
  843 {
  844         bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 0, dma->dma_size,
  845             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  846 }
  847 
  848 void
  849 glxsb_dma_free(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
  850 {
  851         bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
  852         bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
  853         bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nsegs);
  854         bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
  855 }
  856 
  857 #endif /* CRYPTO */
