source: rtems-libbsd/freebsd/opencrypto/crypto.c @ 2f18089

Last change on this file was 2f18089, checked in by Jennifer Averett <jennifer.averett@…> on 05/17/12 at 13:37:48

Resolved issues with pc386 build.

1#include <freebsd/machine/rtems-bsd-config.h>
2
3/*-
4 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <freebsd/sys/cdefs.h>
28__FBSDID("$FreeBSD$");
29
30/*
31 * Cryptographic Subsystem.
32 *
33 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
34 * that has the copyright shown below.  Very little of the original
35 * code remains.
36 */
37
38/*-
39 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
40 *
41 * This code was written by Angelos D. Keromytis in Athens, Greece, in
42 * February 2000. Network Security Technologies Inc. (NSTI) kindly
43 * supported the development of this code.
44 *
45 * Copyright (c) 2000, 2001 Angelos D. Keromytis
46 *
47 * Permission to use, copy, and modify this software with or without fee
48 * is hereby granted, provided that this entire notice is included in
49 * all source code copies of any software which is or includes a copy or
50 * modification of this software.
51 *
52 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
53 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
54 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
55 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
56 * PURPOSE.
57 */
58
59#define CRYPTO_TIMING                           /* enable timing support */
60
61#include <freebsd/local/opt_ddb.h>
62#include <freebsd/local/opt_kdtrace.h>
63
64#include <freebsd/sys/param.h>
65#include <freebsd/sys/systm.h>
66#include <freebsd/sys/eventhandler.h>
67#include <freebsd/sys/kernel.h>
68#include <freebsd/sys/kthread.h>
69#include <freebsd/sys/lock.h>
70#include <freebsd/sys/module.h>
71#include <freebsd/sys/mutex.h>
72#include <freebsd/sys/malloc.h>
73#include <freebsd/sys/proc.h>
74#include <freebsd/sys/sdt.h>
75#include <freebsd/sys/sysctl.h>
76
77#include <freebsd/ddb/ddb.h>
78
79#include <freebsd/vm/uma.h>
80#include <freebsd/opencrypto/cryptodev.h>
81#include <freebsd/opencrypto/xform.h>                   /* XXX for M_XDATA */
82
83#include <freebsd/sys/kobj.h>
84#include <freebsd/sys/bus.h>
85#include <freebsd/local/cryptodev_if.h>
86
87#if defined(__i386__) || defined(__amd64__)
88#include <freebsd/machine/pcb.h>
89#endif
90
91SDT_PROVIDER_DEFINE(opencrypto);
92
93/*
94 * Crypto drivers register themselves by allocating a slot in the
95 * crypto_drivers table with crypto_get_driverid() and then registering
96 * each algorithm they support with crypto_register() and crypto_kregister().
97 */
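/*
 * Illustrative sketch: a hypothetical hardware driver might obtain a driver
 * id at attach time and advertise its algorithms roughly as follows; the
 * softc field name and the algorithm choices are assumptions, not taken
 * from any real driver.
 *
 *      int32_t cid;
 *
 *      cid = crypto_get_driverid(sc->sc_dev, CRYPTOCAP_F_HARDWARE);
 *      if (cid < 0)
 *              return (ENXIO);
 *      crypto_register(cid, CRYPTO_DES_CBC, 0, 0);
 *      crypto_register(cid, CRYPTO_3DES_CBC, 0, 0);
 *      crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
 *      crypto_kregister(cid, CRK_MOD_EXP, 0);
 *
 * On detach the driver would tear everything down with
 * crypto_unregister_all(cid).
 */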
98static  struct mtx crypto_drivers_mtx;          /* lock on driver table */
99#define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
100#define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
101#define CRYPTO_DRIVER_ASSERT()  mtx_assert(&crypto_drivers_mtx, MA_OWNED)
102
103/*
104 * Crypto device/driver capabilities structure.
105 *
106 * Synchronization:
107 * (d) - protected by CRYPTO_DRIVER_LOCK()
108 * (q) - protected by CRYPTO_Q_LOCK()
110 * Untagged fields are read-only.
110 */
111struct cryptocap {
112        device_t        cc_dev;                 /* (d) device/driver */
113        u_int32_t       cc_sessions;            /* (d) # of sessions */
114        u_int32_t       cc_koperations;         /* (d) # of asym operations */
115        /*
116         * Largest possible operator length (in bits) for each type of
117         * encryption algorithm. XXX not used
118         */
119        u_int16_t       cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
120        u_int8_t        cc_alg[CRYPTO_ALGORITHM_MAX + 1];
121        u_int8_t        cc_kalg[CRK_ALGORITHM_MAX + 1];
122
123        int             cc_flags;               /* (d) flags */
124#define CRYPTOCAP_F_CLEANUP     0x80000000      /* needs resource cleanup */
125        int             cc_qblocked;            /* (q) symmetric q blocked */
126        int             cc_kqblocked;           /* (q) asymmetric q blocked */
127};
128static  struct cryptocap *crypto_drivers = NULL;
129static  int crypto_drivers_num = 0;
130
131/*
132 * There are two queues for crypto requests; one for symmetric (e.g.
133 * cipher) operations and one for asymmetric (e.g. MOD) operations.
134 * A single mutex is used to lock access to both queues.  We could
135 * have one per-queue but having one simplifies handling of block/unblock
136 * operations.
137 */
138static  int crp_sleep = 0;
139static  TAILQ_HEAD(,cryptop) crp_q;             /* request queues */
140static  TAILQ_HEAD(,cryptkop) crp_kq;
141static  struct mtx crypto_q_mtx;
142#define CRYPTO_Q_LOCK()         mtx_lock(&crypto_q_mtx)
143#define CRYPTO_Q_UNLOCK()       mtx_unlock(&crypto_q_mtx)
144
145/*
146 * There are two queues for processing completed crypto requests; one
147 * for the symmetric and one for the asymmetric ops.  We only need one
148 * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
149 * mutex is used to lock access to both queues.  Note that this lock
150 * must be separate from the lock on request queues to ensure driver
151 * callbacks don't generate lock order reversals.
152 */
153static  TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queues */
154static  TAILQ_HEAD(,cryptkop) crp_ret_kq;
155static  struct mtx crypto_ret_q_mtx;
156#define CRYPTO_RETQ_LOCK()      mtx_lock(&crypto_ret_q_mtx)
157#define CRYPTO_RETQ_UNLOCK()    mtx_unlock(&crypto_ret_q_mtx)
158#define CRYPTO_RETQ_EMPTY()     (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
159
160static  uma_zone_t cryptop_zone;
161static  uma_zone_t cryptodesc_zone;
162
163int     crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
164SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
165           &crypto_userasymcrypto, 0,
166           "Enable/disable user-mode access to asymmetric crypto support");
167int     crypto_devallowsoft = 0;        /* only use hardware crypto for asym */
168SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
169           &crypto_devallowsoft, 0,
170           "Enable/disable use of software asym crypto support");
171
172MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
173
174static  void crypto_proc(void);
175static  struct proc *cryptoproc;
176static  void crypto_ret_proc(void);
177static  struct proc *cryptoretproc;
178static  void crypto_destroy(void);
179static  int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
180static  int crypto_kinvoke(struct cryptkop *krp, int flags);
181
182static  struct cryptostats cryptostats;
183SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
184            cryptostats, "Crypto system statistics");
185
186#ifdef CRYPTO_TIMING
187static  int crypto_timing = 0;
188SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
189           &crypto_timing, 0, "Enable/disable crypto timing support");
190#endif
191
192static int
193crypto_init(void)
194{
195        int error;
196
197        mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
198                MTX_DEF|MTX_QUIET);
199
200        TAILQ_INIT(&crp_q);
201        TAILQ_INIT(&crp_kq);
202        mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
203
204        TAILQ_INIT(&crp_ret_q);
205        TAILQ_INIT(&crp_ret_kq);
206        mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
207
208        cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
209                                    0, 0, 0, 0,
210                                    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
211        cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
212                                    0, 0, 0, 0,
213                                    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
214        if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
215                printf("crypto_init: cannot setup crypto zones\n");
216                error = ENOMEM;
217                goto bad;
218        }
219
220        crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
221        crypto_drivers = malloc(crypto_drivers_num *
222            sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
223        if (crypto_drivers == NULL) {
224                printf("crypto_init: cannot setup crypto drivers\n");
225                error = ENOMEM;
226                goto bad;
227        }
228
229        error = kproc_create((void (*)(void *)) crypto_proc, NULL,
230                    &cryptoproc, 0, 0, "crypto");
231        if (error) {
232                printf("crypto_init: cannot start crypto thread; error %d",
233                        error);
234                goto bad;
235        }
236
237        error = kproc_create((void (*)(void *)) crypto_ret_proc, NULL,
238                    &cryptoretproc, 0, 0, "crypto returns");
239        if (error) {
240                printf("crypto_init: cannot start cryptoret thread; error %d",
241                        error);
242                goto bad;
243        }
244        return 0;
245bad:
246        crypto_destroy();
247        return error;
248}
249
250/*
251 * Signal a crypto thread to terminate.  We use the driver
252 * table lock to synchronize the sleep/wakeups so that we
253 * are sure the threads have terminated before we release
254 * the data structures they use.  See crypto_finis below
255 * for the other half of this song-and-dance.
256 */
257static void
258crypto_terminate(struct proc **pp, void *q)
259{
260        struct proc *p;
261
262        mtx_assert(&crypto_drivers_mtx, MA_OWNED);
263        p = *pp;
264        *pp = NULL;
265        if (p) {
266                wakeup_one(q);
267                PROC_LOCK(p);           /* NB: ensure we don't miss wakeup */
268                CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
269                msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
270                PROC_UNLOCK(p);
271                CRYPTO_DRIVER_LOCK();
272        }
273}
274
275static void
276crypto_destroy(void)
277{
278        /*
279         * Terminate any crypto threads.
280         */
281        CRYPTO_DRIVER_LOCK();
282        crypto_terminate(&cryptoproc, &crp_q);
283        crypto_terminate(&cryptoretproc, &crp_ret_q);
284        CRYPTO_DRIVER_UNLOCK();
285
286        /* XXX flush queues??? */
287
288        /*
289         * Reclaim dynamically allocated resources.
290         */
291        if (crypto_drivers != NULL)
292                free(crypto_drivers, M_CRYPTO_DATA);
293
294        if (cryptodesc_zone != NULL)
295                uma_zdestroy(cryptodesc_zone);
296        if (cryptop_zone != NULL)
297                uma_zdestroy(cryptop_zone);
298        mtx_destroy(&crypto_q_mtx);
299        mtx_destroy(&crypto_ret_q_mtx);
300        mtx_destroy(&crypto_drivers_mtx);
301}
302
303static struct cryptocap *
304crypto_checkdriver(u_int32_t hid)
305{
306        if (crypto_drivers == NULL)
307                return NULL;
308        return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
309}
310
311/*
312 * Compare a driver's list of supported algorithms against another
313 * list; return non-zero if all algorithms are supported.
314 */
315static int
316driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
317{
318        const struct cryptoini *cr;
319
320        /* See if all the algorithms are supported. */
321        for (cr = cri; cr; cr = cr->cri_next)
322                if (cap->cc_alg[cr->cri_alg] == 0)
323                        return 0;
324        return 1;
325}
326
327/*
328 * Select a driver for a new session that supports the specified
329 * algorithms and, optionally, is constrained according to the flags.
330 * The algorithm we use here is pretty stupid; just use the
331 * first driver that supports all the algorithms we need. If there
332 * are multiple suitable drivers we choose the driver with the fewest active
333 * sessions.  We prefer hardware-backed drivers to software ones.
334 *
335 * XXX We need more smarts here (in real life too, but that's
336 * XXX another story altogether).
337 */
338static struct cryptocap *
339crypto_select_driver(const struct cryptoini *cri, int flags)
340{
341        struct cryptocap *cap, *best;
342        int match, hid;
343
344        CRYPTO_DRIVER_ASSERT();
345
346        /*
347         * Look first for hardware crypto devices if permitted.
348         */
349        if (flags & CRYPTOCAP_F_HARDWARE)
350                match = CRYPTOCAP_F_HARDWARE;
351        else
352                match = CRYPTOCAP_F_SOFTWARE;
353        best = NULL;
354again:
355        for (hid = 0; hid < crypto_drivers_num; hid++) {
356                cap = &crypto_drivers[hid];
357                /*
358                 * If it's not initialized, is in the process of
359                 * going away, or is not appropriate (hardware
360                 * or software based on match), then skip.
361                 */
362                if (cap->cc_dev == NULL ||
363                    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
364                    (cap->cc_flags & match) == 0)
365                        continue;
366
367                /* verify all the algorithms are supported. */
368                if (driver_suitable(cap, cri)) {
369                        if (best == NULL ||
370                            cap->cc_sessions < best->cc_sessions)
371                                best = cap;
372                }
373        }
374        if (best != NULL)
375                return best;
376        if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
377                /* sort of an Algol 68-style for loop */
378                match = CRYPTOCAP_F_SOFTWARE;
379                goto again;
380        }
381        return best;
382}
383
384/*
385 * Create a new session.  The crid argument specifies a crypto
386 * driver to use or constraints on a driver to select (hardware
387 * only, software only, either).  Whatever driver is selected
388 * must be capable of the requested crypto algorithms.
389 */
390int
391crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
392{
393        struct cryptocap *cap;
394        u_int32_t hid, lid;
395        int err;
396
397        CRYPTO_DRIVER_LOCK();
398        if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
399                /*
400                 * Use specified driver; verify it is capable.
401                 */
402                cap = crypto_checkdriver(crid);
403                if (cap != NULL && !driver_suitable(cap, cri))
404                        cap = NULL;
405        } else {
406                /*
407                 * No requested driver; select based on crid flags.
408                 */
409                cap = crypto_select_driver(cri, crid);
410                /*
411                 * if NULL then can't do everything in one session.
412                 * XXX Fix this. We need to inject a "virtual" session
413                 * XXX layer right about here.
414                 */
415        }
416        if (cap != NULL) {
417                /* Call the driver initialization routine. */
418                hid = cap - crypto_drivers;
419                lid = hid;              /* Pass the driver ID. */
420                err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
421                if (err == 0) {
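                        /*
                         * Pack the 64-bit session id: the upper 32 bits hold
                         * the driver capability byte and the driver table
                         * index (hid), the lower 32 bits hold the driver's
                         * own session id (lid).  CRYPTO_SESID2HID() and
                         * CRYPTO_SESID2CAPS() recover these fields later.
                         */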
422                        (*sid) = (cap->cc_flags & 0xff000000)
423                               | (hid & 0x00ffffff);
424                        (*sid) <<= 32;
425                        (*sid) |= (lid & 0xffffffff);
426                        cap->cc_sessions++;
427                }
428        } else
429                err = EINVAL;
430        CRYPTO_DRIVER_UNLOCK();
431        return err;
432}
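/*
 * Illustrative consumer sketch: a kernel consumer might bind a session and
 * submit requests against it roughly as follows.  Error handling, IV/buffer
 * setup and the callback body are elided; the cipher choice, key length and
 * the my_done() callback name are assumptions made for illustration only.
 *
 *      struct cryptoini cri;
 *      struct cryptop *crp;
 *      u_int64_t sid;
 *
 *      bzero(&cri, sizeof(cri));
 *      cri.cri_alg = CRYPTO_3DES_CBC;
 *      cri.cri_klen = 192;                     // key length in bits
 *      cri.cri_key = (caddr_t) key;
 *      if (crypto_newsession(&sid, &cri,
 *          CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) != 0)
 *              return (EINVAL);
 *
 *      crp = crypto_getreq(1);                 // one descriptor
 *      crp->crp_sid = sid;
 *      crp->crp_ilen = len;
 *      crp->crp_buf = (caddr_t) buf;
 *      crp->crp_callback = my_done;            // run from the return thread
 *      crp->crp_desc->crd_alg = CRYPTO_3DES_CBC;
 *      crp->crp_desc->crd_skip = 0;
 *      crp->crp_desc->crd_len = len;
 *      crp->crp_desc->crd_flags = CRD_F_ENCRYPT;
 *      crypto_dispatch(crp);
 *
 * my_done() checks crp->crp_etype (resubmitting on EAGAIN) and then calls
 * crypto_freereq(crp); when the consumer is finished with the session it
 * calls crypto_freesession(sid).
 */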
433
434static void
435crypto_remove(struct cryptocap *cap)
436{
437
438        mtx_assert(&crypto_drivers_mtx, MA_OWNED);
439        if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
440                bzero(cap, sizeof(*cap));
441}
442
443/*
444 * Delete an existing session (or a reserved session on an unregistered
445 * driver).
446 */
447int
448crypto_freesession(u_int64_t sid)
449{
450        struct cryptocap *cap;
451        u_int32_t hid;
452        int err;
453
454        CRYPTO_DRIVER_LOCK();
455
456        if (crypto_drivers == NULL) {
457                err = EINVAL;
458                goto done;
459        }
460
461        /* Determine the driver table index (hid) from the session id. */
462        hid = CRYPTO_SESID2HID(sid);
463
464        if (hid >= crypto_drivers_num) {
465                err = ENOENT;
466                goto done;
467        }
468        cap = &crypto_drivers[hid];
469
470        if (cap->cc_sessions)
471                cap->cc_sessions--;
472
473        /* Call the driver cleanup routine, if available. */
474        err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
475
476        if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
477                crypto_remove(cap);
478
479done:
480        CRYPTO_DRIVER_UNLOCK();
481        return err;
482}
483
484/*
485 * Return an unused driver id.  Used by drivers prior to registering
486 * support for the algorithms they handle.
487 */
488int32_t
489crypto_get_driverid(device_t dev, int flags)
490{
491        struct cryptocap *newdrv;
492        int i;
493
494        if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
495                printf("%s: no flags specified when registering driver\n",
496                    device_get_nameunit(dev));
497                return -1;
498        }
499
500        CRYPTO_DRIVER_LOCK();
501
502        for (i = 0; i < crypto_drivers_num; i++) {
503                if (crypto_drivers[i].cc_dev == NULL &&
504                    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
505                        break;
506                }
507        }
508
509        /* Out of entries, allocate some more. */
510        if (i == crypto_drivers_num) {
511                /* Be careful about wrap-around. */
512                if (2 * crypto_drivers_num <= crypto_drivers_num) {
513                        CRYPTO_DRIVER_UNLOCK();
514                        printf("crypto: driver count wraparound!\n");
515                        return -1;
516                }
517
518                newdrv = malloc(2 * crypto_drivers_num *
519                    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
520                if (newdrv == NULL) {
521                        CRYPTO_DRIVER_UNLOCK();
522                        printf("crypto: no space to expand driver table!\n");
523                        return -1;
524                }
525
526                bcopy(crypto_drivers, newdrv,
527                    crypto_drivers_num * sizeof(struct cryptocap));
528
529                crypto_drivers_num *= 2;
530
531                free(crypto_drivers, M_CRYPTO_DATA);
532                crypto_drivers = newdrv;
533        }
534
535        /* NB: state is zero'd on free */
536        crypto_drivers[i].cc_sessions = 1;      /* Mark */
537        crypto_drivers[i].cc_dev = dev;
538        crypto_drivers[i].cc_flags = flags;
539        if (bootverbose)
540                printf("crypto: assign %s driver id %u, flags %u\n",
541                    device_get_nameunit(dev), i, flags);
542
543        CRYPTO_DRIVER_UNLOCK();
544
545        return i;
546}
547
548/*
549 * Lookup a driver by name.  We match against the full device
550 * name and unit, and against just the name.  The latter gives
551 * us simple wildcarding by device name.  On success return the
552 * driver/hardware identifier; otherwise return -1.
553 */
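/*
 * For example (using a hypothetical driver name purely for illustration),
 * crypto_find_driver("hifn0") would select only unit 0, while
 * crypto_find_driver("hifn") would match the first registered hifn device.
 */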
554int
555crypto_find_driver(const char *match)
556{
557        int i, len = strlen(match);
558
559        CRYPTO_DRIVER_LOCK();
560        for (i = 0; i < crypto_drivers_num; i++) {
561                device_t dev = crypto_drivers[i].cc_dev;
562                if (dev == NULL ||
563                    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
564                        continue;
565                if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
566                    strncmp(match, device_get_name(dev), len) == 0)
567                        break;
568        }
569        CRYPTO_DRIVER_UNLOCK();
570        return i < crypto_drivers_num ? i : -1;
571}
572
573/*
574 * Return the device_t for the specified driver or NULL
575 * if the driver identifier is invalid.
576 */
577device_t
578crypto_find_device_byhid(int hid)
579{
580        struct cryptocap *cap = crypto_checkdriver(hid);
581        return cap != NULL ? cap->cc_dev : NULL;
582}
583
584/*
585 * Return the device/driver capabilities.
586 */
587int
588crypto_getcaps(int hid)
589{
590        struct cryptocap *cap = crypto_checkdriver(hid);
591        return cap != NULL ? cap->cc_flags : 0;
592}
593
594/*
595 * Register support for a key-related algorithm.  This routine
596 * is called once for each algorithm supported by a driver.
597 */
598int
599crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
600{
601        struct cryptocap *cap;
602        int err;
603
604        CRYPTO_DRIVER_LOCK();
605
606        cap = crypto_checkdriver(driverid);
607        if (cap != NULL &&
608            (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
609                /*
610                 * XXX Do some performance testing to determine placing.
611                 * XXX We probably need an auxiliary data structure that
612                 * XXX describes relative performances.
613                 */
614
615                cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
616                if (bootverbose)
617                        printf("crypto: %s registers key alg %u flags %u\n"
618                                , device_get_nameunit(cap->cc_dev)
619                                , kalg
620                                , flags
621                        );
622                err = 0;
623        } else
624                err = EINVAL;
625
626        CRYPTO_DRIVER_UNLOCK();
627        return err;
628}
629
630/*
631 * Register support for a non-key-related algorithm.  This routine
632 * is called once for each such algorithm supported by a driver.
633 */
634int
635crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
636    u_int32_t flags)
637{
638        struct cryptocap *cap;
639        int err;
640
641        CRYPTO_DRIVER_LOCK();
642
643        cap = crypto_checkdriver(driverid);
644        /* NB: algorithms are in the range [1..max] */
645        if (cap != NULL &&
646            (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
647                /*
648                 * XXX Do some performance testing to determine placing.
649                 * XXX We probably need an auxiliary data structure that
650                 * XXX describes relative performances.
651                 */
652
653                cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
654                cap->cc_max_op_len[alg] = maxoplen;
655                if (bootverbose)
656                        printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
657                                , device_get_nameunit(cap->cc_dev)
658                                , alg
659                                , flags
660                                , maxoplen
661                        );
662                cap->cc_sessions = 0;           /* Unmark */
663                err = 0;
664        } else
665                err = EINVAL;
666
667        CRYPTO_DRIVER_UNLOCK();
668        return err;
669}
670
671static void
672driver_finis(struct cryptocap *cap)
673{
674        u_int32_t ses, kops;
675
676        CRYPTO_DRIVER_ASSERT();
677
678        ses = cap->cc_sessions;
679        kops = cap->cc_koperations;
680        bzero(cap, sizeof(*cap));
681        if (ses != 0 || kops != 0) {
682                /*
683                 * If there are pending sessions,
684                 * just mark as invalid.
685                 */
686                cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
687                cap->cc_sessions = ses;
688                cap->cc_koperations = kops;
689        }
690}
691
692/*
693 * Unregister a crypto driver. If there are pending sessions using it,
694 * leave enough information around so that subsequent calls using those
695 * sessions will correctly detect the driver has been unregistered and
696 * reroute requests.
697 */
698int
699crypto_unregister(u_int32_t driverid, int alg)
700{
701        struct cryptocap *cap;
702        int i, err;
703
704        CRYPTO_DRIVER_LOCK();
705        cap = crypto_checkdriver(driverid);
706        if (cap != NULL &&
707            (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
708            cap->cc_alg[alg] != 0) {
709                cap->cc_alg[alg] = 0;
710                cap->cc_max_op_len[alg] = 0;
711
712                /* Was this the last algorithm ? */
713                for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
714                        if (cap->cc_alg[i] != 0)
715                                break;
716
717                if (i == CRYPTO_ALGORITHM_MAX + 1)
718                        driver_finis(cap);
719                err = 0;
720        } else
721                err = EINVAL;
722        CRYPTO_DRIVER_UNLOCK();
723
724        return err;
725}
726
727/*
728 * Unregister all algorithms associated with a crypto driver.
729 * If there are pending sessions using it, leave enough information
730 * around so that subsequent calls using those sessions will
731 * correctly detect the driver has been unregistered and reroute
732 * requests.
733 */
734int
735crypto_unregister_all(u_int32_t driverid)
736{
737        struct cryptocap *cap;
738        int err;
739
740        CRYPTO_DRIVER_LOCK();
741        cap = crypto_checkdriver(driverid);
742        if (cap != NULL) {
743                driver_finis(cap);
744                err = 0;
745        } else
746                err = EINVAL;
747        CRYPTO_DRIVER_UNLOCK();
748
749        return err;
750}
751
752/*
753 * Clear blockage on a driver.  The what parameter indicates whether
754 * the driver is now ready for cryptop's and/or cryptokop's.
755 */
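/*
 * Illustrative note: a driver that earlier returned ERESTART typically calls
 * this from its interrupt/completion path once resources are available
 * again, e.g.
 *
 *      crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *
 * where sc->sc_cid is the id obtained from crypto_get_driverid(); the softc
 * field name is an assumption made for illustration.
 */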
756int
757crypto_unblock(u_int32_t driverid, int what)
758{
759        struct cryptocap *cap;
760        int err;
761
762        CRYPTO_Q_LOCK();
763        cap = crypto_checkdriver(driverid);
764        if (cap != NULL) {
765                if (what & CRYPTO_SYMQ)
766                        cap->cc_qblocked = 0;
767                if (what & CRYPTO_ASYMQ)
768                        cap->cc_kqblocked = 0;
769                if (crp_sleep)
770                        wakeup_one(&crp_q);
771                err = 0;
772        } else
773                err = EINVAL;
774        CRYPTO_Q_UNLOCK();
775
776        return err;
777}
778
779/*
780 * Add a crypto request to a queue, to be processed by the kernel thread.
781 */
782int
783crypto_dispatch(struct cryptop *crp)
784{
785        struct cryptocap *cap;
786        u_int32_t hid;
787        int result;
788
789        cryptostats.cs_ops++;
790
791#ifdef CRYPTO_TIMING
792        if (crypto_timing)
793                binuptime(&crp->crp_tstamp);
794#endif
795
796        hid = CRYPTO_SESID2HID(crp->crp_sid);
797
798        if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
799                /*
800                 * Caller marked the request to be processed
801                 * immediately; dispatch it directly to the
802                 * driver unless the driver is currently blocked.
803                 */
804                cap = crypto_checkdriver(hid);
805                /* Driver cannot disappear while there is an active session. */
806                KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
807                if (!cap->cc_qblocked) {
808                        result = crypto_invoke(cap, crp, 0);
809                        if (result != ERESTART)
810                                return (result);
811                        /*
812                         * The driver ran out of resources, put the request on
813                         * the queue.
814                         */
815                }
816        }
817        CRYPTO_Q_LOCK();
818        TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
819        if (crp_sleep)
820                wakeup_one(&crp_q);
821        CRYPTO_Q_UNLOCK();
822        return 0;
823}
824
825/*
826 * Add an asymmetric crypto request to a queue,
827 * to be processed by the kernel thread.
828 */
829int
830crypto_kdispatch(struct cryptkop *krp)
831{
832        int error;
833
834        cryptostats.cs_kops++;
835
836        error = crypto_kinvoke(krp, krp->krp_crid);
837        if (error == ERESTART) {
838                CRYPTO_Q_LOCK();
839                TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
840                if (crp_sleep)
841                        wakeup_one(&crp_q);
842                CRYPTO_Q_UNLOCK();
843                error = 0;
844        }
845        return error;
846}
847
848/*
849 * Verify a driver is suitable for the specified operation.
850 */
851static __inline int
852kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
853{
854        return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
855}
856
857/*
858 * Select a driver for an asym operation.  The driver must
859 * support the necessary algorithm.  The caller can constrain
860 * which device is selected with the flags parameter.  The
861 * algorithm we use here is pretty stupid; just use the first
862 * driver that supports the algorithms we need. If there are
863 * multiple suitable drivers we choose the driver with the
864 * fewest active operations.  We prefer hardware-backed
865 * drivers to software ones when either may be used.
866 */
867static struct cryptocap *
868crypto_select_kdriver(const struct cryptkop *krp, int flags)
869{
870        struct cryptocap *cap, *best, *blocked;
871        int match, hid;
872
873        CRYPTO_DRIVER_ASSERT();
874
875        /*
876         * Look first for hardware crypto devices if permitted.
877         */
878        if (flags & CRYPTOCAP_F_HARDWARE)
879                match = CRYPTOCAP_F_HARDWARE;
880        else
881                match = CRYPTOCAP_F_SOFTWARE;
882        best = NULL;
883        blocked = NULL;
884again:
885        for (hid = 0; hid < crypto_drivers_num; hid++) {
886                cap = &crypto_drivers[hid];
887                /*
888                 * If it's not initialized, is in the process of
889                 * going away, or is not appropriate (hardware
890                 * or software based on match), then skip.
891                 */
892                if (cap->cc_dev == NULL ||
893                    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
894                    (cap->cc_flags & match) == 0)
895                        continue;
896
897                /* verify all the algorithms are supported. */
898                if (kdriver_suitable(cap, krp)) {
899                        if (best == NULL ||
900                            cap->cc_koperations < best->cc_koperations)
901                                best = cap;
902                }
903        }
904        if (best != NULL)
905                return best;
906        if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
907                /* sort of an Algol 68-style for loop */
908                match = CRYPTOCAP_F_SOFTWARE;
909                goto again;
910        }
911        return best;
912}
913
914/*
915 * Dispatch an asymmetric crypto request.
916 */
917static int
918crypto_kinvoke(struct cryptkop *krp, int crid)
919{
920        struct cryptocap *cap = NULL;
921        int error;
922
923        KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
924        KASSERT(krp->krp_callback != NULL,
925            ("%s: krp->krp_callback == NULL", __func__));
926
927        CRYPTO_DRIVER_LOCK();
928        if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
929                cap = crypto_checkdriver(crid);
930                if (cap != NULL) {
931                        /*
932                         * Driver present, it must support the necessary
933                         * algorithm and, if s/w drivers are excluded,
934                         * it must be registered as hardware-backed.
935                         */
936                        if (!kdriver_suitable(cap, krp) ||
937                            (!crypto_devallowsoft &&
938                             (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
939                                cap = NULL;
940                }
941        } else {
942                /*
943                 * No requested driver; select based on crid flags.
944                 */
945                if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
946                        crid &= ~CRYPTOCAP_F_SOFTWARE;
947                cap = crypto_select_kdriver(krp, crid);
948        }
949        if (cap != NULL && !cap->cc_kqblocked) {
950                krp->krp_hid = cap - crypto_drivers;
951                cap->cc_koperations++;
952                CRYPTO_DRIVER_UNLOCK();
953                error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
954                CRYPTO_DRIVER_LOCK();
955                if (error == ERESTART) {
956                        cap->cc_koperations--;
957                        CRYPTO_DRIVER_UNLOCK();
958                        return (error);
959                }
960        } else {
961                /*
962                 * NB: cap is !NULL if device is blocked; in
963                 *     that case return ERESTART so the operation
964                 *     is resubmitted if possible.
965                 */
966                error = (cap == NULL) ? ENODEV : ERESTART;
967        }
968        CRYPTO_DRIVER_UNLOCK();
969
970        if (error) {
971                krp->krp_status = error;
972                crypto_kdone(krp);
973        }
974        return 0;
975}
976
977#ifdef CRYPTO_TIMING
978static void
979crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
980{
981        struct bintime now, delta;
982        struct timespec t;
983        uint64_t u;
984
985        binuptime(&now);
986        u = now.frac;
987        delta.frac = now.frac - bt->frac;
988        delta.sec = now.sec - bt->sec;
989        if (u < delta.frac)
990                delta.sec--;
991        bintime2timespec(&delta, &t);
992        timespecadd(&ts->acc, &t);
993        if (timespeccmp(&t, &ts->min, <))
994                ts->min = t;
995        if (timespeccmp(&t, &ts->max, >))
996                ts->max = t;
997        ts->count++;
998
999        *bt = now;
1000}
1001#endif
1002
1003/*
1004 * Dispatch a crypto request to the appropriate crypto devices.
1005 */
1006static int
1007crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1008{
1009
1010        KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1011        KASSERT(crp->crp_callback != NULL,
1012            ("%s: crp->crp_callback == NULL", __func__));
1013        KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
1014
1015#ifdef CRYPTO_TIMING
1016        if (crypto_timing)
1017                crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
1018#endif
1019        if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1020                struct cryptodesc *crd;
1021                u_int64_t nid;
1022
1023                /*
1024                 * Driver has unregistered; migrate the session and return
1025                 * an error to the caller so they'll resubmit the op.
1026                 *
1027                 * XXX: What if there are more already queued requests for this
1028                 *      session?
1029                 */
1030                crypto_freesession(crp->crp_sid);
1031
1032                for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
1033                        crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
1034
1035                /* XXX propagate flags from initial session? */
1036                if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
1037                    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1038                        crp->crp_sid = nid;
1039
1040                crp->crp_etype = EAGAIN;
1041                crypto_done(crp);
1042                return 0;
1043        } else {
1044                /*
1045                 * Invoke the driver to process the request.
1046                 */
1047                return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
1048        }
1049}
1050
1051/*
1052 * Release a set of crypto descriptors.
1053 */
1054void
1055crypto_freereq(struct cryptop *crp)
1056{
1057        struct cryptodesc *crd;
1058
1059        if (crp == NULL)
1060                return;
1061
1062#ifdef DIAGNOSTIC
1063        {
1064                struct cryptop *crp2;
1065
1066                CRYPTO_Q_LOCK();
1067                TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1068                        KASSERT(crp2 != crp,
1069                            ("Freeing cryptop from the crypto queue (%p).",
1070                            crp));
1071                }
1072                CRYPTO_Q_UNLOCK();
1073                CRYPTO_RETQ_LOCK();
1074                TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
1075                        KASSERT(crp2 != crp,
1076                            ("Freeing cryptop from the return queue (%p).",
1077                            crp));
1078                }
1079                CRYPTO_RETQ_UNLOCK();
1080        }
1081#endif
1082
1083        while ((crd = crp->crp_desc) != NULL) {
1084                crp->crp_desc = crd->crd_next;
1085                uma_zfree(cryptodesc_zone, crd);
1086        }
1087        uma_zfree(cryptop_zone, crp);
1088}
1089
1090/*
1091 * Acquire a set of crypto descriptors.
1092 */
1093struct cryptop *
1094crypto_getreq(int num)
1095{
1096        struct cryptodesc *crd;
1097        struct cryptop *crp;
1098
1099        crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
1100        if (crp != NULL) {
1101                while (num--) {
1102                        crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
1103                        if (crd == NULL) {
1104                                crypto_freereq(crp);
1105                                return NULL;
1106                        }
1107
1108                        crd->crd_next = crp->crp_desc;
1109                        crp->crp_desc = crd;
1110                }
1111        }
1112        return crp;
1113}
1114
1115/*
1116 * Invoke the callback on behalf of the driver.
1117 */
1118void
1119crypto_done(struct cryptop *crp)
1120{
1121        KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
1122                ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
1123        crp->crp_flags |= CRYPTO_F_DONE;
1124        if (crp->crp_etype != 0)
1125                cryptostats.cs_errs++;
1126#ifdef CRYPTO_TIMING
1127        if (crypto_timing)
1128                crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
1129#endif
1130        /*
1131         * CBIMM means unconditionally do the callback immediately;
1132         * CBIFSYNC means do the callback immediately only if the
1133         * operation was done synchronously.  Both are used to avoid
1134         * doing extraneous context switches; the latter is mostly
1135         * used with the software crypto driver.
1136         */
1137        if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
1138            ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
1139             (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
1140                /*
1141                 * Do the callback directly.  This is ok when the
1142                 * callback routine does very little (e.g. the
1143                 * /dev/crypto callback method just does a wakeup).
1144                 */
1145#ifdef CRYPTO_TIMING
1146                if (crypto_timing) {
1147                        /*
1148                         * NB: We must copy the timestamp before
1149                         * doing the callback as the cryptop is
1150                         * likely to be reclaimed.
1151                         */
1152                        struct bintime t = crp->crp_tstamp;
1153                        crypto_tstat(&cryptostats.cs_cb, &t);
1154                        crp->crp_callback(crp);
1155                        crypto_tstat(&cryptostats.cs_finis, &t);
1156                } else
1157#endif
1158                        crp->crp_callback(crp);
1159        } else {
1160                /*
1161                 * Normal case; queue the callback for the thread.
1162                 */
1163                CRYPTO_RETQ_LOCK();
1164                if (CRYPTO_RETQ_EMPTY())
1165                        wakeup_one(&crp_ret_q); /* shared wait channel */
1166                TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
1167                CRYPTO_RETQ_UNLOCK();
1168        }
1169}
1170
1171/*
1172 * Invoke the callback on behalf of the driver.
1173 */
1174void
1175crypto_kdone(struct cryptkop *krp)
1176{
1177        struct cryptocap *cap;
1178
1179        if (krp->krp_status != 0)
1180                cryptostats.cs_kerrs++;
1181        CRYPTO_DRIVER_LOCK();
1182        /* XXX: What if driver is loaded in the meantime? */
1183        if (krp->krp_hid < crypto_drivers_num) {
1184                cap = &crypto_drivers[krp->krp_hid];
1185                cap->cc_koperations--;
1186                KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
1187                if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1188                        crypto_remove(cap);
1189        }
1190        CRYPTO_DRIVER_UNLOCK();
1191        CRYPTO_RETQ_LOCK();
1192        if (CRYPTO_RETQ_EMPTY())
1193                wakeup_one(&crp_ret_q);         /* shared wait channel */
1194        TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
1195        CRYPTO_RETQ_UNLOCK();
1196}
1197
1198int
1199crypto_getfeat(int *featp)
1200{
1201        int hid, kalg, feat = 0;
1202
1203        CRYPTO_DRIVER_LOCK();
1204        for (hid = 0; hid < crypto_drivers_num; hid++) {
1205                const struct cryptocap *cap = &crypto_drivers[hid];
1206
1207                if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1208                    !crypto_devallowsoft) {
1209                        continue;
1210                }
1211                for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1212                        if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
1213                                feat |=  1 << kalg;
1214        }
1215        CRYPTO_DRIVER_UNLOCK();
1216        *featp = feat;
1217        return (0);
1218}
1219
1220/*
1221 * Terminate a thread at module unload.  The process that
1222 * initiated this is waiting for us to signal that we're gone;
1223 * wake it up and exit.  We use the driver table lock to ensure
1224 * we don't do the wakeup before they're waiting.  There is no
1225 * race here because the waiter sleeps on the proc lock for the
1226 * thread so it gets notified at the right time because of an
1227 * extra wakeup that's done in exit1().
1228 */
1229static void
1230crypto_finis(void *chan)
1231{
1232        CRYPTO_DRIVER_LOCK();
1233        wakeup_one(chan);
1234        CRYPTO_DRIVER_UNLOCK();
1235        kproc_exit(0);
1236}
1237
1238/*
1239 * Crypto thread, dispatches crypto requests.
1240 */
1241static void
1242crypto_proc(void)
1243{
1244        struct cryptop *crp, *submit;
1245        struct cryptkop *krp;
1246        struct cryptocap *cap;
1247        u_int32_t hid;
1248        int result, hint;
1249
1250#ifndef __rtems__
1251#if defined(__i386__) || defined(__amd64__)
1252        fpu_kern_thread(FPU_KERN_NORMAL);
1253#endif
1254#endif /* __rtems__ */
1255
1256        CRYPTO_Q_LOCK();
1257        for (;;) {
1258                /*
1259                 * Find the first element in the queue that can be
1260                 * processed and look-ahead to see if multiple ops
1261                 * are ready for the same driver.
1262                 */
1263                submit = NULL;
1264                hint = 0;
1265                TAILQ_FOREACH(crp, &crp_q, crp_next) {
1266                        hid = CRYPTO_SESID2HID(crp->crp_sid);
1267                        cap = crypto_checkdriver(hid);
1268                        /*
1269                         * Driver cannot disappear while there is an active
1270                         * session.
1271                         */
1272                        KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1273                            __func__, __LINE__));
1274                        if (cap == NULL || cap->cc_dev == NULL) {
1275                                /* Op needs to be migrated, process it. */
1276                                if (submit == NULL)
1277                                        submit = crp;
1278                                break;
1279                        }
1280                        if (!cap->cc_qblocked) {
1281                                if (submit != NULL) {
1282                                        /*
1283                                         * We stop on finding another op,
1284                                         * regardless whether its for the same
1285                                         * driver or not.  We could keep
1286                                         * searching the queue but it might be
1287                                         * better to just use a per-driver
1288                                         * queue instead.
1289                                         */
1290                                        if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
1291                                                hint = CRYPTO_HINT_MORE;
1292                                        break;
1293                                } else {
1294                                        submit = crp;
1295                                        if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1296                                                break;
1297                                        /* keep scanning in case more are queued */
1298                                }
1299                        }
1300                }
1301                if (submit != NULL) {
1302                        TAILQ_REMOVE(&crp_q, submit, crp_next);
1303                        hid = CRYPTO_SESID2HID(submit->crp_sid);
1304                        cap = crypto_checkdriver(hid);
1305                        KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1306                            __func__, __LINE__));
1307                        result = crypto_invoke(cap, submit, hint);
1308                        if (result == ERESTART) {
1309                                /*
1310                                 * The driver ran out of resources, mark the
1311                                 * driver ``blocked'' for cryptop's and put
1312                                 * the request back in the queue.  It would be
1313                                 * best to put the request back where we got
1314                                 * it but that's hard so for now we put it
1315                                 * at the front.  This should be ok; putting
1316                                 * it at the end does not work.
1317                                 */
1318                                /* XXX validate sid again? */
1319                                crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1320                                TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1321                                cryptostats.cs_blocks++;
1322                        }
1323                }
1324
1325                /* As above, but for key ops */
1326                TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1327                        cap = crypto_checkdriver(krp->krp_hid);
1328                        if (cap == NULL || cap->cc_dev == NULL) {
1329                                /*
1330                                 * Operation needs to be migrated, invalidate
1331                                 * the assigned device so it will reselect a
1332                                 * new one below.  Propagate the original
1333                                 * crid selection flags if supplied.
1334                                 */
1335                                krp->krp_hid = krp->krp_crid &
1336                                    (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
1337                                if (krp->krp_hid == 0)
1338                                        krp->krp_hid =
1339                                    CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
1340                                break;
1341                        }
1342                        if (!cap->cc_kqblocked)
1343                                break;
1344                }
1345                if (krp != NULL) {
1346                        TAILQ_REMOVE(&crp_kq, krp, krp_next);
1347                        result = crypto_kinvoke(krp, krp->krp_hid);
1348                        if (result == ERESTART) {
1349                                /*
1350                                 * The driver ran out of resources, mark the
1351                                 * driver ``blocked'' for cryptkop's and put
1352                                 * the request back in the queue.  It would be
1353                                 * best to put the request back where we got
1354                                 * it but that's hard so for now we put it
1355                                 * at the front.  This should be ok; putting
1356                                 * it at the end does not work.
1357                                 */
1358                                /* XXX validate sid again? */
1359                                crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1360                                TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1361                                cryptostats.cs_kblocks++;
1362                        }
1363                }
1364
1365                if (submit == NULL && krp == NULL) {
1366                        /*
1367                         * Nothing more to be processed.  Sleep until we're
1368                         * woken because there are more ops to process.
1369                         * This happens either by submission or by a driver
1370                         * becoming unblocked and notifying us through
1371                         * crypto_unblock.  Note that when we wakeup we
1372                         * start processing each queue again from the
1373                         * front. It's not clear that it's important to
1374                         * preserve this ordering since ops may finish
1375                         * out of order if dispatched to different devices
1376                         * and some become blocked while others do not.
1377                         */
1378                        crp_sleep = 1;
1379                        msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
1380                        crp_sleep = 0;
1381                        if (cryptoproc == NULL)
1382                                break;
1383                        cryptostats.cs_intrs++;
1384                }
1385        }
1386        CRYPTO_Q_UNLOCK();
1387
1388        crypto_finis(&crp_q);
1389}
1390
1391/*
1392 * Crypto returns thread, does callbacks for processed crypto requests.
1393 * Callbacks are done here, rather than in the crypto drivers, because
1394 * callbacks typically are expensive and would slow interrupt handling.
1395 */
1396static void
1397crypto_ret_proc(void)
1398{
1399        struct cryptop *crpt;
1400        struct cryptkop *krpt;
1401
1402        CRYPTO_RETQ_LOCK();
1403        for (;;) {
1404                /* Harvest return q's for completed ops */
1405                crpt = TAILQ_FIRST(&crp_ret_q);
1406                if (crpt != NULL)
1407                        TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
1408
1409                krpt = TAILQ_FIRST(&crp_ret_kq);
1410                if (krpt != NULL)
1411                        TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
1412
1413                if (crpt != NULL || krpt != NULL) {
1414                        CRYPTO_RETQ_UNLOCK();
1415                        /*
1416                         * Run callbacks unlocked.
1417                         */
1418                        if (crpt != NULL) {
1419#ifdef CRYPTO_TIMING
1420                                if (crypto_timing) {
1421                                        /*
1422                                         * NB: We must copy the timestamp before
1423                                         * doing the callback as the cryptop is
1424                                         * likely to be reclaimed.
1425                                         */
1426                                        struct bintime t = crpt->crp_tstamp;
1427                                        crypto_tstat(&cryptostats.cs_cb, &t);
1428                                        crpt->crp_callback(crpt);
1429                                        crypto_tstat(&cryptostats.cs_finis, &t);
1430                                } else
1431#endif
1432                                        crpt->crp_callback(crpt);
1433                        }
1434                        if (krpt != NULL)
1435                                krpt->krp_callback(krpt);
1436                        CRYPTO_RETQ_LOCK();
1437                } else {
1438                        /*
1439                         * Nothing more to be processed.  Sleep until we're
1440                         * woken because there are more returns to process.
1441                         */
1442                        msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
1443                                "crypto_ret_wait", 0);
1444                        if (cryptoretproc == NULL)
1445                                break;
1446                        cryptostats.cs_rets++;
1447                }
1448        }
1449        CRYPTO_RETQ_UNLOCK();
1450
1451        crypto_finis(&crp_ret_q);
1452}
1453
1454#ifdef DDB
1455static void
1456db_show_drivers(void)
1457{
1458        int hid;
1459
1460        db_printf("%12s %4s %4s %8s %2s %2s\n"
1461                , "Device"
1462                , "Ses"
1463                , "Kops"
1464                , "Flags"
1465                , "QB"
1466                , "KB"
1467        );
1468        for (hid = 0; hid < crypto_drivers_num; hid++) {
1469                const struct cryptocap *cap = &crypto_drivers[hid];
1470                if (cap->cc_dev == NULL)
1471                        continue;
1472                db_printf("%-12s %4u %4u %08x %2u %2u\n"
1473                    , device_get_nameunit(cap->cc_dev)
1474                    , cap->cc_sessions
1475                    , cap->cc_koperations
1476                    , cap->cc_flags
1477                    , cap->cc_qblocked
1478                    , cap->cc_kqblocked
1479                );
1480        }
1481}
1482
1483DB_SHOW_COMMAND(crypto, db_show_crypto)
1484{
1485        struct cryptop *crp;
1486
1487        db_show_drivers();
1488        db_printf("\n");
1489
1490        db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
1491            "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
1492            "Desc", "Callback");
1493        TAILQ_FOREACH(crp, &crp_q, crp_next) {
1494                db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
1495                    , (int) CRYPTO_SESID2HID(crp->crp_sid)
1496                    , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
1497                    , crp->crp_ilen, crp->crp_olen
1498                    , crp->crp_etype
1499                    , crp->crp_flags
1500                    , crp->crp_desc
1501                    , crp->crp_callback
1502                );
1503        }
1504        if (!TAILQ_EMPTY(&crp_ret_q)) {
1505                db_printf("\n%4s %4s %4s %8s\n",
1506                    "HID", "Etype", "Flags", "Callback");
1507                TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
1508                        db_printf("%4u %4u %04x %8p\n"
1509                            , (int) CRYPTO_SESID2HID(crp->crp_sid)
1510                            , crp->crp_etype
1511                            , crp->crp_flags
1512                            , crp->crp_callback
1513                        );
1514                }
1515        }
1516}
1517
1518DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
1519{
1520        struct cryptkop *krp;
1521
1522        db_show_drivers();
1523        db_printf("\n");
1524
1525        db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
1526            "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
1527        TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1528                db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
1529                    , krp->krp_op
1530                    , krp->krp_status
1531                    , krp->krp_iparams, krp->krp_oparams
1532                    , krp->krp_crid, krp->krp_hid
1533                    , krp->krp_callback
1534                );
1535        }
1536        if (!TAILQ_EMPTY(&crp_ret_q)) {
1537                db_printf("%4s %5s %8s %4s %8s\n",
1538                    "Op", "Status", "CRID", "HID", "Callback");
1539                TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
1540                        db_printf("%4u %5u %08x %4u %8p\n"
1541                            , krp->krp_op
1542                            , krp->krp_status
1543                            , krp->krp_crid, krp->krp_hid
1544                            , krp->krp_callback
1545                        );
1546                }
1547        }
1548}
1549#endif
1550
1551int crypto_modevent(module_t mod, int type, void *unused);
1552
1553/*
1554 * Initialization code, both for static and dynamic loading.
1555 * Note this is not invoked with the usual DECLARE_MODULE
1556 * mechanism but instead is listed as a dependency by the
1557 * cryptosoft driver.  This guarantees proper ordering of
1558 * calls on module load/unload.
1559 */
1560int
1561crypto_modevent(module_t mod, int type, void *unused)
1562{
1563        int error = EINVAL;
1564
1565        switch (type) {
1566        case MOD_LOAD:
1567                error = crypto_init();
1568                if (error == 0 && bootverbose)
1569                        printf("crypto: <crypto core>\n");
1570                break;
1571        case MOD_UNLOAD:
1572                /*XXX disallow if active sessions */
1573                error = 0;
1574                crypto_destroy();
1575                return 0;
1576        }
1577        return error;
1578}
1579MODULE_VERSION(crypto, 1);
1580MODULE_DEPEND(crypto, zlib, 1, 1, 1);