[Glibc-bsd-commits] r3628 - in trunk/freebsd-utils/debian: . local/include/sys patches
Robert Millan
rmh at alioth.debian.org
Mon Jul 25 15:40:45 UTC 2011
Author: rmh
Date: 2011-07-25 15:40:31 +0000 (Mon, 25 Jul 2011)
New Revision: 3628
Added:
trunk/freebsd-utils/debian/freebsd-geom.install
trunk/freebsd-utils/debian/freebsd-geom.manpages
trunk/freebsd-utils/debian/local/include/sys/endian.h
trunk/freebsd-utils/debian/patches/000_sys_geom.diff
trunk/freebsd-utils/debian/patches/039_geom.diff
Modified:
trunk/freebsd-utils/debian/changelog
trunk/freebsd-utils/debian/control
trunk/freebsd-utils/debian/patches/series
trunk/freebsd-utils/debian/rules
Log:
Add freebsd-geom package, with GEOM and a few modules (including geli). (Closes: #492889)
Modified: trunk/freebsd-utils/debian/changelog
===================================================================
--- trunk/freebsd-utils/debian/changelog 2011-07-25 12:51:04 UTC (rev 3627)
+++ trunk/freebsd-utils/debian/changelog 2011-07-25 15:40:31 UTC (rev 3628)
@@ -1,3 +1,10 @@
+freebsd-utils (8.2+ds2-4) experimental; urgency=low
+
+ * Add freebsd-geom package, with GEOM and a few modules (including
+ geli). (Closes: #492889)
+
+ -- Robert Millan <rmh at debian.org> Mon, 25 Jul 2011 17:28:06 +0200
+
freebsd-utils (8.2+ds2-3) unstable; urgency=low
* Replace usb.conf with version from 8-STABLE.
Modified: trunk/freebsd-utils/debian/control
===================================================================
--- trunk/freebsd-utils/debian/control 2011-07-25 12:51:04 UTC (rev 3627)
+++ trunk/freebsd-utils/debian/control 2011-07-25 15:40:31 UTC (rev 3628)
@@ -21,20 +21,22 @@
libkiconv-dev,
# libdevstat-dev: mdconfig
libdevstat-dev,
-# libgeom-dev : ccdconfig
+# libgeom-dev : ccdconfig, geom
libgeom-dev,
-# libsbuf-dev: ifconfig, mdconfig
+# libsbuf-dev: ifconfig, mdconfig, geom
libsbuf-dev (>= 8.2-2),
# libcam-dev: camcontrol
libcam-dev (>= 8.2),
# libedit-dev: pppctl
libedit-dev,
-# libexpat1-dev: ifconfig
+# libexpat1-dev: ifconfig, geom
libexpat1-dev,
# libtirpc-dev: mount_nfs, mountd, nfsd, rpc.lockd, rpc.statd
libtirpc-dev,
# libjail-dev: jail
libjail-dev,
+# libssl-dev: geom
+ libssl-dev,
Vcs-Browser: http://svn.debian.org/wsvn/glibc-bsd/trunk/freebsd-utils/
Vcs-Svn: svn://svn.debian.org/glibc-bsd/trunk/freebsd-utils/
Standards-Version: 3.9.2
@@ -142,6 +144,18 @@
Use this package on any machine that uses NFS, either as client or
server.
+Package: freebsd-geom
+Section: admin
+Priority: standard
+Architecture: kfreebsd-any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: modular disk I/O request transformation framework
+ The GEOM framework provides an infrastructure in which ``classes'' can
+ perform transformations on disk I/O requests on their path from the upper
+ kernel to the device drivers and back.
+ .
+ This package provides FreeBSD GEOM and associated utilities.
+
# FIXME:
# - add ppp (ppp, pppd)
# - add pf (pfctl + authpf + /etc stuff + init.d script)
Added: trunk/freebsd-utils/debian/freebsd-geom.install
===================================================================
--- trunk/freebsd-utils/debian/freebsd-geom.install (rev 0)
+++ trunk/freebsd-utils/debian/freebsd-geom.install 2011-07-25 15:40:31 UTC (rev 3628)
@@ -0,0 +1,12 @@
+sbin/geom/core/geom /sbin
+
+sbin/geom/class/cache/geom_cache.so /lib/geom
+sbin/geom/class/concat/geom_concat.so /lib/geom
+sbin/geom/class/eli/geom_eli.so /lib/geom
+sbin/geom/class/label/geom_label.so /lib/geom
+sbin/geom/class/mirror/geom_mirror.so /lib/geom
+sbin/geom/class/nop/geom_nop.so /lib/geom
+sbin/geom/class/raid3/geom_raid3.so /lib/geom
+sbin/geom/class/sched/geom_sched.so /lib/geom
+sbin/geom/class/shsec/geom_shsec.so /lib/geom
+sbin/geom/class/stripe/geom_stripe.so /lib/geom
Added: trunk/freebsd-utils/debian/freebsd-geom.manpages
===================================================================
--- trunk/freebsd-utils/debian/freebsd-geom.manpages (rev 0)
+++ trunk/freebsd-utils/debian/freebsd-geom.manpages 2011-07-25 15:40:31 UTC (rev 3628)
@@ -0,0 +1,15 @@
+sbin/geom/core/geom.8
+sbin/geom/class/cache/gcache.8
+sbin/geom/class/concat/gconcat.8
+sbin/geom/class/eli/geli.8
+sbin/geom/class/journal/gjournal.8
+sbin/geom/class/label/glabel.8
+sbin/geom/class/mirror/gmirror.8
+sbin/geom/class/multipath/gmultipath.8
+sbin/geom/class/nop/gnop.8
+sbin/geom/class/part/gpart.8
+sbin/geom/class/raid3/graid3.8
+sbin/geom/class/sched/gsched.8
+sbin/geom/class/shsec/gshsec.8
+sbin/geom/class/stripe/gstripe.8
+sbin/geom/class/virstor/gvirstor.8
Added: trunk/freebsd-utils/debian/local/include/sys/endian.h
===================================================================
--- trunk/freebsd-utils/debian/local/include/sys/endian.h (rev 0)
+++ trunk/freebsd-utils/debian/local/include/sys/endian.h 2011-07-25 15:40:31 UTC (rev 3628)
@@ -0,0 +1,144 @@
+#include_next <sys/endian.h>
+
+#ifndef _LOCAL_SYS_ENDIAN_H
+#define _LOCAL_SYS_ENDIAN_H
+/*-
+ * Copyright (c) 2002 Thomas Moestl <tmm at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <stdint.h>
+#include <cdefs.h>
+
+/* Alignment-agnostic encode/decode bytestream to/from little/big endian. */
+
+static __inline uint16_t
+be16dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return ((p[0] << 8) | p[1]);
+}
+
+static __inline uint32_t
+be32dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((unsigned)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
+}
+
+static __inline uint64_t
+be64dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((uint64_t)be32dec(p) << 32) | be32dec(p + 4));
+}
+
+static __inline uint16_t
+le16dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return ((p[1] << 8) | p[0]);
+}
+
+static __inline uint32_t
+le32dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((unsigned)p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]);
+}
+
+static __inline uint64_t
+le64dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((uint64_t)le32dec(p + 4) << 32) | le32dec(p));
+}
+
+static __inline void
+be16enc(void *pp, uint16_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = (u >> 8) & 0xff;
+ p[1] = u & 0xff;
+}
+
+static __inline void
+be32enc(void *pp, uint32_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = (u >> 24) & 0xff;
+ p[1] = (u >> 16) & 0xff;
+ p[2] = (u >> 8) & 0xff;
+ p[3] = u & 0xff;
+}
+
+static __inline void
+be64enc(void *pp, uint64_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ be32enc(p, (uint32_t)(u >> 32));
+ be32enc(p + 4, (uint32_t)(u & 0xffffffffU));
+}
+
+static __inline void
+le16enc(void *pp, uint16_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = u & 0xff;
+ p[1] = (u >> 8) & 0xff;
+}
+
+static __inline void
+le32enc(void *pp, uint32_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = u & 0xff;
+ p[1] = (u >> 8) & 0xff;
+ p[2] = (u >> 16) & 0xff;
+ p[3] = (u >> 24) & 0xff;
+}
+
+static __inline void
+le64enc(void *pp, uint64_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ le32enc(p, (uint32_t)(u & 0xffffffffU));
+ le32enc(p + 4, (uint32_t)(u >> 32));
+}
+
+#endif
Added: trunk/freebsd-utils/debian/patches/000_sys_geom.diff
===================================================================
--- trunk/freebsd-utils/debian/patches/000_sys_geom.diff (rev 0)
+++ trunk/freebsd-utils/debian/patches/000_sys_geom.diff 2011-07-25 15:40:31 UTC (rev 3628)
@@ -0,0 +1,5743 @@
+--- /dev/null
++++ b/sys/crypto/sha2/sha2.c
+@@ -0,0 +1,1052 @@
++/* $KAME: sha2.c,v 1.8 2001/11/08 01:07:52 itojun Exp $ */
++
++/*
++ * sha2.c
++ *
++ * Version 1.0.0beta1
++ *
++ * Written by Aaron D. Gifford <me at aarongifford.com>
++ *
++ * Copyright 2000 Aaron D. Gifford. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the copyright holder nor the names of contributors
++ * may be used to endorse or promote products derived from this software
++ * without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) AND CONTRIBUTOR(S) ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR(S) OR CONTRIBUTOR(S) BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/types.h>
++#include <sys/time.h>
++#ifdef _KERNEL
++#include <sys/systm.h>
++#else
++#include <string.h>
++#endif
++#include <machine/endian.h>
++#include <crypto/sha2/sha2.h>
++
++/*
++ * ASSERT NOTE:
++ * Some sanity checking code is included using assert(). On my FreeBSD
++ * system, this additional code can be removed by compiling with NDEBUG
++ * defined. Check your own systems manpage on assert() to see how to
++ * compile WITHOUT the sanity checking code on your system.
++ *
++ * UNROLLED TRANSFORM LOOP NOTE:
++ * You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform
++ * loop version for the hash transform rounds (defined using macros
++ * later in this file). Either define on the command line, for example:
++ *
++ * cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c
++ *
++ * or define below:
++ *
++ * #define SHA2_UNROLL_TRANSFORM
++ *
++ */
++
++#if defined(__bsdi__) || defined(__FreeBSD__)
++#define assert(x)
++#endif
++
++
++/*** SHA-256/384/512 Machine Architecture Definitions *****************/
++/*
++ * BYTE_ORDER NOTE:
++ *
++ * Please make sure that your system defines BYTE_ORDER. If your
++ * architecture is little-endian, make sure it also defines
++ * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
++ * equivalent.
++ *
++ * If your system does not define the above, then you can do so by
++ * hand like this:
++ *
++ * #define LITTLE_ENDIAN 1234
++ * #define BIG_ENDIAN 4321
++ *
++ * And for little-endian machines, add:
++ *
++ * #define BYTE_ORDER LITTLE_ENDIAN
++ *
++ * Or for big-endian machines:
++ *
++ * #define BYTE_ORDER BIG_ENDIAN
++ *
++ * The FreeBSD machine this was written on defines BYTE_ORDER
++ * appropriately by including <sys/types.h> (which in turn includes
++ * <machine/endian.h> where the appropriate definitions are actually
++ * made).
++ */
++#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN)
++#error Define BYTE_ORDER to be equal to either LITTLE_ENDIAN or BIG_ENDIAN
++#endif
++
++/*
++ * Define the following sha2_* types to types of the correct length on
++ * the native architecture. Most BSD systems and Linux define u_intXX_t
++ * types. Machines with very recent ANSI C headers, can use the
++ * uintXX_t definitions from inttypes.h by defining SHA2_USE_INTTYPES_H
++ * during compile or in the sha.h header file.
++ *
++ * Machines that support neither u_intXX_t nor inttypes.h's uintXX_t
++ * will need to define these three typedefs below (and the appropriate
++ * ones in sha.h too) by hand according to their system architecture.
++ *
++ * Thank you, Jun-ichiro itojun Hagino, for suggesting using u_intXX_t
++ * types and pointing out recent ANSI C support for uintXX_t in inttypes.h.
++ */
++#if 0 /*def SHA2_USE_INTTYPES_H*/
++
++typedef uint8_t sha2_byte; /* Exactly 1 byte */
++typedef uint32_t sha2_word32; /* Exactly 4 bytes */
++typedef uint64_t sha2_word64; /* Exactly 8 bytes */
++
++#else /* SHA2_USE_INTTYPES_H */
++
++typedef u_int8_t sha2_byte; /* Exactly 1 byte */
++typedef u_int32_t sha2_word32; /* Exactly 4 bytes */
++typedef u_int64_t sha2_word64; /* Exactly 8 bytes */
++
++#endif /* SHA2_USE_INTTYPES_H */
++
++
++/*** SHA-256/384/512 Various Length Definitions ***********************/
++/* NOTE: Most of these are in sha2.h */
++#define SHA256_SHORT_BLOCK_LENGTH (SHA256_BLOCK_LENGTH - 8)
++#define SHA384_SHORT_BLOCK_LENGTH (SHA384_BLOCK_LENGTH - 16)
++#define SHA512_SHORT_BLOCK_LENGTH (SHA512_BLOCK_LENGTH - 16)
++
++
++/*** ENDIAN REVERSAL MACROS *******************************************/
++#if BYTE_ORDER == LITTLE_ENDIAN
++#define REVERSE32(w,x) { \
++ sha2_word32 tmp = (w); \
++ tmp = (tmp >> 16) | (tmp << 16); \
++ (x) = ((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8); \
++}
++#define REVERSE64(w,x) { \
++ sha2_word64 tmp = (w); \
++ tmp = (tmp >> 32) | (tmp << 32); \
++ tmp = ((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \
++ ((tmp & 0x00ff00ff00ff00ffULL) << 8); \
++ (x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
++ ((tmp & 0x0000ffff0000ffffULL) << 16); \
++}
++#endif /* BYTE_ORDER == LITTLE_ENDIAN */
++
++/*
++ * Macro for incrementally adding the unsigned 64-bit integer n to the
++ * unsigned 128-bit integer (represented using a two-element array of
++ * 64-bit words):
++ */
++#define ADDINC128(w,n) { \
++ (w)[0] += (sha2_word64)(n); \
++ if ((w)[0] < (n)) { \
++ (w)[1]++; \
++ } \
++}
++
++/*** THE SIX LOGICAL FUNCTIONS ****************************************/
++/*
++ * Bit shifting and rotation (used by the six SHA-XYZ logical functions):
++ *
++ * NOTE: The naming of R and S appears backwards here (R is a SHIFT and
++ * S is a ROTATION) because the SHA-256/384/512 description document
++ * (see http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf) uses this
++ * same "backwards" definition.
++ */
++/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
++#define R(b,x) ((x) >> (b))
++/* 32-bit Rotate-right (used in SHA-256): */
++#define S32(b,x) (((x) >> (b)) | ((x) << (32 - (b))))
++/* 64-bit Rotate-right (used in SHA-384 and SHA-512): */
++#define S64(b,x) (((x) >> (b)) | ((x) << (64 - (b))))
++
++/* Two of six logical functions used in SHA-256, SHA-384, and SHA-512: */
++#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
++#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
++
++/* Four of six logical functions used in SHA-256: */
++#define Sigma0_256(x) (S32(2, (x)) ^ S32(13, (x)) ^ S32(22, (x)))
++#define Sigma1_256(x) (S32(6, (x)) ^ S32(11, (x)) ^ S32(25, (x)))
++#define sigma0_256(x) (S32(7, (x)) ^ S32(18, (x)) ^ R(3 , (x)))
++#define sigma1_256(x) (S32(17, (x)) ^ S32(19, (x)) ^ R(10, (x)))
++
++/* Four of six logical functions used in SHA-384 and SHA-512: */
++#define Sigma0_512(x) (S64(28, (x)) ^ S64(34, (x)) ^ S64(39, (x)))
++#define Sigma1_512(x) (S64(14, (x)) ^ S64(18, (x)) ^ S64(41, (x)))
++#define sigma0_512(x) (S64( 1, (x)) ^ S64( 8, (x)) ^ R( 7, (x)))
++#define sigma1_512(x) (S64(19, (x)) ^ S64(61, (x)) ^ R( 6, (x)))
++
++/*** INTERNAL FUNCTION PROTOTYPES *************************************/
++/* NOTE: These should not be accessed directly from outside this
++ * library -- they are intended for private internal visibility/use
++ * only.
++ */
++void SHA512_Last(SHA512_CTX*);
++void SHA256_Transform(SHA256_CTX*, const sha2_word32*);
++void SHA512_Transform(SHA512_CTX*, const sha2_word64*);
++
++
++/*** SHA-XYZ INITIAL HASH VALUES AND CONSTANTS ************************/
++/* Hash constant words K for SHA-256: */
++static const sha2_word32 K256[64] = {
++ 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
++ 0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
++ 0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
++ 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
++ 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
++ 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
++ 0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
++ 0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
++ 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
++ 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
++ 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
++ 0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
++ 0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
++ 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
++ 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
++ 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
++};
++
++/* Initial hash value H for SHA-256: */
++static const sha2_word32 sha256_initial_hash_value[8] = {
++ 0x6a09e667UL,
++ 0xbb67ae85UL,
++ 0x3c6ef372UL,
++ 0xa54ff53aUL,
++ 0x510e527fUL,
++ 0x9b05688cUL,
++ 0x1f83d9abUL,
++ 0x5be0cd19UL
++};
++
++/* Hash constant words K for SHA-384 and SHA-512: */
++static const sha2_word64 K512[80] = {
++ 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
++ 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
++ 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
++ 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
++ 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
++ 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
++ 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
++ 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
++ 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
++ 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
++ 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
++ 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
++ 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
++ 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
++ 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
++ 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
++ 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
++ 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
++ 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
++ 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
++ 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
++ 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
++ 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
++ 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
++ 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
++ 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
++ 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
++ 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
++ 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
++ 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
++ 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
++ 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
++ 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
++ 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
++ 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
++ 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
++ 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
++ 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
++ 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
++ 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
++};
++
++/* Initial hash value H for SHA-384 */
++static const sha2_word64 sha384_initial_hash_value[8] = {
++ 0xcbbb9d5dc1059ed8ULL,
++ 0x629a292a367cd507ULL,
++ 0x9159015a3070dd17ULL,
++ 0x152fecd8f70e5939ULL,
++ 0x67332667ffc00b31ULL,
++ 0x8eb44a8768581511ULL,
++ 0xdb0c2e0d64f98fa7ULL,
++ 0x47b5481dbefa4fa4ULL
++};
++
++/* Initial hash value H for SHA-512 */
++static const sha2_word64 sha512_initial_hash_value[8] = {
++ 0x6a09e667f3bcc908ULL,
++ 0xbb67ae8584caa73bULL,
++ 0x3c6ef372fe94f82bULL,
++ 0xa54ff53a5f1d36f1ULL,
++ 0x510e527fade682d1ULL,
++ 0x9b05688c2b3e6c1fULL,
++ 0x1f83d9abfb41bd6bULL,
++ 0x5be0cd19137e2179ULL
++};
++
++/*
++ * Constant used by SHA256/384/512_End() functions for converting the
++ * digest to a readable hexadecimal character string:
++ */
++static const char *sha2_hex_digits = "0123456789abcdef";
++
++
++/*** SHA-256: *********************************************************/
++void SHA256_Init(SHA256_CTX* context) {
++ if (context == (SHA256_CTX*)0) {
++ return;
++ }
++ bcopy(sha256_initial_hash_value, context->state, SHA256_DIGEST_LENGTH);
++ bzero(context->buffer, SHA256_BLOCK_LENGTH);
++ context->bitcount = 0;
++}
++
++#ifdef SHA2_UNROLL_TRANSFORM
++
++/* Unrolled SHA-256 round macros: */
++
++#if BYTE_ORDER == LITTLE_ENDIAN
++
++#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
++ REVERSE32(*data++, W256[j]); \
++ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
++ K256[j] + W256[j]; \
++ (d) += T1; \
++ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
++ j++
++
++
++#else /* BYTE_ORDER == LITTLE_ENDIAN */
++
++#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
++ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
++ K256[j] + (W256[j] = *data++); \
++ (d) += T1; \
++ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
++ j++
++
++#endif /* BYTE_ORDER == LITTLE_ENDIAN */
++
++#define ROUND256(a,b,c,d,e,f,g,h) \
++ s0 = W256[(j+1)&0x0f]; \
++ s0 = sigma0_256(s0); \
++ s1 = W256[(j+14)&0x0f]; \
++ s1 = sigma1_256(s1); \
++ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \
++ (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
++ (d) += T1; \
++ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
++ j++
++
++void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
++ sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
++ sha2_word32 T1, *W256;
++ int j;
++
++ W256 = (sha2_word32*)context->buffer;
++
++ /* Initialize registers with the prev. intermediate value */
++ a = context->state[0];
++ b = context->state[1];
++ c = context->state[2];
++ d = context->state[3];
++ e = context->state[4];
++ f = context->state[5];
++ g = context->state[6];
++ h = context->state[7];
++
++ j = 0;
++ do {
++ /* Rounds 0 to 15 (unrolled): */
++ ROUND256_0_TO_15(a,b,c,d,e,f,g,h);
++ ROUND256_0_TO_15(h,a,b,c,d,e,f,g);
++ ROUND256_0_TO_15(g,h,a,b,c,d,e,f);
++ ROUND256_0_TO_15(f,g,h,a,b,c,d,e);
++ ROUND256_0_TO_15(e,f,g,h,a,b,c,d);
++ ROUND256_0_TO_15(d,e,f,g,h,a,b,c);
++ ROUND256_0_TO_15(c,d,e,f,g,h,a,b);
++ ROUND256_0_TO_15(b,c,d,e,f,g,h,a);
++ } while (j < 16);
++
++ /* Now for the remaining rounds to 64: */
++ do {
++ ROUND256(a,b,c,d,e,f,g,h);
++ ROUND256(h,a,b,c,d,e,f,g);
++ ROUND256(g,h,a,b,c,d,e,f);
++ ROUND256(f,g,h,a,b,c,d,e);
++ ROUND256(e,f,g,h,a,b,c,d);
++ ROUND256(d,e,f,g,h,a,b,c);
++ ROUND256(c,d,e,f,g,h,a,b);
++ ROUND256(b,c,d,e,f,g,h,a);
++ } while (j < 64);
++
++ /* Compute the current intermediate hash value */
++ context->state[0] += a;
++ context->state[1] += b;
++ context->state[2] += c;
++ context->state[3] += d;
++ context->state[4] += e;
++ context->state[5] += f;
++ context->state[6] += g;
++ context->state[7] += h;
++
++ /* Clean up */
++ a = b = c = d = e = f = g = h = T1 = 0;
++}
++
++#else /* SHA2_UNROLL_TRANSFORM */
++
++void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
++ sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
++ sha2_word32 T1, T2, *W256;
++ int j;
++
++ W256 = (sha2_word32*)context->buffer;
++
++ /* Initialize registers with the prev. intermediate value */
++ a = context->state[0];
++ b = context->state[1];
++ c = context->state[2];
++ d = context->state[3];
++ e = context->state[4];
++ f = context->state[5];
++ g = context->state[6];
++ h = context->state[7];
++
++ j = 0;
++ do {
++#if BYTE_ORDER == LITTLE_ENDIAN
++ /* Copy data while converting to host byte order */
++ REVERSE32(*data++,W256[j]);
++ /* Apply the SHA-256 compression function to update a..h */
++ T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
++#else /* BYTE_ORDER == LITTLE_ENDIAN */
++ /* Apply the SHA-256 compression function to update a..h with copy */
++ T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
++#endif /* BYTE_ORDER == LITTLE_ENDIAN */
++ T2 = Sigma0_256(a) + Maj(a, b, c);
++ h = g;
++ g = f;
++ f = e;
++ e = d + T1;
++ d = c;
++ c = b;
++ b = a;
++ a = T1 + T2;
++
++ j++;
++ } while (j < 16);
++
++ do {
++ /* Part of the message block expansion: */
++ s0 = W256[(j+1)&0x0f];
++ s0 = sigma0_256(s0);
++ s1 = W256[(j+14)&0x0f];
++ s1 = sigma1_256(s1);
++
++ /* Apply the SHA-256 compression function to update a..h */
++ T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] +
++ (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0);
++ T2 = Sigma0_256(a) + Maj(a, b, c);
++ h = g;
++ g = f;
++ f = e;
++ e = d + T1;
++ d = c;
++ c = b;
++ b = a;
++ a = T1 + T2;
++
++ j++;
++ } while (j < 64);
++
++ /* Compute the current intermediate hash value */
++ context->state[0] += a;
++ context->state[1] += b;
++ context->state[2] += c;
++ context->state[3] += d;
++ context->state[4] += e;
++ context->state[5] += f;
++ context->state[6] += g;
++ context->state[7] += h;
++
++ /* Clean up */
++ a = b = c = d = e = f = g = h = T1 = T2 = 0;
++}
++
++#endif /* SHA2_UNROLL_TRANSFORM */
++
++void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
++ unsigned int freespace, usedspace;
++
++ if (len == 0) {
++ /* Calling with no data is valid - we do nothing */
++ return;
++ }
++
++ /* Sanity check: */
++ assert(context != (SHA256_CTX*)0 && data != (sha2_byte*)0);
++
++ usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH;
++ if (usedspace > 0) {
++ /* Calculate how much free space is available in the buffer */
++ freespace = SHA256_BLOCK_LENGTH - usedspace;
++
++ if (len >= freespace) {
++ /* Fill the buffer completely and process it */
++ bcopy(data, &context->buffer[usedspace], freespace);
++ context->bitcount += freespace << 3;
++ len -= freespace;
++ data += freespace;
++ SHA256_Transform(context, (sha2_word32*)context->buffer);
++ } else {
++ /* The buffer is not yet full */
++ bcopy(data, &context->buffer[usedspace], len);
++ context->bitcount += len << 3;
++ /* Clean up: */
++ usedspace = freespace = 0;
++ return;
++ }
++ }
++ while (len >= SHA256_BLOCK_LENGTH) {
++ /* Process as many complete blocks as we can */
++ SHA256_Transform(context, (const sha2_word32*)data);
++ context->bitcount += SHA256_BLOCK_LENGTH << 3;
++ len -= SHA256_BLOCK_LENGTH;
++ data += SHA256_BLOCK_LENGTH;
++ }
++ if (len > 0) {
++ /* There's left-overs, so save 'em */
++ bcopy(data, context->buffer, len);
++ context->bitcount += len << 3;
++ }
++ /* Clean up: */
++ usedspace = freespace = 0;
++}
++
++void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
++ sha2_word32 *d = (sha2_word32*)digest;
++ unsigned int usedspace;
++
++ /* Sanity check: */
++ assert(context != (SHA256_CTX*)0);
++
++ /* If no digest buffer is passed, we don't bother doing this: */
++ if (digest != (sha2_byte*)0) {
++ usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH;
++#if BYTE_ORDER == LITTLE_ENDIAN
++ /* Convert FROM host byte order */
++ REVERSE64(context->bitcount,context->bitcount);
++#endif
++ if (usedspace > 0) {
++ /* Begin padding with a 1 bit: */
++ context->buffer[usedspace++] = 0x80;
++
++ if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) {
++ /* Set-up for the last transform: */
++ bzero(&context->buffer[usedspace], SHA256_SHORT_BLOCK_LENGTH - usedspace);
++ } else {
++ if (usedspace < SHA256_BLOCK_LENGTH) {
++ bzero(&context->buffer[usedspace], SHA256_BLOCK_LENGTH - usedspace);
++ }
++ /* Do second-to-last transform: */
++ SHA256_Transform(context, (sha2_word32*)context->buffer);
++
++ /* And set-up for the last transform: */
++ bzero(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
++ }
++ } else {
++ /* Set-up for the last transform: */
++ bzero(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
++
++ /* Begin padding with a 1 bit: */
++ *context->buffer = 0x80;
++ }
++ /* Set the bit count: */
++ *(sha2_word64*)&context->buffer[SHA256_SHORT_BLOCK_LENGTH] = context->bitcount;
++
++ /* Final transform: */
++ SHA256_Transform(context, (sha2_word32*)context->buffer);
++
++#if BYTE_ORDER == LITTLE_ENDIAN
++ {
++ /* Convert TO host byte order */
++ int j;
++ for (j = 0; j < 8; j++) {
++ REVERSE32(context->state[j],context->state[j]);
++ *d++ = context->state[j];
++ }
++ }
++#else
++ bcopy(context->state, d, SHA256_DIGEST_LENGTH);
++#endif
++ }
++
++ /* Clean up state data: */
++ bzero(context, sizeof(*context));
++ usedspace = 0;
++}
++
++char *SHA256_End(SHA256_CTX* context, char buffer[]) {
++ sha2_byte digest[SHA256_DIGEST_LENGTH], *d = digest;
++ int i;
++
++ /* Sanity check: */
++ assert(context != (SHA256_CTX*)0);
++
++ if (buffer != (char*)0) {
++ SHA256_Final(digest, context);
++
++ for (i = 0; i < SHA256_DIGEST_LENGTH; i++) {
++ *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
++ *buffer++ = sha2_hex_digits[*d & 0x0f];
++ d++;
++ }
++ *buffer = (char)0;
++ } else {
++ bzero(context, sizeof(*context));
++ }
++ bzero(digest, SHA256_DIGEST_LENGTH);
++ return buffer;
++}
++
++char* SHA256_Data(const sha2_byte* data, size_t len, char digest[SHA256_DIGEST_STRING_LENGTH]) {
++ SHA256_CTX context;
++
++ SHA256_Init(&context);
++ SHA256_Update(&context, data, len);
++ return SHA256_End(&context, digest);
++}
++
++
++/*** SHA-512: *********************************************************/
++void SHA512_Init(SHA512_CTX* context) {
++ if (context == (SHA512_CTX*)0) {
++ return;
++ }
++ bcopy(sha512_initial_hash_value, context->state, SHA512_DIGEST_LENGTH);
++ bzero(context->buffer, SHA512_BLOCK_LENGTH);
++ context->bitcount[0] = context->bitcount[1] = 0;
++}
++
++#ifdef SHA2_UNROLL_TRANSFORM
++
++/* Unrolled SHA-512 round macros: */
++#if BYTE_ORDER == LITTLE_ENDIAN
++
++#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
++ REVERSE64(*data++, W512[j]); \
++ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
++ K512[j] + W512[j]; \
++ (d) += T1, \
++ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \
++ j++
++
++
++#else /* BYTE_ORDER == LITTLE_ENDIAN */
++
++#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
++ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
++ K512[j] + (W512[j] = *data++); \
++ (d) += T1; \
++ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
++ j++
++
++#endif /* BYTE_ORDER == LITTLE_ENDIAN */
++
++#define ROUND512(a,b,c,d,e,f,g,h) \
++ s0 = W512[(j+1)&0x0f]; \
++ s0 = sigma0_512(s0); \
++ s1 = W512[(j+14)&0x0f]; \
++ s1 = sigma1_512(s1); \
++ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \
++ (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
++ (d) += T1; \
++ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
++ j++
++
++void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
++ sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
++ sha2_word64 T1, *W512 = (sha2_word64*)context->buffer;
++ int j;
++
++ /* Initialize registers with the prev. intermediate value */
++ a = context->state[0];
++ b = context->state[1];
++ c = context->state[2];
++ d = context->state[3];
++ e = context->state[4];
++ f = context->state[5];
++ g = context->state[6];
++ h = context->state[7];
++
++ j = 0;
++ do {
++ ROUND512_0_TO_15(a,b,c,d,e,f,g,h);
++ ROUND512_0_TO_15(h,a,b,c,d,e,f,g);
++ ROUND512_0_TO_15(g,h,a,b,c,d,e,f);
++ ROUND512_0_TO_15(f,g,h,a,b,c,d,e);
++ ROUND512_0_TO_15(e,f,g,h,a,b,c,d);
++ ROUND512_0_TO_15(d,e,f,g,h,a,b,c);
++ ROUND512_0_TO_15(c,d,e,f,g,h,a,b);
++ ROUND512_0_TO_15(b,c,d,e,f,g,h,a);
++ } while (j < 16);
++
++ /* Now for the remaining rounds up to 79: */
++ do {
++ ROUND512(a,b,c,d,e,f,g,h);
++ ROUND512(h,a,b,c,d,e,f,g);
++ ROUND512(g,h,a,b,c,d,e,f);
++ ROUND512(f,g,h,a,b,c,d,e);
++ ROUND512(e,f,g,h,a,b,c,d);
++ ROUND512(d,e,f,g,h,a,b,c);
++ ROUND512(c,d,e,f,g,h,a,b);
++ ROUND512(b,c,d,e,f,g,h,a);
++ } while (j < 80);
++
++ /* Compute the current intermediate hash value */
++ context->state[0] += a;
++ context->state[1] += b;
++ context->state[2] += c;
++ context->state[3] += d;
++ context->state[4] += e;
++ context->state[5] += f;
++ context->state[6] += g;
++ context->state[7] += h;
++
++ /* Clean up */
++ a = b = c = d = e = f = g = h = T1 = 0;
++}
++
++#else /* SHA2_UNROLL_TRANSFORM */
++
++void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
++ sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
++ sha2_word64 T1 = 0, T2 = 0, *W512 = (sha2_word64*)context->buffer;
++ int j;
++
++ /* Initialize registers with the prev. intermediate value */
++ a = context->state[0];
++ b = context->state[1];
++ c = context->state[2];
++ d = context->state[3];
++ e = context->state[4];
++ f = context->state[5];
++ g = context->state[6];
++ h = context->state[7];
++
++ j = 0;
++ do {
++#if BYTE_ORDER == LITTLE_ENDIAN
++ /* Convert TO host byte order */
++ REVERSE64(*data++, W512[j]);
++ /* Apply the SHA-512 compression function to update a..h */
++ T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
++#else /* BYTE_ORDER == LITTLE_ENDIAN */
++ /* Apply the SHA-512 compression function to update a..h with copy */
++ T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++);
++#endif /* BYTE_ORDER == LITTLE_ENDIAN */
++ T2 = Sigma0_512(a) + Maj(a, b, c);
++ h = g;
++ g = f;
++ f = e;
++ e = d + T1;
++ d = c;
++ c = b;
++ b = a;
++ a = T1 + T2;
++
++ j++;
++ } while (j < 16);
++
++ do {
++ /* Part of the message block expansion: */
++ s0 = W512[(j+1)&0x0f];
++ s0 = sigma0_512(s0);
++ s1 = W512[(j+14)&0x0f];
++ s1 = sigma1_512(s1);
++
++ /* Apply the SHA-512 compression function to update a..h */
++ T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] +
++ (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0);
++ T2 = Sigma0_512(a) + Maj(a, b, c);
++ h = g;
++ g = f;
++ f = e;
++ e = d + T1;
++ d = c;
++ c = b;
++ b = a;
++ a = T1 + T2;
++
++ j++;
++ } while (j < 80);
++
++ /* Compute the current intermediate hash value */
++ context->state[0] += a;
++ context->state[1] += b;
++ context->state[2] += c;
++ context->state[3] += d;
++ context->state[4] += e;
++ context->state[5] += f;
++ context->state[6] += g;
++ context->state[7] += h;
++
++ /* Clean up */
++ a = b = c = d = e = f = g = h = T1 = T2 = 0;
++}
++
++#endif /* SHA2_UNROLL_TRANSFORM */
++
++void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
++ unsigned int freespace, usedspace;
++
++ if (len == 0) {
++ /* Calling with no data is valid - we do nothing */
++ return;
++ }
++
++ /* Sanity check: */
++ assert(context != (SHA512_CTX*)0 && data != (sha2_byte*)0);
++
++ usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH;
++ if (usedspace > 0) {
++ /* Calculate how much free space is available in the buffer */
++ freespace = SHA512_BLOCK_LENGTH - usedspace;
++
++ if (len >= freespace) {
++ /* Fill the buffer completely and process it */
++ bcopy(data, &context->buffer[usedspace], freespace);
++ ADDINC128(context->bitcount, freespace << 3);
++ len -= freespace;
++ data += freespace;
++ SHA512_Transform(context, (sha2_word64*)context->buffer);
++ } else {
++ /* The buffer is not yet full */
++ bcopy(data, &context->buffer[usedspace], len);
++ ADDINC128(context->bitcount, len << 3);
++ /* Clean up: */
++ usedspace = freespace = 0;
++ return;
++ }
++ }
++ while (len >= SHA512_BLOCK_LENGTH) {
++ /* Process as many complete blocks as we can */
++ SHA512_Transform(context, (const sha2_word64*)data);
++ ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3);
++ len -= SHA512_BLOCK_LENGTH;
++ data += SHA512_BLOCK_LENGTH;
++ }
++ if (len > 0) {
++ /* There's left-overs, so save 'em */
++ bcopy(data, context->buffer, len);
++ ADDINC128(context->bitcount, len << 3);
++ }
++ /* Clean up: */
++ usedspace = freespace = 0;
++}
++
++void SHA512_Last(SHA512_CTX* context) {
++ unsigned int usedspace;
++
++ usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH;
++#if BYTE_ORDER == LITTLE_ENDIAN
++ /* Convert FROM host byte order */
++ REVERSE64(context->bitcount[0],context->bitcount[0]);
++ REVERSE64(context->bitcount[1],context->bitcount[1]);
++#endif
++ if (usedspace > 0) {
++ /* Begin padding with a 1 bit: */
++ context->buffer[usedspace++] = 0x80;
++
++ if (usedspace <= SHA512_SHORT_BLOCK_LENGTH) {
++ /* Set-up for the last transform: */
++ bzero(&context->buffer[usedspace], SHA512_SHORT_BLOCK_LENGTH - usedspace);
++ } else {
++ if (usedspace < SHA512_BLOCK_LENGTH) {
++ bzero(&context->buffer[usedspace], SHA512_BLOCK_LENGTH - usedspace);
++ }
++ /* Do second-to-last transform: */
++ SHA512_Transform(context, (sha2_word64*)context->buffer);
++
++ /* And set-up for the last transform: */
++ bzero(context->buffer, SHA512_BLOCK_LENGTH - 2);
++ }
++ } else {
++ /* Prepare for final transform: */
++ bzero(context->buffer, SHA512_SHORT_BLOCK_LENGTH);
++
++ /* Begin padding with a 1 bit: */
++ *context->buffer = 0x80;
++ }
++ /* Store the length of input data (in bits): */
++ *(sha2_word64*)&context->buffer[SHA512_SHORT_BLOCK_LENGTH] = context->bitcount[1];
++ *(sha2_word64*)&context->buffer[SHA512_SHORT_BLOCK_LENGTH+8] = context->bitcount[0];
++
++ /* Final transform: */
++ SHA512_Transform(context, (sha2_word64*)context->buffer);
++}
++
++void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) {
++ sha2_word64 *d = (sha2_word64*)digest;
++
++ /* Sanity check: */
++ assert(context != (SHA512_CTX*)0);
++
++ /* If no digest buffer is passed, we don't bother doing this: */
++ if (digest != (sha2_byte*)0) {
++ SHA512_Last(context);
++
++ /* Save the hash data for output: */
++#if BYTE_ORDER == LITTLE_ENDIAN
++ {
++ /* Convert TO host byte order */
++ int j;
++ for (j = 0; j < 8; j++) {
++ REVERSE64(context->state[j],context->state[j]);
++ *d++ = context->state[j];
++ }
++ }
++#else
++ bcopy(context->state, d, SHA512_DIGEST_LENGTH);
++#endif
++ }
++
++ /* Zero out state data */
++ bzero(context, sizeof(*context));
++}
++
++char *SHA512_End(SHA512_CTX* context, char buffer[]) {
++ sha2_byte digest[SHA512_DIGEST_LENGTH], *d = digest;
++ int i;
++
++ /* Sanity check: */
++ assert(context != (SHA512_CTX*)0);
++
++ if (buffer != (char*)0) {
++ SHA512_Final(digest, context);
++
++ for (i = 0; i < SHA512_DIGEST_LENGTH; i++) {
++ *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
++ *buffer++ = sha2_hex_digits[*d & 0x0f];
++ d++;
++ }
++ *buffer = (char)0;
++ } else {
++ bzero(context, sizeof(*context));
++ }
++ bzero(digest, SHA512_DIGEST_LENGTH);
++ return buffer;
++}
++
++char* SHA512_Data(const sha2_byte* data, size_t len, char digest[SHA512_DIGEST_STRING_LENGTH]) {
++ SHA512_CTX context;
++
++ SHA512_Init(&context);
++ SHA512_Update(&context, data, len);
++ return SHA512_End(&context, digest);
++}
++
++
++/*** SHA-384: *********************************************************/
++void SHA384_Init(SHA384_CTX* context) {
++ if (context == (SHA384_CTX*)0) {
++ return;
++ }
++ bcopy(sha384_initial_hash_value, context->state, SHA512_DIGEST_LENGTH);
++ bzero(context->buffer, SHA384_BLOCK_LENGTH);
++ context->bitcount[0] = context->bitcount[1] = 0;
++}
++
++void SHA384_Update(SHA384_CTX* context, const sha2_byte* data, size_t len) {
++ SHA512_Update((SHA512_CTX*)context, data, len);
++}
++
++void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) {
++ sha2_word64 *d = (sha2_word64*)digest;
++
++ /* Sanity check: */
++ assert(context != (SHA384_CTX*)0);
++
++ /* If no digest buffer is passed, we don't bother doing this: */
++ if (digest != (sha2_byte*)0) {
++ SHA512_Last((SHA512_CTX*)context);
++
++ /* Save the hash data for output: */
++#if BYTE_ORDER == LITTLE_ENDIAN
++ {
++ /* Convert TO host byte order */
++ int j;
++ for (j = 0; j < 6; j++) {
++ REVERSE64(context->state[j],context->state[j]);
++ *d++ = context->state[j];
++ }
++ }
++#else
++ bcopy(context->state, d, SHA384_DIGEST_LENGTH);
++#endif
++ }
++
++ /* Zero out state data */
++ bzero(context, sizeof(*context));
++}
++
++char *SHA384_End(SHA384_CTX* context, char buffer[]) {
++ sha2_byte digest[SHA384_DIGEST_LENGTH], *d = digest;
++ int i;
++
++ /* Sanity check: */
++ assert(context != (SHA384_CTX*)0);
++
++ if (buffer != (char*)0) {
++ SHA384_Final(digest, context);
++
++ for (i = 0; i < SHA384_DIGEST_LENGTH; i++) {
++ *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
++ *buffer++ = sha2_hex_digits[*d & 0x0f];
++ d++;
++ }
++ *buffer = (char)0;
++ } else {
++ bzero(context, sizeof(*context));
++ }
++ bzero(digest, SHA384_DIGEST_LENGTH);
++ return buffer;
++}
++
++char* SHA384_Data(const sha2_byte* data, size_t len, char digest[SHA384_DIGEST_STRING_LENGTH]) {
++ SHA384_CTX context;
++
++ SHA384_Init(&context);
++ SHA384_Update(&context, data, len);
++ return SHA384_End(&context, digest);
++}
++
+--- /dev/null
++++ b/sys/crypto/sha2/sha2.h
+@@ -0,0 +1,141 @@
++/* $FreeBSD$ */
++/* $KAME: sha2.h,v 1.3 2001/03/12 08:27:48 itojun Exp $ */
++
++/*
++ * sha2.h
++ *
++ * Version 1.0.0beta1
++ *
++ * Written by Aaron D. Gifford <me at aarongifford.com>
++ *
++ * Copyright 2000 Aaron D. Gifford. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the copyright holder nor the names of contributors
++ * may be used to endorse or promote products derived from this software
++ * without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) AND CONTRIBUTOR(S) ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR(S) OR CONTRIBUTOR(S) BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ */
++
++#ifndef __SHA2_H__
++#define __SHA2_H__
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++/*** SHA-256/384/512 Various Length Definitions ***********************/
++#define SHA256_BLOCK_LENGTH 64
++#define SHA256_DIGEST_LENGTH 32
++#define SHA256_DIGEST_STRING_LENGTH (SHA256_DIGEST_LENGTH * 2 + 1)
++#define SHA384_BLOCK_LENGTH 128
++#define SHA384_DIGEST_LENGTH 48
++#define SHA384_DIGEST_STRING_LENGTH (SHA384_DIGEST_LENGTH * 2 + 1)
++#define SHA512_BLOCK_LENGTH 128
++#define SHA512_DIGEST_LENGTH 64
++#define SHA512_DIGEST_STRING_LENGTH (SHA512_DIGEST_LENGTH * 2 + 1)
++
++
++/*** SHA-256/384/512 Context Structures *******************************/
++/* NOTE: If your architecture does not define either u_intXX_t types or
++ * uintXX_t (from inttypes.h), you may need to define things by hand
++ * for your system:
++ */
++#if 0
++typedef unsigned char u_int8_t; /* 1-byte (8-bits) */
++typedef unsigned int u_int32_t; /* 4-bytes (32-bits) */
++typedef unsigned long long u_int64_t; /* 8-bytes (64-bits) */
++#endif
++/*
++ * Most BSD systems already define u_intXX_t types, as does Linux.
++ * Some systems, however, like Compaq's Tru64 Unix instead can use
++ * uintXX_t types defined by very recent ANSI C standards and included
++ * in the file:
++ *
++ * #include <inttypes.h>
++ *
++ * If you choose to use <inttypes.h> then please define:
++ *
++ * #define SHA2_USE_INTTYPES_H
++ *
++ * Or on the command line during compile:
++ *
++ * cc -DSHA2_USE_INTTYPES_H ...
++ */
++#if 0 /*def SHA2_USE_INTTYPES_H*/
++
++typedef struct _SHA256_CTX {
++ uint32_t state[8];
++ uint64_t bitcount;
++ uint8_t buffer[SHA256_BLOCK_LENGTH];
++} SHA256_CTX;
++typedef struct _SHA512_CTX {
++ uint64_t state[8];
++ uint64_t bitcount[2];
++ uint8_t buffer[SHA512_BLOCK_LENGTH];
++} SHA512_CTX;
++
++#else /* SHA2_USE_INTTYPES_H */
++
++typedef struct _SHA256_CTX {
++ u_int32_t state[8];
++ u_int64_t bitcount;
++ u_int8_t buffer[SHA256_BLOCK_LENGTH];
++} SHA256_CTX;
++typedef struct _SHA512_CTX {
++ u_int64_t state[8];
++ u_int64_t bitcount[2];
++ u_int8_t buffer[SHA512_BLOCK_LENGTH];
++} SHA512_CTX;
++
++#endif /* SHA2_USE_INTTYPES_H */
++
++typedef SHA512_CTX SHA384_CTX;
++
++
++/*** SHA-256/384/512 Function Prototypes ******************************/
++
++void SHA256_Init(SHA256_CTX *);
++void SHA256_Update(SHA256_CTX*, const u_int8_t*, size_t);
++void SHA256_Final(u_int8_t[SHA256_DIGEST_LENGTH], SHA256_CTX*);
++char* SHA256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]);
++char* SHA256_Data(const u_int8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]);
++
++void SHA384_Init(SHA384_CTX*);
++void SHA384_Update(SHA384_CTX*, const u_int8_t*, size_t);
++void SHA384_Final(u_int8_t[SHA384_DIGEST_LENGTH], SHA384_CTX*);
++char* SHA384_End(SHA384_CTX*, char[SHA384_DIGEST_STRING_LENGTH]);
++char* SHA384_Data(const u_int8_t*, size_t, char[SHA384_DIGEST_STRING_LENGTH]);
++
++void SHA512_Init(SHA512_CTX*);
++void SHA512_Update(SHA512_CTX*, const u_int8_t*, size_t);
++void SHA512_Final(u_int8_t[SHA512_DIGEST_LENGTH], SHA512_CTX*);
++char* SHA512_End(SHA512_CTX*, char[SHA512_DIGEST_STRING_LENGTH]);
++char* SHA512_Data(const u_int8_t*, size_t, char[SHA512_DIGEST_STRING_LENGTH]);
++
++#ifdef __cplusplus
++}
++#endif /* __cplusplus */
++
++#endif /* __SHA2_H__ */
++
+--- /dev/null
++++ b/sys/geom/eli/g_eli.h
+@@ -0,0 +1,542 @@
++/*-
++ * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * $FreeBSD$
++ */
++
++#ifndef _G_ELI_H_
++#define _G_ELI_H_
++
++#include <sys/endian.h>
++#include <sys/errno.h>
++#include <sys/malloc.h>
++#include <crypto/sha2/sha2.h>
++#include <opencrypto/cryptodev.h>
++#ifdef _KERNEL
++#include <sys/bio.h>
++#include <sys/libkern.h>
++#include <geom/geom.h>
++#else
++#include <stdio.h>
++#include <string.h>
++#include <strings.h>
++#endif
++#ifndef _OpenSSL_
++#include <sys/md5.h>
++#endif
++
++#define G_ELI_CLASS_NAME "ELI"
++#define G_ELI_MAGIC "GEOM::ELI"
++#define G_ELI_SUFFIX ".eli"
++
++/*
++ * Version history:
++ * 0 - Initial version number.
++ * 1 - Added data authentication support (md_aalgo field and
++ * G_ELI_FLAG_AUTH flag).
++ * 2 - Added G_ELI_FLAG_READONLY.
++ * 3 - Added 'configure' subcommand.
++ * 4 - IV is generated from offset converted to little-endian
++ * (flag G_ELI_FLAG_NATIVE_BYTE_ORDER will be set for older versions).
++ * 5 - Added multiple encryption keys and AES-XTS support.
++ */
++#define G_ELI_VERSION 5
++
++/* ON DISK FLAGS. */
++/* Use random, onetime keys. */
++#define G_ELI_FLAG_ONETIME 0x00000001
++/* Ask for the passphrase from the kernel, before mounting root. */
++#define G_ELI_FLAG_BOOT 0x00000002
++/* Detach on last close, if we were open for writing. */
++#define G_ELI_FLAG_WO_DETACH 0x00000004
++/* Detach on last close. */
++#define G_ELI_FLAG_RW_DETACH 0x00000008
++/* Provide data authentication. */
++#define G_ELI_FLAG_AUTH 0x00000010
++/* Provider is read-only, we should deny all write attempts. */
++#define G_ELI_FLAG_RO 0x00000020
++/* RUNTIME FLAGS. */
++/* Provider was open for writing. */
++#define G_ELI_FLAG_WOPEN 0x00010000
++/* Destroy device. */
++#define G_ELI_FLAG_DESTROY 0x00020000
++/* Provider uses native byte-order for IV generation. */
++#define G_ELI_FLAG_NATIVE_BYTE_ORDER 0x00040000
++/* Provider uses single encryption key. */
++#define G_ELI_FLAG_SINGLE_KEY 0x00080000
++/* Device suspended. */
++#define G_ELI_FLAG_SUSPEND 0x00100000
++
++#define G_ELI_NEW_BIO 255
++
++#define SHA512_MDLEN 64
++#define G_ELI_AUTH_SECKEYLEN SHA256_DIGEST_LENGTH
++
++#define G_ELI_MAXMKEYS 2
++#define G_ELI_MAXKEYLEN 64
++#define G_ELI_USERKEYLEN G_ELI_MAXKEYLEN
++#define G_ELI_DATAKEYLEN G_ELI_MAXKEYLEN
++#define G_ELI_AUTHKEYLEN G_ELI_MAXKEYLEN
++#define G_ELI_IVKEYLEN G_ELI_MAXKEYLEN
++#define G_ELI_SALTLEN 64
++#define G_ELI_DATAIVKEYLEN (G_ELI_DATAKEYLEN + G_ELI_IVKEYLEN)
++/* Data-Key, IV-Key, HMAC_SHA512(Derived-Key, Data-Key+IV-Key) */
++#define G_ELI_MKEYLEN (G_ELI_DATAIVKEYLEN + SHA512_MDLEN)
++#define G_ELI_OVERWRITES 5
++/* Switch data encryption key every 2^20 blocks. */
++#define G_ELI_KEY_SHIFT 20
++
++#ifdef _KERNEL
++extern int g_eli_debug;
++extern u_int g_eli_overwrites;
++extern u_int g_eli_batch;
++
++#define G_ELI_CRYPTO_UNKNOWN 0
++#define G_ELI_CRYPTO_HW 1
++#define G_ELI_CRYPTO_SW 2
++
++#define G_ELI_DEBUG(lvl, ...) do { \
++ if (g_eli_debug >= (lvl)) { \
++ printf("GEOM_ELI"); \
++ if (g_eli_debug > 0) \
++ printf("[%u]", lvl); \
++ printf(": "); \
++ printf(__VA_ARGS__); \
++ printf("\n"); \
++ } \
++} while (0)
++#define G_ELI_LOGREQ(lvl, bp, ...) do { \
++ if (g_eli_debug >= (lvl)) { \
++ printf("GEOM_ELI"); \
++ if (g_eli_debug > 0) \
++ printf("[%u]", lvl); \
++ printf(": "); \
++ printf(__VA_ARGS__); \
++ printf(" "); \
++ g_print_bio(bp); \
++ printf("\n"); \
++ } \
++} while (0)
++
++struct g_eli_worker {
++ struct g_eli_softc *w_softc;
++ struct proc *w_proc;
++ u_int w_number;
++ uint64_t w_sid;
++ boolean_t w_active;
++ LIST_ENTRY(g_eli_worker) w_next;
++};
++
++struct g_eli_softc {
++ struct g_geom *sc_geom;
++ u_int sc_crypto;
++ uint8_t sc_mkey[G_ELI_DATAIVKEYLEN];
++ uint8_t **sc_ekeys;
++ u_int sc_nekeys;
++ u_int sc_ealgo;
++ u_int sc_ekeylen;
++ uint8_t sc_akey[G_ELI_AUTHKEYLEN];
++ u_int sc_aalgo;
++ u_int sc_akeylen;
++ u_int sc_alen;
++ SHA256_CTX sc_akeyctx;
++ uint8_t sc_ivkey[G_ELI_IVKEYLEN];
++ SHA256_CTX sc_ivctx;
++ int sc_nkey;
++ uint32_t sc_flags;
++ int sc_inflight;
++ off_t sc_mediasize;
++ size_t sc_sectorsize;
++ u_int sc_bytes_per_sector;
++ u_int sc_data_per_sector;
++
++ /* Only for software cryptography. */
++ struct bio_queue_head sc_queue;
++ struct mtx sc_queue_mtx;
++ LIST_HEAD(, g_eli_worker) sc_workers;
++};
++#define sc_name sc_geom->name
++#endif /* _KERNEL */
++
++struct g_eli_metadata {
++ char md_magic[16]; /* Magic value. */
++ uint32_t md_version; /* Version number. */
++ uint32_t md_flags; /* Additional flags. */
++ uint16_t md_ealgo; /* Encryption algorithm. */
++ uint16_t md_keylen; /* Key length. */
++ uint16_t md_aalgo; /* Authentication algorithm. */
++ uint64_t md_provsize; /* Provider's size. */
++ uint32_t md_sectorsize; /* Sector size. */
++ uint8_t md_keys; /* Available keys. */
++ int32_t md_iterations; /* Number of iterations for PKCS#5v2. */
++ uint8_t md_salt[G_ELI_SALTLEN]; /* Salt. */
++ /* Encrypted master key (IV-key, Data-key, HMAC). */
++ uint8_t md_mkeys[G_ELI_MAXMKEYS * G_ELI_MKEYLEN];
++ u_char md_hash[16]; /* MD5 hash. */
++} __packed;
++#ifndef _OpenSSL_
++static __inline void
++eli_metadata_encode(struct g_eli_metadata *md, u_char *data)
++{
++ MD5_CTX ctx;
++ u_char *p;
++
++ p = data;
++ bcopy(md->md_magic, p, sizeof(md->md_magic)); p += sizeof(md->md_magic);
++ le32enc(p, md->md_version); p += sizeof(md->md_version);
++ le32enc(p, md->md_flags); p += sizeof(md->md_flags);
++ le16enc(p, md->md_ealgo); p += sizeof(md->md_ealgo);
++ le16enc(p, md->md_keylen); p += sizeof(md->md_keylen);
++ le16enc(p, md->md_aalgo); p += sizeof(md->md_aalgo);
++ le64enc(p, md->md_provsize); p += sizeof(md->md_provsize);
++ le32enc(p, md->md_sectorsize); p += sizeof(md->md_sectorsize);
++ *p = md->md_keys; p += sizeof(md->md_keys);
++ le32enc(p, md->md_iterations); p += sizeof(md->md_iterations);
++ bcopy(md->md_salt, p, sizeof(md->md_salt)); p += sizeof(md->md_salt);
++ bcopy(md->md_mkeys, p, sizeof(md->md_mkeys)); p += sizeof(md->md_mkeys);
++ MD5Init(&ctx);
++ MD5Update(&ctx, data, p - data);
++ MD5Final(md->md_hash, &ctx);
++ bcopy(md->md_hash, p, sizeof(md->md_hash));
++}
++static __inline int
++eli_metadata_decode_v0(const u_char *data, struct g_eli_metadata *md)
++{
++ MD5_CTX ctx;
++ const u_char *p;
++
++ p = data + sizeof(md->md_magic) + sizeof(md->md_version);
++ md->md_flags = le32dec(p); p += sizeof(md->md_flags);
++ md->md_ealgo = le16dec(p); p += sizeof(md->md_ealgo);
++ md->md_keylen = le16dec(p); p += sizeof(md->md_keylen);
++ md->md_provsize = le64dec(p); p += sizeof(md->md_provsize);
++ md->md_sectorsize = le32dec(p); p += sizeof(md->md_sectorsize);
++ md->md_keys = *p; p += sizeof(md->md_keys);
++ md->md_iterations = le32dec(p); p += sizeof(md->md_iterations);
++ bcopy(p, md->md_salt, sizeof(md->md_salt)); p += sizeof(md->md_salt);
++ bcopy(p, md->md_mkeys, sizeof(md->md_mkeys)); p += sizeof(md->md_mkeys);
++ MD5Init(&ctx);
++ MD5Update(&ctx, data, p - data);
++ MD5Final(md->md_hash, &ctx);
++ if (bcmp(md->md_hash, p, 16) != 0)
++ return (EINVAL);
++ return (0);
++}
++
++static __inline int
++eli_metadata_decode_v1v2v3v4v5(const u_char *data, struct g_eli_metadata *md)
++{
++ MD5_CTX ctx;
++ const u_char *p;
++
++ p = data + sizeof(md->md_magic) + sizeof(md->md_version);
++ md->md_flags = le32dec(p); p += sizeof(md->md_flags);
++ md->md_ealgo = le16dec(p); p += sizeof(md->md_ealgo);
++ md->md_keylen = le16dec(p); p += sizeof(md->md_keylen);
++ md->md_aalgo = le16dec(p); p += sizeof(md->md_aalgo);
++ md->md_provsize = le64dec(p); p += sizeof(md->md_provsize);
++ md->md_sectorsize = le32dec(p); p += sizeof(md->md_sectorsize);
++ md->md_keys = *p; p += sizeof(md->md_keys);
++ md->md_iterations = le32dec(p); p += sizeof(md->md_iterations);
++ bcopy(p, md->md_salt, sizeof(md->md_salt)); p += sizeof(md->md_salt);
++ bcopy(p, md->md_mkeys, sizeof(md->md_mkeys)); p += sizeof(md->md_mkeys);
++ MD5Init(&ctx);
++ MD5Update(&ctx, data, p - data);
++ MD5Final(md->md_hash, &ctx);
++ if (bcmp(md->md_hash, p, 16) != 0)
++ return (EINVAL);
++ return (0);
++}
++static __inline int
++eli_metadata_decode(const u_char *data, struct g_eli_metadata *md)
++{
++ int error;
++
++ bcopy(data, md->md_magic, sizeof(md->md_magic));
++ md->md_version = le32dec(data + sizeof(md->md_magic));
++ switch (md->md_version) {
++ case 0:
++ error = eli_metadata_decode_v0(data, md);
++ break;
++ case 1:
++ case 2:
++ case 3:
++ case 4:
++ case 5:
++ error = eli_metadata_decode_v1v2v3v4v5(data, md);
++ break;
++ default:
++ error = EINVAL;
++ break;
++ }
++ return (error);
++}
++#endif /* !_OpenSSL_ */
++
++static __inline u_int
++g_eli_str2ealgo(const char *name)
++{
++
++ if (strcasecmp("null", name) == 0)
++ return (CRYPTO_NULL_CBC);
++ else if (strcasecmp("null-cbc", name) == 0)
++ return (CRYPTO_NULL_CBC);
++ else if (strcasecmp("aes", name) == 0)
++ return (CRYPTO_AES_XTS);
++ else if (strcasecmp("aes-cbc", name) == 0)
++ return (CRYPTO_AES_CBC);
++ else if (strcasecmp("aes-xts", name) == 0)
++ return (CRYPTO_AES_XTS);
++ else if (strcasecmp("blowfish", name) == 0)
++ return (CRYPTO_BLF_CBC);
++ else if (strcasecmp("blowfish-cbc", name) == 0)
++ return (CRYPTO_BLF_CBC);
++ else if (strcasecmp("camellia", name) == 0)
++ return (CRYPTO_CAMELLIA_CBC);
++ else if (strcasecmp("camellia-cbc", name) == 0)
++ return (CRYPTO_CAMELLIA_CBC);
++ else if (strcasecmp("3des", name) == 0)
++ return (CRYPTO_3DES_CBC);
++ else if (strcasecmp("3des-cbc", name) == 0)
++ return (CRYPTO_3DES_CBC);
++ return (CRYPTO_ALGORITHM_MIN - 1);
++}
++
++static __inline u_int
++g_eli_str2aalgo(const char *name)
++{
++
++ if (strcasecmp("hmac/md5", name) == 0)
++ return (CRYPTO_MD5_HMAC);
++ else if (strcasecmp("hmac/sha1", name) == 0)
++ return (CRYPTO_SHA1_HMAC);
++ else if (strcasecmp("hmac/ripemd160", name) == 0)
++ return (CRYPTO_RIPEMD160_HMAC);
++ else if (strcasecmp("hmac/sha256", name) == 0)
++ return (CRYPTO_SHA2_256_HMAC);
++ else if (strcasecmp("hmac/sha384", name) == 0)
++ return (CRYPTO_SHA2_384_HMAC);
++ else if (strcasecmp("hmac/sha512", name) == 0)
++ return (CRYPTO_SHA2_512_HMAC);
++ return (CRYPTO_ALGORITHM_MIN - 1);
++}
++
++static __inline const char *
++g_eli_algo2str(u_int algo)
++{
++
++ switch (algo) {
++ case CRYPTO_NULL_CBC:
++ return ("NULL");
++ case CRYPTO_AES_CBC:
++ return ("AES-CBC");
++ case CRYPTO_AES_XTS:
++ return ("AES-XTS");
++ case CRYPTO_BLF_CBC:
++ return ("Blowfish-CBC");
++ case CRYPTO_CAMELLIA_CBC:
++ return ("CAMELLIA-CBC");
++ case CRYPTO_3DES_CBC:
++ return ("3DES-CBC");
++ case CRYPTO_MD5_HMAC:
++ return ("HMAC/MD5");
++ case CRYPTO_SHA1_HMAC:
++ return ("HMAC/SHA1");
++ case CRYPTO_RIPEMD160_HMAC:
++ return ("HMAC/RIPEMD160");
++ case CRYPTO_SHA2_256_HMAC:
++ return ("HMAC/SHA256");
++ case CRYPTO_SHA2_384_HMAC:
++ return ("HMAC/SHA384");
++ case CRYPTO_SHA2_512_HMAC:
++ return ("HMAC/SHA512");
++ }
++ return ("unknown");
++}
++
++static __inline void
++eli_metadata_dump(const struct g_eli_metadata *md)
++{
++ static const char hex[] = "0123456789abcdef";
++ char str[sizeof(md->md_mkeys) * 2 + 1];
++ u_int i;
++
++ printf(" magic: %s\n", md->md_magic);
++ printf(" version: %u\n", (u_int)md->md_version);
++ printf(" flags: 0x%x\n", (u_int)md->md_flags);
++ printf(" ealgo: %s\n", g_eli_algo2str(md->md_ealgo));
++ printf(" keylen: %u\n", (u_int)md->md_keylen);
++ if (md->md_flags & G_ELI_FLAG_AUTH)
++ printf(" aalgo: %s\n", g_eli_algo2str(md->md_aalgo));
++ printf(" provsize: %ju\n", (uintmax_t)md->md_provsize);
++ printf("sectorsize: %u\n", (u_int)md->md_sectorsize);
++ printf(" keys: 0x%02x\n", (u_int)md->md_keys);
++ printf("iterations: %u\n", (u_int)md->md_iterations);
++ bzero(str, sizeof(str));
++ for (i = 0; i < sizeof(md->md_salt); i++) {
++ str[i * 2] = hex[md->md_salt[i] >> 4];
++ str[i * 2 + 1] = hex[md->md_salt[i] & 0x0f];
++ }
++ printf(" Salt: %s\n", str);
++ bzero(str, sizeof(str));
++ for (i = 0; i < sizeof(md->md_mkeys); i++) {
++ str[i * 2] = hex[md->md_mkeys[i] >> 4];
++ str[i * 2 + 1] = hex[md->md_mkeys[i] & 0x0f];
++ }
++ printf("Master Key: %s\n", str);
++ bzero(str, sizeof(str));
++ for (i = 0; i < 16; i++) {
++ str[i * 2] = hex[md->md_hash[i] >> 4];
++ str[i * 2 + 1] = hex[md->md_hash[i] & 0x0f];
++ }
++ printf(" MD5 hash: %s\n", str);
++}
++
++static __inline u_int
++g_eli_keylen(u_int algo, u_int keylen)
++{
++
++ switch (algo) {
++ case CRYPTO_NULL_CBC:
++ if (keylen == 0)
++ keylen = 64 * 8;
++ else {
++ if (keylen > 64 * 8)
++ keylen = 0;
++ }
++ return (keylen);
++ case CRYPTO_AES_CBC:
++ case CRYPTO_CAMELLIA_CBC:
++ switch (keylen) {
++ case 0:
++ return (128);
++ case 128:
++ case 192:
++ case 256:
++ return (keylen);
++ default:
++ return (0);
++ }
++ case CRYPTO_AES_XTS:
++ switch (keylen) {
++ case 0:
++ return (128);
++ case 128:
++ case 256:
++ return (keylen);
++ default:
++ return (0);
++ }
++ case CRYPTO_BLF_CBC:
++ if (keylen == 0)
++ return (128);
++ if (keylen < 128 || keylen > 448)
++ return (0);
++ if ((keylen % 32) != 0)
++ return (0);
++ return (keylen);
++ case CRYPTO_3DES_CBC:
++ if (keylen == 0 || keylen == 192)
++ return (192);
++ return (0);
++ default:
++ return (0);
++ }
++}
++
++static __inline u_int
++g_eli_hashlen(u_int algo)
++{
++
++ switch (algo) {
++ case CRYPTO_MD5_HMAC:
++ return (16);
++ case CRYPTO_SHA1_HMAC:
++ return (20);
++ case CRYPTO_RIPEMD160_HMAC:
++ return (20);
++ case CRYPTO_SHA2_256_HMAC:
++ return (32);
++ case CRYPTO_SHA2_384_HMAC:
++ return (48);
++ case CRYPTO_SHA2_512_HMAC:
++ return (64);
++ }
++ return (0);
++}
++
++#ifdef _KERNEL
++int g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
++ struct g_eli_metadata *md);
++struct g_geom *g_eli_create(struct gctl_req *req, struct g_class *mp,
++ struct g_provider *bpp, const struct g_eli_metadata *md,
++ const u_char *mkey, int nkey);
++int g_eli_destroy(struct g_eli_softc *sc, boolean_t force);
++
++int g_eli_access(struct g_provider *pp, int dr, int dw, int de);
++void g_eli_config(struct gctl_req *req, struct g_class *mp, const char *verb);
++
++void g_eli_read_done(struct bio *bp);
++void g_eli_write_done(struct bio *bp);
++int g_eli_crypto_rerun(struct cryptop *crp);
++uint8_t *g_eli_crypto_key(struct g_eli_softc *sc, off_t offset,
++ size_t blocksize);
++void g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
++ size_t size);
++
++void g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker);
++void g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp);
++
++void g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp);
++void g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp);
++#endif
++
++void g_eli_mkey_hmac(unsigned char *mkey, const unsigned char *key);
++int g_eli_mkey_decrypt(const struct g_eli_metadata *md,
++ const unsigned char *key, unsigned char *mkey, unsigned *nkeyp);
++int g_eli_mkey_encrypt(unsigned algo, const unsigned char *key, unsigned keylen,
++ unsigned char *mkey);
++#ifdef _KERNEL
++void g_eli_mkey_propagate(struct g_eli_softc *sc, const unsigned char *mkey);
++#endif
++
++int g_eli_crypto_encrypt(u_int algo, u_char *data, size_t datasize,
++ const u_char *key, size_t keysize);
++int g_eli_crypto_decrypt(u_int algo, u_char *data, size_t datasize,
++ const u_char *key, size_t keysize);
++
++struct hmac_ctx {
++ SHA512_CTX shactx;
++ u_char k_opad[128];
++};
++
++void g_eli_crypto_hmac_init(struct hmac_ctx *ctx, const uint8_t *hkey,
++ size_t hkeylen);
++void g_eli_crypto_hmac_update(struct hmac_ctx *ctx, const uint8_t *data,
++ size_t datasize);
++void g_eli_crypto_hmac_final(struct hmac_ctx *ctx, uint8_t *md, size_t mdsize);
++void g_eli_crypto_hmac(const uint8_t *hkey, size_t hkeysize,
++ const uint8_t *data, size_t datasize, uint8_t *md, size_t mdsize);
++#endif /* !_G_ELI_H_ */
+--- /dev/null
++++ b/sys/geom/eli/pkcs5v2.h
+@@ -0,0 +1,36 @@
++/*-
++ * Copyright (c) 2005 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * $FreeBSD$
++ */
++
++#ifndef _PKCS5V2_H_
++#define _PKCS5V2_H_
++void pkcs5v2_genkey(uint8_t *key, unsigned keylen, const uint8_t *salt,
++ size_t saltsize, const char *passphrase, u_int iterations);
++#ifndef _KERNEL
++int pkcs5v2_calculate(int usecs);
++#endif
++#endif /* !_PKCS5V2_H_ */
+--- /dev/null
++++ b/sys/geom/eli/g_eli_key.c
+@@ -0,0 +1,293 @@
++/*-
++ * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/param.h>
++#ifdef _KERNEL
++#include <sys/malloc.h>
++#include <sys/systm.h>
++#include <geom/geom.h>
++#else
++#include <stdio.h>
++#include <stdint.h>
++#include <stdlib.h>
++#include <string.h>
++#include <strings.h>
++#include <errno.h>
++#endif
++
++#include <geom/eli/g_eli.h>
++
++#ifdef _KERNEL
++MALLOC_DECLARE(M_ELI);
++#endif
++
++/*
++ * Verify if the given 'key' is correct.
++ * Return 1 if it is correct and 0 otherwise.
++ */
++static int
++g_eli_mkey_verify(const unsigned char *mkey, const unsigned char *key)
++{
++ const unsigned char *odhmac; /* On-disk HMAC. */
++ unsigned char chmac[SHA512_MDLEN]; /* Calculated HMAC. */
++ unsigned char hmkey[SHA512_MDLEN]; /* Key for HMAC. */
++
++ /*
++ * The key for HMAC calculations is: hmkey = HMAC_SHA512(Derived-Key, 0)
++ */
++ g_eli_crypto_hmac(key, G_ELI_USERKEYLEN, "\x00", 1, hmkey, 0);
++
++ odhmac = mkey + G_ELI_DATAIVKEYLEN;
++
++ /* Calculate HMAC from Data-Key and IV-Key. */
++ g_eli_crypto_hmac(hmkey, sizeof(hmkey), mkey, G_ELI_DATAIVKEYLEN,
++ chmac, 0);
++
++ bzero(hmkey, sizeof(hmkey));
++
++ /*
++ * Compare calculated HMAC with HMAC from metadata.
++ * If two HMACs are equal, 'key' is correct.
++ */
++ return (!bcmp(odhmac, chmac, SHA512_MDLEN));
++}
++
++/*
++ * Calculate HMAC from Data-Key and IV-Key.
++ */
++void
++g_eli_mkey_hmac(unsigned char *mkey, const unsigned char *key)
++{
++ unsigned char hmkey[SHA512_MDLEN]; /* Key for HMAC. */
++ unsigned char *odhmac; /* On-disk HMAC. */
++
++ /*
++ * The key for HMAC calculations is: hmkey = HMAC_SHA512(Derived-Key, 0)
++ */
++ g_eli_crypto_hmac(key, G_ELI_USERKEYLEN, "\x00", 1, hmkey, 0);
++
++ odhmac = mkey + G_ELI_DATAIVKEYLEN;
++ /* Calculate HMAC from Data-Key and IV-Key. */
++ g_eli_crypto_hmac(hmkey, sizeof(hmkey), mkey, G_ELI_DATAIVKEYLEN,
++ odhmac, 0);
++
++ bzero(hmkey, sizeof(hmkey));
++}
++
++/*
++ * Find and decrypt Master Key encrypted with 'key'.
++ * Return decrypted Master Key number in 'nkeyp' if not NULL.
++ * Return 0 on success, > 0 on failure, -1 on bad key.
++ */
++int
++g_eli_mkey_decrypt(const struct g_eli_metadata *md, const unsigned char *key,
++ unsigned char *mkey, unsigned *nkeyp)
++{
++ unsigned char tmpmkey[G_ELI_MKEYLEN];
++ unsigned char enckey[SHA512_MDLEN]; /* Key for encryption. */
++ const unsigned char *mmkey;
++ int bit, error, nkey;
++
++ if (nkeyp != NULL)
++ *nkeyp = -1;
++
++ /*
++ * The key for encryption is: enckey = HMAC_SHA512(Derived-Key, 1)
++ */
++ g_eli_crypto_hmac(key, G_ELI_USERKEYLEN, "\x01", 1, enckey, 0);
++
++ mmkey = md->md_mkeys;
++ for (nkey = 0; nkey < G_ELI_MAXMKEYS; nkey++, mmkey += G_ELI_MKEYLEN) {
++ bit = (1 << nkey);
++ if (!(md->md_keys & bit))
++ continue;
++ bcopy(mmkey, tmpmkey, G_ELI_MKEYLEN);
++ error = g_eli_crypto_decrypt(md->md_ealgo, tmpmkey,
++ G_ELI_MKEYLEN, enckey, md->md_keylen);
++ if (error != 0) {
++ bzero(tmpmkey, sizeof(tmpmkey));
++ bzero(enckey, sizeof(enckey));
++ return (error);
++ }
++ if (g_eli_mkey_verify(tmpmkey, key)) {
++ bcopy(tmpmkey, mkey, G_ELI_DATAIVKEYLEN);
++ bzero(tmpmkey, sizeof(tmpmkey));
++ bzero(enckey, sizeof(enckey));
++ if (nkeyp != NULL)
++ *nkeyp = nkey;
++ return (0);
++ }
++ }
++ bzero(enckey, sizeof(enckey));
++ bzero(tmpmkey, sizeof(tmpmkey));
++ return (-1);
++}
++
++/*
++ * Encrypt the Master-Key and calculate HMAC to be able to verify it in the
++ * future.
++ */
++int
++g_eli_mkey_encrypt(unsigned algo, const unsigned char *key, unsigned keylen,
++ unsigned char *mkey)
++{
++ unsigned char enckey[SHA512_MDLEN]; /* Key for encryption. */
++ int error;
++
++ /*
++ * To calculate HMAC, the whole key (G_ELI_USERKEYLEN bytes long) will
++ * be used.
++ */
++ g_eli_mkey_hmac(mkey, key);
++ /*
++ * The key for encryption is: enckey = HMAC_SHA512(Derived-Key, 1)
++ */
++ g_eli_crypto_hmac(key, G_ELI_USERKEYLEN, "\x01", 1, enckey, 0);
++ /*
++ * Encrypt the Master-Key and HMAC() result with the given key (this
++ * time only 'keylen' bits from the key are used).
++ */
++ error = g_eli_crypto_encrypt(algo, mkey, G_ELI_MKEYLEN, enckey, keylen);
++
++ bzero(enckey, sizeof(enckey));
++
++ return (error);
++}
++
++#ifdef _KERNEL
++static void
++g_eli_ekeys_generate(struct g_eli_softc *sc)
++{
++ uint8_t *keys;
++ u_int kno;
++ off_t mediasize;
++ size_t blocksize;
++ struct {
++ char magic[4];
++ uint8_t keyno[8];
++ } __packed hmacdata;
++
++ KASSERT((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0,
++ ("%s: G_ELI_FLAG_SINGLE_KEY flag present", __func__));
++
++ if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
++ struct g_provider *pp;
++
++ pp = LIST_FIRST(&sc->sc_geom->consumer)->provider;
++ mediasize = pp->mediasize;
++ blocksize = pp->sectorsize;
++ } else {
++ mediasize = sc->sc_mediasize;
++ blocksize = sc->sc_sectorsize;
++ }
++ sc->sc_nekeys = ((mediasize - 1) >> G_ELI_KEY_SHIFT) / blocksize + 1;
++ sc->sc_ekeys =
++ malloc(sc->sc_nekeys * (sizeof(uint8_t *) + G_ELI_DATAKEYLEN),
++ M_ELI, M_WAITOK);
++ keys = (uint8_t *)(sc->sc_ekeys + sc->sc_nekeys);
++ bcopy("ekey", hmacdata.magic, 4);
++ for (kno = 0; kno < sc->sc_nekeys; kno++, keys += G_ELI_DATAKEYLEN) {
++ sc->sc_ekeys[kno] = keys;
++ le64enc(hmacdata.keyno, (uint64_t)kno);
++ g_eli_crypto_hmac(sc->sc_mkey, G_ELI_MAXKEYLEN,
++ (uint8_t *)&hmacdata, sizeof(hmacdata),
++ sc->sc_ekeys[kno], 0);
++ }
++}
++
++/*
++ * When doing encryption only, copy IV key and encryption key.
++ * When doing encryption and authentication, copy IV key, generate encryption
++ * key and generate authentication key.
++ */
++void
++g_eli_mkey_propagate(struct g_eli_softc *sc, const unsigned char *mkey)
++{
++
++ /* Remember the Master Key. */
++ bcopy(mkey, sc->sc_mkey, sizeof(sc->sc_mkey));
++
++ bcopy(mkey, sc->sc_ivkey, sizeof(sc->sc_ivkey));
++ mkey += sizeof(sc->sc_ivkey);
++
++ /*
++ * The authentication key is: akey = HMAC_SHA512(Master-Key, 0x11)
++ */
++ if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
++ g_eli_crypto_hmac(mkey, G_ELI_MAXKEYLEN, "\x11", 1,
++ sc->sc_akey, 0);
++ } else {
++ arc4rand(sc->sc_akey, sizeof(sc->sc_akey), 0);
++ }
++
++ if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
++ sc->sc_nekeys = 1;
++ sc->sc_ekeys = malloc(sc->sc_nekeys *
++ (sizeof(uint8_t *) + G_ELI_DATAKEYLEN), M_ELI, M_WAITOK);
++ sc->sc_ekeys[0] = (uint8_t *)(sc->sc_ekeys + sc->sc_nekeys);
++ if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0)
++ bcopy(mkey, sc->sc_ekeys[0], G_ELI_DATAKEYLEN);
++ else {
++ /*
++ * The encryption key is: ekey = HMAC_SHA512(Master-Key, 0x10)
++ */
++ g_eli_crypto_hmac(mkey, G_ELI_MAXKEYLEN, "\x10", 1,
++ sc->sc_ekeys[0], 0);
++ }
++ } else {
++ /* Generate all encryption keys. */
++ g_eli_ekeys_generate(sc);
++ }
++
++ if (sc->sc_flags & G_ELI_FLAG_AUTH) {
++ /*
++ * Precalculate SHA256 for HMAC key generation.
++ * This is expensive operation and we can do it only once now or
++ * for every access to sector, so now will be much better.
++ */
++ SHA256_Init(&sc->sc_akeyctx);
++ SHA256_Update(&sc->sc_akeyctx, sc->sc_akey,
++ sizeof(sc->sc_akey));
++ }
++ /*
++ * Precalculate SHA256 for IV generation.
++ * This is expensive operation and we can do it only once now or for
++ * every access to sector, so now will be much better.
++ */
++ switch (sc->sc_ealgo) {
++ case CRYPTO_AES_XTS:
++ break;
++ default:
++ SHA256_Init(&sc->sc_ivctx);
++ SHA256_Update(&sc->sc_ivctx, sc->sc_ivkey,
++ sizeof(sc->sc_ivkey));
++ break;
++ }
++}
++#endif
+--- /dev/null
++++ b/sys/geom/eli/g_eli_ctl.c
+@@ -0,0 +1,1045 @@
++/*-
++ * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/param.h>
++#include <sys/systm.h>
++#include <sys/kernel.h>
++#include <sys/module.h>
++#include <sys/lock.h>
++#include <sys/mutex.h>
++#include <sys/bio.h>
++#include <sys/sysctl.h>
++#include <sys/malloc.h>
++#include <sys/kthread.h>
++#include <sys/proc.h>
++#include <sys/sched.h>
++#include <sys/uio.h>
++
++#include <vm/uma.h>
++
++#include <geom/geom.h>
++#include <geom/eli/g_eli.h>
++
++
++MALLOC_DECLARE(M_ELI);
++
++
++static void
++g_eli_ctl_attach(struct gctl_req *req, struct g_class *mp)
++{
++ struct g_eli_metadata md;
++ struct g_provider *pp;
++ const char *name;
++ u_char *key, mkey[G_ELI_DATAIVKEYLEN];
++ int *nargs, *detach, *readonly;
++ int keysize, error;
++ u_int nkey;
++
++ g_topology_assert();
++
++ nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
++ if (nargs == NULL) {
++ gctl_error(req, "No '%s' argument.", "nargs");
++ return;
++ }
++ if (*nargs != 1) {
++ gctl_error(req, "Invalid number of arguments.");
++ return;
++ }
++
++ detach = gctl_get_paraml(req, "detach", sizeof(*detach));
++ if (detach == NULL) {
++ gctl_error(req, "No '%s' argument.", "detach");
++ return;
++ }
++
++ readonly = gctl_get_paraml(req, "readonly", sizeof(*readonly));
++ if (readonly == NULL) {
++ gctl_error(req, "No '%s' argument.", "readonly");
++ return;
++ }
++
++ name = gctl_get_asciiparam(req, "arg0");
++ if (name == NULL) {
++ gctl_error(req, "No 'arg%u' argument.", 0);
++ return;
++ }
++ if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
++ name += strlen("/dev/");
++ pp = g_provider_by_name(name);
++ if (pp == NULL) {
++ gctl_error(req, "Provider %s is invalid.", name);
++ return;
++ }
++ error = g_eli_read_metadata(mp, pp, &md);
++ if (error != 0) {
++ gctl_error(req, "Cannot read metadata from %s (error=%d).",
++ name, error);
++ return;
++ }
++ if (md.md_keys == 0x00) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "No valid keys on %s.", pp->name);
++ return;
++ }
++
++ key = gctl_get_param(req, "key", &keysize);
++ if (key == NULL || keysize != G_ELI_USERKEYLEN) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "No '%s' argument.", "key");
++ return;
++ }
++
++ error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
++ bzero(key, keysize);
++ if (error == -1) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "Wrong key for %s.", pp->name);
++ return;
++ } else if (error > 0) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "Cannot decrypt Master Key for %s (error=%d).",
++ pp->name, error);
++ return;
++ }
++ G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
++
++ if (*detach && *readonly) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "Options -d and -r are mutually exclusive.");
++ return;
++ }
++ if (*detach)
++ md.md_flags |= G_ELI_FLAG_WO_DETACH;
++ if (*readonly)
++ md.md_flags |= G_ELI_FLAG_RO;
++ g_eli_create(req, mp, pp, &md, mkey, nkey);
++ bzero(mkey, sizeof(mkey));
++ bzero(&md, sizeof(md));
++}
++
++static struct g_eli_softc *
++g_eli_find_device(struct g_class *mp, const char *prov)
++{
++ struct g_eli_softc *sc;
++ struct g_geom *gp;
++ struct g_provider *pp;
++ struct g_consumer *cp;
++
++ if (strncmp(prov, "/dev/", strlen("/dev/")) == 0)
++ prov += strlen("/dev/");
++ LIST_FOREACH(gp, &mp->geom, geom) {
++ sc = gp->softc;
++ if (sc == NULL)
++ continue;
++ pp = LIST_FIRST(&gp->provider);
++ if (pp != NULL && strcmp(pp->name, prov) == 0)
++ return (sc);
++ cp = LIST_FIRST(&gp->consumer);
++ if (cp != NULL && cp->provider != NULL &&
++ strcmp(cp->provider->name, prov) == 0) {
++ return (sc);
++ }
++ }
++ return (NULL);
++}
++
++static void
++g_eli_ctl_detach(struct gctl_req *req, struct g_class *mp)
++{
++ struct g_eli_softc *sc;
++ int *force, *last, *nargs, error;
++ const char *prov;
++ char param[16];
++ int i;
++
++ g_topology_assert();
++
++ nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
++ if (nargs == NULL) {
++ gctl_error(req, "No '%s' argument.", "nargs");
++ return;
++ }
++ if (*nargs <= 0) {
++ gctl_error(req, "Missing device(s).");
++ return;
++ }
++ force = gctl_get_paraml(req, "force", sizeof(*force));
++ if (force == NULL) {
++ gctl_error(req, "No '%s' argument.", "force");
++ return;
++ }
++ last = gctl_get_paraml(req, "last", sizeof(*last));
++ if (last == NULL) {
++ gctl_error(req, "No '%s' argument.", "last");
++ return;
++ }
++
++ for (i = 0; i < *nargs; i++) {
++ snprintf(param, sizeof(param), "arg%d", i);
++ prov = gctl_get_asciiparam(req, param);
++ if (prov == NULL) {
++ gctl_error(req, "No 'arg%d' argument.", i);
++ return;
++ }
++ sc = g_eli_find_device(mp, prov);
++ if (sc == NULL) {
++ gctl_error(req, "No such device: %s.", prov);
++ return;
++ }
++ if (*last) {
++ sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
++ sc->sc_geom->access = g_eli_access;
++ } else {
++ error = g_eli_destroy(sc, *force ? TRUE : FALSE);
++ if (error != 0) {
++ gctl_error(req,
++ "Cannot destroy device %s (error=%d).",
++ sc->sc_name, error);
++ return;
++ }
++ }
++ }
++}
++
++static void
++g_eli_ctl_onetime(struct gctl_req *req, struct g_class *mp)
++{
++ struct g_eli_metadata md;
++ struct g_provider *pp;
++ const char *name;
++ intmax_t *keylen, *sectorsize;
++ u_char mkey[G_ELI_DATAIVKEYLEN];
++ int *nargs, *detach;
++
++ g_topology_assert();
++ bzero(&md, sizeof(md));
++
++ nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
++ if (nargs == NULL) {
++ gctl_error(req, "No '%s' argument.", "nargs");
++ return;
++ }
++ if (*nargs != 1) {
++ gctl_error(req, "Invalid number of arguments.");
++ return;
++ }
++
++ detach = gctl_get_paraml(req, "detach", sizeof(*detach));
++ if (detach == NULL) {
++ gctl_error(req, "No '%s' argument.", "detach");
++ return;
++ }
++
++ strlcpy(md.md_magic, G_ELI_MAGIC, sizeof(md.md_magic));
++ md.md_version = G_ELI_VERSION;
++ md.md_flags |= G_ELI_FLAG_ONETIME;
++ if (*detach)
++ md.md_flags |= G_ELI_FLAG_WO_DETACH;
++
++ md.md_ealgo = CRYPTO_ALGORITHM_MIN - 1;
++ name = gctl_get_asciiparam(req, "aalgo");
++ if (name == NULL) {
++ gctl_error(req, "No '%s' argument.", "aalgo");
++ return;
++ }
++ if (strcmp(name, "none") != 0) {
++ md.md_aalgo = g_eli_str2aalgo(name);
++ if (md.md_aalgo >= CRYPTO_ALGORITHM_MIN &&
++ md.md_aalgo <= CRYPTO_ALGORITHM_MAX) {
++ md.md_flags |= G_ELI_FLAG_AUTH;
++ } else {
++ /*
++ * For backward compatibility, check if the -a option
++ * was used to provide encryption algorithm.
++ */
++ md.md_ealgo = g_eli_str2ealgo(name);
++ if (md.md_ealgo < CRYPTO_ALGORITHM_MIN ||
++ md.md_ealgo > CRYPTO_ALGORITHM_MAX) {
++ gctl_error(req,
++ "Invalid authentication algorithm.");
++ return;
++ } else {
++ gctl_error(req, "warning: The -e option, not "
++ "the -a option is now used to specify "
++ "encryption algorithm to use.");
++ }
++ }
++ }
++
++ if (md.md_ealgo < CRYPTO_ALGORITHM_MIN ||
++ md.md_ealgo > CRYPTO_ALGORITHM_MAX) {
++ name = gctl_get_asciiparam(req, "ealgo");
++ if (name == NULL) {
++ gctl_error(req, "No '%s' argument.", "ealgo");
++ return;
++ }
++ md.md_ealgo = g_eli_str2ealgo(name);
++ if (md.md_ealgo < CRYPTO_ALGORITHM_MIN ||
++ md.md_ealgo > CRYPTO_ALGORITHM_MAX) {
++ gctl_error(req, "Invalid encryption algorithm.");
++ return;
++ }
++ }
++
++ keylen = gctl_get_paraml(req, "keylen", sizeof(*keylen));
++ if (keylen == NULL) {
++ gctl_error(req, "No '%s' argument.", "keylen");
++ return;
++ }
++ md.md_keylen = g_eli_keylen(md.md_ealgo, *keylen);
++ if (md.md_keylen == 0) {
++ gctl_error(req, "Invalid '%s' argument.", "keylen");
++ return;
++ }
++
++ /* Not important here. */
++ md.md_provsize = 0;
++ /* Not important here. */
++ bzero(md.md_salt, sizeof(md.md_salt));
++
++ md.md_keys = 0x01;
++ arc4rand(mkey, sizeof(mkey), 0);
++
++ /* Not important here. */
++ bzero(md.md_hash, sizeof(md.md_hash));
++
++ name = gctl_get_asciiparam(req, "arg0");
++ if (name == NULL) {
++ gctl_error(req, "No 'arg%u' argument.", 0);
++ return;
++ }
++ if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
++ name += strlen("/dev/");
++ pp = g_provider_by_name(name);
++ if (pp == NULL) {
++ gctl_error(req, "Provider %s is invalid.", name);
++ return;
++ }
++
++ sectorsize = gctl_get_paraml(req, "sectorsize", sizeof(*sectorsize));
++ if (sectorsize == NULL) {
++ gctl_error(req, "No '%s' argument.", "sectorsize");
++ return;
++ }
++ if (*sectorsize == 0)
++ md.md_sectorsize = pp->sectorsize;
++ else {
++ if (*sectorsize < 0 || (*sectorsize % pp->sectorsize) != 0) {
++ gctl_error(req, "Invalid sector size.");
++ return;
++ }
++ if (*sectorsize > PAGE_SIZE) {
++ gctl_error(req, "warning: Using sectorsize bigger than "
++ "the page size!");
++ }
++ md.md_sectorsize = *sectorsize;
++ }
++
++ g_eli_create(req, mp, pp, &md, mkey, -1);
++ bzero(mkey, sizeof(mkey));
++ bzero(&md, sizeof(md));
++}
++
++static void
++g_eli_ctl_configure(struct gctl_req *req, struct g_class *mp)
++{
++ struct g_eli_softc *sc;
++ struct g_eli_metadata md;
++ struct g_provider *pp;
++ struct g_consumer *cp;
++ char param[16];
++ const char *prov;
++ u_char *sector;
++ int *nargs, *boot, *noboot;
++ int error;
++ u_int i;
++
++ g_topology_assert();
++
++ nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
++ if (nargs == NULL) {
++ gctl_error(req, "No '%s' argument.", "nargs");
++ return;
++ }
++ if (*nargs <= 0) {
++ gctl_error(req, "Missing device(s).");
++ return;
++ }
++
++ boot = gctl_get_paraml(req, "boot", sizeof(*boot));
++ if (boot == NULL) {
++ gctl_error(req, "No '%s' argument.", "boot");
++ return;
++ }
++ noboot = gctl_get_paraml(req, "noboot", sizeof(*noboot));
++ if (noboot == NULL) {
++ gctl_error(req, "No '%s' argument.", "noboot");
++ return;
++ }
++ if (*boot && *noboot) {
++ gctl_error(req, "Options -b and -B are mutually exclusive.");
++ return;
++ }
++ if (!*boot && !*noboot) {
++ gctl_error(req, "No option given.");
++ return;
++ }
++
++ for (i = 0; i < *nargs; i++) {
++ snprintf(param, sizeof(param), "arg%d", i);
++ prov = gctl_get_asciiparam(req, param);
++ if (prov == NULL) {
++ gctl_error(req, "No 'arg%d' argument.", i);
++ return;
++ }
++ sc = g_eli_find_device(mp, prov);
++ if (sc == NULL) {
++ /*
++ * We ignore not attached providers, userland part will
++ * take care of them.
++ */
++ G_ELI_DEBUG(1, "Skipping configuration of not attached "
++ "provider %s.", prov);
++ continue;
++ }
++ if (*boot && (sc->sc_flags & G_ELI_FLAG_BOOT)) {
++ G_ELI_DEBUG(1, "BOOT flag already configured for %s.",
++ prov);
++ continue;
++ } else if (!*boot && !(sc->sc_flags & G_ELI_FLAG_BOOT)) {
++ G_ELI_DEBUG(1, "BOOT flag not configured for %s.",
++ prov);
++ continue;
++ }
++ if (sc->sc_flags & G_ELI_FLAG_RO) {
++ gctl_error(req, "Cannot change configuration of "
++ "read-only provider %s.", prov);
++ continue;
++ }
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ pp = cp->provider;
++ error = g_eli_read_metadata(mp, pp, &md);
++ if (error != 0) {
++ gctl_error(req,
++ "Cannot read metadata from %s (error=%d).",
++ prov, error);
++ continue;
++ }
++
++ if (*boot) {
++ md.md_flags |= G_ELI_FLAG_BOOT;
++ sc->sc_flags |= G_ELI_FLAG_BOOT;
++ } else {
++ md.md_flags &= ~G_ELI_FLAG_BOOT;
++ sc->sc_flags &= ~G_ELI_FLAG_BOOT;
++ }
++
++ sector = malloc(pp->sectorsize, M_ELI, M_WAITOK | M_ZERO);
++ eli_metadata_encode(&md, sector);
++ error = g_write_data(cp, pp->mediasize - pp->sectorsize, sector,
++ pp->sectorsize);
++ if (error != 0) {
++ gctl_error(req,
++ "Cannot store metadata on %s (error=%d).",
++ prov, error);
++ }
++ bzero(&md, sizeof(md));
++ bzero(sector, pp->sectorsize);
++ free(sector, M_ELI);
++ }
++}
++
++static void
++g_eli_ctl_setkey(struct gctl_req *req, struct g_class *mp)
++{
++ struct g_eli_softc *sc;
++ struct g_eli_metadata md;
++ struct g_provider *pp;
++ struct g_consumer *cp;
++ const char *name;
++ u_char *key, *mkeydst, *sector;
++ intmax_t *valp;
++ int keysize, nkey, error;
++
++ g_topology_assert();
++
++ name = gctl_get_asciiparam(req, "arg0");
++ if (name == NULL) {
++ gctl_error(req, "No 'arg%u' argument.", 0);
++ return;
++ }
++ sc = g_eli_find_device(mp, name);
++ if (sc == NULL) {
++ gctl_error(req, "Provider %s is invalid.", name);
++ return;
++ }
++ if (sc->sc_flags & G_ELI_FLAG_RO) {
++ gctl_error(req, "Cannot change keys for read-only provider.");
++ return;
++ }
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ pp = cp->provider;
++
++ error = g_eli_read_metadata(mp, pp, &md);
++ if (error != 0) {
++ gctl_error(req, "Cannot read metadata from %s (error=%d).",
++ name, error);
++ return;
++ }
++
++ valp = gctl_get_paraml(req, "keyno", sizeof(*valp));
++ if (valp == NULL) {
++ gctl_error(req, "No '%s' argument.", "keyno");
++ return;
++ }
++ if (*valp != -1)
++ nkey = *valp;
++ else
++ nkey = sc->sc_nkey;
++ if (nkey < 0 || nkey >= G_ELI_MAXMKEYS) {
++ gctl_error(req, "Invalid '%s' argument.", "keyno");
++ return;
++ }
++
++ valp = gctl_get_paraml(req, "iterations", sizeof(*valp));
++ if (valp == NULL) {
++ gctl_error(req, "No '%s' argument.", "iterations");
++ return;
++ }
++ /* Check if iterations number should and can be changed. */
++ if (*valp != -1) {
++ if (bitcount32(md.md_keys) != 1) {
++ gctl_error(req, "To be able to use '-i' option, only "
++ "one key can be defined.");
++ return;
++ }
++ if (md.md_keys != (1 << nkey)) {
++ gctl_error(req, "Only already defined key can be "
++ "changed when '-i' option is used.");
++ return;
++ }
++ md.md_iterations = *valp;
++ }
++
++ key = gctl_get_param(req, "key", &keysize);
++ if (key == NULL || keysize != G_ELI_USERKEYLEN) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "No '%s' argument.", "key");
++ return;
++ }
++
++ mkeydst = md.md_mkeys + nkey * G_ELI_MKEYLEN;
++ md.md_keys |= (1 << nkey);
++
++ bcopy(sc->sc_mkey, mkeydst, sizeof(sc->sc_mkey));
++
++ /* Encrypt Master Key with the new key. */
++ error = g_eli_mkey_encrypt(md.md_ealgo, key, md.md_keylen, mkeydst);
++ bzero(key, keysize);
++ if (error != 0) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "Cannot encrypt Master Key (error=%d).", error);
++ return;
++ }
++
++ sector = malloc(pp->sectorsize, M_ELI, M_WAITOK | M_ZERO);
++ /* Store metadata with fresh key. */
++ eli_metadata_encode(&md, sector);
++ bzero(&md, sizeof(md));
++ error = g_write_data(cp, pp->mediasize - pp->sectorsize, sector,
++ pp->sectorsize);
++ bzero(sector, pp->sectorsize);
++ free(sector, M_ELI);
++ if (error != 0) {
++ gctl_error(req, "Cannot store metadata on %s (error=%d).",
++ pp->name, error);
++ return;
++ }
++ G_ELI_DEBUG(1, "Key %u changed on %s.", nkey, pp->name);
++}
++
++static void
++g_eli_ctl_delkey(struct gctl_req *req, struct g_class *mp)
++{
++ struct g_eli_softc *sc;
++ struct g_eli_metadata md;
++ struct g_provider *pp;
++ struct g_consumer *cp;
++ const char *name;
++ u_char *mkeydst, *sector;
++ intmax_t *valp;
++ size_t keysize;
++ int error, nkey, *all, *force;
++ u_int i;
++
++ g_topology_assert();
++
++ nkey = 0; /* fixes causeless gcc warning */
++
++ name = gctl_get_asciiparam(req, "arg0");
++ if (name == NULL) {
++ gctl_error(req, "No 'arg%u' argument.", 0);
++ return;
++ }
++ sc = g_eli_find_device(mp, name);
++ if (sc == NULL) {
++ gctl_error(req, "Provider %s is invalid.", name);
++ return;
++ }
++ if (sc->sc_flags & G_ELI_FLAG_RO) {
++ gctl_error(req, "Cannot delete keys for read-only provider.");
++ return;
++ }
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ pp = cp->provider;
++
++ error = g_eli_read_metadata(mp, pp, &md);
++ if (error != 0) {
++ gctl_error(req, "Cannot read metadata from %s (error=%d).",
++ name, error);
++ return;
++ }
++
++ all = gctl_get_paraml(req, "all", sizeof(*all));
++ if (all == NULL) {
++ gctl_error(req, "No '%s' argument.", "all");
++ return;
++ }
++
++ if (*all) {
++ mkeydst = md.md_mkeys;
++ keysize = sizeof(md.md_mkeys);
++ } else {
++ force = gctl_get_paraml(req, "force", sizeof(*force));
++ if (force == NULL) {
++ gctl_error(req, "No '%s' argument.", "force");
++ return;
++ }
++
++ valp = gctl_get_paraml(req, "keyno", sizeof(*valp));
++ if (valp == NULL) {
++ gctl_error(req, "No '%s' argument.", "keyno");
++ return;
++ }
++ if (*valp != -1)
++ nkey = *valp;
++ else
++ nkey = sc->sc_nkey;
++ if (nkey < 0 || nkey >= G_ELI_MAXMKEYS) {
++ gctl_error(req, "Invalid '%s' argument.", "keyno");
++ return;
++ }
++ if (!(md.md_keys & (1 << nkey)) && !*force) {
++ gctl_error(req, "Master Key %u is not set.", nkey);
++ return;
++ }
++ md.md_keys &= ~(1 << nkey);
++ if (md.md_keys == 0 && !*force) {
++ gctl_error(req, "This is the last Master Key. Use '-f' "
++ "flag if you really want to remove it.");
++ return;
++ }
++ mkeydst = md.md_mkeys + nkey * G_ELI_MKEYLEN;
++ keysize = G_ELI_MKEYLEN;
++ }
++
++ sector = malloc(pp->sectorsize, M_ELI, M_WAITOK | M_ZERO);
++ for (i = 0; i <= g_eli_overwrites; i++) {
++ if (i == g_eli_overwrites)
++ bzero(mkeydst, keysize);
++ else
++ arc4rand(mkeydst, keysize, 0);
++ /* Store metadata with destroyed key. */
++ eli_metadata_encode(&md, sector);
++ error = g_write_data(cp, pp->mediasize - pp->sectorsize, sector,
++ pp->sectorsize);
++ if (error != 0) {
++ G_ELI_DEBUG(0, "Cannot store metadata on %s "
++ "(error=%d).", pp->name, error);
++ }
++ /*
++ * Flush write cache so we don't overwrite data N times in cache
++ * and only once on disk.
++ */
++ (void)g_io_flush(cp);
++ }
++ bzero(&md, sizeof(md));
++ bzero(sector, pp->sectorsize);
++ free(sector, M_ELI);
++ if (*all)
++ G_ELI_DEBUG(1, "All keys removed from %s.", pp->name);
++ else
++ G_ELI_DEBUG(1, "Key %d removed from %s.", nkey, pp->name);
++}
++
++static void
++g_eli_suspend_one(struct g_eli_softc *sc, struct gctl_req *req)
++{
++ struct g_eli_worker *wr;
++
++ g_topology_assert();
++
++ KASSERT(sc != NULL, ("NULL sc"));
++
++ if (sc->sc_flags & G_ELI_FLAG_ONETIME) {
++ gctl_error(req,
++ "Device %s is using one-time key, suspend not supported.",
++ sc->sc_name);
++ return;
++ }
++
++ mtx_lock(&sc->sc_queue_mtx);
++ if (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
++ mtx_unlock(&sc->sc_queue_mtx);
++ gctl_error(req, "Device %s already suspended.",
++ sc->sc_name);
++ return;
++ }
++ sc->sc_flags |= G_ELI_FLAG_SUSPEND;
++ wakeup(sc);
++ for (;;) {
++ LIST_FOREACH(wr, &sc->sc_workers, w_next) {
++ if (wr->w_active)
++ break;
++ }
++ if (wr == NULL)
++ break;
++ /* Not all threads suspended. */
++ msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
++ "geli:suspend", 0);
++ }
++ /*
++ * Clear sensitive data on suspend, they will be recovered on resume.
++ */
++ bzero(sc->sc_mkey, sizeof(sc->sc_mkey));
++ bzero(sc->sc_ekeys,
++ sc->sc_nekeys * (sizeof(uint8_t *) + G_ELI_DATAKEYLEN));
++ free(sc->sc_ekeys, M_ELI);
++ sc->sc_ekeys = NULL;
++ bzero(sc->sc_akey, sizeof(sc->sc_akey));
++ bzero(&sc->sc_akeyctx, sizeof(sc->sc_akeyctx));
++ bzero(sc->sc_ivkey, sizeof(sc->sc_ivkey));
++ bzero(&sc->sc_ivctx, sizeof(sc->sc_ivctx));
++ mtx_unlock(&sc->sc_queue_mtx);
++ G_ELI_DEBUG(0, "Device %s has been suspended.", sc->sc_name);
++}
++
++static void
++g_eli_ctl_suspend(struct gctl_req *req, struct g_class *mp)
++{
++ struct g_eli_softc *sc;
++ int *all, *nargs;
++
++ g_topology_assert();
++
++ nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
++ if (nargs == NULL) {
++ gctl_error(req, "No '%s' argument.", "nargs");
++ return;
++ }
++ all = gctl_get_paraml(req, "all", sizeof(*all));
++ if (all == NULL) {
++ gctl_error(req, "No '%s' argument.", "all");
++ return;
++ }
++ if (!*all && *nargs == 0) {
++ gctl_error(req, "Too few arguments.");
++ return;
++ }
++
++ if (*all) {
++ struct g_geom *gp, *gp2;
++
++ LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
++ sc = gp->softc;
++ if (sc->sc_flags & G_ELI_FLAG_ONETIME) {
++ G_ELI_DEBUG(0,
++ "Device %s is using one-time key, suspend not supported, skipping.",
++ sc->sc_name);
++ continue;
++ }
++ g_eli_suspend_one(sc, req);
++ }
++ } else {
++ const char *prov;
++ char param[16];
++ int i;
++
++ for (i = 0; i < *nargs; i++) {
++ snprintf(param, sizeof(param), "arg%d", i);
++ prov = gctl_get_asciiparam(req, param);
++ if (prov == NULL) {
++ G_ELI_DEBUG(0, "No 'arg%d' argument.", i);
++ continue;
++ }
++
++ sc = g_eli_find_device(mp, prov);
++ if (sc == NULL) {
++ G_ELI_DEBUG(0, "No such provider: %s.", prov);
++ continue;
++ }
++ g_eli_suspend_one(sc, req);
++ }
++ }
++}
++
++static void
++g_eli_ctl_resume(struct gctl_req *req, struct g_class *mp)
++{
++ struct g_eli_metadata md;
++ struct g_eli_softc *sc;
++ struct g_provider *pp;
++ struct g_consumer *cp;
++ const char *name;
++ u_char *key, mkey[G_ELI_DATAIVKEYLEN];
++ int *nargs, keysize, error;
++ u_int nkey;
++
++ g_topology_assert();
++
++ nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
++ if (nargs == NULL) {
++ gctl_error(req, "No '%s' argument.", "nargs");
++ return;
++ }
++ if (*nargs != 1) {
++ gctl_error(req, "Invalid number of arguments.");
++ return;
++ }
++
++ name = gctl_get_asciiparam(req, "arg0");
++ if (name == NULL) {
++ gctl_error(req, "No 'arg%u' argument.", 0);
++ return;
++ }
++ sc = g_eli_find_device(mp, name);
++ if (sc == NULL) {
++ gctl_error(req, "Provider %s is invalid.", name);
++ return;
++ }
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ pp = cp->provider;
++ error = g_eli_read_metadata(mp, pp, &md);
++ if (error != 0) {
++ gctl_error(req, "Cannot read metadata from %s (error=%d).",
++ name, error);
++ return;
++ }
++ if (md.md_keys == 0x00) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "No valid keys on %s.", pp->name);
++ return;
++ }
++
++ key = gctl_get_param(req, "key", &keysize);
++ if (key == NULL || keysize != G_ELI_USERKEYLEN) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "No '%s' argument.", "key");
++ return;
++ }
++
++ error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
++ bzero(key, keysize);
++ if (error == -1) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "Wrong key for %s.", pp->name);
++ return;
++ } else if (error > 0) {
++ bzero(&md, sizeof(md));
++ gctl_error(req, "Cannot decrypt Master Key for %s (error=%d).",
++ pp->name, error);
++ return;
++ }
++ G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
++
++ mtx_lock(&sc->sc_queue_mtx);
++ if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
++ gctl_error(req, "Device %s is not suspended.", name);
++ else {
++ /* Restore sc_mkey, sc_ekeys, sc_akey and sc_ivkey. */
++ g_eli_mkey_propagate(sc, mkey);
++ sc->sc_flags &= ~G_ELI_FLAG_SUSPEND;
++ G_ELI_DEBUG(1, "Resumed %s.", pp->name);
++ wakeup(sc);
++ }
++ mtx_unlock(&sc->sc_queue_mtx);
++ bzero(mkey, sizeof(mkey));
++ bzero(&md, sizeof(md));
++}
++
++static int
++g_eli_kill_one(struct g_eli_softc *sc)
++{
++ struct g_provider *pp;
++ struct g_consumer *cp;
++ int error = 0;
++
++ g_topology_assert();
++
++ if (sc == NULL)
++ return (ENOENT);
++
++ pp = LIST_FIRST(&sc->sc_geom->provider);
++ g_error_provider(pp, ENXIO);
++
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ pp = cp->provider;
++
++ if (sc->sc_flags & G_ELI_FLAG_RO) {
++ G_ELI_DEBUG(0, "WARNING: Metadata won't be erased on read-only "
++ "provider: %s.", pp->name);
++ } else {
++ u_char *sector;
++ u_int i;
++ int err;
++
++ sector = malloc(pp->sectorsize, M_ELI, M_WAITOK);
++ for (i = 0; i <= g_eli_overwrites; i++) {
++ if (i == g_eli_overwrites)
++ bzero(sector, pp->sectorsize);
++ else
++ arc4rand(sector, pp->sectorsize, 0);
++ err = g_write_data(cp, pp->mediasize - pp->sectorsize,
++ sector, pp->sectorsize);
++ if (err != 0) {
++ G_ELI_DEBUG(0, "Cannot erase metadata on %s "
++ "(error=%d).", pp->name, err);
++ if (error == 0)
++ error = err;
++ }
++ /*
++ * Flush write cache so we don't overwrite data N times
++ * in cache and only once on disk.
++ */
++ (void)g_io_flush(cp);
++ }
++ free(sector, M_ELI);
++ }
++ if (error == 0)
++ G_ELI_DEBUG(0, "%s has been killed.", pp->name);
++ g_eli_destroy(sc, TRUE);
++ return (error);
++}
++
++static void
++g_eli_ctl_kill(struct gctl_req *req, struct g_class *mp)
++{
++ int *all, *nargs;
++ int error;
++
++ g_topology_assert();
++
++ nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
++ if (nargs == NULL) {
++ gctl_error(req, "No '%s' argument.", "nargs");
++ return;
++ }
++ all = gctl_get_paraml(req, "all", sizeof(*all));
++ if (all == NULL) {
++ gctl_error(req, "No '%s' argument.", "all");
++ return;
++ }
++ if (!*all && *nargs == 0) {
++ gctl_error(req, "Too few arguments.");
++ return;
++ }
++
++ if (*all) {
++ struct g_geom *gp, *gp2;
++
++ LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
++ error = g_eli_kill_one(gp->softc);
++ if (error != 0)
++ gctl_error(req, "Not fully done.");
++ }
++ } else {
++ struct g_eli_softc *sc;
++ const char *prov;
++ char param[16];
++ int i;
++
++ for (i = 0; i < *nargs; i++) {
++ snprintf(param, sizeof(param), "arg%d", i);
++ prov = gctl_get_asciiparam(req, param);
++ if (prov == NULL) {
++ G_ELI_DEBUG(0, "No 'arg%d' argument.", i);
++ continue;
++ }
++
++ sc = g_eli_find_device(mp, prov);
++ if (sc == NULL) {
++ G_ELI_DEBUG(0, "No such provider: %s.", prov);
++ continue;
++ }
++ error = g_eli_kill_one(sc);
++ if (error != 0)
++ gctl_error(req, "Not fully done.");
++ }
++ }
++}
++
++void
++g_eli_config(struct gctl_req *req, struct g_class *mp, const char *verb)
++{
++ uint32_t *version;
++
++ g_topology_assert();
++
++ version = gctl_get_paraml(req, "version", sizeof(*version));
++ if (version == NULL) {
++ gctl_error(req, "No '%s' argument.", "version");
++ return;
++ }
++ if (*version != G_ELI_VERSION) {
++ gctl_error(req, "Userland and kernel parts are out of sync.");
++ return;
++ }
++
++ if (strcmp(verb, "attach") == 0)
++ g_eli_ctl_attach(req, mp);
++ else if (strcmp(verb, "detach") == 0 || strcmp(verb, "stop") == 0)
++ g_eli_ctl_detach(req, mp);
++ else if (strcmp(verb, "onetime") == 0)
++ g_eli_ctl_onetime(req, mp);
++ else if (strcmp(verb, "configure") == 0)
++ g_eli_ctl_configure(req, mp);
++ else if (strcmp(verb, "setkey") == 0)
++ g_eli_ctl_setkey(req, mp);
++ else if (strcmp(verb, "delkey") == 0)
++ g_eli_ctl_delkey(req, mp);
++ else if (strcmp(verb, "suspend") == 0)
++ g_eli_ctl_suspend(req, mp);
++ else if (strcmp(verb, "resume") == 0)
++ g_eli_ctl_resume(req, mp);
++ else if (strcmp(verb, "kill") == 0)
++ g_eli_ctl_kill(req, mp);
++ else
++ gctl_error(req, "Unknown verb.");
++}
+--- /dev/null
++++ b/sys/geom/eli/pkcs5v2.c
+@@ -0,0 +1,123 @@
++/*-
++ * Copyright (c) 2005 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/param.h>
++#ifdef _KERNEL
++#include <sys/systm.h>
++#include <sys/kernel.h>
++#else
++#include <sys/resource.h>
++#include <stdint.h>
++#include <strings.h>
++#endif
++
++#include <geom/eli/g_eli.h>
++#include <geom/eli/pkcs5v2.h>
++
++static __inline void
++xor(uint8_t *dst, const uint8_t *src, size_t size)
++{
++
++ for (; size > 0; size--)
++ *dst++ ^= *src++;
++}
++
++void
++pkcs5v2_genkey(uint8_t *key, unsigned keylen, const uint8_t *salt,
++ size_t saltsize, const char *passphrase, u_int iterations)
++{
++ uint8_t md[SHA512_MDLEN], saltcount[saltsize + sizeof(uint32_t)];
++ uint8_t *counter, *keyp;
++ u_int i, bsize, passlen;
++ uint32_t count;
++
++ passlen = strlen(passphrase);
++ bzero(key, keylen);
++ bcopy(salt, saltcount, saltsize);
++ counter = saltcount + saltsize;
++
++ keyp = key;
++ for (count = 1; keylen > 0; count++, keylen -= bsize, keyp += bsize) {
++ bsize = MIN(keylen, sizeof(md));
++
++ counter[0] = (count >> 24) & 0xff;
++ counter[1] = (count >> 16) & 0xff;
++ counter[2] = (count >> 8) & 0xff;
++ counter[3] = count & 0xff;
++ g_eli_crypto_hmac(passphrase, passlen, saltcount,
++ sizeof(saltcount), md, 0);
++ xor(keyp, md, bsize);
++
++ for(i = 1; i < iterations; i++) {
++ g_eli_crypto_hmac(passphrase, passlen, md, sizeof(md),
++ md, 0);
++ xor(keyp, md, bsize);
++ }
++ }
++}
++
++#ifndef _KERNEL
++/*
++ * Return the number of microseconds needed for 'iterations' iterations.
++ */
++static int
++pkcs5v2_probe(int iterations)
++{
++ uint8_t key[G_ELI_USERKEYLEN], salt[G_ELI_SALTLEN];
++ uint8_t passphrase[] = "passphrase";
++ struct rusage start, end;
++ int usecs;
++
++ getrusage(RUSAGE_SELF, &start);
++ pkcs5v2_genkey(key, sizeof(key), salt, sizeof(salt), passphrase,
++ iterations);
++ getrusage(RUSAGE_SELF, &end);
++
++ usecs = end.ru_utime.tv_sec - start.ru_utime.tv_sec;
++ usecs *= 1000000;
++ usecs += end.ru_utime.tv_usec - start.ru_utime.tv_usec;
++ return (usecs);
++}
++
++/*
++ * Return the number of iterations which takes 'usecs' microseconds.
++ */
++int
++pkcs5v2_calculate(int usecs)
++{
++ int iterations, v;
++
++ for (iterations = 1; ; iterations <<= 1) {
++ v = pkcs5v2_probe(iterations);
++ if (v > 2000000)
++ break;
++ }
++ return (((intmax_t)iterations * (intmax_t)usecs) / v);
++}
++#endif /* !_KERNEL */
+--- /dev/null
++++ b/sys/geom/eli/g_eli_integrity.c
+@@ -0,0 +1,540 @@
++/*-
++ * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/param.h>
++#include <sys/systm.h>
++#include <sys/kernel.h>
++#include <sys/linker.h>
++#include <sys/module.h>
++#include <sys/lock.h>
++#include <sys/mutex.h>
++#include <sys/bio.h>
++#include <sys/sysctl.h>
++#include <sys/malloc.h>
++#include <sys/kthread.h>
++#include <sys/proc.h>
++#include <sys/sched.h>
++#include <sys/smp.h>
++#include <sys/uio.h>
++#include <sys/vnode.h>
++
++#include <vm/uma.h>
++
++#include <geom/geom.h>
++#include <geom/eli/g_eli.h>
++#include <geom/eli/pkcs5v2.h>
++
++/*
++ * The data layout description when integrity verification is configured.
++ *
++ * One of the most important assumptions here is that authenticated data and its
++ * HMAC has to be stored in the same place (namely in the same sector) to make
++ * it work reliably.
++ * The problem is that file systems work only with sectors that are multiple of
++ * 512 bytes and a power of two number.
++ * My idea to implement it is as follows.
++ * Let's store HMAC in sector. This is a must. This leaves us 480 bytes for
++ * data. We can't use that directly (ie. we can't create provider with 480 bytes
++ * sector size). We need another sector from where we take only 32 bytes of data
++ * and we store HMAC of this data as well. This takes two sectors from the
++ * original provider at the input and leaves us one sector of authenticated data
++ * at the output. Not very efficient, but you got the idea.
++ * Now, let's assume, we want to create provider with 4096 bytes sector.
++ * To output 4096 bytes of authenticated data we need 8x480 plus 1x256, so we
++ * need nine 512-bytes sectors at the input to get one 4096-bytes sector at the
++ * output. That's better. With 4096 bytes sector we can use 89% of size of the
++ * original provider. I find it as an acceptable cost.
++ * The reliability comes from the fact, that every HMAC stored inside the sector
++ * is calculated only for the data in the same sector, so it's impossible to
++ * write new data and leave old HMAC or vice versa.
++ *
++ * And here is the picture:
++ *
++ * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
++ * |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b |
++ * |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data |
++ * +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
++ * |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes |
++ * +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused|
++ * +----------+
++ * da0.eli: +----+----+----+----+----+----+----+----+----+
++ * |480b|480b|480b|480b|480b|480b|480b|480b|256b|
++ * +----+----+----+----+----+----+----+----+----+
++ * | 4096 bytes |
++ * +--------------------------------------------+
++ *
++ * PS. You can use any sector size with geli(8). My example is using 4kB,
++ * because it's most efficient. For 8kB sectors you need 2 extra sectors,
++ * so the cost is the same as for 4kB sectors.
++ */
++
++/*
++ * Code paths:
++ * BIO_READ:
++ * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver
++ * BIO_WRITE:
++ * g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
++ */
++
++MALLOC_DECLARE(M_ELI);
++
++/*
++ * Here we generate key for HMAC. Every sector has its own HMAC key, so it is
++ * not possible to copy sectors.
++ * We cannot depend on fact, that every sector has its own IV, because different
++ * IV doesn't change HMAC, when we use encrypt-then-authenticate method.
++ */
++static void
++g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key)
++{
++ SHA256_CTX ctx;
++
++ /* Copy precalculated SHA256 context. */
++ bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx));
++ SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset));
++ SHA256_Final(key, &ctx);
++}
++
++/*
++ * The function is called after we read and decrypt data.
++ *
++ * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver
++ */
++static int
++g_eli_auth_read_done(struct cryptop *crp)
++{
++ struct g_eli_softc *sc;
++ struct bio *bp;
++
++ if (crp->crp_etype == EAGAIN) {
++ if (g_eli_crypto_rerun(crp) == 0)
++ return (0);
++ }
++ bp = (struct bio *)crp->crp_opaque;
++ bp->bio_inbed++;
++ if (crp->crp_etype == 0) {
++ bp->bio_completed += crp->crp_olen;
++ G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%jd completed=%jd).",
++ bp->bio_inbed, bp->bio_children, (intmax_t)crp->crp_olen, (intmax_t)bp->bio_completed);
++ } else {
++ G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
++ bp->bio_inbed, bp->bio_children, crp->crp_etype);
++ if (bp->bio_error == 0)
++ bp->bio_error = crp->crp_etype;
++ }
++ /*
++ * Do we have all sectors already?
++ */
++ if (bp->bio_inbed < bp->bio_children)
++ return (0);
++ sc = bp->bio_to->geom->softc;
++ if (bp->bio_error == 0) {
++ u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
++ u_char *srcdata, *dstdata, *auth;
++ off_t coroff, corsize;
++
++ /*
++ * Verify data integrity based on calculated and read HMACs.
++ */
++ /* Sectorsize of decrypted provider eg. 4096. */
++ decr_secsize = bp->bio_to->sectorsize;
++ /* The real sectorsize of encrypted provider, eg. 512. */
++ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
++ /* Number of data bytes in one encrypted sector, eg. 480. */
++ data_secsize = sc->sc_data_per_sector;
++ /* Number of sectors from decrypted provider, eg. 2. */
++ nsec = bp->bio_length / decr_secsize;
++ /* Number of sectors from encrypted provider, eg. 18. */
++ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
++ /* Last sector number in every big sector, eg. 9. */
++ lsec = sc->sc_bytes_per_sector / encr_secsize;
++
++ srcdata = bp->bio_driver2;
++ dstdata = bp->bio_data;
++ auth = srcdata + encr_secsize * nsec;
++ coroff = -1;
++ corsize = 0;
++
++ for (i = 1; i <= nsec; i++) {
++ data_secsize = sc->sc_data_per_sector;
++ if ((i % lsec) == 0)
++ data_secsize = decr_secsize % data_secsize;
++ if (bcmp(srcdata, auth, sc->sc_alen) != 0) {
++ /*
++				 * Corruption detected, remember the offset if
++ * this is the first corrupted sector and
++ * increase size.
++ */
++ if (bp->bio_error == 0)
++ bp->bio_error = -1;
++ if (coroff == -1) {
++ coroff = bp->bio_offset +
++ (dstdata - (u_char *)bp->bio_data);
++ }
++ corsize += data_secsize;
++ } else {
++ /*
++				 * No corruption, good.
++ * Report previous corruption if there was one.
++ */
++ if (coroff != -1) {
++ G_ELI_DEBUG(0, "%s: %jd bytes "
++ "corrupted at offset %jd.",
++ sc->sc_name, (intmax_t)corsize,
++ (intmax_t)coroff);
++ coroff = -1;
++ corsize = 0;
++ }
++ bcopy(srcdata + sc->sc_alen, dstdata,
++ data_secsize);
++ }
++ srcdata += encr_secsize;
++ dstdata += data_secsize;
++ auth += sc->sc_alen;
++ }
++ /* Report previous corruption if there was one. */
++ if (coroff != -1) {
++ G_ELI_DEBUG(0, "%s: %jd bytes corrupted at offset %jd.",
++ sc->sc_name, (intmax_t)corsize, (intmax_t)coroff);
++ }
++ }
++ free(bp->bio_driver2, M_ELI);
++ bp->bio_driver2 = NULL;
++ if (bp->bio_error != 0) {
++ if (bp->bio_error == -1)
++ bp->bio_error = EINVAL;
++ else {
++ G_ELI_LOGREQ(0, bp,
++ "Crypto READ request failed (error=%d).",
++ bp->bio_error);
++ }
++ bp->bio_completed = 0;
++ }
++ /*
++ * Read is finished, send it up.
++ */
++ g_io_deliver(bp, bp->bio_error);
++ atomic_subtract_int(&sc->sc_inflight, 1);
++ return (0);
++}
++
++/*
++ * The function is called after data encryption.
++ *
++ * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
++ */
++static int
++g_eli_auth_write_done(struct cryptop *crp)
++{
++ struct g_eli_softc *sc;
++ struct g_consumer *cp;
++ struct bio *bp, *cbp, *cbp2;
++ u_int nsec;
++
++ if (crp->crp_etype == EAGAIN) {
++ if (g_eli_crypto_rerun(crp) == 0)
++ return (0);
++ }
++ bp = (struct bio *)crp->crp_opaque;
++ bp->bio_inbed++;
++ if (crp->crp_etype == 0) {
++ G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
++ bp->bio_inbed, bp->bio_children);
++ } else {
++ G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
++ bp->bio_inbed, bp->bio_children, crp->crp_etype);
++ if (bp->bio_error == 0)
++ bp->bio_error = crp->crp_etype;
++ }
++ /*
++ * All sectors are already encrypted?
++ */
++ if (bp->bio_inbed < bp->bio_children)
++ return (0);
++ sc = bp->bio_to->geom->softc;
++ if (bp->bio_error != 0) {
++ G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
++ bp->bio_error);
++ free(bp->bio_driver2, M_ELI);
++ bp->bio_driver2 = NULL;
++ cbp = bp->bio_driver1;
++ bp->bio_driver1 = NULL;
++ g_destroy_bio(cbp);
++ g_io_deliver(bp, bp->bio_error);
++ atomic_subtract_int(&sc->sc_inflight, 1);
++ return (0);
++ }
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ cbp = bp->bio_driver1;
++ bp->bio_driver1 = NULL;
++ cbp->bio_to = cp->provider;
++ cbp->bio_done = g_eli_write_done;
++
++ /* Number of sectors from decrypted provider, eg. 1. */
++ nsec = bp->bio_length / bp->bio_to->sectorsize;
++ /* Number of sectors from encrypted provider, eg. 9. */
++ nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize;
++
++ cbp->bio_length = cp->provider->sectorsize * nsec;
++ cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
++ cbp->bio_data = bp->bio_driver2;
++
++ /*
++ * We write more than what is requested, so we have to be ready to write
++ * more than MAXPHYS.
++ */
++ cbp2 = NULL;
++ if (cbp->bio_length > MAXPHYS) {
++ cbp2 = g_duplicate_bio(bp);
++ cbp2->bio_length = cbp->bio_length - MAXPHYS;
++ cbp2->bio_data = cbp->bio_data + MAXPHYS;
++ cbp2->bio_offset = cbp->bio_offset + MAXPHYS;
++ cbp2->bio_to = cp->provider;
++ cbp2->bio_done = g_eli_write_done;
++ cbp->bio_length = MAXPHYS;
++ }
++ /*
++ * Send encrypted data to the provider.
++ */
++ G_ELI_LOGREQ(2, cbp, "Sending request.");
++ bp->bio_inbed = 0;
++ bp->bio_children = (cbp2 != NULL ? 2 : 1);
++ g_io_request(cbp, cp);
++ if (cbp2 != NULL) {
++ G_ELI_LOGREQ(2, cbp2, "Sending request.");
++ g_io_request(cbp2, cp);
++ }
++ return (0);
++}
++
++void
++g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp)
++{
++ struct g_consumer *cp;
++ struct bio *cbp, *cbp2;
++ size_t size;
++ off_t nsec;
++
++ bp->bio_pflags = 0;
++
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ cbp = bp->bio_driver1;
++ bp->bio_driver1 = NULL;
++ cbp->bio_to = cp->provider;
++ cbp->bio_done = g_eli_read_done;
++
++ /* Number of sectors from decrypted provider, eg. 1. */
++ nsec = bp->bio_length / bp->bio_to->sectorsize;
++ /* Number of sectors from encrypted provider, eg. 9. */
++ nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize;
++
++ cbp->bio_length = cp->provider->sectorsize * nsec;
++ size = cbp->bio_length;
++ size += sc->sc_alen * nsec;
++ size += sizeof(struct cryptop) * nsec;
++ size += sizeof(struct cryptodesc) * nsec * 2;
++ size += G_ELI_AUTH_SECKEYLEN * nsec;
++ size += sizeof(struct uio) * nsec;
++ size += sizeof(struct iovec) * nsec;
++ cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
++ bp->bio_driver2 = malloc(size, M_ELI, M_WAITOK);
++ cbp->bio_data = bp->bio_driver2;
++
++ /*
++ * We read more than what is requested, so we have to be ready to read
++ * more than MAXPHYS.
++ */
++ cbp2 = NULL;
++ if (cbp->bio_length > MAXPHYS) {
++ cbp2 = g_duplicate_bio(bp);
++ cbp2->bio_length = cbp->bio_length - MAXPHYS;
++ cbp2->bio_data = cbp->bio_data + MAXPHYS;
++ cbp2->bio_offset = cbp->bio_offset + MAXPHYS;
++ cbp2->bio_to = cp->provider;
++ cbp2->bio_done = g_eli_read_done;
++ cbp->bio_length = MAXPHYS;
++ }
++ /*
++ * Read encrypted data from provider.
++ */
++ G_ELI_LOGREQ(2, cbp, "Sending request.");
++ g_io_request(cbp, cp);
++ if (cbp2 != NULL) {
++ G_ELI_LOGREQ(2, cbp2, "Sending request.");
++ g_io_request(cbp2, cp);
++ }
++}
++
++/*
++ * This is the main function responsible for cryptography (ie. communication
++ * with crypto(9) subsystem).
++ *
++ * BIO_READ:
++ * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver
++ * BIO_WRITE:
++ * g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
++ */
++void
++g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp)
++{
++ struct g_eli_softc *sc;
++ struct cryptop *crp;
++ struct cryptodesc *crde, *crda;
++ struct uio *uio;
++ struct iovec *iov;
++ u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
++ off_t dstoff;
++ int err, error;
++ u_char *p, *data, *auth, *authkey, *plaindata;
++
++ G_ELI_LOGREQ(3, bp, "%s", __func__);
++
++ bp->bio_pflags = wr->w_number;
++ sc = wr->w_softc;
++ /* Sectorsize of decrypted provider eg. 4096. */
++ decr_secsize = bp->bio_to->sectorsize;
++ /* The real sectorsize of encrypted provider, eg. 512. */
++ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
++ /* Number of data bytes in one encrypted sector, eg. 480. */
++ data_secsize = sc->sc_data_per_sector;
++ /* Number of sectors from decrypted provider, eg. 2. */
++ nsec = bp->bio_length / decr_secsize;
++ /* Number of sectors from encrypted provider, eg. 18. */
++ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
++ /* Last sector number in every big sector, eg. 9. */
++ lsec = sc->sc_bytes_per_sector / encr_secsize;
++ /* Destination offset, used for IV generation. */
++ dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
++
++ auth = NULL; /* Silence compiler warning. */
++ plaindata = bp->bio_data;
++ if (bp->bio_cmd == BIO_READ) {
++ data = bp->bio_driver2;
++ auth = data + encr_secsize * nsec;
++ p = auth + sc->sc_alen * nsec;
++ } else {
++ size_t size;
++
++ size = encr_secsize * nsec;
++ size += sizeof(*crp) * nsec;
++ size += sizeof(*crde) * nsec;
++ size += sizeof(*crda) * nsec;
++ size += G_ELI_AUTH_SECKEYLEN * nsec;
++ size += sizeof(*uio) * nsec;
++ size += sizeof(*iov) * nsec;
++ data = malloc(size, M_ELI, M_WAITOK);
++ bp->bio_driver2 = data;
++ p = data + encr_secsize * nsec;
++ }
++ bp->bio_inbed = 0;
++ bp->bio_children = nsec;
++
++ error = 0;
++ for (i = 1; i <= nsec; i++, dstoff += encr_secsize) {
++ crp = (struct cryptop *)p; p += sizeof(*crp);
++ crde = (struct cryptodesc *)p; p += sizeof(*crde);
++ crda = (struct cryptodesc *)p; p += sizeof(*crda);
++ authkey = (u_char *)p; p += G_ELI_AUTH_SECKEYLEN;
++ uio = (struct uio *)p; p += sizeof(*uio);
++ iov = (struct iovec *)p; p += sizeof(*iov);
++
++ data_secsize = sc->sc_data_per_sector;
++ if ((i % lsec) == 0)
++ data_secsize = decr_secsize % data_secsize;
++
++ if (bp->bio_cmd == BIO_READ) {
++ /* Remember read HMAC. */
++ bcopy(data, auth, sc->sc_alen);
++ auth += sc->sc_alen;
++ /* TODO: bzero(9) can be commented out later. */
++ bzero(data, sc->sc_alen);
++ } else {
++ bcopy(plaindata, data + sc->sc_alen, data_secsize);
++ plaindata += data_secsize;
++ }
++
++ iov->iov_len = sc->sc_alen + data_secsize;
++ iov->iov_base = data;
++ data += encr_secsize;
++
++ uio->uio_iov = iov;
++ uio->uio_iovcnt = 1;
++ uio->uio_segflg = UIO_SYSSPACE;
++ uio->uio_resid = iov->iov_len;
++
++ crp->crp_sid = wr->w_sid;
++ crp->crp_ilen = uio->uio_resid;
++ crp->crp_olen = data_secsize;
++ crp->crp_opaque = (void *)bp;
++ crp->crp_buf = (void *)uio;
++ crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC | CRYPTO_F_REL;
++ if (g_eli_batch)
++ crp->crp_flags |= CRYPTO_F_BATCH;
++ if (bp->bio_cmd == BIO_WRITE) {
++ crp->crp_callback = g_eli_auth_write_done;
++ crp->crp_desc = crde;
++ crde->crd_next = crda;
++ crda->crd_next = NULL;
++ } else {
++ crp->crp_callback = g_eli_auth_read_done;
++ crp->crp_desc = crda;
++ crda->crd_next = crde;
++ crde->crd_next = NULL;
++ }
++
++ crde->crd_skip = sc->sc_alen;
++ crde->crd_len = data_secsize;
++ crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
++ if (bp->bio_cmd == BIO_WRITE)
++ crde->crd_flags |= CRD_F_ENCRYPT;
++ crde->crd_alg = sc->sc_ealgo;
++ crde->crd_key = g_eli_crypto_key(sc, dstoff, encr_secsize);
++ crde->crd_klen = sc->sc_ekeylen;
++ if (sc->sc_ealgo == CRYPTO_AES_XTS)
++ crde->crd_klen <<= 1;
++ g_eli_crypto_ivgen(sc, dstoff, crde->crd_iv,
++ sizeof(crde->crd_iv));
++
++ crda->crd_skip = sc->sc_alen;
++ crda->crd_len = data_secsize;
++ crda->crd_inject = 0;
++ crda->crd_flags = CRD_F_KEY_EXPLICIT;
++ crda->crd_alg = sc->sc_aalgo;
++ g_eli_auth_keygen(sc, dstoff, authkey);
++ crda->crd_key = authkey;
++ crda->crd_klen = G_ELI_AUTH_SECKEYLEN * 8;
++
++ crp->crp_etype = 0;
++ err = crypto_dispatch(crp);
++ if (err != 0 && error == 0)
++ error = err;
++ }
++ if (bp->bio_error == 0)
++ bp->bio_error = error;
++}
+--- /dev/null
++++ b/sys/geom/eli/g_eli_crypto.c
+@@ -0,0 +1,306 @@
++/*-
++ * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/param.h>
++#ifdef _KERNEL
++#include <sys/systm.h>
++#include <sys/kernel.h>
++#include <sys/malloc.h>
++#include <sys/uio.h>
++#else
++#include <stdint.h>
++#include <string.h>
++#include <strings.h>
++#include <errno.h>
++#include <assert.h>
++#include <openssl/evp.h>
++#define _OpenSSL_
++#endif
++#include <geom/eli/g_eli.h>
++
++#ifdef _KERNEL
++MALLOC_DECLARE(M_ELI);
++
++static int
++g_eli_crypto_done(struct cryptop *crp)
++{
++
++ crp->crp_opaque = (void *)crp;
++ wakeup(crp);
++ return (0);
++}
++
++static int
++g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize,
++ const u_char *key, size_t keysize)
++{
++ struct cryptoini cri;
++ struct cryptop *crp;
++ struct cryptodesc *crd;
++ struct uio *uio;
++ struct iovec *iov;
++ uint64_t sid;
++ u_char *p;
++ int error;
++
++ KASSERT(algo != CRYPTO_AES_XTS,
++ ("%s: CRYPTO_AES_XTS unexpected here", __func__));
++
++ bzero(&cri, sizeof(cri));
++ cri.cri_alg = algo;
++ cri.cri_key = __DECONST(void *, key);
++ cri.cri_klen = keysize;
++ error = crypto_newsession(&sid, &cri, CRYPTOCAP_F_SOFTWARE);
++ if (error != 0)
++ return (error);
++ p = malloc(sizeof(*crp) + sizeof(*crd) + sizeof(*uio) + sizeof(*iov),
++ M_ELI, M_NOWAIT | M_ZERO);
++ if (p == NULL) {
++ crypto_freesession(sid);
++ return (ENOMEM);
++ }
++ crp = (struct cryptop *)p; p += sizeof(*crp);
++ crd = (struct cryptodesc *)p; p += sizeof(*crd);
++ uio = (struct uio *)p; p += sizeof(*uio);
++ iov = (struct iovec *)p; p += sizeof(*iov);
++
++ iov->iov_len = datasize;
++ iov->iov_base = data;
++
++ uio->uio_iov = iov;
++ uio->uio_iovcnt = 1;
++ uio->uio_segflg = UIO_SYSSPACE;
++ uio->uio_resid = datasize;
++
++ crd->crd_skip = 0;
++ crd->crd_len = datasize;
++ crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
++ if (enc)
++ crd->crd_flags |= CRD_F_ENCRYPT;
++ crd->crd_alg = algo;
++ crd->crd_key = __DECONST(void *, key);
++ crd->crd_klen = keysize;
++ bzero(crd->crd_iv, sizeof(crd->crd_iv));
++ crd->crd_next = NULL;
++
++ crp->crp_sid = sid;
++ crp->crp_ilen = datasize;
++ crp->crp_olen = datasize;
++ crp->crp_opaque = NULL;
++ crp->crp_callback = g_eli_crypto_done;
++ crp->crp_buf = (void *)uio;
++ crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC | CRYPTO_F_REL;
++ crp->crp_desc = crd;
++
++ error = crypto_dispatch(crp);
++ if (error == 0) {
++ while (crp->crp_opaque == NULL)
++ tsleep(crp, PRIBIO, "geli", hz / 5);
++ error = crp->crp_etype;
++ }
++
++ free(crp, M_ELI);
++ crypto_freesession(sid);
++ return (error);
++}
++#else /* !_KERNEL */
++static int
++g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize,
++ const u_char *key, size_t keysize)
++{
++ EVP_CIPHER_CTX ctx;
++ const EVP_CIPHER *type;
++ u_char iv[keysize];
++ int outsize;
++
++ assert(algo != CRYPTO_AES_XTS);
++
++ switch (algo) {
++ case CRYPTO_NULL_CBC:
++ type = EVP_enc_null();
++ break;
++ case CRYPTO_AES_CBC:
++ switch (keysize) {
++ case 128:
++ type = EVP_aes_128_cbc();
++ break;
++ case 192:
++ type = EVP_aes_192_cbc();
++ break;
++ case 256:
++ type = EVP_aes_256_cbc();
++ break;
++ default:
++ return (EINVAL);
++ }
++ break;
++ case CRYPTO_BLF_CBC:
++ type = EVP_bf_cbc();
++ break;
++#ifndef OPENSSL_NO_CAMELLIA
++ case CRYPTO_CAMELLIA_CBC:
++ switch (keysize) {
++ case 128:
++ type = EVP_camellia_128_cbc();
++ break;
++ case 192:
++ type = EVP_camellia_192_cbc();
++ break;
++ case 256:
++ type = EVP_camellia_256_cbc();
++ break;
++ default:
++ return (EINVAL);
++ }
++ break;
++#endif
++ case CRYPTO_3DES_CBC:
++ type = EVP_des_ede3_cbc();
++ break;
++ default:
++ return (EINVAL);
++ }
++
++ EVP_CIPHER_CTX_init(&ctx);
++
++ EVP_CipherInit_ex(&ctx, type, NULL, NULL, NULL, enc);
++ EVP_CIPHER_CTX_set_key_length(&ctx, keysize / 8);
++ EVP_CIPHER_CTX_set_padding(&ctx, 0);
++ bzero(iv, sizeof(iv));
++ EVP_CipherInit_ex(&ctx, NULL, NULL, key, iv, enc);
++
++ if (EVP_CipherUpdate(&ctx, data, &outsize, data, datasize) == 0) {
++ EVP_CIPHER_CTX_cleanup(&ctx);
++ return (EINVAL);
++ }
++ assert(outsize == (int)datasize);
++
++ if (EVP_CipherFinal_ex(&ctx, data + outsize, &outsize) == 0) {
++ EVP_CIPHER_CTX_cleanup(&ctx);
++ return (EINVAL);
++ }
++ assert(outsize == 0);
++
++ EVP_CIPHER_CTX_cleanup(&ctx);
++ return (0);
++}
++#endif /* !_KERNEL */
++
++int
++g_eli_crypto_encrypt(u_int algo, u_char *data, size_t datasize,
++ const u_char *key, size_t keysize)
++{
++
++ /* We prefer AES-CBC for metadata protection. */
++ if (algo == CRYPTO_AES_XTS)
++ algo = CRYPTO_AES_CBC;
++
++ return (g_eli_crypto_cipher(algo, 1, data, datasize, key, keysize));
++}
++
++int
++g_eli_crypto_decrypt(u_int algo, u_char *data, size_t datasize,
++ const u_char *key, size_t keysize)
++{
++
++ /* We prefer AES-CBC for metadata protection. */
++ if (algo == CRYPTO_AES_XTS)
++ algo = CRYPTO_AES_CBC;
++
++ return (g_eli_crypto_cipher(algo, 0, data, datasize, key, keysize));
++}
++
++void
++g_eli_crypto_hmac_init(struct hmac_ctx *ctx, const uint8_t *hkey,
++ size_t hkeylen)
++{
++ u_char k_ipad[128], key[128];
++ SHA512_CTX lctx;
++ u_int i;
++
++ bzero(key, sizeof(key));
++ if (hkeylen == 0)
++ ; /* do nothing */
++ else if (hkeylen <= 128)
++ bcopy(hkey, key, hkeylen);
++ else {
++ /* If key is longer than 128 bytes reset it to key = SHA512(key). */
++ SHA512_Init(&lctx);
++ SHA512_Update(&lctx, hkey, hkeylen);
++ SHA512_Final(key, &lctx);
++ }
++
++ /* XOR key with ipad and opad values. */
++ for (i = 0; i < sizeof(key); i++) {
++ k_ipad[i] = key[i] ^ 0x36;
++ ctx->k_opad[i] = key[i] ^ 0x5c;
++ }
++ bzero(key, sizeof(key));
++ /* Perform inner SHA512. */
++ SHA512_Init(&ctx->shactx);
++ SHA512_Update(&ctx->shactx, k_ipad, sizeof(k_ipad));
++}
++
++void
++g_eli_crypto_hmac_update(struct hmac_ctx *ctx, const uint8_t *data,
++ size_t datasize)
++{
++
++ SHA512_Update(&ctx->shactx, data, datasize);
++}
++
++void
++g_eli_crypto_hmac_final(struct hmac_ctx *ctx, uint8_t *md, size_t mdsize)
++{
++ u_char digest[SHA512_MDLEN];
++ SHA512_CTX lctx;
++
++ SHA512_Final(digest, &ctx->shactx);
++ /* Perform outer SHA512. */
++ SHA512_Init(&lctx);
++ SHA512_Update(&lctx, ctx->k_opad, sizeof(ctx->k_opad));
++ bzero(ctx, sizeof(*ctx));
++ SHA512_Update(&lctx, digest, sizeof(digest));
++ SHA512_Final(digest, &lctx);
++ /* mdsize == 0 means "Give me the whole hash!" */
++ if (mdsize == 0)
++ mdsize = SHA512_MDLEN;
++ bcopy(digest, md, mdsize);
++}
++
++void
++g_eli_crypto_hmac(const uint8_t *hkey, size_t hkeysize, const uint8_t *data,
++ size_t datasize, uint8_t *md, size_t mdsize)
++{
++ struct hmac_ctx ctx;
++
++ g_eli_crypto_hmac_init(&ctx, hkey, hkeysize);
++ g_eli_crypto_hmac_update(&ctx, data, datasize);
++ g_eli_crypto_hmac_final(&ctx, md, mdsize);
++}
+--- /dev/null
++++ b/sys/geom/eli/g_eli.c
+@@ -0,0 +1,1302 @@
++/*-
++ * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/param.h>
++#include <sys/systm.h>
++#include <sys/kernel.h>
++#include <sys/linker.h>
++#include <sys/module.h>
++#include <sys/lock.h>
++#include <sys/mutex.h>
++#include <sys/bio.h>
++#include <sys/sysctl.h>
++#include <sys/malloc.h>
++#include <sys/eventhandler.h>
++#include <sys/kthread.h>
++#include <sys/proc.h>
++#include <sys/sched.h>
++#include <sys/smp.h>
++#include <sys/uio.h>
++#include <sys/vnode.h>
++
++#include <vm/uma.h>
++
++#include <geom/geom.h>
++#include <geom/eli/g_eli.h>
++#include <geom/eli/pkcs5v2.h>
++
++
++MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
++
++SYSCTL_DECL(_kern_geom);
++SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
++int g_eli_debug = 0;
++TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
++SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
++ "Debug level");
++static u_int g_eli_tries = 3;
++TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
++SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
++ "Number of tries for entering the passphrase");
++static u_int g_eli_visible_passphrase = 0;
++TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
++SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
++ &g_eli_visible_passphrase, 0,
++ "Turn on echo when entering the passphrase (for debug purposes only!!)");
++u_int g_eli_overwrites = G_ELI_OVERWRITES;
++TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
++SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
++ 0, "Number of times on-disk keys should be overwritten when destroying them");
++static u_int g_eli_threads = 0;
++TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
++SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
++ "Number of threads doing crypto work");
++u_int g_eli_batch = 0;
++TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
++SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
++ "Use crypto operations batching");
++
++static eventhandler_tag g_eli_pre_sync = NULL;
++
++static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
++ struct g_geom *gp);
++static void g_eli_init(struct g_class *mp);
++static void g_eli_fini(struct g_class *mp);
++
++static g_taste_t g_eli_taste;
++static g_dumpconf_t g_eli_dumpconf;
++
++struct g_class g_eli_class = {
++ .name = G_ELI_CLASS_NAME,
++ .version = G_VERSION,
++ .ctlreq = g_eli_config,
++ .taste = g_eli_taste,
++ .destroy_geom = g_eli_destroy_geom,
++ .init = g_eli_init,
++ .fini = g_eli_fini
++};
++
++
++/*
++ * Code paths:
++ * BIO_READ:
++ * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
++ * BIO_WRITE:
++ * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
++ */
++
++
++/*
++ * EAGAIN from crypto(9) means that we were probably rebalanced to another
++ * crypto accelerator or something similar.
++ * The function updates the SID and reruns the operation.
++ */
++int
++g_eli_crypto_rerun(struct cryptop *crp)
++{
++ struct g_eli_softc *sc;
++ struct g_eli_worker *wr;
++ struct bio *bp;
++ int error;
++
++ bp = (struct bio *)crp->crp_opaque;
++ sc = bp->bio_to->geom->softc;
++ LIST_FOREACH(wr, &sc->sc_workers, w_next) {
++ if (wr->w_number == bp->bio_pflags)
++ break;
++ }
++ KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
++ G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
++ bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
++ (uintmax_t)crp->crp_sid);
++ wr->w_sid = crp->crp_sid;
++ crp->crp_etype = 0;
++ error = crypto_dispatch(crp);
++ if (error == 0)
++ return (0);
++ G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
++ crp->crp_etype = error;
++ return (error);
++}
++
++/*
++ * The function is called after reading encrypted data from the provider.
++ *
++ * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
++ */
++void
++g_eli_read_done(struct bio *bp)
++{
++ struct g_eli_softc *sc;
++ struct bio *pbp;
++
++ G_ELI_LOGREQ(2, bp, "Request done.");
++ pbp = bp->bio_parent;
++ if (pbp->bio_error == 0)
++ pbp->bio_error = bp->bio_error;
++ /*
++ * Do we have all sectors already?
++ */
++ pbp->bio_inbed++;
++ if (pbp->bio_inbed < pbp->bio_children)
++ return;
++ g_destroy_bio(bp);
++ sc = pbp->bio_to->geom->softc;
++ if (pbp->bio_error != 0) {
++ G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
++ pbp->bio_completed = 0;
++ if (pbp->bio_driver2 != NULL) {
++ free(pbp->bio_driver2, M_ELI);
++ pbp->bio_driver2 = NULL;
++ }
++ g_io_deliver(pbp, pbp->bio_error);
++ atomic_subtract_int(&sc->sc_inflight, 1);
++ return;
++ }
++ mtx_lock(&sc->sc_queue_mtx);
++ bioq_insert_tail(&sc->sc_queue, pbp);
++ mtx_unlock(&sc->sc_queue_mtx);
++ wakeup(sc);
++}
++
++/*
++ * The function is called after we encrypt and write data.
++ *
++ * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
++ */
++void
++g_eli_write_done(struct bio *bp)
++{
++ struct g_eli_softc *sc;
++ struct bio *pbp;
++
++ G_ELI_LOGREQ(2, bp, "Request done.");
++ pbp = bp->bio_parent;
++ if (pbp->bio_error == 0) {
++ if (bp->bio_error != 0)
++ pbp->bio_error = bp->bio_error;
++ }
++ /*
++ * Do we have all sectors already?
++ */
++ pbp->bio_inbed++;
++ if (pbp->bio_inbed < pbp->bio_children)
++ return;
++ free(pbp->bio_driver2, M_ELI);
++ pbp->bio_driver2 = NULL;
++ if (pbp->bio_error != 0) {
++ G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
++ pbp->bio_error);
++ pbp->bio_completed = 0;
++ }
++ g_destroy_bio(bp);
++ /*
++ * Write is finished, send it up.
++ */
++ pbp->bio_completed = pbp->bio_length;
++ sc = pbp->bio_to->geom->softc;
++ g_io_deliver(pbp, pbp->bio_error);
++ atomic_subtract_int(&sc->sc_inflight, 1);
++}
++
++/*
++ * This function should never be called, but GEOM requires an ->orphan()
++ * method to be set for every geom.
++ */
++static void
++g_eli_orphan_spoil_assert(struct g_consumer *cp)
++{
++
++ panic("Function %s() called for %s.", __func__, cp->geom->name);
++}
++
++static void
++g_eli_orphan(struct g_consumer *cp)
++{
++ struct g_eli_softc *sc;
++
++ g_topology_assert();
++ sc = cp->geom->softc;
++ if (sc == NULL)
++ return;
++ g_eli_destroy(sc, TRUE);
++}
++
++/*
++ * BIO_READ:
++ * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
++ * BIO_WRITE:
++ * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
++ */
++static void
++g_eli_start(struct bio *bp)
++{
++ struct g_eli_softc *sc;
++ struct g_consumer *cp;
++ struct bio *cbp;
++
++ sc = bp->bio_to->geom->softc;
++ KASSERT(sc != NULL,
++ ("Provider's error should be set (error=%d)(device=%s).",
++ bp->bio_to->error, bp->bio_to->name));
++ G_ELI_LOGREQ(2, bp, "Request received.");
++
++ switch (bp->bio_cmd) {
++ case BIO_READ:
++ case BIO_WRITE:
++ case BIO_GETATTR:
++ case BIO_FLUSH:
++ break;
++ case BIO_DELETE:
++ /*
++ * We could eventually support BIO_DELETE request.
++ * It could be done by overwriting requested sector with
++ * random data g_eli_overwrites number of times.
++ */
++ default:
++ g_io_deliver(bp, EOPNOTSUPP);
++ return;
++ }
++ cbp = g_clone_bio(bp);
++ if (cbp == NULL) {
++ g_io_deliver(bp, ENOMEM);
++ return;
++ }
++ bp->bio_driver1 = cbp;
++ bp->bio_pflags = G_ELI_NEW_BIO;
++ switch (bp->bio_cmd) {
++ case BIO_READ:
++ if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
++ g_eli_crypto_read(sc, bp, 0);
++ break;
++ }
++ /* FALLTHROUGH */
++ case BIO_WRITE:
++ mtx_lock(&sc->sc_queue_mtx);
++ bioq_insert_tail(&sc->sc_queue, bp);
++ mtx_unlock(&sc->sc_queue_mtx);
++ wakeup(sc);
++ break;
++ case BIO_GETATTR:
++ case BIO_FLUSH:
++ cbp->bio_done = g_std_done;
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ cbp->bio_to = cp->provider;
++ G_ELI_LOGREQ(2, cbp, "Sending request.");
++ g_io_request(cbp, cp);
++ break;
++ }
++}
++
++static int
++g_eli_newsession(struct g_eli_worker *wr)
++{
++ struct g_eli_softc *sc;
++ struct cryptoini crie, cria;
++ int error;
++
++ sc = wr->w_softc;
++
++ bzero(&crie, sizeof(crie));
++ crie.cri_alg = sc->sc_ealgo;
++ crie.cri_klen = sc->sc_ekeylen;
++ if (sc->sc_ealgo == CRYPTO_AES_XTS)
++ crie.cri_klen <<= 1;
++ crie.cri_key = sc->sc_ekeys[0];
++ if (sc->sc_flags & G_ELI_FLAG_AUTH) {
++ bzero(&cria, sizeof(cria));
++ cria.cri_alg = sc->sc_aalgo;
++ cria.cri_klen = sc->sc_akeylen;
++ cria.cri_key = sc->sc_akey;
++ crie.cri_next = &cria;
++ }
++
++ switch (sc->sc_crypto) {
++ case G_ELI_CRYPTO_SW:
++ error = crypto_newsession(&wr->w_sid, &crie,
++ CRYPTOCAP_F_SOFTWARE);
++ break;
++ case G_ELI_CRYPTO_HW:
++ error = crypto_newsession(&wr->w_sid, &crie,
++ CRYPTOCAP_F_HARDWARE);
++ break;
++ case G_ELI_CRYPTO_UNKNOWN:
++ error = crypto_newsession(&wr->w_sid, &crie,
++ CRYPTOCAP_F_HARDWARE);
++ if (error == 0) {
++ mtx_lock(&sc->sc_queue_mtx);
++ if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
++ sc->sc_crypto = G_ELI_CRYPTO_HW;
++ mtx_unlock(&sc->sc_queue_mtx);
++ } else {
++ error = crypto_newsession(&wr->w_sid, &crie,
++ CRYPTOCAP_F_SOFTWARE);
++ mtx_lock(&sc->sc_queue_mtx);
++ if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
++ sc->sc_crypto = G_ELI_CRYPTO_SW;
++ mtx_unlock(&sc->sc_queue_mtx);
++ }
++ break;
++ default:
++ panic("%s: invalid condition", __func__);
++ }
++
++ return (error);
++}
++
++static void
++g_eli_freesession(struct g_eli_worker *wr)
++{
++
++ crypto_freesession(wr->w_sid);
++}
++
++static void
++g_eli_cancel(struct g_eli_softc *sc)
++{
++ struct bio *bp;
++
++ mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
++
++ while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
++ KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
++ ("Not new bio when canceling (bp=%p).", bp));
++ g_io_deliver(bp, ENXIO);
++ }
++}
++
++static struct bio *
++g_eli_takefirst(struct g_eli_softc *sc)
++{
++ struct bio *bp;
++
++ mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
++
++ if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
++ return (bioq_takefirst(&sc->sc_queue));
++ /*
++ * Device suspended, so we skip new I/O requests.
++ */
++ TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
++ if (bp->bio_pflags != G_ELI_NEW_BIO)
++ break;
++ }
++ if (bp != NULL)
++ bioq_remove(&sc->sc_queue, bp);
++ return (bp);
++}
++
++/*
++ * This is the main function for kernel worker thread when we don't have
++ * hardware acceleration and we have to do cryptography in software.
++ * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM
++ * threads with crypto work.
++ */
++static void
++g_eli_worker(void *arg)
++{
++ struct g_eli_softc *sc;
++ struct g_eli_worker *wr;
++ struct bio *bp;
++ int error;
++
++ wr = arg;
++ sc = wr->w_softc;
++#ifdef SMP
++ /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
++ if (mp_ncpus > 1 && sc->sc_crypto == G_ELI_CRYPTO_SW &&
++ g_eli_threads == 0) {
++ while (!smp_started)
++ tsleep(wr, 0, "geli:smp", hz / 4);
++ }
++#endif
++ thread_lock(curthread);
++ sched_prio(curthread, PUSER);
++ if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0)
++ sched_bind(curthread, wr->w_number);
++ thread_unlock(curthread);
++
++ G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
++
++ for (;;) {
++ mtx_lock(&sc->sc_queue_mtx);
++again:
++ bp = g_eli_takefirst(sc);
++ if (bp == NULL) {
++ if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
++ g_eli_cancel(sc);
++ LIST_REMOVE(wr, w_next);
++ g_eli_freesession(wr);
++ free(wr, M_ELI);
++ G_ELI_DEBUG(1, "Thread %s exiting.",
++ curthread->td_proc->p_comm);
++ wakeup(&sc->sc_workers);
++ mtx_unlock(&sc->sc_queue_mtx);
++ kproc_exit(0);
++ }
++ while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
++ if (sc->sc_inflight > 0) {
++ G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight);
++ /*
++ * We still have inflight BIOs, so
++ * sleep and retry.
++ */
++ msleep(sc, &sc->sc_queue_mtx, PRIBIO,
++ "geli:inf", hz / 5);
++ goto again;
++ }
++ /*
++ * Suspend requested, mark the worker as
++ * suspended and go to sleep.
++ */
++ if (wr->w_active) {
++ g_eli_freesession(wr);
++ wr->w_active = FALSE;
++ }
++ wakeup(&sc->sc_workers);
++ msleep(sc, &sc->sc_queue_mtx, PRIBIO,
++ "geli:suspend", 0);
++ if (!wr->w_active &&
++ !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
++ error = g_eli_newsession(wr);
++ KASSERT(error == 0,
++ ("g_eli_newsession() failed on resume (error=%d)",
++ error));
++ wr->w_active = TRUE;
++ }
++ goto again;
++ }
++ msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
++ continue;
++ }
++ if (bp->bio_pflags == G_ELI_NEW_BIO)
++ atomic_add_int(&sc->sc_inflight, 1);
++ mtx_unlock(&sc->sc_queue_mtx);
++ if (bp->bio_pflags == G_ELI_NEW_BIO) {
++ bp->bio_pflags = 0;
++ if (sc->sc_flags & G_ELI_FLAG_AUTH) {
++ if (bp->bio_cmd == BIO_READ)
++ g_eli_auth_read(sc, bp);
++ else
++ g_eli_auth_run(wr, bp);
++ } else {
++ if (bp->bio_cmd == BIO_READ)
++ g_eli_crypto_read(sc, bp, 1);
++ else
++ g_eli_crypto_run(wr, bp);
++ }
++ } else {
++ if (sc->sc_flags & G_ELI_FLAG_AUTH)
++ g_eli_auth_run(wr, bp);
++ else
++ g_eli_crypto_run(wr, bp);
++ }
++ }
++}
++
++/*
++ * Select encryption key. If G_ELI_FLAG_SINGLE_KEY is present we only have one
++ * key available for all the data. If the flag is not present select the key
++ * based on data offset.
++ */
++uint8_t *
++g_eli_crypto_key(struct g_eli_softc *sc, off_t offset, size_t blocksize)
++{
++ u_int nkey;
++
++ if (sc->sc_nekeys == 1)
++ return (sc->sc_ekeys[0]);
++
++ KASSERT(sc->sc_nekeys > 1, ("%s: sc_nekeys=%u", __func__,
++ sc->sc_nekeys));
++ KASSERT((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0,
++ ("%s: SINGLE_KEY flag set, but sc_nekeys=%u", __func__,
++ sc->sc_nekeys));
++
++ /* We switch key every 2^G_ELI_KEY_SHIFT blocks. */
++ nkey = (offset >> G_ELI_KEY_SHIFT) / blocksize;
++
++ KASSERT(nkey < sc->sc_nekeys, ("%s: nkey=%u >= sc_nekeys=%u", __func__,
++ nkey, sc->sc_nekeys));
++
++ return (sc->sc_ekeys[nkey]);
++}
++
++/*
++ * Here we generate IV. It is unique for every sector.
++ */
++void
++g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
++ size_t size)
++{
++ uint8_t off[8];
++
++ if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
++ bcopy(&offset, off, sizeof(off));
++ else
++ le64enc(off, (uint64_t)offset);
++
++ switch (sc->sc_ealgo) {
++ case CRYPTO_AES_XTS:
++ bcopy(off, iv, sizeof(off));
++ bzero(iv + sizeof(off), size - sizeof(off));
++ break;
++ default:
++ {
++ u_char hash[SHA256_DIGEST_LENGTH];
++ SHA256_CTX ctx;
++
++ /* Copy precalculated SHA256 context for IV-Key. */
++ bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
++ SHA256_Update(&ctx, off, sizeof(off));
++ SHA256_Final(hash, &ctx);
++ bcopy(hash, iv, MIN(sizeof(hash), size));
++ break;
++ }
++ }
++}
++
++int
++g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
++ struct g_eli_metadata *md)
++{
++ struct g_geom *gp;
++ struct g_consumer *cp;
++ u_char *buf = NULL;
++ int error;
++
++ g_topology_assert();
++
++ gp = g_new_geomf(mp, "eli:taste");
++ gp->start = g_eli_start;
++ gp->access = g_std_access;
++ /*
++ * g_eli_read_metadata() is always called from the event thread.
++ * Our geom is created and destroyed within the same event, so no
++ * orphan or spoil event can occur in the meantime.
++ */
++ gp->orphan = g_eli_orphan_spoil_assert;
++ gp->spoiled = g_eli_orphan_spoil_assert;
++ cp = g_new_consumer(gp);
++ error = g_attach(cp, pp);
++ if (error != 0)
++ goto end;
++ error = g_access(cp, 1, 0, 0);
++ if (error != 0)
++ goto end;
++ g_topology_unlock();
++ buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
++ &error);
++ g_topology_lock();
++ if (buf == NULL)
++ goto end;
++ eli_metadata_decode(buf, md);
++end:
++ if (buf != NULL)
++ g_free(buf);
++ if (cp->provider != NULL) {
++ if (cp->acr == 1)
++ g_access(cp, -1, 0, 0);
++ g_detach(cp);
++ }
++ g_destroy_consumer(cp);
++ g_destroy_geom(gp);
++ return (error);
++}
++
++/*
++ * The function is called on the last close of the provider, when the user
++ * has requested that it be detached in this situation.
++ */
++static void
++g_eli_last_close(struct g_eli_softc *sc)
++{
++ struct g_geom *gp;
++ struct g_provider *pp;
++ char ppname[64];
++ int error;
++
++ g_topology_assert();
++ gp = sc->sc_geom;
++ pp = LIST_FIRST(&gp->provider);
++ strlcpy(ppname, pp->name, sizeof(ppname));
++ error = g_eli_destroy(sc, TRUE);
++ KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
++ ppname, error));
++ G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
++}
++
++int
++g_eli_access(struct g_provider *pp, int dr, int dw, int de)
++{
++ struct g_eli_softc *sc;
++ struct g_geom *gp;
++
++ gp = pp->geom;
++ sc = gp->softc;
++
++ if (dw > 0) {
++ if (sc->sc_flags & G_ELI_FLAG_RO) {
++ /* Deny write attempts. */
++ return (EROFS);
++ }
++ /* Someone is opening us for write, we need to remember that. */
++ sc->sc_flags |= G_ELI_FLAG_WOPEN;
++ return (0);
++ }
++ /* Is this the last close? */
++ if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
++ return (0);
++
++ /*
++ * Automatically detach on last close if requested.
++ */
++ if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
++ (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
++ g_eli_last_close(sc);
++ }
++ return (0);
++}
++
++static int
++g_eli_cpu_is_disabled(int cpu)
++{
++#ifdef SMP
++ return ((hlt_cpus_mask & (1 << cpu)) != 0);
++#else
++ return (0);
++#endif
++}
++
++struct g_geom *
++g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
++ const struct g_eli_metadata *md, const u_char *mkey, int nkey)
++{
++ struct g_eli_softc *sc;
++ struct g_eli_worker *wr;
++ struct g_geom *gp;
++ struct g_provider *pp;
++ struct g_consumer *cp;
++ u_int i, threads;
++ int error;
++
++ G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
++
++ gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
++ gp->softc = NULL; /* for a moment */
++
++ sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
++ gp->start = g_eli_start;
++ /*
++ * Spoiling cannot happen actually, because we keep provider open for
++ * writing all the time or provider is read-only.
++ */
++ gp->spoiled = g_eli_orphan_spoil_assert;
++ gp->orphan = g_eli_orphan;
++ gp->dumpconf = g_eli_dumpconf;
++ /*
++ * If detach-on-last-close feature is not enabled and we don't operate
++ * on read-only provider, we can simply use g_std_access().
++ */
++ if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
++ gp->access = g_eli_access;
++ else
++ gp->access = g_std_access;
++
++ sc->sc_inflight = 0;
++ sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
++ sc->sc_flags = md->md_flags;
++ /* Backward compatibility. */
++ if (md->md_version < 4)
++ sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
++ if (md->md_version < 5)
++ sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
++ sc->sc_ealgo = md->md_ealgo;
++ sc->sc_nkey = nkey;
++
++ if (sc->sc_flags & G_ELI_FLAG_AUTH) {
++ sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
++ sc->sc_aalgo = md->md_aalgo;
++ sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
++
++ sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
++ /*
++ * Some hash functions (like SHA1 and RIPEMD160) generate a hash
++ * whose length is not a multiple of 128 bits, but we want the
++ * data length to be a multiple of 128 bits, so we can encrypt
++ * without padding. The line below rounds the data length down
++ * to a multiple of 128 bits.
++ */
++ sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
++
++ sc->sc_bytes_per_sector =
++ (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
++ sc->sc_bytes_per_sector *= bpp->sectorsize;
++ }
++
++ gp->softc = sc;
++ sc->sc_geom = gp;
++
++ bioq_init(&sc->sc_queue);
++ mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
++
++ pp = NULL;
++ cp = g_new_consumer(gp);
++ error = g_attach(cp, bpp);
++ if (error != 0) {
++ if (req != NULL) {
++ gctl_error(req, "Cannot attach to %s (error=%d).",
++ bpp->name, error);
++ } else {
++ G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
++ bpp->name, error);
++ }
++ goto failed;
++ }
++ /*
++ * Keep provider open all the time, so we can run critical tasks,
++ * like Master Keys deletion, without wondering if we can open
++ * provider or not.
++ * We don't open provider for writing only when user requested read-only
++ * access.
++ */
++ if (sc->sc_flags & G_ELI_FLAG_RO)
++ error = g_access(cp, 1, 0, 1);
++ else
++ error = g_access(cp, 1, 1, 1);
++ if (error != 0) {
++ if (req != NULL) {
++ gctl_error(req, "Cannot access %s (error=%d).",
++ bpp->name, error);
++ } else {
++ G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
++ bpp->name, error);
++ }
++ goto failed;
++ }
++
++ sc->sc_sectorsize = md->md_sectorsize;
++ sc->sc_mediasize = bpp->mediasize;
++ if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
++ sc->sc_mediasize -= bpp->sectorsize;
++ if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
++ sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
++ else {
++ sc->sc_mediasize /= sc->sc_bytes_per_sector;
++ sc->sc_mediasize *= sc->sc_sectorsize;
++ }
++
++ /*
++ * Remember the keys in our softc structure.
++ */
++ g_eli_mkey_propagate(sc, mkey);
++ sc->sc_ekeylen = md->md_keylen;
++
++ LIST_INIT(&sc->sc_workers);
++
++ threads = g_eli_threads;
++ if (threads == 0)
++ threads = mp_ncpus;
++ else if (threads > mp_ncpus) {
++ /* There is really no need for too many worker threads. */
++ threads = mp_ncpus;
++ G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads);
++ }
++ for (i = 0; i < threads; i++) {
++ if (g_eli_cpu_is_disabled(i)) {
++ G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
++ bpp->name, i);
++ continue;
++ }
++ wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
++ wr->w_softc = sc;
++ wr->w_number = i;
++ wr->w_active = TRUE;
++
++ error = g_eli_newsession(wr);
++ if (error != 0) {
++ free(wr, M_ELI);
++ if (req != NULL) {
++ gctl_error(req, "Cannot set up crypto session "
++ "for %s (error=%d).", bpp->name, error);
++ } else {
++ G_ELI_DEBUG(1, "Cannot set up crypto session "
++ "for %s (error=%d).", bpp->name, error);
++ }
++ goto failed;
++ }
++
++ error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
++ "g_eli[%u] %s", i, bpp->name);
++ if (error != 0) {
++ g_eli_freesession(wr);
++ free(wr, M_ELI);
++ if (req != NULL) {
++ gctl_error(req, "Cannot create kernel thread "
++ "for %s (error=%d).", bpp->name, error);
++ } else {
++ G_ELI_DEBUG(1, "Cannot create kernel thread "
++ "for %s (error=%d).", bpp->name, error);
++ }
++ goto failed;
++ }
++ LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
++ /* If we have hardware support, one thread is enough. */
++ if (sc->sc_crypto == G_ELI_CRYPTO_HW)
++ break;
++ }
++
++ /*
++ * Create decrypted provider.
++ */
++ pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
++ pp->mediasize = sc->sc_mediasize;
++ pp->sectorsize = sc->sc_sectorsize;
++
++ g_error_provider(pp, 0);
++
++ G_ELI_DEBUG(0, "Device %s created.", pp->name);
++ G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
++ sc->sc_ekeylen);
++ if (sc->sc_flags & G_ELI_FLAG_AUTH)
++ G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
++ G_ELI_DEBUG(0, " Crypto: %s",
++ sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
++ return (gp);
++failed:
++ mtx_lock(&sc->sc_queue_mtx);
++ sc->sc_flags |= G_ELI_FLAG_DESTROY;
++ wakeup(sc);
++ /*
++ * Wait for kernel threads self destruction.
++ */
++ while (!LIST_EMPTY(&sc->sc_workers)) {
++ msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
++ "geli:destroy", 0);
++ }
++ mtx_destroy(&sc->sc_queue_mtx);
++ if (cp->provider != NULL) {
++ if (cp->acr == 1)
++ g_access(cp, -1, -1, -1);
++ g_detach(cp);
++ }
++ g_destroy_consumer(cp);
++ g_destroy_geom(gp);
++ if (sc->sc_ekeys != NULL) {
++ bzero(sc->sc_ekeys,
++ sc->sc_nekeys * (sizeof(uint8_t *) + G_ELI_DATAKEYLEN));
++ free(sc->sc_ekeys, M_ELI);
++ }
++ bzero(sc, sizeof(*sc));
++ free(sc, M_ELI);
++ return (NULL);
++}
++
++int
++g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
++{
++ struct g_geom *gp;
++ struct g_provider *pp;
++
++ g_topology_assert();
++
++ if (sc == NULL)
++ return (ENXIO);
++
++ gp = sc->sc_geom;
++ pp = LIST_FIRST(&gp->provider);
++ if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
++ if (force) {
++ G_ELI_DEBUG(1, "Device %s is still open, so it "
++ "cannot be definitely removed.", pp->name);
++ } else {
++ G_ELI_DEBUG(1,
++ "Device %s is still open (r%dw%de%d).", pp->name,
++ pp->acr, pp->acw, pp->ace);
++ return (EBUSY);
++ }
++ }
++
++ mtx_lock(&sc->sc_queue_mtx);
++ sc->sc_flags |= G_ELI_FLAG_DESTROY;
++ wakeup(sc);
++ while (!LIST_EMPTY(&sc->sc_workers)) {
++ msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
++ "geli:destroy", 0);
++ }
++ mtx_destroy(&sc->sc_queue_mtx);
++ gp->softc = NULL;
++ if (sc->sc_ekeys != NULL) {
++ /* The sc_ekeys field can be NULL if the device is suspended. */
++ bzero(sc->sc_ekeys,
++ sc->sc_nekeys * (sizeof(uint8_t *) + G_ELI_DATAKEYLEN));
++ free(sc->sc_ekeys, M_ELI);
++ }
++ bzero(sc, sizeof(*sc));
++ free(sc, M_ELI);
++
++ if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
++ G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
++ g_wither_geom_close(gp, ENXIO);
++
++ return (0);
++}
++
++static int
++g_eli_destroy_geom(struct gctl_req *req __unused,
++ struct g_class *mp __unused, struct g_geom *gp)
++{
++ struct g_eli_softc *sc;
++
++ sc = gp->softc;
++ return (g_eli_destroy(sc, FALSE));
++}
++
++static int
++g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
++{
++ u_char *keyfile, *data, *size;
++ char *file, name[64];
++ int i;
++
++ for (i = 0; ; i++) {
++ snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
++ keyfile = preload_search_by_type(name);
++ if (keyfile == NULL)
++ return (i); /* Return number of loaded keyfiles. */
++ data = preload_search_info(keyfile, MODINFO_ADDR);
++ if (data == NULL) {
++ G_ELI_DEBUG(0, "Cannot find key file data for %s.",
++ name);
++ return (0);
++ }
++ data = *(void **)data;
++ size = preload_search_info(keyfile, MODINFO_SIZE);
++ if (size == NULL) {
++ G_ELI_DEBUG(0, "Cannot find key file size for %s.",
++ name);
++ return (0);
++ }
++ file = preload_search_info(keyfile, MODINFO_NAME);
++ if (file == NULL) {
++ G_ELI_DEBUG(0, "Cannot find key file name for %s.",
++ name);
++ return (0);
++ }
++ G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
++ provider, name);
++ g_eli_crypto_hmac_update(ctx, data, *(size_t *)size);
++ }
++}
++
++static void
++g_eli_keyfiles_clear(const char *provider)
++{
++ u_char *keyfile, *data, *size;
++ char name[64];
++ int i;
++
++ for (i = 0; ; i++) {
++ snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
++ keyfile = preload_search_by_type(name);
++ if (keyfile == NULL)
++ return;
++ data = preload_search_info(keyfile, MODINFO_ADDR);
++ size = preload_search_info(keyfile, MODINFO_SIZE);
++ if (data == NULL || size == NULL)
++ continue;
++ data = *(void **)data;
++ bzero(data, *(size_t *)size);
++ }
++}
++
++/*
++ * Tasting is only done on boot.
++ * We detect providers which should be attached before root is mounted.
++ */
++static struct g_geom *
++g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
++{
++ struct g_eli_metadata md;
++ struct g_geom *gp;
++ struct hmac_ctx ctx;
++ char passphrase[256];
++ u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
++ u_int i, nkey, nkeyfiles, tries;
++ int error;
++
++ g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
++ g_topology_assert();
++
++ if (root_mounted() || g_eli_tries == 0)
++ return (NULL);
++
++ G_ELI_DEBUG(3, "Tasting %s.", pp->name);
++
++ error = g_eli_read_metadata(mp, pp, &md);
++ if (error != 0)
++ return (NULL);
++ gp = NULL;
++
++ if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
++ return (NULL);
++ if (md.md_version > G_ELI_VERSION) {
++ printf("geom_eli.ko module is too old to handle %s.\n",
++ pp->name);
++ return (NULL);
++ }
++ if (md.md_provsize != pp->mediasize)
++ return (NULL);
++ /* Should we attach it on boot? */
++ if (!(md.md_flags & G_ELI_FLAG_BOOT))
++ return (NULL);
++ if (md.md_keys == 0x00) {
++ G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
++ return (NULL);
++ }
++ if (md.md_iterations == -1) {
++ /* If there is no passphrase, we try only once. */
++ tries = 1;
++ } else {
++ /* Ask for the passphrase no more than g_eli_tries times. */
++ tries = g_eli_tries;
++ }
++
++ for (i = 0; i < tries; i++) {
++ g_eli_crypto_hmac_init(&ctx, NULL, 0);
++
++ /*
++ * Load all key files.
++ */
++ nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
++
++ if (nkeyfiles == 0 && md.md_iterations == -1) {
++ /*
++ * No key files and no passphrase, something is
++ * definitely wrong here.
++ * geli(8) doesn't allow for such situation, so assume
++ * that there was really no passphrase and in that case
++ * key files are not properly defined in loader.conf.
++ */
++ G_ELI_DEBUG(0,
++ "Found no key files in loader.conf for %s.",
++ pp->name);
++ return (NULL);
++ }
++
++ /* Ask for the passphrase if defined. */
++ if (md.md_iterations >= 0) {
++ printf("Enter passphrase for %s: ", pp->name);
++ gets(passphrase, sizeof(passphrase),
++ g_eli_visible_passphrase);
++ }
++
++ /*
++ * Prepare Derived-Key from the user passphrase.
++ */
++ if (md.md_iterations == 0) {
++ g_eli_crypto_hmac_update(&ctx, md.md_salt,
++ sizeof(md.md_salt));
++ g_eli_crypto_hmac_update(&ctx, passphrase,
++ strlen(passphrase));
++ bzero(passphrase, sizeof(passphrase));
++ } else if (md.md_iterations > 0) {
++ u_char dkey[G_ELI_USERKEYLEN];
++
++ pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
++ sizeof(md.md_salt), passphrase, md.md_iterations);
++ bzero(passphrase, sizeof(passphrase));
++ g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
++ bzero(dkey, sizeof(dkey));
++ }
++
++ g_eli_crypto_hmac_final(&ctx, key, 0);
++
++ /*
++ * Decrypt Master-Key.
++ */
++ error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
++ bzero(key, sizeof(key));
++ if (error == -1) {
++ if (i == tries - 1) {
++ G_ELI_DEBUG(0,
++ "Wrong key for %s. No tries left.",
++ pp->name);
++ g_eli_keyfiles_clear(pp->name);
++ return (NULL);
++ }
++ G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
++ pp->name, tries - i - 1);
++ /* Try again. */
++ continue;
++ } else if (error > 0) {
++ G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).",
++ pp->name, error);
++ g_eli_keyfiles_clear(pp->name);
++ return (NULL);
++ }
++ G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
++ break;
++ }
++
++ /*
++ * We have correct key, let's attach provider.
++ */
++ gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
++ bzero(mkey, sizeof(mkey));
++ bzero(&md, sizeof(md));
++ if (gp == NULL) {
++ G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
++ G_ELI_SUFFIX);
++ return (NULL);
++ }
++ return (gp);
++}
++
++static void
++g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
++ struct g_consumer *cp, struct g_provider *pp)
++{
++ struct g_eli_softc *sc;
++
++ g_topology_assert();
++ sc = gp->softc;
++ if (sc == NULL)
++ return;
++ if (pp != NULL || cp != NULL)
++ return; /* Nothing here. */
++ sbuf_printf(sb, "%s<Flags>", indent);
++ if (sc->sc_flags == 0)
++ sbuf_printf(sb, "NONE");
++ else {
++ int first = 1;
++
++#define ADD_FLAG(flag, name) do { \
++ if (sc->sc_flags & (flag)) { \
++ if (!first) \
++ sbuf_printf(sb, ", "); \
++ else \
++ first = 0; \
++ sbuf_printf(sb, name); \
++ } \
++} while (0)
++ ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
++ ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
++ ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
++ ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
++ ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
++ ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
++ ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
++ ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
++ ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
++ ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
++ ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
++#undef ADD_FLAG
++ }
++ sbuf_printf(sb, "</Flags>\n");
++
++ if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
++ sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
++ sc->sc_nkey);
++ }
++ sbuf_printf(sb, "%s<Crypto>", indent);
++ switch (sc->sc_crypto) {
++ case G_ELI_CRYPTO_HW:
++ sbuf_printf(sb, "hardware");
++ break;
++ case G_ELI_CRYPTO_SW:
++ sbuf_printf(sb, "software");
++ break;
++ default:
++ sbuf_printf(sb, "UNKNOWN");
++ break;
++ }
++ sbuf_printf(sb, "</Crypto>\n");
++ if (sc->sc_flags & G_ELI_FLAG_AUTH) {
++ sbuf_printf(sb,
++ "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
++ indent, g_eli_algo2str(sc->sc_aalgo));
++ }
++ sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
++ sc->sc_ekeylen);
++ sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent,
++ g_eli_algo2str(sc->sc_ealgo));
++ sbuf_printf(sb, "%s<State>%s</State>\n", indent,
++ (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
++}
++
++static void
++g_eli_shutdown_pre_sync(void *arg, int howto)
++{
++ struct g_class *mp;
++ struct g_geom *gp, *gp2;
++ struct g_provider *pp;
++ struct g_eli_softc *sc;
++ int error;
++
++ mp = arg;
++ DROP_GIANT();
++ g_topology_lock();
++ LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
++ sc = gp->softc;
++ if (sc == NULL)
++ continue;
++ pp = LIST_FIRST(&gp->provider);
++ KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
++ if (pp->acr + pp->acw + pp->ace == 0)
++ error = g_eli_destroy(sc, TRUE);
++ else {
++ sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
++ gp->access = g_eli_access;
++ }
++ }
++ g_topology_unlock();
++ PICKUP_GIANT();
++}
++
++static void
++g_eli_init(struct g_class *mp)
++{
++
++ g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
++ g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
++ if (g_eli_pre_sync == NULL)
++ G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
++}
++
++static void
++g_eli_fini(struct g_class *mp)
++{
++
++ if (g_eli_pre_sync != NULL)
++ EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
++}
++
++DECLARE_GEOM_CLASS(g_eli_class, g_eli);
++MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
+--- /dev/null
++++ b/sys/geom/eli/g_eli_privacy.c
+@@ -0,0 +1,330 @@
++/*-
++ * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/param.h>
++#include <sys/systm.h>
++#include <sys/kernel.h>
++#include <sys/linker.h>
++#include <sys/module.h>
++#include <sys/lock.h>
++#include <sys/mutex.h>
++#include <sys/bio.h>
++#include <sys/sysctl.h>
++#include <sys/malloc.h>
++#include <sys/kthread.h>
++#include <sys/proc.h>
++#include <sys/sched.h>
++#include <sys/smp.h>
++#include <sys/uio.h>
++#include <sys/vnode.h>
++
++#include <vm/uma.h>
++
++#include <geom/geom.h>
++#include <geom/eli/g_eli.h>
++#include <geom/eli/pkcs5v2.h>
++
++/*
++ * Code paths:
++ * BIO_READ:
++ * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
++ * BIO_WRITE:
++ * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
++ */
++
++MALLOC_DECLARE(M_ELI);
++
++/*
++ * The function is called after we read and decrypt data.
++ *
++ * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver
++ */
++static int
++g_eli_crypto_read_done(struct cryptop *crp)
++{
++ struct g_eli_softc *sc;
++ struct bio *bp;
++
++ if (crp->crp_etype == EAGAIN) {
++ if (g_eli_crypto_rerun(crp) == 0)
++ return (0);
++ }
++ bp = (struct bio *)crp->crp_opaque;
++ bp->bio_inbed++;
++ if (crp->crp_etype == 0) {
++ G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).",
++ bp->bio_inbed, bp->bio_children);
++ bp->bio_completed += crp->crp_olen;
++ } else {
++ G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
++ bp->bio_inbed, bp->bio_children, crp->crp_etype);
++ if (bp->bio_error == 0)
++ bp->bio_error = crp->crp_etype;
++ }
++ /*
++ * Do we have all sectors already?
++ */
++ if (bp->bio_inbed < bp->bio_children)
++ return (0);
++ free(bp->bio_driver2, M_ELI);
++ bp->bio_driver2 = NULL;
++ if (bp->bio_error != 0) {
++ G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
++ bp->bio_error);
++ bp->bio_completed = 0;
++ }
++ /*
++ * Read is finished, send it up.
++ */
++ sc = bp->bio_to->geom->softc;
++ g_io_deliver(bp, bp->bio_error);
++ atomic_subtract_int(&sc->sc_inflight, 1);
++ return (0);
++}
++
++/*
++ * The function is called after data encryption.
++ *
++ * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
++ */
++static int
++g_eli_crypto_write_done(struct cryptop *crp)
++{
++ struct g_eli_softc *sc;
++ struct g_geom *gp;
++ struct g_consumer *cp;
++ struct bio *bp, *cbp;
++
++ if (crp->crp_etype == EAGAIN) {
++ if (g_eli_crypto_rerun(crp) == 0)
++ return (0);
++ }
++ bp = (struct bio *)crp->crp_opaque;
++ bp->bio_inbed++;
++ if (crp->crp_etype == 0) {
++ G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
++ bp->bio_inbed, bp->bio_children);
++ } else {
++ G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
++ bp->bio_inbed, bp->bio_children, crp->crp_etype);
++ if (bp->bio_error == 0)
++ bp->bio_error = crp->crp_etype;
++ }
++ /*
++ * All sectors are already encrypted?
++ */
++ if (bp->bio_inbed < bp->bio_children)
++ return (0);
++ bp->bio_inbed = 0;
++ bp->bio_children = 1;
++ cbp = bp->bio_driver1;
++ bp->bio_driver1 = NULL;
++ gp = bp->bio_to->geom;
++ if (bp->bio_error != 0) {
++ G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
++ bp->bio_error);
++ free(bp->bio_driver2, M_ELI);
++ bp->bio_driver2 = NULL;
++ g_destroy_bio(cbp);
++ sc = gp->softc;
++ g_io_deliver(bp, bp->bio_error);
++ atomic_subtract_int(&sc->sc_inflight, 1);
++ return (0);
++ }
++ cbp->bio_data = bp->bio_driver2;
++ cbp->bio_done = g_eli_write_done;
++ cp = LIST_FIRST(&gp->consumer);
++ cbp->bio_to = cp->provider;
++ G_ELI_LOGREQ(2, cbp, "Sending request.");
++ /*
++ * Send encrypted data to the provider.
++ */
++ g_io_request(cbp, cp);
++ return (0);
++}
++
++/*
++ * The function is called to read encrypted data.
++ *
++ * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
++ */
++void
++g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker)
++{
++ struct g_consumer *cp;
++ struct bio *cbp;
++
++ if (!fromworker) {
++ /*
++ * We are not called from the worker thread, so check if
++ * device is suspended.
++ */
++ mtx_lock(&sc->sc_queue_mtx);
++ if (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
++ /*
++ * If device is suspended, we place the request onto
++ * the queue, so it can be handled after resume.
++ */
++ G_ELI_DEBUG(0, "device suspended, move onto queue");
++ bioq_insert_tail(&sc->sc_queue, bp);
++ mtx_unlock(&sc->sc_queue_mtx);
++ wakeup(sc);
++ return;
++ }
++ atomic_add_int(&sc->sc_inflight, 1);
++ mtx_unlock(&sc->sc_queue_mtx);
++ }
++ bp->bio_pflags = 0;
++ bp->bio_driver2 = NULL;
++ cbp = bp->bio_driver1;
++ cbp->bio_done = g_eli_read_done;
++ cp = LIST_FIRST(&sc->sc_geom->consumer);
++ cbp->bio_to = cp->provider;
++ G_ELI_LOGREQ(2, cbp, "Sending request.");
++ /*
++ * Read encrypted data from provider.
++ */
++ g_io_request(cbp, cp);
++}
++
++/*
++ * This is the main function responsible for cryptography (ie. communication
++ * with crypto(9) subsystem).
++ *
++ * BIO_READ:
++ * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> G_ELI_CRYPTO_RUN -> g_eli_crypto_read_done -> g_io_deliver
++ * BIO_WRITE:
++ * g_eli_start -> G_ELI_CRYPTO_RUN -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
++ */
++void
++g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp)
++{
++ struct g_eli_softc *sc;
++ struct cryptop *crp;
++ struct cryptodesc *crd;
++ struct uio *uio;
++ struct iovec *iov;
++ u_int i, nsec, secsize;
++ int err, error;
++ off_t dstoff;
++ size_t size;
++ u_char *p, *data;
++
++ G_ELI_LOGREQ(3, bp, "%s", __func__);
++
++ bp->bio_pflags = wr->w_number;
++ sc = wr->w_softc;
++ secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize;
++ nsec = bp->bio_length / secsize;
++
++ /*
++ * Calculate how much memory we need.
++ * We need separate crypto operation for every single sector.
++ * It is much faster to calculate total amount of needed memory here and
++ * do the allocation once instead of allocating memory in pieces (many,
++ * many pieces).
++ */
++ size = sizeof(*crp) * nsec;
++ size += sizeof(*crd) * nsec;
++ size += sizeof(*uio) * nsec;
++ size += sizeof(*iov) * nsec;
++ /*
++ * If we write the data we cannot destroy current bio_data content,
++ * so we need to allocate more memory for encrypted data.
++ */
++ if (bp->bio_cmd == BIO_WRITE)
++ size += bp->bio_length;
++ p = malloc(size, M_ELI, M_WAITOK);
++
++ bp->bio_inbed = 0;
++ bp->bio_children = nsec;
++ bp->bio_driver2 = p;
++
++ if (bp->bio_cmd == BIO_READ)
++ data = bp->bio_data;
++ else {
++ data = p;
++ p += bp->bio_length;
++ bcopy(bp->bio_data, data, bp->bio_length);
++ }
++
++ error = 0;
++ for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) {
++ crp = (struct cryptop *)p; p += sizeof(*crp);
++ crd = (struct cryptodesc *)p; p += sizeof(*crd);
++ uio = (struct uio *)p; p += sizeof(*uio);
++ iov = (struct iovec *)p; p += sizeof(*iov);
++
++ iov->iov_len = secsize;
++ iov->iov_base = data;
++ data += secsize;
++
++ uio->uio_iov = iov;
++ uio->uio_iovcnt = 1;
++ uio->uio_segflg = UIO_SYSSPACE;
++ uio->uio_resid = secsize;
++
++ crp->crp_sid = wr->w_sid;
++ crp->crp_ilen = secsize;
++ crp->crp_olen = secsize;
++ crp->crp_opaque = (void *)bp;
++ crp->crp_buf = (void *)uio;
++ if (bp->bio_cmd == BIO_WRITE)
++ crp->crp_callback = g_eli_crypto_write_done;
++ else /* if (bp->bio_cmd == BIO_READ) */
++ crp->crp_callback = g_eli_crypto_read_done;
++ crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC | CRYPTO_F_REL;
++ if (g_eli_batch)
++ crp->crp_flags |= CRYPTO_F_BATCH;
++ crp->crp_desc = crd;
++
++ crd->crd_skip = 0;
++ crd->crd_len = secsize;
++ crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
++ if (sc->sc_nekeys > 1)
++ crd->crd_flags |= CRD_F_KEY_EXPLICIT;
++ if (bp->bio_cmd == BIO_WRITE)
++ crd->crd_flags |= CRD_F_ENCRYPT;
++ crd->crd_alg = sc->sc_ealgo;
++ crd->crd_key = g_eli_crypto_key(sc, dstoff, secsize);
++ crd->crd_klen = sc->sc_ekeylen;
++ if (sc->sc_ealgo == CRYPTO_AES_XTS)
++ crd->crd_klen <<= 1;
++ g_eli_crypto_ivgen(sc, dstoff, crd->crd_iv,
++ sizeof(crd->crd_iv));
++ crd->crd_next = NULL;
++
++ crp->crp_etype = 0;
++ err = crypto_dispatch(crp);
++ if (error == 0)
++ error = err;
++ }
++ if (bp->bio_error == 0)
++ bp->bio_error = error;
++}
Added: trunk/freebsd-utils/debian/patches/039_geom.diff
===================================================================
--- trunk/freebsd-utils/debian/patches/039_geom.diff (rev 0)
+++ trunk/freebsd-utils/debian/patches/039_geom.diff 2011-07-25 15:40:31 UTC (rev 3628)
@@ -0,0 +1,354 @@
+--- a/sbin/geom/Makefile.inc
++++ b/sbin/geom/Makefile.inc
+@@ -2,5 +2,3 @@
+
+ WARNS?= 6
+ CLASS_DIR?=/lib/geom
+-
+-.include "../Makefile.inc"
+--- a/sbin/geom/class/eli/geom_eli.c
++++ b/sbin/geom/class/eli/geom_eli.c
+@@ -1083,7 +1083,7 @@
+ }
+
+ static void
+-eli_delkey_attached(struct gctl_req *req, const char *prov __unused)
++eli_delkey_attached(struct gctl_req *req, const char *prov)
+ {
+
+ gctl_issue(req);
+--- a/sbin/geom/class/eli/Makefile
++++ b/sbin/geom/class/eli/Makefile
+@@ -9,7 +9,7 @@
+ SRCS+= sha2.c
+
+ DPADD= ${LIBMD} ${LIBCRYPTO}
+-LDADD= -lmd -lcrypto
++LDADD= -lcrypto
+
+ WARNS?= 3
+
+--- a/sbin/geom/class/multipath/geom_multipath.c
++++ b/sbin/geom/class/multipath/geom_multipath.c
+@@ -79,7 +79,7 @@
+ };
+
+ static void
+-mp_main(struct gctl_req *req, unsigned int flags __unused)
++mp_main(struct gctl_req *req, unsigned int flags)
+ {
+ const char *name;
+
+--- a/sbin/geom/class/part/geom_part.c
++++ b/sbin/geom/class/part/geom_part.c
+@@ -622,7 +622,7 @@
+ }
+
+ static void
+-gpart_show(struct gctl_req *req, unsigned int fl __unused)
++gpart_show(struct gctl_req *req, unsigned int fl)
+ {
+ struct gmesh mesh;
+ struct gclass *classp;
+@@ -668,7 +668,7 @@
+ }
+
+ static void
+-gpart_backup(struct gctl_req *req, unsigned int fl __unused)
++gpart_backup(struct gctl_req *req, unsigned int fl)
+ {
+ struct gmesh mesh;
+ struct gclass *classp;
+@@ -757,13 +757,13 @@
+ }
+
+ static void
+-gpart_sighndl(int sig __unused)
++gpart_sighndl(int sig)
+ {
+ undo_restore = 1;
+ }
+
+ static void
+-gpart_restore(struct gctl_req *req, unsigned int fl __unused)
++gpart_restore(struct gctl_req *req, unsigned int fl)
+ {
+ struct gmesh mesh;
+ struct gclass *classp;
+@@ -1211,7 +1211,7 @@
+ }
+
+ static void
+-gpart_issue(struct gctl_req *req, unsigned int fl __unused)
++gpart_issue(struct gctl_req *req, unsigned int fl)
+ {
+ char buf[4096];
+ const char *errstr;
+--- a/sbin/geom/class/sched/geom_sched.c
++++ b/sbin/geom/class/sched/geom_sched.c
+@@ -68,7 +68,7 @@
+ #endif
+
+ static void
+-gcmd_createinsert(struct gctl_req *req, unsigned flags __unused)
++gcmd_createinsert(struct gctl_req *req, unsigned flags)
+ {
+ const char *reqalgo;
+ char name[64];
+--- a/sbin/geom/core/geom.c
++++ b/sbin/geom/core/geom.c
+@@ -770,7 +770,7 @@
+ }
+
+ static void
+-std_help(struct gctl_req *req __unused, unsigned flags __unused)
++std_help(struct gctl_req *req, unsigned flags)
+ {
+
+ usage();
+@@ -794,7 +794,7 @@
+ }
+
+ static void
+-std_list(struct gctl_req *req, unsigned flags __unused)
++std_list(struct gctl_req *req, unsigned flags)
+ {
+ struct gmesh mesh;
+ struct gclass *classp;
+@@ -928,7 +928,7 @@
+ }
+
+ static void
+-std_status(struct gctl_req *req, unsigned flags __unused)
++std_status(struct gctl_req *req, unsigned flags)
+ {
+ struct gmesh mesh;
+ struct gclass *classp;
+@@ -1024,7 +1024,7 @@
+ }
+
+ static void
+-std_load(struct gctl_req *req __unused, unsigned flags)
++std_load(struct gctl_req *req, unsigned flags)
+ {
+
+ /*
+@@ -1049,7 +1049,7 @@
+ }
+
+ static void
+-std_unload(struct gctl_req *req, unsigned flags __unused)
++std_unload(struct gctl_req *req, unsigned flags)
+ {
+ char name[64];
+ int id;
+--- a/sbin/geom/misc/subr.c
++++ b/sbin/geom/misc/subr.c
+@@ -47,7 +47,6 @@
+
+ #include "misc/subr.h"
+
+-
+ struct std_metadata {
+ char md_magic[16];
+ uint32_t md_version;
+@@ -187,7 +186,7 @@
+ if (*s != '\0')
+ return (EINVAL);
+ done:
+- if ((OFF_MAX / unit) < mult || (OFF_MAX / mult / unit) < number)
++ if ((INT64_MAX / unit) < mult || (INT64_MAX / mult / unit) < number)
+ return (ERANGE);
+ number *= mult * unit;
+ if (number % sectorsize)
+--- a/sbin/geom/Makefile
++++ b/sbin/geom/Makefile
+@@ -14,7 +14,7 @@
+ CFLAGS+=-I${.CURDIR} -I${.CURDIR}/core -DSTATIC_GEOM_CLASSES
+
+ DPADD= ${LIBGEOM} ${LIBSBUF} ${LIBBSDXML} ${LIBUTIL}
+-LDADD= -lgeom -lsbuf -lbsdxml -lutil
++LDADD= -lgeom -lsbuf -lexpat -lutil
+
+ .include <bsd.prog.mk>
+
+--- a/sbin/geom/class/Makefile
++++ b/sbin/geom/class/Makefile
+@@ -7,16 +7,16 @@
+ .if ${MK_OPENSSL} != "no"
+ SUBDIR+=eli
+ .endif
+-SUBDIR+=journal
++#SUBDIR+=journal
+ SUBDIR+=label
+ SUBDIR+=mirror
+-SUBDIR+=multipath
++#SUBDIR+=multipath
+ SUBDIR+=nop
+-SUBDIR+=part
++#SUBDIR+=part
+ SUBDIR+=raid3
+ SUBDIR+=sched
+ SUBDIR+=shsec
+ SUBDIR+=stripe
+-SUBDIR+=virstor
++#SUBDIR+=virstor
+
+ .include <bsd.subdir.mk>
+--- a/sbin/geom/class/mirror/Makefile
++++ b/sbin/geom/class/mirror/Makefile
+@@ -5,6 +5,6 @@
+ CLASS= mirror
+
+ DPADD= ${LIBMD}
+-LDADD= -lmd
++#LDADD= -lmd
+
+ .include <bsd.lib.mk>
+--- a/sbin/geom/class/raid3/Makefile
++++ b/sbin/geom/class/raid3/Makefile
+@@ -5,6 +5,6 @@
+ CLASS= raid3
+
+ DPADD= ${LIBMD}
+-LDADD= -lmd
++#LDADD= -lmd
+
+ .include <bsd.lib.mk>
+--- a/sbin/geom/core/Makefile
++++ b/sbin/geom/core/Makefile
+@@ -4,7 +4,7 @@
+
+ PROG= geom
+ MAN= geom.8
+-SRCS= geom.c subr.c
++SRCS= geom.c subr.c expand_number.c
+
+ NO_SHARED=NO
+
+@@ -12,6 +12,6 @@
+ CFLAGS+= -I${.CURDIR}/../../../sys -I${.CURDIR} -I${.CURDIR}/..
+
+ DPADD= ${LIBGEOM} ${LIBSBUF} ${LIBBSDXML} ${LIBUTIL}
+-LDADD= -lgeom -lsbuf -lbsdxml -lutil
++LDADD= -lgeom -ldl
+
+ .include <bsd.prog.mk>
+--- /dev/null
++++ b/sbin/geom/core/expand_number.c
+@@ -0,0 +1,101 @@
++/*-
++ * Copyright (c) 2007 Eric Anderson <anderson at FreeBSD.org>
++ * Copyright (c) 2007 Pawel Jakub Dawidek <pjd at FreeBSD.org>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD$");
++
++#include <sys/types.h>
++#include <ctype.h>
++#include <errno.h>
++#include <inttypes.h>
++#include <libutil.h>
++#include <stdint.h>
++
++/*
++ * Convert an expression of the following forms to a uint64_t.
++ * 1) A positive decimal number.
++ * 2) A positive decimal number followed by a 'b' or 'B' (mult by 1).
++ * 3) A positive decimal number followed by a 'k' or 'K' (mult by 1 << 10).
++ * 4) A positive decimal number followed by a 'm' or 'M' (mult by 1 << 20).
++ * 5) A positive decimal number followed by a 'g' or 'G' (mult by 1 << 30).
++ * 6) A positive decimal number followed by a 't' or 'T' (mult by 1 << 40).
++ * 7) A positive decimal number followed by a 'p' or 'P' (mult by 1 << 50).
++ * 8) A positive decimal number followed by a 'e' or 'E' (mult by 1 << 60).
++ */
++int
++expand_number(const char *buf, uint64_t *num)
++{
++ uint64_t number;
++ unsigned shift;
++ char *endptr;
++
++ number = strtoumax(buf, &endptr, 0);
++
++ if (endptr == buf) {
++ /* No valid digits. */
++ errno = EINVAL;
++ return (-1);
++ }
++
++ switch (tolower((unsigned char)*endptr)) {
++ case 'e':
++ shift = 60;
++ break;
++ case 'p':
++ shift = 50;
++ break;
++ case 't':
++ shift = 40;
++ break;
++ case 'g':
++ shift = 30;
++ break;
++ case 'm':
++ shift = 20;
++ break;
++ case 'k':
++ shift = 10;
++ break;
++ case 'b':
++ case '\0': /* No unit. */
++ *num = number;
++ return (0);
++ default:
++ /* Unrecognized unit. */
++ errno = EINVAL;
++ return (-1);
++ }
++
++ if ((number << shift) >> shift != number) {
++ /* Overflow */
++ errno = ERANGE;
++ return (-1);
++ }
++
++ *num = number << shift;
++ return (0);
++}
+--- a/sys/crypto/sha2/sha2.c
++++ b/sys/crypto/sha2/sha2.c
+@@ -67,8 +67,10 @@
+ *
+ */
+
+-#if defined(__bsdi__) || defined(__FreeBSD__)
++#if defined(_KERNEL) && (defined(__bsdi__) || defined(__FreeBSD__))
+ #define assert(x)
++#else
++#include <assert.h>
+ #endif
+
+
Modified: trunk/freebsd-utils/debian/patches/series
===================================================================
--- trunk/freebsd-utils/debian/patches/series 2011-07-25 12:51:04 UTC (rev 3627)
+++ trunk/freebsd-utils/debian/patches/series 2011-07-25 15:40:31 UTC (rev 3628)
@@ -1,4 +1,5 @@
000_devd_usb.diff
+000_sys_geom.diff
001_dmesg.diff
002_ifconfig.diff
003_kbdcontrol.diff
@@ -34,3 +35,4 @@
036_nfs_glibc.diff
037_mount_autofs.diff
038_jail.diff
+039_geom.diff
Modified: trunk/freebsd-utils/debian/rules
===================================================================
--- trunk/freebsd-utils/debian/rules 2011-07-25 12:51:04 UTC (rev 3627)
+++ trunk/freebsd-utils/debian/rules 2011-07-25 15:40:31 UTC (rev 3628)
@@ -64,7 +64,8 @@
sbin/mount_ntfs sbin/mount_nullfs sbin/mount_udf sbin/mount_unionfs \
sbin/mount_reiserfs sbin/mount \
sbin/mdconfig sbin/ccdconfig sbin/swapon sbin/atacontrol sbin/camcontrol \
- sbin/gbde sbin/geom bin/kenv \
+ sbin/gbde bin/kenv \
+ sbin/geom sys/geom/eli sys/crypto/sha2 \
usr.sbin/kbdcontrol usr.sbin/vidcontrol share/syscons \
sbin/savecore sbin/dumpon \
sys/kern/syscalls.c etc/pf.os \
@@ -102,6 +103,7 @@
$(PMAKE) -C sbin/devfs
$(PMAKE) -C sbin/dmesg
$(PMAKE) -C sbin/dumpon
+ $(PMAKE) -C sbin/geom
$(PMAKE) -C sbin/mdconfig
$(PMAKE) -C sbin/mount
$(PMAKE) -C sbin/mount_autofs
@@ -206,6 +208,7 @@
$(PMAKE) -C sbin/devfs clean
$(PMAKE) -C sbin/dmesg clean
$(PMAKE) -C sbin/dumpon clean
+ $(PMAKE) -C sbin/geom clean
$(PMAKE) -C sbin/mdconfig clean
$(PMAKE) -C sbin/mount clean
$(PMAKE) -C sbin/mount_autofs clean
More information about the Glibc-bsd-commits
mailing list