Diffstat (limited to 'sys/contrib/openzfs/module')
425 files changed, 349003 insertions, 0 deletions
diff --git a/sys/contrib/openzfs/module/.gitignore b/sys/contrib/openzfs/module/.gitignore new file mode 100644 index 000000000000..7a4bd3673e77 --- /dev/null +++ b/sys/contrib/openzfs/module/.gitignore @@ -0,0 +1,26 @@ +*.ko +*.ko.unsigned +*.ko.out +*.ko.out.sig +*.ko.debug +*.ko.full +*.dwo +.*.cmd +.*.d +*.mod + +/Kbuild +/.cache.mk +/.tmp_versions +/Module.markers +/Module.symvers +/vnode_if* +/bus_if.h +/device_if.h +/opt_global.h + +/export_syms +/machine +/x86 + +!Makefile.in diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in new file mode 100644 index 000000000000..1507965c5750 --- /dev/null +++ b/sys/contrib/openzfs/module/Kbuild.in @@ -0,0 +1,47 @@ +# When integrated in to a monolithic kernel the spl module must appear +# first. This ensures its module initialization function is run before +# any of the other module initialization functions which depend on it. +ZFS_MODULES += spl/ +ZFS_MODULES += avl/ +ZFS_MODULES += icp/ +ZFS_MODULES += lua/ +ZFS_MODULES += nvpair/ +ZFS_MODULES += unicode/ +ZFS_MODULES += zcommon/ +ZFS_MODULES += zfs/ +ZFS_MODULES += zstd/ + +# The rest is only relevant when run by kbuild +ifneq ($(KERNELRELEASE),) + +obj-$(CONFIG_ZFS) := $(ZFS_MODULES) + +ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement +ZFS_MODULE_CFLAGS += -Wmissing-prototypes +ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@ + +ifneq ($(KBUILD_EXTMOD),) +zfs_include = @abs_top_srcdir@/include +ZFS_MODULE_CFLAGS += -include @abs_top_builddir@/zfs_config.h +ZFS_MODULE_CFLAGS += -I@abs_top_builddir@/include +else +zfs_include = $(srctree)/include/zfs +ZFS_MODULE_CFLAGS += -include $(zfs_include)/zfs_config.h +endif + +ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/kernel +ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/spl +ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/zfs +ZFS_MODULE_CFLAGS += -I$(zfs_include) +ZFS_MODULE_CPPFLAGS += -D_KERNEL +ZFS_MODULE_CPPFLAGS += @KERNEL_DEBUG_CPPFLAGS@ + +ifneq ($(KBUILD_EXTMOD),) +@CONFIG_QAT_TRUE@ZFS_MODULE_CFLAGS += -I@QAT_SRC@/include +@CONFIG_QAT_TRUE@KBUILD_EXTRA_SYMBOLS += @QAT_SYMBOLS@ +endif + +subdir-asflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS) +subdir-ccflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS) + +endif diff --git a/sys/contrib/openzfs/module/Makefile.bsd b/sys/contrib/openzfs/module/Makefile.bsd new file mode 100644 index 000000000000..53b97dafd9e9 --- /dev/null +++ b/sys/contrib/openzfs/module/Makefile.bsd @@ -0,0 +1,359 @@ +.if !defined(WITH_CTF) +WITH_CTF=1 +.endif + +.include <bsd.sys.mk> + +SRCDIR=${.CURDIR} +INCDIR=${.CURDIR:H}/include + +KMOD= openzfs + +.PATH: ${SRCDIR}/avl \ + ${SRCDIR}/lua \ + ${SRCDIR}/nvpair \ + ${SRCDIR}/os/freebsd/spl \ + ${SRCDIR}/os/freebsd/zfs \ + ${SRCDIR}/unicode \ + ${SRCDIR}/zcommon \ + ${SRCDIR}/zfs \ + ${SRCDIR}/zstd \ + ${SRCDIR}/zstd/lib + + + +CFLAGS+= -I${.OBJDIR:H}/include +CFLAGS+= -I${INCDIR} +CFLAGS+= -I${INCDIR}/spl +CFLAGS+= -I${INCDIR}/os/freebsd +CFLAGS+= -I${INCDIR}/os/freebsd/spl +CFLAGS+= -I${INCDIR}/os/freebsd/zfs +CFLAGS+= -I${SRCDIR}/zstd/include +CFLAGS+= -include ${INCDIR}/os/freebsd/spl/sys/ccompile.h + +CFLAGS+= -D__KERNEL__ -DFREEBSD_NAMECACHE -DBUILDING_ZFS -D__BSD_VISIBLE=1 \ + -DHAVE_UIO_ZEROCOPY -DWITHOUT_NETDUMP -D__KERNEL -D_SYS_CONDVAR_H_ \ + -D_SYS_VMEM_H_ -DKDTRACE_HOOKS -DSMP -DHAVE_KSID -DCOMPAT_FREEBSD11 + +.if ${MACHINE_ARCH} == "amd64" +CFLAGS+= -DHAVE_AVX2 -DHAVE_AVX -D__x86_64 -DHAVE_SSE2 -DHAVE_AVX512F -DHAVE_SSSE3 +.endif + +.if defined(WITH_DEBUG) && ${WITH_DEBUG} == 
"true" +CFLAGS+= -DINVARIANTS -DWITNESS -g -O0 -DZFS_DEBUG -DOPENSOLARIS_WITNESS +.else +CFLAGS += -DNDEBUG +.endif + +.if defined(WITH_VFS_DEBUG) && ${WITH_VFS_DEBUG} == "true" +# kernel must also be built with this option for this to work +CFLAGS+= -DDEBUG_VFS_LOCKS +.endif + +.if defined(WITH_GCOV) && ${WITH_GCOV} == "true" +CFLAGS+= -fprofile-arcs -ftest-coverage +.endif + +DEBUG_FLAGS=-g + +.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \ + ${MACHINE_ARCH} == "arm" +CFLAGS+= -DBITS_PER_LONG=32 +.else +CFLAGS+= -DBITS_PER_LONG=64 +.endif + +SRCS= vnode_if.h device_if.h bus_if.h + +# avl +SRCS+= avl.c + +#lua +SRCS+= lapi.c \ + lauxlib.c \ + lbaselib.c \ + lcode.c \ + lcompat.c \ + lcorolib.c \ + lctype.c \ + ldebug.c \ + ldo.c \ + lfunc.c \ + lgc.c \ + llex.c \ + lmem.c \ + lobject.c \ + lopcodes.c \ + lparser.c \ + lstate.c \ + lstring.c \ + lstrlib.c \ + ltable.c \ + ltablib.c \ + ltm.c \ + lvm.c \ + lzio.c + +#nvpair +SRCS+= nvpair.c \ + fnvpair.c \ + nvpair_alloc_spl.c \ + nvpair_alloc_fixed.c + +#os/freebsd/spl +SRCS+= acl_common.c \ + btree.c \ + callb.c \ + list.c \ + spl_acl.c \ + spl_cmn_err.c \ + spl_dtrace.c \ + spl_kmem.c \ + spl_kstat.c \ + spl_misc.c \ + spl_policy.c \ + spl_string.c \ + spl_sunddi.c \ + spl_sysevent.c \ + spl_taskq.c \ + spl_uio.c \ + spl_vfs.c \ + spl_vm.c \ + spl_zone.c \ + sha256c.c \ + sha512c.c \ + spl_procfs_list.c \ + spl_zlib.c + + +.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \ + ${MACHINE_ARCH} == "arm" +SRCS+= spl_atomic.c +.endif + +#os/freebsd/zfs +SRCS+= abd_os.c \ + crypto_os.c \ + dmu_os.c \ + hkdf.c \ + kmod_core.c \ + spa_os.c \ + sysctl_os.c \ + vdev_file.c \ + vdev_label_os.c \ + vdev_geom.c \ + zfs_acl.c \ + zfs_ctldir.c \ + zfs_dir.c \ + zfs_ioctl_compat.c \ + zfs_ioctl_os.c \ + zfs_log.c \ + zfs_replay.c \ + zfs_vfsops.c \ + zfs_vnops.c \ + zfs_znode.c \ + zio_crypt.c \ + zvol_os.c + +#unicode +SRCS+= uconv.c \ + u8_textprep.c + +#zcommon +SRCS+= zfeature_common.c \ + zfs_comutil.c \ + zfs_deleg.c \ + zfs_fletcher.c \ + zfs_fletcher_avx512.c \ + zfs_fletcher_intel.c \ + zfs_fletcher_sse.c \ + zfs_fletcher_superscalar.c \ + zfs_fletcher_superscalar4.c \ + zfs_namecheck.c \ + zfs_prop.c \ + zpool_prop.c \ + zprop_common.c + +#zfs +SRCS+= abd.c \ + aggsum.c \ + arc.c \ + arc_os.c \ + blkptr.c \ + bplist.c \ + bpobj.c \ + cityhash.c \ + dbuf.c \ + dbuf_stats.c \ + bptree.c \ + bqueue.c \ + dataset_kstats.c \ + ddt.c \ + ddt_zap.c \ + dmu.c \ + dmu_diff.c \ + dmu_object.c \ + dmu_objset.c \ + dmu_recv.c \ + dmu_redact.c \ + dmu_send.c \ + dmu_traverse.c \ + dmu_tx.c \ + dmu_zfetch.c \ + dnode.c \ + dnode_sync.c \ + dsl_dataset.c \ + dsl_deadlist.c \ + dsl_deleg.c \ + dsl_bookmark.c \ + dsl_dir.c \ + dsl_crypt.c \ + dsl_destroy.c \ + dsl_pool.c \ + dsl_prop.c \ + dsl_scan.c \ + dsl_synctask.c \ + dsl_userhold.c \ + fm.c \ + gzip.c \ + lzjb.c \ + lz4.c \ + metaslab.c \ + mmp.c \ + multilist.c \ + objlist.c \ + pathname.c \ + range_tree.c \ + refcount.c \ + rrwlock.c \ + sa.c \ + sha256.c \ + skein_zfs.c \ + spa.c \ + spa_boot.c \ + spa_checkpoint.c \ + spa_config.c \ + spa_errlog.c \ + spa_history.c \ + spa_log_spacemap.c \ + spa_misc.c \ + spa_stats.c \ + space_map.c \ + space_reftree.c \ + txg.c \ + uberblock.c \ + unique.c \ + vdev.c \ + vdev_cache.c \ + vdev_indirect.c \ + vdev_indirect_births.c \ + vdev_indirect_mapping.c \ + vdev_initialize.c \ + vdev_label.c \ + vdev_mirror.c \ + vdev_missing.c \ + vdev_queue.c \ + vdev_raidz.c \ + vdev_raidz_math.c \ + vdev_raidz_math_scalar.c \ + 
vdev_rebuild.c \ + vdev_raidz_math_avx2.c \ + vdev_raidz_math_avx512bw.c \ + vdev_raidz_math_avx512f.c \ + vdev_raidz_math_sse2.c \ + vdev_raidz_math_ssse3.c \ + vdev_removal.c \ + vdev_root.c \ + vdev_trim.c \ + zap.c \ + zap_leaf.c \ + zap_micro.c \ + zcp.c \ + zcp_get.c \ + zcp_global.c \ + zcp_iter.c \ + zcp_set.c \ + zcp_synctask.c \ + zfeature.c \ + zfs_byteswap.c \ + zfs_debug.c \ + zfs_file_os.c \ + zfs_fm.c \ + zfs_fuid.c \ + zfs_ioctl.c \ + zfs_onexit.c \ + zfs_quota.c \ + zfs_ratelimit.c \ + zfs_rlock.c \ + zfs_sa.c \ + zil.c \ + zio.c \ + zio_checksum.c \ + zio_compress.c \ + zio_inject.c \ + zle.c \ + zrlock.c \ + zthr.c \ + zvol.c + +#zstd +SRCS+= zfs_zstd.c \ + zstd.c + +beforeinstall: +.if ${MK_DEBUG_FILES} != "no" + mtree -eu \ + -f /etc/mtree/BSD.debug.dist \ + -p ${DESTDIR}/usr/lib +.endif + +.include <bsd.kmod.mk> + + +CFLAGS.gcc+= -Wno-pointer-to-int-cast + +CFLAGS.lapi.c= -Wno-cast-qual +CFLAGS.lcompat.c= -Wno-cast-qual +CFLAGS.lobject.c= -Wno-cast-qual +CFLAGS.ltable.c= -Wno-cast-qual +CFLAGS.lvm.c= -Wno-cast-qual +CFLAGS.nvpair.c= -DHAVE_RPC_TYPES -Wno-cast-qual +CFLAGS.spl_string.c= -Wno-cast-qual +CFLAGS.spl_vm.c= -Wno-cast-qual +CFLAGS.spl_zlib.c= -Wno-cast-qual +CFLAGS.abd.c= -Wno-cast-qual +CFLAGS.zfs_log.c= -Wno-cast-qual +CFLAGS.zfs_vnops.c= -Wno-pointer-arith +CFLAGS.u8_textprep.c= -Wno-cast-qual +CFLAGS.zfs_fletcher.c= -Wno-cast-qual -Wno-pointer-arith +CFLAGS.zfs_fletcher_intel.c= -Wno-cast-qual -Wno-pointer-arith +CFLAGS.zfs_fletcher_sse.c= -Wno-cast-qual -Wno-pointer-arith +CFLAGS.zfs_fletcher_avx512.c= -Wno-cast-qual -Wno-pointer-arith +CFLAGS.zprop_common.c= -Wno-cast-qual +CFLAGS.ddt.c= -Wno-cast-qual +CFLAGS.dmu.c= -Wno-cast-qual +CFLAGS.dmu_traverse.c= -Wno-cast-qual +CFLAGS.dsl_dir.c= -Wno-cast-qual +CFLAGS.dsl_deadlist.c= -Wno-cast-qual +CFLAGS.dsl_prop.c= -Wno-cast-qual +CFLAGS.fm.c= -Wno-cast-qual +CFLAGS.lz4.c= -Wno-cast-qual +CFLAGS.spa.c= -Wno-cast-qual +CFLAGS.spa_misc.c= -Wno-cast-qual +CFLAGS.sysctl_os.c= -include ../zfs_config.h +CFLAGS.vdev_raidz.c= -Wno-cast-qual +CFLAGS.vdev_raidz_math.c= -Wno-cast-qual +CFLAGS.vdev_raidz_math_scalar.c= -Wno-cast-qual +CFLAGS.vdev_raidz_math_avx2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier +CFLAGS.vdev_raidz_math_avx512f.c= -Wno-cast-qual -Wno-duplicate-decl-specifier +CFLAGS.vdev_raidz_math_sse2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier +CFLAGS.zap_leaf.c= -Wno-cast-qual +CFLAGS.zap_micro.c= -Wno-cast-qual +CFLAGS.zcp.c= -Wno-cast-qual +CFLAGS.zfs_fm.c= -Wno-cast-qual +CFLAGS.zfs_ioctl.c= -Wno-cast-qual +CFLAGS.zil.c= -Wno-cast-qual +CFLAGS.zio.c= -Wno-cast-qual +CFLAGS.zrlock.c= -Wno-cast-qual +CFLAGS.zfs_zstd.c= -Wno-cast-qual -Wno-pointer-arith +CFLAGS.zstd.c= -fno-tree-vectorize diff --git a/sys/contrib/openzfs/module/Makefile.in b/sys/contrib/openzfs/module/Makefile.in new file mode 100644 index 000000000000..ead4ff1360b2 --- /dev/null +++ b/sys/contrib/openzfs/module/Makefile.in @@ -0,0 +1,115 @@ +include Kbuild + +INSTALL_MOD_DIR ?= extra + +SUBDIR_TARGETS = icp lua zstd + +all: modules +distclean maintainer-clean: clean +install: modules_install +uninstall: modules_uninstall +check: + +.PHONY: all distclean maintainer-clean install uninstall check distdir \ + modules modules-Linux modules-FreeBSD modules-unknown \ + clean clean-Linux clean-FreeBSD \ + modules_install modules_install-Linux modules_install-FreeBSD \ + modules_uninstall modules_uninstall-Linux modules_uninstall-FreeBSD + +# Filter out options that FreeBSD make doesn't understand +getflags = ( \ +set -- \ + $(filter-out 
--%,$(firstword $(MFLAGS))) \ + $(filter -I%,$(MFLAGS)) \ + $(filter -j%,$(MFLAGS)); \ +fmakeflags=""; \ +while getopts :deiI:j:knqrstw flag; do \ + case $$flag in \ + \?) :;; \ + :) if [ $$OPTARG = "j" ]; then \ + ncpus=$$(sysctl -n kern.smp.cpus 2>/dev/null || :); \ + if [ -n "$$ncpus" ]; then fmakeflags="$$fmakeflags -j$$ncpus"; fi; \ + fi;; \ + d) fmakeflags="$$fmakeflags -dA";; \ + *) fmakeflags="$$fmakeflags -$$flag$$OPTARG";; \ + esac; \ +done; \ +echo $$fmakeflags \ +) +FMAKEFLAGS = -C @abs_srcdir@ -f Makefile.bsd $(shell $(getflags)) + +ifneq (@abs_srcdir@,@abs_builddir@) +FMAKEFLAGS += MAKEOBJDIR=@abs_builddir@ +endif +FMAKE = env -u MAKEFLAGS make $(FMAKEFLAGS) + +modules-Linux: + list='$(SUBDIR_TARGETS)'; for targetdir in $$list; do \ + $(MAKE) -C $$targetdir; \ + done + $(MAKE) -C @LINUX_OBJ@ M=`pwd` @KERNEL_MAKE@ CONFIG_ZFS=m modules + +modules-FreeBSD: + +$(FMAKE) + +modules-unknown: + @true + +modules: modules-@ac_system@ + +clean-Linux: + @# Only cleanup the kernel build directories when CONFIG_KERNEL + @# is defined. This indicates that kernel modules should be built. +@CONFIG_KERNEL_TRUE@ $(MAKE) -C @LINUX_OBJ@ M=`pwd` @KERNEL_MAKE@ clean + + if [ -f @LINUX_SYMBOLS@ ]; then $(RM) @LINUX_SYMBOLS@; fi + if [ -f Module.markers ]; then $(RM) Module.markers; fi + + find . -name '*.ur-safe' -type f -print | xargs $(RM) + +clean-FreeBSD: + +$(FMAKE) clean + +clean: clean-@ac_system@ + +modules_install-Linux: + @# Install the kernel modules + $(MAKE) -C @LINUX_OBJ@ M=`pwd` modules_install \ + INSTALL_MOD_PATH=$(DESTDIR)$(INSTALL_MOD_PATH) \ + INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) \ + KERNELRELEASE=@LINUX_VERSION@ + @# Remove extraneous build products when packaging + kmoddir=$(DESTDIR)$(INSTALL_MOD_PATH)/lib/modules/@LINUX_VERSION@; \ + if [ -n "$(DESTDIR)" ]; then \ + find $$kmoddir -name 'modules.*' | xargs $(RM); \ + fi + sysmap=$(DESTDIR)$(INSTALL_MOD_PATH)/boot/System.map-@LINUX_VERSION@; \ + if [ -f $$sysmap ]; then \ + depmod -ae -F $$sysmap @LINUX_VERSION@; \ + fi + +modules_install-FreeBSD: + @# Install the kernel modules + +$(FMAKE) install + +modules_install: modules_install-@ac_system@ + +modules_uninstall-Linux: + @# Uninstall the kernel modules + kmoddir=$(DESTDIR)$(INSTALL_MOD_PATH)/lib/modules/@LINUX_VERSION@ \ + for objdir in $(ZFS_MODULES); do \ + $(RM) -R $$kmoddir/$(INSTALL_MOD_DIR)/$$objdir; \ + done + +modules_uninstall-FreeBSD: + @false + +modules_uninstall: modules_uninstall-@ac_system@ + +distdir: + (cd @srcdir@ && find $(ZFS_MODULES) os -name '*.[chS]') | \ + while read path; do \ + mkdir -p $$distdir/$${path%/*}; \ + cp @srcdir@/$$path $$distdir/$$path; \ + done; \ + cp @srcdir@/Makefile.bsd $$distdir/Makefile.bsd diff --git a/sys/contrib/openzfs/module/avl/Makefile.in b/sys/contrib/openzfs/module/avl/Makefile.in new file mode 100644 index 000000000000..991d5f95b8c0 --- /dev/null +++ b/sys/contrib/openzfs/module/avl/Makefile.in @@ -0,0 +1,10 @@ +ifneq ($(KBUILD_EXTMOD),) +src = @abs_srcdir@ +obj = @abs_builddir@ +endif + +MODULE := zavl + +obj-$(CONFIG_ZFS) := $(MODULE).o + +$(MODULE)-objs += avl.o diff --git a/sys/contrib/openzfs/module/avl/avl.c b/sys/contrib/openzfs/module/avl/avl.c new file mode 100644 index 000000000000..9cc8362399d9 --- /dev/null +++ b/sys/contrib/openzfs/module/avl/avl.c @@ -0,0 +1,1093 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2009 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Copyright 2015 Nexenta Systems, Inc. All rights reserved. + * Copyright (c) 2015 by Delphix. All rights reserved. + */ + +/* + * AVL - generic AVL tree implementation for kernel use + * + * A complete description of AVL trees can be found in many CS textbooks. + * + * Here is a very brief overview. An AVL tree is a binary search tree that is + * almost perfectly balanced. By "almost" perfectly balanced, we mean that at + * any given node, the left and right subtrees are allowed to differ in height + * by at most 1 level. + * + * This relaxation from a perfectly balanced binary tree allows doing + * insertion and deletion relatively efficiently. Searching the tree is + * still a fast operation, roughly O(log(N)). + * + * The key to insertion and deletion is a set of tree manipulations called + * rotations, which bring unbalanced subtrees back into the semi-balanced state. + * + * This implementation of AVL trees has the following peculiarities: + * + * - The AVL specific data structures are physically embedded as fields + * in the "using" data structures. To maintain generality the code + * must constantly translate between "avl_node_t *" and containing + * data structure "void *"s by adding/subtracting the avl_offset. + * + * - Since the AVL data is always embedded in other structures, there is + * no locking or memory allocation in the AVL routines. This must be + * provided for by the enclosing data structure's semantics. Typically, + * avl_insert()/_add()/_remove()/avl_insert_here() require some kind of + * exclusive write lock. Other operations require a read lock. + * + * - The implementation uses iteration instead of explicit recursion, + * since it is intended to run on limited size kernel stacks. Since + * there is no recursion stack present to move "up" in the tree, + * there is an explicit "parent" link in the avl_node_t. + * + * - The left/right children pointers of a node are in an array. + * In the code, variables (instead of constants) are used to represent + * left and right indices. The implementation is written as if it only + * dealt with left handed manipulations. By changing the value assigned + * to "left", the code also works for right handed trees. The + * following variables/terms are frequently used: + * + * int left; // 0 when dealing with left children, + * // 1 for dealing with right children + * + * int left_heavy; // -1 when left subtree is taller at some node, + * // +1 when right subtree is taller + * + * int right; // will be the opposite of left (0 or 1) + * int right_heavy;// will be the opposite of left_heavy (-1 or 1) + * + * int direction; // 0 for "<" (ie. 
left child); 1 for ">" (right) + * + * Though it is a little more confusing to read the code, the approach + * allows using half as much code (and hence cache footprint) for tree + * manipulations and eliminates many conditional branches. + * + * - The avl_index_t is an opaque "cookie" used to find nodes at or + * adjacent to where a new value would be inserted in the tree. The value + * is a modified "avl_node_t *". The bottom bit (normally 0 for a + * pointer) is set to indicate if that the new node has a value greater + * than the value of the indicated "avl_node_t *". + * + * Note - in addition to userland (e.g. libavl and libutil) and the kernel + * (e.g. genunix), avl.c is compiled into ld.so and kmdb's genunix module, + * which each have their own compilation environments and subsequent + * requirements. Each of these environments must be considered when adding + * dependencies from avl.c. + */ + +#include <sys/types.h> +#include <sys/param.h> +#include <sys/debug.h> +#include <sys/avl.h> +#include <sys/cmn_err.h> +#include <sys/mod.h> + +/* + * Small arrays to translate between balance (or diff) values and child indices. + * + * Code that deals with binary tree data structures will randomly use + * left and right children when examining a tree. C "if()" statements + * which evaluate randomly suffer from very poor hardware branch prediction. + * In this code we avoid some of the branch mispredictions by using the + * following translation arrays. They replace random branches with an + * additional memory reference. Since the translation arrays are both very + * small the data should remain efficiently in cache. + */ +static const int avl_child2balance[2] = {-1, 1}; +static const int avl_balance2child[] = {0, 0, 1}; + + +/* + * Walk from one node to the previous valued node (ie. an infix walk + * towards the left). At any given node we do one of 2 things: + * + * - If there is a left child, go to it, then to it's rightmost descendant. + * + * - otherwise we return through parent nodes until we've come from a right + * child. + * + * Return Value: + * NULL - if at the end of the nodes + * otherwise next node + */ +void * +avl_walk(avl_tree_t *tree, void *oldnode, int left) +{ + size_t off = tree->avl_offset; + avl_node_t *node = AVL_DATA2NODE(oldnode, off); + int right = 1 - left; + int was_child; + + + /* + * nowhere to walk to if tree is empty + */ + if (node == NULL) + return (NULL); + + /* + * Visit the previous valued node. There are two possibilities: + * + * If this node has a left child, go down one left, then all + * the way right. + */ + if (node->avl_child[left] != NULL) { + for (node = node->avl_child[left]; + node->avl_child[right] != NULL; + node = node->avl_child[right]) + ; + /* + * Otherwise, return through left children as far as we can. + */ + } else { + for (;;) { + was_child = AVL_XCHILD(node); + node = AVL_XPARENT(node); + if (node == NULL) + return (NULL); + if (was_child == right) + break; + } + } + + return (AVL_NODE2DATA(node, off)); +} + +/* + * Return the lowest valued node in a tree or NULL. + * (leftmost child from root of tree) + */ +void * +avl_first(avl_tree_t *tree) +{ + avl_node_t *node; + avl_node_t *prev = NULL; + size_t off = tree->avl_offset; + + for (node = tree->avl_root; node != NULL; node = node->avl_child[0]) + prev = node; + + if (prev != NULL) + return (AVL_NODE2DATA(prev, off)); + return (NULL); +} + +/* + * Return the highest valued node in a tree or NULL. 
+ * (rightmost child from root of tree) + */ +void * +avl_last(avl_tree_t *tree) +{ + avl_node_t *node; + avl_node_t *prev = NULL; + size_t off = tree->avl_offset; + + for (node = tree->avl_root; node != NULL; node = node->avl_child[1]) + prev = node; + + if (prev != NULL) + return (AVL_NODE2DATA(prev, off)); + return (NULL); +} + +/* + * Access the node immediately before or after an insertion point. + * + * "avl_index_t" is a (avl_node_t *) with the bottom bit indicating a child + * + * Return value: + * NULL: no node in the given direction + * "void *" of the found tree node + */ +void * +avl_nearest(avl_tree_t *tree, avl_index_t where, int direction) +{ + int child = AVL_INDEX2CHILD(where); + avl_node_t *node = AVL_INDEX2NODE(where); + void *data; + size_t off = tree->avl_offset; + + if (node == NULL) { + ASSERT(tree->avl_root == NULL); + return (NULL); + } + data = AVL_NODE2DATA(node, off); + if (child != direction) + return (data); + + return (avl_walk(tree, data, direction)); +} + + +/* + * Search for the node which contains "value". The algorithm is a + * simple binary tree search. + * + * return value: + * NULL: the value is not in the AVL tree + * *where (if not NULL) is set to indicate the insertion point + * "void *" of the found tree node + */ +void * +avl_find(avl_tree_t *tree, const void *value, avl_index_t *where) +{ + avl_node_t *node; + avl_node_t *prev = NULL; + int child = 0; + int diff; + size_t off = tree->avl_offset; + + for (node = tree->avl_root; node != NULL; + node = node->avl_child[child]) { + + prev = node; + + diff = tree->avl_compar(value, AVL_NODE2DATA(node, off)); + ASSERT(-1 <= diff && diff <= 1); + if (diff == 0) { +#ifdef ZFS_DEBUG + if (where != NULL) + *where = 0; +#endif + return (AVL_NODE2DATA(node, off)); + } + child = avl_balance2child[1 + diff]; + + } + + if (where != NULL) + *where = AVL_MKINDEX(prev, child); + + return (NULL); +} + + +/* + * Perform a rotation to restore balance at the subtree given by depth. + * + * This routine is used by both insertion and deletion. The return value + * indicates: + * 0 : subtree did not change height + * !0 : subtree was reduced in height + * + * The code is written as if handling left rotations, right rotations are + * symmetric and handled by swapping values of variables right/left[_heavy] + * + * On input balance is the "new" balance at "node". This value is either + * -2 or +2. + */ +static int +avl_rotation(avl_tree_t *tree, avl_node_t *node, int balance) +{ + int left = !(balance < 0); /* when balance = -2, left will be 0 */ + int right = 1 - left; + int left_heavy = balance >> 1; + int right_heavy = -left_heavy; + avl_node_t *parent = AVL_XPARENT(node); + avl_node_t *child = node->avl_child[left]; + avl_node_t *cright; + avl_node_t *gchild; + avl_node_t *gright; + avl_node_t *gleft; + int which_child = AVL_XCHILD(node); + int child_bal = AVL_XBALANCE(child); + + /* BEGIN CSTYLED */ + /* + * case 1 : node is overly left heavy, the left child is balanced or + * also left heavy. This requires the following rotation. + * + * (node bal:-2) + * / \ + * / \ + * (child bal:0 or -1) + * / \ + * / \ + * cright + * + * becomes: + * + * (child bal:1 or 0) + * / \ + * / \ + * (node bal:-1 or 0) + * / \ + * / \ + * cright + * + * we detect this situation by noting that child's balance is not + * right_heavy. 
+ */ + /* END CSTYLED */ + if (child_bal != right_heavy) { + + /* + * compute new balance of nodes + * + * If child used to be left heavy (now balanced) we reduced + * the height of this sub-tree -- used in "return...;" below + */ + child_bal += right_heavy; /* adjust towards right */ + + /* + * move "cright" to be node's left child + */ + cright = child->avl_child[right]; + node->avl_child[left] = cright; + if (cright != NULL) { + AVL_SETPARENT(cright, node); + AVL_SETCHILD(cright, left); + } + + /* + * move node to be child's right child + */ + child->avl_child[right] = node; + AVL_SETBALANCE(node, -child_bal); + AVL_SETCHILD(node, right); + AVL_SETPARENT(node, child); + + /* + * update the pointer into this subtree + */ + AVL_SETBALANCE(child, child_bal); + AVL_SETCHILD(child, which_child); + AVL_SETPARENT(child, parent); + if (parent != NULL) + parent->avl_child[which_child] = child; + else + tree->avl_root = child; + + return (child_bal == 0); + } + + /* BEGIN CSTYLED */ + /* + * case 2 : When node is left heavy, but child is right heavy we use + * a different rotation. + * + * (node b:-2) + * / \ + * / \ + * / \ + * (child b:+1) + * / \ + * / \ + * (gchild b: != 0) + * / \ + * / \ + * gleft gright + * + * becomes: + * + * (gchild b:0) + * / \ + * / \ + * / \ + * (child b:?) (node b:?) + * / \ / \ + * / \ / \ + * gleft gright + * + * computing the new balances is more complicated. As an example: + * if gchild was right_heavy, then child is now left heavy + * else it is balanced + */ + /* END CSTYLED */ + gchild = child->avl_child[right]; + gleft = gchild->avl_child[left]; + gright = gchild->avl_child[right]; + + /* + * move gright to left child of node and + * + * move gleft to right child of node + */ + node->avl_child[left] = gright; + if (gright != NULL) { + AVL_SETPARENT(gright, node); + AVL_SETCHILD(gright, left); + } + + child->avl_child[right] = gleft; + if (gleft != NULL) { + AVL_SETPARENT(gleft, child); + AVL_SETCHILD(gleft, right); + } + + /* + * move child to left child of gchild and + * + * move node to right child of gchild and + * + * fixup parent of all this to point to gchild + */ + balance = AVL_XBALANCE(gchild); + gchild->avl_child[left] = child; + AVL_SETBALANCE(child, (balance == right_heavy ? left_heavy : 0)); + AVL_SETPARENT(child, gchild); + AVL_SETCHILD(child, left); + + gchild->avl_child[right] = node; + AVL_SETBALANCE(node, (balance == left_heavy ? right_heavy : 0)); + AVL_SETPARENT(node, gchild); + AVL_SETCHILD(node, right); + + AVL_SETBALANCE(gchild, 0); + AVL_SETPARENT(gchild, parent); + AVL_SETCHILD(gchild, which_child); + if (parent != NULL) + parent->avl_child[which_child] = gchild; + else + tree->avl_root = gchild; + + return (1); /* the new tree is always shorter */ +} + + +/* + * Insert a new node into an AVL tree at the specified (from avl_find()) place. + * + * Newly inserted nodes are always leaf nodes in the tree, since avl_find() + * searches out to the leaf positions. The avl_index_t indicates the node + * which will be the parent of the new node. + * + * After the node is inserted, a single rotation further up the tree may + * be necessary to maintain an acceptable AVL balance. 
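 *
 * Editor's illustrative sketch (not part of the original source): a typical
 * caller embeds an avl_node_t in its own structure, supplies a comparator
 * that returns only -1, 0, or +1 (avl_find() asserts this), and uses
 * avl_find()/avl_insert() to add entries without duplicates. The type,
 * field, and variable names below are hypothetical.
 *
 *	typedef struct my_node {
 *		uint64_t	mn_key;
 *		avl_node_t	mn_avl;		// embedded AVL linkage
 *	} my_node_t;
 *
 *	static int
 *	my_compare(const void *a, const void *b)
 *	{
 *		const my_node_t *l = a, *r = b;
 *
 *		if (l->mn_key < r->mn_key)
 *			return (-1);
 *		return (l->mn_key > r->mn_key ? 1 : 0);
 *	}
 *
 *	avl_tree_t t;
 *	avl_index_t where;
 *
 *	avl_create(&t, my_compare, sizeof (my_node_t),
 *	    offsetof(my_node_t, mn_avl));
 *	if (avl_find(&t, new_node, &where) == NULL)
 *		avl_insert(&t, new_node, where);
 *
 * avl_add() wraps exactly this find-then-insert pattern.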
+ */ +void +avl_insert(avl_tree_t *tree, void *new_data, avl_index_t where) +{ + avl_node_t *node; + avl_node_t *parent = AVL_INDEX2NODE(where); + int old_balance; + int new_balance; + int which_child = AVL_INDEX2CHILD(where); + size_t off = tree->avl_offset; + + ASSERT(tree); +#ifdef _LP64 + ASSERT(((uintptr_t)new_data & 0x7) == 0); +#endif + + node = AVL_DATA2NODE(new_data, off); + + /* + * First, add the node to the tree at the indicated position. + */ + ++tree->avl_numnodes; + + node->avl_child[0] = NULL; + node->avl_child[1] = NULL; + + AVL_SETCHILD(node, which_child); + AVL_SETBALANCE(node, 0); + AVL_SETPARENT(node, parent); + if (parent != NULL) { + ASSERT(parent->avl_child[which_child] == NULL); + parent->avl_child[which_child] = node; + } else { + ASSERT(tree->avl_root == NULL); + tree->avl_root = node; + } + /* + * Now, back up the tree modifying the balance of all nodes above the + * insertion point. If we get to a highly unbalanced ancestor, we + * need to do a rotation. If we back out of the tree we are done. + * If we brought any subtree into perfect balance (0), we are also done. + */ + for (;;) { + node = parent; + if (node == NULL) + return; + + /* + * Compute the new balance + */ + old_balance = AVL_XBALANCE(node); + new_balance = old_balance + avl_child2balance[which_child]; + + /* + * If we introduced equal balance, then we are done immediately + */ + if (new_balance == 0) { + AVL_SETBALANCE(node, 0); + return; + } + + /* + * If both old and new are not zero we went + * from -1 to -2 balance, do a rotation. + */ + if (old_balance != 0) + break; + + AVL_SETBALANCE(node, new_balance); + parent = AVL_XPARENT(node); + which_child = AVL_XCHILD(node); + } + + /* + * perform a rotation to fix the tree and return + */ + (void) avl_rotation(tree, node, new_balance); +} + +/* + * Insert "new_data" in "tree" in the given "direction" either after or + * before (AVL_AFTER, AVL_BEFORE) the data "here". + * + * Insertions can only be done at empty leaf points in the tree, therefore + * if the given child of the node is already present we move to either + * the AVL_PREV or AVL_NEXT and reverse the insertion direction. Since + * every other node in the tree is a leaf, this always works. + * + * To help developers using this interface, we assert that the new node + * is correctly ordered at every step of the way in DEBUG kernels. + */ +void +avl_insert_here( + avl_tree_t *tree, + void *new_data, + void *here, + int direction) +{ + avl_node_t *node; + int child = direction; /* rely on AVL_BEFORE == 0, AVL_AFTER == 1 */ +#ifdef ZFS_DEBUG + int diff; +#endif + + ASSERT(tree != NULL); + ASSERT(new_data != NULL); + ASSERT(here != NULL); + ASSERT(direction == AVL_BEFORE || direction == AVL_AFTER); + + /* + * If corresponding child of node is not NULL, go to the neighboring + * node and reverse the insertion direction. + */ + node = AVL_DATA2NODE(here, tree->avl_offset); + +#ifdef ZFS_DEBUG + diff = tree->avl_compar(new_data, here); + ASSERT(-1 <= diff && diff <= 1); + ASSERT(diff != 0); + ASSERT(diff > 0 ? child == 1 : child == 0); +#endif + + if (node->avl_child[child] != NULL) { + node = node->avl_child[child]; + child = 1 - child; + while (node->avl_child[child] != NULL) { +#ifdef ZFS_DEBUG + diff = tree->avl_compar(new_data, + AVL_NODE2DATA(node, tree->avl_offset)); + ASSERT(-1 <= diff && diff <= 1); + ASSERT(diff != 0); + ASSERT(diff > 0 ? 
child == 1 : child == 0); +#endif + node = node->avl_child[child]; + } +#ifdef ZFS_DEBUG + diff = tree->avl_compar(new_data, + AVL_NODE2DATA(node, tree->avl_offset)); + ASSERT(-1 <= diff && diff <= 1); + ASSERT(diff != 0); + ASSERT(diff > 0 ? child == 1 : child == 0); +#endif + } + ASSERT(node->avl_child[child] == NULL); + + avl_insert(tree, new_data, AVL_MKINDEX(node, child)); +} + +/* + * Add a new node to an AVL tree. Strictly enforce that no duplicates can + * be added to the tree with a VERIFY which is enabled for non-DEBUG builds. + */ +void +avl_add(avl_tree_t *tree, void *new_node) +{ + avl_index_t where = 0; + + VERIFY(avl_find(tree, new_node, &where) == NULL); + + avl_insert(tree, new_node, where); +} + +/* + * Delete a node from the AVL tree. Deletion is similar to insertion, but + * with 2 complications. + * + * First, we may be deleting an interior node. Consider the following subtree: + * + * d c c + * / \ / \ / \ + * b e b e b e + * / \ / \ / + * a c a a + * + * When we are deleting node (d), we find and bring up an adjacent valued leaf + * node, say (c), to take the interior node's place. In the code this is + * handled by temporarily swapping (d) and (c) in the tree and then using + * common code to delete (d) from the leaf position. + * + * Secondly, an interior deletion from a deep tree may require more than one + * rotation to fix the balance. This is handled by moving up the tree through + * parents and applying rotations as needed. The return value from + * avl_rotation() is used to detect when a subtree did not change overall + * height due to a rotation. + */ +void +avl_remove(avl_tree_t *tree, void *data) +{ + avl_node_t *delete; + avl_node_t *parent; + avl_node_t *node; + avl_node_t tmp; + int old_balance; + int new_balance; + int left; + int right; + int which_child; + size_t off = tree->avl_offset; + + ASSERT(tree); + + delete = AVL_DATA2NODE(data, off); + + /* + * Deletion is easiest with a node that has at most 1 child. + * We swap a node with 2 children with a sequentially valued + * neighbor node. That node will have at most 1 child. Note this + * has no effect on the ordering of the remaining nodes. + * + * As an optimization, we choose the greater neighbor if the tree + * is right heavy, otherwise the left neighbor. This reduces the + * number of rotations needed. + */ + if (delete->avl_child[0] != NULL && delete->avl_child[1] != NULL) { + + /* + * choose node to swap from whichever side is taller + */ + old_balance = AVL_XBALANCE(delete); + left = avl_balance2child[old_balance + 1]; + right = 1 - left; + + /* + * get to the previous value'd node + * (down 1 left, as far as possible right) + */ + for (node = delete->avl_child[left]; + node->avl_child[right] != NULL; + node = node->avl_child[right]) + ; + + /* + * create a temp placeholder for 'node' + * move 'node' to delete's spot in the tree + */ + tmp = *node; + + *node = *delete; + if (node->avl_child[left] == node) + node->avl_child[left] = &tmp; + + parent = AVL_XPARENT(node); + if (parent != NULL) + parent->avl_child[AVL_XCHILD(node)] = node; + else + tree->avl_root = node; + AVL_SETPARENT(node->avl_child[left], node); + AVL_SETPARENT(node->avl_child[right], node); + + /* + * Put tmp where node used to be (just temporary). + * It always has a parent and at most 1 child. 
+ */ + delete = &tmp; + parent = AVL_XPARENT(delete); + parent->avl_child[AVL_XCHILD(delete)] = delete; + which_child = (delete->avl_child[1] != 0); + if (delete->avl_child[which_child] != NULL) + AVL_SETPARENT(delete->avl_child[which_child], delete); + } + + + /* + * Here we know "delete" is at least partially a leaf node. It can + * be easily removed from the tree. + */ + ASSERT(tree->avl_numnodes > 0); + --tree->avl_numnodes; + parent = AVL_XPARENT(delete); + which_child = AVL_XCHILD(delete); + if (delete->avl_child[0] != NULL) + node = delete->avl_child[0]; + else + node = delete->avl_child[1]; + + /* + * Connect parent directly to node (leaving out delete). + */ + if (node != NULL) { + AVL_SETPARENT(node, parent); + AVL_SETCHILD(node, which_child); + } + if (parent == NULL) { + tree->avl_root = node; + return; + } + parent->avl_child[which_child] = node; + + + /* + * Since the subtree is now shorter, begin adjusting parent balances + * and performing any needed rotations. + */ + do { + + /* + * Move up the tree and adjust the balance + * + * Capture the parent and which_child values for the next + * iteration before any rotations occur. + */ + node = parent; + old_balance = AVL_XBALANCE(node); + new_balance = old_balance - avl_child2balance[which_child]; + parent = AVL_XPARENT(node); + which_child = AVL_XCHILD(node); + + /* + * If a node was in perfect balance but isn't anymore then + * we can stop, since the height didn't change above this point + * due to a deletion. + */ + if (old_balance == 0) { + AVL_SETBALANCE(node, new_balance); + break; + } + + /* + * If the new balance is zero, we don't need to rotate + * else + * need a rotation to fix the balance. + * If the rotation doesn't change the height + * of the sub-tree we have finished adjusting. 
+ */ + if (new_balance == 0) + AVL_SETBALANCE(node, new_balance); + else if (!avl_rotation(tree, node, new_balance)) + break; + } while (parent != NULL); +} + +#define AVL_REINSERT(tree, obj) \ + avl_remove((tree), (obj)); \ + avl_add((tree), (obj)) + +boolean_t +avl_update_lt(avl_tree_t *t, void *obj) +{ + void *neighbor; + + ASSERT(((neighbor = AVL_NEXT(t, obj)) == NULL) || + (t->avl_compar(obj, neighbor) <= 0)); + + neighbor = AVL_PREV(t, obj); + if ((neighbor != NULL) && (t->avl_compar(obj, neighbor) < 0)) { + AVL_REINSERT(t, obj); + return (B_TRUE); + } + + return (B_FALSE); +} + +boolean_t +avl_update_gt(avl_tree_t *t, void *obj) +{ + void *neighbor; + + ASSERT(((neighbor = AVL_PREV(t, obj)) == NULL) || + (t->avl_compar(obj, neighbor) >= 0)); + + neighbor = AVL_NEXT(t, obj); + if ((neighbor != NULL) && (t->avl_compar(obj, neighbor) > 0)) { + AVL_REINSERT(t, obj); + return (B_TRUE); + } + + return (B_FALSE); +} + +boolean_t +avl_update(avl_tree_t *t, void *obj) +{ + void *neighbor; + + neighbor = AVL_PREV(t, obj); + if ((neighbor != NULL) && (t->avl_compar(obj, neighbor) < 0)) { + AVL_REINSERT(t, obj); + return (B_TRUE); + } + + neighbor = AVL_NEXT(t, obj); + if ((neighbor != NULL) && (t->avl_compar(obj, neighbor) > 0)) { + AVL_REINSERT(t, obj); + return (B_TRUE); + } + + return (B_FALSE); +} + +void +avl_swap(avl_tree_t *tree1, avl_tree_t *tree2) +{ + avl_node_t *temp_node; + ulong_t temp_numnodes; + + ASSERT3P(tree1->avl_compar, ==, tree2->avl_compar); + ASSERT3U(tree1->avl_offset, ==, tree2->avl_offset); + ASSERT3U(tree1->avl_size, ==, tree2->avl_size); + + temp_node = tree1->avl_root; + temp_numnodes = tree1->avl_numnodes; + tree1->avl_root = tree2->avl_root; + tree1->avl_numnodes = tree2->avl_numnodes; + tree2->avl_root = temp_node; + tree2->avl_numnodes = temp_numnodes; +} + +/* + * initialize a new AVL tree + */ +void +avl_create(avl_tree_t *tree, int (*compar) (const void *, const void *), + size_t size, size_t offset) +{ + ASSERT(tree); + ASSERT(compar); + ASSERT(size > 0); + ASSERT(size >= offset + sizeof (avl_node_t)); +#ifdef _LP64 + ASSERT((offset & 0x7) == 0); +#endif + + tree->avl_compar = compar; + tree->avl_root = NULL; + tree->avl_numnodes = 0; + tree->avl_size = size; + tree->avl_offset = offset; +} + +/* + * Delete a tree. + */ +/* ARGSUSED */ +void +avl_destroy(avl_tree_t *tree) +{ + ASSERT(tree); + ASSERT(tree->avl_numnodes == 0); + ASSERT(tree->avl_root == NULL); +} + + +/* + * Return the number of nodes in an AVL tree. + */ +ulong_t +avl_numnodes(avl_tree_t *tree) +{ + ASSERT(tree); + return (tree->avl_numnodes); +} + +boolean_t +avl_is_empty(avl_tree_t *tree) +{ + ASSERT(tree); + return (tree->avl_numnodes == 0); +} + +#define CHILDBIT (1L) + +/* + * Post-order tree walk used to visit all tree nodes and destroy the tree + * in post order. This is used for removing all the nodes from a tree without + * paying any cost for rebalancing it. + * + * example: + * + * void *cookie = NULL; + * my_data_t *node; + * + * while ((node = avl_destroy_nodes(tree, &cookie)) != NULL) + * free(node); + * avl_destroy(tree); + * + * The cookie is really an avl_node_t to the current node's parent and + * an indication of which child you looked at last. + * + * On input, a cookie value of CHILDBIT indicates the tree is done. + */ +void * +avl_destroy_nodes(avl_tree_t *tree, void **cookie) +{ + avl_node_t *node; + avl_node_t *parent; + int child; + void *first; + size_t off = tree->avl_offset; + + /* + * Initial calls go to the first node or it's right descendant. 
+ */ + if (*cookie == NULL) { + first = avl_first(tree); + + /* + * deal with an empty tree + */ + if (first == NULL) { + *cookie = (void *)CHILDBIT; + return (NULL); + } + + node = AVL_DATA2NODE(first, off); + parent = AVL_XPARENT(node); + goto check_right_side; + } + + /* + * If there is no parent to return to we are done. + */ + parent = (avl_node_t *)((uintptr_t)(*cookie) & ~CHILDBIT); + if (parent == NULL) { + if (tree->avl_root != NULL) { + ASSERT(tree->avl_numnodes == 1); + tree->avl_root = NULL; + tree->avl_numnodes = 0; + } + return (NULL); + } + + /* + * Remove the child pointer we just visited from the parent and tree. + */ + child = (uintptr_t)(*cookie) & CHILDBIT; + parent->avl_child[child] = NULL; + ASSERT(tree->avl_numnodes > 1); + --tree->avl_numnodes; + + /* + * If we just did a right child or there isn't one, go up to parent. + */ + if (child == 1 || parent->avl_child[1] == NULL) { + node = parent; + parent = AVL_XPARENT(parent); + goto done; + } + + /* + * Do parent's right child, then leftmost descendent. + */ + node = parent->avl_child[1]; + while (node->avl_child[0] != NULL) { + parent = node; + node = node->avl_child[0]; + } + + /* + * If here, we moved to a left child. It may have one + * child on the right (when balance == +1). + */ +check_right_side: + if (node->avl_child[1] != NULL) { + ASSERT(AVL_XBALANCE(node) == 1); + parent = node; + node = node->avl_child[1]; + ASSERT(node->avl_child[0] == NULL && + node->avl_child[1] == NULL); + } else { + ASSERT(AVL_XBALANCE(node) <= 0); + } + +done: + if (parent == NULL) { + *cookie = (void *)CHILDBIT; + ASSERT(node == tree->avl_root); + } else { + *cookie = (void *)((uintptr_t)parent | AVL_XCHILD(node)); + } + + return (AVL_NODE2DATA(node, off)); +} + +#if defined(_KERNEL) + +static int __init +avl_init(void) +{ + return (0); +} + +static void __exit +avl_fini(void) +{ +} + +module_init(avl_init); +module_exit(avl_fini); +#endif + +ZFS_MODULE_DESCRIPTION("Generic AVL tree implementation"); +ZFS_MODULE_AUTHOR(ZFS_META_AUTHOR); +ZFS_MODULE_LICENSE(ZFS_META_LICENSE); +ZFS_MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE); + +EXPORT_SYMBOL(avl_create); +EXPORT_SYMBOL(avl_find); +EXPORT_SYMBOL(avl_insert); +EXPORT_SYMBOL(avl_insert_here); +EXPORT_SYMBOL(avl_walk); +EXPORT_SYMBOL(avl_first); +EXPORT_SYMBOL(avl_last); +EXPORT_SYMBOL(avl_nearest); +EXPORT_SYMBOL(avl_add); +EXPORT_SYMBOL(avl_swap); +EXPORT_SYMBOL(avl_is_empty); +EXPORT_SYMBOL(avl_remove); +EXPORT_SYMBOL(avl_numnodes); +EXPORT_SYMBOL(avl_destroy_nodes); +EXPORT_SYMBOL(avl_destroy); +EXPORT_SYMBOL(avl_update_lt); +EXPORT_SYMBOL(avl_update_gt); +EXPORT_SYMBOL(avl_update); diff --git a/sys/contrib/openzfs/module/icp/Makefile.in b/sys/contrib/openzfs/module/icp/Makefile.in new file mode 100644 index 000000000000..7a01b2f08b8e --- /dev/null +++ b/sys/contrib/openzfs/module/icp/Makefile.in @@ -0,0 +1,96 @@ +ifneq ($(KBUILD_EXTMOD),) +src = @abs_srcdir@ +obj = @abs_builddir@ +icp_include = $(src)/include +else +icp_include = $(srctree)/$(src)/include +endif + +MODULE := icp + +obj-$(CONFIG_ZFS) := $(MODULE).o + +asflags-y := -I$(icp_include) +ccflags-y := -I$(icp_include) + +$(MODULE)-objs += illumos-crypto.o +$(MODULE)-objs += api/kcf_cipher.o +$(MODULE)-objs += api/kcf_digest.o +$(MODULE)-objs += api/kcf_mac.o +$(MODULE)-objs += api/kcf_miscapi.o +$(MODULE)-objs += api/kcf_ctxops.o +$(MODULE)-objs += core/kcf_callprov.o +$(MODULE)-objs += core/kcf_prov_tabs.o +$(MODULE)-objs += core/kcf_sched.o +$(MODULE)-objs += core/kcf_mech_tabs.o +$(MODULE)-objs += 
core/kcf_prov_lib.o +$(MODULE)-objs += spi/kcf_spi.o +$(MODULE)-objs += io/aes.o +$(MODULE)-objs += io/edonr_mod.o +$(MODULE)-objs += io/sha1_mod.o +$(MODULE)-objs += io/sha2_mod.o +$(MODULE)-objs += io/skein_mod.o +$(MODULE)-objs += os/modhash.o +$(MODULE)-objs += os/modconf.o +$(MODULE)-objs += algs/modes/cbc.o +$(MODULE)-objs += algs/modes/ccm.o +$(MODULE)-objs += algs/modes/ctr.o +$(MODULE)-objs += algs/modes/ecb.o +$(MODULE)-objs += algs/modes/gcm_generic.o +$(MODULE)-objs += algs/modes/gcm.o +$(MODULE)-objs += algs/modes/modes.o +$(MODULE)-objs += algs/aes/aes_impl_generic.o +$(MODULE)-objs += algs/aes/aes_impl.o +$(MODULE)-objs += algs/aes/aes_modes.o +$(MODULE)-objs += algs/edonr/edonr.o +$(MODULE)-objs += algs/sha1/sha1.o +$(MODULE)-objs += algs/sha2/sha2.o +$(MODULE)-objs += algs/sha1/sha1.o +$(MODULE)-objs += algs/skein/skein.o +$(MODULE)-objs += algs/skein/skein_block.o +$(MODULE)-objs += algs/skein/skein_iv.o + +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/aes/aeskey.o +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/aes/aes_amd64.o +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/aes/aes_aesni.o +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/modes/gcm_pclmulqdq.o +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/modes/aesni-gcm-x86_64.o +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/modes/ghash-x86_64.o +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/sha1/sha1-x86_64.o +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/sha2/sha256_impl.o +$(MODULE)-$(CONFIG_X86_64) += asm-x86_64/sha2/sha512_impl.o + +$(MODULE)-$(CONFIG_X86) += algs/modes/gcm_pclmulqdq.o +$(MODULE)-$(CONFIG_X86) += algs/aes/aes_impl_aesni.o +$(MODULE)-$(CONFIG_X86) += algs/aes/aes_impl_x86-64.o + +# Suppress objtool "can't find jump dest instruction at" warnings. They +# are caused by the constants which are defined in the text section of the +# assembly file using .byte instructions (e.g. bswap_mask). The objtool +# utility tries to interpret them as opcodes and obviously fails doing so. +OBJECT_FILES_NON_STANDARD_aesni-gcm-x86_64.o := y +OBJECT_FILES_NON_STANDARD_ghash-x86_64.o := y + +ICP_DIRS = \ + api \ + core \ + spi \ + io \ + os \ + algs \ + algs/aes \ + algs/edonr \ + algs/modes \ + algs/sha1 \ + algs/sha2 \ + algs/skein \ + asm-x86_64 \ + asm-x86_64/aes \ + asm-x86_64/modes \ + asm-x86_64/sha1 \ + asm-x86_64/sha2 \ + asm-i386 \ + asm-generic + +all: + mkdir -p $(ICP_DIRS) diff --git a/sys/contrib/openzfs/module/icp/algs/aes/aes_impl.c b/sys/contrib/openzfs/module/icp/algs/aes/aes_impl.c new file mode 100644 index 000000000000..037be0db60d7 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/aes/aes_impl.c @@ -0,0 +1,443 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 
+ */ + +#include <sys/zfs_context.h> +#include <sys/crypto/icp.h> +#include <sys/crypto/spi.h> +#include <sys/simd.h> +#include <modes/modes.h> +#include <aes/aes_impl.h> + +/* + * Initialize AES encryption and decryption key schedules. + * + * Parameters: + * cipherKey User key + * keyBits AES key size (128, 192, or 256 bits) + * keysched AES key schedule to be initialized, of type aes_key_t. + * Allocated by aes_alloc_keysched(). + */ +void +aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched) +{ + const aes_impl_ops_t *ops = aes_impl_get_ops(); + aes_key_t *newbie = keysched; + uint_t keysize, i, j; + union { + uint64_t ka64[4]; + uint32_t ka32[8]; + } keyarr; + + switch (keyBits) { + case 128: + newbie->nr = 10; + break; + + case 192: + newbie->nr = 12; + break; + + case 256: + newbie->nr = 14; + break; + + default: + /* should never get here */ + return; + } + keysize = CRYPTO_BITS2BYTES(keyBits); + + /* + * Generic C implementation requires byteswap for little endian + * machines, various accelerated implementations for various + * architectures may not. + */ + if (!ops->needs_byteswap) { + /* no byteswap needed */ + if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) { + for (i = 0, j = 0; j < keysize; i++, j += 8) { + /* LINTED: pointer alignment */ + keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]); + } + } else { + bcopy(cipherKey, keyarr.ka32, keysize); + } + } else { + /* byte swap */ + for (i = 0, j = 0; j < keysize; i++, j += 4) { + keyarr.ka32[i] = + htonl(*(uint32_t *)(void *)&cipherKey[j]); + } + } + + ops->generate(newbie, keyarr.ka32, keyBits); + newbie->ops = ops; + + /* + * Note: if there are systems that need the AES_64BIT_KS type in the + * future, move setting key schedule type to individual implementations + */ + newbie->type = AES_32BIT_KS; +} + + +/* + * Encrypt one block using AES. + * Align if needed and (for x86 32-bit only) byte-swap. + * + * Parameters: + * ks Key schedule, of type aes_key_t + * pt Input block (plain text) + * ct Output block (crypto text). Can overlap with pt + */ +int +aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct) +{ + aes_key_t *ksch = (aes_key_t *)ks; + const aes_impl_ops_t *ops = ksch->ops; + + if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t)) && !ops->needs_byteswap) { + /* LINTED: pointer alignment */ + ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, + /* LINTED: pointer alignment */ + (uint32_t *)pt, (uint32_t *)ct); + } else { + uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)]; + + /* Copy input block into buffer */ + if (ops->needs_byteswap) { + buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]); + buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]); + buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]); + buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]); + } else + bcopy(pt, &buffer, AES_BLOCK_LEN); + + ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer); + + /* Copy result from buffer to output block */ + if (ops->needs_byteswap) { + *(uint32_t *)(void *)&ct[0] = htonl(buffer[0]); + *(uint32_t *)(void *)&ct[4] = htonl(buffer[1]); + *(uint32_t *)(void *)&ct[8] = htonl(buffer[2]); + *(uint32_t *)(void *)&ct[12] = htonl(buffer[3]); + } else + bcopy(&buffer, ct, AES_BLOCK_LEN); + } + return (CRYPTO_SUCCESS); +} + + +/* + * Decrypt one block using AES. + * Align and byte-swap if needed. + * + * Parameters: + * ks Key schedule, of type aes_key_t + * ct Input block (crypto text) + * pt Output block (plain text). 
Can overlap with pt + */ +int +aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt) +{ + aes_key_t *ksch = (aes_key_t *)ks; + const aes_impl_ops_t *ops = ksch->ops; + + if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t)) && !ops->needs_byteswap) { + /* LINTED: pointer alignment */ + ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, + /* LINTED: pointer alignment */ + (uint32_t *)ct, (uint32_t *)pt); + } else { + uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)]; + + /* Copy input block into buffer */ + if (ops->needs_byteswap) { + buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]); + buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]); + buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]); + buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]); + } else + bcopy(ct, &buffer, AES_BLOCK_LEN); + + ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer); + + /* Copy result from buffer to output block */ + if (ops->needs_byteswap) { + *(uint32_t *)(void *)&pt[0] = htonl(buffer[0]); + *(uint32_t *)(void *)&pt[4] = htonl(buffer[1]); + *(uint32_t *)(void *)&pt[8] = htonl(buffer[2]); + *(uint32_t *)(void *)&pt[12] = htonl(buffer[3]); + } else + bcopy(&buffer, pt, AES_BLOCK_LEN); + } + return (CRYPTO_SUCCESS); +} + + +/* + * Allocate key schedule for AES. + * + * Return the pointer and set size to the number of bytes allocated. + * Memory allocated must be freed by the caller when done. + * + * Parameters: + * size Size of key schedule allocated, in bytes + * kmflag Flag passed to kmem_alloc(9F); ignored in userland. + */ +/* ARGSUSED */ +void * +aes_alloc_keysched(size_t *size, int kmflag) +{ + aes_key_t *keysched; + + keysched = (aes_key_t *)kmem_alloc(sizeof (aes_key_t), kmflag); + if (keysched != NULL) { + *size = sizeof (aes_key_t); + return (keysched); + } + return (NULL); +} + +/* AES implementation that contains the fastest methods */ +static aes_impl_ops_t aes_fastest_impl = { + .name = "fastest" +}; + +/* All compiled in implementations */ +const aes_impl_ops_t *aes_all_impl[] = { + &aes_generic_impl, +#if defined(__x86_64) + &aes_x86_64_impl, +#endif +#if defined(__x86_64) && defined(HAVE_AES) + &aes_aesni_impl, +#endif +}; + +/* Indicate that benchmark has been completed */ +static boolean_t aes_impl_initialized = B_FALSE; + +/* Select aes implementation */ +#define IMPL_FASTEST (UINT32_MAX) +#define IMPL_CYCLE (UINT32_MAX-1) + +#define AES_IMPL_READ(i) (*(volatile uint32_t *) &(i)) + +static uint32_t icp_aes_impl = IMPL_FASTEST; +static uint32_t user_sel_impl = IMPL_FASTEST; + +/* Hold all supported implementations */ +static size_t aes_supp_impl_cnt = 0; +static aes_impl_ops_t *aes_supp_impl[ARRAY_SIZE(aes_all_impl)]; + +/* + * Returns the AES operations for encrypt/decrypt/key setup. When a + * SIMD implementation is not allowed in the current context, then + * fallback to the fastest generic implementation. 
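 *
 * Editor's illustrative sketch (not part of the original source): the ops
 * selected here back the public entry points above. A caller typically
 * allocates and initializes one key schedule, then encrypts or decrypts
 * individual AES_BLOCK_LEN (16-byte) blocks. The key/pt/ct buffers are
 * hypothetical, and a sleeping allocation (KM_SLEEP) is assumed.
 *
 *	size_t ks_size;
 *	void *ks = aes_alloc_keysched(&ks_size, KM_SLEEP);
 *
 *	if (ks != NULL) {
 *		aes_init_keysched(key, 256, ks);	// 256-bit user key
 *		(void) aes_encrypt_block(ks, pt, ct);	// one 16-byte block
 *		(void) aes_decrypt_block(ks, ct, pt);
 *		kmem_free(ks, ks_size);
 *	}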
+ */ +const aes_impl_ops_t * +aes_impl_get_ops(void) +{ + if (!kfpu_allowed()) + return (&aes_generic_impl); + + const aes_impl_ops_t *ops = NULL; + const uint32_t impl = AES_IMPL_READ(icp_aes_impl); + + switch (impl) { + case IMPL_FASTEST: + ASSERT(aes_impl_initialized); + ops = &aes_fastest_impl; + break; + case IMPL_CYCLE: + /* Cycle through supported implementations */ + ASSERT(aes_impl_initialized); + ASSERT3U(aes_supp_impl_cnt, >, 0); + static size_t cycle_impl_idx = 0; + size_t idx = (++cycle_impl_idx) % aes_supp_impl_cnt; + ops = aes_supp_impl[idx]; + break; + default: + ASSERT3U(impl, <, aes_supp_impl_cnt); + ASSERT3U(aes_supp_impl_cnt, >, 0); + if (impl < ARRAY_SIZE(aes_all_impl)) + ops = aes_supp_impl[impl]; + break; + } + + ASSERT3P(ops, !=, NULL); + + return (ops); +} + +/* + * Initialize all supported implementations. + */ +void +aes_impl_init(void) +{ + aes_impl_ops_t *curr_impl; + int i, c; + + /* Move supported implementations into aes_supp_impls */ + for (i = 0, c = 0; i < ARRAY_SIZE(aes_all_impl); i++) { + curr_impl = (aes_impl_ops_t *)aes_all_impl[i]; + + if (curr_impl->is_supported()) + aes_supp_impl[c++] = (aes_impl_ops_t *)curr_impl; + } + aes_supp_impl_cnt = c; + + /* + * Set the fastest implementation given the assumption that the + * hardware accelerated version is the fastest. + */ +#if defined(__x86_64) +#if defined(HAVE_AES) + if (aes_aesni_impl.is_supported()) { + memcpy(&aes_fastest_impl, &aes_aesni_impl, + sizeof (aes_fastest_impl)); + } else +#endif + { + memcpy(&aes_fastest_impl, &aes_x86_64_impl, + sizeof (aes_fastest_impl)); + } +#else + memcpy(&aes_fastest_impl, &aes_generic_impl, + sizeof (aes_fastest_impl)); +#endif + + strlcpy(aes_fastest_impl.name, "fastest", AES_IMPL_NAME_MAX); + + /* Finish initialization */ + atomic_swap_32(&icp_aes_impl, user_sel_impl); + aes_impl_initialized = B_TRUE; +} + +static const struct { + char *name; + uint32_t sel; +} aes_impl_opts[] = { + { "cycle", IMPL_CYCLE }, + { "fastest", IMPL_FASTEST }, +}; + +/* + * Function sets desired aes implementation. + * + * If we are called before init(), user preference will be saved in + * user_sel_impl, and applied in later init() call. This occurs when module + * parameter is specified on module load. Otherwise, directly update + * icp_aes_impl. + * + * @val Name of aes implementation to use + * @param Unused. 
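 *
 * Editor's illustrative sketch (not part of the original source): "fastest"
 * and "cycle" are always accepted; a concrete name such as "aesni" is
 * accepted only once aes_impl_init() has recorded it as supported.
 *
 *	if (aes_impl_set("aesni") != 0)
 *		(void) aes_impl_set("fastest");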
+ */ +int +aes_impl_set(const char *val) +{ + int err = -EINVAL; + char req_name[AES_IMPL_NAME_MAX]; + uint32_t impl = AES_IMPL_READ(user_sel_impl); + size_t i; + + /* sanitize input */ + i = strnlen(val, AES_IMPL_NAME_MAX); + if (i == 0 || i >= AES_IMPL_NAME_MAX) + return (err); + + strlcpy(req_name, val, AES_IMPL_NAME_MAX); + while (i > 0 && isspace(req_name[i-1])) + i--; + req_name[i] = '\0'; + + /* Check mandatory options */ + for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) { + if (strcmp(req_name, aes_impl_opts[i].name) == 0) { + impl = aes_impl_opts[i].sel; + err = 0; + break; + } + } + + /* check all supported impl if init() was already called */ + if (err != 0 && aes_impl_initialized) { + /* check all supported implementations */ + for (i = 0; i < aes_supp_impl_cnt; i++) { + if (strcmp(req_name, aes_supp_impl[i]->name) == 0) { + impl = i; + err = 0; + break; + } + } + } + + if (err == 0) { + if (aes_impl_initialized) + atomic_swap_32(&icp_aes_impl, impl); + else + atomic_swap_32(&user_sel_impl, impl); + } + + return (err); +} + +#if defined(_KERNEL) && defined(__linux__) + +static int +icp_aes_impl_set(const char *val, zfs_kernel_param_t *kp) +{ + return (aes_impl_set(val)); +} + +static int +icp_aes_impl_get(char *buffer, zfs_kernel_param_t *kp) +{ + int i, cnt = 0; + char *fmt; + const uint32_t impl = AES_IMPL_READ(icp_aes_impl); + + ASSERT(aes_impl_initialized); + + /* list mandatory options */ + for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) { + fmt = (impl == aes_impl_opts[i].sel) ? "[%s] " : "%s "; + cnt += sprintf(buffer + cnt, fmt, aes_impl_opts[i].name); + } + + /* list all supported implementations */ + for (i = 0; i < aes_supp_impl_cnt; i++) { + fmt = (i == impl) ? "[%s] " : "%s "; + cnt += sprintf(buffer + cnt, fmt, aes_supp_impl[i]->name); + } + + return (cnt); +} + +module_param_call(icp_aes_impl, icp_aes_impl_set, icp_aes_impl_get, + NULL, 0644); +MODULE_PARM_DESC(icp_aes_impl, "Select aes implementation."); +#endif diff --git a/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_aesni.c b/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_aesni.c new file mode 100644 index 000000000000..4b5eefd71b17 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_aesni.c @@ -0,0 +1,124 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 
+ */ + +#if defined(__x86_64) && defined(HAVE_AES) + +#include <sys/simd.h> +#include <sys/types.h> + +/* These functions are used to execute AES-NI instructions: */ +extern int rijndael_key_setup_enc_intel(uint32_t rk[], + const uint32_t cipherKey[], uint64_t keyBits); +extern int rijndael_key_setup_dec_intel(uint32_t rk[], + const uint32_t cipherKey[], uint64_t keyBits); +extern void aes_encrypt_intel(const uint32_t rk[], int Nr, + const uint32_t pt[4], uint32_t ct[4]); +extern void aes_decrypt_intel(const uint32_t rk[], int Nr, + const uint32_t ct[4], uint32_t pt[4]); + + +#include <aes/aes_impl.h> + +/* + * Expand the 32-bit AES cipher key array into the encryption and decryption + * key schedules. + * + * Parameters: + * key AES key schedule to be initialized + * keyarr32 User key + * keyBits AES key size (128, 192, or 256 bits) + */ +static void +aes_aesni_generate(aes_key_t *key, const uint32_t *keyarr32, int keybits) +{ + kfpu_begin(); + key->nr = rijndael_key_setup_enc_intel(&(key->encr_ks.ks32[0]), + keyarr32, keybits); + key->nr = rijndael_key_setup_dec_intel(&(key->decr_ks.ks32[0]), + keyarr32, keybits); + kfpu_end(); +} + +/* + * Encrypt one block of data. The block is assumed to be an array + * of four uint32_t values, so copy for alignment (and byte-order + * reversal for little endian systems might be necessary on the + * input and output byte streams. + * The size of the key schedule depends on the number of rounds + * (which can be computed from the size of the key), i.e. 4*(Nr + 1). + * + * Parameters: + * rk Key schedule, of aes_ks_t (60 32-bit integers) + * Nr Number of rounds + * pt Input block (plain text) + * ct Output block (crypto text). Can overlap with pt + */ +static void +aes_aesni_encrypt(const uint32_t rk[], int Nr, const uint32_t pt[4], + uint32_t ct[4]) +{ + kfpu_begin(); + aes_encrypt_intel(rk, Nr, pt, ct); + kfpu_end(); +} + +/* + * Decrypt one block of data. The block is assumed to be an array + * of four uint32_t values, so copy for alignment (and byte-order + * reversal for little endian systems might be necessary on the + * input and output byte streams. + * The size of the key schedule depends on the number of rounds + * (which can be computed from the size of the key), i.e. 4*(Nr + 1). + * + * Parameters: + * rk Key schedule, of aes_ks_t (60 32-bit integers) + * Nr Number of rounds + * ct Input block (crypto text) + * pt Output block (plain text). Can overlap with pt + */ +static void +aes_aesni_decrypt(const uint32_t rk[], int Nr, const uint32_t ct[4], + uint32_t pt[4]) +{ + kfpu_begin(); + aes_decrypt_intel(rk, Nr, ct, pt); + kfpu_end(); +} + +static boolean_t +aes_aesni_will_work(void) +{ + return (kfpu_allowed() && zfs_aes_available()); +} + +const aes_impl_ops_t aes_aesni_impl = { + .generate = &aes_aesni_generate, + .encrypt = &aes_aesni_encrypt, + .decrypt = &aes_aesni_decrypt, + .is_supported = &aes_aesni_will_work, + .needs_byteswap = B_FALSE, + .name = "aesni" +}; + +#endif /* defined(__x86_64) && defined(HAVE_AES) */ diff --git a/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_generic.c b/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_generic.c new file mode 100644 index 000000000000..427c096c6ab3 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_generic.c @@ -0,0 +1,1242 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
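Every backend exports the same aes_impl_ops_t vtable, so callers never name aes_aesni_encrypt() directly; they go through whatever aes_impl_get_ops() resolves to at that moment. The AES-NI variant brackets each call with kfpu_begin()/kfpu_end() to save and restore SIMD state, which is also why aes_impl_get_ops() falls back to the generic implementation whenever kfpu_allowed() is false. A sketch of a hypothetical caller (the real ICP call sites are elsewhere and not part of this diff):

	/*
	 * Sketch only: expand a 256-bit key and encrypt one 16-byte block
	 * through the currently selected implementation. Byte-order
	 * handling signalled by ops->needs_byteswap is omitted.
	 */
	const aes_impl_ops_t *ops = aes_impl_get_ops();
	const uint32_t keyarr32[8] = { 0 };	/* caller's key material */
	uint32_t pt[4] = { 0 }, ct[4];
	aes_key_t key;

	ops->generate(&key, keyarr32, 256);	/* fills encr_ks and decr_ks */
	ops->encrypt(key.encr_ks.ks32, key.nr, pt, ct);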
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + */ + +#include <aes/aes_impl.h> + +/* + * This file is derived from the file rijndael-alg-fst.c taken from the + * "optimized C code v3.0" on the "rijndael home page" + * http://www.iaik.tu-graz.ac.at/research/krypto/AES/old/~rijmen/rijndael/ + * pointed by the NIST web-site http://csrc.nist.gov/archive/aes/ + * + * The following note is from the original file: + */ + +/* + * rijndael-alg-fst.c + * + * @version 3.0 (December 2000) + * + * Optimised ANSI C code for the Rijndael cipher (now AES) + * + * @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be> + * @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be> + * @author Paulo Barreto <paulo.barreto@terra.com.br> + * + * This code is hereby placed in the public domain. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* + * Constant tables + */ + +/* + * Te0[x] = S [x].[02, 01, 01, 03]; + * Te1[x] = S [x].[03, 02, 01, 01]; + * Te2[x] = S [x].[01, 03, 02, 01]; + * Te3[x] = S [x].[01, 01, 03, 02]; + * Te4[x] = S [x].[01, 01, 01, 01]; + * + * Td0[x] = Si[x].[0e, 09, 0d, 0b]; + * Td1[x] = Si[x].[0b, 0e, 09, 0d]; + * Td2[x] = Si[x].[0d, 0b, 0e, 09]; + * Td3[x] = Si[x].[09, 0d, 0b, 0e]; + * Td4[x] = Si[x].[01, 01, 01, 01]; + */ + +/* Encrypt Sbox constants (for the substitute bytes operation) */ + +static const uint32_t Te0[256] = +{ + 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, + 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, + 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, + 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, + 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, + 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, + 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, + 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, + 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, + 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, + 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, + 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, + 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, + 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, + 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, + 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, + 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, + 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, + 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, + 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, + 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, + 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, + 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, + 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, + 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, + 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, + 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, + 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, + 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU, + 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, + 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, + 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, + 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, + 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U, + 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, + 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, + 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, + 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, + 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, + 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, + 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, + 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, + 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, + 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, + 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, + 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, + 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, + 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, + 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, + 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, + 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, + 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, + 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU, + 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, + 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, + 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, + 
0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, + 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, + 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, + 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, + 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, + 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, + 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, + 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU +}; + + +static const uint32_t Te1[256] = +{ + 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, + 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, + 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, + 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, + 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, + 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, + 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, + 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, + 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, + 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, + 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, + 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, + 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, + 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, + 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, + 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, + 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, + 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, + 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, + 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, + 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, + 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, + 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, + 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, + 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, + 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, + 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, + 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, + 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, + 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, + 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, + 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, + 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, + 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, + 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, + 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, + 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, + 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, + 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, + 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, + 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, + 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, + 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, + 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, + 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, + 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, + 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, + 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, + 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, + 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, + 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, + 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, + 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, + 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, + 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, + 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, + 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, + 
0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, + 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, + 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, + 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, + 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, + 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, + 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U +}; + + +static const uint32_t Te2[256] = +{ + 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, + 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, + 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, + 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, + 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, + 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, + 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, + 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, + 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, + 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, + 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, + 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, + 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, + 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, + 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, + 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, + 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, + 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, + 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, + 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, + 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, + 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, + 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, + 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, + 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, + 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, + 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, + 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, + 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, + 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, + 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, + 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, + 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, + 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, + 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, + 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, + 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, + 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, + 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, + 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, + 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, + 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, + 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, + 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, + 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, + 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, + 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, + 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, + 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, + 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, + 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, + 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, + 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, + 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, + 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, + 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, + 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, + 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, + 
0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, + 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, + 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, + 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, + 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, + 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U +}; + + +static const uint32_t Te3[256] = +{ + 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, + 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, + 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, + 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, + 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, + 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, + 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, + 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, + 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, + 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, + 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, + 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, + 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, + 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, + 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, + 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, + 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, + 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, + 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, + 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, + 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, + 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, + 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, + 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, + 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, + 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, + 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, + 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, + 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, + 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, + 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, + 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, + 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, + 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, + 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, + 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, + 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, + 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, + 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, + 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, + 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, + 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, + 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, + 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, + 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, + 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, + 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, + 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, + 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, + 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, + 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, + 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, + 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, + 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, + 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, + 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, + 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, + 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, + 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, + 
0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, + 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, + 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, + 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, + 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU +}; + +static const uint32_t Te4[256] = +{ + 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU, + 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U, + 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU, + 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U, + 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU, + 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U, + 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU, + 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U, + 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U, + 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU, + 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U, + 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U, + 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U, + 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU, + 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U, + 0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U, + 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU, + 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U, + 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U, + 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U, + 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU, + 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU, + 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U, + 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU, + 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU, + 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U, + 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU, + 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U, + 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU, + 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U, + 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U, + 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U, + 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU, + 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U, + 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU, + 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U, + 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU, + 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U, + 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U, + 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU, + 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU, + 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU, + 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U, + 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U, + 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU, + 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U, + 0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU, + 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U, + 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU, + 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U, + 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU, + 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU, + 0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U, + 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU, + 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U, + 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU, + 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U, + 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U, + 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U, + 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU, + 
0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU, + 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U, + 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU, + 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U +}; + +/* Decrypt Sbox constants (for the substitute bytes operation) */ + +static const uint32_t Td0[256] = +{ + 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, + 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, + 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U, + 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU, + 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, + 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, + 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU, + 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U, + 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU, + 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, + 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, + 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, + 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U, + 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU, + 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, + 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, + 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, + 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU, + 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U, + 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, + 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, + 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU, + 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U, + 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU, + 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, + 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, + 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U, + 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU, + 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU, + 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, + 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, + 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, + 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU, + 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U, + 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, + 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, + 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, + 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U, + 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U, + 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, + 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, + 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, + 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U, + 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U, + 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, + 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, + 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, + 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U, + 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U, + 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, + 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, + 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, + 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU, + 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU, + 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, + 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, + 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, + 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU, + 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU, + 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 
0x7844db86U, + 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, + 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U, + 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U, + 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U +}; + +static const uint32_t Td1[256] = +{ + 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, + 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U, + 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU, + 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U, + 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, + 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, + 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U, + 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U, + 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U, + 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, + 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, + 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, + 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U, + 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU, + 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, + 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, + 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, + 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU, + 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU, + 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, + 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, + 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U, + 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU, + 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU, + 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, + 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, + 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U, + 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU, + 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U, + 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, + 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, + 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, + 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U, + 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU, + 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, + 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, + 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, + 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U, + 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U, + 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, + 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, + 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, + 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U, + 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU, + 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, + 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, + 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, + 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U, + 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU, + 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, + 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, + 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, + 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U, + 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U, + 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U, + 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, + 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, + 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U, + 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U, + 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, + 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 
0x5fc2a340U, + 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, + 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U, + 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U +}; + +static const uint32_t Td2[256] = +{ + 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, + 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, + 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U, + 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U, + 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU, + 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, + 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, + 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U, + 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U, + 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, + 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, + 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, + 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU, + 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U, + 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, + 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, + 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, + 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U, + 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U, + 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, + 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, + 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, + 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U, + 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U, + 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, + 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, + 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU, + 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U, + 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU, + 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, + 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, + 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, + 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU, + 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU, + 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, + 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, + 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, + 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U, + 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U, + 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, + 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, + 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, + 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU, + 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U, + 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, + 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, + 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, + 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U, + 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U, + 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U, + 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, + 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, + 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U, + 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U, + 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, + 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, + 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, + 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U, + 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U, + 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, + 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, + 0xc372161dU, 0x250cbce2U, 0x498b283cU, 
0x9541ff0dU, + 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U, + 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U +}; + +static const uint32_t Td3[256] = +{ + 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, + 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, + 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U, + 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U, + 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, + 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, + 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U, + 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU, + 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U, + 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, + 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, + 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, + 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U, + 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U, + 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, + 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, + 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, + 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U, + 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U, + 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, + 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, + 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, + 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U, + 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U, + 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, + 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, + 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U, + 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U, + 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU, + 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU, + 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, + 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, + 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U, + 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU, + 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, + 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, + 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, + 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U, + 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U, + 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, + 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, + 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, + 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U, + 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U, + 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, + 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, + 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, + 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU, + 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U, + 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, + 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, + 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, + 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U, + 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U, + 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, + 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, + 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, + 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU, + 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U, + 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, + 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, + 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, + 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 
0x56c19064U, + 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U +}; + +static const uint32_t Td4[256] = +{ + 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U, + 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U, + 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU, + 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU, + 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U, + 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U, + 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U, + 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU, + 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U, + 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU, + 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU, + 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU, + 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U, + 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U, + 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U, + 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U, + 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U, + 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U, + 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU, + 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U, + 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U, + 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU, + 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U, + 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U, + 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U, + 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU, + 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U, + 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U, + 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU, + 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U, + 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U, + 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU, + 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U, + 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU, + 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU, + 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U, + 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U, + 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U, + 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U, + 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU, + 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U, + 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U, + 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU, + 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU, + 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU, + 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U, + 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU, + 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U, + 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U, + 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U, + 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U, + 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU, + 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U, + 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU, + 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU, + 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU, + 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU, + 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U, + 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU, + 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U, + 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU, + 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U, + 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U, + 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 
0x7d7d7d7dU +}; + +/* Rcon is Round Constant; used for encryption key expansion */ +static const uint32_t rcon[RC_LENGTH] = +{ + /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ + 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000, + 0x1B000000, 0x36000000 +}; + + +/* + * Expand the cipher key into the encryption key schedule. + * + * Return the number of rounds for the given cipher key size. + * The size of the key schedule depends on the number of rounds + * (which can be computed from the size of the key), i.e. 4*(Nr + 1). + * + * Parameters: + * rk AES key schedule 32-bit array to be initialized + * cipherKey User key + * keyBits AES key size (128, 192, or 256 bits) + */ +static int +rijndael_key_setup_enc(uint32_t rk[], const uint32_t cipherKey[], + int keyBits) +{ + int i = 0; + uint32_t temp; + + rk[0] = cipherKey[0]; + rk[1] = cipherKey[1]; + rk[2] = cipherKey[2]; + rk[3] = cipherKey[3]; + + if (keyBits == 128) { + for (;;) { + temp = rk[3]; + rk[4] = rk[0] ^ + (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (Te4[temp & 0xff] & 0x0000ff00) ^ + (Te4[temp >> 24] & 0x000000ff) ^ + rcon[i]; + rk[5] = rk[1] ^ rk[4]; + rk[6] = rk[2] ^ rk[5]; + rk[7] = rk[3] ^ rk[6]; + + if (++i == 10) { + return (10); + } + rk += 4; + } + } + + rk[4] = cipherKey[4]; + rk[5] = cipherKey[5]; + + if (keyBits == 192) { + for (;;) { + temp = rk[5]; + rk[6] = rk[0] ^ + (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (Te4[temp & 0xff] & 0x0000ff00) ^ + (Te4[temp >> 24] & 0x000000ff) ^ + rcon[i]; + rk[7] = rk[1] ^ rk[6]; + rk[8] = rk[2] ^ rk[7]; + rk[9] = rk[3] ^ rk[8]; + + if (++i == 8) { + return (12); + } + + rk[10] = rk[4] ^ rk[9]; + rk[11] = rk[5] ^ rk[10]; + rk += 6; + } + } + + rk[6] = cipherKey[6]; + rk[7] = cipherKey[7]; + + if (keyBits == 256) { + for (;;) { + temp = rk[7]; + rk[8] = rk[0] ^ + (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (Te4[temp & 0xff] & 0x0000ff00) ^ + (Te4[temp >> 24] & 0x000000ff) ^ + rcon[i]; + rk[9] = rk[1] ^ rk[8]; + rk[10] = rk[2] ^ rk[9]; + rk[11] = rk[3] ^ rk[10]; + + if (++i == 7) { + return (14); + } + temp = rk[11]; + rk[12] = rk[4] ^ + (Te4[temp >> 24] & 0xff000000) ^ + (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ + (Te4[temp & 0xff] & 0x000000ff); + rk[13] = rk[5] ^ rk[12]; + rk[14] = rk[6] ^ rk[13]; + rk[15] = rk[7] ^ rk[14]; + + rk += 8; + } + } + + return (0); +} + +/* + * Expand the cipher key into the decryption key schedule. + * Return the number of rounds for the given cipher key size. + * The size of the key schedule depends on the number of rounds + * (which can be computed from the size of the key), i.e. 4*(Nr + 1). 
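The Te*/Td* arrays above are the construction rule quoted before them, evaluated byte by byte in GF(2^8). Checking the first entry against the AES S-box value S[0x00] = 0x63:

	{02}.0x63 = 0xc6                 (xtime: shift left, no reduction, since the top bit of 0x63 is clear)
	{03}.0x63 = 0xc6 ^ 0x63 = 0xa5
	Te0[0x00] = {02}.S | S | S | {03}.S = 0xc66363a5   (the first word of Te0)
	Te4[0x00] = 0x63636363, Td4[0x00] = 0x52525252     (S-box and inverse S-box replicated)

so Te4 and Td4 carry only the substitution step, which is exactly what the final encryption and decryption rounds, and the rcon-driven key expansion above, need.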
+ * + * Parameters: + * rk AES key schedule 32-bit array to be initialized + * cipherKey User key + * keyBits AES key size (128, 192, or 256 bits) + */ +static int +rijndael_key_setup_dec(uint32_t rk[], const uint32_t cipherKey[], int keyBits) +{ + int Nr, i, j; + uint32_t temp; + + /* expand the cipher key: */ + Nr = rijndael_key_setup_enc(rk, cipherKey, keyBits); + + /* invert the order of the round keys: */ + for (i = 0, j = 4 * Nr; i < j; i += 4, j -= 4) { + temp = rk[i]; + rk[i] = rk[j]; + rk[j] = temp; + temp = rk[i + 1]; + rk[i + 1] = rk[j + 1]; + rk[j + 1] = temp; + temp = rk[i + 2]; + rk[i + 2] = rk[j + 2]; + rk[j + 2] = temp; + temp = rk[i + 3]; + rk[i + 3] = rk[j + 3]; + rk[j + 3] = temp; + } + + /* + * apply the inverse MixColumn transform to all + * round keys but the first and the last: + */ + for (i = 1; i < Nr; i++) { + rk += 4; + rk[0] = Td0[Te4[rk[0] >> 24] & 0xff] ^ + Td1[Te4[(rk[0] >> 16) & 0xff] & 0xff] ^ + Td2[Te4[(rk[0] >> 8) & 0xff] & 0xff] ^ + Td3[Te4[rk[0] & 0xff] & 0xff]; + rk[1] = Td0[Te4[rk[1] >> 24] & 0xff] ^ + Td1[Te4[(rk[1] >> 16) & 0xff] & 0xff] ^ + Td2[Te4[(rk[1] >> 8) & 0xff] & 0xff] ^ + Td3[Te4[rk[1] & 0xff] & 0xff]; + rk[2] = Td0[Te4[rk[2] >> 24] & 0xff] ^ + Td1[Te4[(rk[2] >> 16) & 0xff] & 0xff] ^ + Td2[Te4[(rk[2] >> 8) & 0xff] & 0xff] ^ + Td3[Te4[rk[2] & 0xff] & 0xff]; + rk[3] = Td0[Te4[rk[3] >> 24] & 0xff] ^ + Td1[Te4[(rk[3] >> 16) & 0xff] & 0xff] ^ + Td2[Te4[(rk[3] >> 8) & 0xff] & 0xff] ^ + Td3[Te4[rk[3] & 0xff] & 0xff]; + } + + return (Nr); +} + +/* + * Expand the 32-bit AES cipher key array into the encryption and decryption + * key schedules. + * + * Parameters: + * key AES key schedule to be initialized + * keyarr32 User key + * keyBits AES key size (128, 192, or 256 bits) + */ +static void +aes_generic_generate(aes_key_t *key, const uint32_t *keyarr32, int keybits) +{ + key->nr = rijndael_key_setup_enc(&(key->encr_ks.ks32[0]), keyarr32, + keybits); + key->nr = rijndael_key_setup_dec(&(key->decr_ks.ks32[0]), keyarr32, + keybits); +} + +/* + * Encrypt one block of data. The block is assumed to be an array + * of four uint32_t values, so copy for alignment (and byte-order + * reversal for little endian systems might be necessary on the + * input and output byte streams. + * The size of the key schedule depends on the number of rounds + * (which can be computed from the size of the key), i.e. 4*(Nr + 1). + * + * Parameters: + * rk Key schedule, of aes_ks_t (60 32-bit integers) + * Nr Number of rounds + * pt Input block (plain text) + * ct Output block (crypto text). 
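rijndael_key_setup_enc() returns the round count Nr, and the schedule length follows the 4*(Nr + 1) rule stated in the comments:

	128-bit key -> Nr = 10 -> 44 32-bit words
	192-bit key -> Nr = 12 -> 52 32-bit words
	256-bit key -> Nr = 14 -> 60 32-bit words   (hence the 60-word aes_ks_t)

The decryption schedule built just above is the standard "equivalent inverse cipher" form: the encryption expansion is reused, the round keys are reversed in place, and InvMixColumns is applied (via the Td*[Te4[...]] composition) to every round key except the first and last, so aes_generic_decrypt() can keep the same table-driven round structure as the encrypt path.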
Can overlap with pt + */ +static void +aes_generic_encrypt(const uint32_t rk[], int Nr, const uint32_t pt[4], + uint32_t ct[4]) +{ + uint32_t s0, s1, s2, s3, t0, t1, t2, t3; + int r; + + /* + * map byte array block to cipher state + * and add initial round key: + */ + + s0 = pt[0] ^ rk[0]; + s1 = pt[1] ^ rk[1]; + s2 = pt[2] ^ rk[2]; + s3 = pt[3] ^ rk[3]; + + /* + * Nr - 1 full rounds: + */ + + r = Nr >> 1; + + for (;;) { + t0 = Te0[s0 >> 24] ^ + Te1[(s1 >> 16) & 0xff] ^ + Te2[(s2 >> 8) & 0xff] ^ + Te3[s3 & 0xff] ^ + rk[4]; + + t1 = Te0[s1 >> 24] ^ + Te1[(s2 >> 16) & 0xff] ^ + Te2[(s3 >> 8) & 0xff] ^ + Te3[s0 & 0xff] ^ + rk[5]; + + t2 = Te0[s2 >> 24] ^ + Te1[(s3 >> 16) & 0xff] ^ + Te2[(s0 >> 8) & 0xff] ^ + Te3[s1 & 0xff] ^ + rk[6]; + + t3 = Te0[s3 >> 24] ^ + Te1[(s0 >> 16) & 0xff] ^ + Te2[(s1 >> 8) & 0xff] ^ + Te3[s2 & 0xff] ^ + rk[7]; + + rk += 8; + + if (--r == 0) { + break; + } + + s0 = Te0[t0 >> 24] ^ + Te1[(t1 >> 16) & 0xff] ^ + Te2[(t2 >> 8) & 0xff] ^ + Te3[t3 & 0xff] ^ + rk[0]; + + s1 = Te0[t1 >> 24] ^ + Te1[(t2 >> 16) & 0xff] ^ + Te2[(t3 >> 8) & 0xff] ^ + Te3[t0 & 0xff] ^ + rk[1]; + + s2 = Te0[t2 >> 24] ^ + Te1[(t3 >> 16) & 0xff] ^ + Te2[(t0 >> 8) & 0xff] ^ + Te3[t1 & 0xff] ^ + rk[2]; + + s3 = Te0[t3 >> 24] ^ + Te1[(t0 >> 16) & 0xff] ^ + Te2[(t1 >> 8) & 0xff] ^ + Te3[t2 & 0xff] ^ + rk[3]; + } + + /* + * apply last round and + * map cipher state to byte array block: + */ + + s0 = (Te4[(t0 >> 24)] & 0xff000000) ^ + (Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ + (Te4[t3 & 0xff] & 0x000000ff) ^ + rk[0]; + ct[0] = s0; + + s1 = (Te4[(t1 >> 24)] & 0xff000000) ^ + (Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ + (Te4[t0 & 0xff] & 0x000000ff) ^ + rk[1]; + ct[1] = s1; + + s2 = (Te4[(t2 >> 24)] & 0xff000000) ^ + (Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ + (Te4[t1 & 0xff] & 0x000000ff) ^ + rk[2]; + ct[2] = s2; + + s3 = (Te4[(t3 >> 24)] & 0xff000000) ^ + (Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ + (Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ + (Te4[t2 & 0xff] & 0x000000ff) ^ + rk[3]; + ct[3] = s3; +} + + +/* + * Decrypt one block of data. The block is assumed to be an array + * of four uint32_t values, so copy for alignment (and byte-order + * reversal for little endian systems might be necessary on the + * input and output byte streams. + * The size of the key schedule depends on the number of rounds + * (which can be computed from the size of the key), i.e. 4*(Nr + 1). + * + * Parameters: + * rk Key schedule, of aes_ks_t (60 32-bit integers) + * Nr Number of rounds + * ct Input block (crypto text) + * pt Output block (plain text). 
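The loop in aes_generic_encrypt() above unrolls two rounds per iteration: with r = Nr >> 1, each pass computes t from s and, unless it is the final pass, s from t again, so

	full rounds inside the loop = 2*(r - 1) + 1 = Nr - 1   (for Nr = 10: r = 5, nine rounds)

and the Nr-th round, which omits MixColumns, is applied after the loop through Te4 with the per-byte masks selecting one output byte position each. The decrypt path that follows mirrors the same structure with the Td tables.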
Can overlap with pt + */ +static void +aes_generic_decrypt(const uint32_t rk[], int Nr, const uint32_t ct[4], + uint32_t pt[4]) +{ + uint32_t s0, s1, s2, s3, t0, t1, t2, t3; + int r; + + /* + * map byte array block to cipher state + * and add initial round key: + */ + s0 = ct[0] ^ rk[0]; + s1 = ct[1] ^ rk[1]; + s2 = ct[2] ^ rk[2]; + s3 = ct[3] ^ rk[3]; + + /* + * Nr - 1 full rounds: + */ + + r = Nr >> 1; + + for (;;) { + t0 = Td0[s0 >> 24] ^ + Td1[(s3 >> 16) & 0xff] ^ + Td2[(s2 >> 8) & 0xff] ^ + Td3[s1 & 0xff] ^ + rk[4]; + + t1 = Td0[s1 >> 24] ^ + Td1[(s0 >> 16) & 0xff] ^ + Td2[(s3 >> 8) & 0xff] ^ + Td3[s2 & 0xff] ^ + rk[5]; + + t2 = Td0[s2 >> 24] ^ + Td1[(s1 >> 16) & 0xff] ^ + Td2[(s0 >> 8) & 0xff] ^ + Td3[s3 & 0xff] ^ + rk[6]; + + t3 = Td0[s3 >> 24] ^ + Td1[(s2 >> 16) & 0xff] ^ + Td2[(s1 >> 8) & 0xff] ^ + Td3[s0 & 0xff] ^ + rk[7]; + + rk += 8; + + if (--r == 0) { + break; + } + + s0 = Td0[t0 >> 24] ^ + Td1[(t3 >> 16) & 0xff] ^ + Td2[(t2 >> 8) & 0xff] ^ + Td3[t1 & 0xff] ^ + rk[0]; + + s1 = Td0[t1 >> 24] ^ + Td1[(t0 >> 16) & 0xff] ^ + Td2[(t3 >> 8) & 0xff] ^ + Td3[t2 & 0xff] ^ + rk[1]; + + s2 = Td0[t2 >> 24] ^ + Td1[(t1 >> 16) & 0xff] ^ + Td2[(t0 >> 8) & 0xff] ^ + Td3[t3 & 0xff] ^ + rk[2]; + + s3 = Td0[t3 >> 24] ^ + Td1[(t2 >> 16) & 0xff] ^ + Td2[(t1 >> 8) & 0xff] ^ + Td3[t0 & 0xff] ^ + rk[3]; + } + + /* + * apply last round and + * map cipher state to byte array block: + */ + + s0 = (Td4[t0 >> 24] & 0xff000000) ^ + (Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ + (Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ + (Td4[t1 & 0xff] & 0x000000ff) ^ + rk[0]; + pt[0] = s0; + + s1 = (Td4[t1 >> 24] & 0xff000000) ^ + (Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ + (Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ + (Td4[t2 & 0xff] & 0x000000ff) ^ + rk[1]; + pt[1] = s1; + + s2 = (Td4[t2 >> 24] & 0xff000000) ^ + (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ + (Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ + (Td4[t3 & 0xff] & 0x000000ff) ^ + rk[2]; + pt[2] = s2; + + s3 = (Td4[t3 >> 24] & 0xff000000) ^ + (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ + (Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ + (Td4[t0 & 0xff] & 0x000000ff) ^ + rk[3]; + pt[3] = s3; +} + +static boolean_t +aes_generic_will_work(void) +{ + return (B_TRUE); +} + +/* + * For _LITTLE_ENDIAN machines, reverse every 4 bytes in the key. + * On _BIG_ENDIAN, copy the key without reversing bytes. + * + * SPARCv8/v9 uses a key schedule array with 64-bit elements. + * X86/AMD64 uses a key schedule array with 32-bit elements. + */ +const aes_impl_ops_t aes_generic_impl = { + .generate = &aes_generic_generate, + .encrypt = &aes_generic_encrypt, + .decrypt = &aes_generic_decrypt, + .is_supported = &aes_generic_will_work, +#if defined(_ZFS_LITTLE_ENDIAN) + .needs_byteswap = B_TRUE, +#else + .needs_byteswap = B_FALSE, +#endif + .name = "generic" +}; diff --git a/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_x86-64.c b/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_x86-64.c new file mode 100644 index 000000000000..19f8fd5012cf --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/aes/aes_impl_x86-64.c @@ -0,0 +1,63 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + */ + +#if defined(__x86_64) + +#include <sys/simd.h> +#include <aes/aes_impl.h> + +/* + * Expand the 32-bit AES cipher key array into the encryption and decryption + * key schedules. + * + * Parameters: + * key AES key schedule to be initialized + * keyarr32 User key + * keyBits AES key size (128, 192, or 256 bits) + */ +static void +aes_x86_64_generate(aes_key_t *key, const uint32_t *keyarr32, int keybits) +{ + key->nr = rijndael_key_setup_enc_amd64(&(key->encr_ks.ks32[0]), + keyarr32, keybits); + key->nr = rijndael_key_setup_dec_amd64(&(key->decr_ks.ks32[0]), + keyarr32, keybits); +} + +static boolean_t +aes_x86_64_will_work(void) +{ + return (B_TRUE); +} + +const aes_impl_ops_t aes_x86_64_impl = { + .generate = &aes_x86_64_generate, + .encrypt = &aes_encrypt_amd64, + .decrypt = &aes_decrypt_amd64, + .is_supported = &aes_x86_64_will_work, + .needs_byteswap = B_FALSE, + .name = "x86_64" +}; + +#endif /* defined(__x86_64) */ diff --git a/sys/contrib/openzfs/module/icp/algs/aes/aes_modes.c b/sys/contrib/openzfs/module/icp/algs/aes/aes_modes.c new file mode 100644 index 000000000000..9e4b498fffcb --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/aes/aes_modes.c @@ -0,0 +1,135 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2009 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include <sys/zfs_context.h> +#include <modes/modes.h> +#include <aes/aes_impl.h> + +/* Copy a 16-byte AES block from "in" to "out" */ +void +aes_copy_block(uint8_t *in, uint8_t *out) +{ + if (IS_P2ALIGNED2(in, out, sizeof (uint32_t))) { + /* LINTED: pointer alignment */ + *(uint32_t *)&out[0] = *(uint32_t *)&in[0]; + /* LINTED: pointer alignment */ + *(uint32_t *)&out[4] = *(uint32_t *)&in[4]; + /* LINTED: pointer alignment */ + *(uint32_t *)&out[8] = *(uint32_t *)&in[8]; + /* LINTED: pointer alignment */ + *(uint32_t *)&out[12] = *(uint32_t *)&in[12]; + } else { + AES_COPY_BLOCK(in, out); + } +} + + +/* XOR a 16-byte AES block of data into dst */ +void +aes_xor_block(uint8_t *data, uint8_t *dst) +{ + if (IS_P2ALIGNED2(dst, data, sizeof (uint32_t))) { + /* LINTED: pointer alignment */ + *(uint32_t *)&dst[0] ^= *(uint32_t *)&data[0]; + /* LINTED: pointer alignment */ + *(uint32_t *)&dst[4] ^= *(uint32_t *)&data[4]; + /* LINTED: pointer alignment */ + *(uint32_t *)&dst[8] ^= *(uint32_t *)&data[8]; + /* LINTED: pointer alignment */ + *(uint32_t *)&dst[12] ^= *(uint32_t *)&data[12]; + } else { + AES_XOR_BLOCK(data, dst); + } +} + + +/* + * Encrypt multiple blocks of data according to mode. + */ +int +aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length, + crypto_data_t *out) +{ + aes_ctx_t *aes_ctx = ctx; + int rv; + + if (aes_ctx->ac_flags & CTR_MODE) { + rv = ctr_mode_contiguous_blocks(ctx, data, length, out, + AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); + } else if (aes_ctx->ac_flags & CCM_MODE) { + rv = ccm_mode_encrypt_contiguous_blocks(ctx, data, length, + out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) { + rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, length, + out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + } else if (aes_ctx->ac_flags & CBC_MODE) { + rv = cbc_encrypt_contiguous_blocks(ctx, + data, length, out, AES_BLOCK_LEN, aes_encrypt_block, + aes_copy_block, aes_xor_block); + } else { + rv = ecb_cipher_contiguous_blocks(ctx, data, length, out, + AES_BLOCK_LEN, aes_encrypt_block); + } + return (rv); +} + + +/* + * Decrypt multiple blocks of data according to mode. 
+ */ +int +aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length, + crypto_data_t *out) +{ + aes_ctx_t *aes_ctx = ctx; + int rv; + + if (aes_ctx->ac_flags & CTR_MODE) { + rv = ctr_mode_contiguous_blocks(ctx, data, length, out, + AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); + if (rv == CRYPTO_DATA_LEN_RANGE) + rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE; + } else if (aes_ctx->ac_flags & CCM_MODE) { + rv = ccm_mode_decrypt_contiguous_blocks(ctx, data, length, + out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) { + rv = gcm_mode_decrypt_contiguous_blocks(ctx, data, length, + out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + } else if (aes_ctx->ac_flags & CBC_MODE) { + rv = cbc_decrypt_contiguous_blocks(ctx, data, length, out, + AES_BLOCK_LEN, aes_decrypt_block, aes_copy_block, + aes_xor_block); + } else { + rv = ecb_cipher_contiguous_blocks(ctx, data, length, out, + AES_BLOCK_LEN, aes_decrypt_block); + if (rv == CRYPTO_DATA_LEN_RANGE) + rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE; + } + return (rv); +} diff --git a/sys/contrib/openzfs/module/icp/algs/edonr/edonr.c b/sys/contrib/openzfs/module/icp/algs/edonr/edonr.c new file mode 100644 index 000000000000..7c677095f1ef --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/edonr/edonr.c @@ -0,0 +1,746 @@ +/* + * IDI,NTNU + * + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://opensource.org/licenses/CDDL-1.0. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + * + * Copyright (C) 2009, 2010, Jorn Amundsen <jorn.amundsen@ntnu.no> + * Tweaked Edon-R implementation for SUPERCOP, based on NIST API. 
+ * + * $Id: edonr.c 517 2013-02-17 20:34:39Z joern $ + */ +/* + * Portions copyright (c) 2013, Saso Kiselkov, All rights reserved + */ + +#include <sys/strings.h> +#include <sys/edonr.h> +#include <sys/debug.h> + +/* big endian support, provides no-op's if run on little endian hosts */ +#include "edonr_byteorder.h" + +#define hashState224(x) ((x)->pipe->p256) +#define hashState256(x) ((x)->pipe->p256) +#define hashState384(x) ((x)->pipe->p512) +#define hashState512(x) ((x)->pipe->p512) + +/* shift and rotate shortcuts */ +#define shl(x, n) ((x) << n) +#define shr(x, n) ((x) >> n) + +#define rotl32(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) +#define rotr32(x, n) (((x) >> (n)) | ((x) << (32 - (n)))) + +#define rotl64(x, n) (((x) << (n)) | ((x) >> (64 - (n)))) +#define rotr64(x, n) (((x) >> (n)) | ((x) << (64 - (n)))) + +#if !defined(__C99_RESTRICT) +#define restrict /* restrict */ +#endif + +#define EDONR_VALID_HASHBITLEN(x) \ + ((x) == 512 || (x) == 384 || (x) == 256 || (x) == 224) + +/* EdonR224 initial double chaining pipe */ +static const uint32_t i224p2[16] = { + 0x00010203ul, 0x04050607ul, 0x08090a0bul, 0x0c0d0e0ful, + 0x10111213ul, 0x14151617ul, 0x18191a1bul, 0x1c1d1e1ful, + 0x20212223ul, 0x24252627ul, 0x28292a2bul, 0x2c2d2e2ful, + 0x30313233ul, 0x34353637ul, 0x38393a3bul, 0x3c3d3e3ful, +}; + +/* EdonR256 initial double chaining pipe */ +static const uint32_t i256p2[16] = { + 0x40414243ul, 0x44454647ul, 0x48494a4bul, 0x4c4d4e4ful, + 0x50515253ul, 0x54555657ul, 0x58595a5bul, 0x5c5d5e5ful, + 0x60616263ul, 0x64656667ul, 0x68696a6bul, 0x6c6d6e6ful, + 0x70717273ul, 0x74757677ul, 0x78797a7bul, 0x7c7d7e7ful, +}; + +/* EdonR384 initial double chaining pipe */ +static const uint64_t i384p2[16] = { + 0x0001020304050607ull, 0x08090a0b0c0d0e0full, + 0x1011121314151617ull, 0x18191a1b1c1d1e1full, + 0x2021222324252627ull, 0x28292a2b2c2d2e2full, + 0x3031323334353637ull, 0x38393a3b3c3d3e3full, + 0x4041424344454647ull, 0x48494a4b4c4d4e4full, + 0x5051525354555657ull, 0x58595a5b5c5d5e5full, + 0x6061626364656667ull, 0x68696a6b6c6d6e6full, + 0x7071727374757677ull, 0x78797a7b7c7d7e7full +}; + +/* EdonR512 initial double chaining pipe */ +static const uint64_t i512p2[16] = { + 0x8081828384858687ull, 0x88898a8b8c8d8e8full, + 0x9091929394959697ull, 0x98999a9b9c9d9e9full, + 0xa0a1a2a3a4a5a6a7ull, 0xa8a9aaabacadaeafull, + 0xb0b1b2b3b4b5b6b7ull, 0xb8b9babbbcbdbebfull, + 0xc0c1c2c3c4c5c6c7ull, 0xc8c9cacbcccdcecfull, + 0xd0d1d2d3d4d5d6d7ull, 0xd8d9dadbdcdddedfull, + 0xe0e1e2e3e4e5e6e7ull, 0xe8e9eaebecedeeefull, + 0xf0f1f2f3f4f5f6f7ull, 0xf8f9fafbfcfdfeffull +}; + +/* + * First Latin Square + * 0 7 1 3 2 4 6 5 + * 4 1 7 6 3 0 5 2 + * 7 0 4 2 5 3 1 6 + * 1 4 0 5 6 2 7 3 + * 2 3 6 7 1 5 0 4 + * 5 2 3 1 7 6 4 0 + * 3 6 5 0 4 7 2 1 + * 6 5 2 4 0 1 3 7 + */ +#define LS1_256(c, x0, x1, x2, x3, x4, x5, x6, x7) \ +{ \ + uint32_t x04, x17, x23, x56, x07, x26; \ + x04 = x0+x4, x17 = x1+x7, x07 = x04+x17; \ + s0 = c + x07 + x2; \ + s1 = rotl32(x07 + x3, 4); \ + s2 = rotl32(x07 + x6, 8); \ + x23 = x2 + x3; \ + s5 = rotl32(x04 + x23 + x5, 22); \ + x56 = x5 + x6; \ + s6 = rotl32(x17 + x56 + x0, 24); \ + x26 = x23+x56; \ + s3 = rotl32(x26 + x7, 13); \ + s4 = rotl32(x26 + x1, 17); \ + s7 = rotl32(x26 + x4, 29); \ +} + +#define LS1_512(c, x0, x1, x2, x3, x4, x5, x6, x7) \ +{ \ + uint64_t x04, x17, x23, x56, x07, x26; \ + x04 = x0+x4, x17 = x1+x7, x07 = x04+x17; \ + s0 = c + x07 + x2; \ + s1 = rotl64(x07 + x3, 5); \ + s2 = rotl64(x07 + x6, 15); \ + x23 = x2 + x3; \ + s5 = rotl64(x04 + x23 + x5, 40); \ + x56 = x5 + x6; \ + s6 = 
rotl64(x17 + x56 + x0, 50); \ + x26 = x23+x56; \ + s3 = rotl64(x26 + x7, 22); \ + s4 = rotl64(x26 + x1, 31); \ + s7 = rotl64(x26 + x4, 59); \ +} + +/* + * Second Orthogonal Latin Square + * 0 4 2 3 1 6 5 7 + * 7 6 3 2 5 4 1 0 + * 5 3 1 6 0 2 7 4 + * 1 0 5 4 3 7 2 6 + * 2 1 0 7 4 5 6 3 + * 3 5 7 0 6 1 4 2 + * 4 7 6 1 2 0 3 5 + * 6 2 4 5 7 3 0 1 + */ +#define LS2_256(c, y0, y1, y2, y3, y4, y5, y6, y7) \ +{ \ + uint32_t y01, y25, y34, y67, y04, y05, y27, y37; \ + y01 = y0+y1, y25 = y2+y5, y05 = y01+y25; \ + t0 = ~c + y05 + y7; \ + t2 = rotl32(y05 + y3, 9); \ + y34 = y3+y4, y04 = y01+y34; \ + t1 = rotl32(y04 + y6, 5); \ + t4 = rotl32(y04 + y5, 15); \ + y67 = y6+y7, y37 = y34+y67; \ + t3 = rotl32(y37 + y2, 11); \ + t7 = rotl32(y37 + y0, 27); \ + y27 = y25+y67; \ + t5 = rotl32(y27 + y4, 20); \ + t6 = rotl32(y27 + y1, 25); \ +} + +#define LS2_512(c, y0, y1, y2, y3, y4, y5, y6, y7) \ +{ \ + uint64_t y01, y25, y34, y67, y04, y05, y27, y37; \ + y01 = y0+y1, y25 = y2+y5, y05 = y01+y25; \ + t0 = ~c + y05 + y7; \ + t2 = rotl64(y05 + y3, 19); \ + y34 = y3+y4, y04 = y01+y34; \ + t1 = rotl64(y04 + y6, 10); \ + t4 = rotl64(y04 + y5, 36); \ + y67 = y6+y7, y37 = y34+y67; \ + t3 = rotl64(y37 + y2, 29); \ + t7 = rotl64(y37 + y0, 55); \ + y27 = y25+y67; \ + t5 = rotl64(y27 + y4, 44); \ + t6 = rotl64(y27 + y1, 48); \ +} + +#define quasi_exform256(r0, r1, r2, r3, r4, r5, r6, r7) \ +{ \ + uint32_t s04, s17, s23, s56, t01, t25, t34, t67; \ + s04 = s0 ^ s4, t01 = t0 ^ t1; \ + r0 = (s04 ^ s1) + (t01 ^ t5); \ + t67 = t6 ^ t7; \ + r1 = (s04 ^ s7) + (t2 ^ t67); \ + s23 = s2 ^ s3; \ + r7 = (s23 ^ s5) + (t4 ^ t67); \ + t34 = t3 ^ t4; \ + r3 = (s23 ^ s4) + (t0 ^ t34); \ + s56 = s5 ^ s6; \ + r5 = (s3 ^ s56) + (t34 ^ t6); \ + t25 = t2 ^ t5; \ + r6 = (s2 ^ s56) + (t25 ^ t7); \ + s17 = s1 ^ s7; \ + r4 = (s0 ^ s17) + (t1 ^ t25); \ + r2 = (s17 ^ s6) + (t01 ^ t3); \ +} + +#define quasi_exform512(r0, r1, r2, r3, r4, r5, r6, r7) \ +{ \ + uint64_t s04, s17, s23, s56, t01, t25, t34, t67; \ + s04 = s0 ^ s4, t01 = t0 ^ t1; \ + r0 = (s04 ^ s1) + (t01 ^ t5); \ + t67 = t6 ^ t7; \ + r1 = (s04 ^ s7) + (t2 ^ t67); \ + s23 = s2 ^ s3; \ + r7 = (s23 ^ s5) + (t4 ^ t67); \ + t34 = t3 ^ t4; \ + r3 = (s23 ^ s4) + (t0 ^ t34); \ + s56 = s5 ^ s6; \ + r5 = (s3 ^ s56) + (t34 ^ t6); \ + t25 = t2 ^ t5; \ + r6 = (s2 ^ s56) + (t25 ^ t7); \ + s17 = s1 ^ s7; \ + r4 = (s0 ^ s17) + (t1 ^ t25); \ + r2 = (s17 ^ s6) + (t01 ^ t3); \ +} + +static size_t +Q256(size_t bitlen, const uint32_t *data, uint32_t *restrict p) +{ + size_t bl; + + for (bl = bitlen; bl >= EdonR256_BLOCK_BITSIZE; + bl -= EdonR256_BLOCK_BITSIZE, data += 16) { + uint32_t s0, s1, s2, s3, s4, s5, s6, s7, t0, t1, t2, t3, t4, + t5, t6, t7; + uint32_t p0, p1, p2, p3, p4, p5, p6, p7, q0, q1, q2, q3, q4, + q5, q6, q7; + const uint32_t defix = 0xaaaaaaaa; +#if defined(MACHINE_IS_BIG_ENDIAN) + uint32_t swp0, swp1, swp2, swp3, swp4, swp5, swp6, swp7, swp8, + swp9, swp10, swp11, swp12, swp13, swp14, swp15; +#define d(j) swp ## j +#define s32(j) ld_swap32((uint32_t *)data + j, swp ## j) +#else +#define d(j) data[j] +#endif + + /* First row of quasigroup e-transformations */ +#if defined(MACHINE_IS_BIG_ENDIAN) + s32(8); + s32(9); + s32(10); + s32(11); + s32(12); + s32(13); + s32(14); + s32(15); +#endif + LS1_256(defix, d(15), d(14), d(13), d(12), d(11), d(10), d(9), + d(8)); +#if defined(MACHINE_IS_BIG_ENDIAN) + s32(0); + s32(1); + s32(2); + s32(3); + s32(4); + s32(5); + s32(6); + s32(7); +#undef s32 +#endif + LS2_256(defix, d(0), d(1), d(2), d(3), d(4), d(5), d(6), d(7)); + quasi_exform256(p0, p1, p2, p3, p4, 
p5, p6, p7); + + LS1_256(defix, p0, p1, p2, p3, p4, p5, p6, p7); + LS2_256(defix, d(8), d(9), d(10), d(11), d(12), d(13), d(14), + d(15)); + quasi_exform256(q0, q1, q2, q3, q4, q5, q6, q7); + + /* Second row of quasigroup e-transformations */ + LS1_256(defix, p[8], p[9], p[10], p[11], p[12], p[13], p[14], + p[15]); + LS2_256(defix, p0, p1, p2, p3, p4, p5, p6, p7); + quasi_exform256(p0, p1, p2, p3, p4, p5, p6, p7); + + LS1_256(defix, p0, p1, p2, p3, p4, p5, p6, p7); + LS2_256(defix, q0, q1, q2, q3, q4, q5, q6, q7); + quasi_exform256(q0, q1, q2, q3, q4, q5, q6, q7); + + /* Third row of quasigroup e-transformations */ + LS1_256(defix, p0, p1, p2, p3, p4, p5, p6, p7); + LS2_256(defix, p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + quasi_exform256(p0, p1, p2, p3, p4, p5, p6, p7); + + LS1_256(defix, q0, q1, q2, q3, q4, q5, q6, q7); + LS2_256(defix, p0, p1, p2, p3, p4, p5, p6, p7); + quasi_exform256(q0, q1, q2, q3, q4, q5, q6, q7); + + /* Fourth row of quasigroup e-transformations */ + LS1_256(defix, d(7), d(6), d(5), d(4), d(3), d(2), d(1), d(0)); + LS2_256(defix, p0, p1, p2, p3, p4, p5, p6, p7); + quasi_exform256(p0, p1, p2, p3, p4, p5, p6, p7); + + LS1_256(defix, p0, p1, p2, p3, p4, p5, p6, p7); + LS2_256(defix, q0, q1, q2, q3, q4, q5, q6, q7); + quasi_exform256(q0, q1, q2, q3, q4, q5, q6, q7); + + /* Edon-R tweak on the original SHA-3 Edon-R submission. */ + p[0] ^= d(8) ^ p0; + p[1] ^= d(9) ^ p1; + p[2] ^= d(10) ^ p2; + p[3] ^= d(11) ^ p3; + p[4] ^= d(12) ^ p4; + p[5] ^= d(13) ^ p5; + p[6] ^= d(14) ^ p6; + p[7] ^= d(15) ^ p7; + p[8] ^= d(0) ^ q0; + p[9] ^= d(1) ^ q1; + p[10] ^= d(2) ^ q2; + p[11] ^= d(3) ^ q3; + p[12] ^= d(4) ^ q4; + p[13] ^= d(5) ^ q5; + p[14] ^= d(6) ^ q6; + p[15] ^= d(7) ^ q7; + } + +#undef d + return (bitlen - bl); +} + +/* + * Why is this #pragma here? + * + * Checksum functions like this one can go over the stack frame size check + * Linux imposes on 32-bit platforms (-Wframe-larger-than=1024). We can + * safely ignore the compiler error since we know that in ZoL, that + * the function will be called from a worker thread that won't be using + * much stack. The only function that goes over the 1k limit is Q512(), + * which only goes over it by a hair (1248 bytes on ARM32). 
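+ *
+ * An alternative would be to raise the limit for this file from the
+ * build system, e.g. -Wframe-larger-than=2048 (illustrative only);
+ * the pragma below instead just silences the warning for the rest of
+ * this file on 32-bit builds.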
+ */ +#include <sys/isa_defs.h> /* for _ILP32 */ +#ifdef _ILP32 /* We're 32-bit, assume small stack frames */ +#pragma GCC diagnostic ignored "-Wframe-larger-than=" +#endif + +#if defined(__IBMC__) && defined(_AIX) && defined(__64BIT__) +static inline size_t +#else +static size_t +#endif +Q512(size_t bitlen, const uint64_t *data, uint64_t *restrict p) +{ + size_t bl; + + for (bl = bitlen; bl >= EdonR512_BLOCK_BITSIZE; + bl -= EdonR512_BLOCK_BITSIZE, data += 16) { + uint64_t s0, s1, s2, s3, s4, s5, s6, s7, t0, t1, t2, t3, t4, + t5, t6, t7; + uint64_t p0, p1, p2, p3, p4, p5, p6, p7, q0, q1, q2, q3, q4, + q5, q6, q7; + const uint64_t defix = 0xaaaaaaaaaaaaaaaaull; +#if defined(MACHINE_IS_BIG_ENDIAN) + uint64_t swp0, swp1, swp2, swp3, swp4, swp5, swp6, swp7, swp8, + swp9, swp10, swp11, swp12, swp13, swp14, swp15; +#define d(j) swp##j +#define s64(j) ld_swap64((uint64_t *)data+j, swp##j) +#else +#define d(j) data[j] +#endif + + /* First row of quasigroup e-transformations */ +#if defined(MACHINE_IS_BIG_ENDIAN) + s64(8); + s64(9); + s64(10); + s64(11); + s64(12); + s64(13); + s64(14); + s64(15); +#endif + LS1_512(defix, d(15), d(14), d(13), d(12), d(11), d(10), d(9), + d(8)); +#if defined(MACHINE_IS_BIG_ENDIAN) + s64(0); + s64(1); + s64(2); + s64(3); + s64(4); + s64(5); + s64(6); + s64(7); +#undef s64 +#endif + LS2_512(defix, d(0), d(1), d(2), d(3), d(4), d(5), d(6), d(7)); + quasi_exform512(p0, p1, p2, p3, p4, p5, p6, p7); + + LS1_512(defix, p0, p1, p2, p3, p4, p5, p6, p7); + LS2_512(defix, d(8), d(9), d(10), d(11), d(12), d(13), d(14), + d(15)); + quasi_exform512(q0, q1, q2, q3, q4, q5, q6, q7); + + /* Second row of quasigroup e-transformations */ + LS1_512(defix, p[8], p[9], p[10], p[11], p[12], p[13], p[14], + p[15]); + LS2_512(defix, p0, p1, p2, p3, p4, p5, p6, p7); + quasi_exform512(p0, p1, p2, p3, p4, p5, p6, p7); + + LS1_512(defix, p0, p1, p2, p3, p4, p5, p6, p7); + LS2_512(defix, q0, q1, q2, q3, q4, q5, q6, q7); + quasi_exform512(q0, q1, q2, q3, q4, q5, q6, q7); + + /* Third row of quasigroup e-transformations */ + LS1_512(defix, p0, p1, p2, p3, p4, p5, p6, p7); + LS2_512(defix, p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + quasi_exform512(p0, p1, p2, p3, p4, p5, p6, p7); + + LS1_512(defix, q0, q1, q2, q3, q4, q5, q6, q7); + LS2_512(defix, p0, p1, p2, p3, p4, p5, p6, p7); + quasi_exform512(q0, q1, q2, q3, q4, q5, q6, q7); + + /* Fourth row of quasigroup e-transformations */ + LS1_512(defix, d(7), d(6), d(5), d(4), d(3), d(2), d(1), d(0)); + LS2_512(defix, p0, p1, p2, p3, p4, p5, p6, p7); + quasi_exform512(p0, p1, p2, p3, p4, p5, p6, p7); + + LS1_512(defix, p0, p1, p2, p3, p4, p5, p6, p7); + LS2_512(defix, q0, q1, q2, q3, q4, q5, q6, q7); + quasi_exform512(q0, q1, q2, q3, q4, q5, q6, q7); + + /* Edon-R tweak on the original SHA-3 Edon-R submission. 
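+ * That is, the double chaining pipe absorbs both the input words and
+ * the outputs of the last quasigroup row (p[i] ^= d(..) ^ {p,q}i
+ * below) rather than being overwritten by them.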
*/ + p[0] ^= d(8) ^ p0; + p[1] ^= d(9) ^ p1; + p[2] ^= d(10) ^ p2; + p[3] ^= d(11) ^ p3; + p[4] ^= d(12) ^ p4; + p[5] ^= d(13) ^ p5; + p[6] ^= d(14) ^ p6; + p[7] ^= d(15) ^ p7; + p[8] ^= d(0) ^ q0; + p[9] ^= d(1) ^ q1; + p[10] ^= d(2) ^ q2; + p[11] ^= d(3) ^ q3; + p[12] ^= d(4) ^ q4; + p[13] ^= d(5) ^ q5; + p[14] ^= d(6) ^ q6; + p[15] ^= d(7) ^ q7; + } + +#undef d + return (bitlen - bl); +} + +void +EdonRInit(EdonRState *state, size_t hashbitlen) +{ + ASSERT(EDONR_VALID_HASHBITLEN(hashbitlen)); + switch (hashbitlen) { + case 224: + state->hashbitlen = 224; + state->bits_processed = 0; + state->unprocessed_bits = 0; + bcopy(i224p2, hashState224(state)->DoublePipe, + 16 * sizeof (uint32_t)); + break; + + case 256: + state->hashbitlen = 256; + state->bits_processed = 0; + state->unprocessed_bits = 0; + bcopy(i256p2, hashState256(state)->DoublePipe, + 16 * sizeof (uint32_t)); + break; + + case 384: + state->hashbitlen = 384; + state->bits_processed = 0; + state->unprocessed_bits = 0; + bcopy(i384p2, hashState384(state)->DoublePipe, + 16 * sizeof (uint64_t)); + break; + + case 512: + state->hashbitlen = 512; + state->bits_processed = 0; + state->unprocessed_bits = 0; + bcopy(i512p2, hashState224(state)->DoublePipe, + 16 * sizeof (uint64_t)); + break; + } +} + + +void +EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen) +{ + uint32_t *data32; + uint64_t *data64; + + size_t bits_processed; + + ASSERT(EDONR_VALID_HASHBITLEN(state->hashbitlen)); + switch (state->hashbitlen) { + case 224: + case 256: + if (state->unprocessed_bits > 0) { + /* LastBytes = databitlen / 8 */ + int LastBytes = (int)databitlen >> 3; + + ASSERT(state->unprocessed_bits + databitlen <= + EdonR256_BLOCK_SIZE * 8); + + bcopy(data, hashState256(state)->LastPart + + (state->unprocessed_bits >> 3), LastBytes); + state->unprocessed_bits += (int)databitlen; + databitlen = state->unprocessed_bits; + /* LINTED E_BAD_PTR_CAST_ALIGN */ + data32 = (uint32_t *)hashState256(state)->LastPart; + } else + /* LINTED E_BAD_PTR_CAST_ALIGN */ + data32 = (uint32_t *)data; + + bits_processed = Q256(databitlen, data32, + hashState256(state)->DoublePipe); + state->bits_processed += bits_processed; + databitlen -= bits_processed; + state->unprocessed_bits = (int)databitlen; + if (databitlen > 0) { + /* LastBytes = Ceil(databitlen / 8) */ + int LastBytes = + ((~(((-(int)databitlen) >> 3) & 0x01ff)) + + 1) & 0x01ff; + + data32 += bits_processed >> 5; /* byte size update */ + bcopy(data32, hashState256(state)->LastPart, LastBytes); + } + break; + + case 384: + case 512: + if (state->unprocessed_bits > 0) { + /* LastBytes = databitlen / 8 */ + int LastBytes = (int)databitlen >> 3; + + ASSERT(state->unprocessed_bits + databitlen <= + EdonR512_BLOCK_SIZE * 8); + + bcopy(data, hashState512(state)->LastPart + + (state->unprocessed_bits >> 3), LastBytes); + state->unprocessed_bits += (int)databitlen; + databitlen = state->unprocessed_bits; + /* LINTED E_BAD_PTR_CAST_ALIGN */ + data64 = (uint64_t *)hashState512(state)->LastPart; + } else + /* LINTED E_BAD_PTR_CAST_ALIGN */ + data64 = (uint64_t *)data; + + bits_processed = Q512(databitlen, data64, + hashState512(state)->DoublePipe); + state->bits_processed += bits_processed; + databitlen -= bits_processed; + state->unprocessed_bits = (int)databitlen; + if (databitlen > 0) { + /* LastBytes = Ceil(databitlen / 8) */ + int LastBytes = + ((~(((-(int)databitlen) >> 3) & 0x03ff)) + + 1) & 0x03ff; + + data64 += bits_processed >> 6; /* byte size update */ + bcopy(data64, 
hashState512(state)->LastPart, LastBytes); + } + break; + } +} + +void +EdonRFinal(EdonRState *state, uint8_t *hashval) +{ + uint32_t *data32; + uint64_t *data64, num_bits; + + size_t databitlen; + int LastByte, PadOnePosition; + + num_bits = state->bits_processed + state->unprocessed_bits; + ASSERT(EDONR_VALID_HASHBITLEN(state->hashbitlen)); + switch (state->hashbitlen) { + case 224: + case 256: + LastByte = (int)state->unprocessed_bits >> 3; + PadOnePosition = 7 - (state->unprocessed_bits & 0x07); + hashState256(state)->LastPart[LastByte] = + (hashState256(state)->LastPart[LastByte] + & (0xff << (PadOnePosition + 1))) ^ + (0x01 << PadOnePosition); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + data64 = (uint64_t *)hashState256(state)->LastPart; + + if (state->unprocessed_bits < 448) { + (void) memset((hashState256(state)->LastPart) + + LastByte + 1, 0x00, + EdonR256_BLOCK_SIZE - LastByte - 9); + databitlen = EdonR256_BLOCK_SIZE * 8; +#if defined(MACHINE_IS_BIG_ENDIAN) + st_swap64(num_bits, data64 + 7); +#else + data64[7] = num_bits; +#endif + } else { + (void) memset((hashState256(state)->LastPart) + + LastByte + 1, 0x00, + EdonR256_BLOCK_SIZE * 2 - LastByte - 9); + databitlen = EdonR256_BLOCK_SIZE * 16; +#if defined(MACHINE_IS_BIG_ENDIAN) + st_swap64(num_bits, data64 + 15); +#else + data64[15] = num_bits; +#endif + } + + /* LINTED E_BAD_PTR_CAST_ALIGN */ + data32 = (uint32_t *)hashState256(state)->LastPart; + state->bits_processed += Q256(databitlen, data32, + hashState256(state)->DoublePipe); + break; + + case 384: + case 512: + LastByte = (int)state->unprocessed_bits >> 3; + PadOnePosition = 7 - (state->unprocessed_bits & 0x07); + hashState512(state)->LastPart[LastByte] = + (hashState512(state)->LastPart[LastByte] + & (0xff << (PadOnePosition + 1))) ^ + (0x01 << PadOnePosition); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + data64 = (uint64_t *)hashState512(state)->LastPart; + + if (state->unprocessed_bits < 960) { + (void) memset((hashState512(state)->LastPart) + + LastByte + 1, 0x00, + EdonR512_BLOCK_SIZE - LastByte - 9); + databitlen = EdonR512_BLOCK_SIZE * 8; +#if defined(MACHINE_IS_BIG_ENDIAN) + st_swap64(num_bits, data64 + 15); +#else + data64[15] = num_bits; +#endif + } else { + (void) memset((hashState512(state)->LastPart) + + LastByte + 1, 0x00, + EdonR512_BLOCK_SIZE * 2 - LastByte - 9); + databitlen = EdonR512_BLOCK_SIZE * 16; +#if defined(MACHINE_IS_BIG_ENDIAN) + st_swap64(num_bits, data64 + 31); +#else + data64[31] = num_bits; +#endif + } + + state->bits_processed += Q512(databitlen, data64, + hashState512(state)->DoublePipe); + break; + } + + switch (state->hashbitlen) { + case 224: { +#if defined(MACHINE_IS_BIG_ENDIAN) + uint32_t *d32 = (uint32_t *)hashval; + uint32_t *s32 = hashState224(state)->DoublePipe + 9; + int j; + + for (j = 0; j < EdonR224_DIGEST_SIZE >> 2; j++) + st_swap32(s32[j], d32 + j); +#else + bcopy(hashState256(state)->DoublePipe + 9, hashval, + EdonR224_DIGEST_SIZE); +#endif + break; + } + case 256: { +#if defined(MACHINE_IS_BIG_ENDIAN) + uint32_t *d32 = (uint32_t *)hashval; + uint32_t *s32 = hashState224(state)->DoublePipe + 8; + int j; + + for (j = 0; j < EdonR256_DIGEST_SIZE >> 2; j++) + st_swap32(s32[j], d32 + j); +#else + bcopy(hashState256(state)->DoublePipe + 8, hashval, + EdonR256_DIGEST_SIZE); +#endif + break; + } + case 384: { +#if defined(MACHINE_IS_BIG_ENDIAN) + uint64_t *d64 = (uint64_t *)hashval; + uint64_t *s64 = hashState384(state)->DoublePipe + 10; + int j; + + for (j = 0; j < EdonR384_DIGEST_SIZE >> 3; j++) + st_swap64(s64[j], d64 + j); +#else + 
bcopy(hashState384(state)->DoublePipe + 10, hashval, + EdonR384_DIGEST_SIZE); +#endif + break; + } + case 512: { +#if defined(MACHINE_IS_BIG_ENDIAN) + uint64_t *d64 = (uint64_t *)hashval; + uint64_t *s64 = hashState512(state)->DoublePipe + 8; + int j; + + for (j = 0; j < EdonR512_DIGEST_SIZE >> 3; j++) + st_swap64(s64[j], d64 + j); +#else + bcopy(hashState512(state)->DoublePipe + 8, hashval, + EdonR512_DIGEST_SIZE); +#endif + break; + } + } +} + + +void +EdonRHash(size_t hashbitlen, const uint8_t *data, size_t databitlen, + uint8_t *hashval) +{ + EdonRState state; + + EdonRInit(&state, hashbitlen); + EdonRUpdate(&state, data, databitlen); + EdonRFinal(&state, hashval); +} + +#ifdef _KERNEL +EXPORT_SYMBOL(EdonRInit); +EXPORT_SYMBOL(EdonRUpdate); +EXPORT_SYMBOL(EdonRHash); +EXPORT_SYMBOL(EdonRFinal); +#endif diff --git a/sys/contrib/openzfs/module/icp/algs/edonr/edonr_byteorder.h b/sys/contrib/openzfs/module/icp/algs/edonr/edonr_byteorder.h new file mode 100644 index 000000000000..2b5d48287f26 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/edonr/edonr_byteorder.h @@ -0,0 +1,216 @@ +/* + * IDI,NTNU + * + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://opensource.org/licenses/CDDL-1.0. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + * + * Copyright (C) 2009, 2010, Jorn Amundsen <jorn.amundsen@ntnu.no> + * + * C header file to determine compile machine byte order. Take care when cross + * compiling. + * + * $Id: byteorder.h 517 2013-02-17 20:34:39Z joern $ + */ +/* + * Portions copyright (c) 2013, Saso Kiselkov, All rights reserved + */ + +#ifndef _CRYPTO_EDONR_BYTEORDER_H +#define _CRYPTO_EDONR_BYTEORDER_H + +#include <sys/sysmacros.h> +#include <sys/param.h> + +#if defined(__BYTE_ORDER) +#if (__BYTE_ORDER == __BIG_ENDIAN) +#define MACHINE_IS_BIG_ENDIAN +#elif (__BYTE_ORDER == __LITTLE_ENDIAN) +#define MACHINE_IS_LITTLE_ENDIAN +#endif +#elif defined(BYTE_ORDER) +#if (BYTE_ORDER == BIG_ENDIAN) +#define MACHINE_IS_BIG_ENDIAN +#elif (BYTE_ORDER == LITTLE_ENDIAN) +#define MACHINE_IS_LITTLE_ENDIAN +#endif +#endif /* __BYTE_ORDER || BYTE_ORDER */ + +#if !defined(MACHINE_IS_BIG_ENDIAN) && !defined(MACHINE_IS_LITTLE_ENDIAN) +#if defined(_ZFS_BIG_ENDIAN) || defined(_MIPSEB) +#define MACHINE_IS_BIG_ENDIAN +#endif +#if defined(_ZFS_LITTLE_ENDIAN) || defined(_MIPSEL) +#define MACHINE_IS_LITTLE_ENDIAN +#endif +#endif /* !MACHINE_IS_BIG_ENDIAN && !MACHINE_IS_LITTLE_ENDIAN */ + +#if !defined(MACHINE_IS_BIG_ENDIAN) && !defined(MACHINE_IS_LITTLE_ENDIAN) +#error unknown machine byte sex +#endif + +#define BYTEORDER_INCLUDED + +#if defined(MACHINE_IS_BIG_ENDIAN) +/* + * Byte swapping macros for big endian architectures and compilers, + * add as appropriate for other architectures and/or compilers. 
+ * + * ld_swap64(src,dst) : uint64_t dst = *(src) + * st_swap64(src,dst) : *(dst) = uint64_t src + */ + +#if defined(__PPC__) || defined(_ARCH_PPC) + +#if defined(__64BIT__) +#if defined(_ARCH_PWR7) +#define aix_ld_swap64(s64, d64)\ + __asm__("ldbrx %0,0,%1" : "=r"(d64) : "r"(s64)) +#define aix_st_swap64(s64, d64)\ + __asm__ volatile("stdbrx %1,0,%0" : : "r"(d64), "r"(s64)) +#else +#define aix_ld_swap64(s64, d64) \ +{ \ + uint64_t *s4 = 0, h; /* initialize to zero for gcc warning */ \ + \ + __asm__("addi %0,%3,4;lwbrx %1,0,%3;lwbrx %2,0,%0;rldimi %1,%2,32,0"\ + : "+r"(s4), "=r"(d64), "=r"(h) : "b"(s64)); \ +} + +#define aix_st_swap64(s64, d64) \ +{ \ + uint64_t *s4 = 0, h; /* initialize to zero for gcc warning */ \ + h = (s64) >> 32; \ + __asm__ volatile("addi %0,%3,4;stwbrx %1,0,%3;stwbrx %2,0,%0" \ + : "+r"(s4) : "r"(s64), "r"(h), "b"(d64)); \ +} +#endif /* 64BIT && PWR7 */ +#else +#define aix_ld_swap64(s64, d64) \ +{ \ + uint32_t *s4 = 0, h, l; /* initialize to zero for gcc warning */\ + __asm__("addi %0,%3,4;lwbrx %1,0,%3;lwbrx %2,0,%0" \ + : "+r"(s4), "=r"(l), "=r"(h) : "b"(s64)); \ + d64 = ((uint64_t)h<<32) | l; \ +} + +#define aix_st_swap64(s64, d64) \ +{ \ + uint32_t *s4 = 0, h, l; /* initialize to zero for gcc warning */\ + l = (s64) & 0xfffffffful, h = (s64) >> 32; \ + __asm__ volatile("addi %0,%3,4;stwbrx %1,0,%3;stwbrx %2,0,%0" \ + : "+r"(s4) : "r"(l), "r"(h), "b"(d64)); \ +} +#endif /* __64BIT__ */ +#define aix_ld_swap32(s32, d32)\ + __asm__("lwbrx %0,0,%1" : "=r"(d32) : "r"(s32)) +#define aix_st_swap32(s32, d32)\ + __asm__ volatile("stwbrx %1,0,%0" : : "r"(d32), "r"(s32)) +#define ld_swap32(s, d) aix_ld_swap32(s, d) +#define st_swap32(s, d) aix_st_swap32(s, d) +#define ld_swap64(s, d) aix_ld_swap64(s, d) +#define st_swap64(s, d) aix_st_swap64(s, d) +#endif /* __PPC__ || _ARCH_PPC */ + +#if defined(__sparc) +#if !defined(__arch64__) && !defined(__sparcv8) && defined(__sparcv9) +#define __arch64__ +#endif +#if defined(__GNUC__) || (defined(__SUNPRO_C) && __SUNPRO_C > 0x590) +/* need Sun Studio C 5.10 and above for GNU inline assembly */ +#if defined(__arch64__) +#define sparc_ld_swap64(s64, d64) \ + __asm__("ldxa [%1]0x88,%0" : "=r"(d64) : "r"(s64)) +#define sparc_st_swap64(s64, d64) \ + __asm__ volatile("stxa %0,[%1]0x88" : : "r"(s64), "r"(d64)) +#define st_swap64(s, d) sparc_st_swap64(s, d) +#else +#define sparc_ld_swap64(s64, d64) \ +{ \ + uint32_t *s4, h, l; \ + __asm__("add %3,4,%0\n\tlda [%3]0x88,%1\n\tlda [%0]0x88,%2" \ + : "+r"(s4), "=r"(l), "=r"(h) : "r"(s64)); \ + d64 = ((uint64_t)h<<32) | l; \ +} +#define sparc_st_swap64(s64, d64) \ +{ \ + uint32_t *s4, h, l; \ + l = (s64) & 0xfffffffful, h = (s64) >> 32; \ + __asm__ volatile("add %3,4,%0\n\tsta %1,[%3]0x88\n\tsta %2,[%0]0x88"\ + : "+r"(s4) : "r"(l), "r"(h), "r"(d64)); \ +} +#endif /* sparc64 */ +#define sparc_ld_swap32(s32, d32)\ + __asm__("lda [%1]0x88,%0" : "=r"(d32) : "r"(s32)) +#define sparc_st_swap32(s32, d32)\ + __asm__ volatile("sta %0,[%1]0x88" : : "r"(s32), "r"(d32)) +#define ld_swap32(s, d) sparc_ld_swap32(s, d) +#define st_swap32(s, d) sparc_st_swap32(s, d) +#define ld_swap64(s, d) sparc_ld_swap64(s, d) +#define st_swap64(s, d) sparc_st_swap64(s, d) +#endif /* GCC || Sun Studio C > 5.9 */ +#endif /* sparc */ + +/* GCC fallback */ +#if ((__GNUC__ >= 4) || defined(__PGIC__)) && !defined(ld_swap32) +#define ld_swap32(s, d) (d = __builtin_bswap32(*(s))) +#define st_swap32(s, d) (*(d) = __builtin_bswap32(s)) +#endif /* GCC4/PGIC && !swap32 */ +#if ((__GNUC__ >= 4) || defined(__PGIC__)) && !defined(ld_swap64) 
+#define ld_swap64(s, d) (d = __builtin_bswap64(*(s))) +#define st_swap64(s, d) (*(d) = __builtin_bswap64(s)) +#endif /* GCC4/PGIC && !swap64 */ + +/* generic fallback */ +#if !defined(ld_swap32) +#define ld_swap32(s, d) \ + (d = (*(s) >> 24) | (*(s) >> 8 & 0xff00) | \ + (*(s) << 8 & 0xff0000) | (*(s) << 24)) +#define st_swap32(s, d) \ + (*(d) = ((s) >> 24) | ((s) >> 8 & 0xff00) | \ + ((s) << 8 & 0xff0000) | ((s) << 24)) +#endif +#if !defined(ld_swap64) +#define ld_swap64(s, d) \ + (d = (*(s) >> 56) | (*(s) >> 40 & 0xff00) | \ + (*(s) >> 24 & 0xff0000) | (*(s) >> 8 & 0xff000000) | \ + (*(s) & 0xff000000) << 8 | (*(s) & 0xff0000) << 24 | \ + (*(s) & 0xff00) << 40 | *(s) << 56) +#define st_swap64(s, d) \ + (*(d) = ((s) >> 56) | ((s) >> 40 & 0xff00) | \ + ((s) >> 24 & 0xff0000) | ((s) >> 8 & 0xff000000) | \ + ((s) & 0xff000000) << 8 | ((s) & 0xff0000) << 24 | \ + ((s) & 0xff00) << 40 | (s) << 56) +#endif + +#endif /* MACHINE_IS_BIG_ENDIAN */ + + +#if defined(MACHINE_IS_LITTLE_ENDIAN) +/* replace swaps with simple assignments on little endian systems */ +#undef ld_swap32 +#undef st_swap32 +#define ld_swap32(s, d) (d = *(s)) +#define st_swap32(s, d) (*(d) = s) +#undef ld_swap64 +#undef st_swap64 +#define ld_swap64(s, d) (d = *(s)) +#define st_swap64(s, d) (*(d) = s) +#endif /* MACHINE_IS_LITTLE_ENDIAN */ + +#endif /* _CRYPTO_EDONR_BYTEORDER_H */ diff --git a/sys/contrib/openzfs/module/icp/algs/modes/cbc.c b/sys/contrib/openzfs/module/icp/algs/modes/cbc.c new file mode 100644 index 000000000000..85864f56dead --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/modes/cbc.c @@ -0,0 +1,273 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <modes/modes.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> + +/* + * Algorithm independent CBC functions. 
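+ *
+ * For reference, CBC encryption of block i computes
+ *
+ *	C[i] = E_K(P[i] ^ C[i-1]),	with C[-1] = IV,
+ *
+ * and decryption computes
+ *
+ *	P[i] = D_K(C[i]) ^ C[i-1],
+ *
+ * which is what the two routines below do one block_size chunk at a
+ * time, carrying the previous ciphertext block in the context between
+ * calls.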
+ */ +int +cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*encrypt)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t remainder = length; + size_t need = 0; + uint8_t *datap = (uint8_t *)data; + uint8_t *blockp; + uint8_t *lastp; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + + if (length + ctx->cbc_remainder_len < block_size) { + /* accumulate bytes here and return */ + bcopy(datap, + (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len, + length); + ctx->cbc_remainder_len += length; + ctx->cbc_copy_to = datap; + return (CRYPTO_SUCCESS); + } + + lastp = (uint8_t *)ctx->cbc_iv; + crypto_init_ptrs(out, &iov_or_mp, &offset); + + do { + /* Unprocessed data from last call. */ + if (ctx->cbc_remainder_len > 0) { + need = block_size - ctx->cbc_remainder_len; + + if (need > remainder) + return (CRYPTO_DATA_LEN_RANGE); + + bcopy(datap, &((uint8_t *)ctx->cbc_remainder) + [ctx->cbc_remainder_len], need); + + blockp = (uint8_t *)ctx->cbc_remainder; + } else { + blockp = datap; + } + + /* + * XOR the previous cipher block or IV with the + * current clear block. + */ + xor_block(blockp, lastp); + encrypt(ctx->cbc_keysched, lastp, lastp); + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, block_size); + + /* copy block to where it belongs */ + if (out_data_1_len == block_size) { + copy_block(lastp, out_data_1); + } else { + bcopy(lastp, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy(lastp + out_data_1_len, + out_data_2, + block_size - out_data_1_len); + } + } + /* update offset */ + out->cd_offset += block_size; + + /* Update pointer to next block of data to be processed. */ + if (ctx->cbc_remainder_len != 0) { + datap += need; + ctx->cbc_remainder_len = 0; + } else { + datap += block_size; + } + + remainder = (size_t)&data[length] - (size_t)datap; + + /* Incomplete last block. */ + if (remainder > 0 && remainder < block_size) { + bcopy(datap, ctx->cbc_remainder, remainder); + ctx->cbc_remainder_len = remainder; + ctx->cbc_copy_to = datap; + goto out; + } + ctx->cbc_copy_to = NULL; + + } while (remainder > 0); + +out: + /* + * Save the last encrypted block in the context. + */ + if (ctx->cbc_lastp != NULL) { + copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv); + ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv; + } + + return (CRYPTO_SUCCESS); +} + +#define OTHER(a, ctx) \ + (((a) == (ctx)->cbc_lastblock) ? 
(ctx)->cbc_iv : (ctx)->cbc_lastblock) + +/* ARGSUSED */ +int +cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*decrypt)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t remainder = length; + size_t need = 0; + uint8_t *datap = (uint8_t *)data; + uint8_t *blockp; + uint8_t *lastp; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + + if (length + ctx->cbc_remainder_len < block_size) { + /* accumulate bytes here and return */ + bcopy(datap, + (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len, + length); + ctx->cbc_remainder_len += length; + ctx->cbc_copy_to = datap; + return (CRYPTO_SUCCESS); + } + + lastp = ctx->cbc_lastp; + crypto_init_ptrs(out, &iov_or_mp, &offset); + + do { + /* Unprocessed data from last call. */ + if (ctx->cbc_remainder_len > 0) { + need = block_size - ctx->cbc_remainder_len; + + if (need > remainder) + return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE); + + bcopy(datap, &((uint8_t *)ctx->cbc_remainder) + [ctx->cbc_remainder_len], need); + + blockp = (uint8_t *)ctx->cbc_remainder; + } else { + blockp = datap; + } + + /* LINTED: pointer alignment */ + copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx)); + + decrypt(ctx->cbc_keysched, blockp, + (uint8_t *)ctx->cbc_remainder); + blockp = (uint8_t *)ctx->cbc_remainder; + + /* + * XOR the previous cipher block or IV with the + * currently decrypted block. + */ + xor_block(lastp, blockp); + + /* LINTED: pointer alignment */ + lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx); + + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, block_size); + + bcopy(blockp, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy(blockp + out_data_1_len, out_data_2, + block_size - out_data_1_len); + } + + /* update offset */ + out->cd_offset += block_size; + + /* Update pointer to next block of data to be processed. */ + if (ctx->cbc_remainder_len != 0) { + datap += need; + ctx->cbc_remainder_len = 0; + } else { + datap += block_size; + } + + remainder = (size_t)&data[length] - (size_t)datap; + + /* Incomplete last block. */ + if (remainder > 0 && remainder < block_size) { + bcopy(datap, ctx->cbc_remainder, remainder); + ctx->cbc_remainder_len = remainder; + ctx->cbc_lastp = lastp; + ctx->cbc_copy_to = datap; + return (CRYPTO_SUCCESS); + } + ctx->cbc_copy_to = NULL; + + } while (remainder > 0); + + ctx->cbc_lastp = lastp; + return (CRYPTO_SUCCESS); +} + +int +cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len, + size_t block_size, void (*copy_block)(uint8_t *, uint64_t *)) +{ + /* + * Copy IV into context. + * + * If cm_param == NULL then the IV comes from the + * cd_miscdata field in the crypto_data structure. 
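+ *
+ * A typical AES caller passes the 16 byte IV directly, e.g.
+ * (sketch; copy_block64 stands in for whatever (uint8_t *, uint64_t *)
+ * copy routine the caller supplies):
+ *
+ *	cbc_ctx_t *ctx = cbc_alloc_ctx(KM_SLEEP);
+ *	(void) cbc_init_ctx(ctx, (char *)iv, AES_BLOCK_LEN,
+ *	    AES_BLOCK_LEN, copy_block64);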
+ */ + if (param != NULL) { + ASSERT(param_len == block_size); + copy_block((uchar_t *)param, cbc_ctx->cbc_iv); + } + + cbc_ctx->cbc_lastp = (uint8_t *)&cbc_ctx->cbc_iv[0]; + cbc_ctx->cbc_flags |= CBC_MODE; + return (CRYPTO_SUCCESS); +} + +/* ARGSUSED */ +void * +cbc_alloc_ctx(int kmflag) +{ + cbc_ctx_t *cbc_ctx; + + if ((cbc_ctx = kmem_zalloc(sizeof (cbc_ctx_t), kmflag)) == NULL) + return (NULL); + + cbc_ctx->cbc_flags = CBC_MODE; + return (cbc_ctx); +} diff --git a/sys/contrib/openzfs/module/icp/algs/modes/ccm.c b/sys/contrib/openzfs/module/icp/algs/modes/ccm.c new file mode 100644 index 000000000000..5d6507c49db1 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/modes/ccm.c @@ -0,0 +1,907 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <modes/modes.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> + +#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS +#include <sys/byteorder.h> +#define UNALIGNED_POINTERS_PERMITTED +#endif + +/* + * Encrypt multiple blocks of data in CCM mode. Decrypt for CCM mode + * is done in another function. + */ +int +ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t remainder = length; + size_t need = 0; + uint8_t *datap = (uint8_t *)data; + uint8_t *blockp; + uint8_t *lastp; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + uint64_t counter; + uint8_t *mac_buf; + + if (length + ctx->ccm_remainder_len < block_size) { + /* accumulate bytes here and return */ + bcopy(datap, + (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len, + length); + ctx->ccm_remainder_len += length; + ctx->ccm_copy_to = datap; + return (CRYPTO_SUCCESS); + } + + lastp = (uint8_t *)ctx->ccm_cb; + crypto_init_ptrs(out, &iov_or_mp, &offset); + + mac_buf = (uint8_t *)ctx->ccm_mac_buf; + + do { + /* Unprocessed data from last call. */ + if (ctx->ccm_remainder_len > 0) { + need = block_size - ctx->ccm_remainder_len; + + if (need > remainder) + return (CRYPTO_DATA_LEN_RANGE); + + bcopy(datap, &((uint8_t *)ctx->ccm_remainder) + [ctx->ccm_remainder_len], need); + + blockp = (uint8_t *)ctx->ccm_remainder; + } else { + blockp = datap; + } + + /* + * do CBC MAC + * + * XOR the previous cipher block current clear block. + * mac_buf always contain previous cipher block. 
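+ * In other words mac_buf ^= clear block, then mac_buf is re-encrypted
+ * with the key schedule; that running value is the CBC-MAC that
+ * ccm_encrypt_final() later turns into the CCM MAC.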
+ */ + xor_block(blockp, mac_buf); + encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf); + + /* ccm_cb is the counter block */ + encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, + (uint8_t *)ctx->ccm_tmp); + + lastp = (uint8_t *)ctx->ccm_tmp; + + /* + * Increment counter. Counter bits are confined + * to the bottom 64 bits of the counter block. + */ +#ifdef _ZFS_LITTLE_ENDIAN + counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask); + counter = htonll(counter + 1); +#else + counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask; + counter++; +#endif /* _ZFS_LITTLE_ENDIAN */ + counter &= ctx->ccm_counter_mask; + ctx->ccm_cb[1] = + (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter; + + /* + * XOR encrypted counter block with the current clear block. + */ + xor_block(blockp, lastp); + + ctx->ccm_processed_data_len += block_size; + + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, block_size); + + /* copy block to where it belongs */ + if (out_data_1_len == block_size) { + copy_block(lastp, out_data_1); + } else { + bcopy(lastp, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy(lastp + out_data_1_len, + out_data_2, + block_size - out_data_1_len); + } + } + /* update offset */ + out->cd_offset += block_size; + + /* Update pointer to next block of data to be processed. */ + if (ctx->ccm_remainder_len != 0) { + datap += need; + ctx->ccm_remainder_len = 0; + } else { + datap += block_size; + } + + remainder = (size_t)&data[length] - (size_t)datap; + + /* Incomplete last block. */ + if (remainder > 0 && remainder < block_size) { + bcopy(datap, ctx->ccm_remainder, remainder); + ctx->ccm_remainder_len = remainder; + ctx->ccm_copy_to = datap; + goto out; + } + ctx->ccm_copy_to = NULL; + + } while (remainder > 0); + +out: + return (CRYPTO_SUCCESS); +} + +void +calculate_ccm_mac(ccm_ctx_t *ctx, uint8_t *ccm_mac, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *)) +{ + uint64_t counter; + uint8_t *counterp, *mac_buf; + int i; + + mac_buf = (uint8_t *)ctx->ccm_mac_buf; + + /* first counter block start with index 0 */ + counter = 0; + ctx->ccm_cb[1] = (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter; + + counterp = (uint8_t *)ctx->ccm_tmp; + encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp); + + /* calculate XOR of MAC with first counter block */ + for (i = 0; i < ctx->ccm_mac_len; i++) { + ccm_mac[i] = mac_buf[i] ^ counterp[i]; + } +} + +/* ARGSUSED */ +int +ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + uint8_t *lastp, *mac_buf, *ccm_mac_p, *macp = NULL; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + int i; + + if (out->cd_length < (ctx->ccm_remainder_len + ctx->ccm_mac_len)) { + return (CRYPTO_DATA_LEN_RANGE); + } + + /* + * When we get here, the number of bytes of payload processed + * plus whatever data remains, if any, + * should be the same as the number of bytes that's being + * passed in the argument during init time. 
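+ * (That value is kept in ctx->ccm_data_len, taken from ulDataSize
+ * when the CCM parameters were processed at init.)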
+ */ + if ((ctx->ccm_processed_data_len + ctx->ccm_remainder_len) + != (ctx->ccm_data_len)) { + return (CRYPTO_DATA_LEN_RANGE); + } + + mac_buf = (uint8_t *)ctx->ccm_mac_buf; + + if (ctx->ccm_remainder_len > 0) { + + /* ccm_mac_input_buf is not used for encryption */ + macp = (uint8_t *)ctx->ccm_mac_input_buf; + bzero(macp, block_size); + + /* copy remainder to temporary buffer */ + bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len); + + /* calculate the CBC MAC */ + xor_block(macp, mac_buf); + encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf); + + /* calculate the counter mode */ + lastp = (uint8_t *)ctx->ccm_tmp; + encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, lastp); + + /* XOR with counter block */ + for (i = 0; i < ctx->ccm_remainder_len; i++) { + macp[i] ^= lastp[i]; + } + ctx->ccm_processed_data_len += ctx->ccm_remainder_len; + } + + /* Calculate the CCM MAC */ + ccm_mac_p = (uint8_t *)ctx->ccm_tmp; + calculate_ccm_mac(ctx, ccm_mac_p, encrypt_block); + + crypto_init_ptrs(out, &iov_or_mp, &offset); + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, + ctx->ccm_remainder_len + ctx->ccm_mac_len); + + if (ctx->ccm_remainder_len > 0) { + + /* copy temporary block to where it belongs */ + if (out_data_2 == NULL) { + /* everything will fit in out_data_1 */ + bcopy(macp, out_data_1, ctx->ccm_remainder_len); + bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len, + ctx->ccm_mac_len); + } else { + + if (out_data_1_len < ctx->ccm_remainder_len) { + + size_t data_2_len_used; + + bcopy(macp, out_data_1, out_data_1_len); + + data_2_len_used = ctx->ccm_remainder_len + - out_data_1_len; + + bcopy((uint8_t *)macp + out_data_1_len, + out_data_2, data_2_len_used); + bcopy(ccm_mac_p, out_data_2 + data_2_len_used, + ctx->ccm_mac_len); + } else { + bcopy(macp, out_data_1, out_data_1_len); + if (out_data_1_len == ctx->ccm_remainder_len) { + /* mac will be in out_data_2 */ + bcopy(ccm_mac_p, out_data_2, + ctx->ccm_mac_len); + } else { + size_t len_not_used = out_data_1_len - + ctx->ccm_remainder_len; + /* + * part of mac in will be in + * out_data_1, part of the mac will be + * in out_data_2 + */ + bcopy(ccm_mac_p, + out_data_1 + ctx->ccm_remainder_len, + len_not_used); + bcopy(ccm_mac_p + len_not_used, + out_data_2, + ctx->ccm_mac_len - len_not_used); + + } + } + } + } else { + /* copy block to where it belongs */ + bcopy(ccm_mac_p, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy(ccm_mac_p + out_data_1_len, out_data_2, + block_size - out_data_1_len); + } + } + out->cd_offset += ctx->ccm_remainder_len + ctx->ccm_mac_len; + ctx->ccm_remainder_len = 0; + return (CRYPTO_SUCCESS); +} + +/* + * This will only deal with decrypting the last block of the input that + * might not be a multiple of block length. + */ +static void +ccm_decrypt_incomplete_block(ccm_ctx_t *ctx, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *)) +{ + uint8_t *datap, *outp, *counterp; + int i; + + datap = (uint8_t *)ctx->ccm_remainder; + outp = &((ctx->ccm_pt_buf)[ctx->ccm_processed_data_len]); + + counterp = (uint8_t *)ctx->ccm_tmp; + encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp); + + /* XOR with counter block */ + for (i = 0; i < ctx->ccm_remainder_len; i++) { + outp[i] = datap[i] ^ counterp[i]; + } +} + +/* + * This will decrypt the cipher text. However, the plaintext won't be + * returned to the caller. 
It will be returned when decrypt_final() is + * called if the MAC matches + */ +/* ARGSUSED */ +int +ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t remainder = length; + size_t need = 0; + uint8_t *datap = (uint8_t *)data; + uint8_t *blockp; + uint8_t *cbp; + uint64_t counter; + size_t pt_len, total_decrypted_len, mac_len, pm_len, pd_len; + uint8_t *resultp; + + + pm_len = ctx->ccm_processed_mac_len; + + if (pm_len > 0) { + uint8_t *tmp; + /* + * all ciphertext has been processed, just waiting for + * part of the value of the mac + */ + if ((pm_len + length) > ctx->ccm_mac_len) { + return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE); + } + tmp = (uint8_t *)ctx->ccm_mac_input_buf; + + bcopy(datap, tmp + pm_len, length); + + ctx->ccm_processed_mac_len += length; + return (CRYPTO_SUCCESS); + } + + /* + * If we decrypt the given data, what total amount of data would + * have been decrypted? + */ + pd_len = ctx->ccm_processed_data_len; + total_decrypted_len = pd_len + length + ctx->ccm_remainder_len; + + if (total_decrypted_len > + (ctx->ccm_data_len + ctx->ccm_mac_len)) { + return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE); + } + + pt_len = ctx->ccm_data_len; + + if (total_decrypted_len > pt_len) { + /* + * part of the input will be the MAC, need to isolate that + * to be dealt with later. The left-over data in + * ccm_remainder_len from last time will not be part of the + * MAC. Otherwise, it would have already been taken out + * when this call is made last time. + */ + size_t pt_part = pt_len - pd_len - ctx->ccm_remainder_len; + + mac_len = length - pt_part; + + ctx->ccm_processed_mac_len = mac_len; + bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len); + + if (pt_part + ctx->ccm_remainder_len < block_size) { + /* + * since this is last of the ciphertext, will + * just decrypt with it here + */ + bcopy(datap, &((uint8_t *)ctx->ccm_remainder) + [ctx->ccm_remainder_len], pt_part); + ctx->ccm_remainder_len += pt_part; + ccm_decrypt_incomplete_block(ctx, encrypt_block); + ctx->ccm_processed_data_len += ctx->ccm_remainder_len; + ctx->ccm_remainder_len = 0; + return (CRYPTO_SUCCESS); + } else { + /* let rest of the code handle this */ + length = pt_part; + } + } else if (length + ctx->ccm_remainder_len < block_size) { + /* accumulate bytes here and return */ + bcopy(datap, + (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len, + length); + ctx->ccm_remainder_len += length; + ctx->ccm_copy_to = datap; + return (CRYPTO_SUCCESS); + } + + do { + /* Unprocessed data from last call. */ + if (ctx->ccm_remainder_len > 0) { + need = block_size - ctx->ccm_remainder_len; + + if (need > remainder) + return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE); + + bcopy(datap, &((uint8_t *)ctx->ccm_remainder) + [ctx->ccm_remainder_len], need); + + blockp = (uint8_t *)ctx->ccm_remainder; + } else { + blockp = datap; + } + + /* Calculate the counter mode, ccm_cb is the counter block */ + cbp = (uint8_t *)ctx->ccm_tmp; + encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, cbp); + + /* + * Increment counter. 
+ * Counter bits are confined to the bottom 64 bits + */ +#ifdef _ZFS_LITTLE_ENDIAN + counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask); + counter = htonll(counter + 1); +#else + counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask; + counter++; +#endif /* _ZFS_LITTLE_ENDIAN */ + counter &= ctx->ccm_counter_mask; + ctx->ccm_cb[1] = + (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter; + + /* XOR with the ciphertext */ + xor_block(blockp, cbp); + + /* Copy the plaintext to the "holding buffer" */ + resultp = (uint8_t *)ctx->ccm_pt_buf + + ctx->ccm_processed_data_len; + copy_block(cbp, resultp); + + ctx->ccm_processed_data_len += block_size; + + ctx->ccm_lastp = blockp; + + /* Update pointer to next block of data to be processed. */ + if (ctx->ccm_remainder_len != 0) { + datap += need; + ctx->ccm_remainder_len = 0; + } else { + datap += block_size; + } + + remainder = (size_t)&data[length] - (size_t)datap; + + /* Incomplete last block */ + if (remainder > 0 && remainder < block_size) { + bcopy(datap, ctx->ccm_remainder, remainder); + ctx->ccm_remainder_len = remainder; + ctx->ccm_copy_to = datap; + if (ctx->ccm_processed_mac_len > 0) { + /* + * not expecting anymore ciphertext, just + * compute plaintext for the remaining input + */ + ccm_decrypt_incomplete_block(ctx, + encrypt_block); + ctx->ccm_processed_data_len += remainder; + ctx->ccm_remainder_len = 0; + } + goto out; + } + ctx->ccm_copy_to = NULL; + + } while (remainder > 0); + +out: + return (CRYPTO_SUCCESS); +} + +int +ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t mac_remain, pt_len; + uint8_t *pt, *mac_buf, *macp, *ccm_mac_p; + int rv; + + pt_len = ctx->ccm_data_len; + + /* Make sure output buffer can fit all of the plaintext */ + if (out->cd_length < pt_len) { + return (CRYPTO_DATA_LEN_RANGE); + } + + pt = ctx->ccm_pt_buf; + mac_remain = ctx->ccm_processed_data_len; + mac_buf = (uint8_t *)ctx->ccm_mac_buf; + + macp = (uint8_t *)ctx->ccm_tmp; + + while (mac_remain > 0) { + + if (mac_remain < block_size) { + bzero(macp, block_size); + bcopy(pt, macp, mac_remain); + mac_remain = 0; + } else { + copy_block(pt, macp); + mac_remain -= block_size; + pt += block_size; + } + + /* calculate the CBC MAC */ + xor_block(macp, mac_buf); + encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf); + } + + /* Calculate the CCM MAC */ + ccm_mac_p = (uint8_t *)ctx->ccm_tmp; + calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block); + + /* compare the input CCM MAC value with what we calculated */ + if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) { + /* They don't match */ + return (CRYPTO_INVALID_MAC); + } else { + rv = crypto_put_output_data(ctx->ccm_pt_buf, out, pt_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + out->cd_offset += pt_len; + } + return (CRYPTO_SUCCESS); +} + +static int +ccm_validate_args(CK_AES_CCM_PARAMS *ccm_param, boolean_t is_encrypt_init) +{ + size_t macSize, nonceSize; + uint8_t q; + uint64_t maxValue; + + /* + * Check the length of the MAC. The only valid + * lengths for the MAC are: 4, 6, 8, 10, 12, 14, 16 + */ + macSize = ccm_param->ulMACSize; + if ((macSize < 4) || (macSize > 16) || ((macSize % 2) != 0)) { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + + /* Check the nonce length. 
Valid values are 7, 8, 9, 10, 11, 12, 13 */ + nonceSize = ccm_param->ulNonceSize; + if ((nonceSize < 7) || (nonceSize > 13)) { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + + /* q is the length of the field storing the length, in bytes */ + q = (uint8_t)((15 - nonceSize) & 0xFF); + + + /* + * If it is decrypt, need to make sure size of ciphertext is at least + * bigger than MAC len + */ + if ((!is_encrypt_init) && (ccm_param->ulDataSize < macSize)) { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + + /* + * Check to make sure the length of the payload is within the + * range of values allowed by q + */ + if (q < 8) { + maxValue = (1ULL << (q * 8)) - 1; + } else { + maxValue = ULONG_MAX; + } + + if (ccm_param->ulDataSize > maxValue) { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + return (CRYPTO_SUCCESS); +} + +/* + * Format the first block used in CBC-MAC (B0) and the initial counter + * block based on formatting functions and counter generation functions + * specified in RFC 3610 and NIST publication 800-38C, appendix A + * + * b0 is the first block used in CBC-MAC + * cb0 is the first counter block + * + * It's assumed that the arguments b0 and cb0 are preallocated AES blocks + * + */ +static void +ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize, + ulong_t authDataSize, uint8_t *b0, ccm_ctx_t *aes_ctx) +{ + uint64_t payloadSize; + uint8_t t, q, have_adata = 0; + size_t limit; + int i, j, k; + uint64_t mask = 0; + uint8_t *cb; + + q = (uint8_t)((15 - nonceSize) & 0xFF); + t = (uint8_t)((aes_ctx->ccm_mac_len) & 0xFF); + + /* Construct the first octet of b0 */ + if (authDataSize > 0) { + have_adata = 1; + } + b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1); + + /* copy the nonce value into b0 */ + bcopy(nonce, &(b0[1]), nonceSize); + + /* store the length of the payload into b0 */ + bzero(&(b0[1+nonceSize]), q); + + payloadSize = aes_ctx->ccm_data_len; + limit = 8 < q ? 8 : q; + + for (i = 0, j = 0, k = 15; i < limit; i++, j += 8, k--) { + b0[k] = (uint8_t)((payloadSize >> j) & 0xFF); + } + + /* format the counter block */ + + cb = (uint8_t *)aes_ctx->ccm_cb; + + cb[0] = 0x07 & (q-1); /* first byte */ + + /* copy the nonce value into the counter block */ + bcopy(nonce, &(cb[1]), nonceSize); + + bzero(&(cb[1+nonceSize]), q); + + /* Create the mask for the counter field based on the size of nonce */ + q <<= 3; + while (q-- > 0) { + mask |= (1ULL << q); + } + +#ifdef _ZFS_LITTLE_ENDIAN + mask = htonll(mask); +#endif + aes_ctx->ccm_counter_mask = mask; + + /* + * During calculation, we start using counter block 1, we will + * set it up right here. + * We can just set the last byte to have the value 1, because + * even with the biggest nonce of 13, the last byte of the + * counter block will be used for the counter value. 
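+ *
+ * For example, with a 12 byte nonce (so q = 3) the counter block
+ * built above looks like:
+ *
+ *	cb[0]      = 0x02	flags octet, 0x07 & (q - 1)
+ *	cb[1..12]  = nonce
+ *	cb[13..15] = counter field, left at 0 except cb[15] = 0x01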
+ */ + cb[15] = 0x01; +} + +/* + * Encode the length of the associated data as + * specified in RFC 3610 and NIST publication 800-38C, appendix A + */ +static void +encode_adata_len(ulong_t auth_data_len, uint8_t *encoded, size_t *encoded_len) +{ +#ifdef UNALIGNED_POINTERS_PERMITTED + uint32_t *lencoded_ptr; +#ifdef _LP64 + uint64_t *llencoded_ptr; +#endif +#endif /* UNALIGNED_POINTERS_PERMITTED */ + + if (auth_data_len < ((1ULL<<16) - (1ULL<<8))) { + /* 0 < a < (2^16-2^8) */ + *encoded_len = 2; + encoded[0] = (auth_data_len & 0xff00) >> 8; + encoded[1] = auth_data_len & 0xff; + + } else if ((auth_data_len >= ((1ULL<<16) - (1ULL<<8))) && + (auth_data_len < (1ULL << 31))) { + /* (2^16-2^8) <= a < 2^32 */ + *encoded_len = 6; + encoded[0] = 0xff; + encoded[1] = 0xfe; +#ifdef UNALIGNED_POINTERS_PERMITTED + lencoded_ptr = (uint32_t *)&encoded[2]; + *lencoded_ptr = htonl(auth_data_len); +#else + encoded[2] = (auth_data_len & 0xff000000) >> 24; + encoded[3] = (auth_data_len & 0xff0000) >> 16; + encoded[4] = (auth_data_len & 0xff00) >> 8; + encoded[5] = auth_data_len & 0xff; +#endif /* UNALIGNED_POINTERS_PERMITTED */ + +#ifdef _LP64 + } else { + /* 2^32 <= a < 2^64 */ + *encoded_len = 10; + encoded[0] = 0xff; + encoded[1] = 0xff; +#ifdef UNALIGNED_POINTERS_PERMITTED + llencoded_ptr = (uint64_t *)&encoded[2]; + *llencoded_ptr = htonl(auth_data_len); +#else + encoded[2] = (auth_data_len & 0xff00000000000000) >> 56; + encoded[3] = (auth_data_len & 0xff000000000000) >> 48; + encoded[4] = (auth_data_len & 0xff0000000000) >> 40; + encoded[5] = (auth_data_len & 0xff00000000) >> 32; + encoded[6] = (auth_data_len & 0xff000000) >> 24; + encoded[7] = (auth_data_len & 0xff0000) >> 16; + encoded[8] = (auth_data_len & 0xff00) >> 8; + encoded[9] = auth_data_len & 0xff; +#endif /* UNALIGNED_POINTERS_PERMITTED */ +#endif /* _LP64 */ + } +} + +static int +ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len, + unsigned char *auth_data, size_t auth_data_len, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + uint8_t *mac_buf, *datap, *ivp, *authp; + size_t remainder, processed; + uint8_t encoded_a[10]; /* max encoded auth data length is 10 octets */ + size_t encoded_a_len = 0; + + mac_buf = (uint8_t *)&(ctx->ccm_mac_buf); + + /* + * Format the 1st block for CBC-MAC and construct the + * 1st counter block. + * + * aes_ctx->ccm_iv is used for storing the counter block + * mac_buf will store b0 at this time. 
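+ * (For instance, with a 12 byte nonce, a 16 byte MAC and associated
+ * data present, the b0 flags octet comes out as 0x7a:
+ * 0x40 | (((16 - 2) / 2) << 3) | (3 - 1).)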
+ */ + ccm_format_initial_blocks(nonce, nonce_len, + auth_data_len, mac_buf, ctx); + + /* The IV for CBC MAC for AES CCM mode is always zero */ + ivp = (uint8_t *)ctx->ccm_tmp; + bzero(ivp, block_size); + + xor_block(ivp, mac_buf); + + /* encrypt the nonce */ + encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf); + + /* take care of the associated data, if any */ + if (auth_data_len == 0) { + return (CRYPTO_SUCCESS); + } + + encode_adata_len(auth_data_len, encoded_a, &encoded_a_len); + + remainder = auth_data_len; + + /* 1st block: it contains encoded associated data, and some data */ + authp = (uint8_t *)ctx->ccm_tmp; + bzero(authp, block_size); + bcopy(encoded_a, authp, encoded_a_len); + processed = block_size - encoded_a_len; + if (processed > auth_data_len) { + /* in case auth_data is very small */ + processed = auth_data_len; + } + bcopy(auth_data, authp+encoded_a_len, processed); + /* xor with previous buffer */ + xor_block(authp, mac_buf); + encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf); + remainder -= processed; + if (remainder == 0) { + /* a small amount of associated data, it's all done now */ + return (CRYPTO_SUCCESS); + } + + do { + if (remainder < block_size) { + /* + * There's not a block full of data, pad rest of + * buffer with zero + */ + bzero(authp, block_size); + bcopy(&(auth_data[processed]), authp, remainder); + datap = (uint8_t *)authp; + remainder = 0; + } else { + datap = (uint8_t *)(&(auth_data[processed])); + processed += block_size; + remainder -= block_size; + } + + xor_block(datap, mac_buf); + encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf); + + } while (remainder > 0); + + return (CRYPTO_SUCCESS); +} + +/* + * The following function should be call at encrypt or decrypt init time + * for AES CCM mode. + */ +int +ccm_init_ctx(ccm_ctx_t *ccm_ctx, char *param, int kmflag, + boolean_t is_encrypt_init, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + int rv; + CK_AES_CCM_PARAMS *ccm_param; + + if (param != NULL) { + ccm_param = (CK_AES_CCM_PARAMS *)param; + + if ((rv = ccm_validate_args(ccm_param, + is_encrypt_init)) != 0) { + return (rv); + } + + ccm_ctx->ccm_mac_len = ccm_param->ulMACSize; + if (is_encrypt_init) { + ccm_ctx->ccm_data_len = ccm_param->ulDataSize; + } else { + ccm_ctx->ccm_data_len = + ccm_param->ulDataSize - ccm_ctx->ccm_mac_len; + ccm_ctx->ccm_processed_mac_len = 0; + } + ccm_ctx->ccm_processed_data_len = 0; + + ccm_ctx->ccm_flags |= CCM_MODE; + } else { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + + if (ccm_init(ccm_ctx, ccm_param->nonce, ccm_param->ulNonceSize, + ccm_param->authData, ccm_param->ulAuthDataSize, block_size, + encrypt_block, xor_block) != 0) { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + if (!is_encrypt_init) { + /* allocate buffer for storing decrypted plaintext */ + ccm_ctx->ccm_pt_buf = vmem_alloc(ccm_ctx->ccm_data_len, + kmflag); + if (ccm_ctx->ccm_pt_buf == NULL) { + rv = CRYPTO_HOST_MEMORY; + } + } + return (rv); +} + +void * +ccm_alloc_ctx(int kmflag) +{ + ccm_ctx_t *ccm_ctx; + + if ((ccm_ctx = kmem_zalloc(sizeof (ccm_ctx_t), kmflag)) == NULL) + return (NULL); + + ccm_ctx->ccm_flags = CCM_MODE; + return (ccm_ctx); +} diff --git a/sys/contrib/openzfs/module/icp/algs/modes/ctr.c b/sys/contrib/openzfs/module/icp/algs/modes/ctr.c new file mode 100644 index 000000000000..0188bdd395ff --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/modes/ctr.c @@ -0,0 +1,228 @@ +/* + * CDDL HEADER START + * + * The contents of this file are 
subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <modes/modes.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> +#include <sys/byteorder.h> + +/* + * Encrypt and decrypt multiple blocks of data in counter mode. + */ +int +ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t remainder = length; + size_t need = 0; + uint8_t *datap = (uint8_t *)data; + uint8_t *blockp; + uint8_t *lastp; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + uint64_t lower_counter, upper_counter; + + if (length + ctx->ctr_remainder_len < block_size) { + /* accumulate bytes here and return */ + bcopy(datap, + (uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len, + length); + ctx->ctr_remainder_len += length; + ctx->ctr_copy_to = datap; + return (CRYPTO_SUCCESS); + } + + lastp = (uint8_t *)ctx->ctr_cb; + crypto_init_ptrs(out, &iov_or_mp, &offset); + + do { + /* Unprocessed data from last call. */ + if (ctx->ctr_remainder_len > 0) { + need = block_size - ctx->ctr_remainder_len; + + if (need > remainder) + return (CRYPTO_DATA_LEN_RANGE); + + bcopy(datap, &((uint8_t *)ctx->ctr_remainder) + [ctx->ctr_remainder_len], need); + + blockp = (uint8_t *)ctx->ctr_remainder; + } else { + blockp = datap; + } + + /* ctr_cb is the counter block */ + cipher(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb, + (uint8_t *)ctx->ctr_tmp); + + lastp = (uint8_t *)ctx->ctr_tmp; + + /* + * Increment Counter. + */ + lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask); + lower_counter = htonll(lower_counter + 1); + lower_counter &= ctx->ctr_lower_mask; + ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) | + lower_counter; + + /* wrap around */ + if (lower_counter == 0) { + upper_counter = + ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask); + upper_counter = htonll(upper_counter + 1); + upper_counter &= ctx->ctr_upper_mask; + ctx->ctr_cb[0] = + (ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) | + upper_counter; + } + + /* + * XOR encrypted counter block with the current clear block. + */ + xor_block(blockp, lastp); + + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, block_size); + + /* copy block to where it belongs */ + bcopy(lastp, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy(lastp + out_data_1_len, out_data_2, + block_size - out_data_1_len); + } + /* update offset */ + out->cd_offset += block_size; + + /* Update pointer to next block of data to be processed. 
*/ + if (ctx->ctr_remainder_len != 0) { + datap += need; + ctx->ctr_remainder_len = 0; + } else { + datap += block_size; + } + + remainder = (size_t)&data[length] - (size_t)datap; + + /* Incomplete last block. */ + if (remainder > 0 && remainder < block_size) { + bcopy(datap, ctx->ctr_remainder, remainder); + ctx->ctr_remainder_len = remainder; + ctx->ctr_copy_to = datap; + goto out; + } + ctx->ctr_copy_to = NULL; + + } while (remainder > 0); + +out: + return (CRYPTO_SUCCESS); +} + +int +ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *)) +{ + uint8_t *lastp; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + uint8_t *p; + int i; + + if (out->cd_length < ctx->ctr_remainder_len) + return (CRYPTO_DATA_LEN_RANGE); + + encrypt_block(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb, + (uint8_t *)ctx->ctr_tmp); + + lastp = (uint8_t *)ctx->ctr_tmp; + p = (uint8_t *)ctx->ctr_remainder; + for (i = 0; i < ctx->ctr_remainder_len; i++) { + p[i] ^= lastp[i]; + } + + crypto_init_ptrs(out, &iov_or_mp, &offset); + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, ctx->ctr_remainder_len); + + bcopy(p, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy((uint8_t *)p + out_data_1_len, + out_data_2, ctx->ctr_remainder_len - out_data_1_len); + } + out->cd_offset += ctx->ctr_remainder_len; + ctx->ctr_remainder_len = 0; + return (CRYPTO_SUCCESS); +} + +int +ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb, + void (*copy_block)(uint8_t *, uint8_t *)) +{ + uint64_t upper_mask = 0; + uint64_t lower_mask = 0; + + if (count == 0 || count > 128) { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + /* upper 64 bits of the mask */ + if (count >= 64) { + count -= 64; + upper_mask = (count == 64) ? UINT64_MAX : (1ULL << count) - 1; + lower_mask = UINT64_MAX; + } else { + /* now the lower 63 bits */ + lower_mask = (1ULL << count) - 1; + } + ctr_ctx->ctr_lower_mask = htonll(lower_mask); + ctr_ctx->ctr_upper_mask = htonll(upper_mask); + + copy_block(cb, (uchar_t *)ctr_ctx->ctr_cb); + ctr_ctx->ctr_lastp = (uint8_t *)&ctr_ctx->ctr_cb[0]; + ctr_ctx->ctr_flags |= CTR_MODE; + return (CRYPTO_SUCCESS); +} + +/* ARGSUSED */ +void * +ctr_alloc_ctx(int kmflag) +{ + ctr_ctx_t *ctr_ctx; + + if ((ctr_ctx = kmem_zalloc(sizeof (ctr_ctx_t), kmflag)) == NULL) + return (NULL); + + ctr_ctx->ctr_flags = CTR_MODE; + return (ctr_ctx); +} diff --git a/sys/contrib/openzfs/module/icp/algs/modes/ecb.c b/sys/contrib/openzfs/module/icp/algs/modes/ecb.c new file mode 100644 index 000000000000..025f5825cf04 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/modes/ecb.c @@ -0,0 +1,128 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <modes/modes.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> + +/* + * Algorithm independent ECB functions. + */ +int +ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct)) +{ + size_t remainder = length; + size_t need = 0; + uint8_t *datap = (uint8_t *)data; + uint8_t *blockp; + uint8_t *lastp; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + + if (length + ctx->ecb_remainder_len < block_size) { + /* accumulate bytes here and return */ + bcopy(datap, + (uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len, + length); + ctx->ecb_remainder_len += length; + ctx->ecb_copy_to = datap; + return (CRYPTO_SUCCESS); + } + + lastp = (uint8_t *)ctx->ecb_iv; + crypto_init_ptrs(out, &iov_or_mp, &offset); + + do { + /* Unprocessed data from last call. */ + if (ctx->ecb_remainder_len > 0) { + need = block_size - ctx->ecb_remainder_len; + + if (need > remainder) + return (CRYPTO_DATA_LEN_RANGE); + + bcopy(datap, &((uint8_t *)ctx->ecb_remainder) + [ctx->ecb_remainder_len], need); + + blockp = (uint8_t *)ctx->ecb_remainder; + } else { + blockp = datap; + } + + cipher(ctx->ecb_keysched, blockp, lastp); + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, block_size); + + /* copy block to where it belongs */ + bcopy(lastp, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy(lastp + out_data_1_len, out_data_2, + block_size - out_data_1_len); + } + /* update offset */ + out->cd_offset += block_size; + + /* Update pointer to next block of data to be processed. */ + if (ctx->ecb_remainder_len != 0) { + datap += need; + ctx->ecb_remainder_len = 0; + } else { + datap += block_size; + } + + remainder = (size_t)&data[length] - (size_t)datap; + + /* Incomplete last block. */ + if (remainder > 0 && remainder < block_size) { + bcopy(datap, ctx->ecb_remainder, remainder); + ctx->ecb_remainder_len = remainder; + ctx->ecb_copy_to = datap; + goto out; + } + ctx->ecb_copy_to = NULL; + + } while (remainder > 0); + +out: + return (CRYPTO_SUCCESS); +} + +/* ARGSUSED */ +void * +ecb_alloc_ctx(int kmflag) +{ + ecb_ctx_t *ecb_ctx; + + if ((ecb_ctx = kmem_zalloc(sizeof (ecb_ctx_t), kmflag)) == NULL) + return (NULL); + + ecb_ctx->ecb_flags = ECB_MODE; + return (ecb_ctx); +} diff --git a/sys/contrib/openzfs/module/icp/algs/modes/gcm.c b/sys/contrib/openzfs/module/icp/algs/modes/gcm.c new file mode 100644 index 000000000000..5553c55e11cd --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/modes/gcm.c @@ -0,0 +1,1543 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + */ + +#include <sys/zfs_context.h> +#include <modes/modes.h> +#include <sys/crypto/common.h> +#include <sys/crypto/icp.h> +#include <sys/crypto/impl.h> +#include <sys/byteorder.h> +#include <sys/simd.h> +#include <modes/gcm_impl.h> +#ifdef CAN_USE_GCM_ASM +#include <aes/aes_impl.h> +#endif + +#define GHASH(c, d, t, o) \ + xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \ + (o)->mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \ + (uint64_t *)(void *)(t)); + +/* Select GCM implementation */ +#define IMPL_FASTEST (UINT32_MAX) +#define IMPL_CYCLE (UINT32_MAX-1) +#ifdef CAN_USE_GCM_ASM +#define IMPL_AVX (UINT32_MAX-2) +#endif +#define GCM_IMPL_READ(i) (*(volatile uint32_t *) &(i)) +static uint32_t icp_gcm_impl = IMPL_FASTEST; +static uint32_t user_sel_impl = IMPL_FASTEST; + +#ifdef CAN_USE_GCM_ASM +/* Does the architecture we run on support the MOVBE instruction? */ +boolean_t gcm_avx_can_use_movbe = B_FALSE; +/* + * Whether to use the optimized openssl gcm and ghash implementations. + * Set to true if module parameter icp_gcm_impl == "avx". + */ +static boolean_t gcm_use_avx = B_FALSE; +#define GCM_IMPL_USE_AVX (*(volatile boolean_t *)&gcm_use_avx) + +static inline boolean_t gcm_avx_will_work(void); +static inline void gcm_set_avx(boolean_t); +static inline boolean_t gcm_toggle_avx(void); +extern boolean_t atomic_toggle_boolean_nv(volatile boolean_t *); + +static int gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *, char *, size_t, + crypto_data_t *, size_t); + +static int gcm_encrypt_final_avx(gcm_ctx_t *, crypto_data_t *, size_t); +static int gcm_decrypt_final_avx(gcm_ctx_t *, crypto_data_t *, size_t); +static int gcm_init_avx(gcm_ctx_t *, unsigned char *, size_t, unsigned char *, + size_t, size_t); +#endif /* ifdef CAN_USE_GCM_ASM */ + +/* + * Encrypt multiple blocks of data in GCM mode. Decrypt for GCM mode + * is done in another function. 
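+ *
+ * For every full input block the loop below effectively computes
+ *
+ *     inc32(CB)                      (low 32 bits of gcm_cb)
+ *     C_i = P_i ^ E(K, CB)
+ *     GHASH = (GHASH ^ C_i) * H      (via the GHASH() macro)
+ *
+ * A trailing partial block is buffered in gcm_remainder until more
+ * data arrives or gcm_encrypt_final() processes it.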
+ */ +int +gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ +#ifdef CAN_USE_GCM_ASM + if (ctx->gcm_use_avx == B_TRUE) + return (gcm_mode_encrypt_contiguous_blocks_avx( + ctx, data, length, out, block_size)); +#endif + + const gcm_impl_ops_t *gops; + size_t remainder = length; + size_t need = 0; + uint8_t *datap = (uint8_t *)data; + uint8_t *blockp; + uint8_t *lastp; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + uint64_t counter; + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + + if (length + ctx->gcm_remainder_len < block_size) { + /* accumulate bytes here and return */ + bcopy(datap, + (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len, + length); + ctx->gcm_remainder_len += length; + if (ctx->gcm_copy_to == NULL) { + ctx->gcm_copy_to = datap; + } + return (CRYPTO_SUCCESS); + } + + lastp = (uint8_t *)ctx->gcm_cb; + crypto_init_ptrs(out, &iov_or_mp, &offset); + + gops = gcm_impl_get_ops(); + do { + /* Unprocessed data from last call. */ + if (ctx->gcm_remainder_len > 0) { + need = block_size - ctx->gcm_remainder_len; + + if (need > remainder) + return (CRYPTO_DATA_LEN_RANGE); + + bcopy(datap, &((uint8_t *)ctx->gcm_remainder) + [ctx->gcm_remainder_len], need); + + blockp = (uint8_t *)ctx->gcm_remainder; + } else { + blockp = datap; + } + + /* + * Increment counter. Counter bits are confined + * to the bottom 32 bits of the counter block. + */ + counter = ntohll(ctx->gcm_cb[1] & counter_mask); + counter = htonll(counter + 1); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; + + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, + (uint8_t *)ctx->gcm_tmp); + xor_block(blockp, (uint8_t *)ctx->gcm_tmp); + + lastp = (uint8_t *)ctx->gcm_tmp; + + ctx->gcm_processed_data_len += block_size; + + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, block_size); + + /* copy block to where it belongs */ + if (out_data_1_len == block_size) { + copy_block(lastp, out_data_1); + } else { + bcopy(lastp, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy(lastp + out_data_1_len, + out_data_2, + block_size - out_data_1_len); + } + } + /* update offset */ + out->cd_offset += block_size; + + /* add ciphertext to the hash */ + GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gops); + + /* Update pointer to next block of data to be processed. */ + if (ctx->gcm_remainder_len != 0) { + datap += need; + ctx->gcm_remainder_len = 0; + } else { + datap += block_size; + } + + remainder = (size_t)&data[length] - (size_t)datap; + + /* Incomplete last block. 
*/ + if (remainder > 0 && remainder < block_size) { + bcopy(datap, ctx->gcm_remainder, remainder); + ctx->gcm_remainder_len = remainder; + ctx->gcm_copy_to = datap; + goto out; + } + ctx->gcm_copy_to = NULL; + + } while (remainder > 0); +out: + return (CRYPTO_SUCCESS); +} + +/* ARGSUSED */ +int +gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ +#ifdef CAN_USE_GCM_ASM + if (ctx->gcm_use_avx == B_TRUE) + return (gcm_encrypt_final_avx(ctx, out, block_size)); +#endif + + const gcm_impl_ops_t *gops; + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + uint8_t *ghash, *macp = NULL; + int i, rv; + + if (out->cd_length < + (ctx->gcm_remainder_len + ctx->gcm_tag_len)) { + return (CRYPTO_DATA_LEN_RANGE); + } + + gops = gcm_impl_get_ops(); + ghash = (uint8_t *)ctx->gcm_ghash; + + if (ctx->gcm_remainder_len > 0) { + uint64_t counter; + uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp; + + /* + * Here is where we deal with data that is not a + * multiple of the block size. + */ + + /* + * Increment counter. + */ + counter = ntohll(ctx->gcm_cb[1] & counter_mask); + counter = htonll(counter + 1); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; + + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, + (uint8_t *)ctx->gcm_tmp); + + macp = (uint8_t *)ctx->gcm_remainder; + bzero(macp + ctx->gcm_remainder_len, + block_size - ctx->gcm_remainder_len); + + /* XOR with counter block */ + for (i = 0; i < ctx->gcm_remainder_len; i++) { + macp[i] ^= tmpp[i]; + } + + /* add ciphertext to the hash */ + GHASH(ctx, macp, ghash, gops); + + ctx->gcm_processed_data_len += ctx->gcm_remainder_len; + } + + ctx->gcm_len_a_len_c[1] = + htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len)); + GHASH(ctx, ctx->gcm_len_a_len_c, ghash, gops); + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0, + (uint8_t *)ctx->gcm_J0); + xor_block((uint8_t *)ctx->gcm_J0, ghash); + + if (ctx->gcm_remainder_len > 0) { + rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + } + out->cd_offset += ctx->gcm_remainder_len; + ctx->gcm_remainder_len = 0; + rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + out->cd_offset += ctx->gcm_tag_len; + + return (CRYPTO_SUCCESS); +} + +/* + * This will only deal with decrypting the last block of the input that + * might not be a multiple of block length. + */ +static void +gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + uint8_t *datap, *outp, *counterp; + uint64_t counter; + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + int i; + + /* + * Increment counter. 
+ * Counter bits are confined to the bottom 32 bits + */ + counter = ntohll(ctx->gcm_cb[1] & counter_mask); + counter = htonll(counter + 1); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; + + datap = (uint8_t *)ctx->gcm_remainder; + outp = &((ctx->gcm_pt_buf)[index]); + counterp = (uint8_t *)ctx->gcm_tmp; + + /* authentication tag */ + bzero((uint8_t *)ctx->gcm_tmp, block_size); + bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len); + + /* add ciphertext to the hash */ + GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gcm_impl_get_ops()); + + /* decrypt remaining ciphertext */ + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp); + + /* XOR with counter block */ + for (i = 0; i < ctx->gcm_remainder_len; i++) { + outp[i] = datap[i] ^ counterp[i]; + } +} + +/* ARGSUSED */ +int +gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t new_len; + uint8_t *new; + + /* + * Copy contiguous ciphertext input blocks to plaintext buffer. + * Ciphertext will be decrypted in the final. + */ + if (length > 0) { + new_len = ctx->gcm_pt_buf_len + length; + new = vmem_alloc(new_len, ctx->gcm_kmflag); + if (new == NULL) { + vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); + ctx->gcm_pt_buf = NULL; + return (CRYPTO_HOST_MEMORY); + } + bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len); + vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); + ctx->gcm_pt_buf = new; + ctx->gcm_pt_buf_len = new_len; + bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len], + length); + ctx->gcm_processed_data_len += length; + } + + ctx->gcm_remainder_len = 0; + return (CRYPTO_SUCCESS); +} + +int +gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ +#ifdef CAN_USE_GCM_ASM + if (ctx->gcm_use_avx == B_TRUE) + return (gcm_decrypt_final_avx(ctx, out, block_size)); +#endif + + const gcm_impl_ops_t *gops; + size_t pt_len; + size_t remainder; + uint8_t *ghash; + uint8_t *blockp; + uint8_t *cbp; + uint64_t counter; + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + int processed = 0, rv; + + ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len); + + gops = gcm_impl_get_ops(); + pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len; + ghash = (uint8_t *)ctx->gcm_ghash; + blockp = ctx->gcm_pt_buf; + remainder = pt_len; + while (remainder > 0) { + /* Incomplete last block */ + if (remainder < block_size) { + bcopy(blockp, ctx->gcm_remainder, remainder); + ctx->gcm_remainder_len = remainder; + /* + * not expecting anymore ciphertext, just + * compute plaintext for the remaining input + */ + gcm_decrypt_incomplete_block(ctx, block_size, + processed, encrypt_block, xor_block); + ctx->gcm_remainder_len = 0; + goto out; + } + /* add ciphertext to the hash */ + GHASH(ctx, blockp, ghash, gops); + + /* + * Increment counter. 
+ * Counter bits are confined to the bottom 32 bits + */ + counter = ntohll(ctx->gcm_cb[1] & counter_mask); + counter = htonll(counter + 1); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; + + cbp = (uint8_t *)ctx->gcm_tmp; + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp); + + /* XOR with ciphertext */ + xor_block(cbp, blockp); + + processed += block_size; + blockp += block_size; + remainder -= block_size; + } +out: + ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len)); + GHASH(ctx, ctx->gcm_len_a_len_c, ghash, gops); + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0, + (uint8_t *)ctx->gcm_J0); + xor_block((uint8_t *)ctx->gcm_J0, ghash); + + /* compare the input authentication tag with what we calculated */ + if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) { + /* They don't match */ + return (CRYPTO_INVALID_MAC); + } else { + rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + out->cd_offset += pt_len; + } + return (CRYPTO_SUCCESS); +} + +static int +gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param) +{ + size_t tag_len; + + /* + * Check the length of the authentication tag (in bits). + */ + tag_len = gcm_param->ulTagBits; + switch (tag_len) { + case 32: + case 64: + case 96: + case 104: + case 112: + case 120: + case 128: + break; + default: + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + + if (gcm_param->ulIvLen == 0) + return (CRYPTO_MECHANISM_PARAM_INVALID); + + return (CRYPTO_SUCCESS); +} + +static void +gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len, + gcm_ctx_t *ctx, size_t block_size, + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + const gcm_impl_ops_t *gops; + uint8_t *cb; + ulong_t remainder = iv_len; + ulong_t processed = 0; + uint8_t *datap, *ghash; + uint64_t len_a_len_c[2]; + + gops = gcm_impl_get_ops(); + ghash = (uint8_t *)ctx->gcm_ghash; + cb = (uint8_t *)ctx->gcm_cb; + if (iv_len == 12) { + bcopy(iv, cb, 12); + cb[12] = 0; + cb[13] = 0; + cb[14] = 0; + cb[15] = 1; + /* J0 will be used again in the final */ + copy_block(cb, (uint8_t *)ctx->gcm_J0); + } else { + /* GHASH the IV */ + do { + if (remainder < block_size) { + bzero(cb, block_size); + bcopy(&(iv[processed]), cb, remainder); + datap = (uint8_t *)cb; + remainder = 0; + } else { + datap = (uint8_t *)(&(iv[processed])); + processed += block_size; + remainder -= block_size; + } + GHASH(ctx, datap, ghash, gops); + } while (remainder > 0); + + len_a_len_c[0] = 0; + len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len)); + GHASH(ctx, len_a_len_c, ctx->gcm_J0, gops); + + /* J0 will be used again in the final */ + copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb); + } +} + +static int +gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len, + unsigned char *auth_data, size_t auth_data_len, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + const gcm_impl_ops_t *gops; + uint8_t *ghash, *datap, *authp; + size_t remainder, processed; + + /* encrypt zero block to get subkey H */ + bzero(ctx->gcm_H, sizeof (ctx->gcm_H)); + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H, + (uint8_t *)ctx->gcm_H); + + gcm_format_initial_blocks(iv, iv_len, ctx, block_size, + copy_block, xor_block); + + gops = gcm_impl_get_ops(); + authp = (uint8_t *)ctx->gcm_tmp; + ghash = (uint8_t *)ctx->gcm_ghash; + bzero(authp, block_size); + 
bzero(ghash, block_size); + + processed = 0; + remainder = auth_data_len; + do { + if (remainder < block_size) { + /* + * There's not a block full of data, pad rest of + * buffer with zero + */ + bzero(authp, block_size); + bcopy(&(auth_data[processed]), authp, remainder); + datap = (uint8_t *)authp; + remainder = 0; + } else { + datap = (uint8_t *)(&(auth_data[processed])); + processed += block_size; + remainder -= block_size; + } + + /* add auth data to the hash */ + GHASH(ctx, datap, ghash, gops); + + } while (remainder > 0); + + return (CRYPTO_SUCCESS); +} + +/* + * The following function is called at encrypt or decrypt init time + * for AES GCM mode. + * + * Init the GCM context struct. Handle the cycle and avx implementations here. + */ +int +gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + int rv; + CK_AES_GCM_PARAMS *gcm_param; + + if (param != NULL) { + gcm_param = (CK_AES_GCM_PARAMS *)(void *)param; + + if ((rv = gcm_validate_args(gcm_param)) != 0) { + return (rv); + } + + gcm_ctx->gcm_tag_len = gcm_param->ulTagBits; + gcm_ctx->gcm_tag_len >>= 3; + gcm_ctx->gcm_processed_data_len = 0; + + /* these values are in bits */ + gcm_ctx->gcm_len_a_len_c[0] + = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen)); + + rv = CRYPTO_SUCCESS; + gcm_ctx->gcm_flags |= GCM_MODE; + } else { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + +#ifdef CAN_USE_GCM_ASM + if (GCM_IMPL_READ(icp_gcm_impl) != IMPL_CYCLE) { + gcm_ctx->gcm_use_avx = GCM_IMPL_USE_AVX; + } else { + /* + * Handle the "cycle" implementation by creating avx and + * non-avx contexts alternately. + */ + gcm_ctx->gcm_use_avx = gcm_toggle_avx(); + /* + * We don't handle byte swapped key schedules in the avx + * code path. + */ + aes_key_t *ks = (aes_key_t *)gcm_ctx->gcm_keysched; + if (ks->ops->needs_byteswap == B_TRUE) { + gcm_ctx->gcm_use_avx = B_FALSE; + } + /* Use the MOVBE and the BSWAP variants alternately. */ + if (gcm_ctx->gcm_use_avx == B_TRUE && + zfs_movbe_available() == B_TRUE) { + (void) atomic_toggle_boolean_nv( + (volatile boolean_t *)&gcm_avx_can_use_movbe); + } + } + /* Avx and non avx context initialization differs from here on. 
*/ + if (gcm_ctx->gcm_use_avx == B_FALSE) { +#endif /* ifdef CAN_USE_GCM_ASM */ + if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen, + gcm_param->pAAD, gcm_param->ulAADLen, block_size, + encrypt_block, copy_block, xor_block) != 0) { + rv = CRYPTO_MECHANISM_PARAM_INVALID; + } +#ifdef CAN_USE_GCM_ASM + } else { + if (gcm_init_avx(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen, + gcm_param->pAAD, gcm_param->ulAADLen, block_size) != 0) { + rv = CRYPTO_MECHANISM_PARAM_INVALID; + } + } +#endif /* ifdef CAN_USE_GCM_ASM */ + + return (rv); +} + +int +gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + int rv; + CK_AES_GMAC_PARAMS *gmac_param; + + if (param != NULL) { + gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param; + + gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS); + gcm_ctx->gcm_processed_data_len = 0; + + /* these values are in bits */ + gcm_ctx->gcm_len_a_len_c[0] + = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen)); + + rv = CRYPTO_SUCCESS; + gcm_ctx->gcm_flags |= GMAC_MODE; + } else { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + +#ifdef CAN_USE_GCM_ASM + /* + * Handle the "cycle" implementation by creating avx and non avx + * contexts alternately. + */ + if (GCM_IMPL_READ(icp_gcm_impl) != IMPL_CYCLE) { + gcm_ctx->gcm_use_avx = GCM_IMPL_USE_AVX; + } else { + gcm_ctx->gcm_use_avx = gcm_toggle_avx(); + } + /* We don't handle byte swapped key schedules in the avx code path. */ + aes_key_t *ks = (aes_key_t *)gcm_ctx->gcm_keysched; + if (ks->ops->needs_byteswap == B_TRUE) { + gcm_ctx->gcm_use_avx = B_FALSE; + } + /* Avx and non avx context initialization differs from here on. */ + if (gcm_ctx->gcm_use_avx == B_FALSE) { +#endif /* ifdef CAN_USE_GCM_ASM */ + if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN, + gmac_param->pAAD, gmac_param->ulAADLen, block_size, + encrypt_block, copy_block, xor_block) != 0) { + rv = CRYPTO_MECHANISM_PARAM_INVALID; + } +#ifdef CAN_USE_GCM_ASM + } else { + if (gcm_init_avx(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN, + gmac_param->pAAD, gmac_param->ulAADLen, block_size) != 0) { + rv = CRYPTO_MECHANISM_PARAM_INVALID; + } + } +#endif /* ifdef CAN_USE_GCM_ASM */ + + return (rv); +} + +void * +gcm_alloc_ctx(int kmflag) +{ + gcm_ctx_t *gcm_ctx; + + if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL) + return (NULL); + + gcm_ctx->gcm_flags = GCM_MODE; + return (gcm_ctx); +} + +void * +gmac_alloc_ctx(int kmflag) +{ + gcm_ctx_t *gcm_ctx; + + if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL) + return (NULL); + + gcm_ctx->gcm_flags = GMAC_MODE; + return (gcm_ctx); +} + +void +gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag) +{ + ctx->gcm_kmflag = kmflag; +} + +/* GCM implementation that contains the fastest methods */ +static gcm_impl_ops_t gcm_fastest_impl = { + .name = "fastest" +}; + +/* All compiled in implementations */ +const gcm_impl_ops_t *gcm_all_impl[] = { + &gcm_generic_impl, +#if defined(__x86_64) && defined(HAVE_PCLMULQDQ) + &gcm_pclmulqdq_impl, +#endif +}; + +/* Indicate that benchmark has been completed */ +static boolean_t gcm_impl_initialized = B_FALSE; + +/* Hold all supported implementations */ +static size_t gcm_supp_impl_cnt = 0; +static gcm_impl_ops_t *gcm_supp_impl[ARRAY_SIZE(gcm_all_impl)]; + +/* + * Returns the GCM operations for encrypt/decrypt/key setup. 
When a + * SIMD implementation is not allowed in the current context, then + * fallback to the fastest generic implementation. + */ +const gcm_impl_ops_t * +gcm_impl_get_ops() +{ + if (!kfpu_allowed()) + return (&gcm_generic_impl); + + const gcm_impl_ops_t *ops = NULL; + const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl); + + switch (impl) { + case IMPL_FASTEST: + ASSERT(gcm_impl_initialized); + ops = &gcm_fastest_impl; + break; + case IMPL_CYCLE: + /* Cycle through supported implementations */ + ASSERT(gcm_impl_initialized); + ASSERT3U(gcm_supp_impl_cnt, >, 0); + static size_t cycle_impl_idx = 0; + size_t idx = (++cycle_impl_idx) % gcm_supp_impl_cnt; + ops = gcm_supp_impl[idx]; + break; +#ifdef CAN_USE_GCM_ASM + case IMPL_AVX: + /* + * Make sure that we return a valid implementation while + * switching to the avx implementation since there still + * may be unfinished non-avx contexts around. + */ + ops = &gcm_generic_impl; + break; +#endif + default: + ASSERT3U(impl, <, gcm_supp_impl_cnt); + ASSERT3U(gcm_supp_impl_cnt, >, 0); + if (impl < ARRAY_SIZE(gcm_all_impl)) + ops = gcm_supp_impl[impl]; + break; + } + + ASSERT3P(ops, !=, NULL); + + return (ops); +} + +/* + * Initialize all supported implementations. + */ +void +gcm_impl_init(void) +{ + gcm_impl_ops_t *curr_impl; + int i, c; + + /* Move supported implementations into gcm_supp_impls */ + for (i = 0, c = 0; i < ARRAY_SIZE(gcm_all_impl); i++) { + curr_impl = (gcm_impl_ops_t *)gcm_all_impl[i]; + + if (curr_impl->is_supported()) + gcm_supp_impl[c++] = (gcm_impl_ops_t *)curr_impl; + } + gcm_supp_impl_cnt = c; + + /* + * Set the fastest implementation given the assumption that the + * hardware accelerated version is the fastest. + */ +#if defined(__x86_64) && defined(HAVE_PCLMULQDQ) + if (gcm_pclmulqdq_impl.is_supported()) { + memcpy(&gcm_fastest_impl, &gcm_pclmulqdq_impl, + sizeof (gcm_fastest_impl)); + } else +#endif + { + memcpy(&gcm_fastest_impl, &gcm_generic_impl, + sizeof (gcm_fastest_impl)); + } + + strlcpy(gcm_fastest_impl.name, "fastest", GCM_IMPL_NAME_MAX); + +#ifdef CAN_USE_GCM_ASM + /* + * Use the avx implementation if it's available and the implementation + * hasn't changed from its default value of fastest on module load. + */ + if (gcm_avx_will_work()) { +#ifdef HAVE_MOVBE + if (zfs_movbe_available() == B_TRUE) { + atomic_swap_32(&gcm_avx_can_use_movbe, B_TRUE); + } +#endif + if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) { + gcm_set_avx(B_TRUE); + } + } +#endif + /* Finish initialization */ + atomic_swap_32(&icp_gcm_impl, user_sel_impl); + gcm_impl_initialized = B_TRUE; +} + +static const struct { + char *name; + uint32_t sel; +} gcm_impl_opts[] = { + { "cycle", IMPL_CYCLE }, + { "fastest", IMPL_FASTEST }, +#ifdef CAN_USE_GCM_ASM + { "avx", IMPL_AVX }, +#endif +}; + +/* + * Function sets desired gcm implementation. + * + * If we are called before init(), user preference will be saved in + * user_sel_impl, and applied in later init() call. This occurs when module + * parameter is specified on module load. Otherwise, directly update + * icp_gcm_impl. + * + * @val Name of gcm implementation to use + * @param Unused. 
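+ *
+ * Accepted names are the always-present options "cycle" and "fastest"
+ * (plus "avx" when the AVX code path can be used) and, once
+ * gcm_impl_init() has run, any entry in gcm_supp_impl such as
+ * "generic" or "pclmulqdq".  On Linux this function is reached via the
+ * icp_gcm_impl module parameter registered below.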
+ */ +int +gcm_impl_set(const char *val) +{ + int err = -EINVAL; + char req_name[GCM_IMPL_NAME_MAX]; + uint32_t impl = GCM_IMPL_READ(user_sel_impl); + size_t i; + + /* sanitize input */ + i = strnlen(val, GCM_IMPL_NAME_MAX); + if (i == 0 || i >= GCM_IMPL_NAME_MAX) + return (err); + + strlcpy(req_name, val, GCM_IMPL_NAME_MAX); + while (i > 0 && isspace(req_name[i-1])) + i--; + req_name[i] = '\0'; + + /* Check mandatory options */ + for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) { +#ifdef CAN_USE_GCM_ASM + /* Ignore avx implementation if it won't work. */ + if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) { + continue; + } +#endif + if (strcmp(req_name, gcm_impl_opts[i].name) == 0) { + impl = gcm_impl_opts[i].sel; + err = 0; + break; + } + } + + /* check all supported impl if init() was already called */ + if (err != 0 && gcm_impl_initialized) { + /* check all supported implementations */ + for (i = 0; i < gcm_supp_impl_cnt; i++) { + if (strcmp(req_name, gcm_supp_impl[i]->name) == 0) { + impl = i; + err = 0; + break; + } + } + } +#ifdef CAN_USE_GCM_ASM + /* + * Use the avx implementation if available and the requested one is + * avx or fastest. + */ + if (gcm_avx_will_work() == B_TRUE && + (impl == IMPL_AVX || impl == IMPL_FASTEST)) { + gcm_set_avx(B_TRUE); + } else { + gcm_set_avx(B_FALSE); + } +#endif + + if (err == 0) { + if (gcm_impl_initialized) + atomic_swap_32(&icp_gcm_impl, impl); + else + atomic_swap_32(&user_sel_impl, impl); + } + + return (err); +} + +#if defined(_KERNEL) && defined(__linux__) + +static int +icp_gcm_impl_set(const char *val, zfs_kernel_param_t *kp) +{ + return (gcm_impl_set(val)); +} + +static int +icp_gcm_impl_get(char *buffer, zfs_kernel_param_t *kp) +{ + int i, cnt = 0; + char *fmt; + const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl); + + ASSERT(gcm_impl_initialized); + + /* list mandatory options */ + for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) { +#ifdef CAN_USE_GCM_ASM + /* Ignore avx implementation if it won't work. */ + if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) { + continue; + } +#endif + fmt = (impl == gcm_impl_opts[i].sel) ? "[%s] " : "%s "; + cnt += sprintf(buffer + cnt, fmt, gcm_impl_opts[i].name); + } + + /* list all supported implementations */ + for (i = 0; i < gcm_supp_impl_cnt; i++) { + fmt = (i == impl) ? "[%s] " : "%s "; + cnt += sprintf(buffer + cnt, fmt, gcm_supp_impl[i]->name); + } + + return (cnt); +} + +module_param_call(icp_gcm_impl, icp_gcm_impl_set, icp_gcm_impl_get, + NULL, 0644); +MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation."); +#endif /* defined(__KERNEL) */ + +#ifdef CAN_USE_GCM_ASM +#define GCM_BLOCK_LEN 16 +/* + * The openssl asm routines are 6x aggregated and need that many bytes + * at minimum. + */ +#define GCM_AVX_MIN_DECRYPT_BYTES (GCM_BLOCK_LEN * 6) +#define GCM_AVX_MIN_ENCRYPT_BYTES (GCM_BLOCK_LEN * 6 * 3) +/* + * Ensure the chunk size is reasonable since we are allocating a + * GCM_AVX_MAX_CHUNK_SIZEd buffer and disabling preemption and interrupts. + */ +#define GCM_AVX_MAX_CHUNK_SIZE \ + (((128*1024)/GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES) + +/* Get the chunk size module parameter. */ +#define GCM_CHUNK_SIZE_READ *(volatile uint32_t *) &gcm_avx_chunk_size + +/* Clear the FPU registers since they hold sensitive internal state. 
*/ +#define clear_fpu_regs() clear_fpu_regs_avx() +#define GHASH_AVX(ctx, in, len) \ + gcm_ghash_avx((ctx)->gcm_ghash, (const uint64_t (*)[2])(ctx)->gcm_Htable, \ + in, len) + +#define gcm_incr_counter_block(ctx) gcm_incr_counter_block_by(ctx, 1) + +/* + * Module parameter: number of bytes to process at once while owning the FPU. + * Rounded down to the next GCM_AVX_MIN_DECRYPT_BYTES byte boundary and is + * ensured to be greater or equal than GCM_AVX_MIN_DECRYPT_BYTES. + */ +static uint32_t gcm_avx_chunk_size = + ((32 * 1024) / GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES; + +extern void clear_fpu_regs_avx(void); +extern void gcm_xor_avx(const uint8_t *src, uint8_t *dst); +extern void aes_encrypt_intel(const uint32_t rk[], int nr, + const uint32_t pt[4], uint32_t ct[4]); + +extern void gcm_init_htab_avx(uint64_t Htable[16][2], const uint64_t H[2]); +extern void gcm_ghash_avx(uint64_t ghash[2], const uint64_t Htable[16][2], + const uint8_t *in, size_t len); + +extern size_t aesni_gcm_encrypt(const uint8_t *, uint8_t *, size_t, + const void *, uint64_t *, uint64_t *); + +extern size_t aesni_gcm_decrypt(const uint8_t *, uint8_t *, size_t, + const void *, uint64_t *, uint64_t *); + +static inline boolean_t +gcm_avx_will_work(void) +{ + /* Avx should imply aes-ni and pclmulqdq, but make sure anyhow. */ + return (kfpu_allowed() && + zfs_avx_available() && zfs_aes_available() && + zfs_pclmulqdq_available()); +} + +static inline void +gcm_set_avx(boolean_t val) +{ + if (gcm_avx_will_work() == B_TRUE) { + atomic_swap_32(&gcm_use_avx, val); + } +} + +static inline boolean_t +gcm_toggle_avx(void) +{ + if (gcm_avx_will_work() == B_TRUE) { + return (atomic_toggle_boolean_nv(&GCM_IMPL_USE_AVX)); + } else { + return (B_FALSE); + } +} + +/* + * Clear sensitive data in the context. + * + * ctx->gcm_remainder may contain a plaintext remainder. ctx->gcm_H and + * ctx->gcm_Htable contain the hash sub key which protects authentication. + * + * Although extremely unlikely, ctx->gcm_J0 and ctx->gcm_tmp could be used for + * a known plaintext attack, they consists of the IV and the first and last + * counter respectively. If they should be cleared is debatable. + */ +static inline void +gcm_clear_ctx(gcm_ctx_t *ctx) +{ + bzero(ctx->gcm_remainder, sizeof (ctx->gcm_remainder)); + bzero(ctx->gcm_H, sizeof (ctx->gcm_H)); + bzero(ctx->gcm_Htable, sizeof (ctx->gcm_Htable)); + bzero(ctx->gcm_J0, sizeof (ctx->gcm_J0)); + bzero(ctx->gcm_tmp, sizeof (ctx->gcm_tmp)); +} + +/* Increment the GCM counter block by n. */ +static inline void +gcm_incr_counter_block_by(gcm_ctx_t *ctx, int n) +{ + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + uint64_t counter = ntohll(ctx->gcm_cb[1] & counter_mask); + + counter = htonll(counter + n); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; +} + +/* + * Encrypt multiple blocks of data in GCM mode. + * This is done in gcm_avx_chunk_size chunks, utilizing AVX assembler routines + * if possible. While processing a chunk the FPU is "locked". 
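+ *
+ * Each chunk is handled inside its own FPU section, roughly:
+ *
+ *     kfpu_begin();
+ *     done = aesni_gcm_encrypt(datap, ct_buf, chunk_size, key, cb, ghash);
+ *     clear_fpu_regs();
+ *     kfpu_end();
+ *
+ * so preemption is never held off for more than one chunk at a time.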
+ */ +static int +gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data, + size_t length, crypto_data_t *out, size_t block_size) +{ + size_t bleft = length; + size_t need = 0; + size_t done = 0; + uint8_t *datap = (uint8_t *)data; + size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ; + const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched); + uint64_t *ghash = ctx->gcm_ghash; + uint64_t *cb = ctx->gcm_cb; + uint8_t *ct_buf = NULL; + uint8_t *tmp = (uint8_t *)ctx->gcm_tmp; + int rv = CRYPTO_SUCCESS; + + ASSERT(block_size == GCM_BLOCK_LEN); + /* + * If the last call left an incomplete block, try to fill + * it first. + */ + if (ctx->gcm_remainder_len > 0) { + need = block_size - ctx->gcm_remainder_len; + if (length < need) { + /* Accumulate bytes here and return. */ + bcopy(datap, (uint8_t *)ctx->gcm_remainder + + ctx->gcm_remainder_len, length); + + ctx->gcm_remainder_len += length; + if (ctx->gcm_copy_to == NULL) { + ctx->gcm_copy_to = datap; + } + return (CRYPTO_SUCCESS); + } else { + /* Complete incomplete block. */ + bcopy(datap, (uint8_t *)ctx->gcm_remainder + + ctx->gcm_remainder_len, need); + + ctx->gcm_copy_to = NULL; + } + } + + /* Allocate a buffer to encrypt to if there is enough input. */ + if (bleft >= GCM_AVX_MIN_ENCRYPT_BYTES) { + ct_buf = vmem_alloc(chunk_size, ctx->gcm_kmflag); + if (ct_buf == NULL) { + return (CRYPTO_HOST_MEMORY); + } + } + + /* If we completed an incomplete block, encrypt and write it out. */ + if (ctx->gcm_remainder_len > 0) { + kfpu_begin(); + aes_encrypt_intel(key->encr_ks.ks32, key->nr, + (const uint32_t *)cb, (uint32_t *)tmp); + + gcm_xor_avx((const uint8_t *) ctx->gcm_remainder, tmp); + GHASH_AVX(ctx, tmp, block_size); + clear_fpu_regs(); + kfpu_end(); + rv = crypto_put_output_data(tmp, out, block_size); + out->cd_offset += block_size; + gcm_incr_counter_block(ctx); + ctx->gcm_processed_data_len += block_size; + bleft -= need; + datap += need; + ctx->gcm_remainder_len = 0; + } + + /* Do the bulk encryption in chunk_size blocks. */ + for (; bleft >= chunk_size; bleft -= chunk_size) { + kfpu_begin(); + done = aesni_gcm_encrypt( + datap, ct_buf, chunk_size, key, cb, ghash); + + clear_fpu_regs(); + kfpu_end(); + if (done != chunk_size) { + rv = CRYPTO_FAILED; + goto out_nofpu; + } + rv = crypto_put_output_data(ct_buf, out, chunk_size); + if (rv != CRYPTO_SUCCESS) { + goto out_nofpu; + } + out->cd_offset += chunk_size; + datap += chunk_size; + ctx->gcm_processed_data_len += chunk_size; + } + /* Check if we are already done. */ + if (bleft == 0) { + goto out_nofpu; + } + /* Bulk encrypt the remaining data. */ + kfpu_begin(); + if (bleft >= GCM_AVX_MIN_ENCRYPT_BYTES) { + done = aesni_gcm_encrypt(datap, ct_buf, bleft, key, cb, ghash); + if (done == 0) { + rv = CRYPTO_FAILED; + goto out; + } + rv = crypto_put_output_data(ct_buf, out, done); + if (rv != CRYPTO_SUCCESS) { + goto out; + } + out->cd_offset += done; + ctx->gcm_processed_data_len += done; + datap += done; + bleft -= done; + + } + /* Less than GCM_AVX_MIN_ENCRYPT_BYTES remain, operate on blocks. */ + while (bleft > 0) { + if (bleft < block_size) { + bcopy(datap, ctx->gcm_remainder, bleft); + ctx->gcm_remainder_len = bleft; + ctx->gcm_copy_to = datap; + goto out; + } + /* Encrypt, hash and write out. 
*/ + aes_encrypt_intel(key->encr_ks.ks32, key->nr, + (const uint32_t *)cb, (uint32_t *)tmp); + + gcm_xor_avx(datap, tmp); + GHASH_AVX(ctx, tmp, block_size); + rv = crypto_put_output_data(tmp, out, block_size); + if (rv != CRYPTO_SUCCESS) { + goto out; + } + out->cd_offset += block_size; + gcm_incr_counter_block(ctx); + ctx->gcm_processed_data_len += block_size; + datap += block_size; + bleft -= block_size; + } +out: + clear_fpu_regs(); + kfpu_end(); +out_nofpu: + if (ct_buf != NULL) { + vmem_free(ct_buf, chunk_size); + } + return (rv); +} + +/* + * Finalize the encryption: Zero fill, encrypt, hash and write out an eventual + * incomplete last block. Encrypt the ICB. Calculate the tag and write it out. + */ +static int +gcm_encrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size) +{ + uint8_t *ghash = (uint8_t *)ctx->gcm_ghash; + uint32_t *J0 = (uint32_t *)ctx->gcm_J0; + uint8_t *remainder = (uint8_t *)ctx->gcm_remainder; + size_t rem_len = ctx->gcm_remainder_len; + const void *keysched = ((aes_key_t *)ctx->gcm_keysched)->encr_ks.ks32; + int aes_rounds = ((aes_key_t *)keysched)->nr; + int rv; + + ASSERT(block_size == GCM_BLOCK_LEN); + + if (out->cd_length < (rem_len + ctx->gcm_tag_len)) { + return (CRYPTO_DATA_LEN_RANGE); + } + + kfpu_begin(); + /* Pad last incomplete block with zeros, encrypt and hash. */ + if (rem_len > 0) { + uint8_t *tmp = (uint8_t *)ctx->gcm_tmp; + const uint32_t *cb = (uint32_t *)ctx->gcm_cb; + + aes_encrypt_intel(keysched, aes_rounds, cb, (uint32_t *)tmp); + bzero(remainder + rem_len, block_size - rem_len); + for (int i = 0; i < rem_len; i++) { + remainder[i] ^= tmp[i]; + } + GHASH_AVX(ctx, remainder, block_size); + ctx->gcm_processed_data_len += rem_len; + /* No need to increment counter_block, it's the last block. */ + } + /* Finish tag. */ + ctx->gcm_len_a_len_c[1] = + htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len)); + GHASH_AVX(ctx, (const uint8_t *)ctx->gcm_len_a_len_c, block_size); + aes_encrypt_intel(keysched, aes_rounds, J0, J0); + + gcm_xor_avx((uint8_t *)J0, ghash); + clear_fpu_regs(); + kfpu_end(); + + /* Output remainder. */ + if (rem_len > 0) { + rv = crypto_put_output_data(remainder, out, rem_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + } + out->cd_offset += rem_len; + ctx->gcm_remainder_len = 0; + rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + + out->cd_offset += ctx->gcm_tag_len; + /* Clear sensitive data in the context before returning. */ + gcm_clear_ctx(ctx); + return (CRYPTO_SUCCESS); +} + +/* + * Finalize decryption: We just have accumulated crypto text, so now we + * decrypt it here inplace. + */ +static int +gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size) +{ + ASSERT3U(ctx->gcm_processed_data_len, ==, ctx->gcm_pt_buf_len); + ASSERT3U(block_size, ==, 16); + + size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ; + size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len; + uint8_t *datap = ctx->gcm_pt_buf; + const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched); + uint32_t *cb = (uint32_t *)ctx->gcm_cb; + uint64_t *ghash = ctx->gcm_ghash; + uint32_t *tmp = (uint32_t *)ctx->gcm_tmp; + int rv = CRYPTO_SUCCESS; + size_t bleft, done; + + /* + * Decrypt in chunks of gcm_avx_chunk_size, which is asserted to be + * greater or equal than GCM_AVX_MIN_ENCRYPT_BYTES, and a multiple of + * GCM_AVX_MIN_DECRYPT_BYTES. 
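+ *
+ * With the defaults above this means GCM_AVX_MIN_DECRYPT_BYTES is
+ * 16 * 6 = 96, GCM_AVX_MIN_ENCRYPT_BYTES is 16 * 6 * 3 = 288, the
+ * default gcm_avx_chunk_size is (32768 / 96) * 96 = 32736 bytes and
+ * GCM_AVX_MAX_CHUNK_SIZE is (131072 / 96) * 96 = 131040 bytes.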
+ */ + for (bleft = pt_len; bleft >= chunk_size; bleft -= chunk_size) { + kfpu_begin(); + done = aesni_gcm_decrypt(datap, datap, chunk_size, + (const void *)key, ctx->gcm_cb, ghash); + clear_fpu_regs(); + kfpu_end(); + if (done != chunk_size) { + return (CRYPTO_FAILED); + } + datap += done; + } + /* Decrypt remainder, which is less then chunk size, in one go. */ + kfpu_begin(); + if (bleft >= GCM_AVX_MIN_DECRYPT_BYTES) { + done = aesni_gcm_decrypt(datap, datap, bleft, + (const void *)key, ctx->gcm_cb, ghash); + if (done == 0) { + clear_fpu_regs(); + kfpu_end(); + return (CRYPTO_FAILED); + } + datap += done; + bleft -= done; + } + ASSERT(bleft < GCM_AVX_MIN_DECRYPT_BYTES); + + /* + * Now less then GCM_AVX_MIN_DECRYPT_BYTES bytes remain, + * decrypt them block by block. + */ + while (bleft > 0) { + /* Incomplete last block. */ + if (bleft < block_size) { + uint8_t *lastb = (uint8_t *)ctx->gcm_remainder; + + bzero(lastb, block_size); + bcopy(datap, lastb, bleft); + /* The GCM processing. */ + GHASH_AVX(ctx, lastb, block_size); + aes_encrypt_intel(key->encr_ks.ks32, key->nr, cb, tmp); + for (size_t i = 0; i < bleft; i++) { + datap[i] = lastb[i] ^ ((uint8_t *)tmp)[i]; + } + break; + } + /* The GCM processing. */ + GHASH_AVX(ctx, datap, block_size); + aes_encrypt_intel(key->encr_ks.ks32, key->nr, cb, tmp); + gcm_xor_avx((uint8_t *)tmp, datap); + gcm_incr_counter_block(ctx); + + datap += block_size; + bleft -= block_size; + } + if (rv != CRYPTO_SUCCESS) { + clear_fpu_regs(); + kfpu_end(); + return (rv); + } + /* Decryption done, finish the tag. */ + ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len)); + GHASH_AVX(ctx, (uint8_t *)ctx->gcm_len_a_len_c, block_size); + aes_encrypt_intel(key->encr_ks.ks32, key->nr, (uint32_t *)ctx->gcm_J0, + (uint32_t *)ctx->gcm_J0); + + gcm_xor_avx((uint8_t *)ctx->gcm_J0, (uint8_t *)ghash); + + /* We are done with the FPU, restore its state. */ + clear_fpu_regs(); + kfpu_end(); + + /* Compare the input authentication tag with what we calculated. */ + if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) { + /* They don't match. */ + return (CRYPTO_INVALID_MAC); + } + rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len); + if (rv != CRYPTO_SUCCESS) { + return (rv); + } + out->cd_offset += pt_len; + gcm_clear_ctx(ctx); + return (CRYPTO_SUCCESS); +} + +/* + * Initialize the GCM params H, Htabtle and the counter block. Save the + * initial counter block. + */ +static int +gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len, + unsigned char *auth_data, size_t auth_data_len, size_t block_size) +{ + uint8_t *cb = (uint8_t *)ctx->gcm_cb; + uint64_t *H = ctx->gcm_H; + const void *keysched = ((aes_key_t *)ctx->gcm_keysched)->encr_ks.ks32; + int aes_rounds = ((aes_key_t *)ctx->gcm_keysched)->nr; + uint8_t *datap = auth_data; + size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ; + size_t bleft; + + ASSERT(block_size == GCM_BLOCK_LEN); + + /* Init H (encrypt zero block) and create the initial counter block. */ + bzero(ctx->gcm_ghash, sizeof (ctx->gcm_ghash)); + bzero(H, sizeof (ctx->gcm_H)); + kfpu_begin(); + aes_encrypt_intel(keysched, aes_rounds, + (const uint32_t *)H, (uint32_t *)H); + + gcm_init_htab_avx(ctx->gcm_Htable, H); + + if (iv_len == 12) { + bcopy(iv, cb, 12); + cb[12] = 0; + cb[13] = 0; + cb[14] = 0; + cb[15] = 1; + /* We need the ICB later. 
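+ * For a 12-byte IV the pre-counter block J0 is simply IV || 0^31 || 1;
+ * it is encrypted once more at the end to mask the GHASH value when
+ * the authentication tag is produced.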
*/ + bcopy(cb, ctx->gcm_J0, sizeof (ctx->gcm_J0)); + } else { + /* + * Most consumers use 12 byte IVs, so it's OK to use the + * original routines for other IV sizes, just avoid nesting + * kfpu_begin calls. + */ + clear_fpu_regs(); + kfpu_end(); + gcm_format_initial_blocks(iv, iv_len, ctx, block_size, + aes_copy_block, aes_xor_block); + kfpu_begin(); + } + + /* Openssl post increments the counter, adjust for that. */ + gcm_incr_counter_block(ctx); + + /* Ghash AAD in chunk_size blocks. */ + for (bleft = auth_data_len; bleft >= chunk_size; bleft -= chunk_size) { + GHASH_AVX(ctx, datap, chunk_size); + datap += chunk_size; + clear_fpu_regs(); + kfpu_end(); + kfpu_begin(); + } + /* Ghash the remainder and handle possible incomplete GCM block. */ + if (bleft > 0) { + size_t incomp = bleft % block_size; + + bleft -= incomp; + if (bleft > 0) { + GHASH_AVX(ctx, datap, bleft); + datap += bleft; + } + if (incomp > 0) { + /* Zero pad and hash incomplete last block. */ + uint8_t *authp = (uint8_t *)ctx->gcm_tmp; + + bzero(authp, block_size); + bcopy(datap, authp, incomp); + GHASH_AVX(ctx, authp, block_size); + } + } + clear_fpu_regs(); + kfpu_end(); + return (CRYPTO_SUCCESS); +} + +#if defined(_KERNEL) +static int +icp_gcm_avx_set_chunk_size(const char *buf, zfs_kernel_param_t *kp) +{ + unsigned long val; + char val_rounded[16]; + int error = 0; + + error = kstrtoul(buf, 0, &val); + if (error) + return (error); + + val = (val / GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES; + + if (val < GCM_AVX_MIN_ENCRYPT_BYTES || val > GCM_AVX_MAX_CHUNK_SIZE) + return (-EINVAL); + + snprintf(val_rounded, 16, "%u", (uint32_t)val); + error = param_set_uint(val_rounded, kp); + return (error); +} + +module_param_call(icp_gcm_avx_chunk_size, icp_gcm_avx_set_chunk_size, + param_get_uint, &gcm_avx_chunk_size, 0644); + +MODULE_PARM_DESC(icp_gcm_avx_chunk_size, + "How many bytes to process while owning the FPU"); + +#endif /* defined(__KERNEL) */ +#endif /* ifdef CAN_USE_GCM_ASM */ diff --git a/sys/contrib/openzfs/module/icp/algs/modes/gcm_generic.c b/sys/contrib/openzfs/module/icp/algs/modes/gcm_generic.c new file mode 100644 index 000000000000..16b57998a92f --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/modes/gcm_generic.c @@ -0,0 +1,83 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + */ + +#include <modes/gcm_impl.h> + +struct aes_block { + uint64_t a; + uint64_t b; +}; + +/* + * Perform a carry-less multiplication (that is, use XOR instead of the + * multiply operator) on *x_in and *y and place the result in *res. + * + * Byte swap the input (*x_in and *y) and the output (*res). 
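+ *
+ * The bit-by-bit loop implements multiplication in GF(2^128) with the
+ * GHASH reduction polynomial x^128 + x^7 + x^2 + x + 1; the constant
+ * R = 0xe1 followed by zeros is that polynomial's low-order part
+ * (1 + x + x^2 + x^7) in GHASH's bit ordering, folded in whenever a
+ * bit falls off the right end of v.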
+ * + * Note: x_in, y, and res all point to 16-byte numbers (an array of two + * 64-bit integers). + */ +static void +gcm_generic_mul(uint64_t *x_in, uint64_t *y, uint64_t *res) +{ + static const uint64_t R = 0xe100000000000000ULL; + struct aes_block z = {0, 0}; + struct aes_block v; + uint64_t x; + int i, j; + + v.a = ntohll(y[0]); + v.b = ntohll(y[1]); + + for (j = 0; j < 2; j++) { + x = ntohll(x_in[j]); + for (i = 0; i < 64; i++, x <<= 1) { + if (x & 0x8000000000000000ULL) { + z.a ^= v.a; + z.b ^= v.b; + } + if (v.b & 1ULL) { + v.b = (v.a << 63)|(v.b >> 1); + v.a = (v.a >> 1) ^ R; + } else { + v.b = (v.a << 63)|(v.b >> 1); + v.a = v.a >> 1; + } + } + } + res[0] = htonll(z.a); + res[1] = htonll(z.b); +} + +static boolean_t +gcm_generic_will_work(void) +{ + return (B_TRUE); +} + +const gcm_impl_ops_t gcm_generic_impl = { + .mul = &gcm_generic_mul, + .is_supported = &gcm_generic_will_work, + .name = "generic" +}; diff --git a/sys/contrib/openzfs/module/icp/algs/modes/gcm_pclmulqdq.c b/sys/contrib/openzfs/module/icp/algs/modes/gcm_pclmulqdq.c new file mode 100644 index 000000000000..05920115ce86 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/modes/gcm_pclmulqdq.c @@ -0,0 +1,64 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + */ + +#if defined(__x86_64) && defined(HAVE_PCLMULQDQ) + +#include <sys/types.h> +#include <sys/simd.h> + +/* These functions are used to execute pclmulqdq based assembly methods */ +extern void gcm_mul_pclmulqdq(uint64_t *, uint64_t *, uint64_t *); + +#include <modes/gcm_impl.h> + +/* + * Perform a carry-less multiplication (that is, use XOR instead of the + * multiply operator) on *x_in and *y and place the result in *res. + * + * Byte swap the input (*x_in and *y) and the output (*res). + * + * Note: x_in, y, and res all point to 16-byte numbers (an array of two + * 64-bit integers). 
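Editorial note: gcm_generic_mul() above is the multiply hook that GHASH is built on. Every 16-byte block is XORed into the running hash and the result is multiplied by the hash key H in GF(2^128). A minimal sketch of that chaining loop, with the multiply supplied through a pointer of the same shape as the .mul member of gcm_impl_ops_t (the other names here are illustrative):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Same shape as the .mul member of gcm_impl_ops_t. */
typedef void (*gf128_mul_f)(uint64_t *x_in, uint64_t *y, uint64_t *res);

/*
 * GHASH chaining: for every 16-byte block B of data,
 * X = (X ^ B) * H in GF(2^128).  X and H are two 64-bit words each,
 * in the same in-memory layout the mul hooks expect.
 */
static void
ghash_update(uint64_t x[2], uint64_t h[2], const uint8_t *data, size_t len,
    gf128_mul_f mul)
{
	uint64_t blk[2];
	size_t off;

	for (off = 0; off + 16 <= len; off += 16) {
		memcpy(blk, data + off, 16);
		x[0] ^= blk[0];
		x[1] ^= blk[1];
		mul(x, h, x);
	}
}
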
+ */ +static void +gcm_pclmulqdq_mul(uint64_t *x_in, uint64_t *y, uint64_t *res) +{ + kfpu_begin(); + gcm_mul_pclmulqdq(x_in, y, res); + kfpu_end(); +} + +static boolean_t +gcm_pclmulqdq_will_work(void) +{ + return (kfpu_allowed() && zfs_pclmulqdq_available()); +} + +const gcm_impl_ops_t gcm_pclmulqdq_impl = { + .mul = &gcm_pclmulqdq_mul, + .is_supported = &gcm_pclmulqdq_will_work, + .name = "pclmulqdq" +}; + +#endif /* defined(__x86_64) && defined(HAVE_PCLMULQDQ) */ diff --git a/sys/contrib/openzfs/module/icp/algs/modes/modes.c b/sys/contrib/openzfs/module/icp/algs/modes/modes.c new file mode 100644 index 000000000000..f07876a478e2 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/modes/modes.c @@ -0,0 +1,157 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2009 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <modes/modes.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> + +/* + * Initialize by setting iov_or_mp to point to the current iovec or mp, + * and by setting current_offset to an offset within the current iovec or mp. + */ +void +crypto_init_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset) +{ + offset_t offset; + + switch (out->cd_format) { + case CRYPTO_DATA_RAW: + *current_offset = out->cd_offset; + break; + + case CRYPTO_DATA_UIO: { + uio_t *uiop = out->cd_uio; + uint_t vec_idx; + + offset = out->cd_offset; + offset = uio_index_at_offset(uiop, offset, &vec_idx); + + *current_offset = offset; + *iov_or_mp = (void *)(uintptr_t)vec_idx; + break; + } + } /* end switch */ +} + +/* + * Get pointers for where in the output to copy a block of encrypted or + * decrypted data. The iov_or_mp argument stores a pointer to the current + * iovec or mp, and offset stores an offset into the current iovec or mp. 
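Editorial note: crypto_init_ptrs() above leans on uio_index_at_offset() to find which iovec holds a given byte offset and to rebase the offset into that iovec. A plain-C sketch of the same walk over a simplified iovec array (structure and names are illustrative):

#include <stddef.h>

struct iovec_s {
	void	*iov_base;
	size_t	 iov_len;
};

/*
 * Walk the iovec array until the vector containing `*offset' is found.
 * Returns the vector index and rewrites *offset to be relative to that
 * vector, which is what the caller above needs to start copying.
 */
static size_t
iov_index_at_offset(const struct iovec_s *iov, size_t iovcnt, size_t *offset)
{
	size_t idx = 0;

	while (idx < iovcnt && *offset >= iov[idx].iov_len) {
		*offset -= iov[idx].iov_len;
		idx++;
	}
	return (idx);
}
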
+ */ +void +crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset, + uint8_t **out_data_1, size_t *out_data_1_len, uint8_t **out_data_2, + size_t amt) +{ + offset_t offset; + + switch (out->cd_format) { + case CRYPTO_DATA_RAW: { + iovec_t *iov; + + offset = *current_offset; + iov = &out->cd_raw; + if ((offset + amt) <= iov->iov_len) { + /* one block fits */ + *out_data_1 = (uint8_t *)iov->iov_base + offset; + *out_data_1_len = amt; + *out_data_2 = NULL; + *current_offset = offset + amt; + } + break; + } + + case CRYPTO_DATA_UIO: { + uio_t *uio = out->cd_uio; + offset_t offset; + uint_t vec_idx; + uint8_t *p; + uint64_t iov_len; + void *iov_base; + + offset = *current_offset; + vec_idx = (uintptr_t)(*iov_or_mp); + uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); + p = (uint8_t *)iov_base + offset; + *out_data_1 = p; + + if (offset + amt <= iov_len) { + /* can fit one block into this iov */ + *out_data_1_len = amt; + *out_data_2 = NULL; + *current_offset = offset + amt; + } else { + /* one block spans two iovecs */ + *out_data_1_len = iov_len - offset; + if (vec_idx == uio_iovcnt(uio)) + return; + vec_idx++; + uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); + *out_data_2 = (uint8_t *)iov_base; + *current_offset = amt - *out_data_1_len; + } + *iov_or_mp = (void *)(uintptr_t)vec_idx; + break; + } + } /* end switch */ +} + +void +crypto_free_mode_ctx(void *ctx) +{ + common_ctx_t *common_ctx = (common_ctx_t *)ctx; + + switch (common_ctx->cc_flags & + (ECB_MODE|CBC_MODE|CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) { + case ECB_MODE: + kmem_free(common_ctx, sizeof (ecb_ctx_t)); + break; + + case CBC_MODE: + kmem_free(common_ctx, sizeof (cbc_ctx_t)); + break; + + case CTR_MODE: + kmem_free(common_ctx, sizeof (ctr_ctx_t)); + break; + + case CCM_MODE: + if (((ccm_ctx_t *)ctx)->ccm_pt_buf != NULL) + vmem_free(((ccm_ctx_t *)ctx)->ccm_pt_buf, + ((ccm_ctx_t *)ctx)->ccm_data_len); + + kmem_free(ctx, sizeof (ccm_ctx_t)); + break; + + case GCM_MODE: + case GMAC_MODE: + if (((gcm_ctx_t *)ctx)->gcm_pt_buf != NULL) + vmem_free(((gcm_ctx_t *)ctx)->gcm_pt_buf, + ((gcm_ctx_t *)ctx)->gcm_pt_buf_len); + + kmem_free(ctx, sizeof (gcm_ctx_t)); + } +} diff --git a/sys/contrib/openzfs/module/icp/algs/sha1/sha1.c b/sys/contrib/openzfs/module/icp/algs/sha1/sha1.c new file mode 100644 index 000000000000..da34222c8fc3 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/sha1/sha1.c @@ -0,0 +1,835 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * The basic framework for this code came from the reference + * implementation for MD5. That implementation is Copyright (C) + * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved. + * + * License to copy and use this software is granted provided that it + * is identified as the "RSA Data Security, Inc. MD5 Message-Digest + * Algorithm" in all material mentioning or referencing this software + * or this function. + * + * License is also granted to make and use derivative works provided + * that such works are identified as "derived from the RSA Data + * Security, Inc. MD5 Message-Digest Algorithm" in all material + * mentioning or referencing the derived work. + * + * RSA Data Security, Inc. makes no representations concerning either + * the merchantability of this software or the suitability of this + * software for any particular purpose. It is provided "as is" + * without express or implied warranty of any kind. 
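Editorial note: crypto_get_ptrs() above hands back either one destination pointer (the block fits in the current iovec) or two (the block straddles an iovec boundary), and the cipher-mode code then performs a split copy. A small sketch of how such a split destination is consumed (function and parameter names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Copy one block into a destination that may have been split across
 * two buffers: the first `len1' bytes go to dst1, any remainder
 * continues at the start of dst2.
 */
static void
copy_block_split(const uint8_t *block, size_t block_len,
    uint8_t *dst1, size_t len1, uint8_t *dst2)
{
	if (len1 >= block_len) {
		memcpy(dst1, block, block_len);
	} else {
		memcpy(dst1, block, len1);
		memcpy(dst2, block + len1, block_len - len1);
	}
}
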
+ * + * These notices must be retained in any copies of any part of this + * documentation and/or software. + * + * NOTE: Cleaned-up and optimized, version of SHA1, based on the FIPS 180-1 + * standard, available at http://www.itl.nist.gov/fipspubs/fip180-1.htm + * Not as fast as one would like -- further optimizations are encouraged + * and appreciated. + */ + +#include <sys/zfs_context.h> +#include <sha1/sha1.h> +#include <sha1/sha1_consts.h> + +#ifdef _LITTLE_ENDIAN +#include <sys/byteorder.h> +#define HAVE_HTONL +#endif + +#define _RESTRICT_KYWD + +static void Encode(uint8_t *, const uint32_t *, size_t); + +#if defined(__sparc) + +#define SHA1_TRANSFORM(ctx, in) \ + SHA1Transform((ctx)->state[0], (ctx)->state[1], (ctx)->state[2], \ + (ctx)->state[3], (ctx)->state[4], (ctx), (in)) + +static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, + SHA1_CTX *, const uint8_t *); + +#elif defined(__amd64) + +#define SHA1_TRANSFORM(ctx, in) sha1_block_data_order((ctx), (in), 1) +#define SHA1_TRANSFORM_BLOCKS(ctx, in, num) sha1_block_data_order((ctx), \ + (in), (num)) + +void sha1_block_data_order(SHA1_CTX *ctx, const void *inpp, size_t num_blocks); + +#else + +#define SHA1_TRANSFORM(ctx, in) SHA1Transform((ctx), (in)) + +static void SHA1Transform(SHA1_CTX *, const uint8_t *); + +#endif + + +static uint8_t PADDING[64] = { 0x80, /* all zeros */ }; + +/* + * F, G, and H are the basic SHA1 functions. + */ +#define F(b, c, d) (((b) & (c)) | ((~b) & (d))) +#define G(b, c, d) ((b) ^ (c) ^ (d)) +#define H(b, c, d) (((b) & (c)) | (((b)|(c)) & (d))) + +/* + * SHA1Init() + * + * purpose: initializes the sha1 context and begins and sha1 digest operation + * input: SHA1_CTX * : the context to initializes. + * output: void + */ + +void +SHA1Init(SHA1_CTX *ctx) +{ + ctx->count[0] = ctx->count[1] = 0; + + /* + * load magic initialization constants. Tell lint + * that these constants are unsigned by using U. + */ + + ctx->state[0] = 0x67452301U; + ctx->state[1] = 0xefcdab89U; + ctx->state[2] = 0x98badcfeU; + ctx->state[3] = 0x10325476U; + ctx->state[4] = 0xc3d2e1f0U; +} + +void +SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len) +{ + uint32_t i, buf_index, buf_len; + const uint8_t *input = inptr; +#if defined(__amd64) + uint32_t block_count; +#endif /* __amd64 */ + + /* check for noop */ + if (input_len == 0) + return; + + /* compute number of bytes mod 64 */ + buf_index = (ctx->count[1] >> 3) & 0x3F; + + /* update number of bits */ + if ((ctx->count[1] += (input_len << 3)) < (input_len << 3)) + ctx->count[0]++; + + ctx->count[0] += (input_len >> 29); + + buf_len = 64 - buf_index; + + /* transform as many times as possible */ + i = 0; + if (input_len >= buf_len) { + + /* + * general optimization: + * + * only do initial bcopy() and SHA1Transform() if + * buf_index != 0. if buf_index == 0, we're just + * wasting our time doing the bcopy() since there + * wasn't any data left over from a previous call to + * SHA1Update(). 
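Editorial note: SHA1Update() above tracks the message length as a bit count split across two 32-bit words and derives the partial-block index from it; SHA1Final() later pads the message to 56 mod 64 bytes before appending the 8-byte length. The same arithmetic with a single 64-bit counter, as a checkable sketch (names are illustrative):

#include <stdint.h>

/* Bytes already sitting in the 64-byte block buffer. */
static uint32_t
sha1_buf_index(uint64_t total_bits)
{
	return ((uint32_t)(total_bits >> 3) & 0x3F);
}

/*
 * Number of padding bytes fed before the 8-byte length: enough to
 * reach 56 mod 64, spilling into an extra block when fewer than nine
 * bytes remain free in the current one (the ((index < 56) ? 56 : 120)
 * expression in SHA1Final() above).
 */
static uint32_t
sha1_pad_len(uint64_t total_bits)
{
	uint32_t index = sha1_buf_index(total_bits);

	return ((uint32_t)(((index < 56) ? 56 : 120) - index));
}
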
+ */ + + if (buf_index) { + bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len); + SHA1_TRANSFORM(ctx, ctx->buf_un.buf8); + i = buf_len; + } + +#if !defined(__amd64) + for (; i + 63 < input_len; i += 64) + SHA1_TRANSFORM(ctx, &input[i]); +#else + block_count = (input_len - i) >> 6; + if (block_count > 0) { + SHA1_TRANSFORM_BLOCKS(ctx, &input[i], block_count); + i += block_count << 6; + } +#endif /* !__amd64 */ + + /* + * general optimization: + * + * if i and input_len are the same, return now instead + * of calling bcopy(), since the bcopy() in this case + * will be an expensive nop. + */ + + if (input_len == i) + return; + + buf_index = 0; + } + + /* buffer remaining input */ + bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i); +} + +/* + * SHA1Final() + * + * purpose: ends an sha1 digest operation, finalizing the message digest and + * zeroing the context. + * input: uchar_t * : A buffer to store the digest. + * : The function actually uses void* because many + * : callers pass things other than uchar_t here. + * SHA1_CTX * : the context to finalize, save, and zero + * output: void + */ + +void +SHA1Final(void *digest, SHA1_CTX *ctx) +{ + uint8_t bitcount_be[sizeof (ctx->count)]; + uint32_t index = (ctx->count[1] >> 3) & 0x3f; + + /* store bit count, big endian */ + Encode(bitcount_be, ctx->count, sizeof (bitcount_be)); + + /* pad out to 56 mod 64 */ + SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index); + + /* append length (before padding) */ + SHA1Update(ctx, bitcount_be, sizeof (bitcount_be)); + + /* store state in digest */ + Encode(digest, ctx->state, sizeof (ctx->state)); + + /* zeroize sensitive information */ + bzero(ctx, sizeof (*ctx)); +} + + +#if !defined(__amd64) + +typedef uint32_t sha1word; + +/* + * sparc optimization: + * + * on the sparc, we can load big endian 32-bit data easily. note that + * special care must be taken to ensure the address is 32-bit aligned. + * in the interest of speed, we don't check to make sure, since + * careful programming can guarantee this for us. + */ + +#if defined(_ZFS_BIG_ENDIAN) +#define LOAD_BIG_32(addr) (*(uint32_t *)(addr)) + +#elif defined(HAVE_HTONL) +#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr))) + +#else +#define LOAD_BIG_32(addr) BE_32(*((uint32_t *)(addr))) +#endif /* _BIG_ENDIAN */ + +/* + * SHA1Transform() + */ +#if defined(W_ARRAY) +#define W(n) w[n] +#else /* !defined(W_ARRAY) */ +#define W(n) w_ ## n +#endif /* !defined(W_ARRAY) */ + +/* + * ROTATE_LEFT rotates x left n bits. + */ + +#if defined(__GNUC__) && defined(_LP64) +static __inline__ uint64_t +ROTATE_LEFT(uint64_t value, uint32_t n) +{ + uint32_t t32; + + t32 = (uint32_t)value; + return ((t32 << n) | (t32 >> (32 - n))); +} + +#else + +#define ROTATE_LEFT(x, n) \ + (((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n)))) + +#endif + +#if defined(__sparc) + + +/* + * sparc register window optimization: + * + * `a', `b', `c', `d', and `e' are passed into SHA1Transform + * explicitly since it increases the number of registers available to + * the compiler. under this scheme, these variables can be held in + * %i0 - %i4, which leaves more local and out registers available. 
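Editorial note: taken together, SHA1Init(), SHA1Update() and SHA1Final() above form the usual streaming digest interface. A minimal caller, assuming the SHA1_CTX type and prototypes from this tree's sha1/sha1.h:

#include <stdint.h>
#include <string.h>
#include <sha1/sha1.h>	/* SHA1_CTX and the prototypes above */

static void
sha1_digest_example(void)
{
	SHA1_CTX ctx;
	uint8_t digest[20];
	const char *msg = "abc";

	SHA1Init(&ctx);
	SHA1Update(&ctx, msg, strlen(msg));
	SHA1Final(digest, &ctx);

	/*
	 * FIPS 180-1 test vector: the digest of "abc" is
	 * a9993e364706816aba3e25717850c26c9cd0d89d.
	 */
}
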
+ * + * purpose: sha1 transformation -- updates the digest based on `block' + * input: uint32_t : bytes 1 - 4 of the digest + * uint32_t : bytes 5 - 8 of the digest + * uint32_t : bytes 9 - 12 of the digest + * uint32_t : bytes 12 - 16 of the digest + * uint32_t : bytes 16 - 20 of the digest + * SHA1_CTX * : the context to update + * uint8_t [64]: the block to use to update the digest + * output: void + */ + + +void +SHA1Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, + SHA1_CTX *ctx, const uint8_t blk[64]) +{ + /* + * sparc optimization: + * + * while it is somewhat counter-intuitive, on sparc, it is + * more efficient to place all the constants used in this + * function in an array and load the values out of the array + * than to manually load the constants. this is because + * setting a register to a 32-bit value takes two ops in most + * cases: a `sethi' and an `or', but loading a 32-bit value + * from memory only takes one `ld' (or `lduw' on v9). while + * this increases memory usage, the compiler can find enough + * other things to do while waiting to keep the pipeline does + * not stall. additionally, it is likely that many of these + * constants are cached so that later accesses do not even go + * out to the bus. + * + * this array is declared `static' to keep the compiler from + * having to bcopy() this array onto the stack frame of + * SHA1Transform() each time it is called -- which is + * unacceptably expensive. + * + * the `const' is to ensure that callers are good citizens and + * do not try to munge the array. since these routines are + * going to be called from inside multithreaded kernelland, + * this is a good safety check. -- `sha1_consts' will end up in + * .rodata. + * + * unfortunately, loading from an array in this manner hurts + * performance under Intel. So, there is a macro, + * SHA1_CONST(), used in SHA1Transform(), that either expands to + * a reference to this array, or to the actual constant, + * depending on what platform this code is compiled for. + */ + + + static const uint32_t sha1_consts[] = { + SHA1_CONST_0, SHA1_CONST_1, SHA1_CONST_2, SHA1_CONST_3 + }; + + + /* + * general optimization: + * + * use individual integers instead of using an array. this is a + * win, although the amount it wins by seems to vary quite a bit. + */ + + + uint32_t w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7; + uint32_t w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15; + + + /* + * sparc optimization: + * + * if `block' is already aligned on a 4-byte boundary, use + * LOAD_BIG_32() directly. otherwise, bcopy() into a + * buffer that *is* aligned on a 4-byte boundary and then do + * the LOAD_BIG_32() on that buffer. benchmarks have shown + * that using the bcopy() is better than loading the bytes + * individually and doing the endian-swap by hand. + * + * even though it's quite tempting to assign to do: + * + * blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32)); + * + * and only have one set of LOAD_BIG_32()'s, the compiler + * *does not* like that, so please resist the urge. + */ + + + if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? 
*/ + bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32)); + w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15); + w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14); + w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13); + w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12); + w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11); + w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10); + w_9 = LOAD_BIG_32(ctx->buf_un.buf32 + 9); + w_8 = LOAD_BIG_32(ctx->buf_un.buf32 + 8); + w_7 = LOAD_BIG_32(ctx->buf_un.buf32 + 7); + w_6 = LOAD_BIG_32(ctx->buf_un.buf32 + 6); + w_5 = LOAD_BIG_32(ctx->buf_un.buf32 + 5); + w_4 = LOAD_BIG_32(ctx->buf_un.buf32 + 4); + w_3 = LOAD_BIG_32(ctx->buf_un.buf32 + 3); + w_2 = LOAD_BIG_32(ctx->buf_un.buf32 + 2); + w_1 = LOAD_BIG_32(ctx->buf_un.buf32 + 1); + w_0 = LOAD_BIG_32(ctx->buf_un.buf32 + 0); + } else { + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_15 = LOAD_BIG_32(blk + 60); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_14 = LOAD_BIG_32(blk + 56); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_13 = LOAD_BIG_32(blk + 52); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_12 = LOAD_BIG_32(blk + 48); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_11 = LOAD_BIG_32(blk + 44); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_10 = LOAD_BIG_32(blk + 40); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_9 = LOAD_BIG_32(blk + 36); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_8 = LOAD_BIG_32(blk + 32); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_7 = LOAD_BIG_32(blk + 28); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_6 = LOAD_BIG_32(blk + 24); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_5 = LOAD_BIG_32(blk + 20); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_4 = LOAD_BIG_32(blk + 16); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_3 = LOAD_BIG_32(blk + 12); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_2 = LOAD_BIG_32(blk + 8); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_1 = LOAD_BIG_32(blk + 4); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w_0 = LOAD_BIG_32(blk + 0); + } +#else /* !defined(__sparc) */ + +void /* CSTYLED */ +SHA1Transform(SHA1_CTX *ctx, const uint8_t blk[64]) +{ + /* CSTYLED */ + sha1word a = ctx->state[0]; + sha1word b = ctx->state[1]; + sha1word c = ctx->state[2]; + sha1word d = ctx->state[3]; + sha1word e = ctx->state[4]; + +#if defined(W_ARRAY) + sha1word w[16]; +#else /* !defined(W_ARRAY) */ + sha1word w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7; + sha1word w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15; +#endif /* !defined(W_ARRAY) */ + + W(0) = LOAD_BIG_32((void *)(blk + 0)); + W(1) = LOAD_BIG_32((void *)(blk + 4)); + W(2) = LOAD_BIG_32((void *)(blk + 8)); + W(3) = LOAD_BIG_32((void *)(blk + 12)); + W(4) = LOAD_BIG_32((void *)(blk + 16)); + W(5) = LOAD_BIG_32((void *)(blk + 20)); + W(6) = LOAD_BIG_32((void *)(blk + 24)); + W(7) = LOAD_BIG_32((void *)(blk + 28)); + W(8) = LOAD_BIG_32((void *)(blk + 32)); + W(9) = LOAD_BIG_32((void *)(blk + 36)); + W(10) = LOAD_BIG_32((void *)(blk + 40)); + W(11) = LOAD_BIG_32((void *)(blk + 44)); + W(12) = LOAD_BIG_32((void *)(blk + 48)); + W(13) = LOAD_BIG_32((void *)(blk + 52)); + W(14) = LOAD_BIG_32((void *)(blk + 56)); + W(15) = LOAD_BIG_32((void *)(blk + 60)); + +#endif /* !defined(__sparc) */ + + /* + * general optimization: + * + * even though this approach is described in the standard as + * being slower algorithmically, it is 30-40% faster than the + * "faster" version under SPARC, because this version has more + * of the constraints specified at compile-time and uses fewer + * variables (and therefore has better register utilization) + * than its "speedier" brother. 
(i've tried both, trust me) + * + * for either method given in the spec, there is an "assignment" + * phase where the following takes place: + * + * tmp = (main_computation); + * e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp; + * + * we can make the algorithm go faster by not doing this work, + * but just pretending that `d' is now `e', etc. this works + * really well and obviates the need for a temporary variable. + * however, we still explicitly perform the rotate action, + * since it is cheaper on SPARC to do it once than to have to + * do it over and over again. + */ + + /* round 1 */ + e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0); /* 0 */ + b = ROTATE_LEFT(b, 30); + + d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(1) + SHA1_CONST(0); /* 1 */ + a = ROTATE_LEFT(a, 30); + + c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(2) + SHA1_CONST(0); /* 2 */ + e = ROTATE_LEFT(e, 30); + + b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(3) + SHA1_CONST(0); /* 3 */ + d = ROTATE_LEFT(d, 30); + + a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(4) + SHA1_CONST(0); /* 4 */ + c = ROTATE_LEFT(c, 30); + + e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(5) + SHA1_CONST(0); /* 5 */ + b = ROTATE_LEFT(b, 30); + + d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(6) + SHA1_CONST(0); /* 6 */ + a = ROTATE_LEFT(a, 30); + + c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(7) + SHA1_CONST(0); /* 7 */ + e = ROTATE_LEFT(e, 30); + + b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(8) + SHA1_CONST(0); /* 8 */ + d = ROTATE_LEFT(d, 30); + + a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(9) + SHA1_CONST(0); /* 9 */ + c = ROTATE_LEFT(c, 30); + + e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(10) + SHA1_CONST(0); /* 10 */ + b = ROTATE_LEFT(b, 30); + + d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(11) + SHA1_CONST(0); /* 11 */ + a = ROTATE_LEFT(a, 30); + + c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(12) + SHA1_CONST(0); /* 12 */ + e = ROTATE_LEFT(e, 30); + + b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(13) + SHA1_CONST(0); /* 13 */ + d = ROTATE_LEFT(d, 30); + + a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(14) + SHA1_CONST(0); /* 14 */ + c = ROTATE_LEFT(c, 30); + + e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(15) + SHA1_CONST(0); /* 15 */ + b = ROTATE_LEFT(b, 30); + + W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 16 */ + d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(0) + SHA1_CONST(0); + a = ROTATE_LEFT(a, 30); + + W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 17 */ + c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(1) + SHA1_CONST(0); + e = ROTATE_LEFT(e, 30); + + W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 18 */ + b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(2) + SHA1_CONST(0); + d = ROTATE_LEFT(d, 30); + + W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 19 */ + a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(3) + SHA1_CONST(0); + c = ROTATE_LEFT(c, 30); + + /* round 2 */ + W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 20 */ + e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(4) + SHA1_CONST(1); + b = ROTATE_LEFT(b, 30); + + W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 21 */ + d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(5) + SHA1_CONST(1); + a = ROTATE_LEFT(a, 30); + + W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 22 */ + c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(6) + SHA1_CONST(1); + e = ROTATE_LEFT(e, 30); + + W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 23 */ + b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(7) + SHA1_CONST(1); + d = ROTATE_LEFT(d, 30); + + W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 
1); /* 24 */ + a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(8) + SHA1_CONST(1); + c = ROTATE_LEFT(c, 30); + + W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 25 */ + e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(9) + SHA1_CONST(1); + b = ROTATE_LEFT(b, 30); + + W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 26 */ + d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(10) + SHA1_CONST(1); + a = ROTATE_LEFT(a, 30); + + W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 27 */ + c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(11) + SHA1_CONST(1); + e = ROTATE_LEFT(e, 30); + + W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 28 */ + b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(12) + SHA1_CONST(1); + d = ROTATE_LEFT(d, 30); + + W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 29 */ + a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(13) + SHA1_CONST(1); + c = ROTATE_LEFT(c, 30); + + W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 30 */ + e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(14) + SHA1_CONST(1); + b = ROTATE_LEFT(b, 30); + + W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 31 */ + d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(15) + SHA1_CONST(1); + a = ROTATE_LEFT(a, 30); + + W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 32 */ + c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(0) + SHA1_CONST(1); + e = ROTATE_LEFT(e, 30); + + W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 33 */ + b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(1) + SHA1_CONST(1); + d = ROTATE_LEFT(d, 30); + + W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 34 */ + a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(2) + SHA1_CONST(1); + c = ROTATE_LEFT(c, 30); + + W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 35 */ + e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(3) + SHA1_CONST(1); + b = ROTATE_LEFT(b, 30); + + W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 36 */ + d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(4) + SHA1_CONST(1); + a = ROTATE_LEFT(a, 30); + + W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 37 */ + c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(5) + SHA1_CONST(1); + e = ROTATE_LEFT(e, 30); + + W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 38 */ + b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(6) + SHA1_CONST(1); + d = ROTATE_LEFT(d, 30); + + W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 39 */ + a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(7) + SHA1_CONST(1); + c = ROTATE_LEFT(c, 30); + + /* round 3 */ + W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 40 */ + e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(8) + SHA1_CONST(2); + b = ROTATE_LEFT(b, 30); + + W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 41 */ + d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(9) + SHA1_CONST(2); + a = ROTATE_LEFT(a, 30); + + W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 42 */ + c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(10) + SHA1_CONST(2); + e = ROTATE_LEFT(e, 30); + + W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 43 */ + b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(11) + SHA1_CONST(2); + d = ROTATE_LEFT(d, 30); + + W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 44 */ + a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(12) + SHA1_CONST(2); + c = ROTATE_LEFT(c, 30); + + W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 45 */ + e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(13) + SHA1_CONST(2); + b = ROTATE_LEFT(b, 30); + + W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 46 */ + d = ROTATE_LEFT(e, 5) + H(a, b, c) 
+ d + W(14) + SHA1_CONST(2); + a = ROTATE_LEFT(a, 30); + + W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 47 */ + c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(15) + SHA1_CONST(2); + e = ROTATE_LEFT(e, 30); + + W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 48 */ + b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(0) + SHA1_CONST(2); + d = ROTATE_LEFT(d, 30); + + W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 49 */ + a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(1) + SHA1_CONST(2); + c = ROTATE_LEFT(c, 30); + + W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 50 */ + e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(2) + SHA1_CONST(2); + b = ROTATE_LEFT(b, 30); + + W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 51 */ + d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(3) + SHA1_CONST(2); + a = ROTATE_LEFT(a, 30); + + W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 52 */ + c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(4) + SHA1_CONST(2); + e = ROTATE_LEFT(e, 30); + + W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 53 */ + b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(5) + SHA1_CONST(2); + d = ROTATE_LEFT(d, 30); + + W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 54 */ + a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(6) + SHA1_CONST(2); + c = ROTATE_LEFT(c, 30); + + W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 55 */ + e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(7) + SHA1_CONST(2); + b = ROTATE_LEFT(b, 30); + + W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 56 */ + d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(8) + SHA1_CONST(2); + a = ROTATE_LEFT(a, 30); + + W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 57 */ + c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(9) + SHA1_CONST(2); + e = ROTATE_LEFT(e, 30); + + W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 58 */ + b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(10) + SHA1_CONST(2); + d = ROTATE_LEFT(d, 30); + + W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 59 */ + a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(11) + SHA1_CONST(2); + c = ROTATE_LEFT(c, 30); + + /* round 4 */ + W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 60 */ + e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(12) + SHA1_CONST(3); + b = ROTATE_LEFT(b, 30); + + W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 61 */ + d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(13) + SHA1_CONST(3); + a = ROTATE_LEFT(a, 30); + + W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 62 */ + c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(14) + SHA1_CONST(3); + e = ROTATE_LEFT(e, 30); + + W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 63 */ + b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(15) + SHA1_CONST(3); + d = ROTATE_LEFT(d, 30); + + W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 64 */ + a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(0) + SHA1_CONST(3); + c = ROTATE_LEFT(c, 30); + + W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 65 */ + e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(1) + SHA1_CONST(3); + b = ROTATE_LEFT(b, 30); + + W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 66 */ + d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(2) + SHA1_CONST(3); + a = ROTATE_LEFT(a, 30); + + W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 67 */ + c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(3) + SHA1_CONST(3); + e = ROTATE_LEFT(e, 30); + + W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 68 */ + b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(4) + SHA1_CONST(3); + d = ROTATE_LEFT(d, 30); + + 
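Editorial note: the fully unrolled rounds in this function implement the variable-renaming trick described in the comment before round 1. For reference, the textbook FIPS 180-1 form of the same compression, with the explicit a..e shuffle and the W schedule written as a loop (a sketch for comparison, not a drop-in replacement):

#include <stdint.h>

#define ROTL32(x, n)	(((x) << (n)) | ((x) >> (32 - (n))))

/*
 * Reference SHA-1 compression of one 512-bit block already expanded
 * into 16 big-endian words w[0..15]; state[5] is the chaining value.
 * The unrolled code computes exactly this, minus the temporaries.
 */
static void
sha1_compress_ref(uint32_t state[5], uint32_t w[16])
{
	static const uint32_t k[4] = {
		0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
	};
	uint32_t a = state[0], b = state[1], c = state[2];
	uint32_t d = state[3], e = state[4];
	int t;

	for (t = 0; t < 80; t++) {
		uint32_t f, tmp;

		if (t >= 16) {
			tmp = w[(t - 3) & 15] ^ w[(t - 8) & 15] ^
			    w[(t - 14) & 15] ^ w[t & 15];
			w[t & 15] = ROTL32(tmp, 1);
		}
		if (t < 20)
			f = (b & c) | (~b & d);			/* F */
		else if (t < 40)
			f = b ^ c ^ d;				/* G */
		else if (t < 60)
			f = (b & c) | (b & d) | (c & d);	/* H */
		else
			f = b ^ c ^ d;				/* G again */

		tmp = ROTL32(a, 5) + f + e + w[t & 15] + k[t / 20];
		e = d;
		d = c;
		c = ROTL32(b, 30);
		b = a;
		a = tmp;
	}
	state[0] += a;
	state[1] += b;
	state[2] += c;
	state[3] += d;
	state[4] += e;
}
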
W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 69 */ + a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(5) + SHA1_CONST(3); + c = ROTATE_LEFT(c, 30); + + W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 70 */ + e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(6) + SHA1_CONST(3); + b = ROTATE_LEFT(b, 30); + + W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 71 */ + d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(7) + SHA1_CONST(3); + a = ROTATE_LEFT(a, 30); + + W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 72 */ + c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(8) + SHA1_CONST(3); + e = ROTATE_LEFT(e, 30); + + W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 73 */ + b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(9) + SHA1_CONST(3); + d = ROTATE_LEFT(d, 30); + + W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 74 */ + a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(10) + SHA1_CONST(3); + c = ROTATE_LEFT(c, 30); + + W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 75 */ + e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(11) + SHA1_CONST(3); + b = ROTATE_LEFT(b, 30); + + W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 76 */ + d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(12) + SHA1_CONST(3); + a = ROTATE_LEFT(a, 30); + + W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 77 */ + c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(13) + SHA1_CONST(3); + e = ROTATE_LEFT(e, 30); + + W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 78 */ + b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(14) + SHA1_CONST(3); + d = ROTATE_LEFT(d, 30); + + W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 79 */ + + ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(15) + + SHA1_CONST(3); + ctx->state[1] += b; + ctx->state[2] += ROTATE_LEFT(c, 30); + ctx->state[3] += d; + ctx->state[4] += e; + + /* zeroize sensitive information */ + W(0) = W(1) = W(2) = W(3) = W(4) = W(5) = W(6) = W(7) = W(8) = 0; + W(9) = W(10) = W(11) = W(12) = W(13) = W(14) = W(15) = 0; +} +#endif /* !__amd64 */ + + +/* + * Encode() + * + * purpose: to convert a list of numbers from little endian to big endian + * input: uint8_t * : place to store the converted big endian numbers + * uint32_t * : place to get numbers to convert from + * size_t : the length of the input in bytes + * output: void + */ + +static void +Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input, + size_t len) +{ + size_t i, j; + +#if defined(__sparc) + if (IS_P2ALIGNED(output, sizeof (uint32_t))) { + for (i = 0, j = 0; j < len; i++, j += 4) { + /* LINTED E_BAD_PTR_CAST_ALIGN */ + *((uint32_t *)(output + j)) = input[i]; + } + } else { +#endif /* little endian -- will work on big endian, but slowly */ + + for (i = 0, j = 0; j < len; i++, j += 4) { + output[j] = (input[i] >> 24) & 0xff; + output[j + 1] = (input[i] >> 16) & 0xff; + output[j + 2] = (input[i] >> 8) & 0xff; + output[j + 3] = input[i] & 0xff; + } +#if defined(__sparc) + } +#endif +} diff --git a/sys/contrib/openzfs/module/icp/algs/sha2/sha2.c b/sys/contrib/openzfs/module/icp/algs/sha2/sha2.c new file mode 100644 index 000000000000..75f6a3c1af4b --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/sha2/sha2.c @@ -0,0 +1,956 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ +/* + * Copyright 2013 Saso Kiselkov. All rights reserved. + */ + +/* + * The basic framework for this code came from the reference + * implementation for MD5. 
That implementation is Copyright (C) + * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved. + * + * License to copy and use this software is granted provided that it + * is identified as the "RSA Data Security, Inc. MD5 Message-Digest + * Algorithm" in all material mentioning or referencing this software + * or this function. + * + * License is also granted to make and use derivative works provided + * that such works are identified as "derived from the RSA Data + * Security, Inc. MD5 Message-Digest Algorithm" in all material + * mentioning or referencing the derived work. + * + * RSA Data Security, Inc. makes no representations concerning either + * the merchantability of this software or the suitability of this + * software for any particular purpose. It is provided "as is" + * without express or implied warranty of any kind. + * + * These notices must be retained in any copies of any part of this + * documentation and/or software. + * + * NOTE: Cleaned-up and optimized, version of SHA2, based on the FIPS 180-2 + * standard, available at + * http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf + * Not as fast as one would like -- further optimizations are encouraged + * and appreciated. + */ + +#include <sys/zfs_context.h> +#define _SHA2_IMPL +#include <sys/sha2.h> +#include <sha2/sha2_consts.h> + +#define _RESTRICT_KYWD + +#ifdef _ZFS_LITTLE_ENDIAN +#include <sys/byteorder.h> +#define HAVE_HTONL +#endif +#include <sys/isa_defs.h> /* for _ILP32 */ + +static void Encode(uint8_t *, uint32_t *, size_t); +static void Encode64(uint8_t *, uint64_t *, size_t); + +/* userspace only supports the generic version */ +#if defined(__amd64) && defined(_KERNEL) +#define SHA512Transform(ctx, in) SHA512TransformBlocks((ctx), (in), 1) +#define SHA256Transform(ctx, in) SHA256TransformBlocks((ctx), (in), 1) + +void SHA512TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num); +void SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num); + +#else +static void SHA256Transform(SHA2_CTX *, const uint8_t *); +static void SHA512Transform(SHA2_CTX *, const uint8_t *); +#endif /* __amd64 && _KERNEL */ + +static uint8_t PADDING[128] = { 0x80, /* all zeros */ }; + +/* + * The low-level checksum routines use a lot of stack space. On systems where + * small stacks are enforced (like 32-bit kernel builds), insert compiler memory + * barriers to reduce stack frame size. This can reduce the SHA512Transform() + * stack frame usage from 3k to <1k on ARM32, for example. + */ +#if defined(_ILP32) || defined(__powerpc) /* small stack */ +#define SMALL_STACK_MEMORY_BARRIER asm volatile("": : :"memory"); +#else +#define SMALL_STACK_MEMORY_BARRIER +#endif + +/* Ch and Maj are the basic SHA2 functions. */ +#define Ch(b, c, d) (((b) & (c)) ^ ((~b) & (d))) +#define Maj(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d))) + +/* Rotates x right n bits. 
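Editorial note: the Ch and Maj macros above are the bit-select ("choose") and bitwise-majority functions from FIPS 180-2. A tiny self-contained check of that reading (the input values are arbitrary illustrations):

#include <stdint.h>
#include <assert.h>

/* Ch(e, f, g): for each bit, pick f where e is 1, g where e is 0. */
static uint32_t
ch32(uint32_t e, uint32_t f, uint32_t g)
{
	return ((e & f) ^ (~e & g));
}

/* Maj(a, b, c): for each bit, the value held by at least two inputs. */
static uint32_t
maj32(uint32_t a, uint32_t b, uint32_t c)
{
	return ((a & b) ^ (a & c) ^ (b & c));
}

int
main(void)
{
	assert(ch32(0xF0F0F0F0, 0xAAAAAAAA, 0x55555555) == 0xA5A5A5A5);
	assert(maj32(0xFF00FF00, 0xF0F0F0F0, 0xCCCCCCCC) == 0xFCC0FCC0);
	return (0);
}
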
*/ +#define ROTR(x, n) \ + (((x) >> (n)) | ((x) << ((sizeof (x) * NBBY)-(n)))) + +/* Shift x right n bits */ +#define SHR(x, n) ((x) >> (n)) + +/* SHA256 Functions */ +#define BIGSIGMA0_256(x) (ROTR((x), 2) ^ ROTR((x), 13) ^ ROTR((x), 22)) +#define BIGSIGMA1_256(x) (ROTR((x), 6) ^ ROTR((x), 11) ^ ROTR((x), 25)) +#define SIGMA0_256(x) (ROTR((x), 7) ^ ROTR((x), 18) ^ SHR((x), 3)) +#define SIGMA1_256(x) (ROTR((x), 17) ^ ROTR((x), 19) ^ SHR((x), 10)) + +#define SHA256ROUND(a, b, c, d, e, f, g, h, i, w) \ + T1 = h + BIGSIGMA1_256(e) + Ch(e, f, g) + SHA256_CONST(i) + w; \ + d += T1; \ + T2 = BIGSIGMA0_256(a) + Maj(a, b, c); \ + h = T1 + T2 + +/* SHA384/512 Functions */ +#define BIGSIGMA0(x) (ROTR((x), 28) ^ ROTR((x), 34) ^ ROTR((x), 39)) +#define BIGSIGMA1(x) (ROTR((x), 14) ^ ROTR((x), 18) ^ ROTR((x), 41)) +#define SIGMA0(x) (ROTR((x), 1) ^ ROTR((x), 8) ^ SHR((x), 7)) +#define SIGMA1(x) (ROTR((x), 19) ^ ROTR((x), 61) ^ SHR((x), 6)) +#define SHA512ROUND(a, b, c, d, e, f, g, h, i, w) \ + T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + SHA512_CONST(i) + w; \ + d += T1; \ + T2 = BIGSIGMA0(a) + Maj(a, b, c); \ + h = T1 + T2; \ + SMALL_STACK_MEMORY_BARRIER; + +/* + * sparc optimization: + * + * on the sparc, we can load big endian 32-bit data easily. note that + * special care must be taken to ensure the address is 32-bit aligned. + * in the interest of speed, we don't check to make sure, since + * careful programming can guarantee this for us. + */ + +#if defined(_ZFS_BIG_ENDIAN) +#define LOAD_BIG_32(addr) (*(uint32_t *)(addr)) +#define LOAD_BIG_64(addr) (*(uint64_t *)(addr)) + +#elif defined(HAVE_HTONL) +#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr))) +#define LOAD_BIG_64(addr) htonll(*((uint64_t *)(addr))) + +#else +/* little endian -- will work on big endian, but slowly */ +#define LOAD_BIG_32(addr) \ + (((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3]) +#define LOAD_BIG_64(addr) \ + (((uint64_t)(addr)[0] << 56) | ((uint64_t)(addr)[1] << 48) | \ + ((uint64_t)(addr)[2] << 40) | ((uint64_t)(addr)[3] << 32) | \ + ((uint64_t)(addr)[4] << 24) | ((uint64_t)(addr)[5] << 16) | \ + ((uint64_t)(addr)[6] << 8) | (uint64_t)(addr)[7]) +#endif /* _BIG_ENDIAN */ + + +#if !defined(__amd64) || !defined(_KERNEL) +/* SHA256 Transform */ + +static void +SHA256Transform(SHA2_CTX *ctx, const uint8_t *blk) +{ + uint32_t a = ctx->state.s32[0]; + uint32_t b = ctx->state.s32[1]; + uint32_t c = ctx->state.s32[2]; + uint32_t d = ctx->state.s32[3]; + uint32_t e = ctx->state.s32[4]; + uint32_t f = ctx->state.s32[5]; + uint32_t g = ctx->state.s32[6]; + uint32_t h = ctx->state.s32[7]; + + uint32_t w0, w1, w2, w3, w4, w5, w6, w7; + uint32_t w8, w9, w10, w11, w12, w13, w14, w15; + uint32_t T1, T2; + +#if defined(__sparc) + static const uint32_t sha256_consts[] = { + SHA256_CONST_0, SHA256_CONST_1, SHA256_CONST_2, + SHA256_CONST_3, SHA256_CONST_4, SHA256_CONST_5, + SHA256_CONST_6, SHA256_CONST_7, SHA256_CONST_8, + SHA256_CONST_9, SHA256_CONST_10, SHA256_CONST_11, + SHA256_CONST_12, SHA256_CONST_13, SHA256_CONST_14, + SHA256_CONST_15, SHA256_CONST_16, SHA256_CONST_17, + SHA256_CONST_18, SHA256_CONST_19, SHA256_CONST_20, + SHA256_CONST_21, SHA256_CONST_22, SHA256_CONST_23, + SHA256_CONST_24, SHA256_CONST_25, SHA256_CONST_26, + SHA256_CONST_27, SHA256_CONST_28, SHA256_CONST_29, + SHA256_CONST_30, SHA256_CONST_31, SHA256_CONST_32, + SHA256_CONST_33, SHA256_CONST_34, SHA256_CONST_35, + SHA256_CONST_36, SHA256_CONST_37, SHA256_CONST_38, + SHA256_CONST_39, SHA256_CONST_40, SHA256_CONST_41, + SHA256_CONST_42, 
SHA256_CONST_43, SHA256_CONST_44, + SHA256_CONST_45, SHA256_CONST_46, SHA256_CONST_47, + SHA256_CONST_48, SHA256_CONST_49, SHA256_CONST_50, + SHA256_CONST_51, SHA256_CONST_52, SHA256_CONST_53, + SHA256_CONST_54, SHA256_CONST_55, SHA256_CONST_56, + SHA256_CONST_57, SHA256_CONST_58, SHA256_CONST_59, + SHA256_CONST_60, SHA256_CONST_61, SHA256_CONST_62, + SHA256_CONST_63 + }; +#endif /* __sparc */ + + if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? */ + bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32)); + blk = (uint8_t *)ctx->buf_un.buf32; + } + + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w0 = LOAD_BIG_32(blk + 4 * 0); + SHA256ROUND(a, b, c, d, e, f, g, h, 0, w0); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w1 = LOAD_BIG_32(blk + 4 * 1); + SHA256ROUND(h, a, b, c, d, e, f, g, 1, w1); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w2 = LOAD_BIG_32(blk + 4 * 2); + SHA256ROUND(g, h, a, b, c, d, e, f, 2, w2); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w3 = LOAD_BIG_32(blk + 4 * 3); + SHA256ROUND(f, g, h, a, b, c, d, e, 3, w3); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w4 = LOAD_BIG_32(blk + 4 * 4); + SHA256ROUND(e, f, g, h, a, b, c, d, 4, w4); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w5 = LOAD_BIG_32(blk + 4 * 5); + SHA256ROUND(d, e, f, g, h, a, b, c, 5, w5); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w6 = LOAD_BIG_32(blk + 4 * 6); + SHA256ROUND(c, d, e, f, g, h, a, b, 6, w6); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w7 = LOAD_BIG_32(blk + 4 * 7); + SHA256ROUND(b, c, d, e, f, g, h, a, 7, w7); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w8 = LOAD_BIG_32(blk + 4 * 8); + SHA256ROUND(a, b, c, d, e, f, g, h, 8, w8); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w9 = LOAD_BIG_32(blk + 4 * 9); + SHA256ROUND(h, a, b, c, d, e, f, g, 9, w9); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w10 = LOAD_BIG_32(blk + 4 * 10); + SHA256ROUND(g, h, a, b, c, d, e, f, 10, w10); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w11 = LOAD_BIG_32(blk + 4 * 11); + SHA256ROUND(f, g, h, a, b, c, d, e, 11, w11); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w12 = LOAD_BIG_32(blk + 4 * 12); + SHA256ROUND(e, f, g, h, a, b, c, d, 12, w12); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w13 = LOAD_BIG_32(blk + 4 * 13); + SHA256ROUND(d, e, f, g, h, a, b, c, 13, w13); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w14 = LOAD_BIG_32(blk + 4 * 14); + SHA256ROUND(c, d, e, f, g, h, a, b, 14, w14); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w15 = LOAD_BIG_32(blk + 4 * 15); + SHA256ROUND(b, c, d, e, f, g, h, a, 15, w15); + + w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0; + SHA256ROUND(a, b, c, d, e, f, g, h, 16, w0); + w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1; + SHA256ROUND(h, a, b, c, d, e, f, g, 17, w1); + w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2; + SHA256ROUND(g, h, a, b, c, d, e, f, 18, w2); + w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3; + SHA256ROUND(f, g, h, a, b, c, d, e, 19, w3); + w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4; + SHA256ROUND(e, f, g, h, a, b, c, d, 20, w4); + w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5; + SHA256ROUND(d, e, f, g, h, a, b, c, 21, w5); + w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6; + SHA256ROUND(c, d, e, f, g, h, a, b, 22, w6); + w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7; + SHA256ROUND(b, c, d, e, f, g, h, a, 23, w7); + w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8; + SHA256ROUND(a, b, c, d, e, f, g, h, 24, w8); + w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9; + SHA256ROUND(h, a, b, c, d, e, f, g, 25, w9); + w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10; + SHA256ROUND(g, h, a, b, c, d, e, f, 26, w10); + w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + 
w11; + SHA256ROUND(f, g, h, a, b, c, d, e, 27, w11); + w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12; + SHA256ROUND(e, f, g, h, a, b, c, d, 28, w12); + w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13; + SHA256ROUND(d, e, f, g, h, a, b, c, 29, w13); + w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14; + SHA256ROUND(c, d, e, f, g, h, a, b, 30, w14); + w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15; + SHA256ROUND(b, c, d, e, f, g, h, a, 31, w15); + + w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0; + SHA256ROUND(a, b, c, d, e, f, g, h, 32, w0); + w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1; + SHA256ROUND(h, a, b, c, d, e, f, g, 33, w1); + w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2; + SHA256ROUND(g, h, a, b, c, d, e, f, 34, w2); + w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3; + SHA256ROUND(f, g, h, a, b, c, d, e, 35, w3); + w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4; + SHA256ROUND(e, f, g, h, a, b, c, d, 36, w4); + w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5; + SHA256ROUND(d, e, f, g, h, a, b, c, 37, w5); + w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6; + SHA256ROUND(c, d, e, f, g, h, a, b, 38, w6); + w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7; + SHA256ROUND(b, c, d, e, f, g, h, a, 39, w7); + w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8; + SHA256ROUND(a, b, c, d, e, f, g, h, 40, w8); + w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9; + SHA256ROUND(h, a, b, c, d, e, f, g, 41, w9); + w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10; + SHA256ROUND(g, h, a, b, c, d, e, f, 42, w10); + w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11; + SHA256ROUND(f, g, h, a, b, c, d, e, 43, w11); + w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12; + SHA256ROUND(e, f, g, h, a, b, c, d, 44, w12); + w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13; + SHA256ROUND(d, e, f, g, h, a, b, c, 45, w13); + w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14; + SHA256ROUND(c, d, e, f, g, h, a, b, 46, w14); + w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15; + SHA256ROUND(b, c, d, e, f, g, h, a, 47, w15); + + w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0; + SHA256ROUND(a, b, c, d, e, f, g, h, 48, w0); + w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1; + SHA256ROUND(h, a, b, c, d, e, f, g, 49, w1); + w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2; + SHA256ROUND(g, h, a, b, c, d, e, f, 50, w2); + w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3; + SHA256ROUND(f, g, h, a, b, c, d, e, 51, w3); + w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4; + SHA256ROUND(e, f, g, h, a, b, c, d, 52, w4); + w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5; + SHA256ROUND(d, e, f, g, h, a, b, c, 53, w5); + w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6; + SHA256ROUND(c, d, e, f, g, h, a, b, 54, w6); + w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7; + SHA256ROUND(b, c, d, e, f, g, h, a, 55, w7); + w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8; + SHA256ROUND(a, b, c, d, e, f, g, h, 56, w8); + w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9; + SHA256ROUND(h, a, b, c, d, e, f, g, 57, w9); + w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10; + SHA256ROUND(g, h, a, b, c, d, e, f, 58, w10); + w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11; + SHA256ROUND(f, g, h, a, b, c, d, e, 59, w11); + w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12; + SHA256ROUND(e, f, g, h, a, b, c, d, 60, w12); + w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13; + SHA256ROUND(d, e, f, g, h, a, b, c, 61, w13); + w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14; + SHA256ROUND(c, d, e, f, g, h, a, b, 62, w14); + w15 = 
SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15; + SHA256ROUND(b, c, d, e, f, g, h, a, 63, w15); + + ctx->state.s32[0] += a; + ctx->state.s32[1] += b; + ctx->state.s32[2] += c; + ctx->state.s32[3] += d; + ctx->state.s32[4] += e; + ctx->state.s32[5] += f; + ctx->state.s32[6] += g; + ctx->state.s32[7] += h; +} + + +/* SHA384 and SHA512 Transform */ + +static void +SHA512Transform(SHA2_CTX *ctx, const uint8_t *blk) +{ + + uint64_t a = ctx->state.s64[0]; + uint64_t b = ctx->state.s64[1]; + uint64_t c = ctx->state.s64[2]; + uint64_t d = ctx->state.s64[3]; + uint64_t e = ctx->state.s64[4]; + uint64_t f = ctx->state.s64[5]; + uint64_t g = ctx->state.s64[6]; + uint64_t h = ctx->state.s64[7]; + + uint64_t w0, w1, w2, w3, w4, w5, w6, w7; + uint64_t w8, w9, w10, w11, w12, w13, w14, w15; + uint64_t T1, T2; + +#if defined(__sparc) + static const uint64_t sha512_consts[] = { + SHA512_CONST_0, SHA512_CONST_1, SHA512_CONST_2, + SHA512_CONST_3, SHA512_CONST_4, SHA512_CONST_5, + SHA512_CONST_6, SHA512_CONST_7, SHA512_CONST_8, + SHA512_CONST_9, SHA512_CONST_10, SHA512_CONST_11, + SHA512_CONST_12, SHA512_CONST_13, SHA512_CONST_14, + SHA512_CONST_15, SHA512_CONST_16, SHA512_CONST_17, + SHA512_CONST_18, SHA512_CONST_19, SHA512_CONST_20, + SHA512_CONST_21, SHA512_CONST_22, SHA512_CONST_23, + SHA512_CONST_24, SHA512_CONST_25, SHA512_CONST_26, + SHA512_CONST_27, SHA512_CONST_28, SHA512_CONST_29, + SHA512_CONST_30, SHA512_CONST_31, SHA512_CONST_32, + SHA512_CONST_33, SHA512_CONST_34, SHA512_CONST_35, + SHA512_CONST_36, SHA512_CONST_37, SHA512_CONST_38, + SHA512_CONST_39, SHA512_CONST_40, SHA512_CONST_41, + SHA512_CONST_42, SHA512_CONST_43, SHA512_CONST_44, + SHA512_CONST_45, SHA512_CONST_46, SHA512_CONST_47, + SHA512_CONST_48, SHA512_CONST_49, SHA512_CONST_50, + SHA512_CONST_51, SHA512_CONST_52, SHA512_CONST_53, + SHA512_CONST_54, SHA512_CONST_55, SHA512_CONST_56, + SHA512_CONST_57, SHA512_CONST_58, SHA512_CONST_59, + SHA512_CONST_60, SHA512_CONST_61, SHA512_CONST_62, + SHA512_CONST_63, SHA512_CONST_64, SHA512_CONST_65, + SHA512_CONST_66, SHA512_CONST_67, SHA512_CONST_68, + SHA512_CONST_69, SHA512_CONST_70, SHA512_CONST_71, + SHA512_CONST_72, SHA512_CONST_73, SHA512_CONST_74, + SHA512_CONST_75, SHA512_CONST_76, SHA512_CONST_77, + SHA512_CONST_78, SHA512_CONST_79 + }; +#endif /* __sparc */ + + + if ((uintptr_t)blk & 0x7) { /* not 8-byte aligned? 
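Editorial note: the 64 unrolled SHA-256 rounds above keep the message schedule in sixteen variables, overwriting w0..w15 in place; in loop form the expansion is w[t] = SIGMA1_256(w[t-2]) + w[t-7] + SIGMA0_256(w[t-15]) + w[t-16]. A compact reference of that schedule (the per-round computation itself is the SHA256ROUND macro defined earlier):

#include <stdint.h>

#define ROTR32(x, n)	(((x) >> (n)) | ((x) << (32 - (n))))
#define SIG0(x)		(ROTR32((x), 7) ^ ROTR32((x), 18) ^ ((x) >> 3))
#define SIG1(x)		(ROTR32((x), 17) ^ ROTR32((x), 19) ^ ((x) >> 10))

/*
 * Expand the 16 big-endian message words of one block into the full
 * 64-entry SHA-256 schedule; the unrolled code above performs the same
 * computation in place, indexing its sixteen variables modulo 16.
 */
static void
sha256_schedule(const uint32_t m[16], uint32_t w[64])
{
	int t;

	for (t = 0; t < 16; t++)
		w[t] = m[t];
	for (t = 16; t < 64; t++)
		w[t] = SIG1(w[t - 2]) + w[t - 7] + SIG0(w[t - 15]) +
		    w[t - 16];
}
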
*/ + bcopy(blk, ctx->buf_un.buf64, sizeof (ctx->buf_un.buf64)); + blk = (uint8_t *)ctx->buf_un.buf64; + } + + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w0 = LOAD_BIG_64(blk + 8 * 0); + SHA512ROUND(a, b, c, d, e, f, g, h, 0, w0); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w1 = LOAD_BIG_64(blk + 8 * 1); + SHA512ROUND(h, a, b, c, d, e, f, g, 1, w1); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w2 = LOAD_BIG_64(blk + 8 * 2); + SHA512ROUND(g, h, a, b, c, d, e, f, 2, w2); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w3 = LOAD_BIG_64(blk + 8 * 3); + SHA512ROUND(f, g, h, a, b, c, d, e, 3, w3); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w4 = LOAD_BIG_64(blk + 8 * 4); + SHA512ROUND(e, f, g, h, a, b, c, d, 4, w4); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w5 = LOAD_BIG_64(blk + 8 * 5); + SHA512ROUND(d, e, f, g, h, a, b, c, 5, w5); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w6 = LOAD_BIG_64(blk + 8 * 6); + SHA512ROUND(c, d, e, f, g, h, a, b, 6, w6); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w7 = LOAD_BIG_64(blk + 8 * 7); + SHA512ROUND(b, c, d, e, f, g, h, a, 7, w7); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w8 = LOAD_BIG_64(blk + 8 * 8); + SHA512ROUND(a, b, c, d, e, f, g, h, 8, w8); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w9 = LOAD_BIG_64(blk + 8 * 9); + SHA512ROUND(h, a, b, c, d, e, f, g, 9, w9); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w10 = LOAD_BIG_64(blk + 8 * 10); + SHA512ROUND(g, h, a, b, c, d, e, f, 10, w10); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w11 = LOAD_BIG_64(blk + 8 * 11); + SHA512ROUND(f, g, h, a, b, c, d, e, 11, w11); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w12 = LOAD_BIG_64(blk + 8 * 12); + SHA512ROUND(e, f, g, h, a, b, c, d, 12, w12); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w13 = LOAD_BIG_64(blk + 8 * 13); + SHA512ROUND(d, e, f, g, h, a, b, c, 13, w13); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w14 = LOAD_BIG_64(blk + 8 * 14); + SHA512ROUND(c, d, e, f, g, h, a, b, 14, w14); + /* LINTED E_BAD_PTR_CAST_ALIGN */ + w15 = LOAD_BIG_64(blk + 8 * 15); + SHA512ROUND(b, c, d, e, f, g, h, a, 15, w15); + + w0 = SIGMA1(w14) + w9 + SIGMA0(w1) + w0; + SHA512ROUND(a, b, c, d, e, f, g, h, 16, w0); + w1 = SIGMA1(w15) + w10 + SIGMA0(w2) + w1; + SHA512ROUND(h, a, b, c, d, e, f, g, 17, w1); + w2 = SIGMA1(w0) + w11 + SIGMA0(w3) + w2; + SHA512ROUND(g, h, a, b, c, d, e, f, 18, w2); + w3 = SIGMA1(w1) + w12 + SIGMA0(w4) + w3; + SHA512ROUND(f, g, h, a, b, c, d, e, 19, w3); + w4 = SIGMA1(w2) + w13 + SIGMA0(w5) + w4; + SHA512ROUND(e, f, g, h, a, b, c, d, 20, w4); + w5 = SIGMA1(w3) + w14 + SIGMA0(w6) + w5; + SHA512ROUND(d, e, f, g, h, a, b, c, 21, w5); + w6 = SIGMA1(w4) + w15 + SIGMA0(w7) + w6; + SHA512ROUND(c, d, e, f, g, h, a, b, 22, w6); + w7 = SIGMA1(w5) + w0 + SIGMA0(w8) + w7; + SHA512ROUND(b, c, d, e, f, g, h, a, 23, w7); + w8 = SIGMA1(w6) + w1 + SIGMA0(w9) + w8; + SHA512ROUND(a, b, c, d, e, f, g, h, 24, w8); + w9 = SIGMA1(w7) + w2 + SIGMA0(w10) + w9; + SHA512ROUND(h, a, b, c, d, e, f, g, 25, w9); + w10 = SIGMA1(w8) + w3 + SIGMA0(w11) + w10; + SHA512ROUND(g, h, a, b, c, d, e, f, 26, w10); + w11 = SIGMA1(w9) + w4 + SIGMA0(w12) + w11; + SHA512ROUND(f, g, h, a, b, c, d, e, 27, w11); + w12 = SIGMA1(w10) + w5 + SIGMA0(w13) + w12; + SHA512ROUND(e, f, g, h, a, b, c, d, 28, w12); + w13 = SIGMA1(w11) + w6 + SIGMA0(w14) + w13; + SHA512ROUND(d, e, f, g, h, a, b, c, 29, w13); + w14 = SIGMA1(w12) + w7 + SIGMA0(w15) + w14; + SHA512ROUND(c, d, e, f, g, h, a, b, 30, w14); + w15 = SIGMA1(w13) + w8 + SIGMA0(w0) + w15; + SHA512ROUND(b, c, d, e, f, g, h, a, 31, w15); + + w0 = SIGMA1(w14) + w9 + SIGMA0(w1) + w0; + SHA512ROUND(a, b, c, d, e, f, g, h, 32, w0); + w1 = SIGMA1(w15) + w10 + 
SIGMA0(w2) + w1; + SHA512ROUND(h, a, b, c, d, e, f, g, 33, w1); + w2 = SIGMA1(w0) + w11 + SIGMA0(w3) + w2; + SHA512ROUND(g, h, a, b, c, d, e, f, 34, w2); + w3 = SIGMA1(w1) + w12 + SIGMA0(w4) + w3; + SHA512ROUND(f, g, h, a, b, c, d, e, 35, w3); + w4 = SIGMA1(w2) + w13 + SIGMA0(w5) + w4; + SHA512ROUND(e, f, g, h, a, b, c, d, 36, w4); + w5 = SIGMA1(w3) + w14 + SIGMA0(w6) + w5; + SHA512ROUND(d, e, f, g, h, a, b, c, 37, w5); + w6 = SIGMA1(w4) + w15 + SIGMA0(w7) + w6; + SHA512ROUND(c, d, e, f, g, h, a, b, 38, w6); + w7 = SIGMA1(w5) + w0 + SIGMA0(w8) + w7; + SHA512ROUND(b, c, d, e, f, g, h, a, 39, w7); + w8 = SIGMA1(w6) + w1 + SIGMA0(w9) + w8; + SHA512ROUND(a, b, c, d, e, f, g, h, 40, w8); + w9 = SIGMA1(w7) + w2 + SIGMA0(w10) + w9; + SHA512ROUND(h, a, b, c, d, e, f, g, 41, w9); + w10 = SIGMA1(w8) + w3 + SIGMA0(w11) + w10; + SHA512ROUND(g, h, a, b, c, d, e, f, 42, w10); + w11 = SIGMA1(w9) + w4 + SIGMA0(w12) + w11; + SHA512ROUND(f, g, h, a, b, c, d, e, 43, w11); + w12 = SIGMA1(w10) + w5 + SIGMA0(w13) + w12; + SHA512ROUND(e, f, g, h, a, b, c, d, 44, w12); + w13 = SIGMA1(w11) + w6 + SIGMA0(w14) + w13; + SHA512ROUND(d, e, f, g, h, a, b, c, 45, w13); + w14 = SIGMA1(w12) + w7 + SIGMA0(w15) + w14; + SHA512ROUND(c, d, e, f, g, h, a, b, 46, w14); + w15 = SIGMA1(w13) + w8 + SIGMA0(w0) + w15; + SHA512ROUND(b, c, d, e, f, g, h, a, 47, w15); + + w0 = SIGMA1(w14) + w9 + SIGMA0(w1) + w0; + SHA512ROUND(a, b, c, d, e, f, g, h, 48, w0); + w1 = SIGMA1(w15) + w10 + SIGMA0(w2) + w1; + SHA512ROUND(h, a, b, c, d, e, f, g, 49, w1); + w2 = SIGMA1(w0) + w11 + SIGMA0(w3) + w2; + SHA512ROUND(g, h, a, b, c, d, e, f, 50, w2); + w3 = SIGMA1(w1) + w12 + SIGMA0(w4) + w3; + SHA512ROUND(f, g, h, a, b, c, d, e, 51, w3); + w4 = SIGMA1(w2) + w13 + SIGMA0(w5) + w4; + SHA512ROUND(e, f, g, h, a, b, c, d, 52, w4); + w5 = SIGMA1(w3) + w14 + SIGMA0(w6) + w5; + SHA512ROUND(d, e, f, g, h, a, b, c, 53, w5); + w6 = SIGMA1(w4) + w15 + SIGMA0(w7) + w6; + SHA512ROUND(c, d, e, f, g, h, a, b, 54, w6); + w7 = SIGMA1(w5) + w0 + SIGMA0(w8) + w7; + SHA512ROUND(b, c, d, e, f, g, h, a, 55, w7); + w8 = SIGMA1(w6) + w1 + SIGMA0(w9) + w8; + SHA512ROUND(a, b, c, d, e, f, g, h, 56, w8); + w9 = SIGMA1(w7) + w2 + SIGMA0(w10) + w9; + SHA512ROUND(h, a, b, c, d, e, f, g, 57, w9); + w10 = SIGMA1(w8) + w3 + SIGMA0(w11) + w10; + SHA512ROUND(g, h, a, b, c, d, e, f, 58, w10); + w11 = SIGMA1(w9) + w4 + SIGMA0(w12) + w11; + SHA512ROUND(f, g, h, a, b, c, d, e, 59, w11); + w12 = SIGMA1(w10) + w5 + SIGMA0(w13) + w12; + SHA512ROUND(e, f, g, h, a, b, c, d, 60, w12); + w13 = SIGMA1(w11) + w6 + SIGMA0(w14) + w13; + SHA512ROUND(d, e, f, g, h, a, b, c, 61, w13); + w14 = SIGMA1(w12) + w7 + SIGMA0(w15) + w14; + SHA512ROUND(c, d, e, f, g, h, a, b, 62, w14); + w15 = SIGMA1(w13) + w8 + SIGMA0(w0) + w15; + SHA512ROUND(b, c, d, e, f, g, h, a, 63, w15); + + w0 = SIGMA1(w14) + w9 + SIGMA0(w1) + w0; + SHA512ROUND(a, b, c, d, e, f, g, h, 64, w0); + w1 = SIGMA1(w15) + w10 + SIGMA0(w2) + w1; + SHA512ROUND(h, a, b, c, d, e, f, g, 65, w1); + w2 = SIGMA1(w0) + w11 + SIGMA0(w3) + w2; + SHA512ROUND(g, h, a, b, c, d, e, f, 66, w2); + w3 = SIGMA1(w1) + w12 + SIGMA0(w4) + w3; + SHA512ROUND(f, g, h, a, b, c, d, e, 67, w3); + w4 = SIGMA1(w2) + w13 + SIGMA0(w5) + w4; + SHA512ROUND(e, f, g, h, a, b, c, d, 68, w4); + w5 = SIGMA1(w3) + w14 + SIGMA0(w6) + w5; + SHA512ROUND(d, e, f, g, h, a, b, c, 69, w5); + w6 = SIGMA1(w4) + w15 + SIGMA0(w7) + w6; + SHA512ROUND(c, d, e, f, g, h, a, b, 70, w6); + w7 = SIGMA1(w5) + w0 + SIGMA0(w8) + w7; + SHA512ROUND(b, c, d, e, f, g, h, a, 71, w7); + w8 = SIGMA1(w6) + w1 + 
SIGMA0(w9) + w8; + SHA512ROUND(a, b, c, d, e, f, g, h, 72, w8); + w9 = SIGMA1(w7) + w2 + SIGMA0(w10) + w9; + SHA512ROUND(h, a, b, c, d, e, f, g, 73, w9); + w10 = SIGMA1(w8) + w3 + SIGMA0(w11) + w10; + SHA512ROUND(g, h, a, b, c, d, e, f, 74, w10); + w11 = SIGMA1(w9) + w4 + SIGMA0(w12) + w11; + SHA512ROUND(f, g, h, a, b, c, d, e, 75, w11); + w12 = SIGMA1(w10) + w5 + SIGMA0(w13) + w12; + SHA512ROUND(e, f, g, h, a, b, c, d, 76, w12); + w13 = SIGMA1(w11) + w6 + SIGMA0(w14) + w13; + SHA512ROUND(d, e, f, g, h, a, b, c, 77, w13); + w14 = SIGMA1(w12) + w7 + SIGMA0(w15) + w14; + SHA512ROUND(c, d, e, f, g, h, a, b, 78, w14); + w15 = SIGMA1(w13) + w8 + SIGMA0(w0) + w15; + SHA512ROUND(b, c, d, e, f, g, h, a, 79, w15); + + ctx->state.s64[0] += a; + ctx->state.s64[1] += b; + ctx->state.s64[2] += c; + ctx->state.s64[3] += d; + ctx->state.s64[4] += e; + ctx->state.s64[5] += f; + ctx->state.s64[6] += g; + ctx->state.s64[7] += h; + +} +#endif /* !__amd64 || !_KERNEL */ + + +/* + * Encode() + * + * purpose: to convert a list of numbers from little endian to big endian + * input: uint8_t * : place to store the converted big endian numbers + * uint32_t * : place to get numbers to convert from + * size_t : the length of the input in bytes + * output: void + */ + +static void +Encode(uint8_t *_RESTRICT_KYWD output, uint32_t *_RESTRICT_KYWD input, + size_t len) +{ + size_t i, j; + +#if defined(__sparc) + if (IS_P2ALIGNED(output, sizeof (uint32_t))) { + for (i = 0, j = 0; j < len; i++, j += 4) { + /* LINTED E_BAD_PTR_CAST_ALIGN */ + *((uint32_t *)(output + j)) = input[i]; + } + } else { +#endif /* little endian -- will work on big endian, but slowly */ + for (i = 0, j = 0; j < len; i++, j += 4) { + output[j] = (input[i] >> 24) & 0xff; + output[j + 1] = (input[i] >> 16) & 0xff; + output[j + 2] = (input[i] >> 8) & 0xff; + output[j + 3] = input[i] & 0xff; + } +#if defined(__sparc) + } +#endif +} + +static void +Encode64(uint8_t *_RESTRICT_KYWD output, uint64_t *_RESTRICT_KYWD input, + size_t len) +{ + size_t i, j; + +#if defined(__sparc) + if (IS_P2ALIGNED(output, sizeof (uint64_t))) { + for (i = 0, j = 0; j < len; i++, j += 8) { + /* LINTED E_BAD_PTR_CAST_ALIGN */ + *((uint64_t *)(output + j)) = input[i]; + } + } else { +#endif /* little endian -- will work on big endian, but slowly */ + for (i = 0, j = 0; j < len; i++, j += 8) { + + output[j] = (input[i] >> 56) & 0xff; + output[j + 1] = (input[i] >> 48) & 0xff; + output[j + 2] = (input[i] >> 40) & 0xff; + output[j + 3] = (input[i] >> 32) & 0xff; + output[j + 4] = (input[i] >> 24) & 0xff; + output[j + 5] = (input[i] >> 16) & 0xff; + output[j + 6] = (input[i] >> 8) & 0xff; + output[j + 7] = input[i] & 0xff; + } +#if defined(__sparc) + } +#endif +} + + +void +SHA2Init(uint64_t mech, SHA2_CTX *ctx) +{ + + switch (mech) { + case SHA256_MECH_INFO_TYPE: + case SHA256_HMAC_MECH_INFO_TYPE: + case SHA256_HMAC_GEN_MECH_INFO_TYPE: + ctx->state.s32[0] = 0x6a09e667U; + ctx->state.s32[1] = 0xbb67ae85U; + ctx->state.s32[2] = 0x3c6ef372U; + ctx->state.s32[3] = 0xa54ff53aU; + ctx->state.s32[4] = 0x510e527fU; + ctx->state.s32[5] = 0x9b05688cU; + ctx->state.s32[6] = 0x1f83d9abU; + ctx->state.s32[7] = 0x5be0cd19U; + break; + case SHA384_MECH_INFO_TYPE: + case SHA384_HMAC_MECH_INFO_TYPE: + case SHA384_HMAC_GEN_MECH_INFO_TYPE: + ctx->state.s64[0] = 0xcbbb9d5dc1059ed8ULL; + ctx->state.s64[1] = 0x629a292a367cd507ULL; + ctx->state.s64[2] = 0x9159015a3070dd17ULL; + ctx->state.s64[3] = 0x152fecd8f70e5939ULL; + ctx->state.s64[4] = 0x67332667ffc00b31ULL; + ctx->state.s64[5] = 
0x8eb44a8768581511ULL; + ctx->state.s64[6] = 0xdb0c2e0d64f98fa7ULL; + ctx->state.s64[7] = 0x47b5481dbefa4fa4ULL; + break; + case SHA512_MECH_INFO_TYPE: + case SHA512_HMAC_MECH_INFO_TYPE: + case SHA512_HMAC_GEN_MECH_INFO_TYPE: + ctx->state.s64[0] = 0x6a09e667f3bcc908ULL; + ctx->state.s64[1] = 0xbb67ae8584caa73bULL; + ctx->state.s64[2] = 0x3c6ef372fe94f82bULL; + ctx->state.s64[3] = 0xa54ff53a5f1d36f1ULL; + ctx->state.s64[4] = 0x510e527fade682d1ULL; + ctx->state.s64[5] = 0x9b05688c2b3e6c1fULL; + ctx->state.s64[6] = 0x1f83d9abfb41bd6bULL; + ctx->state.s64[7] = 0x5be0cd19137e2179ULL; + break; + case SHA512_224_MECH_INFO_TYPE: + ctx->state.s64[0] = 0x8C3D37C819544DA2ULL; + ctx->state.s64[1] = 0x73E1996689DCD4D6ULL; + ctx->state.s64[2] = 0x1DFAB7AE32FF9C82ULL; + ctx->state.s64[3] = 0x679DD514582F9FCFULL; + ctx->state.s64[4] = 0x0F6D2B697BD44DA8ULL; + ctx->state.s64[5] = 0x77E36F7304C48942ULL; + ctx->state.s64[6] = 0x3F9D85A86A1D36C8ULL; + ctx->state.s64[7] = 0x1112E6AD91D692A1ULL; + break; + case SHA512_256_MECH_INFO_TYPE: + ctx->state.s64[0] = 0x22312194FC2BF72CULL; + ctx->state.s64[1] = 0x9F555FA3C84C64C2ULL; + ctx->state.s64[2] = 0x2393B86B6F53B151ULL; + ctx->state.s64[3] = 0x963877195940EABDULL; + ctx->state.s64[4] = 0x96283EE2A88EFFE3ULL; + ctx->state.s64[5] = 0xBE5E1E2553863992ULL; + ctx->state.s64[6] = 0x2B0199FC2C85B8AAULL; + ctx->state.s64[7] = 0x0EB72DDC81C52CA2ULL; + break; +#ifdef _KERNEL + default: + cmn_err(CE_PANIC, + "sha2_init: failed to find a supported algorithm: 0x%x", + (uint32_t)mech); + +#endif /* _KERNEL */ + } + + ctx->algotype = (uint32_t)mech; + ctx->count.c64[0] = ctx->count.c64[1] = 0; +} + +#ifndef _KERNEL + +// #pragma inline(SHA256Init, SHA384Init, SHA512Init) +void +SHA256Init(SHA256_CTX *ctx) +{ + SHA2Init(SHA256, ctx); +} + +void +SHA384Init(SHA384_CTX *ctx) +{ + SHA2Init(SHA384, ctx); +} + +void +SHA512Init(SHA512_CTX *ctx) +{ + SHA2Init(SHA512, ctx); +} + +#endif /* _KERNEL */ + +/* + * SHA2Update() + * + * purpose: continues an sha2 digest operation, using the message block + * to update the context. + * input: SHA2_CTX * : the context to update + * void * : the message block + * size_t : the length of the message block, in bytes + * output: void + */ + +void +SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len) +{ + uint32_t i, buf_index, buf_len, buf_limit; + const uint8_t *input = inptr; + uint32_t algotype = ctx->algotype; + + /* check for noop */ + if (input_len == 0) + return; + + if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) { + buf_limit = 64; + + /* compute number of bytes mod 64 */ + buf_index = (ctx->count.c32[1] >> 3) & 0x3F; + + /* update number of bits */ + if ((ctx->count.c32[1] += (input_len << 3)) < (input_len << 3)) + ctx->count.c32[0]++; + + ctx->count.c32[0] += (input_len >> 29); + + } else { + buf_limit = 128; + + /* compute number of bytes mod 128 */ + buf_index = (ctx->count.c64[1] >> 3) & 0x7F; + + /* update number of bits */ + if ((ctx->count.c64[1] += (input_len << 3)) < (input_len << 3)) + ctx->count.c64[0]++; + + ctx->count.c64[0] += (input_len >> 29); + } + + buf_len = buf_limit - buf_index; + + /* transform as many times as possible */ + i = 0; + if (input_len >= buf_len) { + + /* + * general optimization: + * + * only do initial bcopy() and SHA2Transform() if + * buf_index != 0. if buf_index == 0, we're just + * wasting our time doing the bcopy() since there + * wasn't any data left over from a previous call to + * SHA2Update(). 
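For context on how this buffering is exercised: the module's streaming interface is driven as init/update/final, with SHA2Update() accepting arbitrarily sized fragments and carrying partial blocks across calls in ctx->buf_un. Below is a minimal userland sketch of that flow, assuming the SHA2_CTX type, the SHA512_MECH_INFO_TYPE constant, and the sys/sha2.h header from this tree are all visible to the build; it is an illustration only, not part of the imported source.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/sha2.h>	/* SHA2_CTX, SHA2Init(), SHA2Update(), SHA2Final() */

int
main(void)
{
	SHA2_CTX ctx;
	uint8_t digest[64];	/* SHA-512 produces a 64-byte digest */
	const char *msg = "abc";

	SHA2Init(SHA512_MECH_INFO_TYPE, &ctx);
	/* feed the message in fragments; the context buffers partial blocks */
	SHA2Update(&ctx, msg, 1);
	SHA2Update(&ctx, msg + 1, strlen(msg) - 1);
	SHA2Final(digest, &ctx);	/* pads, appends the bit count, zeroizes ctx */

	for (size_t i = 0; i < sizeof (digest); i++)
		printf("%02x", digest[i]);
	printf("\n");
	return (0);
}

The 64-byte output buffer matches SHA-512; a SHA-256 caller would pass SHA256_MECH_INFO_TYPE and use a 32-byte buffer instead.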
+ */ + if (buf_index) { + bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len); + if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) + SHA256Transform(ctx, ctx->buf_un.buf8); + else + SHA512Transform(ctx, ctx->buf_un.buf8); + + i = buf_len; + } + +#if !defined(__amd64) || !defined(_KERNEL) + if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) { + for (; i + buf_limit - 1 < input_len; i += buf_limit) { + SHA256Transform(ctx, &input[i]); + } + } else { + for (; i + buf_limit - 1 < input_len; i += buf_limit) { + SHA512Transform(ctx, &input[i]); + } + } + +#else + uint32_t block_count; + if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) { + block_count = (input_len - i) >> 6; + if (block_count > 0) { + SHA256TransformBlocks(ctx, &input[i], + block_count); + i += block_count << 6; + } + } else { + block_count = (input_len - i) >> 7; + if (block_count > 0) { + SHA512TransformBlocks(ctx, &input[i], + block_count); + i += block_count << 7; + } + } +#endif /* !__amd64 || !_KERNEL */ + + /* + * general optimization: + * + * if i and input_len are the same, return now instead + * of calling bcopy(), since the bcopy() in this case + * will be an expensive noop. + */ + + if (input_len == i) + return; + + buf_index = 0; + } + + /* buffer remaining input */ + bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i); +} + + +/* + * SHA2Final() + * + * purpose: ends an sha2 digest operation, finalizing the message digest and + * zeroing the context. + * input: uchar_t * : a buffer to store the digest + * : The function actually uses void* because many + * : callers pass things other than uchar_t here. + * SHA2_CTX * : the context to finalize, save, and zero + * output: void + */ + +void +SHA2Final(void *digest, SHA2_CTX *ctx) +{ + uint8_t bitcount_be[sizeof (ctx->count.c32)]; + uint8_t bitcount_be64[sizeof (ctx->count.c64)]; + uint32_t index; + uint32_t algotype = ctx->algotype; + + if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) { + index = (ctx->count.c32[1] >> 3) & 0x3f; + Encode(bitcount_be, ctx->count.c32, sizeof (bitcount_be)); + SHA2Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index); + SHA2Update(ctx, bitcount_be, sizeof (bitcount_be)); + Encode(digest, ctx->state.s32, sizeof (ctx->state.s32)); + } else { + index = (ctx->count.c64[1] >> 3) & 0x7f; + Encode64(bitcount_be64, ctx->count.c64, + sizeof (bitcount_be64)); + SHA2Update(ctx, PADDING, ((index < 112) ? 
112 : 240) - index); + SHA2Update(ctx, bitcount_be64, sizeof (bitcount_be64)); + if (algotype <= SHA384_HMAC_GEN_MECH_INFO_TYPE) { + ctx->state.s64[6] = ctx->state.s64[7] = 0; + Encode64(digest, ctx->state.s64, + sizeof (uint64_t) * 6); + } else if (algotype == SHA512_224_MECH_INFO_TYPE) { + uint8_t last[sizeof (uint64_t)]; + /* + * Since SHA-512/224 doesn't align well to 64-bit + * boundaries, we must do the encoding in three steps: + * 1) encode the three 64-bit words that fit neatly + * 2) encode the last 64-bit word to a temp buffer + * 3) chop out the lower 32-bits from the temp buffer + * and append them to the digest + */ + Encode64(digest, ctx->state.s64, sizeof (uint64_t) * 3); + Encode64(last, &ctx->state.s64[3], sizeof (uint64_t)); + bcopy(last, (uint8_t *)digest + 24, 4); + } else if (algotype == SHA512_256_MECH_INFO_TYPE) { + Encode64(digest, ctx->state.s64, sizeof (uint64_t) * 4); + } else { + Encode64(digest, ctx->state.s64, + sizeof (ctx->state.s64)); + } + } + + /* zeroize sensitive information */ + bzero(ctx, sizeof (*ctx)); +} + +#ifdef _KERNEL +EXPORT_SYMBOL(SHA2Init); +EXPORT_SYMBOL(SHA2Update); +EXPORT_SYMBOL(SHA2Final); +#endif diff --git a/sys/contrib/openzfs/module/icp/algs/skein/THIRDPARTYLICENSE b/sys/contrib/openzfs/module/icp/algs/skein/THIRDPARTYLICENSE new file mode 100644 index 000000000000..b7434fd17872 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/skein/THIRDPARTYLICENSE @@ -0,0 +1,3 @@ +Implementation of the Skein hash function. +Source code author: Doug Whiting, 2008. +This algorithm and source code is released to the public domain. diff --git a/sys/contrib/openzfs/module/icp/algs/skein/THIRDPARTYLICENSE.descrip b/sys/contrib/openzfs/module/icp/algs/skein/THIRDPARTYLICENSE.descrip new file mode 100644 index 000000000000..0ae89cfdf5ce --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/skein/THIRDPARTYLICENSE.descrip @@ -0,0 +1 @@ +LICENSE TERMS OF SKEIN HASH ALGORITHM IMPLEMENTATION diff --git a/sys/contrib/openzfs/module/icp/algs/skein/skein.c b/sys/contrib/openzfs/module/icp/algs/skein/skein.c new file mode 100644 index 000000000000..83fe84260307 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/skein/skein.c @@ -0,0 +1,911 @@ +/* + * Implementation of the Skein hash function. + * Source code author: Doug Whiting, 2008. + * This algorithm and source code is released to the public domain. + */ +/* Copyright 2013 Doug Whiting. This code is released to the public domain. 
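The skein.c file that follows implements the sequential Skein API for the 256-, 512-, and 1024-bit state sizes. Each size follows the same three phases: Init() (or InitExt() for keyed and tree-hash use) derives the initial chaining values from a configuration block, Update() buffers input and processes whole blocks, and Final() pads the last block and runs Threefish in counter mode to produce the requested number of output bits. A minimal sketch of straight hashing with the 512-bit state, using only the declarations visible in this diff (an illustration, not part of the imported source):

#include <stdio.h>
#include <stdint.h>
#include <sys/skein.h>	/* Skein_512_Ctxt_t and the Skein_512_* prototypes */

int
main(void)
{
	Skein_512_Ctxt_t ctx;
	uint8_t digest[64];	/* 512-bit output = 64 bytes */
	const uint8_t msg[] = "The quick brown fox";

	if (Skein_512_Init(&ctx, 512) != SKEIN_SUCCESS)
		return (1);
	/* Update() may be called repeatedly with arbitrary lengths */
	if (Skein_512_Update(&ctx, msg, sizeof (msg) - 1) != SKEIN_SUCCESS)
		return (1);
	if (Skein_512_Final(&ctx, digest) != SKEIN_SUCCESS)
		return (1);

	for (size_t i = 0; i < sizeof (digest); i++)
		printf("%02x", digest[i]);
	printf("\n");
	return (0);
}

Because Final() generates output with Threefish in counter mode, hashBitLen is not limited to the conventional 256/384/512 values; the output byte count is simply (hashBitLen + 7) >> 3.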
*/ + +#include <sys/sysmacros.h> +#include <sys/types.h> +#include <sys/skein.h> /* get the Skein API definitions */ +#include "skein_impl.h" /* get internal definitions */ + +/* 256-bit Skein */ +/* init the context for a straight hashing operation */ +int +Skein_256_Init(Skein_256_Ctxt_t *ctx, size_t hashBitLen) +{ + union { + uint8_t b[SKEIN_256_STATE_BYTES]; + uint64_t w[SKEIN_256_STATE_WORDS]; + } cfg; /* config block */ + + Skein_Assert(hashBitLen > 0, SKEIN_BAD_HASHLEN); + ctx->h.hashBitLen = hashBitLen; /* output hash bit count */ + + switch (hashBitLen) { /* use pre-computed values, where available */ +#ifndef SKEIN_NO_PRECOMP + case 256: + bcopy(SKEIN_256_IV_256, ctx->X, sizeof (ctx->X)); + break; + case 224: + bcopy(SKEIN_256_IV_224, ctx->X, sizeof (ctx->X)); + break; + case 160: + bcopy(SKEIN_256_IV_160, ctx->X, sizeof (ctx->X)); + break; + case 128: + bcopy(SKEIN_256_IV_128, ctx->X, sizeof (ctx->X)); + break; +#endif + default: + /* here if there is no precomputed IV value available */ + /* + * build/process the config block, type == CONFIG (could be + * precomputed) + */ + /* set tweaks: T0=0; T1=CFG | FINAL */ + Skein_Start_New_Type(ctx, CFG_FINAL); + + /* set the schema, version */ + cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER); + /* hash result length in bits */ + cfg.w[1] = Skein_Swap64(hashBitLen); + cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL); + /* zero pad config block */ + bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0])); + + /* compute the initial chaining values from config block */ + /* zero the chaining variables */ + bzero(ctx->X, sizeof (ctx->X)); + Skein_256_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); + break; + } + /* + * The chaining vars ctx->X are now initialized for the given + * hashBitLen. + * Set up to process the data message portion of the hash (default) + */ + Skein_Start_New_Type(ctx, MSG); /* T0=0, T1= MSG type */ + + return (SKEIN_SUCCESS); +} + +/* init the context for a MAC and/or tree hash operation */ +/* + * [identical to Skein_256_Init() when keyBytes == 0 && + * treeInfo == SKEIN_CFG_TREE_INFO_SEQUENTIAL] + */ +int +Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo, + const uint8_t *key, size_t keyBytes) +{ + union { + uint8_t b[SKEIN_256_STATE_BYTES]; + uint64_t w[SKEIN_256_STATE_WORDS]; + } cfg; /* config block */ + + Skein_Assert(hashBitLen > 0, SKEIN_BAD_HASHLEN); + Skein_Assert(keyBytes == 0 || key != NULL, SKEIN_FAIL); + + /* compute the initial chaining values ctx->X[], based on key */ + if (keyBytes == 0) { /* is there a key? 
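When keyBytes is nonzero, the branch below first hashes the key into the chaining variables and only then processes the configuration block, which is what turns Skein into a keyed MAC. A hedged sketch of keyed use with the 256-bit state, built only from names that appear in this file (the helper itself is illustrative and is not an API exported by the module):

#include <stddef.h>
#include <stdint.h>
#include <sys/skein.h>

/*
 * Compute a 256-bit Skein MAC over msg using key; a sketch of InitExt()
 * usage under the assumptions stated above, not the module's own interface.
 */
static int
skein256_mac(const uint8_t *key, size_t keylen,
    const uint8_t *msg, size_t msglen, uint8_t out[32])
{
	Skein_256_Ctxt_t ctx;

	if (Skein_256_InitExt(&ctx, 256, SKEIN_CFG_TREE_INFO_SEQUENTIAL,
	    key, keylen) != SKEIN_SUCCESS)
		return (-1);
	if (Skein_256_Update(&ctx, msg, msglen) != SKEIN_SUCCESS)
		return (-1);
	return (Skein_256_Final(&ctx, out) == SKEIN_SUCCESS ? 0 : -1);
}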
*/ + /* no key: use all zeroes as key for config block */ + bzero(ctx->X, sizeof (ctx->X)); + } else { /* here to pre-process a key */ + + Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X)); + /* do a mini-Init right here */ + /* set output hash bit count = state size */ + ctx->h.hashBitLen = 8 * sizeof (ctx->X); + /* set tweaks: T0 = 0; T1 = KEY type */ + Skein_Start_New_Type(ctx, KEY); + /* zero the initial chaining variables */ + bzero(ctx->X, sizeof (ctx->X)); + /* hash the key */ + (void) Skein_256_Update(ctx, key, keyBytes); + /* put result into cfg.b[] */ + (void) Skein_256_Final_Pad(ctx, cfg.b); + /* copy over into ctx->X[] */ + bcopy(cfg.b, ctx->X, sizeof (cfg.b)); +#if SKEIN_NEED_SWAP + { + uint_t i; + /* convert key bytes to context words */ + for (i = 0; i < SKEIN_256_STATE_WORDS; i++) + ctx->X[i] = Skein_Swap64(ctx->X[i]); + } +#endif + } + /* + * build/process the config block, type == CONFIG (could be + * precomputed for each key) + */ + ctx->h.hashBitLen = hashBitLen; /* output hash bit count */ + Skein_Start_New_Type(ctx, CFG_FINAL); + + bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */ + cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER); + cfg.w[1] = Skein_Swap64(hashBitLen); /* hash result length in bits */ + /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */ + cfg.w[2] = Skein_Swap64(treeInfo); + + Skein_Show_Key(256, &ctx->h, key, keyBytes); + + /* compute the initial chaining values from config block */ + Skein_256_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); + + /* The chaining vars ctx->X are now initialized */ + /* Set up to process the data message portion of the hash (default) */ + ctx->h.bCnt = 0; /* buffer b[] starts out empty */ + Skein_Start_New_Type(ctx, MSG); + + return (SKEIN_SUCCESS); +} + +/* process the input bytes */ +int +Skein_256_Update(Skein_256_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt) +{ + size_t n; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL); + + /* process full blocks, if any */ + if (msgByteCnt + ctx->h.bCnt > SKEIN_256_BLOCK_BYTES) { + /* finish up any buffered message data */ + if (ctx->h.bCnt) { + /* # bytes free in buffer b[] */ + n = SKEIN_256_BLOCK_BYTES - ctx->h.bCnt; + if (n) { + /* check on our logic here */ + Skein_assert(n < msgByteCnt); + bcopy(msg, &ctx->b[ctx->h.bCnt], n); + msgByteCnt -= n; + msg += n; + ctx->h.bCnt += n; + } + Skein_assert(ctx->h.bCnt == SKEIN_256_BLOCK_BYTES); + Skein_256_Process_Block(ctx, ctx->b, 1, + SKEIN_256_BLOCK_BYTES); + ctx->h.bCnt = 0; + } + /* + * now process any remaining full blocks, directly from input + * message data + */ + if (msgByteCnt > SKEIN_256_BLOCK_BYTES) { + /* number of full blocks to process */ + n = (msgByteCnt - 1) / SKEIN_256_BLOCK_BYTES; + Skein_256_Process_Block(ctx, msg, n, + SKEIN_256_BLOCK_BYTES); + msgByteCnt -= n * SKEIN_256_BLOCK_BYTES; + msg += n * SKEIN_256_BLOCK_BYTES; + } + Skein_assert(ctx->h.bCnt == 0); + } + + /* copy any remaining source message data bytes into b[] */ + if (msgByteCnt) { + Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES); + bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt); + ctx->h.bCnt += msgByteCnt; + } + + return (SKEIN_SUCCESS); +} + +/* finalize the hash computation and output the result */ +int +Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal) +{ + size_t i, n, byteCnt; + uint64_t X[SKEIN_256_STATE_WORDS]; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL); + + ctx->h.T[1] |= 
SKEIN_T1_FLAG_FINAL; /* tag as the final block */ + /* zero pad b[] if necessary */ + if (ctx->h.bCnt < SKEIN_256_BLOCK_BYTES) + bzero(&ctx->b[ctx->h.bCnt], + SKEIN_256_BLOCK_BYTES - ctx->h.bCnt); + + /* process the final block */ + Skein_256_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt); + + /* now output the result */ + /* total number of output bytes */ + byteCnt = (ctx->h.hashBitLen + 7) >> 3; + + /* run Threefish in "counter mode" to generate output */ + /* zero out b[], so it can hold the counter */ + bzero(ctx->b, sizeof (ctx->b)); + /* keep a local copy of counter mode "key" */ + bcopy(ctx->X, X, sizeof (X)); + for (i = 0; i * SKEIN_256_BLOCK_BYTES < byteCnt; i++) { + /* build the counter block */ + uint64_t tmp = Skein_Swap64((uint64_t)i); + bcopy(&tmp, ctx->b, sizeof (tmp)); + Skein_Start_New_Type(ctx, OUT_FINAL); + /* run "counter mode" */ + Skein_256_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t)); + /* number of output bytes left to go */ + n = byteCnt - i * SKEIN_256_BLOCK_BYTES; + if (n >= SKEIN_256_BLOCK_BYTES) + n = SKEIN_256_BLOCK_BYTES; + Skein_Put64_LSB_First(hashVal + i * SKEIN_256_BLOCK_BYTES, + ctx->X, n); /* "output" the ctr mode bytes */ + Skein_Show_Final(256, &ctx->h, n, + hashVal + i * SKEIN_256_BLOCK_BYTES); + /* restore the counter mode key for next time */ + bcopy(X, ctx->X, sizeof (X)); + } + return (SKEIN_SUCCESS); +} + +/* 512-bit Skein */ + +/* init the context for a straight hashing operation */ +int +Skein_512_Init(Skein_512_Ctxt_t *ctx, size_t hashBitLen) +{ + union { + uint8_t b[SKEIN_512_STATE_BYTES]; + uint64_t w[SKEIN_512_STATE_WORDS]; + } cfg; /* config block */ + + Skein_Assert(hashBitLen > 0, SKEIN_BAD_HASHLEN); + ctx->h.hashBitLen = hashBitLen; /* output hash bit count */ + + switch (hashBitLen) { /* use pre-computed values, where available */ +#ifndef SKEIN_NO_PRECOMP + case 512: + bcopy(SKEIN_512_IV_512, ctx->X, sizeof (ctx->X)); + break; + case 384: + bcopy(SKEIN_512_IV_384, ctx->X, sizeof (ctx->X)); + break; + case 256: + bcopy(SKEIN_512_IV_256, ctx->X, sizeof (ctx->X)); + break; + case 224: + bcopy(SKEIN_512_IV_224, ctx->X, sizeof (ctx->X)); + break; +#endif + default: + /* + * here if there is no precomputed IV value available + * build/process the config block, type == CONFIG (could be + * precomputed) + */ + /* set tweaks: T0=0; T1=CFG | FINAL */ + Skein_Start_New_Type(ctx, CFG_FINAL); + + /* set the schema, version */ + cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER); + /* hash result length in bits */ + cfg.w[1] = Skein_Swap64(hashBitLen); + cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL); + /* zero pad config block */ + bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0])); + + /* compute the initial chaining values from config block */ + /* zero the chaining variables */ + bzero(ctx->X, sizeof (ctx->X)); + Skein_512_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); + break; + } + + /* + * The chaining vars ctx->X are now initialized for the given + * hashBitLen. 
Set up to process the data message portion of the + * hash (default) + */ + Skein_Start_New_Type(ctx, MSG); /* T0=0, T1= MSG type */ + + return (SKEIN_SUCCESS); +} + +/* init the context for a MAC and/or tree hash operation */ +/* + * [identical to Skein_512_Init() when keyBytes == 0 && + * treeInfo == SKEIN_CFG_TREE_INFO_SEQUENTIAL] + */ +int +Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo, + const uint8_t *key, size_t keyBytes) +{ + union { + uint8_t b[SKEIN_512_STATE_BYTES]; + uint64_t w[SKEIN_512_STATE_WORDS]; + } cfg; /* config block */ + + Skein_Assert(hashBitLen > 0, SKEIN_BAD_HASHLEN); + Skein_Assert(keyBytes == 0 || key != NULL, SKEIN_FAIL); + + /* compute the initial chaining values ctx->X[], based on key */ + if (keyBytes == 0) { /* is there a key? */ + /* no key: use all zeroes as key for config block */ + bzero(ctx->X, sizeof (ctx->X)); + } else { /* here to pre-process a key */ + + Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X)); + /* do a mini-Init right here */ + /* set output hash bit count = state size */ + ctx->h.hashBitLen = 8 * sizeof (ctx->X); + /* set tweaks: T0 = 0; T1 = KEY type */ + Skein_Start_New_Type(ctx, KEY); + /* zero the initial chaining variables */ + bzero(ctx->X, sizeof (ctx->X)); + (void) Skein_512_Update(ctx, key, keyBytes); /* hash the key */ + /* put result into cfg.b[] */ + (void) Skein_512_Final_Pad(ctx, cfg.b); + /* copy over into ctx->X[] */ + bcopy(cfg.b, ctx->X, sizeof (cfg.b)); +#if SKEIN_NEED_SWAP + { + uint_t i; + /* convert key bytes to context words */ + for (i = 0; i < SKEIN_512_STATE_WORDS; i++) + ctx->X[i] = Skein_Swap64(ctx->X[i]); + } +#endif + } + /* + * build/process the config block, type == CONFIG (could be + * precomputed for each key) + */ + ctx->h.hashBitLen = hashBitLen; /* output hash bit count */ + Skein_Start_New_Type(ctx, CFG_FINAL); + + bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */ + cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER); + cfg.w[1] = Skein_Swap64(hashBitLen); /* hash result length in bits */ + /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */ + cfg.w[2] = Skein_Swap64(treeInfo); + + Skein_Show_Key(512, &ctx->h, key, keyBytes); + + /* compute the initial chaining values from config block */ + Skein_512_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); + + /* The chaining vars ctx->X are now initialized */ + /* Set up to process the data message portion of the hash (default) */ + ctx->h.bCnt = 0; /* buffer b[] starts out empty */ + Skein_Start_New_Type(ctx, MSG); + + return (SKEIN_SUCCESS); +} + +/* process the input bytes */ +int +Skein_512_Update(Skein_512_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt) +{ + size_t n; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL); + + /* process full blocks, if any */ + if (msgByteCnt + ctx->h.bCnt > SKEIN_512_BLOCK_BYTES) { + /* finish up any buffered message data */ + if (ctx->h.bCnt) { + /* # bytes free in buffer b[] */ + n = SKEIN_512_BLOCK_BYTES - ctx->h.bCnt; + if (n) { + /* check on our logic here */ + Skein_assert(n < msgByteCnt); + bcopy(msg, &ctx->b[ctx->h.bCnt], n); + msgByteCnt -= n; + msg += n; + ctx->h.bCnt += n; + } + Skein_assert(ctx->h.bCnt == SKEIN_512_BLOCK_BYTES); + Skein_512_Process_Block(ctx, ctx->b, 1, + SKEIN_512_BLOCK_BYTES); + ctx->h.bCnt = 0; + } + /* + * now process any remaining full blocks, directly from input + * message data + */ + if (msgByteCnt > SKEIN_512_BLOCK_BYTES) { + /* number of full blocks to process */ + n = (msgByteCnt 
- 1) / SKEIN_512_BLOCK_BYTES; + Skein_512_Process_Block(ctx, msg, n, + SKEIN_512_BLOCK_BYTES); + msgByteCnt -= n * SKEIN_512_BLOCK_BYTES; + msg += n * SKEIN_512_BLOCK_BYTES; + } + Skein_assert(ctx->h.bCnt == 0); + } + + /* copy any remaining source message data bytes into b[] */ + if (msgByteCnt) { + Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN_512_BLOCK_BYTES); + bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt); + ctx->h.bCnt += msgByteCnt; + } + + return (SKEIN_SUCCESS); +} + +/* finalize the hash computation and output the result */ +int +Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal) +{ + size_t i, n, byteCnt; + uint64_t X[SKEIN_512_STATE_WORDS]; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL); + + ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */ + /* zero pad b[] if necessary */ + if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES) + bzero(&ctx->b[ctx->h.bCnt], + SKEIN_512_BLOCK_BYTES - ctx->h.bCnt); + + /* process the final block */ + Skein_512_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt); + + /* now output the result */ + /* total number of output bytes */ + byteCnt = (ctx->h.hashBitLen + 7) >> 3; + + /* run Threefish in "counter mode" to generate output */ + /* zero out b[], so it can hold the counter */ + bzero(ctx->b, sizeof (ctx->b)); + /* keep a local copy of counter mode "key" */ + bcopy(ctx->X, X, sizeof (X)); + for (i = 0; i * SKEIN_512_BLOCK_BYTES < byteCnt; i++) { + /* build the counter block */ + uint64_t tmp = Skein_Swap64((uint64_t)i); + bcopy(&tmp, ctx->b, sizeof (tmp)); + Skein_Start_New_Type(ctx, OUT_FINAL); + /* run "counter mode" */ + Skein_512_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t)); + /* number of output bytes left to go */ + n = byteCnt - i * SKEIN_512_BLOCK_BYTES; + if (n >= SKEIN_512_BLOCK_BYTES) + n = SKEIN_512_BLOCK_BYTES; + Skein_Put64_LSB_First(hashVal + i * SKEIN_512_BLOCK_BYTES, + ctx->X, n); /* "output" the ctr mode bytes */ + Skein_Show_Final(512, &ctx->h, n, + hashVal + i * SKEIN_512_BLOCK_BYTES); + /* restore the counter mode key for next time */ + bcopy(X, ctx->X, sizeof (X)); + } + return (SKEIN_SUCCESS); +} + +/* 1024-bit Skein */ + +/* init the context for a straight hashing operation */ +int +Skein1024_Init(Skein1024_Ctxt_t *ctx, size_t hashBitLen) +{ + union { + uint8_t b[SKEIN1024_STATE_BYTES]; + uint64_t w[SKEIN1024_STATE_WORDS]; + } cfg; /* config block */ + + Skein_Assert(hashBitLen > 0, SKEIN_BAD_HASHLEN); + ctx->h.hashBitLen = hashBitLen; /* output hash bit count */ + + switch (hashBitLen) { /* use pre-computed values, where available */ +#ifndef SKEIN_NO_PRECOMP + case 512: + bcopy(SKEIN1024_IV_512, ctx->X, sizeof (ctx->X)); + break; + case 384: + bcopy(SKEIN1024_IV_384, ctx->X, sizeof (ctx->X)); + break; + case 1024: + bcopy(SKEIN1024_IV_1024, ctx->X, sizeof (ctx->X)); + break; +#endif + default: + /* here if there is no precomputed IV value available */ + /* + * build/process the config block, type == CONFIG (could be + * precomputed) + */ + /* set tweaks: T0=0; T1=CFG | FINAL */ + Skein_Start_New_Type(ctx, CFG_FINAL); + + /* set the schema, version */ + cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER); + /* hash result length in bits */ + cfg.w[1] = Skein_Swap64(hashBitLen); + cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL); + /* zero pad config block */ + bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0])); + + /* compute the initial chaining values from config block */ + /* zero the chaining variables */ + bzero(ctx->X, sizeof (ctx->X)); + 
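	/*
	 * Illustrative summary, inferred from the assignments above rather
	 * than taken from the imported source: the configuration string
	 * processed below (SKEIN_CFG_STR_LEN bytes, 32 in the Skein
	 * specification) is laid out as
	 *   cfg.w[0]  bytes  0..7   schema identifier / version (SKEIN_SCHEMA_VER)
	 *   cfg.w[1]  bytes  8..15  requested output length in bits (hashBitLen)
	 *   cfg.w[2]  bytes 16..23  tree parameters (SEQUENTIAL here)
	 *             bytes 24..31  reserved, zero padded
	 * Processing it once under the CFG|FINAL tweak yields the initial
	 * chaining values; the precomputed IV tables in the switch above
	 * simply cache this result for the common output sizes.
	 */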
Skein1024_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); + break; + } + + /* + * The chaining vars ctx->X are now initialized for the given + * hashBitLen. Set up to process the data message portion of the hash + * (default) + */ + Skein_Start_New_Type(ctx, MSG); /* T0=0, T1= MSG type */ + + return (SKEIN_SUCCESS); +} + +/* init the context for a MAC and/or tree hash operation */ +/* + * [identical to Skein1024_Init() when keyBytes == 0 && + * treeInfo == SKEIN_CFG_TREE_INFO_SEQUENTIAL] + */ +int +Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo, + const uint8_t *key, size_t keyBytes) +{ + union { + uint8_t b[SKEIN1024_STATE_BYTES]; + uint64_t w[SKEIN1024_STATE_WORDS]; + } cfg; /* config block */ + + Skein_Assert(hashBitLen > 0, SKEIN_BAD_HASHLEN); + Skein_Assert(keyBytes == 0 || key != NULL, SKEIN_FAIL); + + /* compute the initial chaining values ctx->X[], based on key */ + if (keyBytes == 0) { /* is there a key? */ + /* no key: use all zeroes as key for config block */ + bzero(ctx->X, sizeof (ctx->X)); + } else { /* here to pre-process a key */ + Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X)); + /* do a mini-Init right here */ + /* set output hash bit count = state size */ + ctx->h.hashBitLen = 8 * sizeof (ctx->X); + /* set tweaks: T0 = 0; T1 = KEY type */ + Skein_Start_New_Type(ctx, KEY); + /* zero the initial chaining variables */ + bzero(ctx->X, sizeof (ctx->X)); + (void) Skein1024_Update(ctx, key, keyBytes); /* hash the key */ + /* put result into cfg.b[] */ + (void) Skein1024_Final_Pad(ctx, cfg.b); + /* copy over into ctx->X[] */ + bcopy(cfg.b, ctx->X, sizeof (cfg.b)); +#if SKEIN_NEED_SWAP + { + uint_t i; + /* convert key bytes to context words */ + for (i = 0; i < SKEIN1024_STATE_WORDS; i++) + ctx->X[i] = Skein_Swap64(ctx->X[i]); + } +#endif + } + /* + * build/process the config block, type == CONFIG (could be + * precomputed for each key) + */ + ctx->h.hashBitLen = hashBitLen; /* output hash bit count */ + Skein_Start_New_Type(ctx, CFG_FINAL); + + bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */ + cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER); + /* hash result length in bits */ + cfg.w[1] = Skein_Swap64(hashBitLen); + /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */ + cfg.w[2] = Skein_Swap64(treeInfo); + + Skein_Show_Key(1024, &ctx->h, key, keyBytes); + + /* compute the initial chaining values from config block */ + Skein1024_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); + + /* The chaining vars ctx->X are now initialized */ + /* Set up to process the data message portion of the hash (default) */ + ctx->h.bCnt = 0; /* buffer b[] starts out empty */ + Skein_Start_New_Type(ctx, MSG); + + return (SKEIN_SUCCESS); +} + +/* process the input bytes */ +int +Skein1024_Update(Skein1024_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt) +{ + size_t n; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN1024_BLOCK_BYTES, SKEIN_FAIL); + + /* process full blocks, if any */ + if (msgByteCnt + ctx->h.bCnt > SKEIN1024_BLOCK_BYTES) { + /* finish up any buffered message data */ + if (ctx->h.bCnt) { + /* # bytes free in buffer b[] */ + n = SKEIN1024_BLOCK_BYTES - ctx->h.bCnt; + if (n) { + /* check on our logic here */ + Skein_assert(n < msgByteCnt); + bcopy(msg, &ctx->b[ctx->h.bCnt], n); + msgByteCnt -= n; + msg += n; + ctx->h.bCnt += n; + } + Skein_assert(ctx->h.bCnt == SKEIN1024_BLOCK_BYTES); + Skein1024_Process_Block(ctx, ctx->b, 1, + SKEIN1024_BLOCK_BYTES); + ctx->h.bCnt = 0; + } + /* + * now process any 
remaining full blocks, directly from + * input message data + */ + if (msgByteCnt > SKEIN1024_BLOCK_BYTES) { + /* number of full blocks to process */ + n = (msgByteCnt - 1) / SKEIN1024_BLOCK_BYTES; + Skein1024_Process_Block(ctx, msg, n, + SKEIN1024_BLOCK_BYTES); + msgByteCnt -= n * SKEIN1024_BLOCK_BYTES; + msg += n * SKEIN1024_BLOCK_BYTES; + } + Skein_assert(ctx->h.bCnt == 0); + } + + /* copy any remaining source message data bytes into b[] */ + if (msgByteCnt) { + Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN1024_BLOCK_BYTES); + bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt); + ctx->h.bCnt += msgByteCnt; + } + + return (SKEIN_SUCCESS); +} + +/* finalize the hash computation and output the result */ +int +Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal) +{ + size_t i, n, byteCnt; + uint64_t X[SKEIN1024_STATE_WORDS]; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN1024_BLOCK_BYTES, SKEIN_FAIL); + + ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */ + /* zero pad b[] if necessary */ + if (ctx->h.bCnt < SKEIN1024_BLOCK_BYTES) + bzero(&ctx->b[ctx->h.bCnt], + SKEIN1024_BLOCK_BYTES - ctx->h.bCnt); + + /* process the final block */ + Skein1024_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt); + + /* now output the result */ + /* total number of output bytes */ + byteCnt = (ctx->h.hashBitLen + 7) >> 3; + + /* run Threefish in "counter mode" to generate output */ + /* zero out b[], so it can hold the counter */ + bzero(ctx->b, sizeof (ctx->b)); + /* keep a local copy of counter mode "key" */ + bcopy(ctx->X, X, sizeof (X)); + for (i = 0; i * SKEIN1024_BLOCK_BYTES < byteCnt; i++) { + /* build the counter block */ + uint64_t tmp = Skein_Swap64((uint64_t)i); + bcopy(&tmp, ctx->b, sizeof (tmp)); + Skein_Start_New_Type(ctx, OUT_FINAL); + /* run "counter mode" */ + Skein1024_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t)); + /* number of output bytes left to go */ + n = byteCnt - i * SKEIN1024_BLOCK_BYTES; + if (n >= SKEIN1024_BLOCK_BYTES) + n = SKEIN1024_BLOCK_BYTES; + Skein_Put64_LSB_First(hashVal + i * SKEIN1024_BLOCK_BYTES, + ctx->X, n); /* "output" the ctr mode bytes */ + Skein_Show_Final(1024, &ctx->h, n, + hashVal + i * SKEIN1024_BLOCK_BYTES); + /* restore the counter mode key for next time */ + bcopy(X, ctx->X, sizeof (X)); + } + return (SKEIN_SUCCESS); +} + +/* Functions to support MAC/tree hashing */ +/* (this code is identical for Optimized and Reference versions) */ + +/* finalize the hash computation and output the block, no OUTPUT stage */ +int +Skein_256_Final_Pad(Skein_256_Ctxt_t *ctx, uint8_t *hashVal) +{ + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL); + + ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */ + /* zero pad b[] if necessary */ + if (ctx->h.bCnt < SKEIN_256_BLOCK_BYTES) + bzero(&ctx->b[ctx->h.bCnt], + SKEIN_256_BLOCK_BYTES - ctx->h.bCnt); + /* process the final block */ + Skein_256_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt); + + /* "output" the state bytes */ + Skein_Put64_LSB_First(hashVal, ctx->X, SKEIN_256_BLOCK_BYTES); + + return (SKEIN_SUCCESS); +} + +/* finalize the hash computation and output the block, no OUTPUT stage */ +int +Skein_512_Final_Pad(Skein_512_Ctxt_t *ctx, uint8_t *hashVal) +{ + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL); + + ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */ + /* zero pad b[] if necessary */ + if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES) + bzero(&ctx->b[ctx->h.bCnt], 
+ SKEIN_512_BLOCK_BYTES - ctx->h.bCnt); + /* process the final block */ + Skein_512_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt); + + /* "output" the state bytes */ + Skein_Put64_LSB_First(hashVal, ctx->X, SKEIN_512_BLOCK_BYTES); + + return (SKEIN_SUCCESS); +} + +/* finalize the hash computation and output the block, no OUTPUT stage */ +int +Skein1024_Final_Pad(Skein1024_Ctxt_t *ctx, uint8_t *hashVal) +{ + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN1024_BLOCK_BYTES, SKEIN_FAIL); + + /* tag as the final block */ + ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; + /* zero pad b[] if necessary */ + if (ctx->h.bCnt < SKEIN1024_BLOCK_BYTES) + bzero(&ctx->b[ctx->h.bCnt], + SKEIN1024_BLOCK_BYTES - ctx->h.bCnt); + /* process the final block */ + Skein1024_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt); + + /* "output" the state bytes */ + Skein_Put64_LSB_First(hashVal, ctx->X, SKEIN1024_BLOCK_BYTES); + + return (SKEIN_SUCCESS); +} + +#if SKEIN_TREE_HASH +/* just do the OUTPUT stage */ +int +Skein_256_Output(Skein_256_Ctxt_t *ctx, uint8_t *hashVal) +{ + size_t i, n, byteCnt; + uint64_t X[SKEIN_256_STATE_WORDS]; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL); + + /* now output the result */ + /* total number of output bytes */ + byteCnt = (ctx->h.hashBitLen + 7) >> 3; + + /* run Threefish in "counter mode" to generate output */ + /* zero out b[], so it can hold the counter */ + bzero(ctx->b, sizeof (ctx->b)); + /* keep a local copy of counter mode "key" */ + bcopy(ctx->X, X, sizeof (X)); + for (i = 0; i * SKEIN_256_BLOCK_BYTES < byteCnt; i++) { + /* build the counter block */ + uint64_t tmp = Skein_Swap64((uint64_t)i); + bcopy(&tmp, ctx->b, sizeof (tmp)); + Skein_Start_New_Type(ctx, OUT_FINAL); + /* run "counter mode" */ + Skein_256_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t)); + /* number of output bytes left to go */ + n = byteCnt - i * SKEIN_256_BLOCK_BYTES; + if (n >= SKEIN_256_BLOCK_BYTES) + n = SKEIN_256_BLOCK_BYTES; + Skein_Put64_LSB_First(hashVal + i * SKEIN_256_BLOCK_BYTES, + ctx->X, n); /* "output" the ctr mode bytes */ + Skein_Show_Final(256, &ctx->h, n, + hashVal + i * SKEIN_256_BLOCK_BYTES); + /* restore the counter mode key for next time */ + bcopy(X, ctx->X, sizeof (X)); + } + return (SKEIN_SUCCESS); +} + +/* just do the OUTPUT stage */ +int +Skein_512_Output(Skein_512_Ctxt_t *ctx, uint8_t *hashVal) +{ + size_t i, n, byteCnt; + uint64_t X[SKEIN_512_STATE_WORDS]; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL); + + /* now output the result */ + /* total number of output bytes */ + byteCnt = (ctx->h.hashBitLen + 7) >> 3; + + /* run Threefish in "counter mode" to generate output */ + /* zero out b[], so it can hold the counter */ + bzero(ctx->b, sizeof (ctx->b)); + /* keep a local copy of counter mode "key" */ + bcopy(ctx->X, X, sizeof (X)); + for (i = 0; i * SKEIN_512_BLOCK_BYTES < byteCnt; i++) { + /* build the counter block */ + uint64_t tmp = Skein_Swap64((uint64_t)i); + bcopy(&tmp, ctx->b, sizeof (tmp)); + Skein_Start_New_Type(ctx, OUT_FINAL); + /* run "counter mode" */ + Skein_512_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t)); + /* number of output bytes left to go */ + n = byteCnt - i * SKEIN_512_BLOCK_BYTES; + if (n >= SKEIN_512_BLOCK_BYTES) + n = SKEIN_512_BLOCK_BYTES; + Skein_Put64_LSB_First(hashVal + i * SKEIN_512_BLOCK_BYTES, + ctx->X, n); /* "output" the ctr mode bytes */ + Skein_Show_Final(256, &ctx->h, n, + hashVal + i * SKEIN_512_BLOCK_BYTES); + /* 
restore the counter mode key for next time */ + bcopy(X, ctx->X, sizeof (X)); + } + return (SKEIN_SUCCESS); +} + +/* just do the OUTPUT stage */ +int +Skein1024_Output(Skein1024_Ctxt_t *ctx, uint8_t *hashVal) +{ + size_t i, n, byteCnt; + uint64_t X[SKEIN1024_STATE_WORDS]; + + /* catch uninitialized context */ + Skein_Assert(ctx->h.bCnt <= SKEIN1024_BLOCK_BYTES, SKEIN_FAIL); + + /* now output the result */ + /* total number of output bytes */ + byteCnt = (ctx->h.hashBitLen + 7) >> 3; + + /* run Threefish in "counter mode" to generate output */ + /* zero out b[], so it can hold the counter */ + bzero(ctx->b, sizeof (ctx->b)); + /* keep a local copy of counter mode "key" */ + bcopy(ctx->X, X, sizeof (X)); + for (i = 0; i * SKEIN1024_BLOCK_BYTES < byteCnt; i++) { + /* build the counter block */ + uint64_t tmp = Skein_Swap64((uint64_t)i); + bcopy(&tmp, ctx->b, sizeof (tmp)); + Skein_Start_New_Type(ctx, OUT_FINAL); + /* run "counter mode" */ + Skein1024_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t)); + /* number of output bytes left to go */ + n = byteCnt - i * SKEIN1024_BLOCK_BYTES; + if (n >= SKEIN1024_BLOCK_BYTES) + n = SKEIN1024_BLOCK_BYTES; + Skein_Put64_LSB_First(hashVal + i * SKEIN1024_BLOCK_BYTES, + ctx->X, n); /* "output" the ctr mode bytes */ + Skein_Show_Final(256, &ctx->h, n, + hashVal + i * SKEIN1024_BLOCK_BYTES); + /* restore the counter mode key for next time */ + bcopy(X, ctx->X, sizeof (X)); + } + return (SKEIN_SUCCESS); +} +#endif + +#ifdef _KERNEL +EXPORT_SYMBOL(Skein_512_Init); +EXPORT_SYMBOL(Skein_512_InitExt); +EXPORT_SYMBOL(Skein_512_Update); +EXPORT_SYMBOL(Skein_512_Final); +#endif diff --git a/sys/contrib/openzfs/module/icp/algs/skein/skein_block.c b/sys/contrib/openzfs/module/icp/algs/skein/skein_block.c new file mode 100644 index 000000000000..7ba165a48511 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/skein/skein_block.c @@ -0,0 +1,790 @@ +/* + * Implementation of the Skein block functions. + * Source code author: Doug Whiting, 2008. + * This algorithm and source code is released to the public domain. + * Compile-time switches: + * SKEIN_USE_ASM -- set bits (256/512/1024) to select which + * versions use ASM code for block processing + * [default: use C for all block sizes] + */ +/* Copyright 2013 Doug Whiting. This code is released to the public domain. */ + +#include <sys/skein.h> +#include "skein_impl.h" +#include <sys/isa_defs.h> /* for _ILP32 */ + +#ifndef SKEIN_USE_ASM +#define SKEIN_USE_ASM (0) /* default is all C code (no ASM) */ +#endif + +#ifndef SKEIN_LOOP +/* + * The low-level checksum routines use a lot of stack space. On systems where + * small stacks frame are enforced (like 32-bit kernel builds), do not unroll + * checksum calculations to save stack space. + * + * Even with no loops unrolled, we still can exceed the 1k stack frame limit + * in Skein1024_Process_Block() (it hits 1272 bytes on ARM32). We can + * safely ignore it though, since that the checksum functions will be called + * from a worker thread that won't be using much stack. That's why we have + * the #pragma here to ignore the warning. + */ +#if defined(_ILP32) || defined(__powerpc) /* Assume small stack */ +#pragma GCC diagnostic ignored "-Wframe-larger-than=" +/* + * We're running on 32-bit, don't unroll loops to save stack frame space + * + * Due to the ways the calculations on SKEIN_LOOP are done in + * Skein_*_Process_Block(), a value of 111 disables unrolling loops + * in any of those functions. 
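To make the digit encoding concrete: the hundreds, tens, and ones digits of SKEIN_LOOP select the unroll factor for the 256-, 512-, and 1024-bit block functions respectively, via the SKEIN_UNROLL_256/512/1024 macros defined later in this file. A digit of 0 selects the fully unrolled code path, while a nonzero digit N selects the looping path that executes N eight-round groups per loop iteration. A small stand-alone illustration of the decoding (not part of the imported source):

#include <stdio.h>

int
main(void)
{
	const int skein_loop = 111;	/* the small-stack value chosen above */

	printf("Skein-256  unroll factor: %d\n", (skein_loop / 100) % 10);
	printf("Skein-512  unroll factor: %d\n", (skein_loop / 10) % 10);
	printf("Skein-1024 unroll factor: %d\n", skein_loop % 10);
	return (0);
}

Under the same reading, the large-stack default of 001 fully unrolls the 256- and 512-bit functions (digits 0) and loops the 1024-bit one (digit 1), matching the comment beside that definition.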
+ */ +#define SKEIN_LOOP 111 +#else +/* We're compiling with large stacks */ +#define SKEIN_LOOP 001 /* default: unroll 256 and 512, but not 1024 */ +#endif +#endif + +/* some useful definitions for code here */ +#define BLK_BITS (WCNT*64) +#define KW_TWK_BASE (0) +#define KW_KEY_BASE (3) +#define ks (kw + KW_KEY_BASE) +#define ts (kw + KW_TWK_BASE) + +/* no debugging in Illumos version */ +#define DebugSaveTweak(ctx) + +/* Skein_256 */ +#if !(SKEIN_USE_ASM & 256) +void +Skein_256_Process_Block(Skein_256_Ctxt_t *ctx, const uint8_t *blkPtr, + size_t blkCnt, size_t byteCntAdd) +{ + enum { + WCNT = SKEIN_256_STATE_WORDS + }; +#undef RCNT +#define RCNT (SKEIN_256_ROUNDS_TOTAL / 8) + +#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ +#define SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10) +#else +#define SKEIN_UNROLL_256 (0) +#endif + +#if SKEIN_UNROLL_256 +#if (RCNT % SKEIN_UNROLL_256) +#error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */ +#endif + size_t r; + /* key schedule words : chaining vars + tweak + "rotation" */ + uint64_t kw[WCNT + 4 + RCNT * 2]; +#else + uint64_t kw[WCNT + 4]; /* key schedule words : chaining vars + tweak */ +#endif + /* local copy of context vars, for speed */ + uint64_t X0, X1, X2, X3; + uint64_t w[WCNT]; /* local copy of input block */ +#ifdef SKEIN_DEBUG + /* use for debugging (help compiler put Xn in registers) */ + const uint64_t *Xptr[4]; + Xptr[0] = &X0; + Xptr[1] = &X1; + Xptr[2] = &X2; + Xptr[3] = &X3; +#endif + Skein_assert(blkCnt != 0); /* never call with blkCnt == 0! */ + ts[0] = ctx->h.T[0]; + ts[1] = ctx->h.T[1]; + do { + /* + * this implementation only supports 2**64 input bytes + * (no carry out here) + */ + ts[0] += byteCntAdd; /* update processed length */ + + /* precompute the key schedule for this block */ + ks[0] = ctx->X[0]; + ks[1] = ctx->X[1]; + ks[2] = ctx->X[2]; + ks[3] = ctx->X[3]; + ks[4] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ SKEIN_KS_PARITY; + + ts[2] = ts[0] ^ ts[1]; + + /* get input block in little-endian format */ + Skein_Get64_LSB_First(w, blkPtr, WCNT); + DebugSaveTweak(ctx); + Skein_Show_Block(BLK_BITS, &ctx->h, ctx->X, blkPtr, w, ks, ts); + + X0 = w[0] + ks[0]; /* do the first full key injection */ + X1 = w[1] + ks[1] + ts[0]; + X2 = w[2] + ks[2] + ts[1]; + X3 = w[3] + ks[3]; + + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL, + Xptr); /* show starting state values */ + + blkPtr += SKEIN_256_BLOCK_BYTES; + + /* run the rounds */ + +#define Round256(p0, p1, p2, p3, ROT, rNum) \ + X##p0 += X##p1; X##p1 = RotL_64(X##p1, ROT##_0); X##p1 ^= X##p0; \ + X##p2 += X##p3; X##p3 = RotL_64(X##p3, ROT##_1); X##p3 ^= X##p2; \ + +#if SKEIN_UNROLL_256 == 0 +#define R256(p0, p1, p2, p3, ROT, rNum) /* fully unrolled */ \ + Round256(p0, p1, p2, p3, ROT, rNum) \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, rNum, Xptr); + +#define I256(R) \ + X0 += ks[((R) + 1) % 5]; /* inject the key schedule value */ \ + X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \ + X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \ + X3 += ks[((R) + 4) % 5] + (R) + 1; \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); +#else /* looping version */ +#define R256(p0, p1, p2, p3, ROT, rNum) \ + Round256(p0, p1, p2, p3, ROT, rNum) \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rNum, Xptr); + +#define I256(R) \ + X0 += ks[r + (R) + 0]; /* inject the key schedule value */ \ + X1 += ks[r + (R) + 1] + ts[r + (R) + 0]; \ + X2 += ks[r + (R) + 2] + ts[r + (R) + 1]; \ + X3 += ks[r + (R) + 3] + r + (R); \ + ks[r + (R) + 4] = ks[r + (R) - 1]; /* 
rotate key schedule */ \ + ts[r + (R) + 2] = ts[r + (R) - 1]; \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); + + /* loop through it */ + for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_256) +#endif + { +#define R256_8_rounds(R) \ + R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \ + R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \ + R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \ + R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \ + I256(2 * (R)); \ + R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \ + R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \ + R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \ + R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \ + I256(2 * (R) + 1); + + R256_8_rounds(0); + +#define R256_Unroll_R(NN) \ + ((SKEIN_UNROLL_256 == 0 && SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \ + (SKEIN_UNROLL_256 > (NN))) + +#if R256_Unroll_R(1) + R256_8_rounds(1); +#endif +#if R256_Unroll_R(2) + R256_8_rounds(2); +#endif +#if R256_Unroll_R(3) + R256_8_rounds(3); +#endif +#if R256_Unroll_R(4) + R256_8_rounds(4); +#endif +#if R256_Unroll_R(5) + R256_8_rounds(5); +#endif +#if R256_Unroll_R(6) + R256_8_rounds(6); +#endif +#if R256_Unroll_R(7) + R256_8_rounds(7); +#endif +#if R256_Unroll_R(8) + R256_8_rounds(8); +#endif +#if R256_Unroll_R(9) + R256_8_rounds(9); +#endif +#if R256_Unroll_R(10) + R256_8_rounds(10); +#endif +#if R256_Unroll_R(11) + R256_8_rounds(11); +#endif +#if R256_Unroll_R(12) + R256_8_rounds(12); +#endif +#if R256_Unroll_R(13) + R256_8_rounds(13); +#endif +#if R256_Unroll_R(14) + R256_8_rounds(14); +#endif +#if (SKEIN_UNROLL_256 > 14) +#error "need more unrolling in Skein_256_Process_Block" +#endif + } + /* + * do the final "feedforward" xor, update context chaining vars + */ + ctx->X[0] = X0 ^ w[0]; + ctx->X[1] = X1 ^ w[1]; + ctx->X[2] = X2 ^ w[2]; + ctx->X[3] = X3 ^ w[3]; + + Skein_Show_Round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->X); + + ts[1] &= ~SKEIN_T1_FLAG_FIRST; + } while (--blkCnt); + ctx->h.T[0] = ts[0]; + ctx->h.T[1] = ts[1]; +} + +#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF) +size_t +Skein_256_Process_Block_CodeSize(void) +{ + return ((uint8_t *)Skein_256_Process_Block_CodeSize) - + ((uint8_t *)Skein_256_Process_Block); +} + +uint_t +Skein_256_Unroll_Cnt(void) +{ + return (SKEIN_UNROLL_256); +} +#endif +#endif + +/* Skein_512 */ +#if !(SKEIN_USE_ASM & 512) +void +Skein_512_Process_Block(Skein_512_Ctxt_t *ctx, const uint8_t *blkPtr, + size_t blkCnt, size_t byteCntAdd) +{ + enum { + WCNT = SKEIN_512_STATE_WORDS + }; +#undef RCNT +#define RCNT (SKEIN_512_ROUNDS_TOTAL / 8) + +#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ +#define SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10) +#else +#define SKEIN_UNROLL_512 (0) +#endif + +#if SKEIN_UNROLL_512 +#if (RCNT % SKEIN_UNROLL_512) +#error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */ +#endif + size_t r; + /* key schedule words : chaining vars + tweak + "rotation" */ + uint64_t kw[WCNT + 4 + RCNT * 2]; +#else + uint64_t kw[WCNT + 4]; /* key schedule words : chaining vars + tweak */ +#endif + /* local copy of vars, for speed */ + uint64_t X0, X1, X2, X3, X4, X5, X6, X7; + uint64_t w[WCNT]; /* local copy of input block */ +#ifdef SKEIN_DEBUG + /* use for debugging (help compiler put Xn in registers) */ + const uint64_t *Xptr[8]; + Xptr[0] = &X0; + Xptr[1] = &X1; + Xptr[2] = &X2; + Xptr[3] = &X3; + Xptr[4] = &X4; + Xptr[5] = &X5; + Xptr[6] = &X6; + Xptr[7] = &X7; +#endif + + Skein_assert(blkCnt != 0); /* never call with blkCnt == 0! 
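	/*
	 * Illustrative note, inferred from the code below rather than taken
	 * from the imported source: the extended key schedule consists of
	 * the eight chaining words ks[0..7], ks[8] = the XOR of all of them
	 * with SKEIN_KS_PARITY, and the tweak words ts[0..2] with
	 * ts[2] = ts[0] ^ ts[1].  Subkey s, injected after every four
	 * rounds, is then
	 *   words 0..4 : ks[(s + i) mod 9]
	 *   word  5    : ks[(s + 5) mod 9] + ts[s mod 3]
	 *   word  6    : ks[(s + 6) mod 9] + ts[(s + 1) mod 3]
	 *   word  7    : ks[(s + 7) mod 9] + s
	 * The "first full key injection" below is subkey 0, and the fully
	 * unrolled I512(R) computes subkey s = R + 1.
	 */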
*/ + ts[0] = ctx->h.T[0]; + ts[1] = ctx->h.T[1]; + do { + /* + * this implementation only supports 2**64 input bytes + * (no carry out here) + */ + ts[0] += byteCntAdd; /* update processed length */ + + /* precompute the key schedule for this block */ + ks[0] = ctx->X[0]; + ks[1] = ctx->X[1]; + ks[2] = ctx->X[2]; + ks[3] = ctx->X[3]; + ks[4] = ctx->X[4]; + ks[5] = ctx->X[5]; + ks[6] = ctx->X[6]; + ks[7] = ctx->X[7]; + ks[8] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ + ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^ SKEIN_KS_PARITY; + + ts[2] = ts[0] ^ ts[1]; + + /* get input block in little-endian format */ + Skein_Get64_LSB_First(w, blkPtr, WCNT); + DebugSaveTweak(ctx); + Skein_Show_Block(BLK_BITS, &ctx->h, ctx->X, blkPtr, w, ks, ts); + + X0 = w[0] + ks[0]; /* do the first full key injection */ + X1 = w[1] + ks[1]; + X2 = w[2] + ks[2]; + X3 = w[3] + ks[3]; + X4 = w[4] + ks[4]; + X5 = w[5] + ks[5] + ts[0]; + X6 = w[6] + ks[6] + ts[1]; + X7 = w[7] + ks[7]; + + blkPtr += SKEIN_512_BLOCK_BYTES; + + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL, + Xptr); + /* run the rounds */ +#define Round512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum) \ + X##p0 += X##p1; X##p1 = RotL_64(X##p1, ROT##_0); X##p1 ^= X##p0;\ + X##p2 += X##p3; X##p3 = RotL_64(X##p3, ROT##_1); X##p3 ^= X##p2;\ + X##p4 += X##p5; X##p5 = RotL_64(X##p5, ROT##_2); X##p5 ^= X##p4;\ + X##p6 += X##p7; X##p7 = RotL_64(X##p7, ROT##_3); X##p7 ^= X##p6; + +#if SKEIN_UNROLL_512 == 0 +#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum) /* unrolled */ \ + Round512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum) \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, rNum, Xptr); + +#define I512(R) \ + X0 += ks[((R) + 1) % 9]; /* inject the key schedule value */\ + X1 += ks[((R) + 2) % 9]; \ + X2 += ks[((R) + 3) % 9]; \ + X3 += ks[((R) + 4) % 9]; \ + X4 += ks[((R) + 5) % 9]; \ + X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \ + X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \ + X7 += ks[((R) + 8) % 9] + (R) + 1; \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); +#else /* looping version */ +#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum) \ + Round512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, rNum) \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rNum, Xptr); + +#define I512(R) \ + X0 += ks[r + (R) + 0]; /* inject the key schedule value */ \ + X1 += ks[r + (R) + 1]; \ + X2 += ks[r + (R) + 2]; \ + X3 += ks[r + (R) + 3]; \ + X4 += ks[r + (R) + 4]; \ + X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \ + X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \ + X7 += ks[r + (R) + 7] + r + (R); \ + ks[r + (R)+8] = ks[r + (R) - 1]; /* rotate key schedule */\ + ts[r + (R)+2] = ts[r + (R) - 1]; \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); + + /* loop through it */ + for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_512) +#endif /* end of looped code definitions */ + { +#define R512_8_rounds(R) /* do 8 full rounds */ \ + R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \ + R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \ + R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \ + R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \ + I512(2 * (R)); \ + R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \ + R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \ + R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \ + R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \ + I512(2*(R) + 1); /* and key injection */ + + R512_8_rounds(0); + +#define R512_Unroll_R(NN) \ + ((SKEIN_UNROLL_512 == 0 && SKEIN_512_ROUNDS_TOTAL / 8 > (NN)) || \ + (SKEIN_UNROLL_512 > 
(NN))) + +#if R512_Unroll_R(1) + R512_8_rounds(1); +#endif +#if R512_Unroll_R(2) + R512_8_rounds(2); +#endif +#if R512_Unroll_R(3) + R512_8_rounds(3); +#endif +#if R512_Unroll_R(4) + R512_8_rounds(4); +#endif +#if R512_Unroll_R(5) + R512_8_rounds(5); +#endif +#if R512_Unroll_R(6) + R512_8_rounds(6); +#endif +#if R512_Unroll_R(7) + R512_8_rounds(7); +#endif +#if R512_Unroll_R(8) + R512_8_rounds(8); +#endif +#if R512_Unroll_R(9) + R512_8_rounds(9); +#endif +#if R512_Unroll_R(10) + R512_8_rounds(10); +#endif +#if R512_Unroll_R(11) + R512_8_rounds(11); +#endif +#if R512_Unroll_R(12) + R512_8_rounds(12); +#endif +#if R512_Unroll_R(13) + R512_8_rounds(13); +#endif +#if R512_Unroll_R(14) + R512_8_rounds(14); +#endif +#if (SKEIN_UNROLL_512 > 14) +#error "need more unrolling in Skein_512_Process_Block" +#endif + } + + /* + * do the final "feedforward" xor, update context chaining vars + */ + ctx->X[0] = X0 ^ w[0]; + ctx->X[1] = X1 ^ w[1]; + ctx->X[2] = X2 ^ w[2]; + ctx->X[3] = X3 ^ w[3]; + ctx->X[4] = X4 ^ w[4]; + ctx->X[5] = X5 ^ w[5]; + ctx->X[6] = X6 ^ w[6]; + ctx->X[7] = X7 ^ w[7]; + Skein_Show_Round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->X); + + ts[1] &= ~SKEIN_T1_FLAG_FIRST; + } while (--blkCnt); + ctx->h.T[0] = ts[0]; + ctx->h.T[1] = ts[1]; +} + +#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF) +size_t +Skein_512_Process_Block_CodeSize(void) +{ + return ((uint8_t *)Skein_512_Process_Block_CodeSize) - + ((uint8_t *)Skein_512_Process_Block); +} + +uint_t +Skein_512_Unroll_Cnt(void) +{ + return (SKEIN_UNROLL_512); +} +#endif +#endif + +/* Skein1024 */ +#if !(SKEIN_USE_ASM & 1024) +void +Skein1024_Process_Block(Skein1024_Ctxt_t *ctx, const uint8_t *blkPtr, + size_t blkCnt, size_t byteCntAdd) +{ + /* do it in C, always looping (unrolled is bigger AND slower!) */ + enum { + WCNT = SKEIN1024_STATE_WORDS + }; +#undef RCNT +#define RCNT (SKEIN1024_ROUNDS_TOTAL/8) + +#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ +#define SKEIN_UNROLL_1024 ((SKEIN_LOOP)%10) +#else +#define SKEIN_UNROLL_1024 (0) +#endif + +#if (SKEIN_UNROLL_1024 != 0) +#if (RCNT % SKEIN_UNROLL_1024) +#error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */ +#endif + size_t r; + /* key schedule words : chaining vars + tweak + "rotation" */ + uint64_t kw[WCNT + 4 + RCNT * 2]; +#else + uint64_t kw[WCNT + 4]; /* key schedule words : chaining vars + tweak */ +#endif + + /* local copy of vars, for speed */ + uint64_t X00, X01, X02, X03, X04, X05, X06, X07, X08, X09, X10, X11, + X12, X13, X14, X15; + uint64_t w[WCNT]; /* local copy of input block */ +#ifdef SKEIN_DEBUG + /* use for debugging (help compiler put Xn in registers) */ + const uint64_t *Xptr[16]; + Xptr[0] = &X00; + Xptr[1] = &X01; + Xptr[2] = &X02; + Xptr[3] = &X03; + Xptr[4] = &X04; + Xptr[5] = &X05; + Xptr[6] = &X06; + Xptr[7] = &X07; + Xptr[8] = &X08; + Xptr[9] = &X09; + Xptr[10] = &X10; + Xptr[11] = &X11; + Xptr[12] = &X12; + Xptr[13] = &X13; + Xptr[14] = &X14; + Xptr[15] = &X15; +#endif + + Skein_assert(blkCnt != 0); /* never call with blkCnt == 0! 
*/ + ts[0] = ctx->h.T[0]; + ts[1] = ctx->h.T[1]; + do { + /* + * this implementation only supports 2**64 input bytes + * (no carry out here) + */ + ts[0] += byteCntAdd; /* update processed length */ + + /* precompute the key schedule for this block */ + ks[0] = ctx->X[0]; + ks[1] = ctx->X[1]; + ks[2] = ctx->X[2]; + ks[3] = ctx->X[3]; + ks[4] = ctx->X[4]; + ks[5] = ctx->X[5]; + ks[6] = ctx->X[6]; + ks[7] = ctx->X[7]; + ks[8] = ctx->X[8]; + ks[9] = ctx->X[9]; + ks[10] = ctx->X[10]; + ks[11] = ctx->X[11]; + ks[12] = ctx->X[12]; + ks[13] = ctx->X[13]; + ks[14] = ctx->X[14]; + ks[15] = ctx->X[15]; + ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ + ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^ + ks[8] ^ ks[9] ^ ks[10] ^ ks[11] ^ + ks[12] ^ ks[13] ^ ks[14] ^ ks[15] ^ SKEIN_KS_PARITY; + + ts[2] = ts[0] ^ ts[1]; + + /* get input block in little-endian format */ + Skein_Get64_LSB_First(w, blkPtr, WCNT); + DebugSaveTweak(ctx); + Skein_Show_Block(BLK_BITS, &ctx->h, ctx->X, blkPtr, w, ks, ts); + + X00 = w[0] + ks[0]; /* do the first full key injection */ + X01 = w[1] + ks[1]; + X02 = w[2] + ks[2]; + X03 = w[3] + ks[3]; + X04 = w[4] + ks[4]; + X05 = w[5] + ks[5]; + X06 = w[6] + ks[6]; + X07 = w[7] + ks[7]; + X08 = w[8] + ks[8]; + X09 = w[9] + ks[9]; + X10 = w[10] + ks[10]; + X11 = w[11] + ks[11]; + X12 = w[12] + ks[12]; + X13 = w[13] + ks[13] + ts[0]; + X14 = w[14] + ks[14] + ts[1]; + X15 = w[15] + ks[15]; + + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL, + Xptr); + +#define Round1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, \ + pD, pE, pF, ROT, rNum) \ + X##p0 += X##p1; X##p1 = RotL_64(X##p1, ROT##_0); X##p1 ^= X##p0;\ + X##p2 += X##p3; X##p3 = RotL_64(X##p3, ROT##_1); X##p3 ^= X##p2;\ + X##p4 += X##p5; X##p5 = RotL_64(X##p5, ROT##_2); X##p5 ^= X##p4;\ + X##p6 += X##p7; X##p7 = RotL_64(X##p7, ROT##_3); X##p7 ^= X##p6;\ + X##p8 += X##p9; X##p9 = RotL_64(X##p9, ROT##_4); X##p9 ^= X##p8;\ + X##pA += X##pB; X##pB = RotL_64(X##pB, ROT##_5); X##pB ^= X##pA;\ + X##pC += X##pD; X##pD = RotL_64(X##pD, ROT##_6); X##pD ^= X##pC;\ + X##pE += X##pF; X##pF = RotL_64(X##pF, ROT##_7); X##pF ^= X##pE; + +#if SKEIN_UNROLL_1024 == 0 +#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, \ + pE, pF, ROT, rn) \ + Round1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, \ + pD, pE, pF, ROT, rn) \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, rn, Xptr); + +#define I1024(R) \ + X00 += ks[((R) + 1) % 17]; /* inject the key schedule value */\ + X01 += ks[((R) + 2) % 17]; \ + X02 += ks[((R) + 3) % 17]; \ + X03 += ks[((R) + 4) % 17]; \ + X04 += ks[((R) + 5) % 17]; \ + X05 += ks[((R) + 6) % 17]; \ + X06 += ks[((R) + 7) % 17]; \ + X07 += ks[((R) + 8) % 17]; \ + X08 += ks[((R) + 9) % 17]; \ + X09 += ks[((R) + 10) % 17]; \ + X10 += ks[((R) + 11) % 17]; \ + X11 += ks[((R) + 12) % 17]; \ + X12 += ks[((R) + 13) % 17]; \ + X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \ + X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \ + X15 += ks[((R) + 16) % 17] + (R) +1; \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); +#else /* looping version */ +#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, \ + pE, pF, ROT, rn) \ + Round1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, \ + pD, pE, pF, ROT, rn) \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rn, Xptr); + +#define I1024(R) \ + X00 += ks[r + (R) + 0]; /* inject the key schedule value */ \ + X01 += ks[r + (R) + 1]; \ + X02 += ks[r + (R) + 2]; \ + X03 += ks[r + (R) + 3]; \ + X04 += ks[r + (R) + 4]; \ + X05 += ks[r + (R) + 5]; 
\ + X06 += ks[r + (R) + 6]; \ + X07 += ks[r + (R) + 7]; \ + X08 += ks[r + (R) + 8]; \ + X09 += ks[r + (R) + 9]; \ + X10 += ks[r + (R) + 10]; \ + X11 += ks[r + (R) + 11]; \ + X12 += ks[r + (R) + 12]; \ + X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \ + X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \ + X15 += ks[r + (R) + 15] + r + (R); \ + ks[r + (R) + 16] = ks[r + (R) - 1]; /* rotate key schedule */\ + ts[r + (R) + 2] = ts[r + (R) - 1]; \ + Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); + + /* loop through it */ + for (r = 1; r <= 2 * RCNT; r += 2 * SKEIN_UNROLL_1024) +#endif + { +#define R1024_8_rounds(R) /* do 8 full rounds */ \ + R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, \ + 14, 15, R1024_0, 8 * (R) + 1); \ + R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, \ + 08, 01, R1024_1, 8 * (R) + 2); \ + R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, \ + 10, 09, R1024_2, 8 * (R) + 3); \ + R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, \ + 12, 07, R1024_3, 8 * (R) + 4); \ + I1024(2 * (R)); \ + R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, \ + 14, 15, R1024_4, 8 * (R) + 5); \ + R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, \ + 08, 01, R1024_5, 8 * (R) + 6); \ + R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, \ + 10, 09, R1024_6, 8 * (R) + 7); \ + R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, \ + 12, 07, R1024_7, 8 * (R) + 8); \ + I1024(2 * (R) + 1); + + R1024_8_rounds(0); + +#define R1024_Unroll_R(NN) \ + ((SKEIN_UNROLL_1024 == 0 && SKEIN1024_ROUNDS_TOTAL/8 > (NN)) || \ + (SKEIN_UNROLL_1024 > (NN))) + +#if R1024_Unroll_R(1) + R1024_8_rounds(1); +#endif +#if R1024_Unroll_R(2) + R1024_8_rounds(2); +#endif +#if R1024_Unroll_R(3) + R1024_8_rounds(3); +#endif +#if R1024_Unroll_R(4) + R1024_8_rounds(4); +#endif +#if R1024_Unroll_R(5) + R1024_8_rounds(5); +#endif +#if R1024_Unroll_R(6) + R1024_8_rounds(6); +#endif +#if R1024_Unroll_R(7) + R1024_8_rounds(7); +#endif +#if R1024_Unroll_R(8) + R1024_8_rounds(8); +#endif +#if R1024_Unroll_R(9) + R1024_8_rounds(9); +#endif +#if R1024_Unroll_R(10) + R1024_8_rounds(10); +#endif +#if R1024_Unroll_R(11) + R1024_8_rounds(11); +#endif +#if R1024_Unroll_R(12) + R1024_8_rounds(12); +#endif +#if R1024_Unroll_R(13) + R1024_8_rounds(13); +#endif +#if R1024_Unroll_R(14) + R1024_8_rounds(14); +#endif +#if (SKEIN_UNROLL_1024 > 14) +#error "need more unrolling in Skein_1024_Process_Block" +#endif + } + /* + * do the final "feedforward" xor, update context chaining vars + */ + + ctx->X[0] = X00 ^ w[0]; + ctx->X[1] = X01 ^ w[1]; + ctx->X[2] = X02 ^ w[2]; + ctx->X[3] = X03 ^ w[3]; + ctx->X[4] = X04 ^ w[4]; + ctx->X[5] = X05 ^ w[5]; + ctx->X[6] = X06 ^ w[6]; + ctx->X[7] = X07 ^ w[7]; + ctx->X[8] = X08 ^ w[8]; + ctx->X[9] = X09 ^ w[9]; + ctx->X[10] = X10 ^ w[10]; + ctx->X[11] = X11 ^ w[11]; + ctx->X[12] = X12 ^ w[12]; + ctx->X[13] = X13 ^ w[13]; + ctx->X[14] = X14 ^ w[14]; + ctx->X[15] = X15 ^ w[15]; + + Skein_Show_Round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->X); + + ts[1] &= ~SKEIN_T1_FLAG_FIRST; + blkPtr += SKEIN1024_BLOCK_BYTES; + } while (--blkCnt); + ctx->h.T[0] = ts[0]; + ctx->h.T[1] = ts[1]; +} + +#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF) +size_t +Skein1024_Process_Block_CodeSize(void) +{ + return ((uint8_t *)Skein1024_Process_Block_CodeSize) - + ((uint8_t *)Skein1024_Process_Block); +} + +uint_t +Skein1024_Unroll_Cnt(void) +{ + return (SKEIN_UNROLL_1024); +} +#endif +#endif diff --git 
a/sys/contrib/openzfs/module/icp/algs/skein/skein_impl.h b/sys/contrib/openzfs/module/icp/algs/skein/skein_impl.h new file mode 100644 index 000000000000..205a517d69db --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/skein/skein_impl.h @@ -0,0 +1,292 @@ +/* + * Internal definitions for Skein hashing. + * Source code author: Doug Whiting, 2008. + * This algorithm and source code is released to the public domain. + * + * The following compile-time switches may be defined to control some + * tradeoffs between speed, code size, error checking, and security. + * + * The "default" note explains what happens when the switch is not defined. + * + * SKEIN_DEBUG -- make callouts from inside Skein code + * to examine/display intermediate values. + * [default: no callouts (no overhead)] + * + * SKEIN_ERR_CHECK -- how error checking is handled inside Skein + * code. If not defined, most error checking + * is disabled (for performance). Otherwise, + * the switch value is interpreted as: + * 0: use assert() to flag errors + * 1: return SKEIN_FAIL to flag errors + */ +/* Copyright 2013 Doug Whiting. This code is released to the public domain. */ + +#ifndef _SKEIN_IMPL_H_ +#define _SKEIN_IMPL_H_ + +#include <sys/skein.h> +#include <sys/strings.h> +#include <sys/note.h> +#include "skein_impl.h" +#include "skein_port.h" + +/* + * "Internal" Skein definitions + * -- not needed for sequential hashing API, but will be + * helpful for other uses of Skein (e.g., tree hash mode). + * -- included here so that they can be shared between + * reference and optimized code. + */ + +/* tweak word T[1]: bit field starting positions */ +/* offset 64 because it's the second word */ +#define SKEIN_T1_BIT(BIT) ((BIT) - 64) + +/* bits 112..118: level in hash tree */ +#define SKEIN_T1_POS_TREE_LVL SKEIN_T1_BIT(112) +/* bit 119: partial final input byte */ +#define SKEIN_T1_POS_BIT_PAD SKEIN_T1_BIT(119) +/* bits 120..125: type field */ +#define SKEIN_T1_POS_BLK_TYPE SKEIN_T1_BIT(120) +/* bits 126: first block flag */ +#define SKEIN_T1_POS_FIRST SKEIN_T1_BIT(126) +/* bit 127: final block flag */ +#define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127) + +/* tweak word T[1]: flag bit definition(s) */ +#define SKEIN_T1_FLAG_FIRST (((uint64_t)1) << SKEIN_T1_POS_FIRST) +#define SKEIN_T1_FLAG_FINAL (((uint64_t)1) << SKEIN_T1_POS_FINAL) +#define SKEIN_T1_FLAG_BIT_PAD (((uint64_t)1) << SKEIN_T1_POS_BIT_PAD) + +/* tweak word T[1]: tree level bit field mask */ +#define SKEIN_T1_TREE_LVL_MASK (((uint64_t)0x7F) << SKEIN_T1_POS_TREE_LVL) +#define SKEIN_T1_TREE_LEVEL(n) (((uint64_t)(n)) << SKEIN_T1_POS_TREE_LVL) + +/* tweak word T[1]: block type field */ +#define SKEIN_BLK_TYPE_KEY (0) /* key, for MAC and KDF */ +#define SKEIN_BLK_TYPE_CFG (4) /* configuration block */ +#define SKEIN_BLK_TYPE_PERS (8) /* personalization string */ +#define SKEIN_BLK_TYPE_PK (12) /* public key (for signature hashing) */ +#define SKEIN_BLK_TYPE_KDF (16) /* key identifier for KDF */ +#define SKEIN_BLK_TYPE_NONCE (20) /* nonce for PRNG */ +#define SKEIN_BLK_TYPE_MSG (48) /* message processing */ +#define SKEIN_BLK_TYPE_OUT (63) /* output stage */ +#define SKEIN_BLK_TYPE_MASK (63) /* bit field mask */ + +#define SKEIN_T1_BLK_TYPE(T) \ + (((uint64_t)(SKEIN_BLK_TYPE_##T)) << SKEIN_T1_POS_BLK_TYPE) +/* key, for MAC and KDF */ +#define SKEIN_T1_BLK_TYPE_KEY SKEIN_T1_BLK_TYPE(KEY) +/* configuration block */ +#define SKEIN_T1_BLK_TYPE_CFG SKEIN_T1_BLK_TYPE(CFG) +/* personalization string */ +#define SKEIN_T1_BLK_TYPE_PERS SKEIN_T1_BLK_TYPE(PERS) +/* public key (for digital 
signature hashing) */ +#define SKEIN_T1_BLK_TYPE_PK SKEIN_T1_BLK_TYPE(PK) +/* key identifier for KDF */ +#define SKEIN_T1_BLK_TYPE_KDF SKEIN_T1_BLK_TYPE(KDF) +/* nonce for PRNG */ +#define SKEIN_T1_BLK_TYPE_NONCE SKEIN_T1_BLK_TYPE(NONCE) +/* message processing */ +#define SKEIN_T1_BLK_TYPE_MSG SKEIN_T1_BLK_TYPE(MSG) +/* output stage */ +#define SKEIN_T1_BLK_TYPE_OUT SKEIN_T1_BLK_TYPE(OUT) +/* field bit mask */ +#define SKEIN_T1_BLK_TYPE_MASK SKEIN_T1_BLK_TYPE(MASK) + +#define SKEIN_T1_BLK_TYPE_CFG_FINAL \ + (SKEIN_T1_BLK_TYPE_CFG | SKEIN_T1_FLAG_FINAL) +#define SKEIN_T1_BLK_TYPE_OUT_FINAL \ + (SKEIN_T1_BLK_TYPE_OUT | SKEIN_T1_FLAG_FINAL) + +#define SKEIN_VERSION (1) + +#ifndef SKEIN_ID_STRING_LE /* allow compile-time personalization */ +#define SKEIN_ID_STRING_LE (0x33414853) /* "SHA3" (little-endian) */ +#endif + +#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((uint64_t)(hi32)) << 32)) +#define SKEIN_SCHEMA_VER SKEIN_MK_64(SKEIN_VERSION, SKEIN_ID_STRING_LE) +#define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA, 0xA9FC1A22) + +#define SKEIN_CFG_STR_LEN (4*8) + +/* bit field definitions in config block treeInfo word */ +#define SKEIN_CFG_TREE_LEAF_SIZE_POS (0) +#define SKEIN_CFG_TREE_NODE_SIZE_POS (8) +#define SKEIN_CFG_TREE_MAX_LEVEL_POS (16) + +#define SKEIN_CFG_TREE_LEAF_SIZE_MSK \ + (((uint64_t)0xFF) << SKEIN_CFG_TREE_LEAF_SIZE_POS) +#define SKEIN_CFG_TREE_NODE_SIZE_MSK \ + (((uint64_t)0xFF) << SKEIN_CFG_TREE_NODE_SIZE_POS) +#define SKEIN_CFG_TREE_MAX_LEVEL_MSK \ + (((uint64_t)0xFF) << SKEIN_CFG_TREE_MAX_LEVEL_POS) + +#define SKEIN_CFG_TREE_INFO(leaf, node, maxLvl) \ + ((((uint64_t)(leaf)) << SKEIN_CFG_TREE_LEAF_SIZE_POS) | \ + (((uint64_t)(node)) << SKEIN_CFG_TREE_NODE_SIZE_POS) | \ + (((uint64_t)(maxLvl)) << SKEIN_CFG_TREE_MAX_LEVEL_POS)) + +/* use as treeInfo in InitExt() call for sequential processing */ +#define SKEIN_CFG_TREE_INFO_SEQUENTIAL SKEIN_CFG_TREE_INFO(0, 0, 0) + +/* + * Skein macros for getting/setting tweak words, etc. + * These are useful for partial input bytes, hash tree init/update, etc. 
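 *
 * For illustration only: a hypothetical caller (the real call sites are
 * in skein.c) might drive the tweak words with these macros roughly as
 *
 *	Skein_Start_New_Type(ctx, MSG);     start absorbing message data:
 *	                                    T0 = 0, T1 = FIRST | MSG
 *	Skein_Set_Bit_Pad_Flag(ctx->h);     flag a partial final input byte
 *
 * before handing full blocks to the Process_Block routines declared
 * near the end of this header.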
+ */ +#define Skein_Get_Tweak(ctxPtr, TWK_NUM) ((ctxPtr)->h.T[TWK_NUM]) +#define Skein_Set_Tweak(ctxPtr, TWK_NUM, tVal) \ + do { \ + (ctxPtr)->h.T[TWK_NUM] = (tVal); \ + _NOTE(CONSTCOND) \ + } while (0) + +#define Skein_Get_T0(ctxPtr) Skein_Get_Tweak(ctxPtr, 0) +#define Skein_Get_T1(ctxPtr) Skein_Get_Tweak(ctxPtr, 1) +#define Skein_Set_T0(ctxPtr, T0) Skein_Set_Tweak(ctxPtr, 0, T0) +#define Skein_Set_T1(ctxPtr, T1) Skein_Set_Tweak(ctxPtr, 1, T1) + +/* set both tweak words at once */ +#define Skein_Set_T0_T1(ctxPtr, T0, T1) \ + do { \ + Skein_Set_T0(ctxPtr, (T0)); \ + Skein_Set_T1(ctxPtr, (T1)); \ + _NOTE(CONSTCOND) \ + } while (0) + +#define Skein_Set_Type(ctxPtr, BLK_TYPE) \ + Skein_Set_T1(ctxPtr, SKEIN_T1_BLK_TYPE_##BLK_TYPE) + +/* + * set up for starting with a new type: h.T[0]=0; h.T[1] = NEW_TYPE; h.bCnt=0; + */ +#define Skein_Start_New_Type(ctxPtr, BLK_TYPE) \ + do { \ + Skein_Set_T0_T1(ctxPtr, 0, SKEIN_T1_FLAG_FIRST | \ + SKEIN_T1_BLK_TYPE_ ## BLK_TYPE); \ + (ctxPtr)->h.bCnt = 0; \ + _NOTE(CONSTCOND) \ + } while (0) + +#define Skein_Clear_First_Flag(hdr) \ + do { \ + (hdr).T[1] &= ~SKEIN_T1_FLAG_FIRST; \ + _NOTE(CONSTCOND) \ + } while (0) +#define Skein_Set_Bit_Pad_Flag(hdr) \ + do { \ + (hdr).T[1] |= SKEIN_T1_FLAG_BIT_PAD; \ + _NOTE(CONSTCOND) \ + } while (0) + +#define Skein_Set_Tree_Level(hdr, height) \ + do { \ + (hdr).T[1] |= SKEIN_T1_TREE_LEVEL(height); \ + _NOTE(CONSTCOND) \ + } while (0) + +/* + * "Internal" Skein definitions for debugging and error checking + * Note: in Illumos we always disable debugging features. + */ +#define Skein_Show_Block(bits, ctx, X, blkPtr, wPtr, ksEvenPtr, ksOddPtr) +#define Skein_Show_Round(bits, ctx, r, X) +#define Skein_Show_R_Ptr(bits, ctx, r, X_ptr) +#define Skein_Show_Final(bits, ctx, cnt, outPtr) +#define Skein_Show_Key(bits, ctx, key, keyBytes) + +/* run-time checks (e.g., bad params, uninitialized context)? 
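 *
 * For illustration only (hypothetical condition): with SKEIN_ERR_CHECK
 * defined and SKEIN_ASSERT undefined, a guard such as
 *
 *	Skein_Assert(ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL);
 *
 * expands to an early "return (SKEIN_FAIL)" when the condition is false;
 * when SKEIN_ERR_CHECK is not defined at all, both Skein_Assert() and
 * Skein_assert() compile away to nothing.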
*/ +#ifndef SKEIN_ERR_CHECK +/* default: ignore all Asserts, for performance */ +#define Skein_Assert(x, retCode) +#define Skein_assert(x) +#elif defined(SKEIN_ASSERT) +#include <sys/debug.h> +#define Skein_Assert(x, retCode) ASSERT(x) +#define Skein_assert(x) ASSERT(x) +#else +#include <sys/debug.h> +/* caller error */ +#define Skein_Assert(x, retCode) \ + do { \ + if (!(x)) \ + return (retCode); \ + _NOTE(CONSTCOND) \ + } while (0) +/* internal error */ +#define Skein_assert(x) ASSERT(x) +#endif + +/* + * Skein block function constants (shared across Ref and Opt code) + */ +enum { + /* Skein_256 round rotation constants */ + R_256_0_0 = 14, R_256_0_1 = 16, + R_256_1_0 = 52, R_256_1_1 = 57, + R_256_2_0 = 23, R_256_2_1 = 40, + R_256_3_0 = 5, R_256_3_1 = 37, + R_256_4_0 = 25, R_256_4_1 = 33, + R_256_5_0 = 46, R_256_5_1 = 12, + R_256_6_0 = 58, R_256_6_1 = 22, + R_256_7_0 = 32, R_256_7_1 = 32, + + /* Skein_512 round rotation constants */ + R_512_0_0 = 46, R_512_0_1 = 36, R_512_0_2 = 19, R_512_0_3 = 37, + R_512_1_0 = 33, R_512_1_1 = 27, R_512_1_2 = 14, R_512_1_3 = 42, + R_512_2_0 = 17, R_512_2_1 = 49, R_512_2_2 = 36, R_512_2_3 = 39, + R_512_3_0 = 44, R_512_3_1 = 9, R_512_3_2 = 54, R_512_3_3 = 56, + R_512_4_0 = 39, R_512_4_1 = 30, R_512_4_2 = 34, R_512_4_3 = 24, + R_512_5_0 = 13, R_512_5_1 = 50, R_512_5_2 = 10, R_512_5_3 = 17, + R_512_6_0 = 25, R_512_6_1 = 29, R_512_6_2 = 39, R_512_6_3 = 43, + R_512_7_0 = 8, R_512_7_1 = 35, R_512_7_2 = 56, R_512_7_3 = 22, + + /* Skein1024 round rotation constants */ + R1024_0_0 = 24, R1024_0_1 = 13, R1024_0_2 = 8, R1024_0_3 = + 47, R1024_0_4 = 8, R1024_0_5 = 17, R1024_0_6 = 22, R1024_0_7 = 37, + R1024_1_0 = 38, R1024_1_1 = 19, R1024_1_2 = 10, R1024_1_3 = + 55, R1024_1_4 = 49, R1024_1_5 = 18, R1024_1_6 = 23, R1024_1_7 = 52, + R1024_2_0 = 33, R1024_2_1 = 4, R1024_2_2 = 51, R1024_2_3 = + 13, R1024_2_4 = 34, R1024_2_5 = 41, R1024_2_6 = 59, R1024_2_7 = 17, + R1024_3_0 = 5, R1024_3_1 = 20, R1024_3_2 = 48, R1024_3_3 = + 41, R1024_3_4 = 47, R1024_3_5 = 28, R1024_3_6 = 16, R1024_3_7 = 25, + R1024_4_0 = 41, R1024_4_1 = 9, R1024_4_2 = 37, R1024_4_3 = + 31, R1024_4_4 = 12, R1024_4_5 = 47, R1024_4_6 = 44, R1024_4_7 = 30, + R1024_5_0 = 16, R1024_5_1 = 34, R1024_5_2 = 56, R1024_5_3 = + 51, R1024_5_4 = 4, R1024_5_5 = 53, R1024_5_6 = 42, R1024_5_7 = 41, + R1024_6_0 = 31, R1024_6_1 = 44, R1024_6_2 = 47, R1024_6_3 = + 46, R1024_6_4 = 19, R1024_6_5 = 42, R1024_6_6 = 44, R1024_6_7 = 25, + R1024_7_0 = 9, R1024_7_1 = 48, R1024_7_2 = 35, R1024_7_3 = + 52, R1024_7_4 = 23, R1024_7_5 = 31, R1024_7_6 = 37, R1024_7_7 = 20 +}; + +/* number of rounds for the different block sizes */ +#define SKEIN_256_ROUNDS_TOTAL (72) +#define SKEIN_512_ROUNDS_TOTAL (72) +#define SKEIN1024_ROUNDS_TOTAL (80) + + +extern const uint64_t SKEIN_256_IV_128[]; +extern const uint64_t SKEIN_256_IV_160[]; +extern const uint64_t SKEIN_256_IV_224[]; +extern const uint64_t SKEIN_256_IV_256[]; +extern const uint64_t SKEIN_512_IV_128[]; +extern const uint64_t SKEIN_512_IV_160[]; +extern const uint64_t SKEIN_512_IV_224[]; +extern const uint64_t SKEIN_512_IV_256[]; +extern const uint64_t SKEIN_512_IV_384[]; +extern const uint64_t SKEIN_512_IV_512[]; +extern const uint64_t SKEIN1024_IV_384[]; +extern const uint64_t SKEIN1024_IV_512[]; +extern const uint64_t SKEIN1024_IV_1024[]; + +/* Functions to process blkCnt (nonzero) full block(s) of data. 
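 *
 * For illustration only: an Update()-style caller that has gathered n
 * full blocks at msg would typically invoke, e.g.,
 *
 *	Skein_512_Process_Block(ctx, msg, n, SKEIN_512_BLOCK_BYTES);
 *
 * so that every block advances the byte count in T[0] by one full
 * block, while Final() passes the residual ctx->h.bCnt as byteCntAdd
 * for the last, possibly partial, block.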
*/ +void Skein_256_Process_Block(Skein_256_Ctxt_t *ctx, const uint8_t *blkPtr, + size_t blkCnt, size_t byteCntAdd); +void Skein_512_Process_Block(Skein_512_Ctxt_t *ctx, const uint8_t *blkPtr, + size_t blkCnt, size_t byteCntAdd); +void Skein1024_Process_Block(Skein1024_Ctxt_t *ctx, const uint8_t *blkPtr, + size_t blkCnt, size_t byteCntAdd); + +#endif /* _SKEIN_IMPL_H_ */ diff --git a/sys/contrib/openzfs/module/icp/algs/skein/skein_iv.c b/sys/contrib/openzfs/module/icp/algs/skein/skein_iv.c new file mode 100644 index 000000000000..140d38f76547 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/skein/skein_iv.c @@ -0,0 +1,185 @@ +/* + * Pre-computed Skein IVs + * + * NOTE: these values are not "magic" constants, but + * are generated using the Threefish block function. + * They are pre-computed here only for speed; i.e., to + * avoid the need for a Threefish call during Init(). + * + * The IV for any fixed hash length may be pre-computed. + * Only the most common values are included here. + */ +/* Copyright 2013 Doug Whiting. This code is released to the public domain. */ +/* + * Illumos implementation note: these constants are for Skein v1.3 as per: + * http://www.skein-hash.info/sites/default/files/skein1.3.pdf + */ + +#include <sys/skein.h> /* get Skein macros and types */ +#include "skein_impl.h" /* get internal definitions */ + +#define MK_64 SKEIN_MK_64 + +/* blkSize = 256 bits. hashSize = 128 bits */ +const uint64_t SKEIN_256_IV_128[] = { + MK_64(0xE1111906, 0x964D7260), + MK_64(0x883DAAA7, 0x7C8D811C), + MK_64(0x10080DF4, 0x91960F7A), + MK_64(0xCCF7DDE5, 0xB45BC1C2) +}; + +/* blkSize = 256 bits. hashSize = 160 bits */ +const uint64_t SKEIN_256_IV_160[] = { + MK_64(0x14202314, 0x72825E98), + MK_64(0x2AC4E9A2, 0x5A77E590), + MK_64(0xD47A5856, 0x8838D63E), + MK_64(0x2DD2E496, 0x8586AB7D) +}; + +/* blkSize = 256 bits. hashSize = 224 bits */ +const uint64_t SKEIN_256_IV_224[] = { + MK_64(0xC6098A8C, 0x9AE5EA0B), + MK_64(0x876D5686, 0x08C5191C), + MK_64(0x99CB88D7, 0xD7F53884), + MK_64(0x384BDDB1, 0xAEDDB5DE) +}; + +/* blkSize = 256 bits. hashSize = 256 bits */ +const uint64_t SKEIN_256_IV_256[] = { + MK_64(0xFC9DA860, 0xD048B449), + MK_64(0x2FCA6647, 0x9FA7D833), + MK_64(0xB33BC389, 0x6656840F), + MK_64(0x6A54E920, 0xFDE8DA69) +}; + +/* blkSize = 512 bits. hashSize = 128 bits */ +const uint64_t SKEIN_512_IV_128[] = { + MK_64(0xA8BC7BF3, 0x6FBF9F52), + MK_64(0x1E9872CE, 0xBD1AF0AA), + MK_64(0x309B1790, 0xB32190D3), + MK_64(0xBCFBB854, 0x3F94805C), + MK_64(0x0DA61BCD, 0x6E31B11B), + MK_64(0x1A18EBEA, 0xD46A32E3), + MK_64(0xA2CC5B18, 0xCE84AA82), + MK_64(0x6982AB28, 0x9D46982D) +}; + +/* blkSize = 512 bits. hashSize = 160 bits */ +const uint64_t SKEIN_512_IV_160[] = { + MK_64(0x28B81A2A, 0xE013BD91), + MK_64(0xC2F11668, 0xB5BDF78F), + MK_64(0x1760D8F3, 0xF6A56F12), + MK_64(0x4FB74758, 0x8239904F), + MK_64(0x21EDE07F, 0x7EAF5056), + MK_64(0xD908922E, 0x63ED70B8), + MK_64(0xB8EC76FF, 0xECCB52FA), + MK_64(0x01A47BB8, 0xA3F27A6E) +}; + +/* blkSize = 512 bits. hashSize = 224 bits */ +const uint64_t SKEIN_512_IV_224[] = { + MK_64(0xCCD06162, 0x48677224), + MK_64(0xCBA65CF3, 0xA92339EF), + MK_64(0x8CCD69D6, 0x52FF4B64), + MK_64(0x398AED7B, 0x3AB890B4), + MK_64(0x0F59D1B1, 0x457D2BD0), + MK_64(0x6776FE65, 0x75D4EB3D), + MK_64(0x99FBC70E, 0x997413E9), + MK_64(0x9E2CFCCF, 0xE1C41EF7) +}; + +/* blkSize = 512 bits. 
hashSize = 256 bits */ +const uint64_t SKEIN_512_IV_256[] = { + MK_64(0xCCD044A1, 0x2FDB3E13), + MK_64(0xE8359030, 0x1A79A9EB), + MK_64(0x55AEA061, 0x4F816E6F), + MK_64(0x2A2767A4, 0xAE9B94DB), + MK_64(0xEC06025E, 0x74DD7683), + MK_64(0xE7A436CD, 0xC4746251), + MK_64(0xC36FBAF9, 0x393AD185), + MK_64(0x3EEDBA18, 0x33EDFC13) +}; + +/* blkSize = 512 bits. hashSize = 384 bits */ +const uint64_t SKEIN_512_IV_384[] = { + MK_64(0xA3F6C6BF, 0x3A75EF5F), + MK_64(0xB0FEF9CC, 0xFD84FAA4), + MK_64(0x9D77DD66, 0x3D770CFE), + MK_64(0xD798CBF3, 0xB468FDDA), + MK_64(0x1BC4A666, 0x8A0E4465), + MK_64(0x7ED7D434, 0xE5807407), + MK_64(0x548FC1AC, 0xD4EC44D6), + MK_64(0x266E1754, 0x6AA18FF8) +}; + +/* blkSize = 512 bits. hashSize = 512 bits */ +const uint64_t SKEIN_512_IV_512[] = { + MK_64(0x4903ADFF, 0x749C51CE), + MK_64(0x0D95DE39, 0x9746DF03), + MK_64(0x8FD19341, 0x27C79BCE), + MK_64(0x9A255629, 0xFF352CB1), + MK_64(0x5DB62599, 0xDF6CA7B0), + MK_64(0xEABE394C, 0xA9D5C3F4), + MK_64(0x991112C7, 0x1A75B523), + MK_64(0xAE18A40B, 0x660FCC33) +}; + +/* blkSize = 1024 bits. hashSize = 384 bits */ +const uint64_t SKEIN1024_IV_384[] = { + MK_64(0x5102B6B8, 0xC1894A35), + MK_64(0xFEEBC9E3, 0xFE8AF11A), + MK_64(0x0C807F06, 0xE32BED71), + MK_64(0x60C13A52, 0xB41A91F6), + MK_64(0x9716D35D, 0xD4917C38), + MK_64(0xE780DF12, 0x6FD31D3A), + MK_64(0x797846B6, 0xC898303A), + MK_64(0xB172C2A8, 0xB3572A3B), + MK_64(0xC9BC8203, 0xA6104A6C), + MK_64(0x65909338, 0xD75624F4), + MK_64(0x94BCC568, 0x4B3F81A0), + MK_64(0x3EBBF51E, 0x10ECFD46), + MK_64(0x2DF50F0B, 0xEEB08542), + MK_64(0x3B5A6530, 0x0DBC6516), + MK_64(0x484B9CD2, 0x167BBCE1), + MK_64(0x2D136947, 0xD4CBAFEA) +}; + +/* blkSize = 1024 bits. hashSize = 512 bits */ +const uint64_t SKEIN1024_IV_512[] = { + MK_64(0xCAEC0E5D, 0x7C1B1B18), + MK_64(0xA01B0E04, 0x5F03E802), + MK_64(0x33840451, 0xED912885), + MK_64(0x374AFB04, 0xEAEC2E1C), + MK_64(0xDF25A0E2, 0x813581F7), + MK_64(0xE4004093, 0x8B12F9D2), + MK_64(0xA662D539, 0xC2ED39B6), + MK_64(0xFA8B85CF, 0x45D8C75A), + MK_64(0x8316ED8E, 0x29EDE796), + MK_64(0x053289C0, 0x2E9F91B8), + MK_64(0xC3F8EF1D, 0x6D518B73), + MK_64(0xBDCEC3C4, 0xD5EF332E), + MK_64(0x549A7E52, 0x22974487), + MK_64(0x67070872, 0x5B749816), + MK_64(0xB9CD28FB, 0xF0581BD1), + MK_64(0x0E2940B8, 0x15804974) +}; + +/* blkSize = 1024 bits. hashSize = 1024 bits */ +const uint64_t SKEIN1024_IV_1024[] = { + MK_64(0xD593DA07, 0x41E72355), + MK_64(0x15B5E511, 0xAC73E00C), + MK_64(0x5180E5AE, 0xBAF2C4F0), + MK_64(0x03BD41D3, 0xFCBCAFAF), + MK_64(0x1CAEC6FD, 0x1983A898), + MK_64(0x6E510B8B, 0xCDD0589F), + MK_64(0x77E2BDFD, 0xC6394ADA), + MK_64(0xC11E1DB5, 0x24DCB0A3), + MK_64(0xD6D14AF9, 0xC6329AB5), + MK_64(0x6A9B0BFC, 0x6EB67E0D), + MK_64(0x9243C60D, 0xCCFF1332), + MK_64(0x1A1F1DDE, 0x743F02D4), + MK_64(0x0996753C, 0x10ED0BB8), + MK_64(0x6572DD22, 0xF2B4969A), + MK_64(0x61FD3062, 0xD00A579A), + MK_64(0x1DE0536E, 0x8682E539) +}; diff --git a/sys/contrib/openzfs/module/icp/algs/skein/skein_port.h b/sys/contrib/openzfs/module/icp/algs/skein/skein_port.h new file mode 100644 index 000000000000..ce4353082552 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/algs/skein/skein_port.h @@ -0,0 +1,116 @@ +/* + * Platform-specific definitions for Skein hash function. + * + * Source code author: Doug Whiting, 2008. + * + * This algorithm and source code is released to the public domain. + * + * Many thanks to Brian Gladman for his portable header files. + * + * To port Skein to an "unsupported" platform, change the definitions + * in this file appropriately. 
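 *
 * For illustration only: the usual porting knobs are the endianness
 * selection and the 64-bit load/store helpers defined below.  A
 * hypothetical big-endian target lacking <sys/isa_defs.h> could simply
 * predefine
 *
 *	#define SKEIN_NEED_SWAP (1)
 *
 * and rely on the portable Skein_Get64_LSB_First() and
 * Skein_Put64_LSB_First() routines at the end of this header.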
+ */ +/* Copyright 2013 Doug Whiting. This code is released to the public domain. */ + +#ifndef _SKEIN_PORT_H_ +#define _SKEIN_PORT_H_ + +#include <sys/types.h> /* get integer type definitions */ + +#ifndef RotL_64 +#define RotL_64(x, N) (((x) << (N)) | ((x) >> (64 - (N)))) +#endif + +/* + * Skein is "natively" little-endian (unlike SHA-xxx), for optimal + * performance on x86 CPUs. The Skein code requires the following + * definitions for dealing with endianness: + * + * SKEIN_NEED_SWAP: 0 for little-endian, 1 for big-endian + * Skein_Put64_LSB_First + * Skein_Get64_LSB_First + * Skein_Swap64 + * + * If SKEIN_NEED_SWAP is defined at compile time, it is used here + * along with the portable versions of Put64/Get64/Swap64, which + * are slow in general. + * + * Otherwise, an "auto-detect" of endianness is attempted below. + * If the default handling doesn't work well, the user may insert + * platform-specific code instead (e.g., for big-endian CPUs). + * + */ +#ifndef SKEIN_NEED_SWAP /* compile-time "override" for endianness? */ + +#include <sys/isa_defs.h> /* get endianness selection */ + +#if defined(_ZFS_BIG_ENDIAN) +/* here for big-endian CPUs */ +#define SKEIN_NEED_SWAP (1) +#else +/* here for x86 and x86-64 CPUs (and other detected little-endian CPUs) */ +#define SKEIN_NEED_SWAP (0) +#define Skein_Put64_LSB_First(dst08, src64, bCnt) bcopy(src64, dst08, bCnt) +#define Skein_Get64_LSB_First(dst64, src08, wCnt) \ + bcopy(src08, dst64, 8 * (wCnt)) +#endif + +#endif /* ifndef SKEIN_NEED_SWAP */ + +/* + * Provide any definitions still needed. + */ +#ifndef Skein_Swap64 /* swap for big-endian, nop for little-endian */ +#if SKEIN_NEED_SWAP +#define Skein_Swap64(w64) \ + (((((uint64_t)(w64)) & 0xFF) << 56) | \ + (((((uint64_t)(w64)) >> 8) & 0xFF) << 48) | \ + (((((uint64_t)(w64)) >> 16) & 0xFF) << 40) | \ + (((((uint64_t)(w64)) >> 24) & 0xFF) << 32) | \ + (((((uint64_t)(w64)) >> 32) & 0xFF) << 24) | \ + (((((uint64_t)(w64)) >> 40) & 0xFF) << 16) | \ + (((((uint64_t)(w64)) >> 48) & 0xFF) << 8) | \ + (((((uint64_t)(w64)) >> 56) & 0xFF))) +#else +#define Skein_Swap64(w64) (w64) +#endif +#endif /* ifndef Skein_Swap64 */ + +#ifndef Skein_Put64_LSB_First +static inline void +Skein_Put64_LSB_First(uint8_t *dst, const uint64_t *src, size_t bCnt) +{ + /* + * this version is fully portable (big-endian or little-endian), + * but slow + */ + size_t n; + + for (n = 0; n < bCnt; n++) + dst[n] = (uint8_t)(src[n >> 3] >> (8 * (n & 7))); +} +#endif /* ifndef Skein_Put64_LSB_First */ + +#ifndef Skein_Get64_LSB_First +static inline void +Skein_Get64_LSB_First(uint64_t *dst, const uint8_t *src, size_t wCnt) +{ + /* + * this version is fully portable (big-endian or little-endian), + * but slow + */ + size_t n; + + for (n = 0; n < 8 * wCnt; n += 8) + dst[n / 8] = (((uint64_t)src[n])) + + (((uint64_t)src[n + 1]) << 8) + + (((uint64_t)src[n + 2]) << 16) + + (((uint64_t)src[n + 3]) << 24) + + (((uint64_t)src[n + 4]) << 32) + + (((uint64_t)src[n + 5]) << 40) + + (((uint64_t)src[n + 6]) << 48) + + (((uint64_t)src[n + 7]) << 56); +} +#endif /* ifndef Skein_Get64_LSB_First */ + +#endif /* _SKEIN_PORT_H_ */ diff --git a/sys/contrib/openzfs/module/icp/api/kcf_cipher.c b/sys/contrib/openzfs/module/icp/api/kcf_cipher.c new file mode 100644 index 000000000000..d6aa48147edb --- /dev/null +++ b/sys/contrib/openzfs/module/icp/api/kcf_cipher.c @@ -0,0 +1,930 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). 
+ * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> +#include <sys/crypto/api.h> +#include <sys/crypto/spi.h> +#include <sys/crypto/sched_impl.h> + +/* + * Encryption and decryption routines. + */ + +/* + * The following are the possible returned values common to all the routines + * below. The applicability of some of these return values depends on the + * presence of the arguments. + * + * CRYPTO_SUCCESS: The operation completed successfully. + * CRYPTO_QUEUED: A request was submitted successfully. The callback + * routine will be called when the operation is done. + * CRYPTO_INVALID_MECH_NUMBER, CRYPTO_INVALID_MECH_PARAM, or + * CRYPTO_INVALID_MECH for problems with the 'mech'. + * CRYPTO_INVALID_DATA for bogus 'data' + * CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work. + * CRYPTO_INVALID_CONTEXT: Not a valid context. + * CRYPTO_BUSY: Cannot process the request now. Schedule a + * crypto_bufcall(), or try later. + * CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED: No provider is + * capable of a function or a mechanism. + * CRYPTO_INVALID_KEY: bogus 'key' argument. + * CRYPTO_INVALID_PLAINTEXT: bogus 'plaintext' argument. + * CRYPTO_INVALID_CIPHERTEXT: bogus 'ciphertext' argument. + */ + +/* + * crypto_cipher_init_prov() + * + * Arguments: + * + * pd: provider descriptor + * sid: session id + * mech: crypto_mechanism_t pointer. + * mech_type is a valid value previously returned by + * crypto_mech2id(); + * When the mech's parameter is not NULL, its definition depends + * on the standard definition of the mechanism. + * key: pointer to a crypto_key_t structure. + * tmpl: a crypto_ctx_template_t, opaque template of a context of an + * encryption or decryption with the 'mech' using 'key'. + * 'tmpl' is created by a previous call to + * crypto_create_ctx_template(). + * ctxp: Pointer to a crypto_context_t. + * func: CRYPTO_FG_ENCRYPT or CRYPTO_FG_DECRYPT. + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * This is a common function invoked internally by both + * crypto_encrypt_init() and crypto_decrypt_init(). + * Asynchronously submits a request for, or synchronously performs the + * initialization of an encryption or a decryption operation. + * When possible and applicable, will internally use the pre-expanded key + * schedule from the context template, tmpl. + * When complete and successful, 'ctxp' will contain a crypto_context_t + * valid for later calls to encrypt_update() and encrypt_final(), or + * decrypt_update() and decrypt_final(). + * The caller should hold a reference on the specified provider + * descriptor before calling this function. 
+ * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +static int +crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_key_t *key, + crypto_spi_ctx_template_t tmpl, crypto_context_t *ctxp, + crypto_call_req_t *crq, crypto_func_group_t func) +{ + int error; + crypto_ctx_t *ctx; + kcf_req_params_t params; + kcf_provider_desc_t *pd = provider; + kcf_provider_desc_t *real_provider = pd; + + ASSERT(KCF_PROV_REFHELD(pd)); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { + if (func == CRYPTO_FG_ENCRYPT) { + error = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, + &real_provider, CRYPTO_FG_ENCRYPT); + } else { + error = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, + &real_provider, CRYPTO_FG_DECRYPT); + } + + if (error != CRYPTO_SUCCESS) + return (error); + } + + /* Allocate and initialize the canonical context */ + if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) { + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + return (CRYPTO_HOST_MEMORY); + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(crq, pd)) { + crypto_mechanism_t lmech; + + lmech = *mech; + KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech); + + if (func == CRYPTO_FG_ENCRYPT) + error = KCF_PROV_ENCRYPT_INIT(real_provider, ctx, + &lmech, key, tmpl, KCF_SWFP_RHNDL(crq)); + else { + ASSERT(func == CRYPTO_FG_DECRYPT); + + error = KCF_PROV_DECRYPT_INIT(real_provider, ctx, + &lmech, key, tmpl, KCF_SWFP_RHNDL(crq)); + } + KCF_PROV_INCRSTATS(pd, error); + + goto done; + } + + /* Check if context sharing is possible */ + if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && + key->ck_format == CRYPTO_KEY_RAW && + KCF_CAN_SHARE_OPSTATE(pd, mech->cm_type)) { + kcf_context_t *tctxp = (kcf_context_t *)ctx; + kcf_provider_desc_t *tpd = NULL; + crypto_mech_info_t *sinfo; + + if ((kcf_get_sw_prov(mech->cm_type, &tpd, &tctxp->kc_mech, + B_FALSE) == CRYPTO_SUCCESS)) { + int tlen; + + sinfo = &(KCF_TO_PROV_MECHINFO(tpd, mech->cm_type)); + /* + * key->ck_length from the consumer is always in bits. + * We convert it to be in the same unit registered by + * the provider in order to do a comparison. + */ + if (sinfo->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES) + tlen = key->ck_length >> 3; + else + tlen = key->ck_length; + /* + * Check if the software provider can support context + * sharing and support this key length. + */ + if ((sinfo->cm_mech_flags & CRYPTO_CAN_SHARE_OPSTATE) && + (tlen >= sinfo->cm_min_key_length) && + (tlen <= sinfo->cm_max_key_length)) { + ctx->cc_flags = CRYPTO_INIT_OPSTATE; + tctxp->kc_sw_prov_desc = tpd; + } else + KCF_PROV_REFRELE(tpd); + } + } + + if (func == CRYPTO_FG_ENCRYPT) { + KCF_WRAP_ENCRYPT_OPS_PARAMS(¶ms, KCF_OP_INIT, sid, + mech, key, NULL, NULL, tmpl); + } else { + ASSERT(func == CRYPTO_FG_DECRYPT); + KCF_WRAP_DECRYPT_OPS_PARAMS(¶ms, KCF_OP_INIT, sid, + mech, key, NULL, NULL, tmpl); + } + + error = kcf_submit_request(real_provider, ctx, crq, ¶ms, + B_FALSE); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + +done: + if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED)) + *ctxp = (crypto_context_t)ctx; + else { + /* Release the hold done in kcf_new_ctx(). 
*/ + KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private); + } + + return (error); +} + +/* + * Same as crypto_cipher_init_prov(), but relies on the scheduler to pick + * an appropriate provider. See crypto_cipher_init_prov() comments for more + * details. + */ +static int +crypto_cipher_init(crypto_mechanism_t *mech, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_context_t *ctxp, + crypto_call_req_t *crq, crypto_func_group_t func) +{ + int error; + kcf_mech_entry_t *me; + kcf_provider_desc_t *pd; + kcf_ctx_template_t *ctx_tmpl; + crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; + kcf_prov_tried_t *list = NULL; + +retry: + /* pd is returned held */ + if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, + list, func, CHECK_RESTRICT(crq), 0)) == NULL) { + if (list != NULL) + kcf_free_triedlist(list); + return (error); + } + + /* + * For SW providers, check the validity of the context template + * It is very rare that the generation number mis-matches, so + * is acceptable to fail here, and let the consumer recover by + * freeing this tmpl and create a new one for the key and new SW + * provider + */ + if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && + ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { + if (ctx_tmpl->ct_generation != me->me_gen_swprov) { + if (list != NULL) + kcf_free_triedlist(list); + KCF_PROV_REFRELE(pd); + return (CRYPTO_OLD_CTX_TEMPLATE); + } else { + spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; + } + } + + error = crypto_cipher_init_prov(pd, pd->pd_sid, mech, key, + spi_ctx_tmpl, ctxp, crq, func); + if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && + IS_RECOVERABLE(error)) { + /* Add pd to the linked list of providers tried. */ + if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + goto retry; + } + + if (list != NULL) + kcf_free_triedlist(list); + + KCF_PROV_REFRELE(pd); + return (error); +} + +/* + * crypto_encrypt_prov() + * + * Arguments: + * pd: provider descriptor + * sid: session id + * mech: crypto_mechanism_t pointer. + * mech_type is a valid value previously returned by + * crypto_mech2id(); + * When the mech's parameter is not NULL, its definition depends + * on the standard definition of the mechanism. + * key: pointer to a crypto_key_t structure. + * plaintext: The message to be encrypted + * ciphertext: Storage for the encrypted message. The length needed + * depends on the mechanism, and the plaintext's size. + * tmpl: a crypto_ctx_template_t, opaque template of a context of an + * encryption with the 'mech' using 'key'. 'tmpl' is created by + * a previous call to crypto_create_ctx_template(). + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs a + * single-part encryption of 'plaintext' with the mechanism 'mech', using + * the key 'key'. + * When complete and successful, 'ciphertext' will contain the encrypted + * message. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. 
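 *
 * For illustration only, a rough sketch of a synchronous single-part
 * request issued through the scheduler-driven variant crypto_encrypt()
 * (mechanism name, parameter block and buffers below are hypothetical):
 *
 *	crypto_mechanism_t mech;
 *	crypto_key_t key;
 *	crypto_data_t pt, ct;
 *
 *	mech.cm_type = crypto_mech2id(SUN_CKM_AES_CCM);
 *	mech.cm_param = (char *)&ccm_params;
 *	mech.cm_param_len = sizeof (ccm_params);
 *	key.ck_format = CRYPTO_KEY_RAW;
 *	key.ck_data = keybuf;
 *	key.ck_length = keybits;            always expressed in bits
 *	pt.cd_format = ct.cd_format = CRYPTO_DATA_RAW;
 *	(fill in cd_offset, cd_length and cd_raw for both buffers)
 *	rv = crypto_encrypt(&mech, &pt, &key, NULL, &ct, NULL);
 *
 * A NULL crypto_call_req_t makes the call synchronous.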
+ */ +int +crypto_encrypt_prov(crypto_provider_t provider, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_data_t *plaintext, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_data_t *ciphertext, + crypto_call_req_t *crq) +{ + kcf_req_params_t params; + kcf_provider_desc_t *pd = provider; + kcf_provider_desc_t *real_provider = pd; + int error; + + ASSERT(KCF_PROV_REFHELD(pd)); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { + error = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, + &real_provider, CRYPTO_FG_ENCRYPT_ATOMIC); + + if (error != CRYPTO_SUCCESS) + return (error); + } + + KCF_WRAP_ENCRYPT_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, sid, mech, key, + plaintext, ciphertext, tmpl); + + error = kcf_submit_request(real_provider, NULL, crq, ¶ms, B_FALSE); + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + + return (error); +} + +/* + * Same as crypto_encrypt_prov(), but relies on the scheduler to pick + * a provider. See crypto_encrypt_prov() for more details. + */ +int +crypto_encrypt(crypto_mechanism_t *mech, crypto_data_t *plaintext, + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext, + crypto_call_req_t *crq) +{ + int error; + kcf_mech_entry_t *me; + kcf_req_params_t params; + kcf_provider_desc_t *pd; + kcf_ctx_template_t *ctx_tmpl; + crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; + kcf_prov_tried_t *list = NULL; + +retry: + /* pd is returned held */ + if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, + list, CRYPTO_FG_ENCRYPT_ATOMIC, CHECK_RESTRICT(crq), + plaintext->cd_length)) == NULL) { + if (list != NULL) + kcf_free_triedlist(list); + return (error); + } + + /* + * For SW providers, check the validity of the context template + * It is very rare that the generation number mis-matches, so + * is acceptable to fail here, and let the consumer recover by + * freeing this tmpl and create a new one for the key and new SW + * provider + */ + if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && + ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { + if (ctx_tmpl->ct_generation != me->me_gen_swprov) { + if (list != NULL) + kcf_free_triedlist(list); + KCF_PROV_REFRELE(pd); + return (CRYPTO_OLD_CTX_TEMPLATE); + } else { + spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; + } + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(crq, pd)) { + crypto_mechanism_t lmech; + + lmech = *mech; + KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); + + error = KCF_PROV_ENCRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key, + plaintext, ciphertext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq)); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_ENCRYPT_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, pd->pd_sid, + mech, key, plaintext, ciphertext, spi_ctx_tmpl); + error = kcf_submit_request(pd, NULL, crq, ¶ms, B_FALSE); + } + + if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && + IS_RECOVERABLE(error)) { + /* Add pd to the linked list of providers tried. */ + if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + goto retry; + } + + if (list != NULL) + kcf_free_triedlist(list); + + KCF_PROV_REFRELE(pd); + return (error); +} + +/* + * crypto_encrypt_init_prov() + * + * Calls crypto_cipher_init_prov() to initialize an encryption operation. 
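 *
 * For illustration only, the multi-part flow built on this initializer
 * pairs with the update/final routines further down in this file:
 *
 *	crypto_context_t cctx;
 *
 *	rv = crypto_encrypt_init(&mech, &key, tmpl, &cctx, NULL);
 *	(for each plaintext chunk)
 *		rv = crypto_encrypt_update(cctx, &pt_chunk, &ct_chunk, NULL);
 *	rv = crypto_encrypt_final(cctx, &ct_tail, NULL);
 *
 * with mech, key and tmpl prepared as in the single-part case and each
 * chunk wrapped in a crypto_data_t.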
+ */ +int +crypto_encrypt_init_prov(crypto_provider_t pd, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_context_t *ctxp, + crypto_call_req_t *crq) +{ + return (crypto_cipher_init_prov(pd, sid, mech, key, tmpl, ctxp, crq, + CRYPTO_FG_ENCRYPT)); +} + +/* + * crypto_encrypt_init() + * + * Calls crypto_cipher_init() to initialize an encryption operation + */ +int +crypto_encrypt_init(crypto_mechanism_t *mech, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_context_t *ctxp, + crypto_call_req_t *crq) +{ + return (crypto_cipher_init(mech, key, tmpl, ctxp, crq, + CRYPTO_FG_ENCRYPT)); +} + +/* + * crypto_encrypt_update() + * + * Arguments: + * context: A crypto_context_t initialized by encrypt_init(). + * plaintext: The message part to be encrypted + * ciphertext: Storage for the encrypted message part. + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs a + * part of an encryption operation. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_encrypt_update(crypto_context_t context, crypto_data_t *plaintext, + crypto_data_t *ciphertext, crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_ENCRYPT_UPDATE(pd, ctx, plaintext, + ciphertext, NULL); + KCF_PROV_INCRSTATS(pd, error); + return (error); + } + + /* Check if we should use a software provider for small jobs */ + if ((ctx->cc_flags & CRYPTO_USE_OPSTATE) && cr == NULL) { + if (plaintext->cd_length < kcf_ctx->kc_mech->me_threshold && + kcf_ctx->kc_sw_prov_desc != NULL && + KCF_IS_PROV_USABLE(kcf_ctx->kc_sw_prov_desc)) { + pd = kcf_ctx->kc_sw_prov_desc; + } + } + + KCF_WRAP_ENCRYPT_OPS_PARAMS(¶ms, KCF_OP_UPDATE, + ctx->cc_session, NULL, NULL, plaintext, ciphertext, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + + return (error); +} + +/* + * crypto_encrypt_final() + * + * Arguments: + * context: A crypto_context_t initialized by encrypt_init(). + * ciphertext: Storage for the last part of encrypted message + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs the + * final part of an encryption operation. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_encrypt_final(crypto_context_t context, crypto_data_t *ciphertext, + crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. 
*/ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_ENCRYPT_FINAL(pd, ctx, ciphertext, NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_ENCRYPT_OPS_PARAMS(¶ms, KCF_OP_FINAL, + ctx->cc_session, NULL, NULL, NULL, ciphertext, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + /* Release the hold done in kcf_new_ctx() during init step. */ + KCF_CONTEXT_COND_RELEASE(error, kcf_ctx); + return (error); +} + +/* + * crypto_decrypt_prov() + * + * Arguments: + * pd: provider descriptor + * sid: session id + * mech: crypto_mechanism_t pointer. + * mech_type is a valid value previously returned by + * crypto_mech2id(); + * When the mech's parameter is not NULL, its definition depends + * on the standard definition of the mechanism. + * key: pointer to a crypto_key_t structure. + * ciphertext: The message to be encrypted + * plaintext: Storage for the encrypted message. The length needed + * depends on the mechanism, and the plaintext's size. + * tmpl: a crypto_ctx_template_t, opaque template of a context of an + * encryption with the 'mech' using 'key'. 'tmpl' is created by + * a previous call to crypto_create_ctx_template(). + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs a + * single-part decryption of 'ciphertext' with the mechanism 'mech', using + * the key 'key'. + * When complete and successful, 'plaintext' will contain the decrypted + * message. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_decrypt_prov(crypto_provider_t provider, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_data_t *ciphertext, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_data_t *plaintext, + crypto_call_req_t *crq) +{ + kcf_req_params_t params; + kcf_provider_desc_t *pd = provider; + kcf_provider_desc_t *real_provider = pd; + int rv; + + ASSERT(KCF_PROV_REFHELD(pd)); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { + rv = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, + &real_provider, CRYPTO_FG_DECRYPT_ATOMIC); + + if (rv != CRYPTO_SUCCESS) + return (rv); + } + + KCF_WRAP_DECRYPT_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, sid, mech, key, + ciphertext, plaintext, tmpl); + + rv = kcf_submit_request(real_provider, NULL, crq, ¶ms, B_FALSE); + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + + return (rv); +} + +/* + * Same as crypto_decrypt_prov(), but relies on the KCF scheduler to + * choose a provider. See crypto_decrypt_prov() comments for more + * information. 
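 *
 * For illustration only: mirroring the single-part encrypt sketch above,
 * a synchronous decrypt of previously produced ciphertext reduces to
 *
 *	rv = crypto_decrypt(&mech, &ct, &key, NULL, &pt, NULL);
 *
 * using the same mechanism, key and crypto_data_t wrapping that were
 * used for encryption, with a NULL call request for synchronous
 * completion.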
+ */ +int +crypto_decrypt(crypto_mechanism_t *mech, crypto_data_t *ciphertext, + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext, + crypto_call_req_t *crq) +{ + int error; + kcf_mech_entry_t *me; + kcf_req_params_t params; + kcf_provider_desc_t *pd; + kcf_ctx_template_t *ctx_tmpl; + crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; + kcf_prov_tried_t *list = NULL; + +retry: + /* pd is returned held */ + if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, + list, CRYPTO_FG_DECRYPT_ATOMIC, CHECK_RESTRICT(crq), + ciphertext->cd_length)) == NULL) { + if (list != NULL) + kcf_free_triedlist(list); + return (error); + } + + /* + * For SW providers, check the validity of the context template + * It is very rare that the generation number mis-matches, so + * is acceptable to fail here, and let the consumer recover by + * freeing this tmpl and create a new one for the key and new SW + * provider + */ + if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && + ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { + if (ctx_tmpl->ct_generation != me->me_gen_swprov) { + if (list != NULL) + kcf_free_triedlist(list); + KCF_PROV_REFRELE(pd); + return (CRYPTO_OLD_CTX_TEMPLATE); + } else { + spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; + } + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(crq, pd)) { + crypto_mechanism_t lmech; + + lmech = *mech; + KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); + + error = KCF_PROV_DECRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key, + ciphertext, plaintext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq)); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_DECRYPT_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, pd->pd_sid, + mech, key, ciphertext, plaintext, spi_ctx_tmpl); + error = kcf_submit_request(pd, NULL, crq, ¶ms, B_FALSE); + } + + if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && + IS_RECOVERABLE(error)) { + /* Add pd to the linked list of providers tried. */ + if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + goto retry; + } + + if (list != NULL) + kcf_free_triedlist(list); + + KCF_PROV_REFRELE(pd); + return (error); +} + +/* + * crypto_decrypt_init_prov() + * + * Calls crypto_cipher_init_prov() to initialize a decryption operation + */ +int +crypto_decrypt_init_prov(crypto_provider_t pd, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_context_t *ctxp, + crypto_call_req_t *crq) +{ + return (crypto_cipher_init_prov(pd, sid, mech, key, tmpl, ctxp, crq, + CRYPTO_FG_DECRYPT)); +} + +/* + * crypto_decrypt_init() + * + * Calls crypto_cipher_init() to initialize a decryption operation + */ +int +crypto_decrypt_init(crypto_mechanism_t *mech, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_context_t *ctxp, + crypto_call_req_t *crq) +{ + return (crypto_cipher_init(mech, key, tmpl, ctxp, crq, + CRYPTO_FG_DECRYPT)); +} + +/* + * crypto_decrypt_update() + * + * Arguments: + * context: A crypto_context_t initialized by decrypt_init(). + * ciphertext: The message part to be decrypted + * plaintext: Storage for the decrypted message part. + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs a + * part of an decryption operation. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. 
+ */ +int +crypto_decrypt_update(crypto_context_t context, crypto_data_t *ciphertext, + crypto_data_t *plaintext, crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_DECRYPT_UPDATE(pd, ctx, ciphertext, + plaintext, NULL); + KCF_PROV_INCRSTATS(pd, error); + return (error); + } + + /* Check if we should use a software provider for small jobs */ + if ((ctx->cc_flags & CRYPTO_USE_OPSTATE) && cr == NULL) { + if (ciphertext->cd_length < kcf_ctx->kc_mech->me_threshold && + kcf_ctx->kc_sw_prov_desc != NULL && + KCF_IS_PROV_USABLE(kcf_ctx->kc_sw_prov_desc)) { + pd = kcf_ctx->kc_sw_prov_desc; + } + } + + KCF_WRAP_DECRYPT_OPS_PARAMS(¶ms, KCF_OP_UPDATE, + ctx->cc_session, NULL, NULL, ciphertext, plaintext, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + + return (error); +} + +/* + * crypto_decrypt_final() + * + * Arguments: + * context: A crypto_context_t initialized by decrypt_init(). + * plaintext: Storage for the last part of the decrypted message + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs the + * final part of a decryption operation. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_decrypt_final(crypto_context_t context, crypto_data_t *plaintext, + crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_DECRYPT_FINAL(pd, ctx, plaintext, + NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_DECRYPT_OPS_PARAMS(¶ms, KCF_OP_FINAL, + ctx->cc_session, NULL, NULL, NULL, plaintext, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + /* Release the hold done in kcf_new_ctx() during init step. */ + KCF_CONTEXT_COND_RELEASE(error, kcf_ctx); + return (error); +} + +/* + * See comments for crypto_encrypt_update(). + */ +int +crypto_encrypt_single(crypto_context_t context, crypto_data_t *plaintext, + crypto_data_t *ciphertext, crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + /* The fast path for SW providers. 
*/ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_ENCRYPT(pd, ctx, plaintext, + ciphertext, NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_ENCRYPT_OPS_PARAMS(¶ms, KCF_OP_SINGLE, pd->pd_sid, + NULL, NULL, plaintext, ciphertext, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + /* Release the hold done in kcf_new_ctx() during init step. */ + KCF_CONTEXT_COND_RELEASE(error, kcf_ctx); + return (error); +} + +/* + * See comments for crypto_decrypt_update(). + */ +int +crypto_decrypt_single(crypto_context_t context, crypto_data_t *ciphertext, + crypto_data_t *plaintext, crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_DECRYPT(pd, ctx, ciphertext, + plaintext, NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_DECRYPT_OPS_PARAMS(¶ms, KCF_OP_SINGLE, pd->pd_sid, + NULL, NULL, ciphertext, plaintext, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + /* Release the hold done in kcf_new_ctx() during init step. */ + KCF_CONTEXT_COND_RELEASE(error, kcf_ctx); + return (error); +} + +#if defined(_KERNEL) +EXPORT_SYMBOL(crypto_encrypt_prov); +EXPORT_SYMBOL(crypto_encrypt); +EXPORT_SYMBOL(crypto_encrypt_init_prov); +EXPORT_SYMBOL(crypto_encrypt_init); +EXPORT_SYMBOL(crypto_encrypt_update); +EXPORT_SYMBOL(crypto_encrypt_final); +EXPORT_SYMBOL(crypto_decrypt_prov); +EXPORT_SYMBOL(crypto_decrypt); +EXPORT_SYMBOL(crypto_decrypt_init_prov); +EXPORT_SYMBOL(crypto_decrypt_init); +EXPORT_SYMBOL(crypto_decrypt_update); +EXPORT_SYMBOL(crypto_decrypt_final); +EXPORT_SYMBOL(crypto_encrypt_single); +EXPORT_SYMBOL(crypto_decrypt_single); +#endif diff --git a/sys/contrib/openzfs/module/icp/api/kcf_ctxops.c b/sys/contrib/openzfs/module/icp/api/kcf_ctxops.c new file mode 100644 index 000000000000..21b0977d3634 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/api/kcf_ctxops.c @@ -0,0 +1,151 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> +#include <sys/crypto/api.h> +#include <sys/crypto/spi.h> +#include <sys/crypto/sched_impl.h> + +/* + * Crypto contexts manipulation routines + */ + +/* + * crypto_create_ctx_template() + * + * Arguments: + * + * mech: crypto_mechanism_t pointer. 
+ * mech_type is a valid value previously returned by + * crypto_mech2id(); + * When the mech's parameter is not NULL, its definition depends + * on the standard definition of the mechanism. + * key: pointer to a crypto_key_t structure. + * ptmpl: a storage for the opaque crypto_ctx_template_t, allocated and + * initialized by the software provider this routine is + * dispatched to. + * kmflag: KM_SLEEP/KM_NOSLEEP mem. alloc. flag. + * + * Description: + * Redirects the call to the software provider of the specified + * mechanism. That provider will allocate and pre-compute/pre-expand + * the context template, reusable by later calls to crypto_xxx_init(). + * The size and address of that provider context template are stored + * in an internal structure, kcf_ctx_template_t. The address of that + * structure is given back to the caller in *ptmpl. + * + * Context: + * Process or interrupt. + * + * Returns: + * CRYPTO_SUCCESS when the context template is successfully created. + * CRYPTO_HOST_MEMORY: mem alloc failure + * CRYPTO_ARGUMENTS_BAD: NULL storage for the ctx template. + * RYPTO_MECHANISM_INVALID: invalid mechanism 'mech'. + */ +int +crypto_create_ctx_template(crypto_mechanism_t *mech, crypto_key_t *key, + crypto_ctx_template_t *ptmpl, int kmflag) +{ + int error; + kcf_mech_entry_t *me; + kcf_provider_desc_t *pd; + kcf_ctx_template_t *ctx_tmpl; + crypto_mechanism_t prov_mech; + + /* A few args validation */ + + if (ptmpl == NULL) + return (CRYPTO_ARGUMENTS_BAD); + + if (mech == NULL) + return (CRYPTO_MECHANISM_INVALID); + + error = kcf_get_sw_prov(mech->cm_type, &pd, &me, B_TRUE); + if (error != CRYPTO_SUCCESS) + return (error); + + if ((ctx_tmpl = (kcf_ctx_template_t *)kmem_alloc( + sizeof (kcf_ctx_template_t), kmflag)) == NULL) { + KCF_PROV_REFRELE(pd); + return (CRYPTO_HOST_MEMORY); + } + + /* Pass a mechtype that the provider understands */ + prov_mech.cm_type = KCF_TO_PROV_MECHNUM(pd, mech->cm_type); + prov_mech.cm_param = mech->cm_param; + prov_mech.cm_param_len = mech->cm_param_len; + + error = KCF_PROV_CREATE_CTX_TEMPLATE(pd, &prov_mech, key, + &(ctx_tmpl->ct_prov_tmpl), &(ctx_tmpl->ct_size), KCF_RHNDL(kmflag)); + + if (error == CRYPTO_SUCCESS) { + ctx_tmpl->ct_generation = me->me_gen_swprov; + *ptmpl = ctx_tmpl; + } else { + kmem_free(ctx_tmpl, sizeof (kcf_ctx_template_t)); + } + KCF_PROV_REFRELE(pd); + + return (error); +} + +/* + * crypto_destroy_ctx_template() + * + * Arguments: + * + * tmpl: an opaque crypto_ctx_template_t previously created by + * crypto_create_ctx_template() + * + * Description: + * Frees the embedded crypto_spi_ctx_template_t, then the + * kcf_ctx_template_t. + * + * Context: + * Process or interrupt. 
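 *
 * For illustration only, the template lifecycle spans both routines in
 * this file, roughly:
 *
 *	crypto_ctx_template_t tmpl = NULL;
 *
 *	rv = crypto_create_ctx_template(&mech, &key, &tmpl, KM_SLEEP);
 *	(reuse tmpl across any number of crypto_encrypt()/crypto_decrypt()
 *	 or crypto_*_init() calls made with the same mechanism and key)
 *	crypto_destroy_ctx_template(tmpl);
 *
 * so the provider's pre-expanded key schedule is computed once and
 * reused.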
+ * + */ +void +crypto_destroy_ctx_template(crypto_ctx_template_t tmpl) +{ + kcf_ctx_template_t *ctx_tmpl = (kcf_ctx_template_t *)tmpl; + + if (ctx_tmpl == NULL) + return; + + ASSERT(ctx_tmpl->ct_prov_tmpl != NULL); + + bzero(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size); + kmem_free(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size); + kmem_free(ctx_tmpl, sizeof (kcf_ctx_template_t)); +} + +#if defined(_KERNEL) +EXPORT_SYMBOL(crypto_create_ctx_template); +EXPORT_SYMBOL(crypto_destroy_ctx_template); +#endif diff --git a/sys/contrib/openzfs/module/icp/api/kcf_digest.c b/sys/contrib/openzfs/module/icp/api/kcf_digest.c new file mode 100644 index 000000000000..aa68d69bc162 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/api/kcf_digest.c @@ -0,0 +1,491 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> +#include <sys/crypto/api.h> +#include <sys/crypto/spi.h> +#include <sys/crypto/sched_impl.h> + +/* + * Message digest routines + */ + +/* + * The following are the possible returned values common to all the routines + * below. The applicability of some of these return values depends on the + * presence of the arguments. + * + * CRYPTO_SUCCESS: The operation completed successfully. + * CRYPTO_QUEUED: A request was submitted successfully. The callback + * routine will be called when the operation is done. + * CRYPTO_MECHANISM_INVALID or CRYPTO_INVALID_MECH_PARAM + * for problems with the 'mech'. + * CRYPTO_INVALID_DATA for bogus 'data' + * CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work. + * CRYPTO_INVALID_CONTEXT: Not a valid context. + * CRYPTO_BUSY: Cannot process the request now. Schedule a + * crypto_bufcall(), or try later. + * CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED: + * No provider is capable of a function or a mechanism. + */ + + +/* + * crypto_digest_prov() + * + * Arguments: + * pd: pointer to the descriptor of the provider to use for this + * operation. + * sid: provider session id. + * mech: crypto_mechanism_t pointer. + * mech_type is a valid value previously returned by + * crypto_mech2id(); + * When the mech's parameter is not NULL, its definition depends + * on the standard definition of the mechanism. + * data: The message to be digested. + * digest: Storage for the digest. The length needed depends on the + * mechanism. + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs the + * digesting operation of 'data' on the specified + * provider with the specified session. 
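Because the same return-code contract applies to every routine in this file, a synchronous caller (one that passes a NULL crypto_call_req_t) typically folds the codes listed above into a simple pass/fail decision; CRYPTO_QUEUED is only seen by callers that supplied a callback. The helper below is purely an illustrative sketch, not something defined in this file, and the errno mapping is an assumption.

	static int
	kcf_example_status(int rv)	/* hypothetical helper */
	{
		switch (rv) {
		case CRYPTO_SUCCESS:
			return (0);
		case CRYPTO_HOST_MEMORY:
			return (ENOMEM);
		case CRYPTO_BUSY:
			return (EAGAIN);	/* retry, or via crypto_bufcall() */
		default:
			return (EIO);		/* bad mech, data, context, ... */
		}
	}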
+ * When complete and successful, 'digest' will contain the digest value. + * The caller should hold a reference on the specified provider + * descriptor before calling this function. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_digest_prov(crypto_provider_t provider, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_data_t *data, crypto_data_t *digest, + crypto_call_req_t *crq) +{ + kcf_req_params_t params; + kcf_provider_desc_t *pd = provider; + kcf_provider_desc_t *real_provider = pd; + int rv; + + ASSERT(KCF_PROV_REFHELD(pd)); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { + rv = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), + pd, &real_provider, CRYPTO_FG_DIGEST_ATOMIC); + + if (rv != CRYPTO_SUCCESS) + return (rv); + } + KCF_WRAP_DIGEST_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, sid, mech, NULL, + data, digest); + + /* no crypto context to carry between multiple parts. */ + rv = kcf_submit_request(real_provider, NULL, crq, ¶ms, B_FALSE); + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + + return (rv); +} + + +/* + * Same as crypto_digest_prov(), but relies on the KCF scheduler to + * choose a provider. See crypto_digest_prov() comments for more information. + */ +int +crypto_digest(crypto_mechanism_t *mech, crypto_data_t *data, + crypto_data_t *digest, crypto_call_req_t *crq) +{ + int error; + kcf_provider_desc_t *pd; + kcf_req_params_t params; + kcf_prov_tried_t *list = NULL; + +retry: + /* The pd is returned held */ + if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error, list, + CRYPTO_FG_DIGEST_ATOMIC, CHECK_RESTRICT(crq), + data->cd_length)) == NULL) { + if (list != NULL) + kcf_free_triedlist(list); + return (error); + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(crq, pd)) { + crypto_mechanism_t lmech; + + lmech = *mech; + KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); + error = KCF_PROV_DIGEST_ATOMIC(pd, pd->pd_sid, &lmech, data, + digest, KCF_SWFP_RHNDL(crq)); + KCF_PROV_INCRSTATS(pd, error); + } else { + if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && + (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) && + (data->cd_length > pd->pd_hash_limit)) { + error = CRYPTO_BUFFER_TOO_BIG; + } else { + KCF_WRAP_DIGEST_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, + pd->pd_sid, mech, NULL, data, digest); + + /* no crypto context to carry between multiple parts. */ + error = kcf_submit_request(pd, NULL, crq, ¶ms, + B_FALSE); + } + } + + if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && + IS_RECOVERABLE(error)) { + /* Add pd to the linked list of providers tried. */ + if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + goto retry; + } + + if (list != NULL) + kcf_free_triedlist(list); + + KCF_PROV_REFRELE(pd); + return (error); +} + +/* + * crypto_digest_init_prov() + * + * pd: pointer to the descriptor of the provider to use for this + * operation. + * sid: provider session id. + * mech: crypto_mechanism_t pointer. + * mech_type is a valid value previously returned by + * crypto_mech2id(); + * When the mech's parameter is not NULL, its definition depends + * on the standard definition of the mechanism. + * ctxp: Pointer to a crypto_context_t. + * cr: crypto_call_req_t calling conditions and call back info. 
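The atomic entry points take fully described crypto_data_t buffers. A minimal sketch of a one-shot, synchronous digest over a flat buffer follows; the function name, the SUN_CKM_SHA256 choice, and the fixed 32-byte output length are illustrative assumptions, not part of this file.

	static int
	example_digest_buf(void *msg, size_t msglen, uint8_t *md32)
	{
		crypto_mechanism_t mech = { 0 };
		crypto_data_t in, out;

		mech.cm_type = crypto_mech2id(SUN_CKM_SHA256);
		if (mech.cm_type == CRYPTO_MECH_INVALID)
			return (CRYPTO_MECHANISM_INVALID);

		in.cd_format = CRYPTO_DATA_RAW;
		in.cd_offset = 0;
		in.cd_length = msglen;
		in.cd_raw.iov_base = (char *)msg;
		in.cd_raw.iov_len = msglen;

		out.cd_format = CRYPTO_DATA_RAW;
		out.cd_offset = 0;
		out.cd_length = 32;		/* SHA-256 digest size */
		out.cd_raw.iov_base = (char *)md32;
		out.cd_raw.iov_len = 32;

		/* NULL call req: synchronous, never returns CRYPTO_QUEUED. */
		return (crypto_digest(&mech, &in, &out, NULL));
	}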
+ * + * Description: + * Asynchronously submits a request for, or synchronously performs the + * initialization of a message digest operation on the specified + * provider with the specified session. + * When complete and successful, 'ctxp' will contain a crypto_context_t + * valid for later calls to digest_update() and digest_final(). + * The caller should hold a reference on the specified provider + * descriptor before calling this function. + */ +int +crypto_digest_init_prov(crypto_provider_t provider, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_context_t *ctxp, crypto_call_req_t *crq) +{ + int error; + crypto_ctx_t *ctx; + kcf_req_params_t params; + kcf_provider_desc_t *pd = provider; + kcf_provider_desc_t *real_provider = pd; + + ASSERT(KCF_PROV_REFHELD(pd)); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { + error = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, + &real_provider, CRYPTO_FG_DIGEST); + + if (error != CRYPTO_SUCCESS) + return (error); + } + + /* Allocate and initialize the canonical context */ + if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) { + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + return (CRYPTO_HOST_MEMORY); + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(crq, pd)) { + crypto_mechanism_t lmech; + + lmech = *mech; + KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech); + error = KCF_PROV_DIGEST_INIT(real_provider, ctx, &lmech, + KCF_SWFP_RHNDL(crq)); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_DIGEST_OPS_PARAMS(¶ms, KCF_OP_INIT, sid, + mech, NULL, NULL, NULL); + error = kcf_submit_request(real_provider, ctx, crq, ¶ms, + B_FALSE); + } + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + + if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED)) + *ctxp = (crypto_context_t)ctx; + else { + /* Release the hold done in kcf_new_ctx(). */ + KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private); + } + + return (error); +} + +/* + * Same as crypto_digest_init_prov(), but relies on the KCF scheduler + * to choose a provider. See crypto_digest_init_prov() comments for + * more information. + */ +int +crypto_digest_init(crypto_mechanism_t *mech, crypto_context_t *ctxp, + crypto_call_req_t *crq) +{ + int error; + kcf_provider_desc_t *pd; + kcf_prov_tried_t *list = NULL; + +retry: + /* The pd is returned held */ + if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error, + list, CRYPTO_FG_DIGEST, CHECK_RESTRICT(crq), 0)) == NULL) { + if (list != NULL) + kcf_free_triedlist(list); + return (error); + } + + if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && + (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) { + /* + * The hardware provider has limited digest support. + * So, we fallback early here to using a software provider. + * + * XXX - need to enhance to do the fallback later in + * crypto_digest_update() if the size of accumulated input data + * exceeds the maximum size digestable by hardware provider. + */ + error = CRYPTO_BUFFER_TOO_BIG; + } else { + error = crypto_digest_init_prov(pd, pd->pd_sid, + mech, ctxp, crq); + } + + if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && + IS_RECOVERABLE(error)) { + /* Add pd to the linked list of providers tried. 
*/ + if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + goto retry; + } + + if (list != NULL) + kcf_free_triedlist(list); + KCF_PROV_REFRELE(pd); + return (error); +} + +/* + * crypto_digest_update() + * + * Arguments: + * context: A crypto_context_t initialized by digest_init(). + * data: The part of message to be digested. + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs a + * part of a message digest operation. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_digest_update(crypto_context_t context, crypto_data_t *data, + crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_DIGEST_UPDATE(pd, ctx, data, NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_DIGEST_OPS_PARAMS(¶ms, KCF_OP_UPDATE, + ctx->cc_session, NULL, NULL, data, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + return (error); +} + +/* + * crypto_digest_final() + * + * Arguments: + * context: A crypto_context_t initialized by digest_init(). + * digest: The storage for the digest. + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs the + * final part of a message digest operation. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_digest_final(crypto_context_t context, crypto_data_t *digest, + crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_DIGEST_FINAL(pd, ctx, digest, NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_DIGEST_OPS_PARAMS(¶ms, KCF_OP_FINAL, + ctx->cc_session, NULL, NULL, NULL, digest); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + /* Release the hold done in kcf_new_ctx() during init step. */ + KCF_CONTEXT_COND_RELEASE(error, kcf_ctx); + return (error); +} + +/* + * Performs a digest update on the specified key. Note that there is + * no k-API crypto_digest_key() equivalent of this function. 
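For callers that cannot present the whole message at once, the update/final pair documented above is driven through a context obtained from crypto_digest_init(). The sketch below is illustrative only, with the chunk and output descriptors assumed to be set up the same way as in the earlier one-shot example and error/cleanup paths abbreviated.

	crypto_mechanism_t mech = { 0 };
	crypto_context_t ctx;
	crypto_data_t chunk, out;
	int rv;

	mech.cm_type = crypto_mech2id(SUN_CKM_SHA256);
	rv = crypto_digest_init(&mech, &ctx, NULL);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	/* one crypto_digest_update() call per message fragment */
	rv = crypto_digest_update(ctx, &chunk, NULL);
	if (rv == CRYPTO_SUCCESS)
		rv = crypto_digest_final(ctx, &out, NULL);

	/* crypto_digest_final() drops the context hold taken at init */
	return (rv);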
+ */ +int +crypto_digest_key_prov(crypto_context_t context, crypto_key_t *key, + crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_DIGEST_KEY(pd, ctx, key, NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_DIGEST_OPS_PARAMS(¶ms, KCF_OP_DIGEST_KEY, + ctx->cc_session, NULL, key, NULL, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + return (error); +} + +/* + * See comments for crypto_digest_update() and crypto_digest_final(). + */ +int +crypto_digest_single(crypto_context_t context, crypto_data_t *data, + crypto_data_t *digest, crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_DIGEST(pd, ctx, data, digest, NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_DIGEST_OPS_PARAMS(¶ms, KCF_OP_SINGLE, pd->pd_sid, + NULL, NULL, data, digest); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + /* Release the hold done in kcf_new_ctx() during init step. */ + KCF_CONTEXT_COND_RELEASE(error, kcf_ctx); + return (error); +} + +#if defined(_KERNEL) +EXPORT_SYMBOL(crypto_digest_prov); +EXPORT_SYMBOL(crypto_digest); +EXPORT_SYMBOL(crypto_digest_init_prov); +EXPORT_SYMBOL(crypto_digest_init); +EXPORT_SYMBOL(crypto_digest_update); +EXPORT_SYMBOL(crypto_digest_final); +EXPORT_SYMBOL(crypto_digest_key_prov); +EXPORT_SYMBOL(crypto_digest_single); +#endif diff --git a/sys/contrib/openzfs/module/icp/api/kcf_mac.c b/sys/contrib/openzfs/module/icp/api/kcf_mac.c new file mode 100644 index 000000000000..a7722d8f914c --- /dev/null +++ b/sys/contrib/openzfs/module/icp/api/kcf_mac.c @@ -0,0 +1,645 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include <sys/zfs_context.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> +#include <sys/crypto/api.h> +#include <sys/crypto/spi.h> +#include <sys/crypto/sched_impl.h> + +/* + * Message authentication codes routines. + */ + +/* + * The following are the possible returned values common to all the routines + * below. The applicability of some of these return values depends on the + * presence of the arguments. + * + * CRYPTO_SUCCESS: The operation completed successfully. + * CRYPTO_QUEUED: A request was submitted successfully. The callback + * routine will be called when the operation is done. + * CRYPTO_INVALID_MECH_NUMBER, CRYPTO_INVALID_MECH_PARAM, or + * CRYPTO_INVALID_MECH for problems with the 'mech'. + * CRYPTO_INVALID_DATA for bogus 'data' + * CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work. + * CRYPTO_INVALID_CONTEXT: Not a valid context. + * CRYPTO_BUSY: Cannot process the request now. Schedule a + * crypto_bufcall(), or try later. + * CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED: No provider is + * capable of a function or a mechanism. + * CRYPTO_INVALID_KEY: bogus 'key' argument. + * CRYPTO_INVALID_MAC: bogus 'mac' argument. + */ + +/* + * crypto_mac_prov() + * + * Arguments: + * mech: crypto_mechanism_t pointer. + * mech_type is a valid value previously returned by + * crypto_mech2id(); + * When the mech's parameter is not NULL, its definition depends + * on the standard definition of the mechanism. + * key: pointer to a crypto_key_t structure. + * data: The message to compute the MAC for. + * mac: Storage for the MAC. The length needed depends on the mechanism. + * tmpl: a crypto_ctx_template_t, opaque template of a context of a + * MAC with the 'mech' using 'key'. 'tmpl' is created by + * a previous call to crypto_create_ctx_template(). + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs a + * single-part message authentication of 'data' with the mechanism + * 'mech', using * the key 'key', on the specified provider with + * the specified session id. + * When complete and successful, 'mac' will contain the message + * authentication code. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'crq'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_mac_prov(crypto_provider_t provider, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq) +{ + kcf_req_params_t params; + kcf_provider_desc_t *pd = provider; + kcf_provider_desc_t *real_provider = pd; + int rv; + + ASSERT(KCF_PROV_REFHELD(pd)); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { + rv = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, + &real_provider, CRYPTO_FG_MAC_ATOMIC); + + if (rv != CRYPTO_SUCCESS) + return (rv); + } + + KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, sid, mech, key, + data, mac, tmpl); + rv = kcf_submit_request(real_provider, NULL, crq, ¶ms, B_FALSE); + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + + return (rv); +} + +/* + * Same as crypto_mac_prov(), but relies on the KCF scheduler to choose + * a provider. See crypto_mac() comments for more information. 
+ */ +int +crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data, + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac, + crypto_call_req_t *crq) +{ + int error; + kcf_mech_entry_t *me; + kcf_req_params_t params; + kcf_provider_desc_t *pd; + kcf_ctx_template_t *ctx_tmpl; + crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; + kcf_prov_tried_t *list = NULL; + +retry: + /* The pd is returned held */ + if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, + list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq), + data->cd_length)) == NULL) { + if (list != NULL) + kcf_free_triedlist(list); + return (error); + } + + /* + * For SW providers, check the validity of the context template + * It is very rare that the generation number mis-matches, so + * is acceptable to fail here, and let the consumer recover by + * freeing this tmpl and create a new one for the key and new SW + * provider + */ + if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && + ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { + if (ctx_tmpl->ct_generation != me->me_gen_swprov) { + if (list != NULL) + kcf_free_triedlist(list); + KCF_PROV_REFRELE(pd); + return (CRYPTO_OLD_CTX_TEMPLATE); + } else { + spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; + } + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(crq, pd)) { + crypto_mechanism_t lmech; + + lmech = *mech; + KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); + + error = KCF_PROV_MAC_ATOMIC(pd, pd->pd_sid, &lmech, key, data, + mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq)); + KCF_PROV_INCRSTATS(pd, error); + } else { + if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && + (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) && + (data->cd_length > pd->pd_hash_limit)) { + /* + * XXX - We need a check to see if this is indeed + * a HMAC. So far, all kernel clients use + * this interface only for HMAC. So, this is fine + * for now. + */ + error = CRYPTO_BUFFER_TOO_BIG; + } else { + KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, + pd->pd_sid, mech, key, data, mac, spi_ctx_tmpl); + + error = kcf_submit_request(pd, NULL, crq, ¶ms, + KCF_ISDUALREQ(crq)); + } + } + + if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && + IS_RECOVERABLE(error)) { + /* Add pd to the linked list of providers tried. */ + if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + goto retry; + } + + if (list != NULL) + kcf_free_triedlist(list); + + KCF_PROV_REFRELE(pd); + return (error); +} + +/* + * Single part operation to compute the MAC corresponding to the specified + * 'data' and to verify that it matches the MAC specified by 'mac'. + * The other arguments are the same as the function crypto_mac_prov(). 
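Putting the pieces together, a minimal synchronous HMAC computation over a flat buffer via crypto_mac() might look like the sketch below; the helper name is hypothetical, the optional context template is simply omitted (NULL), and the buffer setup mirrors the digest example. crypto_mac_verify_prov() and crypto_mac_verify() follow the same calling convention, except that 'mac' carries the expected value to be checked.

	static int
	example_hmac_sha512(crypto_key_t *key, void *msg, size_t msglen,
	    uint8_t *digest, size_t digestlen)
	{
		crypto_mechanism_t mech = { 0 };
		crypto_data_t in, mac;

		mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
		if (mech.cm_type == CRYPTO_MECH_INVALID)
			return (CRYPTO_MECHANISM_INVALID);

		in.cd_format = CRYPTO_DATA_RAW;
		in.cd_offset = 0;
		in.cd_length = msglen;
		in.cd_raw.iov_base = (char *)msg;
		in.cd_raw.iov_len = msglen;

		mac.cd_format = CRYPTO_DATA_RAW;
		mac.cd_offset = 0;
		mac.cd_length = digestlen;
		mac.cd_raw.iov_base = (char *)digest;
		mac.cd_raw.iov_len = digestlen;

		/* NULL template and NULL call req: one-shot, synchronous */
		return (crypto_mac(&mech, &in, key, NULL, &mac, NULL));
	}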
+ */ +int +crypto_mac_verify_prov(crypto_provider_t provider, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq) +{ + kcf_req_params_t params; + kcf_provider_desc_t *pd = provider; + kcf_provider_desc_t *real_provider = pd; + int rv; + + ASSERT(KCF_PROV_REFHELD(pd)); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { + rv = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, + &real_provider, CRYPTO_FG_MAC_ATOMIC); + + if (rv != CRYPTO_SUCCESS) + return (rv); + } + + KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_MAC_VERIFY_ATOMIC, sid, mech, + key, data, mac, tmpl); + rv = kcf_submit_request(real_provider, NULL, crq, ¶ms, B_FALSE); + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + + return (rv); +} + +/* + * Same as crypto_mac_verify_prov(), but relies on the KCF scheduler to choose + * a provider. See crypto_mac_verify_prov() comments for more information. + */ +int +crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data, + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac, + crypto_call_req_t *crq) +{ + int error; + kcf_mech_entry_t *me; + kcf_req_params_t params; + kcf_provider_desc_t *pd; + kcf_ctx_template_t *ctx_tmpl; + crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; + kcf_prov_tried_t *list = NULL; + +retry: + /* The pd is returned held */ + if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, + list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq), + data->cd_length)) == NULL) { + if (list != NULL) + kcf_free_triedlist(list); + return (error); + } + + /* + * For SW providers, check the validity of the context template + * It is very rare that the generation number mis-matches, so + * is acceptable to fail here, and let the consumer recover by + * freeing this tmpl and create a new one for the key and new SW + * provider + */ + if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && + ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { + if (ctx_tmpl->ct_generation != me->me_gen_swprov) { + if (list != NULL) + kcf_free_triedlist(list); + KCF_PROV_REFRELE(pd); + return (CRYPTO_OLD_CTX_TEMPLATE); + } else { + spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; + } + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(crq, pd)) { + crypto_mechanism_t lmech; + + lmech = *mech; + KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); + + error = KCF_PROV_MAC_VERIFY_ATOMIC(pd, pd->pd_sid, &lmech, key, + data, mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq)); + KCF_PROV_INCRSTATS(pd, error); + } else { + if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && + (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) && + (data->cd_length > pd->pd_hash_limit)) { + /* see comments in crypto_mac() */ + error = CRYPTO_BUFFER_TOO_BIG; + } else { + KCF_WRAP_MAC_OPS_PARAMS(¶ms, + KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech, + key, data, mac, spi_ctx_tmpl); + + error = kcf_submit_request(pd, NULL, crq, ¶ms, + KCF_ISDUALREQ(crq)); + } + } + + if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && + IS_RECOVERABLE(error)) { + /* Add pd to the linked list of providers tried. */ + if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + goto retry; + } + + if (list != NULL) + kcf_free_triedlist(list); + + KCF_PROV_REFRELE(pd); + return (error); +} + +/* + * crypto_mac_init_prov() + * + * Arguments: + * pd: pointer to the descriptor of the provider to use for this + * operation. + * sid: provider session id. 
+ * mech: crypto_mechanism_t pointer. + * mech_type is a valid value previously returned by + * crypto_mech2id(); + * When the mech's parameter is not NULL, its definition depends + * on the standard definition of the mechanism. + * key: pointer to a crypto_key_t structure. + * tmpl: a crypto_ctx_template_t, opaque template of a context of a + * MAC with the 'mech' using 'key'. 'tmpl' is created by + * a previous call to crypto_create_ctx_template(). + * ctxp: Pointer to a crypto_context_t. + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs the + * initialization of a MAC operation on the specified provider with + * the specified session. + * When possible and applicable, will internally use the pre-computed MAC + * context from the context template, tmpl. + * When complete and successful, 'ctxp' will contain a crypto_context_t + * valid for later calls to mac_update() and mac_final(). + * The caller should hold a reference on the specified provider + * descriptor before calling this function. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid, + crypto_mechanism_t *mech, crypto_key_t *key, crypto_spi_ctx_template_t tmpl, + crypto_context_t *ctxp, crypto_call_req_t *crq) +{ + int rv; + crypto_ctx_t *ctx; + kcf_req_params_t params; + kcf_provider_desc_t *pd = provider; + kcf_provider_desc_t *real_provider = pd; + + ASSERT(KCF_PROV_REFHELD(pd)); + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { + rv = kcf_get_hardware_provider(mech->cm_type, + CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, + &real_provider, CRYPTO_FG_MAC); + + if (rv != CRYPTO_SUCCESS) + return (rv); + } + + /* Allocate and initialize the canonical context */ + if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) { + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + return (CRYPTO_HOST_MEMORY); + } + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(crq, pd)) { + crypto_mechanism_t lmech; + + lmech = *mech; + KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech); + rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl, + KCF_SWFP_RHNDL(crq)); + KCF_PROV_INCRSTATS(pd, rv); + } else { + KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_INIT, sid, mech, key, + NULL, NULL, tmpl); + rv = kcf_submit_request(real_provider, ctx, crq, ¶ms, + B_FALSE); + } + + if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) + KCF_PROV_REFRELE(real_provider); + + if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED)) + *ctxp = (crypto_context_t)ctx; + else { + /* Release the hold done in kcf_new_ctx(). */ + KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private); + } + + return (rv); +} + +/* + * Same as crypto_mac_init_prov(), but relies on the KCF scheduler to + * choose a provider. See crypto_mac_init_prov() comments for more + * information. 
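The CRYPTO_OLD_CTX_TEMPLATE recovery that the comments above describe for template users looks roughly like this at a call site of crypto_mac_init() (defined just below). This is only a sketch: the mechanism, key, template, chunk, and mac variables are assumed to have been prepared as in the earlier examples, and error/cleanup paths are abbreviated.

	rv = crypto_mac_init(&mech, &key, tmpl, &ctx, NULL);
	if (rv == CRYPTO_OLD_CTX_TEMPLATE) {
		/* the SW provider changed; rebuild the template and retry */
		crypto_destroy_ctx_template(tmpl);
		rv = crypto_create_ctx_template(&mech, &key, &tmpl, KM_SLEEP);
		if (rv == CRYPTO_SUCCESS)
			rv = crypto_mac_init(&mech, &key, tmpl, &ctx, NULL);
	}
	if (rv == CRYPTO_SUCCESS) {
		rv = crypto_mac_update(ctx, &chunk, NULL);  /* per fragment */
		if (rv == CRYPTO_SUCCESS)
			rv = crypto_mac_final(ctx, &mac, NULL);
	}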
+ */ +int +crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key, + crypto_ctx_template_t tmpl, crypto_context_t *ctxp, + crypto_call_req_t *crq) +{ + int error; + kcf_mech_entry_t *me; + kcf_provider_desc_t *pd; + kcf_ctx_template_t *ctx_tmpl; + crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; + kcf_prov_tried_t *list = NULL; + +retry: + /* The pd is returned held */ + if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, + list, CRYPTO_FG_MAC, CHECK_RESTRICT(crq), 0)) == NULL) { + if (list != NULL) + kcf_free_triedlist(list); + return (error); + } + + /* + * For SW providers, check the validity of the context template + * It is very rare that the generation number mis-matches, so + * is acceptable to fail here, and let the consumer recover by + * freeing this tmpl and create a new one for the key and new SW + * provider + */ + + if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && + ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { + if (ctx_tmpl->ct_generation != me->me_gen_swprov) { + if (list != NULL) + kcf_free_triedlist(list); + KCF_PROV_REFRELE(pd); + return (CRYPTO_OLD_CTX_TEMPLATE); + } else { + spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; + } + } + + if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && + (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) { + /* + * The hardware provider has limited HMAC support. + * So, we fallback early here to using a software provider. + * + * XXX - need to enhance to do the fallback later in + * crypto_mac_update() if the size of accumulated input data + * exceeds the maximum size digestable by hardware provider. + */ + error = CRYPTO_BUFFER_TOO_BIG; + } else { + error = crypto_mac_init_prov(pd, pd->pd_sid, mech, key, + spi_ctx_tmpl, ctxp, crq); + } + if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && + IS_RECOVERABLE(error)) { + /* Add pd to the linked list of providers tried. */ + if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + goto retry; + } + + if (list != NULL) + kcf_free_triedlist(list); + + KCF_PROV_REFRELE(pd); + return (error); +} + +/* + * crypto_mac_update() + * + * Arguments: + * context: A crypto_context_t initialized by mac_init(). + * data: The message part to be MAC'ed + * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs a + * part of a MAC operation. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_mac_update(crypto_context_t context, crypto_data_t *data, + crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + kcf_req_params_t params; + int rv; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + rv = KCF_PROV_MAC_UPDATE(pd, ctx, data, NULL); + KCF_PROV_INCRSTATS(pd, rv); + } else { + KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_UPDATE, + ctx->cc_session, NULL, NULL, data, NULL, NULL); + rv = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + return (rv); +} + +/* + * crypto_mac_final() + * + * Arguments: + * context: A crypto_context_t initialized by mac_init(). + * mac: Storage for the message authentication code. 
+ * cr: crypto_call_req_t calling conditions and call back info. + * + * Description: + * Asynchronously submits a request for, or synchronously performs a + * part of a message authentication operation. + * + * Context: + * Process or interrupt, according to the semantics dictated by the 'cr'. + * + * Returns: + * See comment in the beginning of the file. + */ +int +crypto_mac_final(crypto_context_t context, crypto_data_t *mac, + crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + kcf_req_params_t params; + int rv; + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER); + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + rv = KCF_PROV_MAC_FINAL(pd, ctx, mac, NULL); + KCF_PROV_INCRSTATS(pd, rv); + } else { + KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_FINAL, + ctx->cc_session, NULL, NULL, NULL, mac, NULL); + rv = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + /* Release the hold done in kcf_new_ctx() during init step. */ + KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx); + return (rv); +} + +/* + * See comments for crypto_mac_update() and crypto_mac_final(). + */ +int +crypto_mac_single(crypto_context_t context, crypto_data_t *data, + crypto_data_t *mac, crypto_call_req_t *cr) +{ + crypto_ctx_t *ctx = (crypto_ctx_t *)context; + kcf_context_t *kcf_ctx; + kcf_provider_desc_t *pd; + int error; + kcf_req_params_t params; + + + if ((ctx == NULL) || + ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) || + ((pd = kcf_ctx->kc_prov_desc) == NULL)) { + return (CRYPTO_INVALID_CONTEXT); + } + + + /* The fast path for SW providers. */ + if (CHECK_FASTPATH(cr, pd)) { + error = KCF_PROV_MAC(pd, ctx, data, mac, NULL); + KCF_PROV_INCRSTATS(pd, error); + } else { + KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_SINGLE, pd->pd_sid, + NULL, NULL, data, mac, NULL); + error = kcf_submit_request(pd, ctx, cr, ¶ms, B_FALSE); + } + + /* Release the hold done in kcf_new_ctx() during init step. */ + KCF_CONTEXT_COND_RELEASE(error, kcf_ctx); + return (error); +} + +#if defined(_KERNEL) +EXPORT_SYMBOL(crypto_mac_prov); +EXPORT_SYMBOL(crypto_mac); +EXPORT_SYMBOL(crypto_mac_verify_prov); +EXPORT_SYMBOL(crypto_mac_verify); +EXPORT_SYMBOL(crypto_mac_init_prov); +EXPORT_SYMBOL(crypto_mac_init); +EXPORT_SYMBOL(crypto_mac_update); +EXPORT_SYMBOL(crypto_mac_final); +EXPORT_SYMBOL(crypto_mac_single); +#endif diff --git a/sys/contrib/openzfs/module/icp/api/kcf_miscapi.c b/sys/contrib/openzfs/module/icp/api/kcf_miscapi.c new file mode 100644 index 000000000000..c0f415b264a7 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/api/kcf_miscapi.c @@ -0,0 +1,127 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include <sys/zfs_context.h> +#include <sys/crypto/common.h> +#include <sys/crypto/api.h> +#include <sys/crypto/impl.h> +#include <sys/crypto/sched_impl.h> + +/* + * All event subscribers are put on a list. kcf_notify_list_lock + * protects changes to this list. + * + * The following locking order is maintained in the code - The + * global kcf_notify_list_lock followed by the individual lock + * in a kcf_ntfy_elem structure (kn_lock). + */ +kmutex_t ntfy_list_lock; +kcondvar_t ntfy_list_cv; /* cv the service thread waits on */ +static kcf_ntfy_elem_t *ntfy_list_head; + +/* + * crypto_mech2id() + * + * Arguments: + * . mechname: A null-terminated string identifying the mechanism name. + * + * Description: + * Walks the mechanisms tables, looking for an entry that matches the + * mechname. Once it find it, it builds the 64-bit mech_type and returns + * it. If there are no hardware or software providers for the mechanism, + * but there is an unloaded software provider, this routine will attempt + * to load it. + * + * Context: + * Process and interruption. + * + * Returns: + * The unique mechanism identified by 'mechname', if found. + * CRYPTO_MECH_INVALID otherwise. + */ +crypto_mech_type_t +crypto_mech2id(char *mechname) +{ + return (crypto_mech2id_common(mechname, B_TRUE)); +} + +/* + * We walk the notification list and do the callbacks. + */ +void +kcf_walk_ntfylist(uint32_t event, void *event_arg) +{ + kcf_ntfy_elem_t *nep; + int nelem = 0; + + mutex_enter(&ntfy_list_lock); + + /* + * Count how many clients are on the notification list. We need + * this count to ensure that clients which joined the list after we + * have started this walk, are not wrongly notified. + */ + for (nep = ntfy_list_head; nep != NULL; nep = nep->kn_next) + nelem++; + + for (nep = ntfy_list_head; (nep != NULL && nelem); nep = nep->kn_next) { + nelem--; + + /* + * Check if this client is interested in the + * event. + */ + if (!(nep->kn_event_mask & event)) + continue; + + mutex_enter(&nep->kn_lock); + nep->kn_state = NTFY_RUNNING; + mutex_exit(&nep->kn_lock); + mutex_exit(&ntfy_list_lock); + + /* + * We invoke the callback routine with no locks held. Another + * client could have joined the list meanwhile. This is fine + * as we maintain nelem as stated above. The NULL check in the + * for loop guards against shrinkage. Also, any callers of + * crypto_unnotify_events() at this point cv_wait till kn_state + * changes to NTFY_WAITING. Hence, nep is assured to be valid. 
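Stepping back to crypto_mech2id() defined above: consumers normally resolve mechanism names once and cache the resulting ids, since the lookup may attempt to load an unloaded software provider. A small illustrative sketch, with SUN_CKM_AES_CCM used only as an example of a mechanism-name constant:

	static crypto_mech_type_t ccm_mech = CRYPTO_MECH_INVALID;

	if (ccm_mech == CRYPTO_MECH_INVALID)
		ccm_mech = crypto_mech2id(SUN_CKM_AES_CCM);
	if (ccm_mech == CRYPTO_MECH_INVALID)
		return (CRYPTO_MECHANISM_INVALID);	/* nothing provides it */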
+ */ + (*nep->kn_func)(event, event_arg); + + mutex_enter(&nep->kn_lock); + nep->kn_state = NTFY_WAITING; + cv_broadcast(&nep->kn_cv); + mutex_exit(&nep->kn_lock); + + mutex_enter(&ntfy_list_lock); + } + + mutex_exit(&ntfy_list_lock); +} + +#if defined(_KERNEL) +EXPORT_SYMBOL(crypto_mech2id); +#endif diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman new file mode 100644 index 000000000000..48fea7bb333e --- /dev/null +++ b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman @@ -0,0 +1,23 @@ + --------------------------------------------------------------------------- + Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software is allowed (with or without + changes) provided that: + + 1. source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + 2. binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation; + + 3. the name of the copyright holder is not used to endorse products + built using this software without specific written permission. + + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip new file mode 100644 index 000000000000..5f822cf27586 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip @@ -0,0 +1 @@ +PORTIONS OF AES FUNCTIONALITY diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl new file mode 100644 index 000000000000..92c9e196a318 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl @@ -0,0 +1,127 @@ + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a dual license, i.e. both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. Actually both licenses are BSD-style + Open Source licenses. In case of any license issues related to OpenSSL + please contact openssl-core@openssl.org. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip new file mode 100644 index 000000000000..5f822cf27586 --- /dev/null +++ b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip @@ -0,0 +1 @@ +PORTIONS OF AES FUNCTIONALITY diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/aes/aes_aesni.S b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/aes_aesni.S new file mode 100644 index 000000000000..4a80c62097ae --- /dev/null +++ b/sys/contrib/openzfs/module/icp/asm-x86_64/aes/aes_aesni.S @@ -0,0 +1,748 @@ +/* + * ==================================================================== + * Written by Intel Corporation for the OpenSSL project to add support + * for Intel AES-NI instructions. Rights for redistribution and usage + * in source and binary forms are granted according to the OpenSSL + * license. + * + * Author: Huang Ying <ying.huang at intel dot com> + * Vinodh Gopal <vinodh.gopal at intel dot com> + * Kahraman Akdemir + * + * Intel AES-NI is a new set of Single Instruction Multiple Data (SIMD) + * instructions that are going to be introduced in the next generation + * of Intel processor, as of 2009. These instructions enable fast and + * secure data encryption and decryption, using the Advanced Encryption + * Standard (AES), defined by FIPS Publication number 197. The + * architecture introduces six instructions that offer full hardware + * support for AES. Four of them support high performance data + * encryption and decryption, and the other two instructions support + * the AES key expansion procedure. 
+ * ==================================================================== + */ + +/* + * ==================================================================== + * Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright |