author     Pawel Jakub Dawidek <pjd@FreeBSD.org>  2006-07-25 19:32:58 +0000
committer  Pawel Jakub Dawidek <pjd@FreeBSD.org>  2006-07-25 19:32:58 +0000
commit     b623eec50973163ec943d89691e4089c5115c4e3 (patch)
tree       1635ceac93e95dc995604b83baefb63e64c3010e /sys/crypto
parent     78c344f3da94f34938dcc1a95512f16a2ba6ae8f (diff)
Avoid memory allocations when the given address is already 16 bytes aligned.
Such an address can be used directly in padlock's AES. This improves speed
of geli(8) significantly:

# sysctl kern.geom.zero.clear=0
# geli onetime -s 4096 gzero
# dd if=/dev/gzero.eli of=/dev/null bs=1m count=1000

Before: 113MB/s
After:  203MB/s

BTW. If sector size is set to 128kB, I can read at 276MB/s :)
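For reference, the heart of the change is a simple pointer test: PadLock's AES engine can work on a buffer in place only if it starts on a 16-byte boundary, so the bounce buffer (and the copy in/out around it) is needed only when that test fails. A minimal standalone sketch of the test, using a hypothetical helper name purely for illustration:

	#include <stdint.h>

	/*
	 * Return non-zero when addr already sits on a 16-byte boundary and
	 * can be handed to the PadLock engine directly; zero means an
	 * aligned bounce buffer has to be allocated instead.
	 * Illustrative only; not part of the driver.
	 */
	static int
	is_16byte_aligned(const void *addr)
	{
		return (((uintptr_t)addr & 0xf) == 0);
	}

The same "& 0xf" check appears in padlock_cipher_alloc() in the diff below; when it fails, the code falls back to the old malloc()-and-copy path.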
Notes: svn path=/head/; revision=160676
Diffstat (limited to 'sys/crypto')
-rw-r--r--  sys/crypto/via/padlock_cipher.c  57
1 file changed, 51 insertions(+), 6 deletions(-)
diff --git a/sys/crypto/via/padlock_cipher.c b/sys/crypto/via/padlock_cipher.c
index cbc94e38b991..6b1862184e39 100644
--- a/sys/crypto/via/padlock_cipher.c
+++ b/sys/crypto/via/padlock_cipher.c
@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/module.h>
 #include <sys/malloc.h>
 #include <sys/libkern.h>
+#include <sys/uio.h>
 
 #include <opencrypto/cryptodev.h>
 #include <crypto/rijndael/rijndael.h>
@@ -158,6 +159,43 @@ padlock_cipher_setup(struct padlock_session *ses, struct cryptoini *encini)
 	return (0);
 }
 
+/*
+ * Function checks if the given buffer is already 16 bytes aligned.
+ * If it is there is no need to allocate new buffer.
+ * If it isn't, new buffer is allocated.
+ */
+static u_char *
+padlock_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
+    int *allocated)
+{
+	u_char *addr;
+
+	if (crp->crp_flags & CRYPTO_F_IMBUF)
+		goto alloc;
+	else {
+		if (crp->crp_flags & CRYPTO_F_IOV) {
+			struct uio *uio;
+			struct iovec *iov;
+
+			uio = (struct uio *)crp->crp_buf;
+			if (uio->uio_iovcnt != 1)
+				goto alloc;
+			iov = uio->uio_iov;
+			addr = (u_char *)iov->iov_base + enccrd->crd_skip;
+		} else {
+			addr = (u_char *)crp->crp_buf;
+		}
+		if (((uintptr_t)addr & 0xf) != 0)	/* 16 bytes aligned? */
+			goto alloc;
+		*allocated = 0;
+		return (addr);
+	}
+alloc:
+	*allocated = 1;
+	addr = malloc(enccrd->crd_len + 16, M_PADLOCK, M_NOWAIT);
+	return (addr);
+}
+
 int
 padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd,
     struct cryptop *crp)
@@ -165,12 +203,15 @@ padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd,
 	union padlock_cw *cw;
 	u_char *buf, *abuf;
 	uint32_t *key;
+	int allocated;
 
-	buf = malloc(enccrd->crd_len + 16, M_PADLOCK, M_NOWAIT);
+	buf = padlock_cipher_alloc(enccrd, crp, &allocated);
 	if (buf == NULL)
 		return (ENOMEM);
 	/* Buffer has to be 16 bytes aligned. */
 	abuf = PADLOCK_ALIGN(buf);
+	if (!allocated && abuf != buf)
+		panic("allocated=%d abuf=%p buf=%p", allocated, abuf, buf);
 
 	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
 		padlock_cipher_key_setup(ses, enccrd->crd_key,
@@ -203,13 +244,17 @@ padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd,
 		}
 	}
 
-	crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
-	    enccrd->crd_len, abuf);
+	if (allocated) {
+		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
+		    enccrd->crd_len, abuf);
+	}
 
 	padlock_cbc(abuf, abuf, enccrd->crd_len / 16, key, cw, ses->ses_iv);
 
-	crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
-	    enccrd->crd_len, abuf);
+	if (allocated) {
+		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
+		    enccrd->crd_len, abuf);
+	}
 
 	/* copy out last block for use as next session IV */
 	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
@@ -218,7 +263,7 @@ padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd,
 		    AES_BLOCK_LEN, ses->ses_iv);
 	}
 
-	if (buf != NULL) {
+	if (allocated) {
 		bzero(buf, enccrd->crd_len + 16);
 		free(buf, M_PADLOCK);
 	}
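A side note on the fallback path: when a bounce buffer is allocated, padlock_cipher_alloc() still asks malloc() for crd_len + 16 bytes so that PADLOCK_ALIGN() can round the returned pointer up to the next 16-byte boundary without running past the end of the allocation. The macro itself is not part of this hunk; the following is only an assumption about its semantics, written as a separately named sketch rather than the driver's actual definition:

	#include <stdint.h>

	/*
	 * Assumed behaviour of PADLOCK_ALIGN(): round a pointer up to the
	 * next 16-byte boundary.  The real macro lives in the driver's
	 * headers and may be spelled differently.
	 */
	#define PADLOCK_ALIGN_SKETCH(p) \
	    ((void *)(((uintptr_t)(p) + 0xf) & ~(uintptr_t)0xf))

Because rounding up adds at most 15 bytes, crd_len + 16 always leaves enough room for crd_len aligned bytes, and the cleanup path bzero()s and free()s the original buf pointer rather than the aligned abuf.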