Diffstat (limited to 'crypto/heimdal/lib/gssapi/krb5/cfx.c')
-rw-r--r--  crypto/heimdal/lib/gssapi/krb5/cfx.c  1249
1 files changed, 1070 insertions, 179 deletions
diff --git a/crypto/heimdal/lib/gssapi/krb5/cfx.c b/crypto/heimdal/lib/gssapi/krb5/cfx.c
index 6452f802ab84..28cf47bdbdd2 100644
--- a/crypto/heimdal/lib/gssapi/krb5/cfx.c
+++ b/crypto/heimdal/lib/gssapi/krb5/cfx.c
@@ -30,12 +30,10 @@
* SUCH DAMAGE.
*/
-#include "krb5/gsskrb5_locl.h"
-
-RCSID("$Id: cfx.c 19031 2006-11-13 18:02:57Z lha $");
+#include "gsskrb5_locl.h"
/*
- * Implementation of draft-ietf-krb-wg-gssapi-cfx-06.txt
+ * Implementation of RFC 4121
*/
#define CFXSentByAcceptor (1 << 0)
@@ -46,6 +44,7 @@ krb5_error_code
_gsskrb5cfx_wrap_length_cfx(krb5_context context,
krb5_crypto crypto,
int conf_req_flag,
+ int dce_style,
size_t input_length,
size_t *output_length,
size_t *cksumsize,
@@ -72,7 +71,11 @@ _gsskrb5cfx_wrap_length_cfx(krb5_context context,
/* Header is concatenated with data before encryption */
input_length += sizeof(gss_cfx_wrap_token_desc);
- ret = krb5_crypto_getpadsize(context, crypto, &padsize);
+ if (dce_style) {
+ ret = krb5_crypto_getblocksize(context, crypto, &padsize);
+ } else {
+ ret = krb5_crypto_getpadsize(context, crypto, &padsize);
+ }
if (ret) {
return ret;
}
@@ -96,49 +99,48 @@ _gsskrb5cfx_wrap_length_cfx(krb5_context context,
return 0;
}
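
The new dce_style argument above selects which quantity the plaintext is rounded up to: the krb5 pad size normally, the cipher block size in DCE style. A minimal standalone sketch of that rounding step follows; the function name and the fixed 16-byte header constant are illustrative, not the exact Heimdal arithmetic.

#include <stddef.h>

/*
 * Hypothetical illustration: round a payload (input plus the 16-byte
 * gss_cfx_wrap_token_desc header) up to a multiple of "unit", which
 * stands in for either the krb5 pad size or, in DCE style, the cipher
 * block size.
 */
static size_t
round_up_payload(size_t input_length, size_t unit)
{
    size_t total = input_length + 16;   /* header is concatenated with data */
    size_t rem;

    if (unit <= 1)
        return total;                   /* stream modes need no padding */

    rem = total % unit;
    return rem ? total + (unit - rem) : total;
}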
-krb5_error_code
-_gsskrb5cfx_max_wrap_length_cfx(krb5_context context,
- krb5_crypto crypto,
- int conf_req_flag,
- size_t input_length,
- OM_uint32 *output_length)
+OM_uint32
+_gssapi_wrap_size_cfx(OM_uint32 *minor_status,
+ const gsskrb5_ctx ctx,
+ krb5_context context,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ OM_uint32 req_output_size,
+ OM_uint32 *max_input_size)
{
krb5_error_code ret;
- *output_length = 0;
+ *max_input_size = 0;
/* 16-byte header is always first */
- if (input_length < 16)
+ if (req_output_size < 16)
return 0;
- input_length -= 16;
+ req_output_size -= 16;
if (conf_req_flag) {
size_t wrapped_size, sz;
- wrapped_size = input_length + 1;
+ wrapped_size = req_output_size + 1;
do {
wrapped_size--;
- sz = krb5_get_wrapped_length(context,
- crypto, wrapped_size);
- } while (wrapped_size && sz > input_length);
- if (wrapped_size == 0) {
- *output_length = 0;
+ sz = krb5_get_wrapped_length(context,
+ ctx->crypto, wrapped_size);
+ } while (wrapped_size && sz > req_output_size);
+ if (wrapped_size == 0)
return 0;
- }
/* inner header */
- if (wrapped_size < 16) {
- *output_length = 0;
+ if (wrapped_size < 16)
return 0;
- }
+
wrapped_size -= 16;
- *output_length = wrapped_size;
+ *max_input_size = wrapped_size;
} else {
krb5_cksumtype type;
size_t cksumsize;
- ret = krb5_crypto_get_checksum_type(context, crypto, &type);
+ ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
if (ret)
return ret;
@@ -146,48 +148,16 @@ _gsskrb5cfx_max_wrap_length_cfx(krb5_context context,
if (ret)
return ret;
- if (input_length < cksumsize)
+ if (req_output_size < cksumsize)
return 0;
/* Checksum is concatenated with data */
- *output_length = input_length - cksumsize;
+ *max_input_size = req_output_size - cksumsize;
}
return 0;
}
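
The confidentiality branch of the rewritten _gssapi_wrap_size_cfx() searches downward from the requested output size until the wrapped length fits, then strips the inner header. A self-contained sketch of that search, using a toy stand-in for krb5_get_wrapped_length():

#include <stddef.h>

/* Toy stand-in for krb5_get_wrapped_length(): round the candidate up
 * to a 16-byte boundary and add a fixed 32 bytes of overhead. */
static size_t
wrapped_length(size_t plain)
{
    return ((plain + 15) & ~(size_t)15) + 32;
}

/*
 * Sketch of the confidential-case search: account for the 16-byte
 * outer GSS header, shrink the candidate until its wrapped size fits
 * the remaining space, then remove the 16-byte encrypted inner header.
 */
static size_t
max_input_for_output(size_t req_output_size)
{
    size_t candidate;

    if (req_output_size < 16)
        return 0;
    req_output_size -= 16;

    candidate = req_output_size + 1;
    do {
        candidate--;
    } while (candidate && wrapped_length(candidate) > req_output_size);

    if (candidate < 16)
        return 0;
    return candidate - 16;
}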
-
-OM_uint32 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
- const gsskrb5_ctx context_handle,
- krb5_context context,
- int conf_req_flag,
- gss_qop_t qop_req,
- OM_uint32 req_output_size,
- OM_uint32 *max_input_size,
- krb5_keyblock *key)
-{
- krb5_error_code ret;
- krb5_crypto crypto;
-
- ret = krb5_crypto_init(context, key, 0, &crypto);
- if (ret != 0) {
- *minor_status = ret;
- return GSS_S_FAILURE;
- }
-
- ret = _gsskrb5cfx_max_wrap_length_cfx(context, crypto, conf_req_flag,
- req_output_size, max_input_size);
- if (ret != 0) {
- *minor_status = ret;
- krb5_crypto_destroy(context, crypto);
- return GSS_S_FAILURE;
- }
-
- krb5_crypto_destroy(context, crypto);
-
- return GSS_S_COMPLETE;
-}
-
/*
* Rotate "rrc" bytes to the front or back
*/
@@ -212,10 +182,10 @@ rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
tmp = buf;
} else {
tmp = malloc(rrc);
- if (tmp == NULL)
+ if (tmp == NULL)
return ENOMEM;
}
-
+
if (unrotate) {
memcpy(tmp, data, rrc);
memmove(data, (u_char *)data + rrc, left);
@@ -226,23 +196,975 @@ rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
memcpy(data, tmp, rrc);
}
- if (rrc > sizeof(buf))
+ if (rrc > sizeof(buf))
free(tmp);
return 0;
}
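
For reference, the rotation that rrc_rotate() performs can be demonstrated on its own. The sketch below uses a fixed scratch buffer and standard C only, whereas the real function falls back to malloc() when rrc exceeds its stack buffer.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/*
 * Rotate "rrc" bytes from the end of the buffer to the front
 * (the unrotate case simply reverses the copy directions).
 */
static void
rotate_right(uint8_t *data, size_t len, size_t rrc)
{
    uint8_t tmp[64];

    assert(rrc <= sizeof(tmp) && rrc <= len);
    memcpy(tmp, data + len - rrc, rrc);        /* save the tail */
    memmove(data + rrc, data, len - rrc);      /* shift the rest right */
    memcpy(data, tmp, rrc);                    /* tail becomes the head */
}

int
main(void)
{
    uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

    rotate_right(buf, sizeof(buf), 3);          /* -> 6 7 8 1 2 3 4 5 */
    assert(buf[0] == 6 && buf[7] == 5);
    return 0;
}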
+gss_iov_buffer_desc *
+_gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
+{
+ int i;
+
+ for (i = 0; i < iov_count; i++)
+ if (type == GSS_IOV_BUFFER_TYPE(iov[i].type))
+ return &iov[i];
+ return NULL;
+}
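
_gk_find_buffer() is a plain linear scan over the caller-supplied IOV array. A minimal, self-contained illustration of the same lookup over a simplified stand-in for gss_iov_buffer_desc (the struct and constants below are illustrative, not the real GSS-API types):

#include <stddef.h>

enum { BUF_HEADER, BUF_DATA, BUF_PADDING, BUF_TRAILER };

struct iov_buf {
    int    type;
    size_t length;
    void  *value;
};

static struct iov_buf *
find_buffer(struct iov_buf *iov, int iov_count, int type)
{
    int i;

    for (i = 0; i < iov_count; i++)
        if (iov[i].type == type)
            return &iov[i];
    return NULL;
}

int
main(void)
{
    unsigned char hdr[64], data[256], trl[64];
    struct iov_buf iov[] = {
        { BUF_HEADER,  sizeof(hdr),  hdr  },
        { BUF_DATA,    sizeof(data), data },
        { BUF_PADDING, 0,            NULL },
        { BUF_TRAILER, sizeof(trl),  trl  },
    };

    return find_buffer(iov, 4, BUF_HEADER) == &iov[0] ? 0 : 1;
}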
+
+OM_uint32
+_gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
+{
+ if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
+ if (buffer->buffer.length == size)
+ return GSS_S_COMPLETE;
+ free(buffer->buffer.value);
+ }
+
+ buffer->buffer.value = malloc(size);
+ buffer->buffer.length = size;
+ if (buffer->buffer.value == NULL) {
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
+
+ return GSS_S_COMPLETE;
+}
+
+
+OM_uint32
+_gk_verify_buffers(OM_uint32 *minor_status,
+ const gsskrb5_ctx ctx,
+ const gss_iov_buffer_desc *header,
+ const gss_iov_buffer_desc *padding,
+ const gss_iov_buffer_desc *trailer)
+{
+ if (header == NULL) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+
+ if (IS_DCE_STYLE(ctx)) {
+ /*
+ * In DCE style mode we reject having a padding or trailer buffer
+ */
+ if (padding) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+ if (trailer) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+ } else {
+ /*
+ * In non-DCE style mode we require having a padding buffer
+ */
+ if (padding == NULL) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+ }
+
+ *minor_status = 0;
+ return GSS_S_COMPLETE;
+}
+
+#if 0
+OM_uint32
+_gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
+ gsskrb5_ctx ctx,
+ krb5_context context,
+ int conf_req_flag,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ OM_uint32 major_status, junk;
+ gss_iov_buffer_desc *header, *trailer, *padding;
+ size_t gsshsize, k5hsize;
+ size_t gsstsize, k5tsize;
+ size_t rrc = 0, ec = 0;
+ int i;
+ gss_cfx_wrap_token token;
+ krb5_error_code ret;
+ int32_t seq_number;
+ unsigned usage;
+ krb5_crypto_iov *data = NULL;
+
+ header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ if (header == NULL) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+
+ padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
+ if (padding != NULL) {
+ padding->buffer.length = 0;
+ }
+
+ trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
+
+ major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
+ if (major_status != GSS_S_COMPLETE) {
+ return major_status;
+ }
+
+ if (conf_req_flag) {
+ size_t k5psize = 0;
+ size_t k5pbase = 0;
+ size_t k5bsize = 0;
+ size_t size = 0;
+
+ for (i = 0; i < iov_count; i++) {
+ switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
+ case GSS_IOV_BUFFER_TYPE_DATA:
+ size += iov[i].buffer.length;
+ break;
+ default:
+ break;
+ }
+ }
+
+ size += sizeof(gss_cfx_wrap_token_desc);
+
+ *minor_status = krb5_crypto_length(context, ctx->crypto,
+ KRB5_CRYPTO_TYPE_HEADER,
+ &k5hsize);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ *minor_status = krb5_crypto_length(context, ctx->crypto,
+ KRB5_CRYPTO_TYPE_TRAILER,
+ &k5tsize);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ *minor_status = krb5_crypto_length(context, ctx->crypto,
+ KRB5_CRYPTO_TYPE_PADDING,
+ &k5pbase);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ if (k5pbase > 1) {
+ k5psize = k5pbase - (size % k5pbase);
+ } else {
+ k5psize = 0;
+ }
+
+ if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
+ *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
+ &k5bsize);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+ ec = k5bsize;
+ } else {
+ ec = k5psize;
+ }
+
+ gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
+ gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
+ } else {
+ if (IS_DCE_STYLE(ctx)) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+
+ k5hsize = 0;
+ *minor_status = krb5_crypto_length(context, ctx->crypto,
+ KRB5_CRYPTO_TYPE_CHECKSUM,
+ &k5tsize);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ gsshsize = sizeof(gss_cfx_wrap_token_desc);
+ gsstsize = k5tsize;
+ }
+
+ /*
+ *
+ */
+
+ if (trailer == NULL) {
+ rrc = gsstsize;
+ if (IS_DCE_STYLE(ctx))
+ rrc -= ec;
+ gsshsize += gsstsize;
+ gsstsize = 0;
+ } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
+ major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
+ if (major_status)
+ goto failure;
+ } else if (trailer->buffer.length < gsstsize) {
+ *minor_status = KRB5_BAD_MSIZE;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ } else
+ trailer->buffer.length = gsstsize;
+
+ /*
+ *
+ */
+
+ if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
+ major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
+ if (major_status != GSS_S_COMPLETE)
+ goto failure;
+ } else if (header->buffer.length < gsshsize) {
+ *minor_status = KRB5_BAD_MSIZE;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ } else
+ header->buffer.length = gsshsize;
+
+ token = (gss_cfx_wrap_token)header->buffer.value;
+
+ token->TOK_ID[0] = 0x05;
+ token->TOK_ID[1] = 0x04;
+ token->Flags = 0;
+ token->Filler = 0xFF;
+
+ if ((ctx->more_flags & LOCAL) == 0)
+ token->Flags |= CFXSentByAcceptor;
+
+ if (ctx->more_flags & ACCEPTOR_SUBKEY)
+ token->Flags |= CFXAcceptorSubkey;
+
+ if (ctx->more_flags & LOCAL)
+ usage = KRB5_KU_USAGE_INITIATOR_SEAL;
+ else
+ usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
+
+ if (conf_req_flag) {
+ /*
+ * In Wrap tokens with confidentiality, the EC field is
+ * used to encode the size (in bytes) of the random filler.
+ */
+ token->Flags |= CFXSealed;
+ token->EC[0] = (ec >> 8) & 0xFF;
+ token->EC[1] = (ec >> 0) & 0xFF;
+
+ } else {
+ /*
+ * In Wrap tokens without confidentiality, the EC field is
+ * used to encode the size (in bytes) of the trailing
+ * checksum.
+ *
+ * This is not used in the checksum calculation itself,
+ * because the checksum length could potentially vary
+ * depending on the data length.
+ */
+ token->EC[0] = 0;
+ token->EC[1] = 0;
+ }
+
+ /*
+ * In Wrap tokens that provide for confidentiality, the RRC
+ * field in the header contains the hex value 00 00 before
+ * encryption.
+ *
+ * In Wrap tokens that do not provide for confidentiality,
+ * both the EC and RRC fields in the appended checksum
+ * contain the hex value 00 00 for the purpose of calculating
+ * the checksum.
+ */
+ token->RRC[0] = 0;
+ token->RRC[1] = 0;
+
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+ krb5_auth_con_getlocalseqnumber(context,
+ ctx->auth_context,
+ &seq_number);
+ _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
+ _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
+ krb5_auth_con_setlocalseqnumber(context,
+ ctx->auth_context,
+ ++seq_number);
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+
+ data = calloc(iov_count + 3, sizeof(data[0]));
+ if (data == NULL) {
+ *minor_status = ENOMEM;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ }
+
+ if (conf_req_flag) {
+ /*
+ plain packet:
+
+ {"header" | encrypt(plaintext-data | ec-padding | E"header")}
+
+ Expanded, this is with RRC = 0:
+
+ {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }
+
+ In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)
+
+ {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data }
+ */
+
+ i = 0;
+ data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
+ data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
+ data[i].data.length = k5hsize;
+
+ for (i = 1; i < iov_count + 1; i++) {
+ switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
+ case GSS_IOV_BUFFER_TYPE_DATA:
+ data[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ break;
+ case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
+ data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
+ break;
+ default:
+ data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
+ break;
+ }
+ data[i].data.length = iov[i - 1].buffer.length;
+ data[i].data.data = iov[i - 1].buffer.value;
+ }
+
+ /*
+ * Any necessary padding is added here to ensure that the
+ * encrypted token header is always at the end of the
+ * ciphertext.
+ */
+
+ /* encrypted CFX header in trailer (or after the header if in
+ DCE mode). Copy the header into E"header".
+ */
+ data[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ if (trailer)
+ data[i].data.data = trailer->buffer.value;
+ else
+ data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);
+
+ data[i].data.length = ec + sizeof(*token);
+ memset(data[i].data.data, 0xFF, ec);
+ memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
+ i++;
+
+ /* Kerberos trailer comes after the gss trailer */
+ data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
+ data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
+ data[i].data.length = k5tsize;
+ i++;
+
+ ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
+ if (ret != 0) {
+ *minor_status = ret;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ }
+
+ if (rrc) {
+ token->RRC[0] = (rrc >> 8) & 0xFF;
+ token->RRC[1] = (rrc >> 0) & 0xFF;
+ }
+
+ } else {
+ /*
+ plain packet:
+
+ {data | "header" | gss-trailer (krb5 checksum)}
+
+ don't do RRC != 0
+
+ */
+
+ for (i = 0; i < iov_count; i++) {
+ switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
+ case GSS_IOV_BUFFER_TYPE_DATA:
+ data[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ break;
+ case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
+ data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
+ break;
+ default:
+ data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
+ break;
+ }
+ data[i].data.length = iov[i].buffer.length;
+ data[i].data.data = iov[i].buffer.value;
+ }
+
+ data[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ data[i].data.data = header->buffer.value;
+ data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
+ i++;
+
+ data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
+ if (trailer) {
+ data[i].data.data = trailer->buffer.value;
+ } else {
+ data[i].data.data = (uint8_t *)header->buffer.value +
+ sizeof(gss_cfx_wrap_token_desc);
+ }
+ data[i].data.length = k5tsize;
+ i++;
+
+ ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
+ if (ret) {
+ *minor_status = ret;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ }
+
+ if (rrc) {
+ token->RRC[0] = (rrc >> 8) & 0xFF;
+ token->RRC[1] = (rrc >> 0) & 0xFF;
+ }
+
+ token->EC[0] = (k5tsize >> 8) & 0xFF;
+ token->EC[1] = (k5tsize >> 0) & 0xFF;
+ }
+
+ if (conf_state != NULL)
+ *conf_state = conf_req_flag;
+
+ free(data);
+
+ *minor_status = 0;
+ return GSS_S_COMPLETE;
+
+ failure:
+ if (data)
+ free(data);
+
+ gss_release_iov_buffer(&junk, iov, iov_count);
+
+ return major_status;
+}
+#endif
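
The wrap path above fills in the 16-byte RFC 4121 Wrap token header: TOK_ID 05 04, Flags, 0xFF filler, then big-endian EC, RRC and a 64-bit sequence number. A standalone sketch of that layout and encoding, with illustrative names rather than Heimdal's:

#include <stdint.h>

struct cfx_wrap_header {
    uint8_t tok_id[2];   /* 0x05 0x04 */
    uint8_t flags;       /* SentByAcceptor | Sealed | AcceptorSubkey */
    uint8_t filler;      /* always 0xFF */
    uint8_t ec[2];       /* extra count: filler size (sealed) or checksum size */
    uint8_t rrc[2];      /* right rotation count, 0 before encryption */
    uint8_t snd_seq[8];  /* 64-bit big-endian sequence number */
};

static void
put_be16(uint8_t *p, uint16_t v)
{
    p[0] = (v >> 8) & 0xFF;
    p[1] =  v       & 0xFF;
}

static void
put_be64(uint8_t *p, uint64_t v)
{
    int i;

    for (i = 0; i < 8; i++)
        p[i] = (v >> (56 - 8 * i)) & 0xFF;
}

static void
build_wrap_header(struct cfx_wrap_header *h, uint8_t flags,
                  uint16_t ec, uint16_t rrc, uint64_t seq)
{
    h->tok_id[0] = 0x05;
    h->tok_id[1] = 0x04;
    h->flags     = flags;
    h->filler    = 0xFF;
    put_be16(h->ec, ec);
    put_be16(h->rrc, rrc);
    put_be64(h->snd_seq, seq);   /* upper 32 bits are 0 in this code */
}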
+
+/* This is the slow path */
+static OM_uint32
+unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
+{
+ uint8_t *p, *q;
+ size_t len = 0, skip;
+ int i;
+
+ for (i = 0; i < iov_count; i++)
+ if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
+ GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
+ GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
+ len += iov[i].buffer.length;
+
+ p = malloc(len);
+ if (p == NULL) {
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ q = p;
+
+ /* copy up */
+
+ for (i = 0; i < iov_count; i++) {
+ if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
+ GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
+ GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
+ {
+ memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
+ q += iov[i].buffer.length;
+ }
+ }
+ assert((size_t)(q - p) == len);
+
+ /* unrotate first part */
+ q = p + rrc;
+ skip = rrc;
+ for (i = 0; i < iov_count; i++) {
+ if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
+ GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
+ GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
+ {
+ if (iov[i].buffer.length <= skip) {
+ skip -= iov[i].buffer.length;
+ } else {
+ memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
+ q += iov[i].buffer.length - skip;
+ skip = 0;
+ }
+ }
+ }
+ /* copy trailer */
+ q = p;
+ skip = rrc;
+ for (i = 0; i < iov_count; i++) {
+ if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
+ GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
+ GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
+ {
+ memcpy(q, iov[i].buffer.value, min(iov[i].buffer.length, skip));
+ if (iov[i].buffer.length > skip)
+ break;
+ skip -= iov[i].buffer.length;
+ q += iov[i].buffer.length;
+ }
+ }
+ return GSS_S_COMPLETE;
+}
+
+#if 0
+
+OM_uint32
+_gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
+ gsskrb5_ctx ctx,
+ krb5_context context,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
+ gss_iov_buffer_desc *header, *trailer, *padding;
+ gss_cfx_wrap_token token, ttoken;
+ u_char token_flags;
+ krb5_error_code ret;
+ unsigned usage;
+ uint16_t ec, rrc;
+ krb5_crypto_iov *data = NULL;
+ int i, j;
+
+ *minor_status = 0;
+
+ header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ if (header == NULL) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+
+ if (header->buffer.length < sizeof(*token)) /* we check exact below */
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
+ if (padding != NULL && padding->buffer.length != 0) {
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+
+ trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
+
+ major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
+ if (major_status != GSS_S_COMPLETE) {
+ return major_status;
+ }
+
+ token = (gss_cfx_wrap_token)header->buffer.value;
+
+ if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ /* Ignore unknown flags */
+ token_flags = token->Flags &
+ (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
+
+ if (token_flags & CFXSentByAcceptor) {
+ if ((ctx->more_flags & LOCAL) == 0)
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (ctx->more_flags & ACCEPTOR_SUBKEY) {
+ if ((token_flags & CFXAcceptorSubkey) == 0)
+ return GSS_S_DEFECTIVE_TOKEN;
+ } else {
+ if (token_flags & CFXAcceptorSubkey)
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (token->Filler != 0xFF)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ if (conf_state != NULL)
+ *conf_state = (token_flags & CFXSealed) ? 1 : 0;
+
+ ec = (token->EC[0] << 8) | token->EC[1];
+ rrc = (token->RRC[0] << 8) | token->RRC[1];
+
+ /*
+ * Check sequence number
+ */
+ _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
+ _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
+ if (seq_number_hi) {
+ /* no support for 64-bit sequence numbers */
+ *minor_status = ERANGE;
+ return GSS_S_UNSEQ_TOKEN;
+ }
+
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+ ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
+ if (ret != 0) {
+ *minor_status = 0;
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+ return ret;
+ }
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+
+ /*
+ * Decrypt and/or verify checksum
+ */
+
+ if (ctx->more_flags & LOCAL) {
+ usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
+ } else {
+ usage = KRB5_KU_USAGE_INITIATOR_SEAL;
+ }
+
+ data = calloc(iov_count + 3, sizeof(data[0]));
+ if (data == NULL) {
+ *minor_status = ENOMEM;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ }
+
+ if (token_flags & CFXSealed) {
+ size_t k5tsize, k5hsize;
+
+ krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
+ krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
+
+ /* Rotate by RRC; bogus to do this in-place XXX */
+ /* Check RRC */
+
+ if (trailer == NULL) {
+ size_t gsstsize = k5tsize + sizeof(*token);
+ size_t gsshsize = k5hsize + sizeof(*token);
+
+ if (rrc != gsstsize) {
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto failure;
+ }
+
+ if (IS_DCE_STYLE(ctx))
+ gsstsize += ec;
+
+ gsshsize += gsstsize;
+
+ if (header->buffer.length != gsshsize) {
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto failure;
+ }
+ } else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto failure;
+ } else if (header->buffer.length != sizeof(*token) + k5hsize) {
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto failure;
+ } else if (rrc != 0) {
+ /* go through the slow path */
+ major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
+ if (major_status)
+ goto failure;
+ }
+
+ i = 0;
+ data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
+ data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
+ data[i].data.length = k5hsize;
+ i++;
+
+ for (j = 0; j < iov_count; i++, j++) {
+ switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
+ case GSS_IOV_BUFFER_TYPE_DATA:
+ data[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ break;
+ case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
+ data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
+ break;
+ default:
+ data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
+ break;
+ }
+ data[i].data.length = iov[j].buffer.length;
+ data[i].data.data = iov[j].buffer.value;
+ }
+
+ /* encrypted CFX header in trailer (or after the header if in
+ DCE mode). Copy the header into E"header".
+ */
+ data[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ if (trailer) {
+ data[i].data.data = trailer->buffer.value;
+ } else {
+ data[i].data.data = ((uint8_t *)header->buffer.value) +
+ header->buffer.length - k5hsize - k5tsize - ec - sizeof(*token);
+ }
+
+ data[i].data.length = ec + sizeof(*token);
+ ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
+ i++;
+
+ /* Kerberos trailer comes after the gss trailer */
+ data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
+ data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
+ data[i].data.length = k5tsize;
+ i++;
+
+ ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
+ if (ret != 0) {
+ *minor_status = ret;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ }
+
+ ttoken->RRC[0] = token->RRC[0];
+ ttoken->RRC[1] = token->RRC[1];
+
+ /* Check the integrity of the header */
+ if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
+ major_status = GSS_S_BAD_MIC;
+ goto failure;
+ }
+ } else {
+ size_t gsstsize = ec;
+ size_t gsshsize = sizeof(*token);
+
+ if (trailer == NULL) {
+ /* Check RRC */
+ if (rrc != gsstsize) {
+ *minor_status = EINVAL;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ }
+
+ gsshsize += gsstsize;
+ gsstsize = 0;
+ } else if (trailer->buffer.length != gsstsize) {
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto failure;
+ } else if (rrc != 0) {
+ /* Check RRC */
+ *minor_status = EINVAL;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ }
+
+ if (header->buffer.length != gsshsize) {
+ major_status = GSS_S_DEFECTIVE_TOKEN;
+ goto failure;
+ }
+
+ for (i = 0; i < iov_count; i++) {
+ switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
+ case GSS_IOV_BUFFER_TYPE_DATA:
+ data[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ break;
+ case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
+ data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
+ break;
+ default:
+ data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
+ break;
+ }
+ data[i].data.length = iov[i].buffer.length;
+ data[i].data.data = iov[i].buffer.value;
+ }
+
+ data[i].flags = KRB5_CRYPTO_TYPE_DATA;
+ data[i].data.data = header->buffer.value;
+ data[i].data.length = sizeof(*token);
+ i++;
+
+ data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
+ if (trailer) {
+ data[i].data.data = trailer->buffer.value;
+ } else {
+ data[i].data.data = (uint8_t *)header->buffer.value +
+ sizeof(*token);
+ }
+ data[i].data.length = ec;
+ i++;
+
+ token = (gss_cfx_wrap_token)header->buffer.value;
+ token->EC[0] = 0;
+ token->EC[1] = 0;
+ token->RRC[0] = 0;
+ token->RRC[1] = 0;
+
+ ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
+ if (ret) {
+ *minor_status = ret;
+ major_status = GSS_S_FAILURE;
+ goto failure;
+ }
+ }
+
+ if (qop_state != NULL) {
+ *qop_state = GSS_C_QOP_DEFAULT;
+ }
+
+ free(data);
+
+ *minor_status = 0;
+ return GSS_S_COMPLETE;
+
+ failure:
+ if (data)
+ free(data);
+
+ gss_release_iov_buffer(&junk, iov, iov_count);
+
+ return major_status;
+}
+#endif
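
On the receive side the code above validates TOK_ID and Filler and decodes EC, RRC and the sequence number as big-endian fields from the fixed 16-byte header. A minimal parsing sketch under those assumptions (names are illustrative, not Heimdal's):

#include <stddef.h>
#include <stdint.h>

static int
parse_wrap_header(const uint8_t *hdr, size_t len,
                  uint16_t *ec, uint16_t *rrc, uint64_t *seq)
{
    int i;

    if (len < 16)
        return -1;                       /* defective token */
    if (hdr[0] != 0x05 || hdr[1] != 0x04 || hdr[3] != 0xFF)
        return -1;

    *ec  = ((uint16_t)hdr[4] << 8) | hdr[5];
    *rrc = ((uint16_t)hdr[6] << 8) | hdr[7];

    *seq = 0;
    for (i = 0; i < 8; i++)
        *seq = (*seq << 8) | hdr[8 + i];
    return 0;
}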
+
+OM_uint32
+_gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
+ gsskrb5_ctx ctx,
+ krb5_context context,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ OM_uint32 major_status;
+ size_t size;
+ int i;
+ gss_iov_buffer_desc *header = NULL;
+ gss_iov_buffer_desc *padding = NULL;
+ gss_iov_buffer_desc *trailer = NULL;
+ size_t gsshsize = 0;
+ size_t gsstsize = 0;
+ size_t k5hsize = 0;
+ size_t k5tsize = 0;
+
+ GSSAPI_KRB5_INIT (&context);
+ *minor_status = 0;
+
+ for (size = 0, i = 0; i < iov_count; i++) {
+ switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
+ case GSS_IOV_BUFFER_TYPE_EMPTY:
+ break;
+ case GSS_IOV_BUFFER_TYPE_DATA:
+ size += iov[i].buffer.length;
+ break;
+ case GSS_IOV_BUFFER_TYPE_HEADER:
+ if (header != NULL) {
+ *minor_status = 0;
+ return GSS_S_FAILURE;
+ }
+ header = &iov[i];
+ break;
+ case GSS_IOV_BUFFER_TYPE_TRAILER:
+ if (trailer != NULL) {
+ *minor_status = 0;
+ return GSS_S_FAILURE;
+ }
+ trailer = &iov[i];
+ break;
+ case GSS_IOV_BUFFER_TYPE_PADDING:
+ if (padding != NULL) {
+ *minor_status = 0;
+ return GSS_S_FAILURE;
+ }
+ padding = &iov[i];
+ break;
+ case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
+ break;
+ default:
+ *minor_status = EINVAL;
+ return GSS_S_FAILURE;
+ }
+ }
+
+ major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
+ if (major_status != GSS_S_COMPLETE) {
+ return major_status;
+ }
+
+ if (conf_req_flag) {
+ size_t k5psize = 0;
+ size_t k5pbase = 0;
+ size_t k5bsize = 0;
+ size_t ec = 0;
+
+ size += sizeof(gss_cfx_wrap_token_desc);
+
+ *minor_status = krb5_crypto_length(context, ctx->crypto,
+ KRB5_CRYPTO_TYPE_HEADER,
+ &k5hsize);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ *minor_status = krb5_crypto_length(context, ctx->crypto,
+ KRB5_CRYPTO_TYPE_TRAILER,
+ &k5tsize);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ *minor_status = krb5_crypto_length(context, ctx->crypto,
+ KRB5_CRYPTO_TYPE_PADDING,
+ &k5pbase);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ if (k5pbase > 1) {
+ k5psize = k5pbase - (size % k5pbase);
+ } else {
+ k5psize = 0;
+ }
+
+ if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
+ *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
+ &k5bsize);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ ec = k5bsize;
+ } else {
+ ec = k5psize;
+ }
+
+ gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
+ gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
+ } else {
+ *minor_status = krb5_crypto_length(context, ctx->crypto,
+ KRB5_CRYPTO_TYPE_CHECKSUM,
+ &k5tsize);
+ if (*minor_status)
+ return GSS_S_FAILURE;
+
+ gsshsize = sizeof(gss_cfx_wrap_token_desc);
+ gsstsize = k5tsize;
+ }
+
+ if (trailer != NULL) {
+ trailer->buffer.length = gsstsize;
+ } else {
+ gsshsize += gsstsize;
+ }
+
+ header->buffer.length = gsshsize;
+
+ if (padding) {
+ /* padding is done via EC and is contained in the header or trailer */
+ padding->buffer.length = 0;
+ }
+
+ if (conf_state) {
+ *conf_state = conf_req_flag;
+ }
+
+ return GSS_S_COMPLETE;
+}
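
The EC value chosen above is the krb5 padding needed to round out the plaintext plus inner header, except that DCE style uses a whole cipher block when no padding would otherwise be required. A small sketch of that computation with illustrative parameter names:

#include <stddef.h>

/*
 * size     - data length plus sizeof(gss_cfx_wrap_token_desc)
 * k5pbase  - krb5 padding boundary (KRB5_CRYPTO_TYPE_PADDING)
 * k5bsize  - cipher block size, used only in DCE style
 */
static size_t
compute_ec(size_t size, size_t k5pbase, size_t k5bsize, int dce_style)
{
    size_t k5psize = 0;

    if (k5pbase > 1)
        k5psize = k5pbase - (size % k5pbase);

    if (k5psize == 0 && dce_style)
        return k5bsize;          /* a whole block of 0xFF filler */
    return k5psize;
}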
+
+
+
+
OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
- const gsskrb5_ctx context_handle,
+ const gsskrb5_ctx ctx,
krb5_context context,
int conf_req_flag,
- gss_qop_t qop_req,
const gss_buffer_t input_message_buffer,
int *conf_state,
- gss_buffer_t output_message_buffer,
- krb5_keyblock *key)
+ gss_buffer_t output_message_buffer)
{
- krb5_crypto crypto;
gss_cfx_wrap_token token;
krb5_error_code ret;
unsigned usage;
@@ -252,19 +1174,13 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
int32_t seq_number;
u_char *p;
- ret = krb5_crypto_init(context, key, 0, &crypto);
- if (ret != 0) {
- *minor_status = ret;
- return GSS_S_FAILURE;
- }
-
ret = _gsskrb5cfx_wrap_length_cfx(context,
- crypto, conf_req_flag,
+ ctx->crypto, conf_req_flag,
+ IS_DCE_STYLE(ctx),
input_message_buffer->length,
&wrapped_len, &cksumsize, &padlength);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
return GSS_S_FAILURE;
}
@@ -275,7 +1191,6 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
output_message_buffer->value = malloc(output_message_buffer->length);
if (output_message_buffer->value == NULL) {
*minor_status = ENOMEM;
- krb5_crypto_destroy(context, crypto);
return GSS_S_FAILURE;
}
@@ -285,9 +1200,9 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
token->TOK_ID[1] = 0x04;
token->Flags = 0;
token->Filler = 0xFF;
- if ((context_handle->more_flags & LOCAL) == 0)
+ if ((ctx->more_flags & LOCAL) == 0)
token->Flags |= CFXSentByAcceptor;
- if (context_handle->more_flags & ACCEPTOR_SUBKEY)
+ if (ctx->more_flags & ACCEPTOR_SUBKEY)
token->Flags |= CFXAcceptorSubkey;
if (conf_req_flag) {
/*
@@ -324,16 +1239,16 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
token->RRC[0] = 0;
token->RRC[1] = 0;
- HEIMDAL_MUTEX_lock(&context_handle->ctx_id_mutex);
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
krb5_auth_con_getlocalseqnumber(context,
- context_handle->auth_context,
+ ctx->auth_context,
&seq_number);
_gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
_gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
krb5_auth_con_setlocalseqnumber(context,
- context_handle->auth_context,
+ ctx->auth_context,
++seq_number);
- HEIMDAL_MUTEX_unlock(&context_handle->ctx_id_mutex);
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/*
* If confidentiality is requested, the token header is
@@ -344,7 +1259,7 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
* calculated over the plaintext concatenated with the
* token header.
*/
- if (context_handle->more_flags & LOCAL) {
+ if (ctx->more_flags & LOCAL) {
usage = KRB5_KU_USAGE_INITIATOR_SEAL;
} else {
usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
@@ -365,25 +1280,31 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
memcpy(p + input_message_buffer->length + padlength,
token, sizeof(*token));
- ret = krb5_encrypt(context, crypto,
+ ret = krb5_encrypt(context, ctx->crypto,
usage, p,
input_message_buffer->length + padlength +
sizeof(*token),
&cipher);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
_gsskrb5_release_buffer(minor_status, output_message_buffer);
return GSS_S_FAILURE;
}
assert(sizeof(*token) + cipher.length == wrapped_len);
- token->RRC[0] = (rrc >> 8) & 0xFF;
+ token->RRC[0] = (rrc >> 8) & 0xFF;
token->RRC[1] = (rrc >> 0) & 0xFF;
- ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
+ /*
+ * This is really ugly, but needed against Windows
+ * for DCERPC, as Windows rotates by EC+RRC.
+ */
+ if (IS_DCE_STYLE(ctx)) {
+ ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
+ } else {
+ ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
+ }
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
_gsskrb5_release_buffer(minor_status, output_message_buffer);
return GSS_S_FAILURE;
}
@@ -396,21 +1317,19 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
buf = malloc(input_message_buffer->length + sizeof(*token));
if (buf == NULL) {
*minor_status = ENOMEM;
- krb5_crypto_destroy(context, crypto);
_gsskrb5_release_buffer(minor_status, output_message_buffer);
return GSS_S_FAILURE;
}
memcpy(buf, input_message_buffer->value, input_message_buffer->length);
memcpy(buf + input_message_buffer->length, token, sizeof(*token));
- ret = krb5_create_checksum(context, crypto,
- usage, 0, buf,
+ ret = krb5_create_checksum(context, ctx->crypto,
+ usage, 0, buf,
input_message_buffer->length +
- sizeof(*token),
+ sizeof(*token),
&cksum);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
_gsskrb5_release_buffer(minor_status, output_message_buffer);
free(buf);
return GSS_S_FAILURE;
@@ -421,7 +1340,7 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
assert(cksum.checksum.length == cksumsize);
token->EC[0] = (cksum.checksum.length >> 8) & 0xFF;
token->EC[1] = (cksum.checksum.length >> 0) & 0xFF;
- token->RRC[0] = (rrc >> 8) & 0xFF;
+ token->RRC[0] = (rrc >> 8) & 0xFF;
token->RRC[1] = (rrc >> 0) & 0xFF;
p += sizeof(*token);
@@ -433,7 +1352,6 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
_gsskrb5_release_buffer(minor_status, output_message_buffer);
free_Checksum(&cksum);
return GSS_S_FAILURE;
@@ -441,8 +1359,6 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
free_Checksum(&cksum);
}
- krb5_crypto_destroy(context, crypto);
-
if (conf_state != NULL) {
*conf_state = conf_req_flag;
}
@@ -452,15 +1368,13 @@ OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
}
OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
- const gsskrb5_ctx context_handle,
+ const gsskrb5_ctx ctx,
krb5_context context,
const gss_buffer_t input_message_buffer,
gss_buffer_t output_message_buffer,
int *conf_state,
- gss_qop_t *qop_state,
- krb5_keyblock *key)
+ gss_qop_t *qop_state)
{
- krb5_crypto crypto;
gss_cfx_wrap_token token;
u_char token_flags;
krb5_error_code ret;
@@ -490,11 +1404,11 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
if (token_flags & CFXSentByAcceptor) {
- if ((context_handle->more_flags & LOCAL) == 0)
+ if ((ctx->more_flags & LOCAL) == 0)
return GSS_S_DEFECTIVE_TOKEN;
}
- if (context_handle->more_flags & ACCEPTOR_SUBKEY) {
+ if (ctx->more_flags & ACCEPTOR_SUBKEY) {
if ((token_flags & CFXAcceptorSubkey) == 0)
return GSS_S_DEFECTIVE_TOKEN;
} else {
@@ -524,26 +1438,21 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
return GSS_S_UNSEQ_TOKEN;
}
- HEIMDAL_MUTEX_lock(&context_handle->ctx_id_mutex);
- ret = _gssapi_msg_order_check(context_handle->order, seq_number_lo);
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+ ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
if (ret != 0) {
*minor_status = 0;
- HEIMDAL_MUTEX_unlock(&context_handle->ctx_id_mutex);
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
_gsskrb5_release_buffer(minor_status, output_message_buffer);
return ret;
}
- HEIMDAL_MUTEX_unlock(&context_handle->ctx_id_mutex);
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/*
* Decrypt and/or verify checksum
*/
- ret = krb5_crypto_init(context, key, 0, &crypto);
- if (ret != 0) {
- *minor_status = ret;
- return GSS_S_FAILURE;
- }
- if (context_handle->more_flags & LOCAL) {
+ if (ctx->more_flags & LOCAL) {
usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
} else {
usage = KRB5_KU_USAGE_INITIATOR_SEAL;
@@ -553,25 +1462,29 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
len = input_message_buffer->length;
len -= (p - (u_char *)input_message_buffer->value);
- /* Rotate by RRC; bogus to do this in-place XXX */
- *minor_status = rrc_rotate(p, len, rrc, TRUE);
- if (*minor_status != 0) {
- krb5_crypto_destroy(context, crypto);
- return GSS_S_FAILURE;
- }
-
if (token_flags & CFXSealed) {
- ret = krb5_decrypt(context, crypto, usage,
+ /*
+ * This is really ugly, but needed against Windows
+ * for DCERPC, as Windows rotates by EC+RRC.
+ */
+ if (IS_DCE_STYLE(ctx)) {
+ *minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
+ } else {
+ *minor_status = rrc_rotate(p, len, rrc, TRUE);
+ }
+ if (*minor_status != 0) {
+ return GSS_S_FAILURE;
+ }
+
+ ret = krb5_decrypt(context, ctx->crypto, usage,
p, len, &data);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
return GSS_S_BAD_MIC;
}
/* Check that there is room for the pad and token header */
if (data.length < ec + sizeof(*token)) {
- krb5_crypto_destroy(context, crypto);
krb5_data_free(&data);
return GSS_S_DEFECTIVE_TOKEN;
}
@@ -583,8 +1496,7 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];
/* Check the integrity of the header */
- if (memcmp(p, token, sizeof(*token)) != 0) {
- krb5_crypto_destroy(context, crypto);
+ if (ct_memcmp(p, token, sizeof(*token)) != 0) {
krb5_data_free(&data);
return GSS_S_BAD_MIC;
}
@@ -594,12 +1506,18 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
} else {
Checksum cksum;
+ /* Rotate by RRC; bogus to do this in-place XXX */
+ *minor_status = rrc_rotate(p, len, rrc, TRUE);
+ if (*minor_status != 0) {
+ return GSS_S_FAILURE;
+ }
+
/* Determine checksum type */
ret = krb5_crypto_get_checksum_type(context,
- crypto, &cksum.cksumtype);
+ ctx->crypto,
+ &cksum.cksumtype);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
return GSS_S_FAILURE;
}
@@ -608,7 +1526,6 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
/* Check we have at least as much data as the checksum */
if (len < cksum.checksum.length) {
*minor_status = ERANGE;
- krb5_crypto_destroy(context, crypto);
return GSS_S_BAD_MIC;
}
@@ -620,13 +1537,12 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
output_message_buffer->value = malloc(len + sizeof(*token));
if (output_message_buffer->value == NULL) {
*minor_status = ENOMEM;
- krb5_crypto_destroy(context, crypto);
return GSS_S_FAILURE;
}
/* Checksum is over (plaintext-data | "header") */
memcpy(output_message_buffer->value, p, len);
- memcpy((u_char *)output_message_buffer->value + len,
+ memcpy((u_char *)output_message_buffer->value + len,
token, sizeof(*token));
/* EC is not included in checksum calculation */
@@ -637,21 +1553,18 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
token->RRC[0] = 0;
token->RRC[1] = 0;
- ret = krb5_verify_checksum(context, crypto,
+ ret = krb5_verify_checksum(context, ctx->crypto,
usage,
output_message_buffer->value,
len + sizeof(*token),
&cksum);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
_gsskrb5_release_buffer(minor_status, output_message_buffer);
return GSS_S_BAD_MIC;
}
}
- krb5_crypto_destroy(context, crypto);
-
if (qop_state != NULL) {
*qop_state = GSS_C_QOP_DEFAULT;
}
@@ -661,14 +1574,12 @@ OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
}
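
The switch from memcmp() to ct_memcmp() when comparing the decrypted inner header against the cleartext one removes a data-dependent early exit. A minimal constant-time comparison, shown only to illustrate the idea (Heimdal supplies its own ct_memcmp):

#include <stddef.h>

static int
ct_compare(const void *a, const void *b, size_t len)
{
    const unsigned char *pa = a, *pb = b;
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < len; i++)
        diff |= pa[i] ^ pb[i];      /* accumulate, never branch on data */

    return diff != 0;               /* 0 on equality, like memcmp() == 0 */
}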
OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
- const gsskrb5_ctx context_handle,
+ const gsskrb5_ctx ctx,
krb5_context context,
gss_qop_t qop_req,
const gss_buffer_t message_buffer,
- gss_buffer_t message_token,
- krb5_keyblock *key)
+ gss_buffer_t message_token)
{
- krb5_crypto crypto;
gss_cfx_mic_token token;
krb5_error_code ret;
unsigned usage;
@@ -677,17 +1588,10 @@ OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
size_t len;
int32_t seq_number;
- ret = krb5_crypto_init(context, key, 0, &crypto);
- if (ret != 0) {
- *minor_status = ret;
- return GSS_S_FAILURE;
- }
-
len = message_buffer->length + sizeof(*token);
buf = malloc(len);
if (buf == NULL) {
*minor_status = ENOMEM;
- krb5_crypto_destroy(context, crypto);
return GSS_S_FAILURE;
}
@@ -697,38 +1601,36 @@ OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
token->TOK_ID[0] = 0x04;
token->TOK_ID[1] = 0x04;
token->Flags = 0;
- if ((context_handle->more_flags & LOCAL) == 0)
+ if ((ctx->more_flags & LOCAL) == 0)
token->Flags |= CFXSentByAcceptor;
- if (context_handle->more_flags & ACCEPTOR_SUBKEY)
+ if (ctx->more_flags & ACCEPTOR_SUBKEY)
token->Flags |= CFXAcceptorSubkey;
memset(token->Filler, 0xFF, 5);
- HEIMDAL_MUTEX_lock(&context_handle->ctx_id_mutex);
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
krb5_auth_con_getlocalseqnumber(context,
- context_handle->auth_context,
+ ctx->auth_context,
&seq_number);
_gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
_gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
krb5_auth_con_setlocalseqnumber(context,
- context_handle->auth_context,
+ ctx->auth_context,
++seq_number);
- HEIMDAL_MUTEX_unlock(&context_handle->ctx_id_mutex);
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
- if (context_handle->more_flags & LOCAL) {
+ if (ctx->more_flags & LOCAL) {
usage = KRB5_KU_USAGE_INITIATOR_SIGN;
} else {
usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
}
- ret = krb5_create_checksum(context, crypto,
+ ret = krb5_create_checksum(context, ctx->crypto,
usage, 0, buf, len, &cksum);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
free(buf);
return GSS_S_FAILURE;
}
- krb5_crypto_destroy(context, crypto);
/* Determine MIC length */
message_token->length = sizeof(*token) + cksum.checksum.length;
@@ -753,14 +1655,12 @@ OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
}
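
_gssapi_mic_cfx() builds the 16-byte RFC 4121 MIC token header (TOK_ID 04 04, Flags, five 0xFF filler bytes, 64-bit big-endian sequence number) and appends the checksum computed over the message concatenated with that header. A standalone sketch of the header layout, with illustrative names:

#include <stdint.h>
#include <string.h>

struct cfx_mic_header {
    uint8_t tok_id[2];   /* 0x04 0x04 */
    uint8_t flags;       /* SentByAcceptor | AcceptorSubkey */
    uint8_t filler[5];   /* always FF FF FF FF FF */
    uint8_t snd_seq[8];  /* 64-bit big-endian sequence number */
};

static void
build_mic_header(struct cfx_mic_header *h, uint8_t flags, uint64_t seq)
{
    int i;

    h->tok_id[0] = 0x04;
    h->tok_id[1] = 0x04;
    h->flags     = flags;
    memset(h->filler, 0xFF, sizeof(h->filler));
    for (i = 0; i < 8; i++)
        h->snd_seq[i] = (seq >> (56 - 8 * i)) & 0xFF;
}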
OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
- const gsskrb5_ctx context_handle,
+ const gsskrb5_ctx ctx,
krb5_context context,
const gss_buffer_t message_buffer,
const gss_buffer_t token_buffer,
- gss_qop_t *qop_state,
- krb5_keyblock *key)
+ gss_qop_t *qop_state)
{
- krb5_crypto crypto;
gss_cfx_mic_token token;
u_char token_flags;
krb5_error_code ret;
@@ -787,10 +1687,10 @@ OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);
if (token_flags & CFXSentByAcceptor) {
- if ((context_handle->more_flags & LOCAL) == 0)
+ if ((ctx->more_flags & LOCAL) == 0)
return GSS_S_DEFECTIVE_TOKEN;
}
- if (context_handle->more_flags & ACCEPTOR_SUBKEY) {
+ if (ctx->more_flags & ACCEPTOR_SUBKEY) {
if ((token_flags & CFXAcceptorSubkey) == 0)
return GSS_S_DEFECTIVE_TOKEN;
} else {
@@ -798,7 +1698,7 @@ OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
return GSS_S_DEFECTIVE_TOKEN;
}
- if (memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
+ if (ct_memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
return GSS_S_DEFECTIVE_TOKEN;
}
@@ -812,36 +1712,29 @@ OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
return GSS_S_UNSEQ_TOKEN;
}
- HEIMDAL_MUTEX_lock(&context_handle->ctx_id_mutex);
- ret = _gssapi_msg_order_check(context_handle->order, seq_number_lo);
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+ ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
if (ret != 0) {
*minor_status = 0;
- HEIMDAL_MUTEX_unlock(&context_handle->ctx_id_mutex);
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
return ret;
}
- HEIMDAL_MUTEX_unlock(&context_handle->ctx_id_mutex);
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/*
* Verify checksum
*/
- ret = krb5_crypto_init(context, key, 0, &crypto);
- if (ret != 0) {
- *minor_status = ret;
- return GSS_S_FAILURE;
- }
-
- ret = krb5_crypto_get_checksum_type(context, crypto,
+ ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
&cksum.cksumtype);
if (ret != 0) {
*minor_status = ret;
- krb5_crypto_destroy(context, crypto);
return GSS_S_FAILURE;
}
cksum.checksum.data = p + sizeof(*token);
cksum.checksum.length = token_buffer->length - sizeof(*token);
- if (context_handle->more_flags & LOCAL) {
+ if (ctx->more_flags & LOCAL) {
usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
} else {
usage = KRB5_KU_USAGE_INITIATOR_SIGN;
@@ -850,18 +1743,16 @@ OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
buf = malloc(message_buffer->length + sizeof(*token));
if (buf == NULL) {
*minor_status = ENOMEM;
- krb5_crypto_destroy(context, crypto);
return GSS_S_FAILURE;
}
memcpy(buf, message_buffer->value, message_buffer->length);
memcpy(buf + message_buffer->length, token, sizeof(*token));
- ret = krb5_verify_checksum(context, crypto,
+ ret = krb5_verify_checksum(context, ctx->crypto,
usage,
buf,
sizeof(*token) + message_buffer->length,
&cksum);
- krb5_crypto_destroy(context, crypto);
if (ret != 0) {
*minor_status = ret;
free(buf);