From e43dbcfb580f8404b4fd90c9080f238bc79d9cfa Mon Sep 17 00:00:00 2001 From: wuliaokanke Date: Tue, 6 Dec 2022 17:45:57 +0800 Subject: [PATCH] add kae2 nosva --- kae_engine/src/v1/alg/ciphers/sec_ciphers.c | 756 +++++++++++++++++ kae_engine/src/v1/alg/ciphers/sec_ciphers.h | 101 +++ .../src/v1/alg/ciphers/sec_ciphers_soft.c | 334 ++++++++ .../src/v1/alg/ciphers/sec_ciphers_soft.h | 52 ++ .../src/v1/alg/ciphers/sec_ciphers_utils.c | 214 +++++ .../src/v1/alg/ciphers/sec_ciphers_utils.h | 59 ++ .../src/v1/alg/ciphers/sec_ciphers_wd.c | 299 +++++++ .../src/v1/alg/ciphers/sec_ciphers_wd.h | 48 ++ kae_engine/src/v1/alg/dh/hpre_dh.c | 313 +++++++ kae_engine/src/v1/alg/dh/hpre_dh.h | 34 + kae_engine/src/v1/alg/dh/hpre_dh_soft.c | 118 +++ kae_engine/src/v1/alg/dh/hpre_dh_soft.h | 50 ++ kae_engine/src/v1/alg/dh/hpre_dh_util.h | 33 + kae_engine/src/v1/alg/dh/hpre_dh_wd.c | 427 ++++++++++ kae_engine/src/v1/alg/dh/hpre_dh_wd.h | 87 ++ kae_engine/src/v1/alg/digests/sec_digests.c | 590 ++++++++++++++ kae_engine/src/v1/alg/digests/sec_digests.h | 83 ++ .../src/v1/alg/digests/sec_digests_soft.c | 117 +++ .../src/v1/alg/digests/sec_digests_soft.h | 41 + .../src/v1/alg/digests/sec_digests_wd.c | 253 ++++++ .../src/v1/alg/digests/sec_digests_wd.h | 40 + kae_engine/src/v1/alg/pkey/hpre_rsa.c | 771 ++++++++++++++++++ kae_engine/src/v1/alg/pkey/hpre_rsa.h | 73 ++ kae_engine/src/v1/alg/pkey/hpre_rsa_soft.c | 71 ++ kae_engine/src/v1/alg/pkey/hpre_rsa_soft.h | 29 + kae_engine/src/v1/alg/pkey/hpre_rsa_utils.c | 540 ++++++++++++ kae_engine/src/v1/alg/pkey/hpre_rsa_utils.h | 45 + kae_engine/src/v1/alg/pkey/hpre_wd.c | 452 ++++++++++ kae_engine/src/v1/alg/pkey/hpre_wd.h | 90 ++ kae_engine/src/v1/async/async_callback.c | 61 ++ kae_engine/src/v1/async/async_callback.h | 34 + kae_engine/src/v1/async/async_event.c | 190 +++++ kae_engine/src/v1/async/async_event.h | 42 + kae_engine/src/v1/async/async_poll.c | 150 ++++ kae_engine/src/v1/async/async_poll.h | 28 + 
kae_engine/src/v1/async/async_task_queue.c | 188 +++++ kae_engine/src/v1/async/async_task_queue.h | 64 ++ kae_engine/src/v1/uadk_v1.h | 40 + kae_engine/src/v1/utils/engine_check.c | 143 ++++ kae_engine/src/v1/utils/engine_check.h | 42 + kae_engine/src/v1/utils/engine_config.c | 103 +++ kae_engine/src/v1/utils/engine_config.h | 29 + kae_engine/src/v1/utils/engine_fork.c | 66 ++ kae_engine/src/v1/utils/engine_fork.h | 26 + kae_engine/src/v1/utils/engine_log.c | 247 ++++++ kae_engine/src/v1/utils/engine_log.h | 71 ++ kae_engine/src/v1/utils/engine_opensslerr.c | 119 +++ kae_engine/src/v1/utils/engine_opensslerr.h | 106 +++ kae_engine/src/v1/utils/engine_types.h | 31 + kae_engine/src/v1/utils/engine_utils.c | 98 +++ kae_engine/src/v1/utils/engine_utils.h | 132 +++ kae_engine/src/v1/wdmngr/wd_alg_queue.c | 80 ++ kae_engine/src/v1/wdmngr/wd_alg_queue.h | 30 + kae_engine/src/v1/wdmngr/wd_queue_memory.c | 480 +++++++++++ kae_engine/src/v1/wdmngr/wd_queue_memory.h | 106 +++ 55 files changed, 8826 insertions(+) create mode 100644 kae_engine/src/v1/alg/ciphers/sec_ciphers.c create mode 100644 kae_engine/src/v1/alg/ciphers/sec_ciphers.h create mode 100644 kae_engine/src/v1/alg/ciphers/sec_ciphers_soft.c create mode 100644 kae_engine/src/v1/alg/ciphers/sec_ciphers_soft.h create mode 100644 kae_engine/src/v1/alg/ciphers/sec_ciphers_utils.c create mode 100644 kae_engine/src/v1/alg/ciphers/sec_ciphers_utils.h create mode 100644 kae_engine/src/v1/alg/ciphers/sec_ciphers_wd.c create mode 100644 kae_engine/src/v1/alg/ciphers/sec_ciphers_wd.h create mode 100644 kae_engine/src/v1/alg/dh/hpre_dh.c create mode 100644 kae_engine/src/v1/alg/dh/hpre_dh.h create mode 100644 kae_engine/src/v1/alg/dh/hpre_dh_soft.c create mode 100644 kae_engine/src/v1/alg/dh/hpre_dh_soft.h create mode 100644 kae_engine/src/v1/alg/dh/hpre_dh_util.h create mode 100644 kae_engine/src/v1/alg/dh/hpre_dh_wd.c create mode 100644 kae_engine/src/v1/alg/dh/hpre_dh_wd.h create mode 100644 
kae_engine/src/v1/alg/digests/sec_digests.c create mode 100644 kae_engine/src/v1/alg/digests/sec_digests.h create mode 100644 kae_engine/src/v1/alg/digests/sec_digests_soft.c create mode 100644 kae_engine/src/v1/alg/digests/sec_digests_soft.h create mode 100644 kae_engine/src/v1/alg/digests/sec_digests_wd.c create mode 100644 kae_engine/src/v1/alg/digests/sec_digests_wd.h create mode 100644 kae_engine/src/v1/alg/pkey/hpre_rsa.c create mode 100644 kae_engine/src/v1/alg/pkey/hpre_rsa.h create mode 100644 kae_engine/src/v1/alg/pkey/hpre_rsa_soft.c create mode 100644 kae_engine/src/v1/alg/pkey/hpre_rsa_soft.h create mode 100644 kae_engine/src/v1/alg/pkey/hpre_rsa_utils.c create mode 100644 kae_engine/src/v1/alg/pkey/hpre_rsa_utils.h create mode 100644 kae_engine/src/v1/alg/pkey/hpre_wd.c create mode 100644 kae_engine/src/v1/alg/pkey/hpre_wd.h create mode 100644 kae_engine/src/v1/async/async_callback.c create mode 100644 kae_engine/src/v1/async/async_callback.h create mode 100644 kae_engine/src/v1/async/async_event.c create mode 100644 kae_engine/src/v1/async/async_event.h create mode 100644 kae_engine/src/v1/async/async_poll.c create mode 100644 kae_engine/src/v1/async/async_poll.h create mode 100644 kae_engine/src/v1/async/async_task_queue.c create mode 100644 kae_engine/src/v1/async/async_task_queue.h create mode 100644 kae_engine/src/v1/uadk_v1.h create mode 100644 kae_engine/src/v1/utils/engine_check.c create mode 100644 kae_engine/src/v1/utils/engine_check.h create mode 100644 kae_engine/src/v1/utils/engine_config.c create mode 100644 kae_engine/src/v1/utils/engine_config.h create mode 100644 kae_engine/src/v1/utils/engine_fork.c create mode 100644 kae_engine/src/v1/utils/engine_fork.h create mode 100644 kae_engine/src/v1/utils/engine_log.c create mode 100644 kae_engine/src/v1/utils/engine_log.h create mode 100644 kae_engine/src/v1/utils/engine_opensslerr.c create mode 100644 kae_engine/src/v1/utils/engine_opensslerr.h create mode 100644 
kae_engine/src/v1/utils/engine_types.h create mode 100644 kae_engine/src/v1/utils/engine_utils.c create mode 100644 kae_engine/src/v1/utils/engine_utils.h create mode 100644 kae_engine/src/v1/wdmngr/wd_alg_queue.c create mode 100644 kae_engine/src/v1/wdmngr/wd_alg_queue.h create mode 100644 kae_engine/src/v1/wdmngr/wd_queue_memory.c create mode 100644 kae_engine/src/v1/wdmngr/wd_queue_memory.h diff --git a/kae_engine/src/v1/alg/ciphers/sec_ciphers.c b/kae_engine/src/v1/alg/ciphers/sec_ciphers.c new file mode 100644 index 0000000..b4743ed --- /dev/null +++ b/kae_engine/src/v1/alg/ciphers/sec_ciphers.c @@ -0,0 +1,756 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine ciphers + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/***************************************************************************** + * @file sec_ciphers.c + * + * This file provides the implementation for ciphers + * +*****************************************************************************/ +#include "sec_ciphers.h" +#include "sec_ciphers_soft.h" +#include "sec_ciphers_utils.h" +#include "sec_ciphers_wd.h" + +#include "../../utils/engine_check.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" +#include "../../utils/engine_utils.h" +#include "../../async/async_callback.h" +#include "../../async/async_event.h" +#include "../../async/async_task_queue.h" + +#define INPUT_CACHE_SIZE (256 * 1024) + +struct cipher_info { + int nid; + int blocksize; + int keylen; + int ivlen; + int flags; + int is_enabled; + EVP_CIPHER *cipher; +}; +typedef struct cipher_info cipher_info_t; + +static cipher_info_t g_sec_ciphers_info[] = { + {NID_aes_128_ecb, 16, 16, 0, EVP_CIPH_ECB_MODE, 1, NULL}, + {NID_aes_192_ecb, 16, 24, 0, EVP_CIPH_ECB_MODE, 1, NULL}, + {NID_aes_256_ecb, 16, 32, 0, EVP_CIPH_ECB_MODE, 1, NULL}, + {NID_aes_128_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, 1, NULL}, + {NID_aes_192_cbc, 16, 24, 16, EVP_CIPH_CBC_MODE, 1, NULL}, + {NID_aes_256_cbc, 16, 32, 16, EVP_CIPH_CBC_MODE, 1, NULL}, + {NID_aes_128_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, 1, NULL}, + {NID_aes_192_ctr, 1, 24, 16, EVP_CIPH_CTR_MODE, 1, NULL}, + {NID_aes_256_ctr, 1, 32, 16, EVP_CIPH_CTR_MODE, 1, NULL}, + {NID_aes_128_xts, 1, 32, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, 1, NULL}, + {NID_aes_256_xts, 1, 64, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, 1, NULL}, + + {NID_sm4_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, 1, NULL}, + {NID_sm4_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1, 1, NULL}, + {NID_sm4_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, 1, NULL}, + {NID_sm4_ecb, 16, 16, 0, EVP_CIPH_CTR_MODE, 1, NULL}, +}; + +#define CIPHERS_COUNT (BLOCKSIZES_OF(g_sec_ciphers_info)) + +static int g_known_cipher_nids[CIPHERS_COUNT] = { 
+ NID_aes_128_ecb, + NID_aes_192_ecb, + NID_aes_256_ecb, + NID_aes_128_cbc, + NID_aes_192_cbc, + NID_aes_256_cbc, + NID_aes_128_ctr, + NID_aes_192_ctr, + NID_aes_256_ctr, + NID_aes_128_xts, + NID_aes_256_xts, + + NID_sm4_ctr, + NID_sm4_cbc, + NID_sm4_ofb128, + NID_sm4_ecb, +}; + +#define SEC_CIPHERS_RETURN_FAIL_IF(cond, mesg, ret) \ + do { \ + if (unlikely(cond)) { \ + US_ERR(mesg); \ + return (ret); \ + } \ + } while (0) + +#define SEC_CIPHERS_GOTO_FAIL_IF(cond, mesg, tag) \ + do { \ + if (unlikely(cond)) { \ + US_ERR(mesg); \ + goto tag; \ + } \ + } while (0) + +static int sec_ciphers_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int encrypt); +static int sec_ciphers_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl); +static int sec_ciphers_cleanup(EVP_CIPHER_CTX *ctx); +static int sec_ciphers_priv_ctx_cleanup(EVP_CIPHER_CTX *ctx); +static int sec_ciphers_is_check_valid(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx); +static int sec_ciphers_async_do_crypto(cipher_engine_ctx_t *e_cipher_ctx, op_done_t *op_done); + +void sec_ciphers_set_enabled(int nid, int enabled) +{ + unsigned int i = 0; + + for (i = 0; i < CIPHERS_COUNT; i++) { + if (g_sec_ciphers_info[i].nid == nid) + g_sec_ciphers_info[i].is_enabled = enabled; + } +} + +static int sec_ciphers_sync_do_crypto(EVP_CIPHER_CTX *ctx, cipher_engine_ctx_t *e_cipher_ctx, + cipher_priv_ctx_t *priv_ctx); + +static int sec_ciphers_init_priv_ctx(cipher_priv_ctx_t *priv_ctx, EVP_CIPHER_CTX *ctx, + const unsigned char *key, const unsigned char *iv) +{ + int nid = 0; + int ret = KAE_FAIL; + + SEC_CIPHERS_RETURN_FAIL_IF(ctx == NULL || priv_ctx == NULL, "null ctx or priv ctx", KAE_FAIL); + + // init encrypt of private ctx + priv_ctx->encrypt = EVP_CIPHER_CTX_encrypting(ctx); + // init offset of private ctx + priv_ctx->offset = 0; + // init key of private ctx + if (priv_ctx->key == NULL) { + priv_ctx->key = (uint8_t 
*)kae_malloc(EVP_CIPHER_CTX_key_length(ctx)); + SEC_CIPHERS_GOTO_FAIL_IF(priv_ctx->key == NULL, "malloc key failed", ERR); + } + + kae_memcpy(priv_ctx->key, key, EVP_CIPHER_CTX_key_length(ctx)); + priv_ctx->key_len = EVP_CIPHER_CTX_key_length(ctx); + + // init iv of private ctx + if (priv_ctx->iv == NULL) { + priv_ctx->iv = (uint8_t *)kae_malloc(EVP_CIPHER_CTX_iv_length(ctx)); + SEC_CIPHERS_GOTO_FAIL_IF(priv_ctx->iv == NULL, "malloc iv failed.", ERR); + } + if (iv != NULL) + kae_memcpy(priv_ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx)); + else + kae_memcpy(priv_ctx->iv, EVP_CIPHER_CTX_iv_noconst(ctx), EVP_CIPHER_CTX_iv_length(ctx)); + + priv_ctx->iv_len = EVP_CIPHER_CTX_iv_length(ctx); + + if (priv_ctx->next_iv == NULL) { + priv_ctx->next_iv = (uint8_t *)kae_malloc(priv_ctx->iv_len); + SEC_CIPHERS_GOTO_FAIL_IF(priv_ctx->next_iv == NULL, "malloc next iv failed.", ERR); + } + + // init cipher mode and alg of private ctx + nid = EVP_CIPHER_CTX_nid(ctx); + priv_ctx->c_mode = sec_ciphers_get_cipher_mode(nid); + priv_ctx->c_alg = sec_ciphers_get_cipher_alg(nid); + SEC_CIPHERS_GOTO_FAIL_IF(priv_ctx->c_mode == NO_C_MODE || priv_ctx->c_alg == NO_C_ALG, + "unsupport the cipher nid", ERR); + + if (priv_ctx->ecb_encryto == NULL && priv_ctx->c_mode == XTS) { + // set XTS PARAM + priv_ctx->ecb_encryto = (xts_ecb_data *)kae_malloc(sizeof(xts_ecb_data)); + SEC_CIPHERS_GOTO_FAIL_IF(priv_ctx->ecb_encryto == NULL, "malloc ecb ctx", ERR); + + priv_ctx->ecb_encryto->ecb_ctx = EVP_CIPHER_CTX_new(); + priv_ctx->ecb_encryto->key2_len = priv_ctx->key_len >> 1; + priv_ctx->ecb_encryto->key2 = (uint8_t *)kae_malloc(priv_ctx->key_len >> 1); + priv_ctx->ecb_encryto->encryto_iv = (uint8_t *)kae_malloc(priv_ctx->iv_len); + priv_ctx->ecb_encryto->iv_out = (uint8_t *)kae_malloc(priv_ctx->iv_len); + if (priv_ctx->ecb_encryto->ecb_ctx == NULL + || priv_ctx->ecb_encryto->key2 == NULL + || priv_ctx->ecb_encryto->encryto_iv == NULL + || priv_ctx->ecb_encryto->iv_out == NULL) { + if 
(priv_ctx->ecb_encryto->ecb_ctx != NULL) { + EVP_CIPHER_CTX_free(priv_ctx->ecb_encryto->ecb_ctx); + priv_ctx->ecb_encryto->ecb_ctx = NULL; + } + + kae_free(priv_ctx->ecb_encryto->key2); + kae_free(priv_ctx->ecb_encryto->encryto_iv); + kae_free(priv_ctx->ecb_encryto->iv_out); + kae_free(priv_ctx->ecb_encryto); + goto ERR; + } + + if (priv_ctx->ecb_encryto->key2_len == 32) { // 256-xts key2len is 32 + priv_ctx->ecb_encryto->cipher_type = EVP_aes_256_ecb(); + } else { + priv_ctx->ecb_encryto->cipher_type = EVP_aes_128_ecb(); + } + priv_ctx->ecb_encryto->countNum = 0; + kae_memcpy(priv_ctx->ecb_encryto->key2, + priv_ctx->key + priv_ctx->ecb_encryto->key2_len, + priv_ctx->ecb_encryto->key2_len); + } + +#ifndef OPENSSL_ENABLE_KAE_SMALL_PACKKET_CIPHER_OFFLOADS + ret = sec_ciphers_sw_impl_init(ctx, key, iv, priv_ctx->encrypt); + SEC_CIPHERS_GOTO_FAIL_IF(ret != KAE_SUCCESS, "kae sw iml init failed", ERR); + + priv_ctx->switch_threshold = + (size_t)sec_ciphers_sw_get_threshold(EVP_CIPHER_CTX_nid(ctx)); +#endif + + return KAE_SUCCESS; + +ERR: + US_ERR("sec_ciphers_sec_state_init failed. ctx=%p", ctx); + (void)sec_ciphers_priv_ctx_cleanup(ctx); + return KAE_FAIL; +} + +static int sec_ciphers_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int encrypt) +{ + cipher_priv_ctx_t *priv_ctx = NULL; + + if (unlikely((ctx == NULL) || (key == NULL))) { + US_ERR("ctx or key is NULL."); + return OPENSSL_FAIL; + } + + if (encrypt != EVP_CIPHER_CTX_encrypting(ctx)) { + US_ERR("encrypt different, ctx=%p", ctx); + return OPENSSL_FAIL; + } + + priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (unlikely(priv_ctx == NULL)) { + US_ERR("sec private ctx is NULL"); + return OPENSSL_FAIL; + } + + if (sec_ciphers_init_priv_ctx(priv_ctx, ctx, key, iv) != KAE_SUCCESS) { + US_ERR("init failed. 
ctx=%p", ctx); + goto ERR; + } + US_DEBUG("init success, ctx=%p", ctx); +#ifdef KAE_DEBUG_KEY_ENABLE + dump_data("key", priv_ctx->key, priv_ctx->key_len); + dump_data("iv", priv_ctx->iv, priv_ctx->iv_len); +#endif + return OPENSSL_SUCCESS; +ERR: + sec_ciphers_cleanup(ctx); + return OPENSSL_SUCCESS; +} + +static void sec_ciphers_update_priv_ctx(cipher_priv_ctx_t *priv_ctx) +{ + uint32_t do_cipher_len = priv_ctx->do_cipher_len; + uint32_t increase_counter = 0; + + if (do_cipher_len == 0) + return; + + priv_ctx->in += priv_ctx->do_cipher_len; + priv_ctx->out += priv_ctx->do_cipher_len; + priv_ctx->left_len -= priv_ctx->do_cipher_len; + + switch (priv_ctx->c_mode) { + case ECB: + break; + case CBC: + if (priv_ctx->encrypt == OPENSSL_ENCRYPTION) + kae_memcpy(priv_ctx->iv, priv_ctx->out - 16, 16); // hardware need 16-byte alignment + else + kae_memcpy(priv_ctx->iv, priv_ctx->next_iv, 16); // hardware need 16-byte alignment + break; + case CTR: + increase_counter = (do_cipher_len + priv_ctx->offset) >> 4; // right shift 4 + sec_ciphers_ctr_iv_inc(priv_ctx->iv, increase_counter); + priv_ctx->offset = (priv_ctx->offset + (do_cipher_len & 0xf)) % 16; // hardware need 16-byte alignment + break; + case XTS: + if (priv_ctx->c_alg == AES) { + priv_ctx->ecb_encryto->countNum = (priv_ctx->do_cipher_len + priv_ctx->offset) >> 4; // right shift 4 + sec_ciphers_xts_iv_inc(priv_ctx); + priv_ctx->offset = (priv_ctx->offset + (do_cipher_len & 0xf)) % 16; // hardware need 16-byte alignment + } + break; + case OFB: + kae_memcpy(priv_ctx->iv, (uint8_t *)priv_ctx->e_cipher_ctx->op_data.iv, + priv_ctx->e_cipher_ctx->op_data.iv_bytes); + break; + default: + US_WARN("mode=%d don't support.", priv_ctx->c_mode); + break; + } + + US_DEBUG("update priv_ctx success."); +} + +static int sec_ciphers_before_dociphers_cb(cipher_priv_ctx_t *priv_ctx) +{ + // store IV for next cbc decryption operation + if (priv_ctx->encrypt == OPENSSL_DECRYPTION && priv_ctx->c_mode == CBC) + 
kae_memcpy(priv_ctx->next_iv, priv_ctx->in + priv_ctx->do_cipher_len - priv_ctx->iv_len, priv_ctx->iv_len); + + if (priv_ctx->c_mode == XTS && priv_ctx->c_alg == AES) { + sec_ciphers_ecb_encryt(priv_ctx->ecb_encryto, + priv_ctx->ecb_encryto->encryto_iv, + priv_ctx->iv, priv_ctx->iv_len); + } + + return KAE_SUCCESS; +} + +static int sec_ciphers_after_dociphers_cb(EVP_CIPHER_CTX *ctx) +{ + // sync priv ctx to next cipher, in case next cipher may be soft cipher + return sec_ciphers_sw_hw_ctx_sync(ctx, SEC_CIHPER_SYNC_H2S); +} + +/* + * |<--16*n bytes--> |<----16*n bytes------->|<--16*n bytes--->| + * |-----------------|<--offset----->|<----->|-----------------| + * |<--first cipher----------------->|<---next cipher--------->| + * + * + * to make 16*n align to next cipher data copy to hardware addr should start at + * hardware_addr+offset and get out put at hardware_addr+offset + * + * |<----16*n bytes------>|<--16*n bytes--->| + * |<--offset----->|------------------------+ + * hardware_addr |<---next cipher-------->| + * + */ +static int sec_ciphers_do_crypto(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx) +{ + int ret = KAE_FAIL; + + // add async parm + int job_ret; + op_done_t op_done; + + SEC_CIPHERS_RETURN_FAIL_IF(priv_ctx == NULL, "priv_ctx is NULL.", KAE_FAIL); + cipher_engine_ctx_t *e_cipher_ctx = priv_ctx->e_cipher_ctx; + + SEC_CIPHERS_RETURN_FAIL_IF(e_cipher_ctx == NULL, "e_cipher_ctx is NULL", KAE_FAIL); + + SEC_CIPHERS_RETURN_FAIL_IF(priv_ctx->inl <= 0, "in length less than or equal to zero.", KAE_FAIL); + // packageSize>input_cache_size + if (priv_ctx->left_len > INPUT_CACHE_SIZE - priv_ctx->offset) { + ret = sec_ciphers_sync_do_crypto(ctx, e_cipher_ctx, priv_ctx); + if (ret != 0) { + US_ERR("sec sync crypto fail"); + return ret; + } + return KAE_SUCCESS; + } + + // async + async_init_op_done_v1(&op_done); + + if (op_done.job != NULL && kae_is_async_enabled()) { + if (async_setup_async_event_notification_v1(0) == 0) { + US_ERR("sec async event notifying 
failed"); + async_cleanup_op_done_v1(&op_done); + return KAE_FAIL; + } + } else { + US_DEBUG("NO ASYNC Job or async disable, back to SYNC!"); + async_cleanup_op_done_v1(&op_done); + return sec_ciphers_sync_do_crypto(ctx, e_cipher_ctx, priv_ctx); + } + + if (sec_ciphers_async_do_crypto(e_cipher_ctx, &op_done) == KAE_FAIL) + goto err; + + do { + job_ret = async_pause_job_v1(op_done.job, ASYNC_STATUS_OK); + if ((job_ret == 0)) { + US_DEBUG("- pthread_yidle -"); + kae_pthread_yield(); + } + } while (!op_done.flag || ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(job_ret)); + + if (op_done.verifyRst < 0) { + US_ERR("verify result failed with %d", op_done.verifyRst); + async_cleanup_op_done_v1(&op_done); + return KAE_FAIL; + } + + async_cleanup_op_done_v1(&op_done); + + US_DEBUG(" Cipher Async Job Finish! priv_ctx = %p\n", priv_ctx); + + // after cipher cycle should update: in, out, iv, key, length. + sec_ciphers_update_priv_ctx(priv_ctx); + (void)sec_ciphers_after_dociphers_cb(ctx); + + return KAE_SUCCESS; +err: + US_ERR("async job err"); + (void)async_clear_async_event_notification_v1(); + async_cleanup_op_done_v1(&op_done); + return KAE_FAIL; +} + +static int sec_ciphers_sync_do_crypto(EVP_CIPHER_CTX *ctx, cipher_engine_ctx_t *e_cipher_ctx, + cipher_priv_ctx_t *priv_ctx) +{ + int ret = KAE_FAIL; + int leftlen = priv_ctx->left_len; + + while (leftlen != 0) { + priv_ctx->do_cipher_len = wd_ciphers_get_do_cipher_len(priv_ctx->offset, leftlen); + + (void)sec_ciphers_before_dociphers_cb(e_cipher_ctx->priv_ctx); + + wd_ciphers_set_input_data(e_cipher_ctx); + + ret = wd_ciphers_do_crypto_impl(e_cipher_ctx); + if (ret != KAE_SUCCESS) + return ret; + + wd_ciphers_get_output_data(e_cipher_ctx); + + // after cipher cycle should update: in, out, iv, key, length. 
+ sec_ciphers_update_priv_ctx(priv_ctx); + + (void)sec_ciphers_after_dociphers_cb(ctx); + + leftlen -= priv_ctx->do_cipher_len; + } + + US_DEBUG("sec state update success."); + + return KAE_SUCCESS; +} + +static int sec_ciphers_async_do_crypto(cipher_engine_ctx_t *e_cipher_ctx, op_done_t *op_done) +{ + int ret = 0; + int cnt = 0; + cipher_priv_ctx_t *priv_ctx = e_cipher_ctx->priv_ctx; + enum task_type type = ASYNC_TASK_CIPHER; + void *tag = e_cipher_ctx; + + priv_ctx->do_cipher_len = wd_ciphers_get_do_cipher_len(priv_ctx->offset, priv_ctx->left_len); + + (void)sec_ciphers_before_dociphers_cb(e_cipher_ctx->priv_ctx); + + wd_ciphers_set_input_data(e_cipher_ctx); + + do { + if (cnt > MAX_SEND_TRY_CNTS) + break; + + ret = wcrypto_do_cipher(e_cipher_ctx->wd_ctx, &e_cipher_ctx->op_data, tag); + if (ret == -WD_EBUSY) { + if ((async_wake_job_v1(op_done->job, ASYNC_STATUS_EAGAIN) == 0 || + async_pause_job_v1(op_done->job, ASYNC_STATUS_EAGAIN) == 0)) { + US_ERR("sec wake job or sec pause job fail!\n"); + ret = 0; + break; + } + cnt++; + } + } while (ret == -WD_EBUSY); + + if (ret != WD_SUCCESS) { + US_ERR("sec async wcryto do cipher failed"); + return KAE_FAIL; + } + + if (async_add_poll_task_v1(e_cipher_ctx, op_done, type) == 0) { + US_ERR("sec add task failed "); + return KAE_FAIL; + } + + return KAE_SUCCESS; +} + +static int sec_ciphers_is_check_valid(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx) +{ + if (priv_ctx->switch_threshold > (size_t)priv_ctx->inl) { + US_WARN_LIMIT("small packet cipher offload, switch to soft cipher, inl %d", (int)priv_ctx->inl); + return KAE_FAIL; + } + + if (sec_ciphers_is_iv_may_overflow(ctx, priv_ctx)) { + US_WARN("sec do cipher, the iv will overflow"); + return KAE_FAIL; + } + + return KAE_SUCCESS; +} + +static int sec_ciphers_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) +{ + int ret = KAE_FAIL; + int num = 0; + cipher_priv_ctx_t *priv_ctx = NULL; + + SEC_CIPHERS_RETURN_FAIL_IF(ctx == NULL, "ctx 
is NULL", OPENSSL_FAIL); + SEC_CIPHERS_RETURN_FAIL_IF(in == NULL, "in is NULL", OPENSSL_FAIL); + SEC_CIPHERS_RETURN_FAIL_IF(out == NULL, "out is NULL", OPENSSL_FAIL); + priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); + SEC_CIPHERS_RETURN_FAIL_IF(priv_ctx == NULL, "ctx cipher data is NULL.", OPENSSL_FAIL); + priv_ctx->inl = inl; + priv_ctx->in = in; + priv_ctx->out = out; + priv_ctx->left_len = inl; + + num = EVP_CIPHER_CTX_num(ctx); + + ret = sec_ciphers_is_check_valid(ctx, priv_ctx); + if (ret != KAE_SUCCESS) { + US_WARN_LIMIT("sec cipher check invalid, switch to soft cipher"); + goto do_soft_cipher; + } + + if (priv_ctx->e_cipher_ctx == NULL) { + priv_ctx->e_cipher_ctx = wd_ciphers_get_engine_ctx(priv_ctx); + if (priv_ctx->e_cipher_ctx == NULL) { + US_WARN("failed to get engine ctx, switch to soft cipher"); + goto do_soft_cipher; + } + } + + ret = sec_ciphers_do_crypto(ctx, priv_ctx); + if (ret != KAE_SUCCESS) { + US_WARN("sec cipher do ciphers failed, switch to soft cipher"); + goto do_soft_cipher; + } + + US_DEBUG("do cipher success. 
ctx=%p, ctx->num=%d, inl=%d", ctx, num, (int)inl); + + return OPENSSL_SUCCESS; + +do_soft_cipher: + if (priv_ctx->e_cipher_ctx != NULL) { + wd_ciphers_put_engine_ctx(priv_ctx->e_cipher_ctx); + priv_ctx->e_cipher_ctx = NULL; + } + + if (sec_ciphers_software_encrypt(ctx, priv_ctx) != KAE_SUCCESS) { + US_WARN("sec cipher do soft ciphers failed"); + return OPENSSL_FAIL; + } + + return OPENSSL_SUCCESS; +} + +static int sec_ciphers_priv_ctx_cleanup(EVP_CIPHER_CTX *ctx) +{ + cipher_priv_ctx_t *priv_ctx = NULL; + + priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (unlikely(priv_ctx == NULL)) { + US_WARN("ctx cipher data is NULL."); + return KAE_FAIL; + } + + kae_free(priv_ctx->iv); + kae_free(priv_ctx->key); + kae_free(priv_ctx->next_iv); + if (priv_ctx->ecb_encryto) { + if (priv_ctx->ecb_encryto->ecb_ctx != NULL) { + EVP_CIPHER_CTX_free(priv_ctx->ecb_encryto->ecb_ctx); + priv_ctx->ecb_encryto->ecb_ctx = NULL; + } + + kae_free(priv_ctx->ecb_encryto->key2); + kae_free(priv_ctx->ecb_encryto->encryto_iv); + kae_free(priv_ctx->ecb_encryto->iv_out); + kae_free(priv_ctx->ecb_encryto); + } + + (void)wd_ciphers_put_engine_ctx(priv_ctx->e_cipher_ctx); + priv_ctx->e_cipher_ctx = NULL; + + return KAE_SUCCESS; +} + +static int sec_ciphers_cleanup(EVP_CIPHER_CTX *ctx) +{ + if (unlikely(ctx == NULL)) { + US_WARN("ctx is NULL"); + return OPENSSL_FAIL; + } + + int ret = sec_ciphers_sw_impl_cleanup(ctx); + + if (ret != KAE_SUCCESS) + US_ERR("Cipher soft impl cleanup failed. 
ctx=%p", ctx); + + ret = sec_ciphers_priv_ctx_cleanup(ctx); + if (ret != KAE_SUCCESS) + return OPENSSL_FAIL; + + US_DEBUG("Cleanup success, ctx=%p", ctx); + + return OPENSSL_SUCCESS; +} + +static EVP_CIPHER *sec_ciphers_set_cipher_method(cipher_info_t cipherinfo) +{ + EVP_CIPHER *cipher = EVP_CIPHER_meth_new(cipherinfo.nid, cipherinfo.blocksize, cipherinfo.keylen); + int ret = 1; + + if (cipher == NULL) + return NULL; + + ret &= EVP_CIPHER_meth_set_iv_length(cipher, cipherinfo.ivlen); + ret &= EVP_CIPHER_meth_set_flags(cipher, cipherinfo.flags); + ret &= EVP_CIPHER_meth_set_init(cipher, sec_ciphers_init); + ret &= EVP_CIPHER_meth_set_do_cipher(cipher, sec_ciphers_do_cipher); + ret &= EVP_CIPHER_meth_set_set_asn1_params(cipher, EVP_CIPHER_set_asn1_iv); + ret &= EVP_CIPHER_meth_set_get_asn1_params(cipher, EVP_CIPHER_get_asn1_iv); + ret &= EVP_CIPHER_meth_set_cleanup(cipher, sec_ciphers_cleanup); + ret &= EVP_CIPHER_meth_set_impl_ctx_size(cipher, sizeof(cipher_priv_ctx_t)); + if (ret == 0) { + US_WARN("Failed to set cipher methods for nid %d\n", cipherinfo.nid); + return NULL; + } else { + return cipher; + } +} + +void sec_create_ciphers(void) +{ + unsigned int i = 0; + + for (i = 0; i < CIPHERS_COUNT; i++) { + if (g_sec_ciphers_info[i].cipher == NULL) + g_sec_ciphers_info[i].cipher = sec_ciphers_set_cipher_method(g_sec_ciphers_info[i]); + } +} + +static EVP_CIPHER *get_ciphers_default_method(int nid) +{ + EVP_CIPHER *cipher = NULL; + + switch (nid) { + case NID_sm4_ctr: + cipher = (EVP_CIPHER *)EVP_sm4_ctr(); + break; + case NID_sm4_cbc: + cipher = (EVP_CIPHER *)EVP_sm4_cbc(); + break; + case NID_sm4_ofb128: + cipher = (EVP_CIPHER *)EVP_sm4_ofb(); + break; + case NID_sm4_ecb: + cipher = (EVP_CIPHER *)EVP_sm4_ecb(); + break; + default: + US_WARN("nid = %d not support.", nid); + break; + } + return cipher; +} + +int sec_engine_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid) +{ + UNUSED(e); + unsigned int i = 0; + + if (unlikely((nids == NULL) 
&& ((cipher == NULL) || (nid < 0)))) { + US_WARN("Invalid input param."); + if (cipher != NULL) + *cipher = NULL; + return OPENSSL_FAIL; + } + + /* No specific cipher => return a list of supported nids ... */ + if (cipher == NULL) { + if (nids != NULL) + *nids = g_known_cipher_nids; + return BLOCKSIZES_OF(g_sec_ciphers_info); + } + + for (i = 0; i < CIPHERS_COUNT; i++) { + if (g_sec_ciphers_info[i].nid == nid) { + if (g_sec_ciphers_info[i].cipher == NULL) + sec_create_ciphers(); + /*SM4 is disabled*/ + *cipher = g_sec_ciphers_info[i].is_enabled ? g_sec_ciphers_info[i].cipher : get_ciphers_default_method(nid); + return OPENSSL_SUCCESS; + } + } + + US_WARN("nid = %d not support.", nid); + *cipher = NULL; + + return OPENSSL_FAIL; +} + +void sec_ciphers_free_ciphers(void) +{ + unsigned int i = 0; + + for (i = 0; i < CIPHERS_COUNT; i++) { + if (g_sec_ciphers_info[i].cipher != NULL) { + EVP_CIPHER_meth_free(g_sec_ciphers_info[i].cipher); + g_sec_ciphers_info[i].cipher = NULL; + } + } +} + +void sec_ciphers_cb(const void *msg, void *tag) +{ + if (!msg || !tag) { + US_ERR("sec cb params err!\n"); + return; + } + struct wcrypto_cipher_msg *message = (struct wcrypto_cipher_msg *)msg; + cipher_engine_ctx_t *eng_ctx = (cipher_engine_ctx_t *)tag; + + kae_memcpy(eng_ctx->priv_ctx->out, message->out, message->out_bytes); +} + +// async poll thread create +int sec_cipher_engine_ctx_poll(void *engnine_ctx) +{ + int ret = 0; + struct cipher_engine_ctx *eng_ctx = (struct cipher_engine_ctx *)engnine_ctx; + struct wd_queue *q = eng_ctx->q_node->kae_wd_queue; + +POLL_AGAIN: + ret = wcrypto_cipher_poll(q, 1); + if (!ret) { + goto POLL_AGAIN; + } else if (ret < 0) { + US_ERR("cipher poll failed\n"); + return ret; + } + return ret; +} + +int cipher_module_init(void) +{ + wd_ciphers_init_qnode_pool(); + + sec_create_ciphers(); + + // reg async interface here + async_register_poll_fn_v1(ASYNC_TASK_CIPHER, sec_cipher_engine_ctx_poll); + + return 1; +} diff --git 
a/kae_engine/src/v1/alg/ciphers/sec_ciphers.h b/kae_engine/src/v1/alg/ciphers/sec_ciphers.h new file mode 100644 index 0000000..1b5d98c --- /dev/null +++ b/kae_engine/src/v1/alg/ciphers/sec_ciphers.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the interface for KAE engine dealing with wrapdrive + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/***************************************************************************** + * @file sec_cipher.h + * + * This file provides the interface for SEC engine dealing with wrapdrive + * + *****************************************************************************/ + +#ifndef SEC_CIPHERS_H +#define SEC_CIPHERS_H +#include +#include +#include "../../wdmngr/wd_queue_memory.h" + +#define MAX_SEND_TRY_CNTS 50 + +enum openssl_cipher_enc_t { + OPENSSL_DECRYPTION = 0, + OPENSSL_ENCRYPTION = 1 +}; + +enum sec_cipher_priv_ctx_syncto { + SEC_CIHPER_SYNC_S2W = 1, // software priv ctx sync to hareware priv ctx + SEC_CIHPER_SYNC_H2S, // hareware priv ctx sync to software priv ctx +}; +typedef enum sec_cipher_priv_ctx_syncto sec_cipher_priv_ctx_syncto_t; + +typedef struct xts_ecb_data_strcut { + EVP_CIPHER_CTX *ecb_ctx; + const EVP_CIPHER *cipher_type; + uint8_t *key2; + uint8_t key2_len; + uint8_t *iv_out; + uint8_t *encryto_iv; + uint32_t countNum; +} xts_ecb_data; + +typedef struct cipher_engine_ctx cipher_engine_ctx_t; +/* + * | 
16bytes * n length | offset | | + * | <---------first buf -----------><---next buf -->| + * the next buf send to warpdriv should start at hardaddr + first offset + */ +struct cipher_priv_ctx { + int32_t encrypt; // encrypt or decryto DECRYPTION = 0, ENCRYPTION = 1 + uint32_t inl; // input length + uint32_t left_len; // left length for warpdrive to do + uint32_t offset; // prev buf offset, that indicate the next buf should start at hardware_addr+offset + uint8_t *key; // key + uint32_t key_len; // key length + uint8_t *iv; // iv + uint32_t iv_len; // iv length + uint8_t *next_iv; // store IV for next cbc operation in decryption + const uint8_t *in; + uint8_t *out; + uint32_t c_mode; + uint32_t c_alg; + uint32_t do_cipher_len; // do one cycle cipher length + size_t switch_threshold; // crypt small packet offload threshold + void *sw_ctx_data; // Pointer for context data that will be used by Small packet offload feature. + xts_ecb_data *ecb_encryto; + cipher_engine_ctx_t *e_cipher_ctx; +}; + +typedef struct cipher_priv_ctx cipher_priv_ctx_t; + +struct cipher_engine_ctx { + KAE_QUEUE_DATA_NODE_S *q_node; + struct wcrypto_cipher_op_data op_data; + struct wcrypto_cipher_ctx_setup setup; + void *wd_ctx; // one ctx or a list of ctx + + cipher_priv_ctx_t *priv_ctx; +}; + +void sec_ciphers_set_enabled(int nid, int enabled); +int sec_engine_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid); +void sec_ciphers_free_ciphers(void); +int sec_cipher_engine_ctx_poll(void *engnine_ctx); + +int cipher_module_init(void); +void sec_ciphers_cb(const void *msg, void *tag); + +#endif diff --git a/kae_engine/src/v1/alg/ciphers/sec_ciphers_soft.c b/kae_engine/src/v1/alg/ciphers/sec_ciphers_soft.c new file mode 100644 index 0000000..71fbb33 --- /dev/null +++ b/kae_engine/src/v1/alg/ciphers/sec_ciphers_soft.c @@ -0,0 +1,334 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * Description: This file provides the implementation for switch to soft ciphers + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/***************************************************************************** + * @file sec_ciphers_soft.c + * + * This file provides the implementation for switch to soft ciphers + * + *****************************************************************************/ +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" +#include "sec_ciphers_soft.h" +#include "sec_ciphers.h" +#include "sec_ciphers_utils.h" + +#define CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT 192 + +static cipher_threshold_table_t g_sec_ciphers_pkt_threshold_table[] = { + { NID_aes_128_ecb, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_192_ecb, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_256_ecb, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_128_cbc, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_192_cbc, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_256_cbc, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_128_ctr, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_192_ctr, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_256_ctr, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_128_xts, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_aes_256_xts, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + 
{ NID_sm4_cbc, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_sm4_ctr, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_sm4_ofb128, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_sm4_ecb, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, +}; +static int g_sec_ciphers_pkt_threshold_table_size = BLOCKSIZES_OF(g_sec_ciphers_pkt_threshold_table); + +static sw_cipher_t g_sec_ciphers_sw_cipher_table[] = { + + { NID_aes_128_ecb, EVP_aes_128_ecb }, + { NID_aes_192_ecb, EVP_aes_192_ecb }, + { NID_aes_256_ecb, EVP_aes_256_ecb }, + { NID_aes_128_cbc, EVP_aes_128_cbc }, + { NID_aes_192_cbc, EVP_aes_192_cbc }, + { NID_aes_256_cbc, EVP_aes_256_cbc }, + { NID_aes_128_ctr, EVP_aes_128_ctr }, + { NID_aes_192_ctr, EVP_aes_192_ctr }, + { NID_aes_256_ctr, EVP_aes_256_ctr }, + { NID_aes_128_xts, EVP_aes_128_xts }, + { NID_aes_256_xts, EVP_aes_256_xts }, + + { NID_sm4_cbc, EVP_sm4_cbc }, + { NID_sm4_ctr, EVP_sm4_ctr }, + { NID_sm4_ofb128, EVP_sm4_ofb }, + { NID_sm4_ecb, EVP_sm4_ecb }, +}; +static int g_sec_ciphers_sw_cipher_table_size = BLOCKSIZES_OF(g_sec_ciphers_sw_cipher_table); + +static int sec_ciphers_sw_impl_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl); + +int sec_ciphers_sw_get_threshold(int nid) +{ + int i = 0; + + do { + if (g_sec_ciphers_pkt_threshold_table[i].nid == nid) + return g_sec_ciphers_pkt_threshold_table[i].threshold; + } while (++i < g_sec_ciphers_pkt_threshold_table_size); + + US_ERR("nid %d not found in threshold table", nid); + + return KAE_FAIL; +} + +const EVP_CIPHER *sec_ciphers_get_cipher_sw_impl(int nid) +{ + int i = 0; + + for (i = 0; i < g_sec_ciphers_sw_cipher_table_size; i++) { + if (nid == g_sec_ciphers_sw_cipher_table[i].nid) + return (g_sec_ciphers_sw_cipher_table[i].get_cipher)(); + } + US_WARN("Invalid nid %d\n", nid); + + return (EVP_CIPHER *)NULL; +} + +int sec_ciphers_sw_impl_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + int ret = 
KAE_FAIL; + unsigned int sw_size = 0; + + cipher_priv_ctx_t *priv_ctx = NULL; + const EVP_CIPHER *sw_cipher = NULL; + + /* allowed iv to be empty. */ + if (unlikely(key == NULL)) { + US_ERR("kae sw init parameter is NULL. key=%p", key); + return KAE_FAIL; + } + if (unlikely(ctx == NULL)) { + US_ERR("kae sw init parameter is NULL. ctx=%p", ctx); + return KAE_FAIL; + } + + priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (unlikely(priv_ctx == NULL)) { + US_ERR("state is NULL"); + return KAE_FAIL; + } + + sw_cipher = sec_ciphers_get_cipher_sw_impl(EVP_CIPHER_CTX_nid(ctx)); + if (unlikely(sw_cipher == NULL)) { + int nid = EVP_CIPHER_CTX_nid(ctx); + + US_ERR("get openssl software cipher failed. nid = %d", nid); + return KAE_FAIL; + } + + sw_size = EVP_CIPHER_impl_ctx_size(sw_cipher); + if (unlikely(sw_size == 0)) { + US_ERR("get EVP cipher ctx size failed, sw_size=%d", sw_size); + return KAE_FAIL; + } + + if (priv_ctx->sw_ctx_data == NULL) { + priv_ctx->sw_ctx_data = kae_malloc(sw_size); + if (priv_ctx->sw_ctx_data == NULL) { + US_ERR("Unable to allocate memory [%u bytes] for sw_ctx_data", sw_size); + return KAE_FAIL; + } + } + kae_memset(priv_ctx->sw_ctx_data, 0, sw_size); + + if (iv == NULL) + iv = EVP_CIPHER_CTX_iv_noconst(ctx); + + /* real implementation: Openssl soft arithmetic key initialization function */ + EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx->sw_ctx_data); + ret = EVP_CIPHER_meth_get_init(sw_cipher)(ctx, key, iv, enc); + EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx); + if (ret != OPENSSL_SUCCESS) { + US_ERR("OPENSSL init key failed. ctx=%p", ctx); + kae_free(priv_ctx->sw_ctx_data); + return KAE_FAIL; + } + US_DEBUG("kae sw init impl success. 
ctx=%p", ctx); + + return KAE_SUCCESS; +} + +int sec_ciphers_sw_impl_cleanup(EVP_CIPHER_CTX *ctx) +{ + cipher_priv_ctx_t *priv_ctx = NULL; + + if (unlikely(ctx == NULL)) { + US_WARN("ctx is NULL"); + return KAE_FAIL; + } + +#ifdef KAE_DEBUG_KEY_ENABLE + dump_data("iv", EVP_CIPHER_CTX_iv_noconst(ctx), EVP_CIPHER_CTX_iv_length(ctx)); +#endif + + priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (unlikely(priv_ctx == NULL)) { + US_WARN("ctx cipher private data is NULL."); + return KAE_FAIL; + } + + kae_free(priv_ctx->sw_ctx_data); + + US_DEBUG("kae sw cleanup impl success, ctx=%p", ctx); + + return KAE_SUCCESS; +} + +static int sec_ciphers_sw_impl_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t inl) +{ + if (unlikely((ctx == NULL) || (out == NULL) || (in == NULL))) { + US_ERR("kae sw cipher parameter is null.ctx=%p, in=%p, out=%p, inl=%d", ctx, out, in, (int)inl); + return KAE_FAIL; + } + + cipher_priv_ctx_t *priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); + + if (unlikely(priv_ctx == NULL)) { + US_ERR("state is NULL"); + return KAE_FAIL; + } + + const EVP_CIPHER *sw_cipher = sec_ciphers_get_cipher_sw_impl(EVP_CIPHER_CTX_nid(ctx)); + + if (unlikely(sw_cipher == NULL)) { + US_ERR("get OpenSSL cipher failed. ctx=%p", ctx); + return KAE_FAIL; + } + + EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx->sw_ctx_data); + int ret = EVP_CIPHER_meth_get_do_cipher(sw_cipher)(ctx, out, in, inl); + + if (unlikely(ret == OPENSSL_FAIL)) { + EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx); + US_ERR("OpenSSL do cipher failed. 
ctx=%p", ctx); + return KAE_FAIL; + } + + EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx); + + US_DEBUG("kae sw impl do cipher success, ctx=%p", ctx); + + return KAE_SUCCESS; +} + +int sec_ciphers_software_encrypt(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx) +{ + int ret = sec_ciphers_sw_impl_do_cipher(ctx, priv_ctx->out, priv_ctx->in, priv_ctx->left_len); + + if (ret != KAE_SUCCESS) { + US_ERR("kae software do cipher or small packet cipher offload failed."); + return KAE_FAIL; + } + + /* after openssl software do cipher, sync priv data to next priv data for hareware to contiune to do cipher */ + ret = sec_ciphers_sw_hw_ctx_sync(ctx, SEC_CIHPER_SYNC_S2W); + if (unlikely(ret != KAE_SUCCESS)) { + US_ERR("kae sw hw state sync failed."); + return KAE_FAIL; + } + + US_DEBUG("Cipher success, ctx=%p", ctx); + return KAE_SUCCESS; +} + +int sec_ciphers_sw_hw_ctx_sync(EVP_CIPHER_CTX *ctx, sec_cipher_priv_ctx_syncto_t direction) +{ + cipher_priv_ctx_t *priv_ctx = NULL; + unsigned int num = 0; + unsigned int offset = 0; + + US_DEBUG("sw hw state sync start. 
ctx=%p", ctx); + + priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (unlikely(priv_ctx == NULL)) { + US_ERR("cipher priv ctx data is NULL."); + return KAE_FAIL; + } + + if (direction == SEC_CIHPER_SYNC_S2W) { + kae_memcpy(priv_ctx->iv, EVP_CIPHER_CTX_iv_noconst(ctx), EVP_CIPHER_CTX_iv_length(ctx)); + num = EVP_CIPHER_CTX_num(ctx); + if (num) + sec_ciphers_ctr_iv_sub(priv_ctx->iv); + priv_ctx->offset = num; + priv_ctx->left_len = 0; + } else { + if (priv_ctx->do_cipher_len != 0) { + offset = priv_ctx->offset; + kae_memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), priv_ctx->iv, EVP_CIPHER_CTX_iv_length(ctx)); + EVP_CIPHER_CTX_set_num(ctx, offset); + } + } + + US_DEBUG("state sync success, direct=%d[1:SW_TO_HW, 2:HW_TO_SW], offset=%d", direction, num); + + return KAE_SUCCESS; +} + + +int sec_ciphers_ecb_encryt(xts_ecb_data *ecb_encryto, uint8_t *buf_out, uint8_t *buf_in, int buf_len) +{ + int out_len1, tmplen; + /* Encrypt */ + if (!EVP_EncryptInit_ex(ecb_encryto->ecb_ctx, ecb_encryto->cipher_type, NULL, ecb_encryto->key2, NULL)) { + US_ERR("EVP_EncryptInit failed.\n"); + return KAE_FAIL; + } + EVP_CIPHER_CTX_set_padding(ecb_encryto->ecb_ctx, 0); + + if (!EVP_EncryptUpdate(ecb_encryto->ecb_ctx, buf_out, &out_len1, buf_in, buf_len)) { + US_ERR("EVP_EncryptUpdate failed.\n"); + return KAE_FAIL; + } + + if (!EVP_EncryptFinal_ex(ecb_encryto->ecb_ctx, buf_out + out_len1, &tmplen)) { + /* Error */ + return KAE_FAIL; + } + out_len1 += tmplen; + + return KAE_SUCCESS; +} + +int sec_ciphers_ecb_decrypt(xts_ecb_data *ecb_encryto, uint8_t *buf_out, uint8_t *buf_in, int buf_len) +{ + int out_len1, tmplen; + + /* decrypt */ + if (!EVP_DecryptInit_ex(ecb_encryto->ecb_ctx, ecb_encryto->cipher_type, NULL, ecb_encryto->key2, NULL)) { + US_ERR("EVP_EncryptInit failed.\n"); + return KAE_FAIL; + } + + EVP_CIPHER_CTX_set_padding(ecb_encryto->ecb_ctx, 0); + + if (!EVP_DecryptUpdate(ecb_encryto->ecb_ctx, buf_out, &out_len1, buf_in, buf_len)) { + US_ERR("EVP_EncryptUpdate 
failed.\n"); + return KAE_FAIL; + } + + if (!EVP_DecryptFinal_ex(ecb_encryto->ecb_ctx, buf_out + out_len1, &tmplen)) { + /* Error */ + return KAE_FAIL; + } + out_len1 += tmplen; + + return KAE_SUCCESS; +} diff --git a/kae_engine/src/v1/alg/ciphers/sec_ciphers_soft.h b/kae_engine/src/v1/alg/ciphers/sec_ciphers_soft.h new file mode 100644 index 0000000..73ded7d --- /dev/null +++ b/kae_engine/src/v1/alg/ciphers/sec_ciphers_soft.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the cipher interface for soft ciphers + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/***************************************************************************** + * @file sec_ciphers_soft.h + * + * This file provides the cipher interface for soft ciphers + * + *****************************************************************************/ + +#ifndef SEC_CIPHERS_SOFT_H +#define SEC_CIPHERS_SOFT_H + +#include "sec_ciphers.h" + +typedef struct cipher_threshold_table_s { + int nid; + int threshold; +} cipher_threshold_table_t; + +typedef struct sw_cipher_s { + int nid; + const EVP_CIPHER *(*get_cipher)(void); +} sw_cipher_t; + +const EVP_CIPHER *sec_ciphers_get_cipher_sw_impl(int nid); +int sec_ciphers_sw_get_threshold(int nid); +int sec_ciphers_sw_impl_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc); +int sec_ciphers_sw_impl_cleanup(EVP_CIPHER_CTX *ctx); +int sec_ciphers_software_encrypt(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx); +int sec_ciphers_sw_hw_ctx_sync(EVP_CIPHER_CTX *ctx, sec_cipher_priv_ctx_syncto_t direction); +int sec_ciphers_ecb_encryt(xts_ecb_data *ecb_encryto, uint8_t *buf_out, uint8_t *buf_in, int buf_len); +int sec_ciphers_ecb_decrypt(xts_ecb_data *ecb_encryto, uint8_t *buf_out, uint8_t *buf_in, int buf_len); + + +#endif + diff --git a/kae_engine/src/v1/alg/ciphers/sec_ciphers_utils.c b/kae_engine/src/v1/alg/ciphers/sec_ciphers_utils.c new file mode 100644 index 0000000..27631db --- /dev/null +++ b/kae_engine/src/v1/alg/ciphers/sec_ciphers_utils.c @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine utils dealing with wrapdrive + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/***************************************************************************** + * @file sec_ciphers_utils.c + * + * This file provides the interface for SEC engine dealing with wrapdrive + * + *****************************************************************************/ + + +#include "sec_ciphers_utils.h" +#include "sec_ciphers_soft.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" + +int sec_ciphers_get_cipher_mode(int nid) +{ + uint32_t c_mode = NO_C_MODE; + + switch (nid) { + case NID_aes_128_ecb: + case NID_aes_192_ecb: + case NID_aes_256_ecb: + case NID_sm4_ecb: + c_mode = ECB; + break; + case NID_aes_128_cbc: + case NID_aes_192_cbc: + case NID_aes_256_cbc: + case NID_sm4_cbc: + c_mode = CBC; + break; + case NID_aes_128_ctr: + case NID_aes_192_ctr: + case NID_aes_256_ctr: + case NID_sm4_ctr: + c_mode = CTR; + break; + case NID_aes_128_xts: + case NID_aes_256_xts: + c_mode = XTS; + break; + case NID_sm4_ofb128: + c_mode = OFB; + break; + default: + US_WARN("nid=%d don't support by sec engine.", nid); + break; + } + + return c_mode; +} + +int sec_ciphers_get_cipher_alg(int nid) +{ + uint32_t c_alg = NO_C_ALG; + + switch (nid) { + case NID_sm4_ctr: + case NID_sm4_cbc: + case NID_sm4_ofb128: + case NID_sm4_ecb: + c_alg = SM4; + break; + case NID_aes_128_ecb: + case NID_aes_192_ecb: + case NID_aes_256_ecb: + case NID_aes_128_cbc: + case NID_aes_192_cbc: + case NID_aes_256_cbc: + case NID_aes_128_ctr: + case NID_aes_192_ctr: + case NID_aes_256_ctr: + case NID_aes_128_xts: + case NID_aes_256_xts: + c_alg = AES; 
/*
 * Add c to the 128-bit big-endian CTR counter in place.
 * The carry is propagated from the least significant (last) byte upward.
 *
 * @param counter  16-byte big-endian counter buffer, updated in place.
 * @param c        amount to add.
 */
void sec_ciphers_ctr_iv_inc(uint8_t *counter, uint32_t c)
{
	uint32_t carry = c;
	int idx;

	for (idx = 15; idx >= 0; idx--) {
		carry += counter[idx];
		counter[idx] = (uint8_t)carry;
		carry >>= 8; /* keep only the overflow for the next byte */
	}
}
1 : 0; + counter[n] = (unsigned char)(counter[n] + c * 256 - 1); // algorithm para 256 + if (c == 0) + break; + } while (n); +} + +void sec_ciphers_update_iv(cipher_priv_ctx_t *tmp_docipher_ctx, int cipher_length) +{ + unsigned int inc_counter = 0; + + switch (tmp_docipher_ctx->c_mode) { + case CBC: + if (tmp_docipher_ctx->encrypt == OPENSSL_ENCRYPTION) + kae_memcpy(tmp_docipher_ctx->iv, tmp_docipher_ctx->out + cipher_length - IV_SIZE, IV_SIZE); + break; + case CTR: + inc_counter = cipher_length >> 4; // right shift 4 + sec_ciphers_ctr_iv_inc(tmp_docipher_ctx->iv, inc_counter); + break; + case XTS: + // update iv here + break; + default: + break; + } +} + +int sec_ciphers_is_iv_may_overflow(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx) +{ + unsigned int will_inc_counter = 0; + unsigned int current_counter = 0; + + if (sec_ciphers_get_cipher_mode(EVP_CIPHER_CTX_nid(ctx)) == CTR) { + // (input length + prev offset)/ 16 = will_inc_counter + will_inc_counter = (priv_ctx->inl + priv_ctx->offset) >> 4; // right shift 4 + current_counter = __iv_to_engine_counter(priv_ctx->iv); + if ((0xFFFFFFFFU - current_counter < will_inc_counter)) { + US_DEBUG("ciphers increase iv overflow 0xFFFFFFFF."); + return 1; + } + } + + return 0; +} + diff --git a/kae_engine/src/v1/alg/ciphers/sec_ciphers_utils.h b/kae_engine/src/v1/alg/ciphers/sec_ciphers_utils.h new file mode 100644 index 0000000..722b00c --- /dev/null +++ b/kae_engine/src/v1/alg/ciphers/sec_ciphers_utils.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the cipher interface for KAE engine utils dealing + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/***************************************************************************** + * @file sec_ciphers_utils.h + * + * This file provides the implementation for SEC engine utils dealing + * + *****************************************************************************/ + +#ifndef SEC_CIPHERS_CHECKER_H +#define SEC_CIPHERS_CHECKER_H + +#include "sec_ciphers.h" + +#define IV_SIZE 16 + +enum CIPHERS_MODE { + ECB, + CBC, + CTR, + XTS, + OFB, +}; + +enum CIPHERS_ALG { + SM4, + AES, + DES, + M_3DES, +}; + +int sec_ciphers_is_iv_may_overflow(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx); +int sec_ciphers_get_cipher_mode(int nid); +int sec_ciphers_get_cipher_alg(int nid); + +void sec_ciphers_ctr_iv_inc(uint8_t *counter, uint32_t c); +void sec_ciphers_ctr_iv_sub(uint8_t *counter); +void sec_ciphers_xts_iv_inc(cipher_priv_ctx_t *priv_ctx); + +void sec_ciphers_update_iv(cipher_priv_ctx_t *tmp_docipher_ctx, int cipher_length); + +#endif + diff --git a/kae_engine/src/v1/alg/ciphers/sec_ciphers_wd.c b/kae_engine/src/v1/alg/ciphers/sec_ciphers_wd.c new file mode 100644 index 0000000..cd84c54 --- /dev/null +++ b/kae_engine/src/v1/alg/ciphers/sec_ciphers_wd.c @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE ciphers using wd interface + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/***************************************************************************** + * @file sec_cipher_wd.c + * + * This file provides the implementation for SEC ciphers using wd interface + * + *****************************************************************************/ +#include "sec_ciphers_wd.h" +#include "sec_ciphers_utils.h" +#include "../../wdmngr/wd_queue_memory.h" +#include "../../utils/engine_utils.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" + +#define OUTPUT_CACHE_SIZE (256*1024) +#define INPUT_CACHE_SIZE (256*1024) +#define MAX_KEY_SIZE 64 +#define MAX_IV_SIZE 16 + +KAE_QUEUE_POOL_HEAD_S *g_sec_ciphers_qnode_pool; +static cipher_engine_ctx_t *wd_ciphers_new_engine_ctx(KAE_QUEUE_DATA_NODE_S *q_node, cipher_priv_ctx_t *priv_ctx); + +void wd_ciphers_free_engine_ctx(void *engine_ctx) +{ + cipher_engine_ctx_t *e_cipher_ctx = (cipher_engine_ctx_t *)engine_ctx; + + if (e_cipher_ctx == NULL) + return; + + if (e_cipher_ctx->op_data.in && e_cipher_ctx->setup.br.usr) { + e_cipher_ctx->setup.br.free(e_cipher_ctx->setup.br.usr, (void *)e_cipher_ctx->op_data.in); + e_cipher_ctx->op_data.in = NULL; + } + + if (e_cipher_ctx->op_data.out && e_cipher_ctx->setup.br.usr) { + e_cipher_ctx->setup.br.free(e_cipher_ctx->setup.br.usr, (void *)e_cipher_ctx->op_data.out); + e_cipher_ctx->op_data.out = NULL; + } + + if (e_cipher_ctx->op_data.iv && e_cipher_ctx->setup.br.usr) { + e_cipher_ctx->setup.br.free(e_cipher_ctx->setup.br.usr, (void *)e_cipher_ctx->op_data.iv); + e_cipher_ctx->op_data.iv = NULL; + } + + 
OPENSSL_free(e_cipher_ctx); + e_cipher_ctx = NULL; +} + +static cipher_engine_ctx_t *wd_ciphers_new_engine_ctx(KAE_QUEUE_DATA_NODE_S *q_node, cipher_priv_ctx_t *priv_ctx) +{ + cipher_engine_ctx_t *e_cipher_ctx = NULL; + + e_cipher_ctx = (cipher_engine_ctx_t *)OPENSSL_malloc(sizeof(cipher_engine_ctx_t)); + if (e_cipher_ctx == NULL) { + US_ERR("OPENSSL_malloc ctx failed"); + return NULL; + } + kae_memset(e_cipher_ctx, 0, sizeof(cipher_engine_ctx_t)); + + e_cipher_ctx->setup.br.alloc = kae_wd_alloc_blk; + e_cipher_ctx->setup.br.free = kae_wd_free_blk; + e_cipher_ctx->setup.br.iova_map = kae_dma_map; + e_cipher_ctx->setup.br.iova_unmap = kae_dma_unmap; + e_cipher_ctx->setup.br.usr = q_node->kae_queue_mem_pool; + + e_cipher_ctx->op_data.in = e_cipher_ctx->setup.br.alloc(e_cipher_ctx->setup.br.usr, INPUT_CACHE_SIZE); + if (e_cipher_ctx->op_data.in == NULL) { + US_ERR("alloc opdata in buf failed"); + goto err; + } + + e_cipher_ctx->op_data.out = e_cipher_ctx->setup.br.alloc(e_cipher_ctx->setup.br.usr, OUTPUT_CACHE_SIZE); + if (e_cipher_ctx->op_data.out == NULL) { + US_ERR("alloc opdata out buf failed"); + goto err; + } + + e_cipher_ctx->op_data.iv = e_cipher_ctx->setup.br.alloc(e_cipher_ctx->setup.br.usr, priv_ctx->iv_len); + if (e_cipher_ctx->op_data.iv == NULL) { + US_ERR("alloc opdata iv buf failed"); + goto err; + } + + e_cipher_ctx->priv_ctx = priv_ctx; + e_cipher_ctx->q_node = q_node; + q_node->engine_ctx = e_cipher_ctx; + + return e_cipher_ctx; + +err: + (void)wd_ciphers_free_engine_ctx(e_cipher_ctx); + + return NULL; +} + +static int wd_ciphers_init_engine_ctx(cipher_engine_ctx_t *e_cipher_ctx) +{ + struct wd_queue *q = e_cipher_ctx->q_node->kae_wd_queue; + cipher_priv_ctx_t *priv_ctx = e_cipher_ctx->priv_ctx; + + if (e_cipher_ctx->wd_ctx != NULL) { + US_WARN("wd ctx is in used by other ciphers"); + + return KAE_FAIL; + } + + e_cipher_ctx->setup.alg = (enum wcrypto_cipher_alg)priv_ctx->c_alg; // for example: WD_CIPHER_SM4; + e_cipher_ctx->setup.mode = (enum 
wcrypto_cipher_mode)priv_ctx->c_mode; // for example: WD_CIPHER_CBC; + e_cipher_ctx->setup.cb = (wcrypto_cb)sec_ciphers_cb; + e_cipher_ctx->wd_ctx = wcrypto_create_cipher_ctx(q, &e_cipher_ctx->setup); + + if (e_cipher_ctx->wd_ctx == NULL) { + US_ERR("wd create sec cipher ctx fail!"); + return KAE_FAIL; + } + + wcrypto_set_cipher_key(e_cipher_ctx->wd_ctx, priv_ctx->key, priv_ctx->key_len); + + return KAE_SUCCESS; +} + +cipher_engine_ctx_t *wd_ciphers_get_engine_ctx(cipher_priv_ctx_t *priv_ctx) +{ + KAE_QUEUE_DATA_NODE_S *q_node = NULL; + cipher_engine_ctx_t *e_cipher_ctx = NULL; + + if (unlikely(priv_ctx == NULL)) { + US_ERR("sec cipher priv ctx NULL!"); + return NULL; + } + + q_node = kae_get_node_from_pool(g_sec_ciphers_qnode_pool); + if (q_node == NULL) { + US_ERR_LIMIT("failed to get hardware queue"); + return NULL; + } + + e_cipher_ctx = (cipher_engine_ctx_t *)q_node->engine_ctx; + if (e_cipher_ctx == NULL) { + e_cipher_ctx = wd_ciphers_new_engine_ctx(q_node, priv_ctx); + if (e_cipher_ctx == NULL) { + US_WARN("sec new engine ctx fail!"); + (void)kae_put_node_to_pool(g_sec_ciphers_qnode_pool, q_node); + return NULL; + } + } + + e_cipher_ctx->priv_ctx = priv_ctx; + + if (wd_ciphers_init_engine_ctx(e_cipher_ctx) == KAE_FAIL) { + US_WARN("init engine ctx fail!"); + wd_ciphers_put_engine_ctx(e_cipher_ctx); + return NULL; + } + + return e_cipher_ctx; +} + +void wd_ciphers_put_engine_ctx(cipher_engine_ctx_t *e_cipher_ctx) +{ + if (unlikely(e_cipher_ctx == NULL)) { + US_WARN("sec cipher engine ctx NULL!"); + return; + } + + if (e_cipher_ctx->wd_ctx != NULL) { + wcrypto_del_cipher_ctx(e_cipher_ctx->wd_ctx); + e_cipher_ctx->wd_ctx = NULL; + } + + if (e_cipher_ctx->priv_ctx && e_cipher_ctx->priv_ctx->ecb_encryto) { + if (e_cipher_ctx->priv_ctx->ecb_encryto->ecb_ctx != NULL) { + EVP_CIPHER_CTX_free(e_cipher_ctx->priv_ctx->ecb_encryto->ecb_ctx); + e_cipher_ctx->priv_ctx->ecb_encryto->ecb_ctx = NULL; + } + + kae_free(e_cipher_ctx->priv_ctx->ecb_encryto->key2); + 
kae_free(e_cipher_ctx->priv_ctx->ecb_encryto->encryto_iv); + kae_free(e_cipher_ctx->priv_ctx->ecb_encryto->iv_out); + kae_free(e_cipher_ctx->priv_ctx->ecb_encryto); + } + + if (e_cipher_ctx->q_node != NULL) + (void)kae_put_node_to_pool(g_sec_ciphers_qnode_pool, e_cipher_ctx->q_node); + + e_cipher_ctx = NULL; +} + +int wd_ciphers_do_crypto_impl(cipher_engine_ctx_t *e_cipher_ctx) +{ + int ret = -WD_EINVAL; + int trycount = 0; + + if (unlikely(e_cipher_ctx == NULL)) { + US_ERR("do cipher ctx NULL!"); + return KAE_FAIL; + } + +again: + ret = wcrypto_do_cipher(e_cipher_ctx->wd_ctx, &e_cipher_ctx->op_data, NULL); + if (ret != WD_SUCCESS) { + if (ret == -WD_EBUSY && trycount <= 5) { // try 5 times + US_WARN("do cipher busy, retry again!"); + trycount++; + goto again; + } else { + US_ERR("do cipher failed!"); + return KAE_FAIL; + } + } + + return KAE_SUCCESS; +} + +void wd_ciphers_set_input_data(cipher_engine_ctx_t *e_cipher_ctx) +{ + // fill engine ctx opdata + cipher_priv_ctx_t *priv_ctx = e_cipher_ctx->priv_ctx; + + kae_memcpy(((uint8_t *)e_cipher_ctx->op_data.in + priv_ctx->offset), priv_ctx->in, priv_ctx->do_cipher_len); + + if (priv_ctx->encrypt == OPENSSL_ENCRYPTION) + e_cipher_ctx->op_data.op_type = WCRYPTO_CIPHER_ENCRYPTION; + else + e_cipher_ctx->op_data.op_type = WCRYPTO_CIPHER_DECRYPTION; + + e_cipher_ctx->op_data.in_bytes = priv_ctx->do_cipher_len + priv_ctx->offset; + + // the real out data start at opdata.out + offset + e_cipher_ctx->op_data.out_bytes = priv_ctx->offset + priv_ctx->do_cipher_len; + kae_memcpy(e_cipher_ctx->op_data.iv, priv_ctx->iv, priv_ctx->iv_len); + e_cipher_ctx->op_data.iv_bytes = priv_ctx->iv_len; +} + +void wd_ciphers_get_output_data(cipher_engine_ctx_t *e_cipher_ctx) +{ + cipher_priv_ctx_t *priv_ctx = e_cipher_ctx->priv_ctx; + + // the real out data start at opdata.out + offset + kae_memcpy(priv_ctx->out, (uint8_t *)e_cipher_ctx->op_data.out + priv_ctx->offset, + priv_ctx->do_cipher_len); +} + +uint32_t 
wd_ciphers_get_do_cipher_len(uint32_t offset, int leftlen) +{ + uint32_t do_cipher_len = 0; + int max_input_datalen = INPUT_CACHE_SIZE - offset; + /* + * Note: Small encrypted block can be encrypted once. + * or the last encrypted slice of a large encrypted block + */ + if (leftlen <= max_input_datalen) + do_cipher_len = leftlen; + else + do_cipher_len = max_input_datalen; + + return do_cipher_len; +} + +KAE_QUEUE_POOL_HEAD_S *wd_ciphers_get_qnode_pool(void) +{ + return g_sec_ciphers_qnode_pool; +} + +int wd_ciphers_init_qnode_pool(void) +{ + kae_queue_pool_destroy(g_sec_ciphers_qnode_pool, wd_ciphers_free_engine_ctx); + + g_sec_ciphers_qnode_pool = kae_init_queue_pool(WCRYPTO_CIPHER); + if (g_sec_ciphers_qnode_pool == NULL) { + US_ERR("do cipher ctx NULL!"); + return KAE_FAIL; + } + + return KAE_SUCCESS; +} + +void wd_ciphers_uninit_qnode_pool(void) +{ + kae_queue_pool_destroy(g_sec_ciphers_qnode_pool, wd_ciphers_free_engine_ctx); + g_sec_ciphers_qnode_pool = NULL; +} diff --git a/kae_engine/src/v1/alg/ciphers/sec_ciphers_wd.h b/kae_engine/src/v1/alg/ciphers/sec_ciphers_wd.h new file mode 100644 index 0000000..4801fd1 --- /dev/null +++ b/kae_engine/src/v1/alg/ciphers/sec_ciphers_wd.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the cipher interface for KAE ciphers using wd interface + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/***************************************************************************** + * @file sec_cipher_wd.h + * + * This file provides the interface for SEC ciphers using wd interface + * + *****************************************************************************/ + +#ifndef SEC_CIPHERS_WD_H +#define SEC_CIPHERS_WD_H + +#include "sec_ciphers.h" + +extern KAE_QUEUE_POOL_HEAD_S *g_sec_ciphers_qnode_pool; + +cipher_engine_ctx_t *wd_ciphers_get_engine_ctx(cipher_priv_ctx_t *priv_ctx); +void wd_ciphers_put_engine_ctx(cipher_engine_ctx_t *e_cipher_ctx); +int wd_ciphers_do_crypto_impl(cipher_engine_ctx_t *e_cipher_ctx); + +void wd_ciphers_set_input_data(cipher_engine_ctx_t *e_cipher_ctx); +void wd_ciphers_get_output_data(cipher_engine_ctx_t *e_cipher_ctx); +uint32_t wd_ciphers_get_do_cipher_len(uint32_t offset, int leftlen); + +int wd_ciphers_init_qnode_pool(void); +void wd_ciphers_uninit_qnode_pool(void); + +KAE_QUEUE_POOL_HEAD_S *wd_ciphers_get_qnode_pool(void); +void wd_ciphers_free_engine_ctx(void *engine_ctx); + +#endif + diff --git a/kae_engine/src/v1/alg/dh/hpre_dh.c b/kae_engine/src/v1/alg/dh/hpre_dh.c new file mode 100644 index 0000000..5a05970 --- /dev/null +++ b/kae_engine/src/v1/alg/dh/hpre_dh.c @@ -0,0 +1,313 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine DH. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "hpre_dh.h" +#include "hpre_dh_wd.h" +#include "hpre_dh_soft.h" +#include "hpre_dh_util.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_opensslerr.h" +#include "../../async/async_task_queue.h" +#include "../../utils/engine_log.h" + +#define DH768BITS 768 +#define DH1024BITS 1024 +#define DH1536BITS 1536 +#define DH2048BITS 2048 +#define DH3072BITS 3072 +#define DH4096BITS 4096 + +#define GENERATOR_2 2 + +#ifndef OPENSSL_NO_DH +const int DHPKEYMETH_IDX = 1; +#else +const int DHPKEYMETH_IDX = -1; +#endif + +const char *g_hpre_dh_device = "hisi_hpre"; +static DH_METHOD *g_hpre_dh_method; +static EVP_PKEY_METHOD *g_hpre_dh_pkey_meth; + +static int hpre_dh_generate_key(DH *dh); + +static int hpre_dh_compute_key(unsigned char *key, const BIGNUM *pub_key, DH *dh); + +static int hpre_db_bn_mod_exp( + const DH *dh, BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); + +static int check_dh_bit_useful(const int bit); + +static int prepare_dh_data(const int bits, const BIGNUM *g, DH *dh, hpre_dh_engine_ctx_t **eng_ctx, BIGNUM **priv_key); + +static int hpre_dh_ctx_poll(void *engine_ctx); + +const DH_METHOD *hpre_get_dh_methods(void) +{ + int ret = 1; + + if (g_hpre_dh_method != NULL) + return g_hpre_dh_method; + + /* + * if (!kae_get_device(g_hpre_dh_device)) { + * const DH_METHOD* default_soft_method = DH_OpenSSL(); + * return default_soft_method; + * } + */ + g_hpre_dh_method = DH_meth_new("HPRE DH method", 0); + if (g_hpre_dh_method == NULL) { + KAEerr(KAE_F_HPRE_GET_DH_METHODS, KAE_R_MALLOC_FAILURE); + US_ERR("Failed to allocate HPRE DH methods"); + return NULL; + } + + ret &= DH_meth_set_generate_key(g_hpre_dh_method, hpre_dh_generate_key); + ret &= DH_meth_set_compute_key(g_hpre_dh_method, hpre_dh_compute_key); + ret &= DH_meth_set_bn_mod_exp(g_hpre_dh_method, hpre_db_bn_mod_exp); + if (ret == 0) { + KAEerr(KAE_F_HPRE_GET_DH_METHODS, KAE_R_DH_SET_METHODS_FAILURE); + 
US_ERR("Failed to set HPRE DH methods"); + return NULL; + } + + return g_hpre_dh_method; +} + +int hpre_module_dh_init(void) +{ + wd_hpre_dh_init_qnode_pool(); + + (void)get_dh_pkey_meth(); + (void)hpre_get_dh_methods(); + + /* register async poll func */ + async_register_poll_fn_v1(ASYNC_TASK_DH, hpre_dh_ctx_poll); + + return HPRE_DH_SUCCESS; +} + +void hpre_dh_destroy(void) +{ + if (g_hpre_dh_method != NULL) { + DH_meth_free(g_hpre_dh_method); + g_hpre_dh_method = NULL; + } +} + +EVP_PKEY_METHOD *get_dh_pkey_meth(void) +{ + const EVP_PKEY_METHOD *def_dh = EVP_PKEY_meth_get0(DHPKEYMETH_IDX); + + if (g_hpre_dh_pkey_meth == NULL) { + g_hpre_dh_pkey_meth = EVP_PKEY_meth_new(EVP_PKEY_DH, 0); + if (g_hpre_dh_pkey_meth == NULL) { + US_ERR("failed to new pkey meth"); + return NULL; + } + EVP_PKEY_meth_copy(g_hpre_dh_pkey_meth, def_dh); + } + + return g_hpre_dh_pkey_meth; +} + +EVP_PKEY_METHOD *get_dsa_pkey_meth(void) +{ + return (EVP_PKEY_METHOD *)EVP_PKEY_meth_get0(DHPKEYMETH_IDX); +} + +static int hpre_dh_ctx_poll(void *engine_ctx) +{ + int ret; + hpre_dh_engine_ctx_t *eng_ctx = (hpre_dh_engine_ctx_t *)engine_ctx; + struct wd_queue *q = eng_ctx->qlist->kae_wd_queue; +poll_again: + ret = wcrypto_dh_poll(q, 1); + if (!ret) { + goto poll_again; + } else if (ret < 0) { + US_ERR("dh poll fail!\n"); + return ret; + } + return ret; +} + +static int hpre_dh_generate_key(DH *dh) +{ + int bits = DH_bits(dh); + const BIGNUM *p = NULL; + const BIGNUM *g = NULL; + const BIGNUM *q = NULL; + BIGNUM *pub_key = NULL; + BIGNUM *priv_key = NULL; + hpre_dh_engine_ctx_t *eng_ctx = NULL; + int ret = HPRE_DH_FAIL; + + if (dh == NULL) { + KAEerr(KAE_F_HPRE_DH_KEYGEN, KAE_R_DH_INVALID_PARAMETER); + US_ERR("DH_BUILTIN_KEYGEN KAE_R_DH_INVALID_PARAMETER"); + return HPRE_DH_FAIL; + } + + hpre_dh_soft_get_pg(dh, &p, &g, &q); + if (p == NULL || g == NULL) { + KAEerr(KAE_F_HPRE_DH_KEYGEN, KAE_R_DH_INVALID_PARAMETER); + US_ERR("invalid g or p."); + return HPRE_DH_FAIL; + } + // check whether it is dsa 
parameter. + CHECK_AND_GOTO(q != NULL, end_soft, "q is not null, then switch to soft!"); + + // check whether bits exceeds the limit. + if (bits > OPENSSL_DH_MAX_MODULUS_BITS) { + KAEerr(KAE_F_HPRE_DH_KEYGEN, KAE_R_DH_KEY_SIZE_TOO_LARGE); + US_ERR("DH_BUILTIN_KEYGEN DH_KEY_SIZE_TOO_LARGE"); + return HPRE_DH_FAIL; + } + + ret = prepare_dh_data(bits, g, dh, &eng_ctx, &priv_key); + CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "prepare dh data failed!"); + + // construct opdata + ret = hpre_dh_fill_genkey_opdata(g, p, priv_key, eng_ctx); + CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "fill opdata fail then switch to soft!"); + + // call wd api + ret = hpre_dh_genkey(eng_ctx); + CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "hpre generate dh key failed.switch to soft!"); + + // get public key from opdata + ret = hpre_dh_get_pubkey(eng_ctx, &pub_key); + CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "get pub key failed.switch to soft!"); + + // set public key and secret key to the DH. 
+ hpre_dh_soft_set_pkeys(dh, pub_key, priv_key); + +end_soft: + if (pub_key != DH_get0_pub_key(dh)) + BN_free(pub_key); + if (priv_key != DH_get0_priv_key(dh)) + BN_free(priv_key); + hpre_dh_free_eng_ctx(eng_ctx); + + if (ret != HPRE_DH_SUCCESS) + return hpre_dh_soft_generate_key(dh); + + US_DEBUG("hpre dh generate key success!"); + return HPRE_DH_SUCCESS; +} + +static int hpre_dh_compute_key(unsigned char *key, const BIGNUM *pub_key, DH *dh) +{ + int bits = DH_bits(dh); + const BIGNUM *p = NULL; + const BIGNUM *g = NULL; + const BIGNUM *q = NULL; + BIGNUM *priv_key = NULL; + hpre_dh_engine_ctx_t *eng_ctx = NULL; + int ret = HPRE_DH_FAIL; + int ret_size = 0; + + if (dh == NULL || key == NULL || pub_key == NULL || DH_get0_priv_key(dh) == NULL) { + KAEerr(KAE_F_HPRE_DH_KEYCOMP, KAE_R_DH_INVALID_PARAMETER); + US_ERR("KAE_F_HPRE_DH_KEYCOMP KAE_R_DH_INVALID_PARAMETER"); + return HPRE_DH_FAIL; + } + + hpre_dh_soft_get_pg(dh, &p, &g, &q); + if (p == NULL || g == NULL) { + KAEerr(KAE_F_HPRE_DH_KEYCOMP, KAE_R_DH_INVALID_PARAMETER); + US_ERR("invalid g or p."); + return HPRE_DH_FAIL; + } + // check whether it is dsa parameter. + CHECK_AND_GOTO(q != NULL, end_soft, "q is not null, then switch to soft!"); + + // check whether bits exceeds the limit. + if (bits > OPENSSL_DH_MAX_MODULUS_BITS) { + KAEerr(KAE_F_HPRE_DH_KEYCOMP, KAE_R_DH_KEY_SIZE_TOO_LARGE); + US_ERR("DH_BUILTIN_KEYGEN DH_KEY_SIZE_TOO_LARGE"); + return HPRE_DH_FAIL; + } + + ret = prepare_dh_data(bits, g, dh, &eng_ctx, &priv_key); + CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "prepare dh data failed!"); + + // construct opdata + ret = hpre_dh_fill_compkey_opdata(g, p, priv_key, pub_key, eng_ctx); + CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "fill opdata fail then switch to soft!"); + + // call wd api to generate shared secret key. 
+ ret = hpre_dh_compkey(eng_ctx); + CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "hpre compute dh key failed.switch to soft!"); + + ret_size = hpre_dh_get_output_chars(eng_ctx, key); + +end_soft: + + hpre_dh_free_eng_ctx(eng_ctx); + + if (ret != HPRE_DH_SUCCESS) + return hpre_dh_soft_compute_key(key, pub_key, dh); + + US_DEBUG("hpre dh compute key success!"); + return ret_size; +} + +static int hpre_db_bn_mod_exp( + const DH *dh, BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx) +{ + return BN_mod_exp_mont(r, a, p, m, ctx, m_ctx); +} + +static int check_dh_bit_useful(const int bit) +{ + switch (bit) { + case DH768BITS: + case DH1024BITS: + case DH1536BITS: + case DH2048BITS: + case DH3072BITS: + case DH4096BITS: + return 1; + default: + break; + } + return 0; +} + +static int prepare_dh_data(const int bits, const BIGNUM *g, DH *dh, hpre_dh_engine_ctx_t **eng_ctx, BIGNUM **priv_key) +{ + int ret = HPRE_DH_FAIL; + bool is_g2 = BN_is_word(g, GENERATOR_2); + // check whether the bits is supported by hpre. + CHECK_AND_GOTO(!check_dh_bit_useful(bits), err, "op sizes not supported by hpre engine then back to soft!"); + + // get ctx + *eng_ctx = hpre_dh_get_eng_ctx(dh, bits, is_g2); + CHECK_AND_GOTO(*eng_ctx == NULL, err, "get eng ctx fail then switch to soft!"); + + // get private key + ret = hpre_dh_soft_try_get_priv_key(dh, priv_key); + CHECK_AND_GOTO(ret != OPENSSL_SUCCESS, err, "get priv key fail then switch to soft!"); + + return HPRE_DH_SUCCESS; +err: + return HPRE_DH_FAIL; +} diff --git a/kae_engine/src/v1/alg/dh/hpre_dh.h b/kae_engine/src/v1/alg/dh/hpre_dh.h new file mode 100644 index 0000000..974dbd5 --- /dev/null +++ b/kae_engine/src/v1/alg/dh/hpre_dh.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine DH. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef HPRE_DH_H +#define HPRE_DH_H + +#include + +const DH_METHOD *hpre_get_dh_methods(void); + +int hpre_module_dh_init(void); + +void hpre_dh_destroy(void); + +EVP_PKEY_METHOD *get_dh_pkey_meth(void); + +EVP_PKEY_METHOD *get_dsa_pkey_meth(void); + +#endif diff --git a/kae_engine/src/v1/alg/dh/hpre_dh_soft.c b/kae_engine/src/v1/alg/dh/hpre_dh_soft.c new file mode 100644 index 0000000..5f7303d --- /dev/null +++ b/kae_engine/src/v1/alg/dh/hpre_dh_soft.c @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for switch to soft dh. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hpre_dh_soft.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" + +static int generate_new_priv_key(const DH *dh, BIGNUM *new_priv_key); + +void hpre_dh_soft_get_pg(const DH *dh, const BIGNUM **p, const BIGNUM **g, const BIGNUM **q) +{ + DH_get0_pqg(dh, p, q, g); +} + +int hpre_dh_soft_try_get_priv_key(const DH *dh, BIGNUM **priv_key) +{ + int generate_new_key = 0; + BIGNUM *new_priv_key = NULL; + + // get the private key from dh. + *priv_key = (BIGNUM *)DH_get0_priv_key(dh); + if (*priv_key == NULL) { + new_priv_key = BN_secure_new(); + if (new_priv_key == NULL) + goto err; + generate_new_key = 1; + } + + if (generate_new_key) { + // generate random private key,referencing function 'generate_key' in openssl + if (generate_new_priv_key(dh, new_priv_key) == OPENSSL_FAIL) + goto err; + else + *priv_key = new_priv_key; + } + return OPENSSL_SUCCESS; + +err: + BN_free(new_priv_key); + return OPENSSL_FAIL; +} + +void hpre_dh_soft_set_pkeys(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key) +{ + const BIGNUM *old_pub = DH_get0_pub_key(dh); + const BIGNUM *old_priv = DH_get0_priv_key(dh); + + if (old_pub != pub_key && old_priv != priv_key) + DH_set0_key(dh, pub_key, priv_key); + else if (old_pub != pub_key) + DH_set0_key(dh, pub_key, NULL); + else if (old_priv != priv_key) + DH_set0_key(dh, NULL, priv_key); +} + +int hpre_dh_soft_generate_key(DH *dh) +{ + int (*dh_soft_generate_key)(DH *dh); + + dh_soft_generate_key = DH_meth_get_generate_key(DH_OpenSSL()); + int ret = dh_soft_generate_key(dh); + + if (ret < 0) { + US_ERR("dh soft key generate fail: %d", ret); + return OPENSSL_FAIL; + } + + return OPENSSL_SUCCESS; +} + +int hpre_dh_soft_compute_key(unsigned char *key, const BIGNUM *pub_key, DH *dh) +{ + int (*dh_soft_compute_key)(unsigned char *key, const BIGNUM *pub_key, DH *dh); + + dh_soft_compute_key = DH_meth_get_compute_key(DH_OpenSSL()); + int ret = dh_soft_compute_key(key, pub_key, dh); + + if (ret < 0) { + US_ERR("dh soft key 
compute fail: %d", ret); + return OPENSSL_FAIL; + } + + return ret; +} + +static int generate_new_priv_key(const DH *dh, BIGNUM *new_priv_key) +{ + const BIGNUM *q = DH_get0_q(dh); + int l; + + if (q) { + do { + if (!BN_priv_rand_range(new_priv_key, q)) + return OPENSSL_FAIL; + } while (BN_is_zero(new_priv_key) || BN_is_one(new_priv_key)); + } else { + l = DH_get_length(dh) ? DH_get_length(dh) : BN_num_bits(DH_get0_p(dh)) - 1; + if (!BN_priv_rand(new_priv_key, l, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ANY)) + return OPENSSL_FAIL; + } + + return OPENSSL_SUCCESS; +} diff --git a/kae_engine/src/v1/alg/dh/hpre_dh_soft.h b/kae_engine/src/v1/alg/dh/hpre_dh_soft.h new file mode 100644 index 0000000..65fc5aa --- /dev/null +++ b/kae_engine/src/v1/alg/dh/hpre_dh_soft.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for switch to soft dh. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef HPRE_DH_SOFT_H +#define HPRE_DH_SOFT_H + +#include + +/* + * get p, g, q in dh. + */ +void hpre_dh_soft_get_pg(const DH *dh, const BIGNUM **p, const BIGNUM **g, const BIGNUM **q); + +/* + * get private key in dh, if null, then generate a random one. + */ +int hpre_dh_soft_try_get_priv_key(const DH *dh, BIGNUM **priv_key); + +/* + * put private key and public key in the dh. 
+ */ +void hpre_dh_soft_set_pkeys(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key); + +/* + * call openssl API to generate public key . + */ +int hpre_dh_soft_generate_key(DH *dh); + +/* + * call openssl API to generate secret key . + */ +int hpre_dh_soft_compute_key(unsigned char *key, const BIGNUM *pub_key, DH *dh); + +#endif diff --git a/kae_engine/src/v1/alg/dh/hpre_dh_util.h b/kae_engine/src/v1/alg/dh/hpre_dh_util.h new file mode 100644 index 0000000..f9df2be --- /dev/null +++ b/kae_engine/src/v1/alg/dh/hpre_dh_util.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides common function for DH. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef HPRE_DH_UTILS_H +#define HPRE_DH_UTILS_H + +#define HPRE_DH_SUCCESS 1 +#define HPRE_DH_FAIL 0 + +#define CHECK_AND_GOTO(cond, goto_tag, log) \ + do { \ + if (cond) { \ + US_WARN(log); \ + goto goto_tag; \ + } \ + } while (0) + +#endif diff --git a/kae_engine/src/v1/alg/dh/hpre_dh_wd.c b/kae_engine/src/v1/alg/dh/hpre_dh_wd.c new file mode 100644 index 0000000..38a3d09 --- /dev/null +++ b/kae_engine/src/v1/alg/dh/hpre_dh_wd.c @@ -0,0 +1,427 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides wd api for DH. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#include "hpre_dh_wd.h" +#include "hpre_dh_util.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" +#include "../../async/async_callback.h" +#include "../../async/async_task_queue.h" +#include "../../async/async_event.h" +#include "../../utils/engine_check.h" +#include + +#define DH_GENERATOR_2 2 +#define DH_GENERATOR_5 5 +#define CHAR_BIT_SIZE 3 +#define DH_PARAMS_CNT 4 +#define MAX_SEND_TRY_CNTS 50 +#define WD_STATUS_BUSY (-EBUSY) + +KAE_QUEUE_POOL_HEAD_S *g_hpre_dh_qnode_pool; + +static hpre_dh_engine_ctx_t *hpre_dh_new_eng_ctx(DH *alg); + +static int hpre_dh_init_eng_ctx(hpre_dh_engine_ctx_t *eng_ctx, int bits, bool is_g2); + +static int hpre_dh_set_g(const BIGNUM *g, const int key_size, unsigned char *ag_bin, hpre_dh_engine_ctx_t *engine_ctx); + +static int hpre_dh_fill_g_p_priv_key(const BIGNUM *g, const BIGNUM *p, + const BIGNUM *priv_key, hpre_dh_engine_ctx_t *engine_ctx, + unsigned char *ag_bin); + +static int hpre_dh_internal_do(void *ctx, struct wcrypto_dh_op_data *opdata); + +static int hpre_dh_fill_pub_key(const BIGNUM *pub_key, hpre_dh_engine_ctx_t *engine_ctx, unsigned char *ag_bin); + +static void hpre_dh_free_opdata(hpre_dh_engine_ctx_t *eng_ctx); + +static int hpre_internal_do_dh(hpre_dh_engine_ctx_t *eng_ctx, enum wcrypto_dh_op_type op_type); + +static int hpre_dh_async(hpre_dh_engine_ctx_t *eng_ctx, + struct wcrypto_dh_op_data *opdata, op_done_t *op_done); + +void wd_hpre_dh_uninit_qnode_pool(void) +{ + kae_queue_pool_destroy(g_hpre_dh_qnode_pool, NULL); + g_hpre_dh_qnode_pool = NULL; +} + 
+int wd_hpre_dh_init_qnode_pool(void) +{ + kae_queue_pool_destroy(g_hpre_dh_qnode_pool, NULL); + + g_hpre_dh_qnode_pool = kae_init_queue_pool(WCRYPTO_DH); + if (g_hpre_dh_qnode_pool == NULL) { + US_ERR("hpre dh qnode poll init fail!\n"); + return KAE_FAIL; + } + + return KAE_SUCCESS; +} + +KAE_QUEUE_POOL_HEAD_S *wd_hpre_dh_get_qnode_pool(void) +{ + return g_hpre_dh_qnode_pool; +} + +hpre_dh_engine_ctx_t *hpre_dh_get_eng_ctx(DH *dh, int bits, bool is_g2) +{ + hpre_dh_engine_ctx_t *eng_ctx = hpre_dh_new_eng_ctx(dh); + + if (eng_ctx == NULL) { + US_WARN("new eng ctx fail then switch to soft!"); + return NULL; + } + + if (hpre_dh_init_eng_ctx(eng_ctx, bits, is_g2) == 0) { + hpre_dh_free_eng_ctx(eng_ctx); + US_WARN("init eng ctx fail then switch to soft!"); + return NULL; + } + return eng_ctx; +} + +int hpre_dh_fill_genkey_opdata( + const BIGNUM *g, const BIGNUM *p, const BIGNUM *priv_key, hpre_dh_engine_ctx_t *engine_ctx) +{ + unsigned char *ag_bin = NULL; + int key_size = engine_ctx->priv_ctx.key_size; + + // allocate data block + ag_bin = (unsigned char *)kae_wd_alloc_blk(engine_ctx->qlist->kae_queue_mem_pool, key_size); + if (!ag_bin) { + US_ERR("pool alloc ag_bin fail!"); + return -ENOMEM; + } + int ret = hpre_dh_fill_g_p_priv_key(g, p, priv_key, engine_ctx, ag_bin); + + if (ret != HPRE_DH_SUCCESS) { + kae_wd_free_blk(engine_ctx->qlist->kae_queue_mem_pool, ag_bin); + return ret; + } + engine_ctx->priv_ctx.block_addr = ag_bin; + + return HPRE_DH_SUCCESS; +} + +int hpre_dh_fill_compkey_opdata( + const BIGNUM *g, const BIGNUM *p, const BIGNUM *priv_key, const BIGNUM *pub_key, hpre_dh_engine_ctx_t *engine_ctx) +{ + unsigned char *ag_bin = NULL; + int key_size = engine_ctx->priv_ctx.key_size; + + ag_bin = (unsigned char *)kae_wd_alloc_blk(engine_ctx->qlist->kae_queue_mem_pool, key_size); + if (!ag_bin) { + US_ERR("pool alloc ag_bin fail!"); + return -ENOMEM; + } + int ret = hpre_dh_fill_g_p_priv_key(g, p, priv_key, engine_ctx, ag_bin); + + if (ret != HPRE_DH_SUCCESS) { 
+ kae_wd_free_blk(engine_ctx->qlist->kae_queue_mem_pool, ag_bin); + return ret; + } + + ret = hpre_dh_fill_pub_key(pub_key, engine_ctx, ag_bin); + if (ret != HPRE_DH_SUCCESS) + return ret; + engine_ctx->priv_ctx.block_addr = ag_bin; + + return HPRE_DH_SUCCESS; +} + +int hpre_dh_genkey(hpre_dh_engine_ctx_t *engine_ctx) +{ + return hpre_internal_do_dh(engine_ctx, WCRYPTO_DH_PHASE1); +} + +int hpre_dh_compkey(hpre_dh_engine_ctx_t *engine_ctx) +{ + return hpre_internal_do_dh(engine_ctx, WCRYPTO_DH_PHASE2); +} + +int hpre_dh_get_output_chars(hpre_dh_engine_ctx_t *engine_ctx, unsigned char *out) +{ + kae_memcpy(out, engine_ctx->opdata.pri, engine_ctx->opdata.pri_bytes); + return engine_ctx->opdata.pri_bytes; +} + +int hpre_dh_get_pubkey(hpre_dh_engine_ctx_t *engine_ctx, BIGNUM **pubkey) +{ + const unsigned char *pubkey_str = (const unsigned char *)engine_ctx->opdata.pri; + + if (pubkey_str == NULL) + return HPRE_DH_FAIL; + *pubkey = BN_bin2bn(pubkey_str, engine_ctx->opdata.pri_bytes, *pubkey); + if (*pubkey == NULL) + return HPRE_DH_FAIL; + + return HPRE_DH_SUCCESS; +} + +void hpre_dh_free_eng_ctx(hpre_dh_engine_ctx_t *eng_ctx) +{ + US_DEBUG("hpre dh free engine ctx start!"); + if (eng_ctx == NULL) { + US_DEBUG("no eng_ctx to free"); + return; + } + + if (eng_ctx->qlist != NULL) { + if (eng_ctx->ctx != NULL) + wcrypto_del_dh_ctx(eng_ctx->ctx); + kae_put_node_to_pool(g_hpre_dh_qnode_pool, eng_ctx->qlist); + } + + hpre_dh_free_opdata(eng_ctx); + + eng_ctx->priv_ctx.block_addr = NULL; + eng_ctx->priv_ctx.ssl_alg = NULL; + eng_ctx->qlist = NULL; + eng_ctx->ctx = NULL; + eng_ctx->opdata.pri = NULL; + eng_ctx->opdata.x_p = NULL; + eng_ctx->opdata.pv = NULL; + OPENSSL_free(eng_ctx); + eng_ctx = NULL; +} + +static int hpre_internal_do_dh(hpre_dh_engine_ctx_t *eng_ctx, enum wcrypto_dh_op_type op_type) +{ + int job_ret; + op_done_t op_done; + + async_init_op_done_v1(&op_done); + + eng_ctx->opdata.op_type = op_type; + if (op_done.job != NULL && kae_is_async_enabled()) { + if 
(async_setup_async_event_notification_v1(0) == 0) { + US_ERR("hpre async event notifying failed"); + async_cleanup_op_done_v1(&op_done); + return HPRE_DH_FAIL; + } + } else { + US_DEBUG("hpre dh no async Job or async disable, back to sync!"); + async_cleanup_op_done_v1(&op_done); + return hpre_dh_internal_do(eng_ctx->ctx, &eng_ctx->opdata); + } + + if (hpre_dh_async(eng_ctx, &eng_ctx->opdata, &op_done) == HPRE_DH_FAIL) + goto err; + + do { + job_ret = async_pause_job_v1(op_done.job, ASYNC_STATUS_OK); + if (job_ret == 0) { + US_DEBUG("- pthread_yidle -"); + kae_pthread_yield(); + } + } while (!op_done.flag || + ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(job_ret)); + + if (op_done.verifyRst <= 0) { + US_ERR("hpre dh verify result failed with %d", op_done.verifyRst); + async_cleanup_op_done_v1(&op_done); + return HPRE_DH_FAIL; + } + + async_cleanup_op_done_v1(&op_done); + + US_DEBUG("hpre dh do async job success!"); + return HPRE_DH_SUCCESS; + +err: + US_ERR("hpre dh do async job err"); + (void)async_clear_async_event_notification_v1(); + async_cleanup_op_done_v1(&op_done); + return HPRE_DH_FAIL; +} + +static void hpre_dh_free_opdata(hpre_dh_engine_ctx_t *eng_ctx) +{ + if (eng_ctx->priv_ctx.block_addr != NULL) { + if (eng_ctx->qlist != NULL) + eng_ctx->dh_setup.br.free(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->priv_ctx.block_addr); + } +} + +static hpre_dh_engine_ctx_t *hpre_dh_new_eng_ctx(DH *alg) +{ + hpre_dh_engine_ctx_t *eng_ctx = NULL; + + eng_ctx = (hpre_dh_engine_ctx_t *)OPENSSL_malloc(sizeof(hpre_dh_engine_ctx_t)); + if (eng_ctx == NULL) { + US_ERR("hpre engine_ctx malloc fail"); + return NULL; + } + kae_memset(eng_ctx, 0, sizeof(hpre_dh_engine_ctx_t)); + + eng_ctx->priv_ctx.ssl_alg = alg; + eng_ctx->qlist = kae_get_node_from_pool(g_hpre_dh_qnode_pool); + if (eng_ctx->qlist == NULL) { + US_ERR_LIMIT("error. 
get hardware queue failed"); + OPENSSL_free(eng_ctx); + eng_ctx = NULL; + return NULL; + } + return eng_ctx; +} + +static void hpre_dh_cb(const void *message, void *tag) +{ + if (!message || !tag) { + US_ERR("hpre cb params err!\n"); + return; + } + struct wcrypto_dh_msg *msg = (struct wcrypto_dh_msg *)message; + hpre_dh_engine_ctx_t *eng_ctx = (hpre_dh_engine_ctx_t *)tag; + + eng_ctx->opdata.pri = msg->out; + eng_ctx->opdata.pri_bytes = msg->out_bytes; + eng_ctx->opdata.status = msg->result; +} + +static int hpre_dh_init_eng_ctx(hpre_dh_engine_ctx_t *eng_ctx, int bits, bool is_g2) +{ + struct wd_queue *q = eng_ctx->qlist->kae_wd_queue; + struct wd_queue_mempool *pool = eng_ctx->qlist->kae_queue_mem_pool; + + // this is for ctx is in use.we dont need to re create ctx->ctx again + if (eng_ctx->ctx) + return OPENSSL_SUCCESS; + if (eng_ctx->ctx == NULL) { + if (bits == 0) + eng_ctx->priv_ctx.key_size = DH_size(eng_ctx->priv_ctx.ssl_alg); + else + eng_ctx->priv_ctx.key_size = bits >> CHAR_BIT_SIZE; + eng_ctx->priv_ctx.block_addr = NULL; + eng_ctx->dh_setup.key_bits = eng_ctx->priv_ctx.key_size << CHAR_BIT_SIZE; + eng_ctx->dh_setup.cb = hpre_dh_cb; + eng_ctx->dh_setup.br.alloc = kae_wd_alloc_blk; + eng_ctx->dh_setup.br.free = kae_wd_free_blk; + eng_ctx->dh_setup.br.usr = pool; + eng_ctx->dh_setup.is_g2 = is_g2; + eng_ctx->ctx = wcrypto_create_dh_ctx(q, &eng_ctx->dh_setup); + if (eng_ctx->ctx == NULL) { + US_ERR("create dh ctx fail!"); + return OPENSSL_FAIL; + } + } + + return OPENSSL_SUCCESS; +} + +static int hpre_dh_set_g(const BIGNUM *g, const int key_size, unsigned char *ag_bin, hpre_dh_engine_ctx_t *engine_ctx) +{ + struct wd_dtb g_dtb; + int ret; + __u32 gbytes = BN_bn2bin(g, ag_bin); + + g_dtb.data = (char *)ag_bin; + g_dtb.bsize = key_size; + g_dtb.dsize = gbytes; + + ret = wcrypto_set_dh_g(engine_ctx->ctx, &g_dtb); + if (ret) { + US_ERR("wcrypto_set_dh_g fail: %d", ret); + return HPRE_DH_FAIL; + } + return HPRE_DH_SUCCESS; +} + +static int 
hpre_dh_fill_g_p_priv_key( + const BIGNUM *g, const BIGNUM *p, const BIGNUM *priv_key, hpre_dh_engine_ctx_t *engine_ctx, unsigned char *ag_bin) +{ + unsigned char *apriv_key_bin = NULL; + unsigned char *ap_bin = NULL; + int key_size = engine_ctx->priv_ctx.key_size; + int ret = 0; + + apriv_key_bin = ag_bin + key_size; + ap_bin = apriv_key_bin + key_size; + memset(ag_bin, 0, key_size * DH_PARAMS_CNT); + + // construct data block of g + ret = hpre_dh_set_g(g, key_size, ag_bin, engine_ctx); + if (ret != HPRE_DH_SUCCESS) + return HPRE_DH_FAIL; + + // construct data block of p and private key + engine_ctx->opdata.pbytes = BN_bn2bin(p, ap_bin); + engine_ctx->opdata.xbytes = BN_bn2bin(priv_key, apriv_key_bin); + + engine_ctx->opdata.x_p = apriv_key_bin; + engine_ctx->opdata.pri = ap_bin + key_size; + + return HPRE_DH_SUCCESS; +} + +static int hpre_dh_internal_do(void *ctx, struct wcrypto_dh_op_data *opdata) +{ + int ret = wcrypto_do_dh(ctx, opdata, NULL); + + if (ret) { + US_ERR("wcrypto_do_dh fail: %d", ret); + return HPRE_DH_FAIL; + } else if (opdata->pri == NULL) { + US_ERR("output is empty"); + return HPRE_DH_FAIL; + } else { + return HPRE_DH_SUCCESS; + } +} + +static int hpre_dh_fill_pub_key(const BIGNUM *pub_key, hpre_dh_engine_ctx_t *engine_ctx, unsigned char *ag_bin) +{ + engine_ctx->opdata.pvbytes = BN_bn2bin(pub_key, ag_bin); + engine_ctx->opdata.pv = ag_bin; /* bob's public key here */ + return HPRE_DH_SUCCESS; +} + +static int hpre_dh_async(hpre_dh_engine_ctx_t *eng_ctx, + struct wcrypto_dh_op_data *opdata, op_done_t *op_done) +{ + int ret = 0; + int cnt = 0; + enum task_type type = ASYNC_TASK_DH; + void *tag = eng_ctx; + + do { + if (cnt > MAX_SEND_TRY_CNTS) + break; + ret = wcrypto_do_dh(eng_ctx->ctx, opdata, tag); + if (ret == WD_STATUS_BUSY) { + if ((async_wake_job_v1(op_done->job, ASYNC_STATUS_EAGAIN) == 0 || + (async_pause_job_v1(op_done->job, ASYNC_STATUS_EAGAIN) == 0))) { + US_ERR("hpre wake job or hpre pause job fail!"); + ret = 0; + break; + } + 
cnt++; + } + } while (ret == WD_STATUS_BUSY); + + if (ret != WD_SUCCESS) + return HPRE_DH_FAIL; + + if (async_add_poll_task_v1(eng_ctx, op_done, type) == 0) + return HPRE_DH_FAIL; + + return HPRE_DH_SUCCESS; +} diff --git a/kae_engine/src/v1/alg/dh/hpre_dh_wd.h b/kae_engine/src/v1/alg/dh/hpre_dh_wd.h new file mode 100644 index 0000000..0f3758b --- /dev/null +++ b/kae_engine/src/v1/alg/dh/hpre_dh_wd.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides wd api for DH. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef HPRE_DH_WD_H +#define HPRE_DH_WD_H + +#include +#include +#include "../../wdmngr/wd_queue_memory.h" + +struct hpre_dh_priv_ctx { + DH *ssl_alg; + int key_size; + unsigned char *block_addr; +}; + +typedef struct hpre_dh_priv_ctx hpre_dh_priv_ctx_t; + +struct hpre_dh_engine_ctx { + void *ctx; + struct wcrypto_dh_op_data opdata; + struct wcrypto_dh_ctx_setup dh_setup; + struct KAE_QUEUE_DATA_NODE *qlist; + hpre_dh_priv_ctx_t priv_ctx; +}; + +typedef struct hpre_dh_engine_ctx hpre_dh_engine_ctx_t; + +extern KAE_QUEUE_POOL_HEAD_S *g_hpre_dh_qnode_pool; + +int wd_hpre_dh_init_qnode_pool(void); +void wd_hpre_dh_uninit_qnode_pool(void); + +KAE_QUEUE_POOL_HEAD_S *wd_hpre_dh_get_qnode_pool(void); + +void hpre_dh_free_eng_ctx(hpre_dh_engine_ctx_t *eng_ctx); + +hpre_dh_engine_ctx_t *hpre_dh_get_eng_ctx(DH *dh, int bits, bool is_g2); + +/* + * fill opdata for generate_key. + */ +int hpre_dh_fill_genkey_opdata(const BIGNUM *g, const BIGNUM *p, + const BIGNUM *priv_key, hpre_dh_engine_ctx_t *engine_ctx); + +/* + * fill opdata for compute_key. + */ +int hpre_dh_fill_compkey_opdata(const BIGNUM *g, const BIGNUM *p, + const BIGNUM *priv_key, const BIGNUM *pub_key, hpre_dh_engine_ctx_t *engine_ctx); + +/* + * call wd API for generating public key. + */ +int hpre_dh_genkey(hpre_dh_engine_ctx_t *engine_ctx); + +/* + * call wd API for generating secret key. + */ +int hpre_dh_compkey(hpre_dh_engine_ctx_t *engine_ctx); + +/* + * get public key from engine ctx. + */ +int hpre_dh_get_pubkey(hpre_dh_engine_ctx_t *engine_ctx, BIGNUM **pubkey); + +/* + * get secret key from engine ctx. + */ +int hpre_dh_get_output_chars(hpre_dh_engine_ctx_t *engine_ctx, unsigned char *out); + +#endif diff --git a/kae_engine/src/v1/alg/digests/sec_digests.c b/kae_engine/src/v1/alg/digests/sec_digests.c new file mode 100644 index 0000000..c7efc30 --- /dev/null +++ b/kae_engine/src/v1/alg/digests/sec_digests.c @@ -0,0 +1,590 @@ +/* + * Copyright (C) 2019. 
Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine digests + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "sec_digests.h" +#include "sec_digests_soft.h" +#include "sec_digests_wd.h" + +#include "../../utils/engine_check.h" +#include "../../utils/engine_utils.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" +#include "../../async/async_callback.h" +#include "../../async/async_event.h" +#include "../../async/async_task_queue.h" + +#define DIGEST_SM3_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT (512) +#define DIGEST_MD5_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT (8 * 1024) + +struct digest_info { + int nid; + int is_enabled; + EVP_MD *digest; +}; + +static struct digest_threshold_table g_digest_pkt_threshold_table[] = { + { NID_sm3, DIGEST_SM3_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, + { NID_md5, DIGEST_MD5_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, +}; + +static struct digest_info g_sec_digests_info[] = { + { NID_sm3, 1, NULL }, + { NID_md5, 1, NULL }, +}; + +#define DIGESTS_COUNT (BLOCKSIZES_OF(g_sec_digests_info)) +static int g_known_digest_nids[DIGESTS_COUNT] = { + NID_sm3, + NID_md5, +}; + +#define SEC_DIGESTS_RETURN_FAIL_IF(cond, mesg, ret) \ + do { \ + if (unlikely(cond)) { \ + US_ERR(mesg); \ + return (ret); \ + } \ + } while (0) + +static int sec_digests_init(EVP_MD_CTX *ctx); +static int sec_digests_update(EVP_MD_CTX *ctx, const void *data, 
size_t data_len); +static int sec_digests_final(EVP_MD_CTX *ctx, unsigned char *digest); +static int sec_digests_cleanup(EVP_MD_CTX *ctx); +static int sec_digests_dowork(sec_digest_priv_t *md_ctx); +static int sec_digests_sync_dowork(sec_digest_priv_t *md_ctx); +static int sec_digests_async_dowork(sec_digest_priv_t *md_ctx, op_done_t *op_done); +static uint32_t sec_digests_sw_get_threshold(int nid); + +void sec_digests_set_enabled(int nid, int enabled) +{ + unsigned int i = 0; + + for (i = 0; i < DIGESTS_COUNT; i++) { + if (g_sec_digests_info[i].nid == nid) + g_sec_digests_info[i].is_enabled = enabled; + } +} +static uint32_t sec_digests_sw_get_threshold(int nid) +{ + int threshold_table_sz = BLOCKSIZES_OF(g_digest_pkt_threshold_table); + int i = 0; + + do { + if (g_digest_pkt_threshold_table[i].nid == nid) + return g_digest_pkt_threshold_table[i].threshold; + } while (++i < threshold_table_sz); + + US_ERR("nid %d not found in digest threshold table", nid); + return UINT_MAX; +} + +static void sec_digests_get_alg(sec_digest_priv_t *md_ctx) +{ + switch (md_ctx->e_nid) { + case NID_sm3: + md_ctx->d_alg = WCRYPTO_SM3; + md_ctx->out_len = SM3_LEN; + break; + case NID_md5: + md_ctx->d_alg = WCRYPTO_MD5; + md_ctx->out_len = MD5_HASH_LEN; + break; + default: + US_WARN("nid=%d don't support by sec engine.", md_ctx->e_nid); + break; + } +} + +int sec_digests_init(EVP_MD_CTX *ctx) +{ + sec_digest_priv_t *md_ctx = NULL; + + if (unlikely(ctx == NULL)) + return OPENSSL_FAIL; + + md_ctx = (sec_digest_priv_t *)EVP_MD_CTX_md_data(ctx); + if (unlikely(md_ctx == NULL)) + return OPENSSL_FAIL; + + memset((void *)md_ctx, 0, sizeof(sec_digest_priv_t)); + md_ctx->e_nid = EVP_MD_nid(EVP_MD_CTX_md(ctx)); + sec_digests_get_alg(md_ctx); + md_ctx->state = SEC_DIGEST_INIT; + + return OPENSSL_SUCCESS; +} + +static int sec_digests_update_inner(sec_digest_priv_t *md_ctx, size_t data_len, const void *data) +{ + int ret = OPENSSL_FAIL; + size_t left_len = data_len; + const unsigned char *tmpdata = 
(const unsigned char *)data; + + while (md_ctx->last_update_bufflen + left_len > INPUT_CACHE_SIZE) { + int copy_to_bufflen = INPUT_CACHE_SIZE - md_ctx->last_update_bufflen; + + kae_memcpy(md_ctx->last_update_buff + md_ctx->last_update_bufflen, tmpdata, copy_to_bufflen); + md_ctx->last_update_bufflen = INPUT_CACHE_SIZE; + left_len -= copy_to_bufflen; + tmpdata += copy_to_bufflen; + + if (md_ctx->state == SEC_DIGEST_INIT) + md_ctx->state = SEC_DIGEST_FIRST_UPDATING; + else if (md_ctx->state == SEC_DIGEST_FIRST_UPDATING) + md_ctx->state = SEC_DIGEST_DOING; + else + (void)md_ctx->state; + + ret = sec_digests_sync_dowork(md_ctx); + if (ret != KAE_SUCCESS) { + US_WARN("do sec digest failed, switch to soft digest"); + goto do_soft_digest; + } + + md_ctx->last_update_bufflen = 0; + if (left_len <= INPUT_CACHE_SIZE) { + md_ctx->last_update_bufflen = left_len; + kae_memcpy(md_ctx->last_update_buff, tmpdata, md_ctx->last_update_bufflen); + break; + } + } + + return OPENSSL_SUCCESS; + +do_soft_digest: + if (md_ctx->state == SEC_DIGEST_FIRST_UPDATING + && md_ctx->last_update_buff + && md_ctx->last_update_bufflen != 0) { + md_ctx->switch_flag = 1; + sec_digests_soft_init(md_ctx->soft_ctx, md_ctx->e_nid); + ret = sec_digests_soft_update(md_ctx->soft_ctx, md_ctx->last_update_buff, + md_ctx->last_update_bufflen, md_ctx->e_nid); + ret &= sec_digests_soft_update(md_ctx->soft_ctx, tmpdata, left_len, md_ctx->e_nid); + + return ret; + } + + US_ERR("do sec digest failed"); + return OPENSSL_FAIL; +} + +static int sec_digests_update(EVP_MD_CTX *ctx, const void *data, + size_t data_len) +{ + SEC_DIGESTS_RETURN_FAIL_IF(unlikely(!ctx || !data), "ctx is NULL.", OPENSSL_FAIL); + sec_digest_priv_t *md_ctx = (sec_digest_priv_t *)EVP_MD_CTX_md_data(ctx); + + SEC_DIGESTS_RETURN_FAIL_IF(unlikely(md_ctx == NULL), "md_ctx is NULL.", OPENSSL_FAIL); + + if (md_ctx->soft_ctx == NULL) + md_ctx->soft_ctx = EVP_MD_CTX_new(); + + if (md_ctx->switch_flag) + return sec_digests_soft_update(md_ctx->soft_ctx, 
data, data_len, md_ctx->e_nid); + + if (md_ctx->e_digest_ctx == NULL) { + md_ctx->e_digest_ctx = wd_digests_get_engine_ctx(md_ctx); + if (md_ctx->e_digest_ctx == NULL) { + US_WARN("failed to get engine ctx"); + return OPENSSL_FAIL; + } + } + digest_engine_ctx_t *e_digest_ctx = md_ctx->e_digest_ctx; + + if (md_ctx->last_update_buff == NULL) + md_ctx->last_update_buff = e_digest_ctx->op_data.in; + + int nid = EVP_MD_nid(EVP_MD_CTX_md(ctx)); + unsigned char digest[MAX_OUTLEN] = {0}; + + md_ctx->e_nid = nid; + sec_digests_get_alg(md_ctx); + md_ctx->out = digest; + + if (md_ctx->last_update_bufflen + data_len <= INPUT_CACHE_SIZE) { + kae_memcpy(md_ctx->last_update_buff + md_ctx->last_update_bufflen, data, data_len); + md_ctx->last_update_bufflen += data_len; + return OPENSSL_SUCCESS; + } + + return sec_digests_update_inner(md_ctx, data_len, data); +} + +static int sec_digests_final(EVP_MD_CTX *ctx, unsigned char *digest) +{ + int ret = KAE_FAIL; + + SEC_DIGESTS_RETURN_FAIL_IF(!ctx || !digest, "ctx is NULL.", OPENSSL_FAIL); + sec_digest_priv_t *md_ctx = (sec_digest_priv_t *)EVP_MD_CTX_md_data(ctx); + + SEC_DIGESTS_RETURN_FAIL_IF(unlikely(md_ctx == NULL), "md_ctx is NULL.", OPENSSL_FAIL); + + if (md_ctx->switch_flag) { + ret = sec_digests_soft_final(md_ctx->soft_ctx, digest, md_ctx->e_nid); + goto end; + } + + if (md_ctx->last_update_bufflen == 0) { + US_WARN("no data input, swich to soft digest"); + goto do_soft_digest; + } + + if (md_ctx->last_update_buff && md_ctx->last_update_bufflen != 0) { + if (md_ctx->state == SEC_DIGEST_INIT + && md_ctx->last_update_bufflen < sec_digests_sw_get_threshold(md_ctx->e_nid)) { + US_WARN_LIMIT("small package offload, switch to soft digest"); + goto do_soft_digest; + } + + uint32_t tmp = md_ctx->state; + + md_ctx->state = SEC_DIGEST_FINAL; + + md_ctx->out = digest; + ret = sec_digests_dowork(md_ctx); + if (ret != KAE_SUCCESS) { + US_WARN("do sec digest failed, switch to soft digest"); + md_ctx->state = tmp; + goto do_soft_digest; + } + 
ret = OPENSSL_SUCCESS; + } + + US_DEBUG("do digest success. ctx=%p", md_ctx); + +end: + sec_digests_soft_cleanup(md_ctx); + if (md_ctx->e_digest_ctx != NULL) { + (void)wd_digests_put_engine_ctx(md_ctx->e_digest_ctx); + md_ctx->e_digest_ctx = NULL; + } + + return ret; + +do_soft_digest: + if (md_ctx->state == SEC_DIGEST_INIT) { + sec_digests_soft_work(md_ctx, md_ctx->last_update_bufflen, digest); + ret = OPENSSL_SUCCESS; + } else { + US_ERR("do sec digest failed"); + ret = OPENSSL_FAIL; + } + + if (md_ctx->e_digest_ctx != NULL) { + (void)wd_digests_put_engine_ctx(md_ctx->e_digest_ctx); + md_ctx->e_digest_ctx = NULL; + } + + return ret; +} + +static void sec_digests_update_md_ctx(sec_digest_priv_t *md_ctx) +{ + if (md_ctx->do_digest_len == 0) + return; + + md_ctx->in += md_ctx->do_digest_len; +} + +static int sec_digests_dowork(sec_digest_priv_t *md_ctx) +{ + int ret = KAE_FAIL; + + // add async parm + int job_ret; + op_done_t op_done; + + SEC_DIGESTS_RETURN_FAIL_IF(md_ctx->last_update_bufflen <= 0, "in length less than or equal to zero.", KAE_FAIL); + // packageSize>input_cache_size + if (md_ctx->last_update_bufflen > INPUT_CACHE_SIZE) { + ret = sec_digests_sync_dowork(md_ctx); + if (ret != 0) { + US_ERR("sec digest sync fail"); + return ret; + } + return KAE_SUCCESS; + } + + // async + async_init_op_done_v1(&op_done); + + if (op_done.job != NULL && kae_is_async_enabled()) { + if (async_setup_async_event_notification_v1(0) == 0) { + US_ERR("sec async event notifying failed"); + async_cleanup_op_done_v1(&op_done); + return KAE_FAIL; + } + } else { + US_DEBUG("NO ASYNC Job or async disable, back to SYNC!"); + async_cleanup_op_done_v1(&op_done); + return sec_digests_sync_dowork(md_ctx); + } + + if (sec_digests_async_dowork(md_ctx, &op_done) == KAE_FAIL) + goto err; + + do { + job_ret = async_pause_job_v1(op_done.job, ASYNC_STATUS_OK); + if ((job_ret == 0)) { + US_DEBUG("- pthread_yidle -"); + kae_pthread_yield(); + } + } while (!op_done.flag || 
ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(job_ret)); + + if (op_done.verifyRst < 0) { + US_ERR("verify result failed with %d", op_done.verifyRst); + async_cleanup_op_done_v1(&op_done); + return KAE_FAIL; + } + + async_cleanup_op_done_v1(&op_done); + + US_DEBUG(" Digest Async Job Finish! md_ctx = %p\n", md_ctx); + return KAE_SUCCESS; +err: + US_ERR("async job err"); + (void)async_clear_async_event_notification_v1(); + async_cleanup_op_done_v1(&op_done); + return KAE_FAIL; +} + +static int sec_digests_sync_dowork(sec_digest_priv_t *md_ctx) +{ + SEC_DIGESTS_RETURN_FAIL_IF(md_ctx == NULL, "md_ctx is NULL.", KAE_FAIL); + digest_engine_ctx_t *e_digest_ctx = md_ctx->e_digest_ctx; + uint32_t leftlen = md_ctx->last_update_bufflen; + int ret; + + md_ctx->in = md_ctx->last_update_buff; + + while (leftlen != 0) { + md_ctx->do_digest_len = wd_digests_get_do_digest_len(e_digest_ctx, leftlen); + + wd_digests_set_input_data(e_digest_ctx); + + ret = wd_digests_doimpl(e_digest_ctx); + if (ret != KAE_SUCCESS) + return ret; + + wd_digests_get_output_data(e_digest_ctx); + sec_digests_update_md_ctx(md_ctx); + leftlen -= md_ctx->do_digest_len; + } + + US_DEBUG("sec do digest success."); + + return KAE_SUCCESS; +} + +static int sec_digests_async_dowork(sec_digest_priv_t *md_ctx, op_done_t *op_done) +{ + int ret = 0; + int cnt = 0; + enum task_type type = ASYNC_TASK_DIGEST; + + SEC_DIGESTS_RETURN_FAIL_IF(md_ctx == NULL, "md_ctx is NULL.", KAE_FAIL); + digest_engine_ctx_t *e_digest_ctx = md_ctx->e_digest_ctx; + + SEC_DIGESTS_RETURN_FAIL_IF(e_digest_ctx == NULL, "e_digest_ctx is NULL", KAE_FAIL); + void *tag = e_digest_ctx; + uint32_t leftlen = md_ctx->last_update_bufflen; + + md_ctx->in = md_ctx->last_update_buff; + md_ctx->do_digest_len = wd_digests_get_do_digest_len(e_digest_ctx, leftlen); + wd_digests_set_input_data(e_digest_ctx); + + do { + if (cnt > MAX_SEND_TRY_CNTS) + break; + ret = wcrypto_do_digest(e_digest_ctx->wd_ctx, &e_digest_ctx->op_data, tag); + if (ret == -WD_EBUSY) { + if 
((async_wake_job_v1(op_done->job, ASYNC_STATUS_EAGAIN) == 0 || + async_pause_job_v1(op_done->job, ASYNC_STATUS_EAGAIN) == 0)) { + US_ERR("sec wake job or sec pause job fail!\n"); + ret = 0; + break; + } + cnt++; + } + } while (ret == -WD_EBUSY); + + if (ret != WD_SUCCESS) { + US_ERR("sec async wcryto do cipher failed"); + return KAE_FAIL; + } + + if (async_add_poll_task_v1(e_digest_ctx, op_done, type) == 0) { + US_ERR("sec add task failed "); + return KAE_FAIL; + } + + return KAE_SUCCESS; +} + +static int sec_digests_cleanup(EVP_MD_CTX *ctx) +{ + return OPENSSL_SUCCESS; +} + +/** + * desc:bind digest func as hardware function + * @return + */ +static EVP_MD *sec_set_digests_methods(struct digest_info digestinfo) +{ + const EVP_MD *default_digest = NULL; + + if (digestinfo.digest == NULL) { + switch (digestinfo.nid) { + case NID_sm3: + default_digest = EVP_sm3(); + break; + case NID_md5: + default_digest = EVP_md5(); + break; + default: + return NULL; + } + } + digestinfo.digest = (EVP_MD *)EVP_MD_meth_dup(default_digest); + if (digestinfo.digest == NULL) { + US_ERR("dup digest failed!"); + return NULL; + } + + EVP_MD_meth_set_init(digestinfo.digest, sec_digests_init); + EVP_MD_meth_set_update(digestinfo.digest, sec_digests_update); + EVP_MD_meth_set_final(digestinfo.digest, sec_digests_final); + EVP_MD_meth_set_cleanup(digestinfo.digest, sec_digests_cleanup); + EVP_MD_meth_set_app_datasize(digestinfo.digest, sizeof(sec_digest_priv_t)); + return digestinfo.digest; +} + +static void sec_create_digests(void) +{ + unsigned int i = 0; + + for (i = 0; i < DIGESTS_COUNT; i++) { + if (g_sec_digests_info[i].digest == NULL) + g_sec_digests_info[i].digest = sec_set_digests_methods(g_sec_digests_info[i]); + } +} + +/****************************************************************************** + * function: + * sec_engine_digests(ENGINE *e, + * const EVP_digest **digest, + * const int **nids, + * int nid) + * + * @param e [IN] - OpenSSL engine pointer + * @param digest [IN] - 
digest structure pointer + * @param nids [IN] - digest function nids + * @param nid [IN] - digest operation id + * + * description: + * kae engine digest operations registrar + ******************************************************************************/ +int sec_engine_digests(ENGINE *e, const EVP_MD **digest, const int **nids, int nid) +{ + UNUSED(e); + unsigned int i = 0; + + if ((nids == NULL) && ((digest == NULL) || (nid < 0))) { + US_ERR("%s invalid input param.", __func__); + if (digest != NULL) + *digest = NULL; + return OPENSSL_FAIL; + } + + /* No specific digest => return a list of supported nids ... */ + if (digest == NULL) { + if (nids != NULL) + *nids = g_known_digest_nids; + return BLOCKSIZES_OF(g_sec_digests_info); + } + + for (i = 0; i < DIGESTS_COUNT; i++) { + if (g_sec_digests_info[i].nid == nid) { + if (g_sec_digests_info[i].digest == NULL) + sec_create_digests(); + /*SM3 is disabled*/ + *digest = g_sec_digests_info[i].is_enabled + ? 
g_sec_digests_info[i].digest : (EVP_MD *)EVP_MD_meth_dup(EVP_sm3()); + return OPENSSL_SUCCESS; + } + } + + US_WARN("nid = %d not support.", nid); + *digest = NULL; + + return OPENSSL_FAIL; +} + +void sec_digests_free_methods(void) +{ + unsigned int i = 0; + + for (i = 0; i < DIGESTS_COUNT; i++) { + if (g_sec_digests_info[i].digest != NULL) { + EVP_MD_meth_free(g_sec_digests_info[i].digest); + g_sec_digests_info[i].digest = NULL; + } + } +} + +void sec_digests_cb(const void *msg, void *tag) +{ + if (!msg || !tag) { + US_ERR("sec cb params err!\n"); + return; + } + struct wcrypto_digest_msg *message = (struct wcrypto_digest_msg *)msg; + digest_engine_ctx_t *e_digest_ctx = (digest_engine_ctx_t *)tag; + + kae_memcpy(e_digest_ctx->md_ctx->out, message->out, message->out_bytes); +} + +// async poll thread create +int sec_digest_engine_ctx_poll(void *engnine_ctx) +{ + int ret = 0; + digest_engine_ctx_t *e_digest_ctx = (digest_engine_ctx_t *)engnine_ctx; + struct wd_queue *q = e_digest_ctx->q_node->kae_wd_queue; + +POLL_AGAIN: + ret = wcrypto_digest_poll(q, 1); + if (!ret) { + goto POLL_AGAIN; + } else if (ret < 0) { + US_ERR("digest poll failed\n"); + return ret; + } + return ret; +} + +int digest_module_init(void) +{ + wd_digests_init_qnode_pool(); + sec_create_digests(); + // reg async interface here + async_register_poll_fn_v1(ASYNC_TASK_DIGEST, sec_digest_engine_ctx_poll); + + return 1; +} diff --git a/kae_engine/src/v1/alg/digests/sec_digests.h b/kae_engine/src/v1/alg/digests/sec_digests.h new file mode 100644 index 0000000..4cc00ad --- /dev/null +++ b/kae_engine/src/v1/alg/digests/sec_digests.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the digest interface for KAE engine + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SEC_DIGESTS_H +#define SEC_DIGESTS_H + +#include +#include +#include +#include "../../wdmngr/wd_queue_memory.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_utils.h" + +#define MAX_SEND_TRY_CNTS 50 + +#define MIN_DIGEST_LEN 512 +#define INPUT_CACHE_SIZE (512 * 1024) +#define SM3_LEN 32 +#define MAX_OUTLEN 64 +#define MD5_HASH_LEN 16 + +enum sec_digest_state { + SEC_DIGEST_INIT = 0, + SEC_DIGEST_FIRST_UPDATING, + SEC_DIGEST_DOING, + SEC_DIGEST_FINAL +}; + +typedef struct digest_engine_ctx digest_engine_ctx_t; +typedef struct sec_digest_priv sec_digest_priv_t; + +struct sec_digest_priv { + uint8_t *last_update_buff; + uint8_t *in; + uint8_t *out; + uint32_t d_mode; // haven't used + uint32_t d_alg; + uint32_t state; + uint32_t last_update_bufflen; + uint32_t do_digest_len; // do one cycle digest length + uint32_t out_len; // digest out length + uint32_t e_nid; // digest nid + digest_engine_ctx_t *e_digest_ctx; + EVP_MD_CTX *soft_ctx; + uint32_t switch_flag; +}; + +struct digest_engine_ctx { + KAE_QUEUE_DATA_NODE_S *q_node; + struct wcrypto_digest_op_data op_data; + struct wcrypto_digest_ctx_setup setup; + void *wd_ctx; // one ctx or a list of ctx + sec_digest_priv_t *md_ctx; +}; + +struct digest_threshold_table { + int nid; + int threshold; +}; +void sec_digests_set_enabled(int nid, int enabled); +int sec_engine_digests(ENGINE *e, const EVP_MD **digest, const int **nids, int nid); +void sec_digests_free_methods(void); +int sec_cipher_engine_ctx_poll(void *engnine_ctx); + +int digest_module_init(void); +void 
sec_digests_cb(const void *msg, void *tag); +#endif + diff --git a/kae_engine/src/v1/alg/digests/sec_digests_soft.c b/kae_engine/src/v1/alg/digests/sec_digests_soft.c new file mode 100644 index 0000000..8e1c4fd --- /dev/null +++ b/kae_engine/src/v1/alg/digests/sec_digests_soft.c @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for switch to soft digests + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "sec_digests_soft.h" +#include "../../utils/engine_opensslerr.h" +#include "../../utils/engine_log.h" + +static const EVP_MD *sec_digests_soft_md(uint32_t e_nid) +{ + const EVP_MD *g_digest_md = NULL; + + switch (e_nid) { + case NID_sm3: + g_digest_md = EVP_sm3(); + break; + case NID_md5: + g_digest_md = EVP_md5(); + break; + default: + break; + } + return g_digest_md; +} + +int sec_digests_soft_init(EVP_MD_CTX *ctx, uint32_t e_nid) +{ + const EVP_MD *digest_md = NULL; + + digest_md = sec_digests_soft_md(e_nid); + if (digest_md == NULL) { + US_WARN("switch to soft:don't support by sec engine."); + return OPENSSL_FAIL; + } + int ctx_len = EVP_MD_meth_get_app_datasize(digest_md); + + if (ctx->md_data == NULL) + ctx->md_data = OPENSSL_malloc(ctx_len); + if (!ctx->md_data) { + KAEerr(KAE_F_DIGEST_SOFT_INIT, KAE_R_MALLOC_FAILURE); + US_ERR("malloc md_data failed"); + return OPENSSL_FAIL; + } + + return EVP_MD_meth_get_init(digest_md)(ctx); +} + +int sec_digests_soft_update(EVP_MD_CTX *ctx, const void *data, size_t data_len, uint32_t e_nid) +{ + const EVP_MD *digest_md = NULL; + + digest_md = sec_digests_soft_md(e_nid); + if (digest_md == NULL) { + US_WARN("switch to soft:don't support by sec engine."); + return OPENSSL_FAIL; + } + return EVP_MD_meth_get_update(digest_md)(ctx, data, data_len); +} + +int sec_digests_soft_final(EVP_MD_CTX *ctx, unsigned char *digest, uint32_t e_nid) +{ + US_WARN_LIMIT("call sec_digest_soft_final"); + + const EVP_MD *digest_md = NULL; + + digest_md = sec_digests_soft_md(e_nid); + if (digest_md == NULL) { + US_WARN("switch to soft:don't support by sec engine."); + return OPENSSL_FAIL; + } + + int ret = EVP_MD_meth_get_final(digest_md)(ctx, digest); + + if (ctx->md_data) + OPENSSL_free(ctx->md_data); + + return ret; +} + +void sec_digests_soft_work(sec_digest_priv_t *md_ctx, int len, unsigned char *digest) +{ + if (md_ctx->soft_ctx == NULL) + md_ctx->soft_ctx = EVP_MD_CTX_new(); + + 
(void)sec_digests_soft_init(md_ctx->soft_ctx, md_ctx->e_nid); + if (len != 0) + (void)sec_digests_soft_update(md_ctx->soft_ctx, md_ctx->last_update_buff, len, md_ctx->e_nid); + (void)sec_digests_soft_final(md_ctx->soft_ctx, digest, md_ctx->e_nid); + + if (md_ctx->soft_ctx != NULL) { + EVP_MD_CTX_free(md_ctx->soft_ctx); + md_ctx->soft_ctx = NULL; + } +} + +void sec_digests_soft_cleanup(sec_digest_priv_t *md_ctx) +{ + if (md_ctx->soft_ctx != NULL) { + EVP_MD_CTX_free(md_ctx->soft_ctx); + md_ctx->soft_ctx = NULL; + } +} diff --git a/kae_engine/src/v1/alg/digests/sec_digests_soft.h b/kae_engine/src/v1/alg/digests/sec_digests_soft.h new file mode 100644 index 0000000..d3a3665 --- /dev/null +++ b/kae_engine/src/v1/alg/digests/sec_digests_soft.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the digest interface for soft digests + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SEC_DIGESTS_SOFT_H +#define SEC_DIGESTS_SOFT_H + +#include "sec_digests.h" + +struct evp_md_ctx_st { + const EVP_MD *digest; + ENGINE *engine; /* functional reference if 'digest' is ENGINE-provided */ + unsigned long flags; + void *md_data; + /* Public key context for sign/verify */ + EVP_PKEY_CTX *pctx; + /* Update function: usually copied from EVP_MD */ + int (*update)(EVP_MD_CTX *ctx, const void *data, size_t count); +} /* EVP_MD_CTX */; + +int sec_digests_soft_init(EVP_MD_CTX *ctx, uint32_t e_nid); +int sec_digests_soft_update(EVP_MD_CTX *ctx, const void *data, size_t data_len, uint32_t e_nid); +int sec_digests_soft_final(EVP_MD_CTX *ctx, unsigned char *digest, uint32_t e_nid); +void sec_digests_soft_work(sec_digest_priv_t *md_ctx, int len, unsigned char *digest); +void sec_digests_soft_cleanup(sec_digest_priv_t *md_ctx); + +#endif diff --git a/kae_engine/src/v1/alg/digests/sec_digests_wd.c b/kae_engine/src/v1/alg/digests/sec_digests_wd.c new file mode 100644 index 0000000..1fb1527 --- /dev/null +++ b/kae_engine/src/v1/alg/digests/sec_digests_wd.c @@ -0,0 +1,253 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine utils dealing with wrapdrive + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "sec_digests_wd.h" +#include "../../wdmngr/wd_queue_memory.h" +#include "../../utils/engine_utils.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" + +KAE_QUEUE_POOL_HEAD_S *g_sec_digests_qnode_pool; +static digest_engine_ctx_t *wd_digests_new_engine_ctx(KAE_QUEUE_DATA_NODE_S *q_node, sec_digest_priv_t *md_ctx); +static int wd_digests_init_engine_ctx(digest_engine_ctx_t *e_digest_ctx); + +void wd_digests_free_engine_ctx(void *digest_ctx) +{ + digest_engine_ctx_t *e_digest_ctx = (digest_engine_ctx_t *)digest_ctx; + + if (e_digest_ctx == NULL) + return; + + if (e_digest_ctx->op_data.in && e_digest_ctx->setup.br.usr) { + e_digest_ctx->setup.br.free(e_digest_ctx->setup.br.usr, (void *)e_digest_ctx->op_data.in); + e_digest_ctx->op_data.in = NULL; + } + + if (e_digest_ctx->op_data.out && e_digest_ctx->setup.br.usr) { + e_digest_ctx->setup.br.free(e_digest_ctx->setup.br.usr, (void *)e_digest_ctx->op_data.out); + e_digest_ctx->op_data.out = NULL; + } + + OPENSSL_free(e_digest_ctx); + e_digest_ctx = NULL; +} + +static digest_engine_ctx_t *wd_digests_new_engine_ctx(KAE_QUEUE_DATA_NODE_S *q_node, sec_digest_priv_t *md_ctx) +{ + digest_engine_ctx_t *e_digest_ctx = NULL; + + e_digest_ctx = (digest_engine_ctx_t *)OPENSSL_malloc(sizeof(digest_engine_ctx_t)); + if (e_digest_ctx == NULL) { + US_ERR("digest engine_ctx malloc fail."); + return NULL; + } + kae_memset(e_digest_ctx, 0, sizeof(digest_engine_ctx_t)); + + e_digest_ctx->setup.br.alloc = kae_wd_alloc_blk; + e_digest_ctx->setup.br.free = kae_wd_free_blk; + e_digest_ctx->setup.br.iova_map = kae_dma_map; + e_digest_ctx->setup.br.iova_unmap = kae_dma_unmap; + e_digest_ctx->setup.br.usr = q_node->kae_queue_mem_pool; + + e_digest_ctx->op_data.in = e_digest_ctx->setup.br.alloc(e_digest_ctx->setup.br.usr, DIGEST_BLOCK_SIZE); + if (e_digest_ctx->op_data.in == NULL) { + US_ERR("alloc opdata in buf failed"); + goto err; + } + + e_digest_ctx->op_data.out = 
e_digest_ctx->setup.br.alloc(e_digest_ctx->setup.br.usr, DIGEST_BLOCK_SIZE); + if (e_digest_ctx->op_data.out == NULL) { + US_ERR("alloc opdata out buf failed"); + goto err; + } + + e_digest_ctx->md_ctx = md_ctx; // point to each other + e_digest_ctx->q_node = q_node; // point to each other + q_node->engine_ctx = e_digest_ctx; // point to each other + + return e_digest_ctx; + +err: + wd_digests_free_engine_ctx(e_digest_ctx); + return NULL; +} + +static int wd_digests_init_engine_ctx(digest_engine_ctx_t *e_digest_ctx) +{ + struct wd_queue *q = e_digest_ctx->q_node->kae_wd_queue; + sec_digest_priv_t *md_ctx = e_digest_ctx->md_ctx; + + if (e_digest_ctx->wd_ctx != NULL) { + US_WARN("wd ctx is in used by other digests"); + return KAE_FAIL; + } + + e_digest_ctx->setup.alg = (enum wcrypto_digest_alg)md_ctx->d_alg; // for example: WD_SM3; + e_digest_ctx->setup.mode = WCRYPTO_DIGEST_NORMAL; + e_digest_ctx->setup.cb = (wcrypto_cb)sec_digests_cb; + e_digest_ctx->wd_ctx = wcrypto_create_digest_ctx(q, &e_digest_ctx->setup); + if (e_digest_ctx->wd_ctx == NULL) { + US_ERR("wd create sec digest ctx fail!"); + return KAE_FAIL; + } + + return KAE_SUCCESS; +} + +digest_engine_ctx_t *wd_digests_get_engine_ctx(sec_digest_priv_t *md_ctx) +{ + KAE_QUEUE_DATA_NODE_S *q_node = NULL; + digest_engine_ctx_t *e_digest_ctx = NULL; + + if (unlikely(md_ctx == NULL)) { + US_WARN("sec digest priv ctx NULL!"); + return NULL; + } + + q_node = kae_get_node_from_pool(g_sec_digests_qnode_pool); + if (q_node == NULL) { + US_ERR_LIMIT("failed to get hardware queue"); + return NULL; + } + + e_digest_ctx = (digest_engine_ctx_t *)q_node->engine_ctx; + if (e_digest_ctx == NULL) { + e_digest_ctx = wd_digests_new_engine_ctx(q_node, md_ctx); + if (e_digest_ctx == NULL) { + US_WARN("sec new engine ctx fail!"); + (void)kae_put_node_to_pool(g_sec_digests_qnode_pool, q_node); + return NULL; + } + } + + e_digest_ctx->md_ctx = md_ctx; + md_ctx->e_digest_ctx = e_digest_ctx; + + if 
(wd_digests_init_engine_ctx(e_digest_ctx) == KAE_FAIL) { + US_WARN("init engine ctx fail!"); + wd_digests_put_engine_ctx(e_digest_ctx); + return NULL; + } + + return e_digest_ctx; +} + +void wd_digests_put_engine_ctx(digest_engine_ctx_t *e_digest_ctx) +{ + if (unlikely(e_digest_ctx == NULL)) { + US_WARN("sec digest engine ctx NULL!"); + return; + } + + if (e_digest_ctx->wd_ctx != NULL) { + wcrypto_del_digest_ctx(e_digest_ctx->wd_ctx); + e_digest_ctx->wd_ctx = NULL; + } + + if (e_digest_ctx->q_node != NULL) + (void)kae_put_node_to_pool(g_sec_digests_qnode_pool, e_digest_ctx->q_node); + + e_digest_ctx = NULL; +} + +int wd_digests_doimpl(digest_engine_ctx_t *e_digest_ctx) +{ + int ret; + int trycount = 0; + + if (unlikely(e_digest_ctx == NULL)) { + US_ERR("do digest ctx NULL!"); + return KAE_FAIL; + } + +again: + ret = wcrypto_do_digest(e_digest_ctx->wd_ctx, &e_digest_ctx->op_data, NULL); + if (ret != WD_SUCCESS) { + if (ret == -WD_EBUSY && trycount <= 5) { // try 5 times + US_WARN("do digest busy, retry again!"); + trycount++; + goto again; + } else { + US_ERR("do digest failed!"); + return KAE_FAIL; + } + } + + return KAE_SUCCESS; +} + +void wd_digests_set_input_data(digest_engine_ctx_t *e_digest_ctx) +{ + // fill engine ctx opdata + sec_digest_priv_t *md_ctx = e_digest_ctx->md_ctx; + + e_digest_ctx->op_data.in_bytes = md_ctx->do_digest_len; + e_digest_ctx->op_data.out_bytes = md_ctx->out_len; + + e_digest_ctx->op_data.has_next = (md_ctx->state == SEC_DIGEST_FINAL) ? 
false : true; +} + +void wd_digests_get_output_data(digest_engine_ctx_t *e_digest_ctx) +{ + sec_digest_priv_t *md_ctx = e_digest_ctx->md_ctx; + + // the real out data start at opdata.out + offset + if (e_digest_ctx->op_data.has_next == false) + kae_memcpy(md_ctx->out, (uint8_t *)e_digest_ctx->op_data.out, md_ctx->out_len); +} + +uint32_t wd_digests_get_do_digest_len(digest_engine_ctx_t *e_digest_ctx, int leftlen) +{ + uint32_t do_digest_len = 0; + int max_input_datalen = DIGEST_BLOCK_SIZE; + + /* + * Note: Small encrypted block can be encrypted once. + * or the last encrypted slice of a large encrypted block + */ + if (leftlen <= max_input_datalen) + do_digest_len = leftlen; + else + do_digest_len = max_input_datalen; + + return do_digest_len; +} + +KAE_QUEUE_POOL_HEAD_S *wd_digests_get_qnode_pool(void) +{ + return g_sec_digests_qnode_pool; +} + +int wd_digests_init_qnode_pool(void) +{ + kae_queue_pool_destroy(g_sec_digests_qnode_pool, wd_digests_free_engine_ctx); + + g_sec_digests_qnode_pool = kae_init_queue_pool(WCRYPTO_DIGEST); + if (g_sec_digests_qnode_pool == NULL) { + US_ERR("do digest ctx NULL!"); + return KAE_FAIL; + } + + return KAE_SUCCESS; +} + +void wd_digests_uninit_qnode_pool(void) +{ + kae_queue_pool_destroy(g_sec_digests_qnode_pool, wd_digests_free_engine_ctx); + g_sec_digests_qnode_pool = NULL; +} diff --git a/kae_engine/src/v1/alg/digests/sec_digests_wd.h b/kae_engine/src/v1/alg/digests/sec_digests_wd.h new file mode 100644 index 0000000..9c9e7a1 --- /dev/null +++ b/kae_engine/src/v1/alg/digests/sec_digests_wd.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the digest interface for KAE digests using wd interface + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SEC_DIGESTS_WD_H +#define SEC_DIGESTS_WD_H + +#include "sec_digests.h" + +extern KAE_QUEUE_POOL_HEAD_S *g_sec_digests_qnode_pool; + +digest_engine_ctx_t *wd_digests_get_engine_ctx(sec_digest_priv_t *md_ctx); +void wd_digests_put_engine_ctx(digest_engine_ctx_t *e_digest_ctx); +int wd_digests_doimpl(digest_engine_ctx_t *e_digest_ctx); + +void wd_digests_set_input_data(digest_engine_ctx_t *e_digest_ctx); +void wd_digests_get_output_data(digest_engine_ctx_t *e_digest_ctx); +uint32_t wd_digests_get_do_digest_len(digest_engine_ctx_t *e_digest_ctx, int leftlen); + +KAE_QUEUE_POOL_HEAD_S *wd_digests_get_qnode_pool(void); +int wd_digests_init_qnode_pool(void); +void wd_digests_uninit_qnode_pool(void); +void wd_digests_free_engine_ctx(void *digest_ctx); + +#endif + diff --git a/kae_engine/src/v1/alg/pkey/hpre_rsa.c b/kae_engine/src/v1/alg/pkey/hpre_rsa.c new file mode 100644 index 0000000..6c1d96d --- /dev/null +++ b/kae_engine/src/v1/alg/pkey/hpre_rsa.c @@ -0,0 +1,771 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine rsa + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include "hpre_rsa.h" +#include "hpre_rsa_utils.h" +#include "hpre_wd.h" +#include "hpre_rsa_soft.h" +#include "../../async/async_poll.h" +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" +#include "../dh/hpre_dh.h" + +#ifndef OPENSSL_NO_RSA +const int RSAPKEYMETH_IDX; +#else +const int RSAPKEYMETH_IDX = -1; +#endif + +const char *g_hpre_device = "hisi_hpre"; +static RSA_METHOD *g_hpre_rsa_method; +static RSA_METHOD *g_soft_rsa_method; +static EVP_PKEY_METHOD *g_hpre_pkey_meth; + +static int hpre_rsa_public_encrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); + +static int hpre_rsa_private_encrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); + +static int hpre_rsa_public_decrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); + +static int hpre_rsa_private_decrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); + +static int hpre_rsa_keygen(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); + +static int hpre_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx); + +static int hpre_bn_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); + +RSA_METHOD *hpre_get_rsa_methods(void) +{ + int ret = 1; + + if (g_hpre_rsa_method != NULL) + return g_hpre_rsa_method; + if (g_soft_rsa_method != NULL) + return g_soft_rsa_method; +#if 0 + if (!kae_get_device(g_hpre_device)) { + const RSA_METHOD 
*default_soft_method = RSA_PKCS1_OpenSSL(); + + g_soft_rsa_method = RSA_meth_new("SOFT RSA METHOD", 0); + ret &= RSA_meth_set_pub_enc(g_soft_rsa_method, RSA_meth_get_pub_enc(default_soft_method)); + ret &= RSA_meth_set_priv_enc(g_soft_rsa_method, RSA_meth_get_priv_enc(default_soft_method)); + ret &= RSA_meth_set_pub_dec(g_soft_rsa_method, RSA_meth_get_pub_dec(default_soft_method)); + ret &= RSA_meth_set_priv_dec(g_soft_rsa_method, RSA_meth_get_priv_dec(default_soft_method)); + ret &= RSA_meth_set_keygen(g_soft_rsa_method, hpre_rsa_soft_genkey); + ret &= RSA_meth_set_mod_exp(g_soft_rsa_method, RSA_meth_get_mod_exp(default_soft_method)); + ret &= RSA_meth_set_bn_mod_exp(g_soft_rsa_method, RSA_meth_get_bn_mod_exp(default_soft_method)); + if (ret == 0) { + US_ERR("Failed to set SOFT RSA methods"); + return NULL; + } + return g_soft_rsa_method; + } +#endif + g_hpre_rsa_method = RSA_meth_new("HPRE RSA method", 0); + if (g_hpre_rsa_method == NULL) { + KAEerr(KAE_F_HPRE_GET_RSA_METHODS, KAE_R_MALLOC_FAILURE); + US_ERR("Failed to allocate HPRE RSA methods"); + return NULL; + } + + ret &= RSA_meth_set_pub_enc(g_hpre_rsa_method, hpre_rsa_public_encrypt); + ret &= RSA_meth_set_pub_dec(g_hpre_rsa_method, hpre_rsa_public_decrypt); + ret &= RSA_meth_set_priv_enc(g_hpre_rsa_method, hpre_rsa_private_encrypt); + ret &= RSA_meth_set_priv_dec(g_hpre_rsa_method, hpre_rsa_private_decrypt); + ret &= RSA_meth_set_keygen(g_hpre_rsa_method, hpre_rsa_keygen); + ret &= RSA_meth_set_mod_exp(g_hpre_rsa_method, hpre_rsa_mod_exp); + ret &= RSA_meth_set_bn_mod_exp(g_hpre_rsa_method, hpre_bn_mod_exp); + if (ret == 0) { + KAEerr(KAE_F_HPRE_GET_RSA_METHODS, KAE_R_RSA_SET_METHODS_FAILURE); + US_ERR("Failed to set HPRE RSA methods"); + return NULL; + } + + return g_hpre_rsa_method; +} + +static void hpre_free_rsa_methods(void) +{ + if (g_hpre_rsa_method != NULL) { + RSA_meth_free(g_hpre_rsa_method); + g_hpre_rsa_method = NULL; + } + if (g_soft_rsa_method != NULL) { + RSA_meth_free(g_soft_rsa_method); + 
g_soft_rsa_method = NULL; + } +} + + +int hpre_engine_ctx_poll(void *engine_ctx) +{ + int ret; + hpre_engine_ctx_t *eng_ctx = (hpre_engine_ctx_t *)engine_ctx; + struct wd_queue *q = eng_ctx->qlist->kae_wd_queue; +poll_again: + ret = wcrypto_rsa_poll(q, 1); + if (!ret) { + goto poll_again; + } else if (ret < 0) { + US_ERR("rsa poll fail!\n"); + return ret; + } + return ret; +} + +int hpre_module_init(void) +{ + /* init queue */ + wd_hpre_init_qnode_pool(); + + (void)get_rsa_pkey_meth(); + (void)hpre_get_rsa_methods(); + + /* register async poll func */ + async_register_poll_fn_v1(ASYNC_TASK_RSA, hpre_engine_ctx_poll); + + return 1; +} + +EVP_PKEY_METHOD *get_rsa_pkey_meth(void) +{ + const EVP_PKEY_METHOD *def_rsa = EVP_PKEY_meth_get0(RSAPKEYMETH_IDX); + + if (g_hpre_pkey_meth == NULL) { + g_hpre_pkey_meth = EVP_PKEY_meth_new(EVP_PKEY_RSA, 0); + if (g_hpre_pkey_meth == NULL) { + US_ERR("failed to new pkey meth"); + return NULL; + } + + EVP_PKEY_meth_copy(g_hpre_pkey_meth, def_rsa); + } + + return g_hpre_pkey_meth; +} + +void hpre_destroy(void) +{ + hpre_free_rsa_methods(); +} + +#define PKEY_METHOD_TYPE_NUM 3 +const int g_pkey_method_types[PKEY_METHOD_TYPE_NUM] = {EVP_PKEY_RSA, EVP_PKEY_DH, EVP_PKEY_DHX}; + +static int hpre_check_meth_args(EVP_PKEY_METHOD **pmeth, + const int **pnids, int nid) +{ + if ((pnids == NULL) && ((pmeth == NULL) || (nid < 0))) { + if (pmeth != NULL) + *pmeth = NULL; + return 0; + } + + if (pmeth == NULL && pnids != NULL) { + *pnids = g_pkey_method_types; + return PKEY_METHOD_TYPE_NUM; + } + if (pmeth == NULL) + return 0; + + return -1; +} + +int hpre_pkey_meths(ENGINE *e, EVP_PKEY_METHOD **pmeth, + const int **pnids, int nid) +{ + int ret = hpre_check_meth_args(pmeth, pnids, nid); + + if (ret != -1) + return ret; + + switch (nid) { + case EVP_PKEY_RSA: + *pmeth = get_rsa_pkey_meth(); + break; + case EVP_PKEY_DH: + *pmeth = get_dh_pkey_meth(); + break; + case EVP_PKEY_DHX: + *pmeth = (EVP_PKEY_METHOD *)EVP_PKEY_meth_find(EVP_PKEY_DHX); + 
break; + default: + *pmeth = NULL; + break; + } + + return (*pmeth != NULL); +} + +//lint -save -e506 +#undef GOTOEND_IF +#define GOTOEND_IF(cond, mesg, f, r) \ + do { \ + if (cond) { \ + KAEerr(f, r); \ + US_ERR(mesg); \ + ret = HPRE_CRYPTO_FAIL; \ + rsa_soft_mark = 1; \ + goto end;\ + } \ + } while (0) + + +static int hpre_rsa_check(const int flen, const BIGNUM *n, const BIGNUM *e, + int *num_bytes, RSA *rsa) +{ + int key_bits; + + if (n == NULL || e == NULL) + return HPRE_CRYPTO_FAIL; + + if (check_pubkey_param(n, e) != HPRE_CRYPTO_SUCC) + return HPRE_CRYPTO_FAIL; + + *num_bytes = BN_num_bytes(n); + if (flen > *num_bytes) { + KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_DATA_GREATER_THEN_MOD_LEN); + US_WARN("data length is large than num bytes of rsa->n"); + return HPRE_CRYPTO_FAIL; + } + + key_bits = RSA_bits(rsa); + if (!check_bit_useful(key_bits)) { + US_WARN("op sizes not supported by hpre engine then back to soft!"); + return HPRE_CRYPTO_FAIL; + } + + return HPRE_CRYPTO_SUCC; +} + +static int hpre_rsa_prepare_opdata(const BIGNUM *n, int flen, + const unsigned char *from, + BN_CTX **bn_ctx, + BIGNUM **bn_ret, BIGNUM **f_ret) +{ + BN_CTX *bn_ctx_tmp; + BIGNUM *bn_ret_tmp = NULL; + BIGNUM *f = NULL; + + bn_ctx_tmp = BN_CTX_new(); + if (bn_ctx_tmp == NULL) { + KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_MALLOC_FAILURE); + US_ERR("fail to new BN_CTX."); + return HPRE_CRYPTO_SOFT; + } + + BN_CTX_start(bn_ctx_tmp); + bn_ret_tmp = BN_CTX_get(bn_ctx_tmp); + f = BN_CTX_get(bn_ctx_tmp); + if (bn_ret_tmp == NULL || f == NULL) { + KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_MALLOC_FAILURE); + US_ERR("fail to get BN_CTX."); + return HPRE_CRYPTO_SOFT; + } + + if (BN_bin2bn(from, flen, f) == NULL) { + KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_ERR_LIB_BN); + US_ERR("fail to bin2bn"); + return HPRE_CRYPTO_SOFT; + } + + if (BN_ucmp(f, n) >= 0) { + KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_DATA_TOO_LARGE_FOR_MODULUS); + US_ERR("data is too large"); + return HPRE_CRYPTO_SOFT; + } + *bn_ctx = bn_ctx_tmp; + *bn_ret 
= bn_ret_tmp; + *f_ret = f; + return HPRE_CRYPTO_SUCC; +} + + +static int hpre_rsa_public_encrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding) +{ + int rsa_soft_mark = 0; + const BIGNUM *n = NULL; + const BIGNUM *e = NULL; + const BIGNUM *d = NULL; + BIGNUM *ret_bn = NULL; + hpre_engine_ctx_t *eng_ctx = NULL; + unsigned char *in_buf = NULL; + BN_CTX *bn_ctx = NULL; + int num_bytes = 0; + int key_bits; + int ret; + + if (hpre_rsa_check_para(flen, from, to, rsa) != HPRE_CRYPTO_SUCC) + return HPRE_CRYPTO_FAIL; + + key_bits = RSA_bits(rsa); + if (!check_bit_useful(key_bits)) { + US_WARN("op sizes not supported by hpre engine then back to soft!"); + return hpre_rsa_soft_calc(flen, from, to, rsa, padding, PUB_ENC); + } + + eng_ctx = hpre_get_eng_ctx(rsa, 0); + if (eng_ctx == NULL) { + US_WARN("get eng ctx fail then switch to soft!"); + rsa_soft_mark = 1; + goto end_soft; + } + + RSA_get0_key(rsa, &n, &e, &d); + ret = check_pubkey_param(n, e); + GOTOEND_IF(ret != HPRE_CRYPTO_SUCC, "check public key fail", + KAE_F_HPRE_RSA_PUBENC, KAE_R_PUBLIC_KEY_INVALID); + + bn_ctx = BN_CTX_new(); + + GOTOEND_IF(bn_ctx == NULL, "bn_ctx MALLOC FAILED!", + KAE_F_HPRE_RSA_PUBENC, KAE_R_MALLOC_FAILURE); + BN_CTX_start(bn_ctx); + ret_bn = BN_CTX_get(bn_ctx); + num_bytes = BN_num_bytes(n); + in_buf = (unsigned char *)OPENSSL_malloc(num_bytes); + + GOTOEND_IF(ret_bn == NULL || in_buf == NULL, "PUBLIC_ENCRYPT RSA MALLOC FAILED!", + KAE_F_HPRE_RSA_PUBENC, KAE_R_MALLOC_FAILURE); + + ret = hpre_rsa_padding(flen, from, in_buf, num_bytes, padding, PUB_ENC); + GOTOEND_IF(ret == HPRE_CRYPTO_FAIL, "RSA PADDING FAILED", + KAE_F_HPRE_RSA_PUBENC, KAE_R_RSA_PADDING_FAILURE); + + hpre_rsa_fill_pubkey(e, n, eng_ctx); + eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; + eng_ctx->opdata.op_type = WCRYPTO_RSA_VERIFY; + eng_ctx->opdata.in = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, + eng_ctx->qlist->kae_queue_mem_pool->block_size); + 
eng_ctx->opdata.out = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, + eng_ctx->qlist->kae_queue_mem_pool->block_size); + kae_memcpy(eng_ctx->opdata.in, in_buf, eng_ctx->opdata.in_bytes); + + ret = hpre_rsa_crypto(eng_ctx, &eng_ctx->opdata); + GOTOEND_IF(ret == HPRE_CRYPTO_FAIL, "hpre rsa pub encrypt failed!", + KAE_F_HPRE_RSA_PUBENC, KAE_R_PUBLIC_ENCRYPTO_FAILURE); + + BN_bin2bn((const unsigned char *)eng_ctx->opdata.out, eng_ctx->opdata.out_bytes, ret_bn); + ret = BN_bn2binpad(ret_bn, to, num_bytes); + + US_DEBUG("hpre rsa public encrypt success!"); + +end: + hpre_free_bn_ctx_buf(bn_ctx, in_buf, num_bytes); + hpre_free_eng_ctx(eng_ctx); + +end_soft: + if (rsa_soft_mark == 1) + ret = hpre_rsa_soft_calc(flen, from, to, rsa, padding, PUB_ENC); + + return ret; +} + +static int hpre_rsa_private_encrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding) +{ + int ret = HPRE_CRYPTO_FAIL; + int rsa_soft_mark = 0; + hpre_engine_ctx_t *eng_ctx = NULL; + BIGNUM *f = (BIGNUM *)NULL; + BIGNUM *bn_ret = (BIGNUM *)NULL; + BIGNUM *res = (BIGNUM *)NULL; + const BIGNUM *n = (const BIGNUM *)NULL; + const BIGNUM *e = (const BIGNUM *)NULL; + const BIGNUM *d = (const BIGNUM *)NULL; + const BIGNUM *p = (const BIGNUM *)NULL; + const BIGNUM *q = (const BIGNUM *)NULL; + const BIGNUM *dmp1 = (const BIGNUM *)NULL; + const BIGNUM *dmq1 = (const BIGNUM *)NULL; + const BIGNUM *iqmp = (const BIGNUM *)NULL; + unsigned char *in_buf = (unsigned char *)NULL; + int num_bytes = 0; + int key_bits; + int version; + + if (hpre_rsa_check_para(flen, from, to, rsa) != HPRE_CRYPTO_SUCC) + return HPRE_CRYPTO_FAIL; + + key_bits = RSA_bits(rsa); + if (!check_bit_useful(key_bits)) { + US_WARN("op sizes not supported by hpre engine then back to soft!"); + return hpre_rsa_soft_calc(flen, from, to, rsa, padding, PRI_ENC); + } + + eng_ctx = hpre_get_eng_ctx(rsa, 0); + if (eng_ctx == NULL) { + US_WARN("get eng ctx fail then switch to soft!"); + rsa_soft_mark = 1; + goto 
end_soft; + } + + BN_CTX *bn_ctx = BN_CTX_new(); + + GOTOEND_IF(bn_ctx == NULL, "PRI_ENC MALLOC_FAILURE ", + KAE_F_HPRE_RSA_PRIENC, KAE_R_MALLOC_FAILURE); + + BN_CTX_start(bn_ctx); + f = BN_CTX_get(bn_ctx); + bn_ret = BN_CTX_get(bn_ctx); + RSA_get0_factors(rsa, &p, &q); + RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); + version = RSA_get_version(rsa); + + RSA_get0_key(rsa, &n, &e, &d); + num_bytes = BN_num_bytes(n); + + in_buf = (unsigned char *)OPENSSL_malloc(num_bytes); + GOTOEND_IF(bn_ret == NULL || in_buf == NULL, "OpenSSL malloc failure", + KAE_F_HPRE_RSA_PRIENC, KAE_R_MALLOC_FAILURE); + + ret = hpre_rsa_padding(flen, from, in_buf, num_bytes, padding, PRI_ENC); + GOTOEND_IF(ret == HPRE_CRYPTO_FAIL, "RSA PADDING FAILED!", + KAE_F_HPRE_RSA_PRIENC, KAE_R_RSA_PADDING_FAILURE); + + GOTOEND_IF(BN_bin2bn(in_buf, num_bytes, f) == NULL, "BN_bin2bn failure", + KAE_F_HPRE_RSA_PRIENC, KAE_R_ERR_LIB_BN); + + ret = BN_ucmp(f, n); + GOTOEND_IF(ret >= 0, "RSA PADDING FAILED!", + KAE_F_HPRE_RSA_PRIENC, KAE_R_DATA_TOO_LARGE_FOR_MODULUS); + + hpre_rsa_fill_pubkey(e, n, eng_ctx); + hpre_rsa_fill_prikey(rsa, eng_ctx, version, p, q, dmp1, dmq1, iqmp); + + eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; + eng_ctx->opdata.op_type = WCRYPTO_RSA_SIGN; + eng_ctx->opdata.in = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, + eng_ctx->qlist->kae_queue_mem_pool->block_size); + eng_ctx->opdata.out = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, + eng_ctx->qlist->kae_queue_mem_pool->block_size); + kae_memcpy(eng_ctx->opdata.in, in_buf, eng_ctx->opdata.in_bytes); + + ret = hpre_rsa_crypto(eng_ctx, &eng_ctx->opdata); + if (ret == HPRE_CRYPTO_FAIL) { + US_WARN("hpre rsa priv encrypt failed!"); + rsa_soft_mark = 1; + goto end; + } + + BN_bin2bn((const unsigned char *)eng_ctx->opdata.out, eng_ctx->opdata.out_bytes, bn_ret); + + if (hpre_get_prienc_res(padding, f, n, bn_ret, &res) == HPRE_CRYPTO_FAIL) + goto end; + + ret = BN_bn2binpad(res, to, num_bytes); 
+ + US_DEBUG("hpre rsa priv encrypt success!"); + +end: + hpre_free_bn_ctx_buf(bn_ctx, in_buf, num_bytes); + hpre_free_eng_ctx(eng_ctx); + +end_soft: + if (rsa_soft_mark == 1) + ret = hpre_rsa_soft_calc(flen, from, to, rsa, padding, PRI_ENC); + + return ret; +} + +static int hpre_rsa_public_decrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding) +{ + hpre_engine_ctx_t *eng_ctx = NULL; + BIGNUM *bn_ret = NULL; + BIGNUM *f = NULL; + BN_CTX *bn_ctx = NULL; + const BIGNUM *n = NULL; + const BIGNUM *e = NULL; + const BIGNUM *d = NULL; + int num_bytes = 0; + int rsa_soft_mark = 0; + unsigned char *buf = NULL; + int ret, len; + + if (hpre_rsa_check_para(flen, from, to, rsa) != HPRE_CRYPTO_SUCC) + return HPRE_CRYPTO_FAIL; + + RSA_get0_key(rsa, &n, &e, &d); + ret = hpre_rsa_check(flen, n, e, &num_bytes, rsa); + if (ret == HPRE_CRYPTO_FAIL) { + rsa_soft_mark = 1; + goto end_soft; + } + + eng_ctx = hpre_get_eng_ctx(rsa, 0); + if (eng_ctx == NULL) { + US_WARN("get eng ctx fail then switch to soft!"); + rsa_soft_mark = 1; + goto end_soft; + } + + buf = (unsigned char *)OPENSSL_malloc(num_bytes); + if (buf == NULL) { + rsa_soft_mark = 1; + goto end; + } + + ret = hpre_rsa_prepare_opdata(n, flen, from, &bn_ctx, &bn_ret, &f); + if (ret == HPRE_CRYPTO_SOFT) { + rsa_soft_mark = 1; + goto end; + } + + hpre_rsa_fill_pubkey(e, n, eng_ctx); + eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; + eng_ctx->opdata.op_type = WCRYPTO_RSA_VERIFY; + eng_ctx->opdata.in = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, + eng_ctx->qlist->kae_queue_mem_pool->block_size); + eng_ctx->opdata.out = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, + eng_ctx->qlist->kae_queue_mem_pool->block_size); + kae_memcpy(eng_ctx->opdata.in, from, eng_ctx->opdata.in_bytes); + + ret = hpre_rsa_crypto(eng_ctx, &eng_ctx->opdata); + GOTOEND_IF(ret == HPRE_CRYPTO_FAIL, "hpre rsa pub decrypt failed!", + KAE_F_HPRE_RSA_PUBDEC, 
KAE_R_PUBLIC_DECRYPTO_FAILURE); + + BN_bin2bn((const unsigned char *)eng_ctx->opdata.out, eng_ctx->opdata.out_bytes, bn_ret); + if ((padding == RSA_X931_PADDING) && ((bn_get_words(bn_ret)[0] & 0xf) != 12)) { // not 12 then BN_sub + GOTOEND_IF(!BN_sub(bn_ret, n, bn_ret), "BN_sub failed", + KAE_F_HPRE_RSA_PUBDEC, KAE_R_ERR_LIB_BN); + } + len = BN_bn2binpad(bn_ret, buf, num_bytes); + ret = check_rsa_padding(to, num_bytes, buf, len, padding, PUB_DEC); + if (ret == HPRE_CRYPTO_FAIL) { + US_WARN("hpre rsa check padding failed.switch to soft"); + rsa_soft_mark = 1; + goto end; + } + + US_DEBUG("hpre rsa public decrypt success!"); + +end: + hpre_free_bn_ctx_buf(bn_ctx, buf, num_bytes); + hpre_free_eng_ctx(eng_ctx); + +end_soft: + if (rsa_soft_mark == 1) + ret = hpre_rsa_soft_calc(flen, from, to, rsa, padding, PUB_DEC); + + return ret; +} + +static int hpre_rsa_private_decrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding) +{ + int ret = HPRE_CRYPTO_FAIL; + const BIGNUM *n = (const BIGNUM *)NULL; + const BIGNUM *e = (const BIGNUM *)NULL; + const BIGNUM *d = (const BIGNUM *)NULL; + const BIGNUM *p = (const BIGNUM *)NULL; + const BIGNUM *q = (const BIGNUM *)NULL; + const BIGNUM *dmp1 = (const BIGNUM *)NULL; + const BIGNUM *dmq1 = (const BIGNUM *)NULL; + const BIGNUM *iqmp = (const BIGNUM *)NULL; + BIGNUM *f = (BIGNUM *)NULL; + BIGNUM *bn_ret = (BIGNUM *)NULL; + int len, num_bytes, key_bits, version; + int rsa_soft_mark = 0; + unsigned char *buf = (unsigned char *)NULL; + BN_CTX *bn_ctx = NULL; + + if (hpre_rsa_check_para(flen, from, to, rsa) != HPRE_CRYPTO_SUCC) + return HPRE_CRYPTO_FAIL; + + RSA_get0_key(rsa, &n, &e, &d); + num_bytes = BN_num_bytes(n); + if (flen > num_bytes) { + KAEerr(KAE_F_HPRE_RSA_PRIDEC, KAE_R_DATA_GREATER_THEN_MOD_LEN); + US_ERR("PRIVATE_DECRYPT DATA_GREATER_THAN_MOD_LEN"); + return HPRE_CRYPTO_FAIL; + } + + key_bits = RSA_bits(rsa); + if (!check_bit_useful(key_bits)) { + US_WARN("op sizes not supported by hpre engine 
then back to soft!"); + return hpre_rsa_soft_calc(flen, from, to, rsa, padding, PRI_DEC); + } + + hpre_engine_ctx_t *eng_ctx = hpre_get_eng_ctx(rsa, 0); + + if (eng_ctx == NULL) { + US_WARN("get eng ctx fail then switch to soft!"); + rsa_soft_mark = 1; + goto end_soft; + } + + bn_ctx = BN_CTX_new(); + GOTOEND_IF(bn_ctx == NULL, "bn_ctx MALLOC FAILED!", + KAE_F_HPRE_RSA_PRIDEC, KAE_R_ERR_LIB_BN); + + BN_CTX_start(bn_ctx); + f = BN_CTX_get(bn_ctx); + bn_ret = BN_CTX_get(bn_ctx); + RSA_get0_factors(rsa, &p, &q); + RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); + version = RSA_get_version(rsa); + buf = (unsigned char *)OPENSSL_malloc(num_bytes); + GOTOEND_IF(bn_ret == NULL || buf == NULL, "PRIVATE_DECRYPT ERR_R_MALLOC_FAILURE", + KAE_F_HPRE_RSA_PRIDEC, KAE_R_MALLOC_FAILURE); + + GOTOEND_IF(BN_bin2bn(from, (int) flen, f) == NULL, "BN_bin2bn failure", + KAE_F_HPRE_RSA_PRIDEC, KAE_R_ERR_LIB_BN); + + GOTOEND_IF(BN_ucmp(f, n) >= 0, "PRIVATE_DECRYPT, RSA_R_DATA_TOO_LARGE_FOR_MODULUS", + KAE_F_HPRE_RSA_PRIDEC, KAE_R_DATA_TOO_LARGE_FOR_MODULUS); + + hpre_rsa_fill_pubkey(e, n, eng_ctx); + hpre_rsa_fill_prikey(rsa, eng_ctx, version, p, q, dmp1, dmq1, iqmp); + + eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; + eng_ctx->opdata.op_type = WCRYPTO_RSA_SIGN; + eng_ctx->opdata.in = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, + eng_ctx->qlist->kae_queue_mem_pool->block_size); + eng_ctx->opdata.out = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, + eng_ctx->qlist->kae_queue_mem_pool->block_size); + kae_memcpy(eng_ctx->opdata.in, from, eng_ctx->opdata.in_bytes); + + ret = hpre_rsa_crypto(eng_ctx, &eng_ctx->opdata); + if (ret == HPRE_CRYPTO_FAIL) { + US_WARN("hpre rsa priv decrypt failed.switch to soft"); + rsa_soft_mark = 1; + goto end; + } + + BN_bin2bn((const unsigned char *)eng_ctx->opdata.out, eng_ctx->opdata.out_bytes, bn_ret); + len = BN_bn2binpad(bn_ret, buf, num_bytes); + ret = check_rsa_padding(to, num_bytes, buf, len, padding, 
PRI_DEC); + if (ret == HPRE_CRYPTO_FAIL) { + US_WARN("hpre rsa check padding failed.switch to soft"); + rsa_soft_mark = 1; + goto end; + } + + US_DEBUG("hpre rsa priv decrypt success!"); + +end: + hpre_free_bn_ctx_buf(bn_ctx, buf, num_bytes); + hpre_free_eng_ctx(eng_ctx); + +end_soft: + if (rsa_soft_mark == 1) + ret = hpre_rsa_soft_calc(flen, from, to, rsa, padding, PRI_DEC); + return ret; +} + +static int hpre_rsa_keygen(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb) +{ + int ret = HPRE_CRYPTO_FAIL; + int rsa_soft_mark = 0; + struct wcrypto_rsa_pubkey *pubkey = NULL; + struct wcrypto_rsa_prikey *prikey = NULL; + struct wd_dtb *wd_e = NULL; + struct wd_dtb *wd_p = NULL; + struct wd_dtb *wd_q = NULL; + + if (bits < RSA_MIN_MODULUS_BITS) { + KAEerr(KAE_F_HPRE_RSA_KEYGEN, KAE_R_RSA_KEY_SIZE_TOO_SMALL); + US_ERR("RSA_BUILTIN_KEYGEN RSA_R_KEY_SIZE_TOO_SMALL"); + return HPRE_CRYPTO_FAIL; + } + + if (!check_bit_useful(bits)) { + US_WARN("op sizes not supported by hpre engine then back to soft!"); + return hpre_rsa_soft_genkey(rsa, bits, e, cb); + } + + hpre_engine_ctx_t *eng_ctx = hpre_get_eng_ctx(rsa, bits); + + if (eng_ctx == NULL) { + US_WARN("get eng ctx fail then switch to soft!"); + rsa_soft_mark = 1; + goto end_soft; + } + + BIGNUM *e_value = BN_new(); + BIGNUM *p = BN_new(); + BIGNUM *q = BN_new(); + + GOTOEND_IF(e_value == NULL || p == NULL || q == NULL, "e_value or p or q MALLOC FAILED.", + KAE_F_HPRE_RSA_KEYGEN, KAE_R_ERR_LIB_BN); + GOTOEND_IF(hpre_rsa_primegen(bits, e, p, q, cb) == OPENSSL_FAIL, "hisi_rsa_primegen failed", + KAE_F_HPRE_RSA_KEYGEN, KAE_R_GET_PRIMEKEY_FAILURE); + GOTOEND_IF(BN_copy(e_value, e) == NULL, "copy e failed", + KAE_F_HPRE_RSA_KEYGEN, KAE_R_ERR_LIB_BN); + + wcrypto_get_rsa_pubkey(eng_ctx->ctx, &pubkey); + wcrypto_get_rsa_pubkey_params(pubkey, &wd_e, NULL); + wd_e->dsize = BN_bn2bin(e_value, (unsigned char *)wd_e->data); + wcrypto_get_rsa_prikey(eng_ctx->ctx, &prikey); + wcrypto_get_rsa_crt_prikey_params(prikey, NULL, NULL, NULL, &wd_q, 
&wd_p); + wd_q->dsize = BN_bn2bin(q, (unsigned char *)wd_q->data); + wd_p->dsize = BN_bn2bin(p, (unsigned char *)wd_p->data); + + eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; + eng_ctx->opdata.op_type = WCRYPTO_RSA_GENKEY; + ret = hpre_fill_keygen_opdata(eng_ctx->ctx, &eng_ctx->opdata); + if (ret != KAE_SUCCESS) { + US_WARN("hpre_fill_keygen_opdata failed"); + rsa_soft_mark = 1; + goto end; + } + ret = hpre_rsa_sync(eng_ctx->ctx, &eng_ctx->opdata); + if (ret == HPRE_CRYPTO_FAIL) { + US_WARN("hpre generate rsa key failed.switch to soft"); + rsa_soft_mark = 1; + goto end; + } + ret = hpre_rsa_get_keygen_param(&eng_ctx->opdata, eng_ctx->ctx, rsa, + e_value, p, q); + + US_DEBUG("hpre rsa keygen success!"); + +end: + hpre_free_eng_ctx(eng_ctx); + +end_soft: + if (rsa_soft_mark == 1) + ret = hpre_rsa_soft_genkey(rsa, bits, e, cb); + return ret; +} + +static int hpre_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) +{ + US_DEBUG("- Started\n"); + return RSA_meth_get_mod_exp(RSA_PKCS1_OpenSSL()) + (r0, I, rsa, ctx); +} + +static int hpre_bn_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx) +{ + US_DEBUG("- Started\n"); + return RSA_meth_get_bn_mod_exp(RSA_PKCS1_OpenSSL()) + (r, a, p, m, ctx, m_ctx); +} diff --git a/kae_engine/src/v1/alg/pkey/hpre_rsa.h b/kae_engine/src/v1/alg/pkey/hpre_rsa.h new file mode 100644 index 0000000..11307c7 --- /dev/null +++ b/kae_engine/src/v1/alg/pkey/hpre_rsa.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE rsa using wd interface + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef HPRE_RSA_H +#define HPRE_RSA_H + +#include +#include + +#include +#include +#include +#include +#include + +#include "../../utils/engine_utils.h" +#include "../../utils/engine_opensslerr.h" + +#define RSA_MIN_MODULUS_BITS 512 + +#define RSA1024BITS 1024 +#define RSA2048BITS 2048 +#define RSA3072BITS 3072 +#define RSA4096BITS 4096 + +#define HPRE_CONT (-1) +#define HPRE_CRYPTO_SUCC 1 +#define HPRE_CRYPTO_FAIL 0 +#define HPRE_CRYPTO_SOFT (-1) + + +enum { + INVALID = 0, + PUB_ENC, + PUB_DEC, + PRI_ENC, + PRI_DEC, + MAX_CODE, +}; + +struct bignum_st { + BN_ULONG *d; + int top; + int dmax; + int neg; + int flags; +}; + +RSA_METHOD *hpre_get_rsa_methods(void); + +int hpre_module_init(void); + +void hpre_destroy(void); + +EVP_PKEY_METHOD *get_rsa_pkey_meth(void); + +#endif + diff --git a/kae_engine/src/v1/alg/pkey/hpre_rsa_soft.c b/kae_engine/src/v1/alg/pkey/hpre_rsa_soft.c new file mode 100644 index 0000000..86b915d --- /dev/null +++ b/kae_engine/src/v1/alg/pkey/hpre_rsa_soft.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for switch to soft rsa + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "hpre_rsa.h" +#include "../../utils/engine_log.h" + +/** + * succ: > 0 + * fail: 0 + */ +int hpre_rsa_soft_calc(int flen, const unsigned char *from, unsigned char *to, + RSA *rsa, int padding, int type) +{ + int ret = 0; + const RSA_METHOD *soft_rsa = RSA_PKCS1_OpenSSL(); + + switch (type) { + case PUB_ENC: + ret = RSA_meth_get_pub_enc(soft_rsa)(flen, from, to, rsa, padding); + break; + case PUB_DEC: + ret = RSA_meth_get_pub_dec(soft_rsa)(flen, from, to, rsa, padding); + break; + case PRI_ENC: + ret = RSA_meth_get_priv_enc(soft_rsa)(flen, from, to, rsa, padding); + break; + case PRI_DEC: + ret = RSA_meth_get_priv_dec(soft_rsa)(flen, from, to, rsa, padding); + break; + default: + return 0; + } + return ret; +} + +/** + * succ: 1 + * fail: 0 + */ +int hpre_rsa_soft_genkey(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb) +{ + UNUSED(cb); + const RSA_METHOD *default_meth = RSA_PKCS1_OpenSSL(); + int ret; + + RSA_set_method(rsa, default_meth); + ret = RSA_generate_key_ex(rsa, bits, e, (BN_GENCB *)NULL); + + if (ret != 1) { + US_ERR("rsa soft key generate fail!"); + return 0; + } + + return 1; +} diff --git a/kae_engine/src/v1/alg/pkey/hpre_rsa_soft.h b/kae_engine/src/v1/alg/pkey/hpre_rsa_soft.h new file mode 100644 index 0000000..1a795e4 --- /dev/null +++ b/kae_engine/src/v1/alg/pkey/hpre_rsa_soft.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * Description: This file provides the rsa interface for soft rsa + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef HPRE_RSA_SOFT_H +#define HPRE_RSA_SOFT_H + +#include + +int hpre_rsa_soft_calc(int flen, const unsigned char *from, unsigned char *to, + RSA *rsa, int padding, int type); + +int hpre_rsa_soft_genkey(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); + +#endif diff --git a/kae_engine/src/v1/alg/pkey/hpre_rsa_utils.c b/kae_engine/src/v1/alg/pkey/hpre_rsa_utils.c new file mode 100644 index 0000000..0630b28 --- /dev/null +++ b/kae_engine/src/v1/alg/pkey/hpre_rsa_utils.c @@ -0,0 +1,540 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine utils dealing with wrapdrive + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "../../utils/engine_types.h" +#include "../../utils/engine_log.h" +#include "hpre_rsa.h" +#include "hpre_wd.h" +#include + +BN_ULONG *bn_get_words(const BIGNUM *a) +{ + return a->d; +} + +void hpre_free_bn_ctx_buf(BN_CTX *bn_ctx, unsigned char *in_buf, int num) +{ + if (bn_ctx != NULL) + BN_CTX_end(bn_ctx); + BN_CTX_free(bn_ctx); + if (in_buf != NULL) + OPENSSL_clear_free(in_buf, num); +} + +/* check parameter */ +int hpre_rsa_check_para(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa) +{ + if ((rsa == NULL || from == NULL || to == NULL || flen <= 0)) { + US_ERR("RSA key %p, input %p or output %p are NULL, or flen invalid length.\n", rsa, from, to); + return HPRE_CRYPTO_FAIL; + } + return HPRE_CRYPTO_SUCC; +} + +int hpre_get_prienc_res(int padding, BIGNUM *f, const BIGNUM *n, BIGNUM *bn_ret, BIGNUM **res) +{ + if (padding == RSA_X931_PADDING) { + if (!BN_sub(f, n, bn_ret)) + return HPRE_CRYPTO_FAIL; + if (BN_cmp(bn_ret, f) > 0) + *res = f; + else + *res = bn_ret; + } else { + *res = bn_ret; + } + return HPRE_CRYPTO_SUCC; +} + +/** + * func: + * desc: + * Check HPRE rsa bits + * + * @param bit :rsa bit + * @return + * succ: 1 + * fail: 0 + */ +int check_bit_useful(const int bit) +{ + switch (bit) { + case RSA1024BITS: + case RSA2048BITS: + case RSA3072BITS: + case RSA4096BITS: + return 1; + default: + break; + } + return 0; +} + +/** + * + * @param n + * @param e + * @return success 1 / failed 0 + */ +int check_pubkey_param(const BIGNUM *n, const BIGNUM *e) +{ + if (BN_num_bits(n) > OPENSSL_RSA_MAX_MODULUS_BITS) { + KAEerr(KAE_F_CHECK_PUBKEY_PARAM, KAE_R_MODULE_TOO_LARGE); + US_ERR("RSA MODULUS TOO LARGE!"); + return HPRE_CRYPTO_FAIL; + } + + if (BN_ucmp(n, e) <= 0) { + KAEerr(KAE_F_CHECK_PUBKEY_PARAM, KAE_R_INVAILED_E_VALUE); + US_ERR("RSA E VALUE IS NOT VALID!"); + return HPRE_CRYPTO_FAIL; + } + + /* for large moduli, enforce exponent limit */ + if (BN_num_bits(n) > OPENSSL_RSA_SMALL_MODULUS_BITS) 
{ + if (BN_num_bits(e) > OPENSSL_RSA_MAX_PUBEXP_BITS) { + KAEerr(KAE_F_CHECK_PUBKEY_PARAM, KAE_R_INVAILED_E_VALUE); + US_ERR("RSA E VALUE IS NOT VALID!"); + return HPRE_CRYPTO_FAIL; + } + } + return HPRE_CRYPTO_SUCC; +} + +static int hpre_pubenc_padding(int flen, const unsigned char *from, + unsigned char *buf, int num, int padding) +{ + int ret = HPRE_CRYPTO_FAIL; + + switch (padding) { + case RSA_PKCS1_PADDING: + ret = RSA_padding_add_PKCS1_type_2(buf, num, from, flen); + break; + case RSA_PKCS1_OAEP_PADDING: + ret = RSA_padding_add_PKCS1_OAEP(buf, num, from, flen, + NULL, 0); + break; + case RSA_SSLV23_PADDING: + ret = RSA_padding_add_SSLv23(buf, num, from, flen); + break; + case RSA_NO_PADDING: + ret = RSA_padding_add_none(buf, num, from, flen); + break; + default: + KAEerr(KAE_F_HPRE_PUBENC_PADDING, KAE_R_UNKNOW_PADDING_TYPE); + US_ERR("RSA UNKNOWN PADDING TYPE!"); + ret = HPRE_CRYPTO_FAIL; + } + if (ret <= 0) { + US_ERR("padding error: ret = %d", ret); + ret = HPRE_CRYPTO_FAIL; + } else { + ret = HPRE_CRYPTO_SUCC; + } + return ret; +} + +static int hpre_prienc_padding(int flen, const unsigned char *from, + unsigned char *buf, int num, int padding) +{ + int ret = HPRE_CRYPTO_FAIL; + + switch (padding) { + case RSA_PKCS1_PADDING: + ret = RSA_padding_add_PKCS1_type_1(buf, num, from, flen); + break; + case RSA_X931_PADDING: + ret = RSA_padding_add_X931(buf, num, from, flen); + break; + case RSA_NO_PADDING: + ret = RSA_padding_add_none(buf, num, from, flen); + break; + default: + KAEerr(KAE_F_HPRE_PRIENC_PADDING, KAE_R_UNKNOW_PADDING_TYPE); + US_ERR("RSA UNKNOWN PADDING TYPE!"); + ret = HPRE_CRYPTO_FAIL; + } + if (ret <= 0) { + US_DEBUG("padding error: ret = %d", ret); + ret = HPRE_CRYPTO_FAIL; + } else { + ret = HPRE_CRYPTO_SUCC; + } + return ret; +} + +/** + * func: + * + * @param flen [IN] - size in bytes of input + * @param from [IN] - pointer to the input + * @param buf [OUT] - pointer to output data + * @param num [IN] - pointer to public key structure + * 
@param padding [IN] - Padding scheme + * @param type [IN] - Padding type + * @return + * SUCCESS: 1 + * FAIL: 0 + * desc: + * rsa encrypt padding. + * + */ +int hpre_rsa_padding(int flen, const unsigned char *from, unsigned char *buf, + int num, int padding, int type) +{ + int ret = HPRE_CRYPTO_FAIL; + + if (type == PUB_ENC) + return hpre_pubenc_padding(flen, from, buf, num, padding); + else if (type == PRI_ENC) + return hpre_prienc_padding(flen, from, buf, num, padding); + + US_ERR("hpre rsa padding type error."); + return ret; +} + +static int hpre_check_pubdec_padding(unsigned char *to, int num, + const unsigned char *buf, int len, int padding) +{ + int ret = HPRE_CRYPTO_FAIL; + + switch (padding) { + case RSA_PKCS1_PADDING: + ret = RSA_padding_check_PKCS1_type_1(to, num, buf, len, num); + break; + case RSA_X931_PADDING: + ret = RSA_padding_check_X931(to, num, buf, len, num); + break; + case RSA_NO_PADDING: + kae_memcpy(to, buf, len); + ret = len; + break; + default: + KAEerr(KAE_F_CHECK_HPRE_PUBDEC_PADDING, KAE_R_UNKNOW_PADDING_TYPE); + US_ERR("RSA UNKNOWN PADDING TYPE!"); + ret = HPRE_CRYPTO_FAIL; + } + + if (ret == -1) { + US_ERR("FAIL ret = %d.", ret); + ret = HPRE_CRYPTO_FAIL; + } + return ret; +} + +static int hpre_check_pridec_padding(unsigned char *to, int num, + const unsigned char *buf, int len, int padding) +{ + int ret = HPRE_CRYPTO_FAIL; + + switch (padding) { + case RSA_PKCS1_PADDING: + ret = RSA_padding_check_PKCS1_type_2(to, num, buf, len, num); + break; + case RSA_PKCS1_OAEP_PADDING: + ret = RSA_padding_check_PKCS1_OAEP(to, num, buf, len, num, NULL, 0); + break; + case RSA_SSLV23_PADDING: + ret = RSA_padding_check_SSLv23(to, num, buf, len, num); + break; + case RSA_NO_PADDING: + kae_memcpy(to, buf, len); + ret = len; + break; + default: + KAEerr(KAE_F_CHECK_HPRE_PRIDEC_PADDING, KAE_R_UNKNOW_PADDING_TYPE); + US_ERR("RSA UNKNOWN PADDING TYPE!"); + ret = HPRE_CRYPTO_FAIL; + } + + if (ret == -1) { + US_ERR("FAIL ret = %d.", ret); + ret = 
HPRE_CRYPTO_FAIL; + } + return ret; +} + +/** + * func: + * + * @param len [IN] - size in bytes of output + * @param to [IN] - pointer to the output + * @param buf [OUT] - pointer to output data + * @param num [IN] - pointer to public key structure + * @param padding [IN] - Padding scheme + * @param type [IN] - Padding type + * @return + * SUCCESS: 1 + * FAIL: 0 + * desc: + * rsa decrypt padding. + * + */ +int check_rsa_padding(unsigned char *to, int num, + const unsigned char *buf, int len, int padding, int type) +{ + int ret = HPRE_CRYPTO_FAIL; + + if (type == PUB_DEC) + return hpre_check_pubdec_padding(to, num, buf, len, padding); + else if (type == PRI_DEC) + return hpre_check_pridec_padding(to, num, buf, len, padding); + + US_ERR("hpre rsa padding type error."); + return ret; +} + +static int check_primeequal(int i, BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM *prime) +{ + int j; + + for (j = 0; j < i; j++) { + BIGNUM *prev_prime = NULL; + + if (j == 0) + prev_prime = rsa_p; + else + prev_prime = rsa_q; + + if (!BN_cmp(prime, prev_prime)) + return KAE_FAIL; + } + return KAE_SUCCESS; +} + +static int prime_mul_res(int i, BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM *r1, BN_CTX *ctx, BN_GENCB *cb) +{ + if (i == 1) { + /* we get at least 2 primes */ + if (!BN_mul(r1, rsa_p, rsa_q, ctx)) + goto err; + } else { + /* i == 0, do nothing */ + if (!BN_GENCB_call(cb, 3, i)) // When a random p has been found, call BN_GENCB_call(cb, 3, *i) + goto err; + goto cont; + } + return KAE_SUCCESS; +err: + return -1; +cont: + return 1; +} +static int check_prime_sufficient(int *i, int *bitsr, int *bitse, int *n, BIGNUM *rsa_p, BIGNUM *rsa_q, + BIGNUM *r1, BIGNUM *r2, BN_CTX *ctx, BN_GENCB *cb) +{ + BN_ULONG bitst; + static int retries; + int ret; + + /* calculate n immediately to see if it's sufficient */ + ret = prime_mul_res(*i, rsa_p, rsa_q, r1, ctx, cb); + if (ret != KAE_SUCCESS) + return ret; + if (!BN_rshift(r2, r1, *bitse - 4)) // right shift *bitse - 4 + goto err; + bitst = 
BN_get_word(r2); + if (bitst < 0x9 || bitst > 0xF) { + *bitse -= bitsr[*i]; + if (!BN_GENCB_call(cb, 2, *n++)) // When the n-th is rejected, call BN_GENCB_call(cb, 2, n) + goto err; + if (retries == 4) { // retries max is 4 + *i = -1; + *bitse = 0; + retries = 0; + goto cont; + } + retries++; + goto redo; + } + + if (!BN_GENCB_call(cb, 3, *i)) // When a random p has been found, call BN_GENCB_call(cb, 3, *i) + goto err; + retries = 0; + return 0; +err: + return -1; +redo: + return -2; // if redo return -2 +cont: + return 1; +} + +static void set_primes(int i, BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM **prime) +{ + if (i == 0) + *prime = rsa_p; + else + *prime = rsa_q; + BN_set_flags(*prime, BN_FLG_CONSTTIME); +} + +static int check_prime_useful(int *n, BIGNUM *prime, BIGNUM *r1, BIGNUM *r2, + BIGNUM *e_value, BN_CTX *ctx, BN_GENCB *cb) +{ + unsigned long error = ERR_peek_last_error(); + + if (!BN_sub(r2, prime, BN_value_one())) + goto err; + ERR_set_mark(); + BN_set_flags(r2, BN_FLG_CONSTTIME); + if (BN_mod_inverse(r1, r2, e_value, ctx) != NULL) + goto br; + + if (ERR_GET_LIB(error) == ERR_LIB_BN && ERR_GET_REASON(error) == BN_R_NO_INVERSE) + ERR_pop_to_mark(); + else + goto err; + if (!BN_GENCB_call(cb, 2, *n++)) // When the n-th is rejected, call BN_GENCB_call(cb, 2, n) + goto err; + return 0; +err: + return -1; +br: + return 1; +} +static void switch_p_q(BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM *p, BIGNUM *q) +{ + BIGNUM *tmp = (BIGNUM *)NULL; + + if (BN_cmp(rsa_p, rsa_q) < 0) { + tmp = rsa_p; + rsa_p = rsa_q; + rsa_q = tmp; + } + BN_copy(q, rsa_q); + BN_copy(p, rsa_p); +} + +static int hpre_get_prime_once(int i, const int *bitsr, int *n, BIGNUM *prime, BIGNUM *rsa_p, BIGNUM *rsa_q, + BIGNUM *r1, BIGNUM *r2, BIGNUM *e_value, BN_CTX *ctx, BN_GENCB *cb) +{ + int adj = 0; + int ret = KAE_FAIL; + + for (;;) { +redo: + if (!BN_generate_prime_ex(prime, bitsr[i] + adj, 0, (const BIGNUM *)NULL, (const BIGNUM *)NULL, cb)) + goto err; + /* + * prime should not be equal to p, q, 
r_3... + * (those primes prior to this one) + */ + if (check_primeequal(i, rsa_p, rsa_q, prime) == KAE_FAIL) + goto redo; + + ret = check_prime_useful(n, prime, r1, r2, e_value, ctx, cb); + if (ret == KAE_FAIL) + goto err; + else if (ret == 1) + break; + } + return ret; +err: + return KAE_FAIL; +} + +int hpre_rsa_primegen(int bits, BIGNUM *e_value, BIGNUM *p, BIGNUM *q, BN_GENCB *cb) +{ + int ok = -1; + int primes = 2; + int n = 0; + int bitse = 0; + int i = 0; + int bitsr[2]; // 2 bits + BN_CTX *ctx = (BN_CTX *)NULL; + BIGNUM *r1 = (BIGNUM *)NULL; + BIGNUM *r2 = (BIGNUM *)NULL; + BIGNUM *prime = (BIGNUM *)NULL; + BIGNUM *rsa_p, *rsa_q; + int ret, quo; + + ctx = BN_CTX_new(); + if (ctx == NULL) + goto err; + BN_CTX_start(ctx); + r1 = BN_CTX_get(ctx); + r2 = BN_CTX_get(ctx); + rsa_p = BN_CTX_get(ctx); + rsa_q = BN_CTX_get(ctx); + if (rsa_q == NULL) + goto err; + /* divide bits into 'primes' pieces evenly */ + quo = bits / primes; + bitsr[0] = quo; + bitsr[1] = quo; + /* generate p, q and other primes (if any) */ + for (i = 0; i < primes; i++) { + set_primes(i, rsa_p, rsa_q, &prime); +redo: + if (hpre_get_prime_once(i, bitsr, &n, prime, rsa_p, rsa_q, r1, r2, e_value, ctx, cb) == KAE_FAIL) + goto err; + + bitse += bitsr[i]; + ret = check_prime_sufficient(&i, bitsr, &bitse, &n, rsa_p, rsa_q, r1, r2, ctx, cb); + if (ret == -1) + goto err; + else if (ret == -2) // ret = -2 goto redo + goto redo; + else if (ret == 1) + continue; + } + switch_p_q(rsa_p, rsa_q, p, q); + ok = 1; +err: + if (ok == -1) { + KAEerr(KAE_F_HPRE_RSA_PRIMEGEN, KAE_R_ERR_LIB_BN); + US_ERR("rsa prime gen failed"); + ok = 0; + } + hpre_free_bn_ctx_buf(ctx, NULL, 0); + return ok; +} + +int hpre_rsa_iscrt(RSA *rsa) +{ + int version; + + if (unlikely(rsa == NULL)) + return 0; + + if (RSA_test_flags(rsa, RSA_FLAG_EXT_PKEY)) + return 1; + + version = RSA_get_version(rsa); + if (version == RSA_ASN1_VERSION_MULTI) + return 1; + + const BIGNUM *p = NULL; + const BIGNUM *q = NULL; + const BIGNUM *dmp1 = NULL; + 
const BIGNUM *dmq1 = NULL; + const BIGNUM *iqmp = NULL; + + RSA_get0_factors(rsa, &p, &q); + RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); + if ((p != NULL) && (q != NULL) && (dmp1 != NULL) && (dmq1 != NULL) && (iqmp != NULL)) + return 1; + + return 0; +} diff --git a/kae_engine/src/v1/alg/pkey/hpre_rsa_utils.h b/kae_engine/src/v1/alg/pkey/hpre_rsa_utils.h new file mode 100644 index 0000000..83ee1de --- /dev/null +++ b/kae_engine/src/v1/alg/pkey/hpre_rsa_utils.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the rsa interface for KAE engine utils dealing + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef HPRE_RSA_UTILS_H +#define HPRE_RSA_UTILS_H + +BN_ULONG *bn_get_words(const BIGNUM *a); + +void hpre_free_bn_ctx_buf(BN_CTX *bn_ctx, unsigned char *in_buf, int num); + +int hpre_rsa_check_para(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa); + +int hpre_get_prienc_res(int padding, BIGNUM *f, const BIGNUM *n, BIGNUM *bn_ret, BIGNUM **res); + +int check_bit_useful(const int bit); + +int check_pubkey_param(const BIGNUM *n, const BIGNUM *e); + +int hpre_rsa_padding(int flen, const unsigned char *from, unsigned char *buf, + int num, int padding, int type); + +int check_rsa_padding(unsigned char *to, int num, + const unsigned char *buf, int len, int padding, int type); + +int hpre_rsa_primegen(int bits, BIGNUM *e_value, BIGNUM *p, BIGNUM *q, BN_GENCB *cb); + +int hpre_rsa_iscrt(RSA *rsa); + +#endif diff --git a/kae_engine/src/v1/alg/pkey/hpre_wd.c b/kae_engine/src/v1/alg/pkey/hpre_wd.c new file mode 100644 index 0000000..971f7b2 --- /dev/null +++ b/kae_engine/src/v1/alg/pkey/hpre_wd.c @@ -0,0 +1,452 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE rsa using wd interface + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hpre_wd.h" +#include +#include "../../async/async_callback.h" +#include "../../async/async_task_queue.h" +#include "../../async/async_event.h" +#include "../../wdmngr/wd_queue_memory.h" +#include "../../utils/engine_types.h" +#include "hpre_rsa_utils.h" +#include "../../utils/engine_check.h" +#include "../../utils/engine_log.h" + +static void hpre_rsa_cb(const void *message, void *tag); + +KAE_QUEUE_POOL_HEAD_S *g_hpre_rsa_qnode_pool; + +void wd_hpre_uninit_qnode_pool(void) +{ + kae_queue_pool_destroy(g_hpre_rsa_qnode_pool, NULL); + g_hpre_rsa_qnode_pool = NULL; +} + +int wd_hpre_init_qnode_pool(void) +{ + kae_queue_pool_destroy(g_hpre_rsa_qnode_pool, NULL); + + g_hpre_rsa_qnode_pool = kae_init_queue_pool(WCRYPTO_RSA); + if (g_hpre_rsa_qnode_pool == NULL) { + US_ERR("hpre rsa qnode poll init fail!\n"); + return KAE_FAIL; + } + + return KAE_SUCCESS; +} + +KAE_QUEUE_POOL_HEAD_S *wd_hpre_get_qnode_pool(void) +{ + return g_hpre_rsa_qnode_pool; +} + +static hpre_engine_ctx_t *hpre_new_eng_ctx(RSA *rsa_alg) +{ + hpre_engine_ctx_t *eng_ctx = NULL; + + eng_ctx = (hpre_engine_ctx_t *)OPENSSL_malloc(sizeof(hpre_engine_ctx_t)); + if (eng_ctx == NULL) { + US_ERR("hpre engine_ctx malloc fail"); + return NULL; + } + kae_memset(eng_ctx, 0, sizeof(hpre_engine_ctx_t)); + + eng_ctx->priv_ctx.ssl_alg = rsa_alg; + eng_ctx->qlist = kae_get_node_from_pool(g_hpre_rsa_qnode_pool); + if (eng_ctx->qlist == NULL) { + US_ERR_LIMIT("error. 
get hardware queue failed"); + OPENSSL_free(eng_ctx); + eng_ctx = NULL; + return NULL; + } + eng_ctx->priv_ctx.is_privkey_ready = UNSET; + eng_ctx->priv_ctx.is_pubkey_ready = UNSET; + return eng_ctx; +} + +static int hpre_init_eng_ctx(hpre_engine_ctx_t *eng_ctx, int bits) +{ + struct wd_queue *q = eng_ctx->qlist->kae_wd_queue; + struct wd_queue_mempool *pool = eng_ctx->qlist->kae_queue_mem_pool; + + // this is for ctx is in use.we dont need to re create ctx->ctx again + if (eng_ctx->ctx && eng_ctx->opdata.in) { + kae_memset(eng_ctx->opdata.in, 0, eng_ctx->opdata.in_bytes); + return OPENSSL_SUCCESS; + } + if (eng_ctx->ctx == NULL) { + if (bits == 0) + eng_ctx->priv_ctx.key_size = RSA_size(eng_ctx->priv_ctx.ssl_alg); + else + eng_ctx->priv_ctx.key_size = bits >> BIT_BYTES_SHIFT; + + eng_ctx->rsa_setup.key_bits = eng_ctx->priv_ctx.key_size << BIT_BYTES_SHIFT; + eng_ctx->rsa_setup.is_crt = ISSET; + eng_ctx->rsa_setup.cb = (wcrypto_cb)hpre_rsa_cb; + eng_ctx->rsa_setup.br.alloc = kae_wd_alloc_blk; + eng_ctx->rsa_setup.br.free = kae_wd_free_blk; + eng_ctx->rsa_setup.br.iova_map = kae_dma_map; + eng_ctx->rsa_setup.br.iova_unmap = kae_dma_unmap; + eng_ctx->rsa_setup.br.usr = pool; + eng_ctx->ctx = wcrypto_create_rsa_ctx(q, &eng_ctx->rsa_setup); + + if (eng_ctx->ctx == NULL) { + US_ERR("create rsa ctx fail!"); + return OPENSSL_FAIL; + } + } + + return OPENSSL_SUCCESS; +} + +hpre_engine_ctx_t *hpre_get_eng_ctx(RSA *rsa, int bits) +{ + hpre_engine_ctx_t *eng_ctx = hpre_new_eng_ctx(rsa); + + if (eng_ctx == NULL) { + US_WARN("new eng ctx fail then switch to soft!"); + return NULL; + } + + if (hpre_init_eng_ctx(eng_ctx, bits) == 0) { + hpre_free_eng_ctx(eng_ctx); + US_WARN("init eng ctx fail then switch to soft!"); + return NULL; + } + return eng_ctx; +} + +void hpre_free_eng_ctx(hpre_engine_ctx_t *eng_ctx) +{ + US_DEBUG("hpre rsa free engine ctx start!"); + if (eng_ctx == NULL) { + US_DEBUG("no eng_ctx to free"); + return; + } + + if (eng_ctx->opdata.op_type != 
WCRYPTO_RSA_GENKEY) { + if (eng_ctx->opdata.in) + eng_ctx->rsa_setup.br.free(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->opdata.in); + if (eng_ctx->opdata.out) { + if (eng_ctx->qlist != NULL) + eng_ctx->rsa_setup.br.free(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->opdata.out); + } + } else { + if (eng_ctx->opdata.in) + wcrypto_del_kg_in(eng_ctx->ctx, (struct wcrypto_rsa_kg_in *)eng_ctx->opdata.in); + if (eng_ctx->opdata.out) + wcrypto_del_kg_out(eng_ctx->ctx, (struct wcrypto_rsa_kg_out *)eng_ctx->opdata.out); + } + + if (eng_ctx->qlist != NULL) { + hpre_free_rsa_ctx(eng_ctx->ctx); + kae_put_node_to_pool(g_hpre_rsa_qnode_pool, eng_ctx->qlist); + } + + eng_ctx->priv_ctx.ssl_alg = NULL; + eng_ctx->qlist = NULL; + eng_ctx->ctx = NULL; + eng_ctx->opdata.in = NULL; + eng_ctx->opdata.out = NULL; + eng_ctx->priv_ctx.is_privkey_ready = UNSET; + eng_ctx->priv_ctx.is_pubkey_ready = UNSET; + OPENSSL_free(eng_ctx); + eng_ctx = NULL; +} + +void hpre_free_rsa_ctx(void *ctx) +{ + if (ctx != NULL) { + wcrypto_del_rsa_ctx(ctx); + ctx = NULL; + } +} + +void hpre_rsa_fill_pubkey(const BIGNUM *e, const BIGNUM *n, hpre_engine_ctx_t *eng_ctx) +{ + struct wcrypto_rsa_pubkey *pubkey = NULL; + struct wd_dtb *wd_e = NULL; + struct wd_dtb *wd_n = NULL; + + wcrypto_get_rsa_pubkey(eng_ctx->ctx, &pubkey); + wcrypto_get_rsa_pubkey_params(pubkey, &wd_e, &wd_n); + if (!eng_ctx->priv_ctx.is_pubkey_ready) { + wd_e->dsize = BN_bn2bin(e, (unsigned char *)wd_e->data); + wd_n->dsize = BN_bn2bin(n, (unsigned char *)wd_n->data); + eng_ctx->priv_ctx.is_pubkey_ready = ISSET; + } +} + +/** + * FILL prikey to rsa_ctx in normal mode + * @param rsa get prikey from rsa + * @param rsa_ctx + */ +static void hpre_rsa_fill_prikey1(RSA *rsa, hpre_engine_ctx_t *eng_ctx) +{ + struct wcrypto_rsa_prikey *prikey = NULL; + struct wd_dtb *wd_d = NULL; + struct wd_dtb *wd_n = NULL; + const BIGNUM *n = (const BIGNUM *)NULL; + const BIGNUM *e = (const BIGNUM *)NULL; + const BIGNUM *d = (const BIGNUM *)NULL; + + 
RSA_get0_key(rsa, &n, &e, &d); + wcrypto_get_rsa_prikey(eng_ctx->ctx, &prikey); + wcrypto_get_rsa_prikey_params(prikey, &wd_d, &wd_n); + + if (!eng_ctx->priv_ctx.is_privkey_ready) { + wd_d->dsize = BN_bn2bin(d, (unsigned char *)wd_d->data); + wd_n->dsize = BN_bn2bin(n, (unsigned char *)wd_n->data); + eng_ctx->priv_ctx.is_privkey_ready = ISSET; + } +} + +/** + * FILL prikey to rsa_ctx in crt mode + * @param rsa get prikey from rsa + * @param rsa_ctx + */ +static void hpre_rsa_fill_prikey2(RSA *rsa, hpre_engine_ctx_t *eng_ctx) +{ + struct wcrypto_rsa_prikey *prikey = NULL; + struct wd_dtb *wd_dq, *wd_dp, *wd_q, *wd_p, *wd_qinv; + const BIGNUM *p = NULL; + const BIGNUM *q = NULL; + const BIGNUM *dmp1 = NULL; + const BIGNUM *dmq1 = NULL; + const BIGNUM *iqmp = NULL; + + RSA_get0_factors(rsa, &p, &q); + RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); + wcrypto_get_rsa_prikey(eng_ctx->ctx, &prikey); + wcrypto_get_rsa_crt_prikey_params(prikey, &wd_dq, &wd_dp, &wd_qinv, &wd_q, &wd_p); + if (!eng_ctx->priv_ctx.is_privkey_ready) { + wd_dq->dsize = BN_bn2bin(dmq1, (unsigned char *)wd_dq->data); + wd_dp->dsize = BN_bn2bin(dmp1, (unsigned char *)wd_dp->data); + wd_q->dsize = BN_bn2bin(q, (unsigned char *)wd_q->data); + wd_p->dsize = BN_bn2bin(p, (unsigned char *)wd_p->data); + wd_qinv->dsize = BN_bn2bin(iqmp, (unsigned char *)wd_qinv->data); + eng_ctx->priv_ctx.is_privkey_ready = ISSET; + } +} + +void hpre_rsa_fill_prikey(RSA *rsa, hpre_engine_ctx_t *eng_ctx, int version, const BIGNUM *p, const BIGNUM *q, + const BIGNUM *dmp1, const BIGNUM *dmq1, const BIGNUM *iqmp) +{ + if (hpre_rsa_iscrt(rsa)) + hpre_rsa_fill_prikey2(rsa, eng_ctx); + else + hpre_rsa_fill_prikey1(rsa, eng_ctx); +} + +int hpre_fill_keygen_opdata(void *ctx, struct wcrypto_rsa_op_data *opdata) +{ + struct wd_dtb *wd_e = NULL; + struct wd_dtb *wd_p = NULL; + struct wd_dtb *wd_q = NULL; + struct wcrypto_rsa_pubkey *pubkey = NULL; + struct wcrypto_rsa_prikey *prikey = NULL; + + wcrypto_get_rsa_pubkey(ctx, &pubkey); + 
wcrypto_get_rsa_pubkey_params(pubkey, &wd_e, NULL); + wcrypto_get_rsa_prikey(ctx, &prikey); + wcrypto_get_rsa_crt_prikey_params(prikey, NULL, NULL, NULL, &wd_q, &wd_p); + opdata->in = wcrypto_new_kg_in(ctx, wd_e, wd_p, wd_q); + if (!opdata->in) { + US_ERR("create rsa kgen in fail!\n"); + return -ENOMEM; + } + opdata->out = wcrypto_new_kg_out(ctx); + if (!opdata->out) { + wcrypto_del_kg_in(ctx, (struct wcrypto_rsa_kg_in *)opdata->in); + US_ERR("create rsa kgen out fail\n"); + return -ENOMEM; + } + + return 0; +} + +int hpre_rsa_get_keygen_param(struct wcrypto_rsa_op_data *opdata, void *ctx, + RSA *rsa, BIGNUM *e_value, BIGNUM *p, BIGNUM *q) +{ + BIGNUM *n = BN_new(); + BIGNUM *d = BN_new(); + BIGNUM *dmp1 = BN_new(); + BIGNUM *dmq1 = BN_new(); + BIGNUM *iqmp = BN_new(); + struct wd_dtb wd_d; + struct wd_dtb wd_n; + struct wd_dtb wd_qinv; + struct wd_dtb wd_dq; + struct wd_dtb wd_dp; + unsigned int key_bits, key_size; + struct wcrypto_rsa_kg_out *out = (struct wcrypto_rsa_kg_out *)opdata->out; + + key_bits = wcrypto_rsa_key_bits(ctx); + key_size = key_bits >> BIT_BYTES_SHIFT; + wcrypto_get_rsa_kg_out_params(out, &wd_d, &wd_n); + wcrypto_get_rsa_kg_out_crt_params(out, &wd_qinv, &wd_dq, &wd_dp); + + BN_bin2bn((unsigned char *)wd_d.data, key_size, d); + BN_bin2bn((unsigned char *)wd_n.data, key_size, n); + BN_bin2bn((unsigned char *)wd_qinv.data, wd_qinv.dsize, iqmp); + BN_bin2bn((unsigned char *)wd_dq.data, wd_dq.dsize, dmq1); + BN_bin2bn((unsigned char *)wd_dp.data, wd_dp.dsize, dmp1); + + if (!(RSA_set0_key(rsa, n, e_value, d) && RSA_set0_factors(rsa, p, q) && + RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp))) { + KAEerr(KAE_F_RSA_FILL_KENGEN_PARAM, KAE_R_RSA_KEY_NOT_COMPELET); + US_ERR("set key failed!"); + return OPENSSL_FAIL; + } else { + return OPENSSL_SUCCESS; + } +} + +static void hpre_rsa_cb(const void *message, void *tag) +{ + if (!message || !tag) { + US_ERR("hpre cb params err!\n"); + return; + } + struct wcrypto_rsa_msg *msg = (struct wcrypto_rsa_msg 
*)message; + hpre_engine_ctx_t *eng_ctx = (hpre_engine_ctx_t *)tag; + + eng_ctx->opdata.out = msg->out; + eng_ctx->opdata.out_bytes = msg->out_bytes; + eng_ctx->opdata.status = msg->result; +} + +int hpre_rsa_sync(void *ctx, struct wcrypto_rsa_op_data *opdata) +{ + void *tag = NULL; + int ret; + + if (!ctx || !opdata) { + US_ERR("sync params err!"); + return HPRE_CRYPTO_FAIL; + } + + ret = wcrypto_do_rsa(ctx, opdata, tag); + if (ret != WD_SUCCESS) { + US_ERR("hpre do rsa fail!"); + return HPRE_CRYPTO_FAIL; + } + + return HPRE_CRYPTO_SUCC; +} + +int hpre_rsa_async(hpre_engine_ctx_t *eng_ctx, + struct wcrypto_rsa_op_data *opdata, op_done_t *op_done) +{ + int ret = 0; + int cnt = 0; + enum task_type type = ASYNC_TASK_RSA; + void *tag = eng_ctx; + + do { + if (cnt > MAX_SEND_TRY_CNTS) + break; + ret = wcrypto_do_rsa(eng_ctx->ctx, opdata, tag); + if (ret == WD_STATUS_BUSY) { + if ((async_wake_job_v1(op_done->job, ASYNC_STATUS_EAGAIN) == 0 || + (async_pause_job_v1(op_done->job, ASYNC_STATUS_EAGAIN) == 0))) { + US_ERR("hpre wake job or hpre pause job fail!"); + ret = 0; + break; + } + cnt++; + } + } while (ret == WD_STATUS_BUSY); + + if (ret != WD_SUCCESS) + return HPRE_CRYPTO_FAIL; + + if (async_add_poll_task_v1(eng_ctx, op_done, type) == 0) + return HPRE_CRYPTO_FAIL; + + return HPRE_CRYPTO_SUCC; +} + +int hpre_rsa_crypto(hpre_engine_ctx_t *eng_ctx, struct wcrypto_rsa_op_data *opdata) +{ + int job_ret; + op_done_t op_done; + + async_init_op_done_v1(&op_done); + + if (op_done.job != NULL && kae_is_async_enabled()) { + if (async_setup_async_event_notification_v1(0) == 0) { + US_ERR("hpre async event notifying failed"); + async_cleanup_op_done_v1(&op_done); + return HPRE_CRYPTO_FAIL; + } + } else { + US_DEBUG("hpre rsa no async Job or async disable, back to sync!"); + async_cleanup_op_done_v1(&op_done); + return hpre_rsa_sync(eng_ctx->ctx, opdata); + } + + if (hpre_rsa_async(eng_ctx, opdata, &op_done) == HPRE_CRYPTO_FAIL) + goto err; + + do { + job_ret = 
async_pause_job_v1(op_done.job, ASYNC_STATUS_OK); + if (job_ret == 0) { + US_DEBUG("- pthread_yidle -"); + kae_pthread_yield(); + } + } + + while (!op_done.flag || ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(job_ret)); + + if (op_done.verifyRst <= 0) { + US_ERR("hpre rsa verify result failed with %d", op_done.verifyRst); + async_cleanup_op_done_v1(&op_done); + return HPRE_CRYPTO_FAIL; + } + + async_cleanup_op_done_v1(&op_done); + + US_DEBUG("hpre rsa do async job success!"); + return HPRE_CRYPTO_SUCC; + +err: + US_ERR("hpre rsa do async job err"); + (void)async_clear_async_event_notification_v1(); + async_cleanup_op_done_v1(&op_done); + return HPRE_CRYPTO_FAIL; +} diff --git a/kae_engine/src/v1/alg/pkey/hpre_wd.h b/kae_engine/src/v1/alg/pkey/hpre_wd.h new file mode 100644 index 0000000..5e18c60 --- /dev/null +++ b/kae_engine/src/v1/alg/pkey/hpre_wd.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the rsa interface for KAE rsa using wd interface + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef HPRE_WD_H +#define HPRE_WD_H + +#include + +#include "hpre_rsa.h" +#include +#include "../../wdmngr/wd_queue_memory.h" + + +#define UNSET 0 +#define ISSET 1 +#define BIT_BYTES_SHIFT 3 + +#define BN_ULONG unsigned long +#define MAX_SEND_TRY_CNTS 50 +#define MAX_RECV_TRY_CNTS 3000 + +#define RSA_BALANCE_TIMES 1280 + +#define WD_STATUS_BUSY (-EBUSY) + +struct hpre_priv_ctx { + RSA *ssl_alg; + int is_pubkey_ready; + int is_privkey_ready; + int key_size; +}; + +typedef struct hpre_priv_ctx hpre_priv_ctx_t; + +struct hpre_engine_ctx { + void *ctx; + struct wcrypto_rsa_op_data opdata; + struct wcrypto_rsa_ctx_setup rsa_setup; + struct KAE_QUEUE_DATA_NODE *qlist; + hpre_priv_ctx_t priv_ctx; +}; + +typedef struct hpre_engine_ctx hpre_engine_ctx_t; + +extern KAE_QUEUE_POOL_HEAD_S *g_hpre_rsa_qnode_pool; + +int wd_hpre_init_qnode_pool(void); +void wd_hpre_uninit_qnode_pool(void); + +KAE_QUEUE_POOL_HEAD_S *wd_hpre_get_qnode_pool(void); + +hpre_engine_ctx_t *hpre_get_eng_ctx(RSA *rsa, int bits); + +void hpre_free_eng_ctx(hpre_engine_ctx_t *eng_ctx); + +void hpre_free_rsa_ctx(void *ctx); + +void hpre_rsa_fill_pubkey(const BIGNUM *e, const BIGNUM *n, hpre_engine_ctx_t *rsa_ctx); + +void hpre_rsa_fill_prikey(RSA *rsa, hpre_engine_ctx_t *eng_ctx, int version, + const BIGNUM *p, const BIGNUM *q, const BIGNUM *dmp1, + const BIGNUM *dmq1, const BIGNUM *iqmp); + +int hpre_fill_keygen_opdata(void *ctx, + struct wcrypto_rsa_op_data *opdata); + +int hpre_rsa_get_keygen_param(struct wcrypto_rsa_op_data *opdata, void *ctx, + RSA *rsa, BIGNUM *e_value, BIGNUM *p, BIGNUM *q); + +int hpre_rsa_sync(void *ctx, struct wcrypto_rsa_op_data *opdata); + +int hpre_rsa_crypto(hpre_engine_ctx_t *eng_ctx, struct wcrypto_rsa_op_data *opdata); + +#endif + diff --git a/kae_engine/src/v1/async/async_callback.c b/kae_engine/src/v1/async/async_callback.c new file mode 100644 index 0000000..d01e82e --- /dev/null +++ b/kae_engine/src/v1/async/async_callback.c @@ -0,0 +1,61 @@ +/* + * Copyright 
(C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides implementation for callback in KAE engine + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + +#ifndef __USE_GNU +# define __USE_GNU +#endif + +#include +#include +#include +#include + +#include "async_callback.h" +#include "../utils/engine_log.h" + +#include + +void async_init_op_done_v1(op_done_t *op_done) +{ + if (op_done == NULL) { + US_ERR("error! parameter is NULL."); + return; + } + + op_done->flag = 0; + op_done->verifyRst = 0; + op_done->job = ASYNC_get_current_job(); +} + +void async_cleanup_op_done_v1(op_done_t *op_done) +{ + if (op_done == NULL) { + US_ERR("error! parameter is NULL."); + return; + } + + op_done->verifyRst = 0; + + if (op_done->job) + op_done->job = NULL; +} +/*lint -e(10)*/ diff --git a/kae_engine/src/v1/async/async_callback.h b/kae_engine/src/v1/async/async_callback.h new file mode 100644 index 0000000..3d34f79 --- /dev/null +++ b/kae_engine/src/v1/async/async_callback.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides interface for callback in KAE engine + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ASYNC_CALLBACK_H +#define ASYNC_CALLBACK_H + +#include +#include + +typedef struct { + volatile int flag; + volatile int verifyRst; + volatile ASYNC_JOB *job; +} op_done_t; + +void async_init_op_done_v1(op_done_t *op_done); +void async_cleanup_op_done_v1(op_done_t *op_done); +#endif + diff --git a/kae_engine/src/v1/async/async_event.c b/kae_engine/src/v1/async/async_event.c new file mode 100644 index 0000000..245c269 --- /dev/null +++ b/kae_engine/src/v1/async/async_event.c @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides implementation for async events in KAE engine + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + +#ifndef _USE_GNU +# define _USE_GNU +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "async_event.h" +#include "../utils/engine_log.h" +#include "../../uadk.h" + +static void async_fd_cleanup(ASYNC_WAIT_CTX *ctx, const void *key, OSSL_ASYNC_FD readfd, void *custom) +{ + (void)ctx; + (void)key; + (void)custom; + if (close(readfd) != 0) + US_WARN("Failed to close fd: %d - error: %d\n", readfd, errno); +} + +int async_setup_async_event_notification_v1(int jobStatus) +{ + (void)jobStatus; + ASYNC_JOB *job; + ASYNC_WAIT_CTX *waitctx; + OSSL_ASYNC_FD efd; + void *custom = NULL; + + job = ASYNC_get_current_job(); + if (job == NULL) { + US_ERR("Could not obtain current async job\n"); + return 0; + } + + waitctx = ASYNC_get_wait_ctx(job); + if (waitctx == NULL) { + US_ERR("current job has no waitctx."); + return 0; + } + + if (ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, &efd, + &custom) == 0) { + efd = eventfd(0, EFD_NONBLOCK); + if (efd == -1) { + US_ERR("efd error."); + return 0; + } + + if (ASYNC_WAIT_CTX_set_wait_fd(waitctx, engine_uadk_id, efd, + custom, async_fd_cleanup) == 0) { + US_ERR("set wait fd error."); + async_fd_cleanup(waitctx, engine_uadk_id, efd, NULL); + return 0; + } + } + return 1; +} + +int async_clear_async_event_notification_v1(void) +{ + ASYNC_JOB *job; + ASYNC_WAIT_CTX *waitctx; + OSSL_ASYNC_FD efd; + size_t num_add_fds = 0; + size_t num_del_fds = 0; + void *custom = NULL; + + job = ASYNC_get_current_job(); + if (job == NULL) { + US_ERR("no async job."); + return 0; + } + + waitctx = ASYNC_get_wait_ctx(job); + if (waitctx == NULL) { + US_ERR("The job has no waitctx"); + return 0; + } + + if (ASYNC_WAIT_CTX_get_changed_fds(waitctx, NULL, &num_add_fds, NULL, &num_del_fds) == 0) { + US_ERR("no add fds."); + return 0; + } + + if (num_add_fds > 0) { + if (ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, &efd, &custom) == 0) { + US_ERR("no 
fd."); + return 0; + } + + async_fd_cleanup(waitctx, engine_uadk_id, efd, NULL); + + if (ASYNC_WAIT_CTX_clear_fd(waitctx, engine_uadk_id) == 0) { + US_ERR("clear fd error."); + return 0; + } + } + + return 1; +} + +int async_pause_job_v1(volatile ASYNC_JOB *job, int jobStatus) +{ + (void)jobStatus; + + ASYNC_WAIT_CTX *waitctx; + OSSL_ASYNC_FD efd; + void *custom = NULL; + uint64_t buf = 0; + int ret = 0; + + waitctx = ASYNC_get_wait_ctx((ASYNC_JOB *)job); + if (waitctx == NULL) { + US_ERR("error. waitctx is NULL\n"); + return ret; + } + + if (ASYNC_pause_job() == 0) { + US_ERR("Failed to pause the job\n"); + return ret; + } + + ret = ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, &efd, &custom); + if (ret > 0) { + if (read(efd, &buf, sizeof(uint64_t)) == -1) { + if (errno != EAGAIN) + US_WARN("Failed to read from fd: %d - error: %d\n", efd, errno); + /* Not resumed by the expected async_wake_job() */ + return ASYNC_JOB_RESUMED_UNEXPECTEDLY; + } + } + + return ret; +} + + +int async_wake_job_v1(volatile ASYNC_JOB *job, int jobStatus) +{ + (void)jobStatus; + + ASYNC_WAIT_CTX *waitctx; + OSSL_ASYNC_FD efd; + void *custom = NULL; + uint64_t buf = 1; + int ret = 0; + + waitctx = ASYNC_get_wait_ctx((ASYNC_JOB *)job); + if (waitctx == NULL) { + US_ERR("error. waitctx is NULL\n"); + return ret; + } + + ret = ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, &efd, &custom); + if (ret > 0) { + if (write(efd, &buf, sizeof(uint64_t)) == -1) + US_ERR("Failed to write to fd: %d - error: %d\n", efd, errno); + } + + US_DEBUG("- async wake job success - "); + return ret; +} +/*lint -e(10)*/ diff --git a/kae_engine/src/v1/async/async_event.h b/kae_engine/src/v1/async/async_event.h new file mode 100644 index 0000000..46c11e7 --- /dev/null +++ b/kae_engine/src/v1/async/async_event.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * Description: This file provides interface for async events in KAE engine + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ASYNC_EVENTS_H__ +#define __ASYNC_EVENTS_H__ + +#include + +#include + + +#define ASYNC_JOB_RESUMED_UNEXPECTEDLY (-1) +#define ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(x) \ + ((x) == ASYNC_JOB_RESUMED_UNEXPECTEDLY) + +#define ASYNC_STATUS_UNSUPPORTED 0 +#define ASYNC_STATUS_ERR 1 +#define ASYNC_STATUS_OK 2 +#define ASYNC_STATUS_EAGAIN 3 + +int async_setup_async_event_notification_v1(int jobStatus); +int async_clear_async_event_notification_v1(void); +int async_pause_job_v1(volatile ASYNC_JOB *job, int jobStatus); +int async_wake_job_v1(volatile ASYNC_JOB *job, int jobStatus); + +#endif + diff --git a/kae_engine/src/v1/async/async_poll.c b/kae_engine/src/v1/async/async_poll.c new file mode 100644 index 0000000..0d75ae3 --- /dev/null +++ b/kae_engine/src/v1/async/async_poll.c @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for the KAE engine thread polling + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + +#ifndef __USE_GNU +# define __USE_GNU +#endif + +#include +#include +#include +#include +#include +#include + +#include "async_poll.h" +#include "async_event.h" +#include "async_task_queue.h" +#include "../utils/engine_utils.h" +#include "../utils/engine_check.h" +#include "../utils/engine_log.h" + +#include + +#define ASYNC_POLL_TASK_NUM 1024 + +static void async_polling_thread_destroy(void); + +static void *async_poll_process_func(void *args) +{ + (void)args; + int ret; + async_poll_task *task; + void *eng_ctx; + int type; + op_done_t *op_done; + + while (1) { + if (sem_wait(&g_async_poll_queue.full_sem) != 0) { + if (errno == EINTR) { + /* sem_wait is interrupted by interrupt, continue */ + continue; + } + US_ERR("wait async full_sem failed, errno:%d", errno); //lint !e666 + } + + task = async_get_queue_task_v1(); + if (task == NULL) { + usleep(1); + continue; + } + + eng_ctx = task->eng_ctx; + op_done = task->op_done; + type = task->type; + + US_DEBUG("async poll thread start to recv result."); + + ret = g_async_recv_func[type](eng_ctx); + + op_done->verifyRst = ret; + + op_done->flag = 1; + if (op_done->job) + async_wake_job_v1(op_done->job, ASYNC_STATUS_OK); + + US_DEBUG("process task done."); + } + + US_DEBUG("polling thread exit."); + return NULL; +} + +void async_polling_thread_reset(void) +{ + g_async_poll_queue.init_mark = 0; + kae_memset(&g_async_poll_queue, 0, sizeof(g_async_poll_queue)); +} + +int async_polling_thread_init(void) +{ + pthread_t thread_id; + + US_DEBUG("init 
polling thread."); + if (g_async_poll_queue.init_mark == INITED) + return 1; + + kae_memset(&g_async_poll_queue, 0, sizeof(async_poll_queue_t)); + + if (pthread_mutex_init(&(g_async_poll_queue.async_task_mutex), NULL) < 0) + US_ERR("init queue mutex failed, errno:%d", errno); //lint !e666 + + if (!async_poll_task_init_v1()) { + US_ERR("init poll task queue failed."); + return 0; + } + + if (kae_create_thread(&thread_id, NULL, async_poll_process_func, NULL) == 0) { + US_DEBUG("fail to create polling thread"); + goto _err; + } + + g_async_poll_queue.thread_id = thread_id; + g_async_poll_queue.init_mark = INITED; + (void)OPENSSL_atexit(async_polling_thread_destroy); + + return 1; + +_err: + async_poll_task_free_v1(); + return 0; +} + +static void async_polling_thread_destroy(void) +{ + if (g_async_poll_queue.exit_mark == 1) + return; + + async_poll_task_free_v1(); + g_async_poll_queue.exit_mark = 1; +} + +void async_module_init_v1(void) +{ + if (kae_is_async_enabled()) { + async_poll_task_free_v1(); + async_polling_thread_reset(); + if (!async_polling_thread_init()) + kae_disable_async(); + } +} +/*lint -e(10)*/ diff --git a/kae_engine/src/v1/async/async_poll.h b/kae_engine/src/v1/async/async_poll.h new file mode 100644 index 0000000..e800f98 --- /dev/null +++ b/kae_engine/src/v1/async/async_poll.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides interface for the KAE engine thread polling + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ASYNC_POLLING_H +#define ASYNC_POLLING_H +#include +#include "async_callback.h" +#include "async_task_queue.h" + +void async_module_init_v1(void); + +#endif + diff --git a/kae_engine/src/v1/async/async_task_queue.c b/kae_engine/src/v1/async/async_task_queue.c new file mode 100644 index 0000000..2feea7e --- /dev/null +++ b/kae_engine/src/v1/async/async_task_queue.c @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for the KAE engine async task queue + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + +#ifndef __USE_GNU +# define __USE_GNU +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include "async_task_queue.h" +#include "../utils/engine_utils.h" +#include "../utils/engine_log.h" + +#define ASYNC_POLL_TASK_NUM 4096 + +async_poll_queue_t g_async_poll_queue = { + .init_mark = 0, +}; + +async_recv_t g_async_recv_func[MAX_ALG_SIZE]; + +int async_register_poll_fn_v1(int type, async_recv_t func) +{ + if (type < 0 || type >= MAX_ALG_SIZE) + return -1; + + g_async_recv_func[type] = func; + return 0; +} + +int async_poll_task_init_v1(void) +{ + int ret; + + kae_memset(&g_async_poll_queue, 0, sizeof(g_async_poll_queue)); + + g_async_poll_queue.async_poll_task_queue_head = + (async_poll_task *)malloc(sizeof(async_poll_task) * ASYNC_POLL_TASK_NUM); + if (g_async_poll_queue.async_poll_task_queue_head == NULL) { + US_ERR("no enough memory for task queue, errno=%d", errno); //lint !e666 + return 0; + } + kae_memset(g_async_poll_queue.async_poll_task_queue_head, 0, + sizeof(async_poll_task) * ASYNC_POLL_TASK_NUM); + g_async_poll_queue.left_task = ASYNC_POLL_TASK_NUM; + + ret = sem_init(&g_async_poll_queue.empty_sem, 0, (unsigned int)g_async_poll_queue.left_task); + if (ret != 0) { + US_ERR("fail to init empty semaphore, errno=%d", errno); //lint !e666 + goto _err; + } + + if (sem_init(&g_async_poll_queue.full_sem, 0, 0) != 0) { + US_ERR("fail to init full semaphore, errno=%d", errno); //lint !e666 + goto _err; + } + + US_DEBUG("async poll task init done."); + return 1; +_err: + async_poll_task_free_v1(); + return 0; +} + +async_poll_task *async_get_queue_task_v1(void) +{ + async_poll_task *task_queue; + async_poll_task *cur_task; + int tail_pos; + + if (pthread_mutex_lock(&g_async_poll_queue.async_task_mutex) != 0) { + US_ERR("lock queue mutex failed, errno:%d", errno); //lint !e666 + return NULL; + } + + tail_pos = g_async_poll_queue.tail_pos; + task_queue = 
g_async_poll_queue.async_poll_task_queue_head; + cur_task = &task_queue[tail_pos]; + + g_async_poll_queue.tail_pos = (tail_pos + 1) % ASYNC_POLL_TASK_NUM; + g_async_poll_queue.cur_task--; + g_async_poll_queue.left_task++; + + if (pthread_mutex_unlock(&g_async_poll_queue.async_task_mutex) != 0) + US_ERR("unlock queue mutex failed, errno:%d", errno); //lint !e666 + + if (sem_post(&g_async_poll_queue.empty_sem) != 0) + US_ERR("post empty sem failed, errno:%d", errno); //lint !e666 + + US_DEBUG("get task end"); + return cur_task; +} + +static int async_add_queue_task(void *eng_ctx, op_done_t *op_done, enum task_type type) +{ + async_poll_task *task_queue; + async_poll_task *task; + int head_pos; + + if (sem_wait(&g_async_poll_queue.empty_sem) != 0) { + US_ERR("wait empty sem failed, errno:%d", errno); //lint !e666 + return 0; + } + + if (pthread_mutex_lock(&g_async_poll_queue.async_task_mutex) != 0) + US_ERR("lock queue mutex failed, errno:%d", errno); //lint !e666 + + head_pos = g_async_poll_queue.head_pos; + task_queue = g_async_poll_queue.async_poll_task_queue_head; + task = &task_queue[head_pos]; + task->eng_ctx = eng_ctx; + task->op_done = op_done; + task->type = type; + + head_pos = (head_pos + 1) % ASYNC_POLL_TASK_NUM; + g_async_poll_queue.head_pos = head_pos; + g_async_poll_queue.cur_task++; + g_async_poll_queue.left_task--; + + if (pthread_mutex_unlock(&g_async_poll_queue.async_task_mutex) != 0) + US_ERR("unlock queue mutex failed, errno:%d", errno); //lint !e666 + + if (sem_post(&g_async_poll_queue.full_sem) != 0) + US_ERR("post full sem failed, errno:%d", errno); //lint !e666 + + US_DEBUG("add task success"); + return 1; +} + +static void async_poll_queue_free(void) +{ + async_poll_task *task = g_async_poll_queue.async_poll_task_queue_head; + + if (task != NULL) + OPENSSL_free(task); + g_async_poll_queue.async_poll_task_queue_head = NULL; +} + +int async_add_poll_task_v1(void *eng_ctx, op_done_t *op_done, enum task_type type) +{ + US_DEBUG("start to add task 
to poll queue"); + return async_add_queue_task(eng_ctx, op_done, type); +} + +void async_poll_task_free_v1(void) +{ + int error; + + error = pthread_mutex_lock(&g_async_poll_queue.async_task_mutex); + if (error != 0) { + US_ERR("lock mutex failed, errno=%d", errno); //lint !e666 + return; + } + async_poll_queue_free(); + pthread_mutex_unlock(&g_async_poll_queue.async_task_mutex); + + sem_destroy(&g_async_poll_queue.empty_sem); + sem_destroy(&g_async_poll_queue.full_sem); + pthread_mutex_destroy(&g_async_poll_queue.async_task_mutex); + + US_DEBUG("async task free succ"); +} +/*lint -e(10)*/ diff --git a/kae_engine/src/v1/async/async_task_queue.h b/kae_engine/src/v1/async/async_task_queue.h new file mode 100644 index 0000000..b87347e --- /dev/null +++ b/kae_engine/src/v1/async/async_task_queue.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides interface for the KAE engine async task queue + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ASYNC_TASK_QUEUE_H +#define ASYNC_TASK_QUEUE_H +#include +#include "async_callback.h" +#include +#include "../../uadk_async.h" + +#include + +#define MAX_ALG_SIZE 6 + +typedef int (*async_recv_t)(void *engine_ctx); + +struct async_wd_polling_arg { + enum task_type type; + void *eng_ctx; + op_done_t *op_done; +}; +typedef struct async_wd_polling_arg async_poll_task; + +typedef struct async_poll_queue_t { + async_poll_task *async_poll_task_queue_head; + int head_pos; + int tail_pos; + int cur_task; + int left_task; + int shutdown; + sem_t empty_sem; + sem_t full_sem; + pthread_mutex_t async_task_mutex; + pthread_t thread_id; + int init_mark; + int exit_mark; +} async_poll_queue_t; + +extern async_poll_queue_t g_async_poll_queue; +extern async_recv_t g_async_recv_func[MAX_ALG_SIZE]; + +int async_register_poll_fn_v1(int type, async_recv_t async_recv); +int async_poll_task_init_v1(void); +async_poll_task *async_get_queue_task_v1(void); + +int async_add_poll_task_v1(void *ctx, op_done_t *op_done, enum task_type type); +void async_poll_task_free_v1(void); + +#endif diff --git a/kae_engine/src/v1/uadk_v1.h b/kae_engine/src/v1/uadk_v1.h new file mode 100644 index 0000000..9ca0a94 --- /dev/null +++ b/kae_engine/src/v1/uadk_v1.h @@ -0,0 +1,40 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +#ifndef UADK_V1_H +#define UADK_V1_H +#include "async/async_poll.h" +#include "utils/engine_fork.h" +#include "utils/engine_log.h" + +extern void sec_ciphers_free_ciphers(void); +extern int cipher_module_init(void); +extern int sec_engine_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid); + +extern void sec_digests_free_methods(void); +extern int digest_module_init(void); +extern int sec_engine_digests(ENGINE *e, const EVP_MD **digest, const int **nids, int nid); + +extern RSA_METHOD *hpre_get_rsa_methods(void); +extern int hpre_module_init(void); +extern void hpre_destroy(void); + +extern const DH_METHOD *hpre_get_dh_methods(void); +extern int hpre_module_dh_init(void); +extern void hpre_dh_destroy(void); + +extern int hpre_pkey_meths(ENGINE *e, EVP_PKEY_METHOD **pmeth, + const int **pnids, int nid); +extern int wd_get_nosva_dev_num(const char *algorithm); +#endif diff --git a/kae_engine/src/v1/utils/engine_check.c b/kae_engine/src/v1/utils/engine_check.c new file mode 100644 index 0000000..949eeb8 --- /dev/null +++ b/kae_engine/src/v1/utils/engine_check.c @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for an engine check thread + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include +#include +#include +#include +#include + +#include "../alg/ciphers/sec_ciphers_wd.h" +#include "../alg/digests/sec_digests_wd.h" +#include "../alg/pkey/hpre_wd.h" +#include "../alg/dh/hpre_dh_wd.h" +#include "engine_check.h" +#include "engine_utils.h" +#include "engine_log.h" + +KAE_CHECK_Q_TASK g_kae_check_q_task = { + .init_flag = NOT_INIT, +}; +static pthread_once_t g_check_thread_is_initialized = PTHREAD_ONCE_INIT; + +static struct kae_spinlock g_kae_async_spinmtx = { + .lock = 0, +}; + +static unsigned int g_kae_async_enabled = 1; + +void kae_enable_async(void) +{ + KAE_SPIN_LOCK(g_kae_async_spinmtx); + g_kae_async_enabled = 1; + KAE_SPIN_UNLOCK(g_kae_async_spinmtx); +} + +void kae_disable_async(void) +{ + KAE_SPIN_LOCK(g_kae_async_spinmtx); + g_kae_async_enabled = 0; + KAE_SPIN_UNLOCK(g_kae_async_spinmtx); +} + +int kae_is_async_enabled(void) +{ + return g_kae_async_enabled; +} + +static void kae_set_exit_flag(void) +{ + g_kae_check_q_task.exit_flag = 1; +} + +static void *kae_checking_q_loop_fn(void *args) +{ + (void)args; + + while (1) { + if (g_kae_check_q_task.exit_flag) + break; + + usleep(KAE_QUEUE_CHECKING_INTERVAL); + if (g_kae_check_q_task.exit_flag) + break; // double check + + kae_queue_pool_check_and_release(wd_ciphers_get_qnode_pool(), wd_ciphers_free_engine_ctx); + kae_queue_pool_check_and_release(wd_digests_get_qnode_pool(), wd_digests_free_engine_ctx); + kae_queue_pool_check_and_release(wd_hpre_get_qnode_pool(), NULL); + kae_queue_pool_check_and_release(wd_hpre_dh_get_qnode_pool(), NULL); + } + US_INFO("check thread exit normally."); + + return NULL; // lint !e527 +} + +static void kae_checking_q_thread_destroy(void) +{ + kae_set_exit_flag(); + pthread_join(g_kae_check_q_task.thread_id, NULL); + + (void)wd_digests_uninit_qnode_pool(); + (void)wd_ciphers_uninit_qnode_pool(); + (void)wd_hpre_dh_uninit_qnode_pool(); + (void)wd_hpre_uninit_qnode_pool(); +} + +static void kae_check_thread_init(void) +{ + pthread_t 
thread_id; + + if (g_kae_check_q_task.init_flag == INITED) + return; + + if (!kae_create_thread_joinable(&thread_id, NULL, kae_checking_q_loop_fn, NULL)) { + US_ERR("fail to create check thread"); + return; + } + + g_kae_check_q_task.thread_id = thread_id; + g_kae_check_q_task.init_flag = INITED; + + (void)OPENSSL_atexit(kae_checking_q_thread_destroy); +} + +int kae_checking_q_thread_init(void) +{ + US_DEBUG("check queue thread init begin"); + + if (g_kae_check_q_task.init_flag == INITED) + return 1; + + pthread_once(&g_check_thread_is_initialized, kae_check_thread_init); + + if (g_kae_check_q_task.init_flag != INITED) { + US_ERR("check thread init failed"); + g_check_thread_is_initialized = PTHREAD_ONCE_INIT; + return 0; + } + + return 1; +} + +void kae_check_thread_reset(void) +{ + kae_memset(&g_kae_check_q_task, 0, sizeof(KAE_CHECK_Q_TASK)); + g_check_thread_is_initialized = PTHREAD_ONCE_INIT; +} diff --git a/kae_engine/src/v1/utils/engine_check.h b/kae_engine/src/v1/utils/engine_check.h new file mode 100644 index 0000000..2cf0a7a --- /dev/null +++ b/kae_engine/src/v1/utils/engine_check.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the interface for an engine check thread + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ENGINE_CHECK_H +#define ENGINE_CHECK_H + +#include + +#define KAE_QUEUE_CHECKING_INTERVAL 15000 + + +struct kae_check_q_task_s { + int init_flag; + int exit_flag; + pthread_t thread_id; +}; + +typedef struct kae_check_q_task_s KAE_CHECK_Q_TASK; + +void kae_enable_async(void); +void kae_disable_async(void); +int kae_is_async_enabled(void); +int kae_checking_q_thread_init(void); +void kae_check_thread_reset(void); + +#endif // end of ENGINE_CHECK_H + diff --git a/kae_engine/src/v1/utils/engine_config.c b/kae_engine/src/v1/utils/engine_config.c new file mode 100644 index 0000000..746aeee --- /dev/null +++ b/kae_engine/src/v1/utils/engine_config.c @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides implementation of configuration file reading for the KAE engine + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "engine_config.h" + +int kae_drv_findsection(FILE *stream, const char *v_pszSection) +{ + char line[256]; // array length:256 + char *pos = NULL; + size_t section_len = strlen(v_pszSection); + + while (!feof(stream)) { + if (fgets(line, sizeof(line), stream) == NULL) + return -1; + + pos = line; + if (*(pos++) != '[') + continue; + + if (memcmp(pos, v_pszSection, section_len) == 0) { + pos += section_len; + if (*pos == ']') + return 0; + } + } + + return -1; +} + +void kae_drv_get_value(char *pos, char *v_pszValue) +{ + while (*pos != '\0') { + if (*pos == ' ') { + pos++; + continue; + } + + if (*pos == ';') { + *(v_pszValue++) = '\0'; + return; + } + + *(v_pszValue++) = *(pos++); + } +} + +int kae_drv_find_item(FILE *stream, const char *v_pszItem, char *v_pszValue) +{ + char line[256]; // array length:256 + char *pos = NULL; + + while (!feof(stream)) { + if (fgets(line, sizeof(line), stream) == NULL) + return -1; + + if (strstr(line, v_pszItem) != NULL) { + pos = strstr(line, "="); + if (pos != NULL) { + pos++; + kae_drv_get_value(pos, v_pszValue); + return 0; + } + } + + if ('[' == line[0]) + break; + } + + return -1; +} + +int kae_drv_get_item(const char *config_file, const char *v_pszSection, + const char *v_pszItem, char *v_pszValue) +{ + FILE *stream; + int retvalue = -1; + + stream = fopen(config_file, "r"); + if (stream == NULL) + return -1; + + if (kae_drv_findsection(stream, v_pszSection) == 0) + retvalue = kae_drv_find_item(stream, v_pszItem, v_pszValue); + + fclose(stream); + + return retvalue; +} diff --git a/kae_engine/src/v1/utils/engine_config.h b/kae_engine/src/v1/utils/engine_config.h new file mode 100644 index 0000000..ddf4027 --- /dev/null +++ b/kae_engine/src/v1/utils/engine_config.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * Description: This file provides interface of configuration file reading for the KAE engine + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef HISI_ACC_OPENSSL_CONFIG_H +#define HISI_ACC_OPENSSL_CONFIG_H + +#include +#include +#include + +int kae_drv_get_item(const char *config_file, const char *v_pszSection, + const char *v_pszItem, char *v_pszValue); + +#endif // HISI_ACC_OPENSSL_CONFIG_H diff --git a/kae_engine/src/v1/utils/engine_fork.c b/kae_engine/src/v1/utils/engine_fork.c new file mode 100644 index 0000000..14f4e6e --- /dev/null +++ b/kae_engine/src/v1/utils/engine_fork.c @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the inplemenation for a KAE engine fork + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "engine_fork.h" +#include "engine_check.h" +#include "../async/async_poll.h" +#include "../alg/pkey/hpre_rsa.h" +#include "../alg/dh/hpre_dh.h" +#include "../alg/ciphers/sec_ciphers.h" +#include "../alg/digests/sec_digests.h" +#include "../utils/engine_log.h" +#include "../alg/pkey/hpre_wd.h" +#include "../alg/dh/hpre_dh_wd.h" +#include "../alg/ciphers/sec_ciphers_wd.h" +#include "../alg/digests/sec_digests_wd.h" + +void engine_init_child_at_fork_handler_v1(void) +{ + US_DEBUG("call engine_init_child_at_fork_handler"); + + if (g_sec_digests_qnode_pool) + g_sec_digests_qnode_pool->pool_use_num = 0; + if (g_sec_ciphers_qnode_pool) + g_sec_ciphers_qnode_pool->pool_use_num = 0; + if (g_hpre_rsa_qnode_pool) + g_hpre_rsa_qnode_pool->pool_use_num = 0; + if (g_hpre_dh_qnode_pool) + g_hpre_dh_qnode_pool->pool_use_num = 0; + + (void)hpre_module_init(); + (void)hpre_module_dh_init(); + (void)cipher_module_init(); + (void)digest_module_init(); + + kae_check_thread_reset(); + if (!kae_checking_q_thread_init()) + US_WARN("kae queue check thread init failed"); + async_module_init_v1(); +} + +void engine_do_before_fork_handler(void) +{ +} + +void engine_init_parent_at_fork_handler(void) +{ +} diff --git a/kae_engine/src/v1/utils/engine_fork.h b/kae_engine/src/v1/utils/engine_fork.h new file mode 100644 index 0000000..4042d27 --- /dev/null +++ b/kae_engine/src/v1/utils/engine_fork.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the interface for a KAE engine fork + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __HPRE_FORK_H +#define __HPRE_FORK_H + +void engine_init_child_at_fork_handler_v1(void); +void engine_do_before_fork_handler(void); +void engine_init_parent_at_fork_handler(void); + +#endif diff --git a/kae_engine/src/v1/utils/engine_log.c b/kae_engine/src/v1/utils/engine_log.c new file mode 100644 index 0000000..6e32b97 --- /dev/null +++ b/kae_engine/src/v1/utils/engine_log.c @@ -0,0 +1,247 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for log module + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include "engine_log.h" +#include "engine_config.h" +#include "engine_utils.h" + +#define KAE_CONFIG_FILE_NAME "/kae.cnf" +#define MAX_LEVEL_LEN 10 +#define MAX_CONFIG_LEN 512 + +static const char *g_kae_conf_env = "KAE_CONF_ENV"; + +FILE *g_kae_debug_log_file = (FILE *)NULL; +pthread_mutex_t g_debug_file_mutex = PTHREAD_MUTEX_INITIALIZER; +int g_debug_file_ref_count; +int g_log_init_times; +int g_kae_log_level; + +const char *g_log_level[] = { + "none", + "error", + "warning", + "info", + "debug", +}; + +static char *kae_getenv(const char *name) +{ + return getenv(name); +} + +static void kae_set_conf_debuglevel(void) +{ + char *conf_path = kae_getenv(g_kae_conf_env); + unsigned int i = 0; + const char *filename = KAE_CONFIG_FILE_NAME; + char *file_path = (char *)NULL; + char *debuglev = (char *)NULL; + int ret; + + if (conf_path == NULL || strlen(conf_path) > MAX_CONFIG_LEN) + goto err; + file_path = (char *)kae_malloc(strlen(conf_path) + strlen(filename) + 1); + debuglev = (char *)kae_malloc(MAX_LEVEL_LEN); + if (!file_path || !debuglev) + goto err; + memset(debuglev, 0, MAX_LEVEL_LEN); + memset(file_path, 0, sizeof(conf_path) + sizeof(filename) + 1); + strcat(file_path, conf_path); + strcat(file_path, filename); + ret = kae_drv_get_item(file_path, "LogSection", "debug_level", debuglev); + if (ret != 0) + goto err; + + for (i = 0; i < sizeof(g_log_level) / sizeof(g_log_level[0]); i++) { + if (strncmp(g_log_level[i], debuglev, strlen(debuglev) - 1) == 0) { + g_kae_log_level = i; + kae_free(file_path); + kae_free(debuglev); + return; + } + } + +err: + g_kae_log_level = KAE_ERROR; + if (debuglev != NULL) { + kae_free(debuglev); + debuglev = (char *)NULL; + } + if (file_path != NULL) { + kae_free(file_path); + file_path = (char *)NULL; + } +} + +void kae_debug_init_log(void) +{ + pthread_mutex_lock(&g_debug_file_mutex); + kae_set_conf_debuglevel(); + if (!g_debug_file_ref_count && g_kae_log_level != KAE_NONE) { + 
g_kae_debug_log_file = fopen(KAE_DEBUG_FILE_PATH, "a+"); + if (g_kae_debug_log_file == NULL) { + g_kae_debug_log_file = stderr; + fprintf(stderr, "unable to open %s, %s\n", KAE_DEBUG_FILE_PATH, strerror(errno)); + } else { + g_debug_file_ref_count++; + } + } + g_log_init_times++; + pthread_mutex_unlock(&g_debug_file_mutex); +} + +void kae_debug_close_log(void) +{ + pthread_mutex_lock(&g_debug_file_mutex); + g_log_init_times--; + if (g_debug_file_ref_count && (g_log_init_times == 0)) { + if (g_kae_debug_log_file != NULL) { + fclose(g_kae_debug_log_file); + g_debug_file_ref_count--; + g_kae_debug_log_file = stderr; + } + } + pthread_mutex_unlock(&g_debug_file_mutex); +} + +void ENGINE_LOG_LIMIT(int level, int times, int limit, const char *fmt, ...) +{ + struct tm *log_tm_p = (struct tm *)NULL; + static unsigned long ulpre; + static int is_should_print; + va_list args1 = { 0 }; + + if (level > g_kae_log_level) + return; + + // cppcheck-suppress * + va_start(args1, fmt); + time_t curr = time((time_t *)NULL); + + if (difftime(curr, ulpre) > limit) + is_should_print = times; + if (is_should_print <= 0) + is_should_print = 0; + if (is_should_print-- > 0) { + log_tm_p = (struct tm *)localtime(&curr); + flock(g_kae_debug_log_file->_fileno, LOCK_EX); + pthread_mutex_lock(&g_debug_file_mutex); + fseek(g_kae_debug_log_file, 0, SEEK_END); + if (log_tm_p != NULL) { + fprintf(g_kae_debug_log_file, "[%4d-%02d-%02d %02d:%02d:%02d][%s][%s:%d:%s()] ", + (1900 + log_tm_p->tm_year), (1 + log_tm_p->tm_mon), log_tm_p->tm_mday, // base time 1900 year + log_tm_p->tm_hour, log_tm_p->tm_min, log_tm_p->tm_sec, + g_log_level[level], __FILE__, __LINE__, __func__); + } else { + fprintf(g_kae_debug_log_file, "[%s][%s:%d:%s()] ", + g_log_level[level], __FILE__, __LINE__, __func__); + } + vfprintf(g_kae_debug_log_file, fmt, args1); + fprintf(g_kae_debug_log_file, "\n"); + if (ftell(g_kae_debug_log_file) > KAE_LOG_MAX_SIZE) { + kae_save_log(g_kae_debug_log_file); + if 
(ftruncate(g_kae_debug_log_file->_fileno, 0)) + ; + fseek(g_kae_debug_log_file, 0, SEEK_SET); + } + pthread_mutex_unlock(&g_debug_file_mutex); + flock(g_kae_debug_log_file->_fileno, LOCK_UN); + ulpre = time((time_t *)NULL); + } + + va_end(args1); +} + +static int need_debug(void) +{ + if (g_kae_log_level >= KAE_DEBUG) + return 1; + else + return 0; +} + +/* + * desc: print data for debug + * @param name the name of buf + * @param buf the buf msg when input + * @param len bd len + */ +void dump_data(const char *name, unsigned char *buf, unsigned int len) +{ + unsigned int i; + + if (need_debug()) { + US_DEBUG("DUMP ==> %s", name); + for (i = 0; i + 8 <= len; i += 8) { // buf length:8 + US_DEBUG("0x%llx: \t%02x %02x %02x %02x %02x %02x %02x %02x", + (unsigned long long)(buf + i), + *(buf + i), (*(buf + i + 1)), *(buf + i + 2), *(buf + i + 3), // buf offset:0,1,2,3 + *(buf + i + 4), *(buf + i + 5), *(buf + i + 6), *(buf + i + 7)); // buf offset:4,5,6,7 + } + + if (len % 8) { // remainder:divide by 8 + US_DEBUG("0x%llx: \t", (unsigned long long)(buf + i)); + for (; i < len; i++) + US_DEBUG("%02x ", buf[i]); + } + } +} + +/* + * desc: print bd for debug + * @param bd the buf msg when input + * @param len bd len + */ +void dump_bd(unsigned int *bd, unsigned int len) +{ + unsigned int i; + + if (need_debug()) { + for (i = 0; i < len; i++) + US_DEBUG("Word[%d] 0x%08x", i, bd[i]); + } +} + +void kae_save_log(FILE *src) +{ + int size = 0; + char buf[1024] = {0}; // buf length:1024 + + if (src == NULL) + return; + + FILE *dst = fopen(KAE_DEBUG_FILE_PATH_OLD, "w"); + + if (dst == NULL) + return; + + fseek(src, 0, SEEK_SET); + while (1) { + size = fread(buf, sizeof(char), 1024, src); // buf length:1024 + fwrite(buf, sizeof(char), size, dst); + if (!size) + break; + } + + fclose(dst); +} diff --git a/kae_engine/src/v1/utils/engine_log.h b/kae_engine/src/v1/utils/engine_log.h new file mode 100644 index 0000000..b529628 --- /dev/null +++ b/kae_engine/src/v1/utils/engine_log.h @@ 
-0,0 +1,71 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the interface for log module + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef KAE_ACC_ENGINE_LOG_H +#define KAE_ACC_ENGINE_LOG_H +#include +#include +#include +#include +#include + + +#define LOG_LEVEL_CONFIG KAE_NONE +#define KAE_DEBUG_FILE_PATH "/var/log/kae.log" +#define KAE_DEBUG_FILE_PATH_OLD "/var/log/kae.log.old" +#define KAE_LOG_MAX_SIZE 209715200 + +enum KAE_LOG_LEVEL { + KAE_NONE = 0, + KAE_ERROR, + KAE_WARNING, + KAE_INFO, + KAE_DEBUG, +}; + +void ENGINE_LOG_LIMIT(int level, int times, int limit, const char *fmt, ...); + +#define US_WARN(fmt, args...) ENGINE_LOG_LIMIT(KAE_WARNING, 3, 30, fmt, ##args) +#define US_ERR(fmt, args...) ENGINE_LOG_LIMIT(KAE_ERROR, 3, 30, fmt, ##args) +#define US_INFO(fmt, args...) ENGINE_LOG_LIMIT(KAE_INFO, 3, 30, fmt, ##args) +#define US_DEBUG(fmt, args...) ENGINE_LOG_LIMIT(KAE_DEBUG, 3, 30, fmt, ##args) +#define US_WARN_LIMIT(fmt, args...) ENGINE_LOG_LIMIT(KAE_WARNING, 3, 30, fmt, ##args) +#define US_ERR_LIMIT(fmt, args...) ENGINE_LOG_LIMIT(KAE_ERROR, 3, 30, fmt, ##args) +#define US_INFO_LIMIT(fmt, args...) ENGINE_LOG_LIMIT(KAE_INFO, 3, 30, fmt, ##args) +#define US_DEBUG_LIMIT(fmt, args...) 
ENGINE_LOG_LIMIT(KAE_DEBUG, 3, 30, fmt, ##args) + +void kae_debug_init_log(void); +void kae_debug_close_log(void); +void kae_save_log(FILE *src); + +/* + * desc: print data for debug + * @param name the name of buf + * @param buf the buf msg when input + * @param len bd len + */ +void dump_data(const char *name, unsigned char *buf, unsigned int len); + +/* + * desc: print bd for debug + * @param bd the buf msg when input + * @param len bd len + */ +void dump_bd(unsigned int *bd, unsigned int len); + +#endif diff --git a/kae_engine/src/v1/utils/engine_opensslerr.c b/kae_engine/src/v1/utils/engine_opensslerr.c new file mode 100644 index 0000000..b86df3f --- /dev/null +++ b/kae_engine/src/v1/utils/engine_opensslerr.c @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for error module + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "engine_opensslerr.h" + +#define ERR_FUNC(func) ERR_PACK(0, func, 0) +#define ERR_REASON(reason) ERR_PACK(0, 0, reason) + +static int g_kae_lib_error_code; +static int g_kae_error_init = 1; + +static ERR_STRING_DATA g_kae_str_functs[] = { + { ERR_FUNC(KAE_F_HPRE_GET_RSA_METHODS), "hpre_get_RSA_methods" }, + { ERR_FUNC(KAE_F_CHANGRSAMETHOD), "changRsaMethod" }, + { ERR_FUNC(KAE_F_HPRE_PKEY_METHS), "hpre_pkey_meths" }, + { ERR_FUNC(KAE_F_BIND_HELPER), "bind_helper" }, + { ERR_FUNC(KAE_F_RSA_FILL_KENGEN_PARAM), "rsa_fill_keygen_param" }, + { ERR_FUNC(KAE_F_HPRE_RSA_PUBENC), "hpre_rsa_public_encrypt" }, + { ERR_FUNC(KAE_F_HPRE_RSA_PRIENC), "hpre_rsa_private_encrypt" }, + { ERR_FUNC(KAE_F_HPRE_RSA_PUBDEC), "hpre_rsa_public_decrypt" }, + { ERR_FUNC(KAE_F_HPRE_RSA_PRIDEC), "hpre_rsa_private_decrypt" }, + { ERR_FUNC(KAE_F_HPRE_RSA_PRIMEGEN), "hpre_rsa_primegen" }, + { ERR_FUNC(KAE_F_HPRE_RSA_KEYGEN), "hpre_rsa_keygen" }, + { ERR_FUNC(KAE_F_CHECK_PUBKEY_PARAM), "check_pubkey_param" }, + { ERR_FUNC(KAE_F_HPRE_PUBENC_PADDING), "hpre_pubenc_padding" }, + { ERR_FUNC(KAE_F_HPRE_PRIENC_PADDING), "hpre_prienc_padding" }, + { ERR_FUNC(KAE_F_CHECK_HPRE_PUBDEC_PADDING), "hpre_check_pubdec_padding" }, + { ERR_FUNC(KAE_F_CHECK_HPRE_PRIDEC_PADDING), "hpre_check_pridec_padding" }, + { ERR_FUNC(KAE_F_DIGEST_SOFT_INIT), "sec_digest_soft_init" }, + { 0, (const char *)NULL } +}; + +static ERR_STRING_DATA g_kae_str_reasons[] = { + { ERR_REASON(KAE_R_NO_MATCH_DEVICE), "get no match device.check the hw resource" }, + { ERR_REASON(KAE_R_MALLOC_FAILURE), "no system memory to alloc" }, + { ERR_REASON(KAE_R_HWMEM_MALLOC_FAILURE), "no hardware memory to alloc" }, + { ERR_REASON(KAE_R_INPUT_PARAM_ERR), "input param is invalid" }, + { ERR_REASON(KAE_R_SET_ID_FAILURE), "kae engine set id failure" }, + { ERR_REASON(KAE_R_SET_NAME_FAILURE), "kae engine set name failure" }, + { ERR_REASON(KAE_R_SET_PKEYMETH_FAILURE), "kae engine set pkeymeth function failure" }, + { 
ERR_REASON(KAE_R_SET_RSA_FAILURE), "kae engine set rsa failure" }, + { ERR_REASON(KAE_R_SET_DESTORY_FAILURE), "kae engine set destroy function failure" }, + { ERR_REASON(KAE_R_SET_INIT_FAILURE), "kae engine set init function failure" }, + { ERR_REASON(KAE_R_SET_CTRL_FAILURE), "kae engine set ctrl function failure" }, + { ERR_REASON(KAE_R_SET_CMDDEF_FAILURE), "kae engine set cmd define failure" }, + { ERR_REASON(KAE_R_SET_FINISH_FAILURE), "kae engine set finish function failure" }, + { ERR_REASON(KAE_R_UNSUPPORT_HARDWARE_TYPE), "unsupported hardware type" }, + { ERR_REASON(KAE_R_TIMEOUT), "Operation timeout" }, + { ERR_REASON(KAE_R_RSARECV_FAILURE), "RSA receive failure" }, + { ERR_REASON(KAE_R_RSARECV_STATE_FAILURE), "RSA received but status is failure" }, + { ERR_REASON(KAE_R_RSASEND_FAILURE), "RSA send failure" }, + { ERR_REASON(KAE_R_GET_ALLOCED_HWMEM_FAILURE), "get memory from reserve memory failure" }, + { ERR_REASON(KAE_R_FREE_ALLOCED_HWMEM_FAILURE), "free memory to reserve memory failure" }, + { ERR_REASON(KAE_R_RSA_KEY_NOT_COMPELET), "rsa key param is not compeleted" }, + { ERR_REASON(KAE_R_RSA_PADDING_FAILURE), "rsa padding failed" }, + { ERR_REASON(KAE_R_DATA_TOO_LARGE_FOR_MODULUS), "data too large for modules" }, + { ERR_REASON(KAE_R_DATA_GREATER_THEN_MOD_LEN), "data greater than mod len" }, + { ERR_REASON(KAE_R_CHECKPADDING_FAILURE), "check rsa padding failure" }, + { ERR_REASON(KAE_R_ERR_LIB_BN), "err in BN operation" }, + { ERR_REASON(KAE_R_RSA_KEY_SIZE_TOO_SMALL), "data too small" }, + { ERR_REASON(KAE_R_MODULE_TOO_LARGE), "data too large" }, + { ERR_REASON(KAE_R_INVAILED_E_VALUE), "invailed e value" }, + { ERR_REASON(KAE_R_UNKNOW_PADDING_TYPE), "unknown padding type" }, + { ERR_REASON(KAE_R_INPUT_FIKE_LENGTH_ZERO), "input file length zero" }, + { ERR_REASON(KAE_R_NEW_ENGINE_FAILURE), "get new engine failure" }, + { ERR_REASON(KAE_R_BIND_ENGINE_FAILURE), "kae engine bind failure" }, + { ERR_REASON(KAE_R_RSA_SET_METHODS_FAILURE), "rsa set kae methods 
failure" }, + { ERR_REASON(KAE_R_PUBLIC_KEY_INVALID), "invalid public key" }, + { ERR_REASON(KAE_R_PUBLIC_ENCRYPTO_FAILURE), "rsa public key encrypto failure" }, + { ERR_REASON(KAE_R_PUBLIC_DECRYPTO_FAILURE), "rsa public key decrypto failure" }, + { ERR_REASON(KAE_R_GET_PRIMEKEY_FAILURE), "rsa prime key generate failure" }, + { ERR_REASON(KAE_R_ENGINE_ALREADY_DEFINED), "kae engine already defined, try to use engine id 'kae' instead." }, + { 0, (const char *)NULL } +}; + +int err_load_kae_strings(void) +{ + if (g_kae_lib_error_code == 0) + g_kae_lib_error_code = ERR_get_next_error_library(); + + if (g_kae_error_init) { + g_kae_error_init = 0; + ERR_load_strings(g_kae_lib_error_code, g_kae_str_functs); + ERR_load_strings(g_kae_lib_error_code, g_kae_str_reasons); + } + return 1; +} + +void err_unload_kae_strings(void) +{ + if (g_kae_error_init == 0) { + ERR_unload_strings(g_kae_lib_error_code, g_kae_str_functs); + ERR_unload_strings(g_kae_lib_error_code, g_kae_str_reasons); + g_kae_error_init = 1; + } +} + +void err_kae_error(int function, int reason, char *engine_file, int line) +{ + if (g_kae_lib_error_code == 0) + g_kae_lib_error_code = ERR_get_next_error_library(); + ERR_PUT_error(g_kae_lib_error_code, function, reason, engine_file, line); +} diff --git a/kae_engine/src/v1/utils/engine_opensslerr.h b/kae_engine/src/v1/utils/engine_opensslerr.h new file mode 100644 index 0000000..bf87b93 --- /dev/null +++ b/kae_engine/src/v1/utils/engine_opensslerr.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the interface for error module + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef HISI_ACC_ENGINE_OPENSSLERR_H
#define HISI_ACC_ENGINE_OPENSSLERR_H

/* Register / unregister KAE's error strings with OpenSSL's ERR tables. */
int err_load_kae_strings(void);
void err_unload_kae_strings(void);
/* Raise an OpenSSL error record for this engine; used via the KAEerr macro. */
void err_kae_error(int function, int reason, char *engine_file, int line);
#define KAEerr(f, r) err_kae_error((f), (r), OPENSSL_FILE, OPENSSL_LINE)

/* Function codes. */
/* Keep in sync with g_kae_str_functs[] in engine_opensslerr.c. */
enum HISI_FUNC_CODE {
	KAE_F_HPRE_GET_RSA_METHODS = 100,
	KAE_F_CHANGRSAMETHOD,
	KAE_F_HPRE_PKEY_METHS,
	KAE_F_BIND_HELPER,
	KAE_F_RSA_FILL_KENGEN_PARAM,
	KAE_F_HPRE_RSA_PUBENC,
	KAE_F_HPRE_RSA_PRIENC,
	KAE_F_HPRE_RSA_PUBDEC,
	KAE_F_HPRE_RSA_PRIDEC,
	KAE_F_HPRE_RSA_PRIMEGEN,
	KAE_F_HPRE_RSA_KEYGEN,
	KAE_F_CHECK_PUBKEY_PARAM,
	KAE_F_HPRE_PUBENC_PADDING,
	KAE_F_HPRE_PRIENC_PADDING,
	KAE_F_CHECK_HPRE_PUBDEC_PADDING,
	KAE_F_CHECK_HPRE_PRIDEC_PADDING,
	KAE_F_SEC_SM3_INIT,
	KAE_F_SEC_SM3_FINAL,
	KAE_F_DIGEST_SOFT_INIT,
	KAE_F_ENGINE_WD,
	KAE_F_BIND_FN,
	KAE_F_CHECK_DATA_VALID,
	KAE_F_CHECK_MALLOC_SUCC,
	KAE_F_HPRE_GET_DH_METHODS,
	KAE_F_HPRE_DH_KEYGEN,
	KAE_F_HPRE_DH_KEYCOMP,
	KAE_F_CHANGDHMETHOD
};

/* Reason codes.  Keep in sync with g_kae_str_reasons[] in
 * engine_opensslerr.c; entries without a string there print numerically. */
enum HISI_RESON_CODE {
	KAE_R_NO_MATCH_DEVICE = 100,
	KAE_R_MALLOC_FAILURE,
	KAE_R_HWMEM_MALLOC_FAILURE,
	KAE_R_INPUT_PARAM_ERR,
	KAE_R_SET_ID_FAILURE,
	KAE_R_SET_NAME_FAILURE,
	KAE_R_SET_PKEYMETH_FAILURE,
	KAE_R_SET_RSA_FAILURE,
	KAE_R_SET_DESTORY_FAILURE,
	KAE_R_SET_INIT_FAILURE,
	KAE_R_SET_CTRL_FAILURE,
	KAE_R_SET_CMDDEF_FAILURE,
	KAE_R_SET_FINISH_FAILURE,
	KAE_R_UNSUPPORT_HARDWARE_TYPE,
	KAE_R_TIMEOUT,
	KAE_R_RSARECV_FAILURE,
	KAE_R_RSARECV_STATE_FAILURE,
	KAE_R_RSASEND_FAILURE,
	KAE_R_GET_ALLOCED_HWMEM_FAILURE,
	KAE_R_FREE_ALLOCED_HWMEM_FAILURE,
	KAE_R_RSA_KEY_NOT_COMPELET,
	KAE_R_RSA_PADDING_FAILURE,
	KAE_R_DATA_TOO_LARGE_FOR_MODULUS,
	KAE_R_DATA_GREATER_THEN_MOD_LEN,
	KAE_R_CHECKPADDING_FAILURE,
	KAE_R_ERR_LIB_BN,
	KAE_R_RSA_KEY_SIZE_TOO_SMALL,
	KAE_R_MODULE_TOO_LARGE,
	KAE_R_INVAILED_E_VALUE,
	KAE_R_UNKNOW_PADDING_TYPE,
	KAE_R_INPUT_FIKE_LENGTH_ZERO,
	KAE_R_SET_CIPHERS_FAILURE,
	KAE_R_SET_DIGESTS_FAILURE,
	KAE_R_NEW_ENGINE_FAILURE,
	KAE_R_BIND_ENGINE_FAILURE,
	KAE_R_RSA_SET_METHODS_FAILURE,
	KAE_R_PUBLIC_KEY_INVALID,
	KAE_R_PUBLIC_ENCRYPTO_FAILURE,
	KAE_R_PUBLIC_DECRYPTO_FAILURE,
	KAE_R_GET_PRIMEKEY_FAILURE,
	KAE_R_DH_SET_METHODS_FAILURE,
	KAE_R_SET_DH_FAILURE,
	KAE_R_DH_KEY_SIZE_TOO_LARGE,
	KAE_R_DH_INVALID_PARAMETER,
	KAE_R_ENGINE_ALREADY_DEFINED,
};

#endif // HISI_ACC_ENGINE_OPENSSLERR_H
diff --git a/kae_engine/src/v1/utils/engine_types.h b/kae_engine/src/v1/utils/engine_types.h
new file mode 100644
index 0000000..d2cc264
--- /dev/null
+++ b/kae_engine/src/v1/utils/engine_types.h
@@ -0,0 +1,31 @@
/*
 * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved.
 *
 * Description: This file provides the implementation for some base type or define for KAE
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +#ifndef KAE_ENGINE_TYPES_H +#define KAE_ENGINE_TYPES_H + +#define OPENSSL_SUCCESS (1) +#define OPENSSL_FAIL (0) +#define KAE_SUCCESS (0) +#define KAE_FAIL (-1) + +#define NO_C_MODE (UINT_MAX) +#define NO_C_ALG (UINT_MAX) + +#endif + diff --git a/kae_engine/src/v1/utils/engine_utils.c b/kae_engine/src/v1/utils/engine_utils.c new file mode 100644 index 0000000..7c6c153 --- /dev/null +++ b/kae_engine/src/v1/utils/engine_utils.c @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for utis module + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "engine_utils.h" +#include "engine_log.h" + +#define KAE_MEM_IMPROVE_THRESHOLD 1024 + +int kae_create_thread(pthread_t *thread_id, const pthread_attr_t *attr, + void *(*start_func)(void *), void *p_arg) +{ + (void)attr; + pthread_attr_t thread_attr; + + pthread_attr_init(&thread_attr); + pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED); + if (pthread_create(thread_id, &thread_attr, start_func, p_arg) != 0) { + US_ERR("fail to create thread, reason:%s", strerror(errno)); //lint !e666 + return 0; + } + + return 1; +} + +int kae_create_thread_joinable(pthread_t *thread_id, const pthread_attr_t *attr, + void *(*start_func)(void *), void *p_arg) +{ + (void)attr; + pthread_attr_t thread_attr; + + pthread_attr_init(&thread_attr); + pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE); + if (pthread_create(thread_id, &thread_attr, start_func, p_arg) != 0) { + US_ERR("fail to create thread, reason:%s", strerror(errno)); //lint !e666 + return 0; + } + return 1; +} + +void *memcpy_large(void *dstpp, const void *srcpp, size_t len) +{ + __asm__ __volatile__( + "add x4, %[src], %[count] \n\t" + "add x5, %[res], %[count] \n\t" + "ldr q0, [%[src]] \n\t" + "str q0, [%[res]] \n\t" + "sub %[count], %[count], 80 \n\t" + "and x14, %[src], 15 \n\t" + "bic %[src], %[src], 15 \n\t" + "sub x3, %[res], x14 \n\t" + "add %[count], %[count], x14 \n\t" + + "1: \n\t" + "ldp q0, q1, [%[src], 16] \n\t" + "stp q0, q1, [x3, 16] \n\t" + "ldp q0, q1, [%[src], 48] \n\t" + "stp q0, q1, [x3, 48] \n\t" + "add %[src], %[src], 64 \n\t" + "add x3, x3, 64 \n\t" + "subs %[count], %[count], 64 \n\t" + "b.hi 1b \n\t" + + "ldp q0, q1, [x4, -64] \n\t" + "stp q0, q1, [x5, -64] \n\t" + "ldp q0, q1, [x4, -32] \n\t" + "stp q0, q1, [x5, -32] \n\t" + + : [res] "+r"(dstpp) + : [src] "r"(srcpp), [count] "r"(len) + : "x3", "x4", "x5", "x14", "q0", "q1" + ); + + return dstpp; +} + +void *kae_memcpy(void *dstpp, const void *srcpp, size_t len) +{ + if (len >= 
KAE_MEM_IMPROVE_THRESHOLD) + return memcpy_large(dstpp, srcpp, len); + else + return memcpy(dstpp, srcpp, len); +} diff --git a/kae_engine/src/v1/utils/engine_utils.h b/kae_engine/src/v1/utils/engine_utils.h new file mode 100644 index 0000000..e018b7a --- /dev/null +++ b/kae_engine/src/v1/utils/engine_utils.h @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the interface for utils module + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef KAE_ACC_ENGINE_UTILS_H +#define KAE_ACC_ENGINE_UTILS_H +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define gettid() syscall(SYS_gettid) +#define PRINTPID \ + US_DEBUG("pid=%d, ptid=%lu, tid=%d", getpid(), pthread_self(), gettid()) + +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) + +#ifndef true +#define true (0 == 0) +#endif + +#ifndef false +#define false (0 == 1) +#endif + +enum KAE_Q_INIT_FLAG { + NOT_INIT = 0, + INITED, +}; + +#define UNUSED(x) ((void)(x)) + +#define BLOCKSIZES_OF(data) (sizeof((data)) / sizeof(((data)[0]))) + +#define KAE_SPIN_INIT(q) kae_spinlock_init(&(q)) +#define KAE_SPIN_LOCK(q) kae_spinlock_lock(&(q)) +#define KAE_SPIN_TRYLOCK(q) kae_spinlock_trylock(&(q)) +#define KAE_SPIN_UNLOCK(q) kae_spinlock_unlock(&(q)) + +#define kae_free(addr) \ + do { \ + if (addr != NULL) { \ + free(addr); \ + addr = NULL; \ + } \ + } while (0) + +struct kae_spinlock { + int lock; +}; + +static inline void kae_spinlock_init(struct kae_spinlock *lock) +{ + lock->lock = 0; +} + +static inline void kae_spinlock_lock(struct kae_spinlock *lock) +{ + while (__sync_lock_test_and_set(&lock->lock, 1)); +} + +static inline int kae_spinlock_trylock(struct kae_spinlock *lock) +{ + return __sync_lock_test_and_set(&lock->lock, 1) == 0; +} + +static inline void kae_spinlock_unlock(struct kae_spinlock *lock) +{ + __sync_lock_release(&lock->lock); +} + +static inline void *kae_malloc(unsigned int size) +{ + return malloc(size); +} + +static inline void *kae_realloc(void *mem_address, unsigned int newsize) +{ + return realloc(mem_address, newsize); +} + +static inline void *kae_calloc(unsigned int num, unsigned int size) +{ + return calloc(num, size); +} + +static inline int kae_strcmp(const char *src, const char *dst) +{ + return strcmp(src, dst); +} + +static inline void kae_memset(void *ptr, int value, int len) +{ + (void)memset(ptr, value, len); +} + 
+void *kae_memcpy(void *dstpp, const void *srcpp, size_t len); + +static inline void kae_pthread_yield(void) +{ + //(void)pthread_yield(); //lint !e1055 + sched_yield(); +} + +int kae_create_thread(pthread_t *thread_id, const pthread_attr_t *attr, + void *(*start_func)(void *), void *p_arg); + +int kae_create_thread_joinable(pthread_t *thread_id, const pthread_attr_t *attr, + void *(*start_func)(void *), void *p_arg); + +#endif diff --git a/kae_engine/src/v1/wdmngr/wd_alg_queue.c b/kae_engine/src/v1/wdmngr/wd_alg_queue.c new file mode 100644 index 0000000..5cd33ae --- /dev/null +++ b/kae_engine/src/v1/wdmngr/wd_alg_queue.c @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the wd queue management module + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "wd_alg_queue.h" +#include "../utils/engine_log.h" + +struct wd_queue *wd_new_queue(int algtype) +{ + struct wd_queue *queue = (struct wd_queue *)kae_malloc(sizeof(struct wd_queue)); + int ret; + + if (queue == NULL) { + US_ERR("malloc failed"); + return NULL; + } + + kae_memset(queue, 0, sizeof(struct wd_queue)); + + switch (algtype) { + case WCRYPTO_RSA: + queue->capa.alg = "rsa"; + break; + case WCRYPTO_DH: + queue->capa.alg = "dh"; + break; + case WCRYPTO_CIPHER: + queue->capa.alg = "cipher"; + break; + case WCRYPTO_DIGEST: + queue->capa.alg = "digest"; + break; + case WCRYPTO_COMP: + case WCRYPTO_EC: + case WCRYPTO_RNG: + default: + US_WARN("not support algtype:%d", algtype); + kae_free(queue); + queue = NULL; + return NULL; + } + + ret = wd_request_queue(queue); + if (ret) { + US_ERR("request wd queue fail!errno:%d", ret); + kae_free(queue); + queue = NULL; + return NULL; + } + + return queue; +} + +void wd_free_queue(struct wd_queue *queue) +{ + if (queue != NULL) { + wd_release_queue(queue); + kae_free(queue); + queue = NULL; + } +} + +int wd_get_nosva_dev_num(const char *algorithm) +{ + return wd_get_available_dev_num(algorithm); +} diff --git a/kae_engine/src/v1/wdmngr/wd_alg_queue.h b/kae_engine/src/v1/wdmngr/wd_alg_queue.h new file mode 100644 index 0000000..955eed5 --- /dev/null +++ b/kae_engine/src/v1/wdmngr/wd_alg_queue.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the interface for wd_alg_queue.c + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __WD_ALG_QUEUE_H +#define __WD_ALG_QUEUE_H + +#include +#include "../utils/engine_utils.h" + +struct wd_queue *wd_new_queue(int algtype); + +void wd_free_queue(struct wd_queue *queue); + +int wd_get_nosva_dev_num(const char *algorithm); +#endif diff --git a/kae_engine/src/v1/wdmngr/wd_queue_memory.c b/kae_engine/src/v1/wdmngr/wd_queue_memory.c new file mode 100644 index 0000000..e6e7a2c --- /dev/null +++ b/kae_engine/src/v1/wdmngr/wd_queue_memory.c @@ -0,0 +1,480 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the implementation for KAE engine of wd queue memory management + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "wd_queue_memory.h" +#include "../utils/engine_utils.h" +#include "../utils/engine_log.h" +#include + +#define MAXBLOCKSIZE 0x90000 +#define MAXRSVMEM 0x400000 + +#define MAXBLOCKSIZE 0x90000 +#define MAXRSVMEM 0x400000 + +const char *g_alg_type[] = { + "rsa", + "dh", + "cipher", + "digest", +}; + +struct wd_queue_mempool *wd_queue_mempool_create(struct wd_queue *q, unsigned int block_size, unsigned int block_num) +{ + void *addr = NULL; + unsigned long rsv_mm_sz; + struct wd_queue_mempool *pool = NULL; + unsigned int bitmap_sz; + const unsigned int BLOCKS_PER_BITMAP = 32; + + if (block_size > MAXBLOCKSIZE) { + US_ERR("error! current blk size is beyond 576k"); + return NULL; + } + + rsv_mm_sz = (unsigned long)block_size * (unsigned long)block_num; + if (rsv_mm_sz > (unsigned long)MAXRSVMEM) { + US_ERR("error! current mem size is beyond 4M"); + return NULL; + } + + addr = wd_reserve_memory(q, rsv_mm_sz); + if (addr == NULL) { + US_ERR("reserve_memory fail!"); + return NULL; + } + kae_memset(addr, 0, rsv_mm_sz); + + bitmap_sz = (block_num / BLOCKS_PER_BITMAP + 1) * sizeof(unsigned int); + pool = + (struct wd_queue_mempool *)kae_malloc(sizeof(struct wd_queue_mempool) + bitmap_sz); + if (pool == NULL) { + US_ERR("Alloc pool handle fail!"); + return NULL; + } + kae_memset(pool, 0, sizeof(struct wd_queue_mempool) + bitmap_sz); + + pool->base = addr; + sem_init(&pool->mempool_sem, 0, 1); + pool->block_size = block_size; + pool->block_num = block_num; + pool->free_num = block_num; + pool->bitmap = (unsigned int *) (pool + 1); + pool->mem_size = rsv_mm_sz; + pool->q = q; + + return pool; +} + +struct wd_queue_mempool *create_alg_wd_queue_mempool(int algtype, struct wd_queue *q) +{ + struct wd_queue_mempool *mempool = NULL; + unsigned int block_size; + unsigned int block_num; + + switch (algtype) { + case WCRYPTO_RSA: + block_size = RSA_BLOCK_SIZE; + block_num = RSA_BLOCK_NUM; + break; + case WCRYPTO_DH: + block_size = DH_BLOCK_SIZE; + block_num = 
DH_BLOCK_NUM; + break; + case WCRYPTO_CIPHER: + block_size = CIPHER_BLOCK_SIZE; + block_num = CIPHER_BLOCK_NUM; + break; + case WCRYPTO_DIGEST: + block_size = DIGEST_BLOCK_SIZE; + block_num = DIGEST_BLOCK_NUM; + break; + case WCRYPTO_COMP: + case WCRYPTO_EC: + case WCRYPTO_RNG: + default: + US_WARN("%s not support algtype:%d", __func__, algtype); + return NULL; + } + +#ifdef NO_WD_BLK_POOL + mempool = wd_queue_mempool_create(q, block_size, block_num); +#else + struct wd_blkpool_setup setup; + + kae_memset(&setup, 0, sizeof(setup)); + setup.block_size = block_size; + setup.block_num = block_num; + setup.align_size = 64; // align with 64 + + mempool = (struct wd_queue_mempool *)wd_blkpool_create(q, &setup); +#endif + + return mempool; +} + +void wd_queue_mempool_destroy(struct wd_queue_mempool *pool) +{ + wd_blkpool_destroy(pool); +} + +void *kae_dma_map(void *usr, void *va, size_t sz) +{ + return wd_blk_iova_map(usr, va); +} + +void kae_dma_unmap(void *usr, void *va, void *dma, size_t sz) +{ + return wd_blk_iova_unmap(usr, dma, va); +} + +void *kae_wd_alloc_blk(void *pool, size_t size) +{ + if (pool == NULL) { + US_ERR("mem pool empty!"); + return NULL; + } + +#ifdef NO_WD_BLK_POOL + struct wd_queue_mempool *mempool = (struct wd_queue_mempool *)pool; + + if (size > (size_t)mempool->block_size) { + US_ERR("alloc size error, over one block size."); + return NULL; + } + return wd_queue_pool_alloc_buf((struct wd_queue_mempool *)pool); +#else + return wd_alloc_blk(pool); +#endif +} + +void kae_wd_free_blk(void *pool, void *blk) +{ +#ifdef NO_WD_BLK_POOL + wd_queue_pool_free_buf((struct wd_queue_mempool *)pool, blk); +#else + wd_free_blk(pool, blk); +#endif +} + +KAE_QUEUE_POOL_HEAD_S *kae_init_queue_pool(int algtype) +{ + KAE_QUEUE_POOL_HEAD_S *kae_pool = NULL; + + kae_pool = (KAE_QUEUE_POOL_HEAD_S *)kae_malloc(sizeof(KAE_QUEUE_POOL_HEAD_S)); + if (kae_pool == NULL) { + US_ERR("malloc pool head fail!"); + return NULL; + } + + /* fill data of head */ + kae_pool->algtype = 
algtype; + kae_pool->next = NULL; + kae_pool->pool_use_num = 0; + + /* malloc a pool */ + kae_pool->kae_queue_pool = (KAE_QUEUE_POOL_NODE_S *) + kae_malloc(KAE_QUEUE_POOL_MAX_SIZE * sizeof(KAE_QUEUE_POOL_NODE_S)); + if (kae_pool->kae_queue_pool == NULL) { + US_ERR("malloc failed"); + kae_free(kae_pool); + return NULL; + } + kae_memset(kae_pool->kae_queue_pool, 0, KAE_QUEUE_POOL_MAX_SIZE * sizeof(KAE_QUEUE_POOL_NODE_S)); + + pthread_mutex_init(&kae_pool->kae_queue_mutex, NULL); + pthread_mutex_init(&kae_pool->destroy_mutex, NULL); + + US_DEBUG("kae init %s queue success", g_alg_type[algtype]); + + return kae_pool; +} + +static KAE_QUEUE_DATA_NODE_S *kae_get_queue_data_from_list(KAE_QUEUE_POOL_HEAD_S *pool_head) +{ + int i = 0; + KAE_QUEUE_DATA_NODE_S *queue_data_node = NULL; + KAE_QUEUE_POOL_HEAD_S *temp_pool = pool_head; + + US_DEBUG("kae get queue node from pool start."); + + if ((pool_head->pool_use_num == 0) && (pool_head->next == NULL)) + return queue_data_node; + + while (temp_pool != NULL) { + for (i = 0; i < temp_pool->pool_use_num; i++) { + if (temp_pool->kae_queue_pool[i].node_data == NULL) + continue; + + if (KAE_SPIN_TRYLOCK(temp_pool->kae_queue_pool[i].spinlock)) { + if (temp_pool->kae_queue_pool[i].node_data == NULL) { + KAE_SPIN_UNLOCK(temp_pool->kae_queue_pool[i].spinlock); + continue; + } else { + queue_data_node = temp_pool->kae_queue_pool[i].node_data; + temp_pool->kae_queue_pool[i].node_data = (KAE_QUEUE_DATA_NODE_S *)NULL; + KAE_SPIN_UNLOCK(temp_pool->kae_queue_pool[i].spinlock); + + US_DEBUG("kae queue pool first success. 
queue_data_node=%p queue_node id =%d", queue_data_node, i); + return queue_data_node; + } + } + } + /* next pool */ + temp_pool = temp_pool->next; + } + + return queue_data_node; +} + +static void kae_free_wd_queue_memory(KAE_QUEUE_DATA_NODE_S *queue_node, release_engine_ctx_cb release_fn) +{ + if (queue_node != NULL) { + if (release_fn != NULL && queue_node->engine_ctx != NULL) { + release_fn(queue_node->engine_ctx); + queue_node->engine_ctx = NULL; + } + + if (queue_node->kae_queue_mem_pool != NULL) { + wd_queue_mempool_destroy(queue_node->kae_queue_mem_pool); + queue_node->kae_queue_mem_pool = NULL; + } + if (queue_node->kae_wd_queue != NULL) { + wd_free_queue(queue_node->kae_wd_queue); + queue_node->kae_wd_queue = NULL; + } + + kae_free(queue_node); + queue_node = NULL; + } + + US_DEBUG("free wd queue success"); +} + +static KAE_QUEUE_DATA_NODE_S *kae_new_wd_queue_memory(int algtype) +{ + KAE_QUEUE_DATA_NODE_S *queue_node = NULL; + + queue_node = (KAE_QUEUE_DATA_NODE_S *)kae_malloc(sizeof(KAE_QUEUE_DATA_NODE_S)); + if (queue_node == NULL) { + US_ERR("malloc failed"); + return NULL; + } + kae_memset(queue_node, 0, sizeof(KAE_QUEUE_DATA_NODE_S)); + + queue_node->kae_wd_queue = wd_new_queue(algtype); + if (queue_node->kae_wd_queue == NULL) { + US_ERR("new wd queue fail"); + goto err; + } + + queue_node->kae_queue_mem_pool = create_alg_wd_queue_mempool(algtype, queue_node->kae_wd_queue); + if (queue_node->kae_queue_mem_pool == NULL) { + US_ERR("request mempool fail!"); + goto err; + } + + return queue_node; + +err: + kae_free_wd_queue_memory(queue_node, NULL); + return NULL; +} + +KAE_QUEUE_DATA_NODE_S *kae_get_node_from_pool(KAE_QUEUE_POOL_HEAD_S *pool_head) +{ + KAE_QUEUE_DATA_NODE_S *queue_data_node = NULL; + + if (pool_head == NULL) { + US_ERR("input params pool_head is null"); + return NULL; + } + + queue_data_node = kae_get_queue_data_from_list(pool_head); + if (queue_data_node == NULL) + queue_data_node = kae_new_wd_queue_memory(pool_head->algtype); + + 
return queue_data_node; +} + +static void kae_set_pool_use_num(KAE_QUEUE_POOL_HEAD_S *pool, int set_num) +{ + pthread_mutex_lock(&pool->kae_queue_mutex); + if (set_num > pool->pool_use_num) + pool->pool_use_num = set_num; + (void)pthread_mutex_unlock(&pool->kae_queue_mutex); +} + +int kae_put_node_to_pool(KAE_QUEUE_POOL_HEAD_S *pool_head, KAE_QUEUE_DATA_NODE_S *node_data) +{ + int i = 0; + KAE_QUEUE_POOL_HEAD_S *temp_pool = pool_head; + KAE_QUEUE_POOL_HEAD_S *last_pool = NULL; + + if (node_data == NULL || pool_head == NULL) + return 0; + + US_DEBUG("Add nodedata to pool"); + + while (temp_pool != NULL) { + for (i = 0; i < KAE_QUEUE_POOL_MAX_SIZE; i++) { + if (temp_pool->kae_queue_pool[i].node_data) + continue; + + if (KAE_SPIN_TRYLOCK(temp_pool->kae_queue_pool[i].spinlock)) { + if (temp_pool->kae_queue_pool[i].node_data) { + KAE_SPIN_UNLOCK(temp_pool->kae_queue_pool[i].spinlock); + continue; + } else { + temp_pool->kae_queue_pool[i].node_data = node_data; + temp_pool->kae_queue_pool[i].add_time = time((time_t *)NULL); + KAE_SPIN_UNLOCK(temp_pool->kae_queue_pool[i].spinlock); + if (i >= temp_pool->pool_use_num) + kae_set_pool_use_num(temp_pool, i + 1); + + US_DEBUG("kae put queue node to pool, queue_node id is %d.", i); + return 1; + } + } + } + last_pool = temp_pool; + temp_pool = temp_pool->next; + /* if no empty pool to add,new a pool */ + if (temp_pool == NULL) { + pthread_mutex_lock(&last_pool->destroy_mutex); + if (last_pool->next == NULL) { + temp_pool = kae_init_queue_pool(last_pool->algtype); + if (temp_pool == NULL) { + (void)pthread_mutex_unlock(&last_pool->destroy_mutex); + break; + } + last_pool->next = temp_pool; + } + (void)pthread_mutex_unlock(&last_pool->destroy_mutex); + } + } + /* if not added,free it */ + kae_free_wd_queue_memory(node_data, NULL); + return 0; +} + +void kae_queue_pool_reset(KAE_QUEUE_POOL_HEAD_S *pool_head) +{ + (void)pool_head; +} + +void kae_queue_pool_destroy(KAE_QUEUE_POOL_HEAD_S *pool_head, release_engine_ctx_cb release_fn) 
+{ + int error = 0; + int i = 0; + KAE_QUEUE_DATA_NODE_S *queue_data_node = (KAE_QUEUE_DATA_NODE_S *)NULL; + KAE_QUEUE_POOL_HEAD_S *temp_pool = NULL; + KAE_QUEUE_POOL_HEAD_S *cur_pool = pool_head; + + while (cur_pool != NULL) { + error = pthread_mutex_lock(&cur_pool->destroy_mutex); + if (error != 0) { + (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); + return; + } + + error = pthread_mutex_lock(&cur_pool->kae_queue_mutex); + if (error != 0) { + (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); + return; + } + for (i = 0; i < cur_pool->pool_use_num; i++) { + queue_data_node = cur_pool->kae_queue_pool[i].node_data; + if (queue_data_node != NULL) { + kae_free_wd_queue_memory(queue_data_node, release_fn); + US_DEBUG("kae queue node destroy success. queue_node id =%d", i); + cur_pool->kae_queue_pool[i].node_data = NULL; + } + } + US_DEBUG("pool use num :%d.", cur_pool->pool_use_num); + + kae_free(cur_pool->kae_queue_pool); + + (void)pthread_mutex_unlock(&cur_pool->kae_queue_mutex); + (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); + + pthread_mutex_destroy(&cur_pool->kae_queue_mutex); + pthread_mutex_destroy(&cur_pool->destroy_mutex); + + temp_pool = cur_pool; + + kae_free(cur_pool); + + cur_pool = temp_pool->next; + } + + US_DEBUG("kae queue pool destroy success."); +} + +void kae_queue_pool_check_and_release(KAE_QUEUE_POOL_HEAD_S *pool_head, release_engine_ctx_cb release_fn) +{ + int i = 0; + int error; + time_t current_time; + KAE_QUEUE_DATA_NODE_S *queue_data_node = NULL; + KAE_QUEUE_POOL_HEAD_S *cur_pool = pool_head; + + current_time = time((time_t *)NULL); + + while (cur_pool != NULL) { + error = pthread_mutex_lock(&cur_pool->destroy_mutex); + if (error != 0) { + cur_pool = cur_pool->next; + (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); + continue; + } + if (cur_pool->kae_queue_pool == NULL) { + (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); + cur_pool = cur_pool->next; + continue; + } + + for (i = cur_pool->pool_use_num - 1; i >= 
0; i--) { + if (cur_pool->kae_queue_pool[i].node_data == NULL) + continue; + + if (difftime(current_time, cur_pool->kae_queue_pool[i].add_time) < CHECK_QUEUE_TIME_SECONDS) + continue; + + if (KAE_SPIN_TRYLOCK(cur_pool->kae_queue_pool[i].spinlock)) { + if ((cur_pool->kae_queue_pool[i].node_data == NULL) || + (difftime(current_time, cur_pool->kae_queue_pool[i].add_time) < CHECK_QUEUE_TIME_SECONDS)) { + KAE_SPIN_UNLOCK(cur_pool->kae_queue_pool[i].spinlock); + continue; + } else { + queue_data_node = cur_pool->kae_queue_pool[i].node_data; + cur_pool->kae_queue_pool[i].node_data = (KAE_QUEUE_DATA_NODE_S *)NULL; + KAE_SPIN_UNLOCK(cur_pool->kae_queue_pool[i].spinlock); + + kae_free_wd_queue_memory(queue_data_node, release_fn); + + US_DEBUG("hpre queue list release success. queue node id =%d", i); + } + } + } + + (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); + cur_pool = cur_pool->next; + } +} diff --git a/kae_engine/src/v1/wdmngr/wd_queue_memory.h b/kae_engine/src/v1/wdmngr/wd_queue_memory.h new file mode 100644 index 0000000..6eee169 --- /dev/null +++ b/kae_engine/src/v1/wdmngr/wd_queue_memory.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. + * + * Description: This file provides the interface for wd_queue_memory.c + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __WD_QUEUE_MEMORY_H +#define __WD_QUEUE_MEMORY_H + +#include +#include +#include "wd_alg_queue.h" +#include "../utils/engine_utils.h" + +#define KAE_QUEUE_POOL_MAX_SIZE 512 +#define CHECK_QUEUE_TIME_SECONDS 5 // seconds + +/* + * once use 3 block for ctx&pubkey*prikey. + * the max Concurrent num = HPRE_BLOCK_NUM/3 + * when use 4096bit rsa. block use max is 3576. + * 3576 = sizeof(ctx)(248)+ pubkey_size(1024) + prikey_size(2304) + * that means max block used is 2304. set 4096 for reserve + */ +#define RSA_BLOCK_NUM 16 +#define RSA_BLOCK_SIZE 4096 + +#define DH_BLOCK_NUM 16 +#define DH_BLOCK_SIZE 4096 + +#define CIPHER_BLOCK_NUM 4 +#define CIPHER_BLOCK_SIZE (272*1024) + +#define DIGEST_BLOCK_NUM 4 +#define DIGEST_BLOCK_SIZE (512 * 1024) + +typedef void (*release_engine_ctx_cb)(void *engine_ctx); + +typedef struct KAE_QUEUE_DATA_NODE { + struct wd_queue *kae_wd_queue; + struct wd_queue_mempool *kae_queue_mem_pool; + void *engine_ctx; +} KAE_QUEUE_DATA_NODE_S; + +typedef struct KAE_QUEUE_POOL_NODE { + struct kae_spinlock spinlock; + time_t add_time; + KAE_QUEUE_DATA_NODE_S *node_data; +} KAE_QUEUE_POOL_NODE_S; + +typedef struct KAE_QUEUE_POOL_HEAD { + int pool_use_num; + int algtype; /* alg type,just init at init pool */ + pthread_mutex_t destroy_mutex; + pthread_mutex_t kae_queue_mutex; + struct KAE_QUEUE_POOL_HEAD *next; /* next pool */ + KAE_QUEUE_POOL_NODE_S *kae_queue_pool; /* point to a attray */ +} KAE_QUEUE_POOL_HEAD_S; + +struct wd_queue_mempool { + struct wd_queue *q; + void *base; + unsigned int *bitmap; + unsigned int block_size; + unsigned int block_num; + unsigned int mem_size; + unsigned int block_align_size; + unsigned int free_num; + unsigned int fail_times; + unsigned long long index; + sem_t mempool_sem; + int dev; +}; + +struct wd_queue_mempool *wd_queue_mempool_create(struct wd_queue *q, unsigned int block_size, unsigned int block_num); + +void wd_queue_mempool_destroy(struct wd_queue_mempool *pool); + +void kae_wd_free_blk(void 
*pool, void *blk); +void *kae_wd_alloc_blk(void *pool, size_t size); + +void *kae_dma_map(void *usr, void *va, size_t sz); + +void kae_dma_unmap(void *usr, void *va, void *dma, size_t sz); + +KAE_QUEUE_POOL_HEAD_S *kae_init_queue_pool(int algtype); +KAE_QUEUE_DATA_NODE_S *kae_get_node_from_pool(KAE_QUEUE_POOL_HEAD_S *pool_head); +int kae_put_node_to_pool(KAE_QUEUE_POOL_HEAD_S *pool_head, KAE_QUEUE_DATA_NODE_S *node_data); +void kae_queue_pool_reset(KAE_QUEUE_POOL_HEAD_S *pool_head); +void kae_queue_pool_destroy(KAE_QUEUE_POOL_HEAD_S *pool_head, release_engine_ctx_cb release_fn); +void kae_queue_pool_check_and_release(KAE_QUEUE_POOL_HEAD_S *pool_head, release_engine_ctx_cb release_ectx_fn); + +#endif + -- Gitee