summaryrefslogtreecommitdiff
path: root/src/include
diff options
context:
space:
mode:
authorDimitri John Ledkov <xnox@ubuntu.com>2017-09-11 10:39:26 +0100
committerDimitri John Ledkov <xnox@ubuntu.com>2017-09-11 10:39:26 +0100
commit306f83ec518ae67cc74cb8df403ae5431725b514 (patch)
treeb323301880961ef130d6f953f160f9b6326273b3 /src/include
parent0ddea171d8d6a5addc7b6ce9f65d2026b7198c02 (diff)
New upstream release LP: #1716348
Diffstat (limited to 'src/include')
-rw-r--r--src/include/icastats.h16
-rw-r--r--src/include/s390_aes.h113
-rw-r--r--src/include/s390_ccm.h29
-rw-r--r--src/include/s390_crypto.h115
-rw-r--r--src/include/s390_gcm.h250
-rw-r--r--src/include/s390_rsa.h3
-rw-r--r--src/include/s390_sha.h111
7 files changed, 561 insertions, 76 deletions
diff --git a/src/include/icastats.h b/src/include/icastats.h
index e96fb6c..30e8b30 100644
--- a/src/include/icastats.h
+++ b/src/include/icastats.h
@@ -35,6 +35,12 @@ typedef enum stats_fields {
ICA_STATS_SHA256,
ICA_STATS_SHA384,
ICA_STATS_SHA512,
+ ICA_STATS_SHA3_224,
+ ICA_STATS_SHA3_256,
+ ICA_STATS_SHA3_384,
+ ICA_STATS_SHA3_512,
+ ICA_STATS_SHAKE_128,
+ ICA_STATS_SHAKE_256,
ICA_STATS_GHASH,
ICA_STATS_PRNG,
ICA_STATS_DRBGSHA512,
@@ -62,6 +68,7 @@ typedef enum stats_fields {
ICA_STATS_AES_CTR,
ICA_STATS_AES_CMAC,
ICA_STATS_AES_XTS,
+ ICA_STATS_AES_GCM,
/* number of counters */
ICA_NUM_STATS
@@ -73,6 +80,12 @@ typedef enum stats_fields {
"SHA-256", \
"SHA-384", \
"SHA-512", \
+ "SHA3-224", \
+ "SHA3-256", \
+ "SHA3-384", \
+ "SHA3-512", \
+ "SHAKE-128", \
+ "SHAKE-256", \
"GHASH", \
"P_RNG", \
"DRBG-SHA-512", \
@@ -96,7 +109,8 @@ typedef enum stats_fields {
"AES CFB", \
"AES CTR", \
"AES CMAC", \
- "AES XTS"
+ "AES XTS", \
+ "AES GCM"
diff --git a/src/include/s390_aes.h b/src/include/s390_aes.h
index dc5f9be..610aeb1 100644
--- a/src/include/s390_aes.h
+++ b/src/include/s390_aes.h
@@ -31,6 +31,116 @@
#include "s390_ctr.h"
#define AES_BLOCK_SIZE 16
+#define GCM_RECOMMENDED_IV_LENGTH 12
+
+#define HS_FLAG 0x400
+#define LAAD_FLAG 0x200
+#define LPC_FLAG 0x100
+
+
+static inline int s390_aes_gcm_hw(unsigned int function_code,
+ const unsigned char *input_data, unsigned char *output_data,
+ unsigned long input_length,
+ unsigned char *key,
+ unsigned char *j0, unsigned long j0_width,
+ unsigned char *ctr, unsigned long ctr_width,
+ const unsigned char *aad, unsigned long aad_length,
+ const unsigned char *subkey_h,
+ unsigned char *tag, unsigned long tag_length,
+ unsigned int laad, unsigned int lpc)
+{
+ struct {
+ char reserved[12];
+ unsigned int cv;
+ ica_aes_vector_t tag;
+ ica_aes_vector_t subkey_h;
+ unsigned long long total_aad_length;
+ unsigned long long total_input_length;
+ ica_aes_vector_t j0;
+ ica_aes_key_len_256_t key;
+ } parm_block;
+
+ unsigned int rc = 0;
+ unsigned int key_size = (function_code & 0x0f) * sizeof(ica_aes_key_single_t);
+
+ memset(&parm_block, 0, sizeof(parm_block));
+ memcpy(&parm_block.tag, tag, AES_BLOCK_SIZE);
+ memcpy(&parm_block.subkey_h, subkey_h, AES_BLOCK_SIZE);
+ memcpy(&parm_block.key, key, key_size);
+
+ if (laad && lpc) {
+ parm_block.total_aad_length = aad_length*8; // total length in bits
+ parm_block.total_input_length = input_length*8; // total length in bits
+ parm_block.cv = input_length / AES_BLOCK_SIZE + 1;
+ }
+
+ if (ctr) {
+ memcpy(&parm_block.cv, &ctr[GCM_RECOMMENDED_IV_LENGTH], sizeof(int));
+ memcpy(&parm_block.j0, ctr, GCM_RECOMMENDED_IV_LENGTH);
+ unsigned int* cv;
+ cv = (unsigned int*)&(parm_block.j0[GCM_RECOMMENDED_IV_LENGTH]);
+ *cv = 1;
+ }
+
+ if (j0)
+ memcpy(&parm_block.j0, j0, AES_BLOCK_SIZE);
+
+ // Set flags ...
+ function_code = function_code | HS_FLAG; // subkey flag is always = 1
+ if (laad)
+ function_code = function_code | LAAD_FLAG;
+ if (lpc)
+ function_code = function_code | LPC_FLAG;
+
+ if (input_data == NULL)
+ input_length = 0;
+ if (aad == NULL)
+ aad_length = 0;
+ if (input_length == 0 && aad_length == 0)
+ parm_block.cv++;
+
+ rc = s390_kma(function_code, &parm_block,
+ output_data, input_data, input_length,
+ aad, aad_length);
+
+ if (rc >= 0) {
+ memcpy(tag, &parm_block.tag, AES_BLOCK_SIZE);
+ if (ctr)
+ memcpy(&ctr[GCM_RECOMMENDED_IV_LENGTH], &parm_block.cv, sizeof(int)); // not in last call
+ return 0;
+ } else
+ return EIO;
+}
+
+static inline int s390_aes_gcm(unsigned int fc, const unsigned char *in_data,
+ unsigned char *out_data, unsigned long data_length,
+ unsigned char *key,
+ unsigned char *j0, unsigned int j0_width,
+ unsigned char *ctr, unsigned int ctr_width,
+ const unsigned char *aad, unsigned long aad_length,
+ unsigned char *subkey_h, unsigned char *tag,
+ unsigned long tag_length, unsigned int laad,
+ unsigned int lpc)
+{
+ int rc = 1;
+ int hardware = ALGO_HW;
+
+ if (*s390_kma_functions[fc].enabled)
+ rc = s390_aes_gcm_hw(s390_kma_functions[fc].hw_fc,
+ in_data, out_data, data_length,
+ key, j0, j0_width, ctr, ctr_width, aad, aad_length,
+ subkey_h, tag, tag_length, laad, lpc);
+ if (rc)
+ return rc;
+
+ stats_increment(ICA_STATS_AES_GCM,
+ hardware,
+ (s390_kma_functions[fc].hw_fc &
+ S390_CRYPTO_DIRECTION_MASK) == 0 ?
+ ENCRYPT:DECRYPT);
+
+ return rc;
+}
static inline int __s390_aes_ctrlist(unsigned int fc, unsigned long data_length,
const unsigned char *in_data,
@@ -110,6 +220,9 @@ static inline int s390_aes_ctr(unsigned int fc, const unsigned char *in_data,
int rc = 0;
+ if (data_length == 0)
+ return 0;
+
if (data_length <= AES_BLOCK_SIZE) {
/* short message handling */
rc = s390_aes_ctrlist(fc, data_length, in_data, ctr,
diff --git a/src/include/s390_ccm.h b/src/include/s390_ccm.h
index 55564a8..b5672e4 100644
--- a/src/include/s390_ccm.h
+++ b/src/include/s390_ccm.h
@@ -314,7 +314,26 @@ static inline unsigned int s390_ccm(unsigned int function_code,
key, cipher_ctr, ccm_ctr_width);
if (rc)
return rc;
+ /* mac */
+ rc = s390_ccm_authenticate(UNDIRECTED_FC(function_code),
+ payload, payload_length,
+ assoc_data, assoc_data_length,
+ nonce, nonce_length,
+ tag, mac_length,
+ key, fc_to_key_length(function_code));
+ if (rc)
+ return rc;
} else {
+ /* mac */
+ rc = s390_ccm_authenticate(UNDIRECTED_FC(function_code),
+ payload, payload_length,
+ assoc_data, assoc_data_length,
+ nonce, nonce_length,
+ tag, mac_length,
+ key, fc_to_key_length(function_code));
+ if (rc)
+ return rc;
+
/*encrypt */
rc = s390_aes_ctr(UNDIRECTED_FC(function_code),
payload, ciphertext, payload_length,
@@ -324,16 +343,6 @@ static inline unsigned int s390_ccm(unsigned int function_code,
}
}
- /* generate tag */
- rc = s390_ccm_authenticate(UNDIRECTED_FC(function_code),
- payload, payload_length,
- assoc_data, assoc_data_length,
- nonce, nonce_length,
- tag, mac_length,
- key, fc_to_key_length(function_code));
- if (rc)
- return rc;
-
/* encrypt tag into mac */
return s390_aes_ctr(UNDIRECTED_FC(function_code),
tag, mac, mac_length,
diff --git a/src/include/s390_crypto.h b/src/include/s390_crypto.h
index e969eec..373a720 100644
--- a/src/include/s390_crypto.h
+++ b/src/include/s390_crypto.h
@@ -32,6 +32,7 @@
#define MSA4 4
#define ADAPTER 5
#define PPNO 6
+#define MSA8 7
enum s390_crypto_instruction {
S390_CRYPTO_DIRECTION_MASK = 0x80,
@@ -50,6 +51,12 @@ enum s390_crypto_function {
S390_CRYPTO_SHA_1 = 0x01,
S390_CRYPTO_SHA_256 = 0x02,
S390_CRYPTO_SHA_512 = 0x03,
+ S390_CRYPTO_SHA_3_224 = 0x20,
+ S390_CRYPTO_SHA_3_256 = 0x21,
+ S390_CRYPTO_SHA_3_384 = 0x22,
+ S390_CRYPTO_SHA_3_512 = 0x23,
+ S390_CRYPTO_SHAKE_128 = 0x24,
+ S390_CRYPTO_SHAKE_256 = 0x25,
S390_CRYPTO_GHASH = 0x41,
/*
* The following functions are available for KM,KMC,KMF,KMO,
@@ -72,6 +79,14 @@ enum s390_crypto_function {
S390_CRYPTO_AES_128_XTS_DECRYPT = 0x32 | 0x80,
S390_CRYPTO_AES_256_XTS_ENCRYPT = 0x34,
S390_CRYPTO_AES_256_XTS_DECRYPT = 0x34 | 0x80,
+ /* GCM */
+ S390_CRYPTO_AES_128_GCM_ENCRYPT = 0x12,
+ S390_CRYPTO_AES_128_GCM_DECRYPT = 0x12 | 0x80,
+ S390_CRYPTO_AES_192_GCM_ENCRYPT = 0x13,
+ S390_CRYPTO_AES_192_GCM_DECRYPT = 0x13 | 0x80,
+ S390_CRYPTO_AES_256_GCM_ENCRYPT = 0x14,
+ S390_CRYPTO_AES_256_GCM_DECRYPT = 0x14 | 0x80,
+
/*
* The S390_PRNG is only available for the KMC instruction.
*/
@@ -83,10 +98,10 @@ enum s390_crypto_function {
S390_CRYPTO_SHA512_DRNG_SEED = 0x03 | 0x80
};
-extern unsigned int sha1_switch, sha256_switch, sha512_switch, des_switch,
+extern unsigned int sha1_switch, sha256_switch, sha512_switch, sha3_switch, des_switch,
tdes_switch, aes128_switch, aes192_switch, aes256_switch,
prng_switch, tdea128_switch, tdea192_switch, sha512_drng_switch,
- msa4_switch, msa5_switch;
+ msa4_switch, msa5_switch, msa8_switch;
typedef struct {
unsigned int dummy_fc;
@@ -102,6 +117,12 @@ typedef enum {
SHA_256,
SHA_384,
SHA_512,
+ SHA_3_224,
+ SHA_3_256,
+ SHA_3_384,
+ SHA_3_512,
+ SHAKE_128,
+ SHAKE_256,
GHASH
} kimd_functions_t;
@@ -121,6 +142,7 @@ typedef enum {
extern s390_supported_function_t s390_kmc_functions[];
extern s390_supported_function_t s390_msa4_functions[];
+extern s390_supported_function_t s390_kma_functions[];
extern s390_supported_function_t s390_kimd_functions[];
extern s390_supported_function_t s390_ppno_functions[];
@@ -180,6 +202,43 @@ static inline int s390_kmac(unsigned long func, void *param,
}
/**
+ * s390_kma:
+ * @func: the function code passed to KMA; see s390_kma_functions
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @aad: address of optional additional authenticated data
+ * @aad_len: length of aad operand in bytes
+ *
+ * Executes the KMA (CIPHER MESSAGE WITH AUTHENTICATION) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for encryption/decryption funcs
+ */
+static inline int s390_kma(unsigned long func, void *param, unsigned char *dest,
+ const unsigned char *src, long src_len,
+ const unsigned char *aad, long aad_len)
+{
+ register long __func asm("0") = func;
+ register void *__param asm("1") = param;
+ register const unsigned char *__src asm("2") = src;
+ register long __src_len asm("3") = src_len;
+ register unsigned char *__dest asm("4") = dest;
+ register const unsigned char *__aad asm("6") = aad;
+ register long __aad_len asm("7") = aad_len;
+
+ asm volatile(
+ "0: .insn rrf,0xb9290000,%2,%0,%3,0 \n"
+ "1: brc 1,0b \n" /* handle partial completion */
+ : "+a" (__src), "+d" (__src_len), "+a" (__dest), "+a" (__aad), "+d" (__aad_len)
+ : "d" (__func), "a" (__param)
+ : "cc", "memory");
+
+ return func ? src_len - __src_len : __src_len;
+}
+
+/**
* s390_kmctr:
* @func: the function code passed to KMCTR; see s390_km_func
* @param: address of parameter block; see POP for details on each func
@@ -354,6 +413,30 @@ static inline int s390_kmc(unsigned long func, void *param, unsigned char *dest,
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for digest funcs
*/
+static inline int s390_kimd_shake(unsigned long func, void *param,
+ unsigned char *dest, long dest_len,
+ const unsigned char *src, long src_len)
+{
+ register long __func asm("0") = func;
+ register void *__param asm("1") = param;
+ register unsigned char *__dest asm("2") = dest;
+ register long __dest_len asm("3") = dest_len;
+ register const unsigned char *__src asm("4") = src;
+ register long __src_len asm("5") = src_len;
+ int ret = -1;
+
+ asm volatile(
+ "0: .insn rre,0xb93e0000,%1,%5\n\t" /* KIMD opcode */
+ " brc 1,0b\n\t" /* handle partial completion */
+ " la %0,0\n\t"
+ : "+d" (ret), "+a"(__dest), "+d"(__dest_len)
+ : "d"(__func), "a"(__param), "a"(__src), "d"(__src_len)
+ : "cc", "memory"
+ );
+
+ return func ? src_len - __src_len : __src_len;
+}
+
static inline int s390_kimd(unsigned long func, void *param,
const unsigned char *src, long src_len)
{
@@ -384,8 +467,32 @@ static inline int s390_kimd(unsigned long func, void *param,
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for digest funcs
*/
-static inline int s390_klmd(unsigned long func, void *param, const unsigned char *src,
- long src_len)
+static inline int s390_klmd_shake(unsigned long func, void *param,
+ unsigned char *dest, long dest_len,
+ const unsigned char *src, long src_len)
+{
+ register long __func asm("0") = func;
+ register void *__param asm("1") = param;
+ register unsigned char *__dest asm("2") = dest;
+ register long __dest_len asm("3") = dest_len;
+ register const unsigned char *__src asm("4") = src;
+ register long __src_len asm("5") = src_len;
+ int ret = -1;
+
+ asm volatile(
+ "0: .insn rre,0xb93f0000,%1,%5\n\t" /* KLMD opcode */
+ " brc 1,0b\n\t" /* handle partial completion */
+ " la %0,0\n\t"
+ : "+d" (ret), "+a"(__dest), "+d"(__dest_len)
+ : "d"(__func), "a"(__param), "a"(__src), "d"(__src_len)
+ : "cc", "memory"
+ );
+
+ return func ? src_len - __src_len : __src_len;
+}
+
+static inline int s390_klmd(unsigned long func, void *param,
+ const unsigned char *src, long src_len)
{
register long __func asm("0") = func;
register void *__param asm("1") = param;
diff --git a/src/include/s390_gcm.h b/src/include/s390_gcm.h
index 0c03137..3236d46 100644
--- a/src/include/s390_gcm.h
+++ b/src/include/s390_gcm.h
@@ -319,36 +319,77 @@ static inline int s390_gcm(unsigned int function_code,
/* prepate initial counter for cipher */
memcpy(tmp_ctr, j0, AES_BLOCK_SIZE);
- __inc_aes_ctr((struct uint128 *)tmp_ctr, GCM_CTR_WIDTH);
- /* en-/decrypt payload */
- if (function_code % 2) {
- /* decrypt */
- rc = s390_aes_ctr(UNDIRECTED_FC(function_code),
- ciphertext, plaintext, text_length,
- key, tmp_ctr, GCM_CTR_WIDTH);
- if (rc)
- return rc;
+ if (!msa8_switch) {
+
+ /**
+ * simulate aes-gcm with aes-ctr and ghash.
+ */
+
+ __inc_aes_ctr((struct uint128 *)tmp_ctr, GCM_CTR_WIDTH);
+
+ if (function_code % 2) {
+ /* mac */
+ rc = s390_gcm_authenticate(ciphertext, text_length,
+ aad, aad_length,
+ subkey_h, tmp_tag);
+ if (rc)
+ return rc;
+
+ /* decrypt */
+ rc = s390_aes_ctr(UNDIRECTED_FC(function_code),
+ ciphertext, plaintext, text_length,
+ key, tmp_ctr, GCM_CTR_WIDTH);
+ if (rc)
+ return rc;
+ } else {
+ /* encrypt */
+ rc = s390_aes_ctr(UNDIRECTED_FC(function_code),
+ plaintext, ciphertext, text_length,
+ key, tmp_ctr, GCM_CTR_WIDTH);
+ if (rc)
+ return rc;
+
+ /* mac */
+ rc = s390_gcm_authenticate(ciphertext, text_length,
+ aad, aad_length,
+ subkey_h, tmp_tag);
+ if (rc)
+ return rc;
+ }
+
+ /* encrypt tag */
+ return s390_aes_ctr(UNDIRECTED_FC(function_code),
+ tmp_tag, tag, tag_length,
+ key, j0, GCM_CTR_WIDTH);
+
} else {
- /* encrypt */
- rc = s390_aes_ctr(UNDIRECTED_FC(function_code),
- plaintext, ciphertext, text_length,
- key, tmp_ctr, GCM_CTR_WIDTH);
- if (rc)
- return rc;
- }
- /* generate authentication tag */
- rc = s390_gcm_authenticate(ciphertext, text_length,
- aad, aad_length,
- subkey_h, tmp_tag);
- if (rc)
- return rc;
+ /**
+ * use the aes-gcm support via CPACF.
+ */
+
+ if (function_code % 2) {
+ /* decrypt */
+ rc = s390_aes_gcm(function_code,
+ ciphertext, plaintext, text_length,
+ key, j0, GCM_CTR_WIDTH,
+ tmp_ctr, GCM_CTR_WIDTH,
+ aad, aad_length, subkey_h,
+ tag, tag_length, 1, 1);
+ } else {
+ /* encrypt */
+ memset(tag, 0, AES_BLOCK_SIZE);
+ rc = s390_aes_gcm(function_code,
+ plaintext, ciphertext, text_length,
+ key, j0, GCM_CTR_WIDTH,
+ tmp_ctr, GCM_CTR_WIDTH,
+ aad, aad_length, subkey_h,
+ tag, tag_length, 1, 1);
+ }
- /* encrypt tag */
- return s390_aes_ctr(UNDIRECTED_FC(function_code),
- tmp_tag, tag, tag_length,
- key, j0, GCM_CTR_WIDTH);
+ return rc;
+ }
}
static inline int s390_gcm_initialize(unsigned int function_code,
@@ -373,14 +414,29 @@ static inline int s390_gcm_initialize(unsigned int function_code,
/* calculate initial counter, based on iv */
__compute_j0(iv, iv_length, subkey, icb);
- /* prepate usage counter for cipher */
+ /* prepare usage counter for cipher */
memcpy(ucb, icb, AES_BLOCK_SIZE);
- __inc_aes_ctr((struct uint128 *)ucb, GCM_CTR_WIDTH);
+
+ if (!msa8_switch) // KMA increases the ctr internally
+ __inc_aes_ctr((struct uint128 *)ucb, GCM_CTR_WIDTH);
return 0;
}
-static inline int s390_gcm_intermediate(unsigned int function_code,
+static inline void inc_ctr(unsigned char* ctr)
+{
+ unsigned int* cv;
+
+ cv = (unsigned int*)&ctr[12];
+ *cv = *cv + 1;
+}
+
+/**
+ * processes the last partial plaintext/ciphertext (< 16 bytes) and calculates
+ * the last intermediate tag using the old code path. This is not possible with
+ * KMA, because KMA cannot process partial blocks before s390_gcm_last.
+ */
+static inline int s390_gcm_last_intermediate(unsigned int function_code,
unsigned char *plaintext, unsigned long text_length,
unsigned char *ciphertext,
unsigned char *ctr,
@@ -389,30 +445,114 @@ static inline int s390_gcm_intermediate(unsigned int function_code,
unsigned char *key, unsigned char *subkey)
{
unsigned int rc;
+ unsigned char tmp_ctr[16];
- if (!msa4_switch)
- return EPERM;
+ /*
+ * The old code needs ctr +1.
+ * We copy ctr, to not destroy the original ctr.
+ */
+ memcpy(tmp_ctr, ctr, sizeof(tmp_ctr));
+ inc_ctr(tmp_ctr);
- /* en-/decrypt payload */
if (function_code % 2) {
+ /* mac */
+ rc = s390_gcm_authenticate_intermediate(ciphertext, text_length, aad,
+ aad_length, subkey, tag);
+ if (rc)
+ return rc;
/* decrypt */
rc = s390_aes_ctr(UNDIRECTED_FC(function_code), ciphertext, plaintext,
- text_length, key, ctr, GCM_CTR_WIDTH);
+ text_length, key, tmp_ctr, GCM_CTR_WIDTH);
if (rc)
return rc;
} else {
/* encrypt */
rc = s390_aes_ctr(UNDIRECTED_FC(function_code), plaintext, ciphertext,
- text_length, key, ctr, GCM_CTR_WIDTH);
+ text_length, key, tmp_ctr, GCM_CTR_WIDTH);
+ if (rc)
+ return rc;
+ /* mac */
+ rc = s390_gcm_authenticate_intermediate(ciphertext, text_length, aad,
+ aad_length, subkey, tag);
if (rc)
return rc;
}
- /* generate authentication tag */
- rc = s390_gcm_authenticate_intermediate(ciphertext, text_length, aad,
- aad_length, subkey, tag);
- if (rc)
- return rc;
+ return 0;
+}
+
+static inline int s390_gcm_intermediate(unsigned int function_code,
+ unsigned char *plaintext, unsigned long text_length,
+ unsigned char *ciphertext,
+ unsigned char *ctr,
+ unsigned char *aad, unsigned long aad_length,
+ unsigned char *tag, unsigned long tag_length,
+ unsigned char *key, unsigned char *subkey)
+{
+ unsigned long bulk;
+ unsigned int rc, laad;
+ unsigned char *in, *out;
+
+ if (!msa4_switch)
+ return EPERM;
+
+ if (!msa8_switch) {
+ if (function_code % 2) {
+ /* mac */
+ rc = s390_gcm_authenticate_intermediate(ciphertext, text_length, aad,
+ aad_length, subkey, tag);
+ if (rc)
+ return rc;
+
+ /* decrypt */
+ rc = s390_aes_ctr(UNDIRECTED_FC(function_code), ciphertext, plaintext,
+ text_length, key, ctr, GCM_CTR_WIDTH);
+ if (rc)
+ return rc;
+ } else {
+ /* encrypt */
+ rc = s390_aes_ctr(UNDIRECTED_FC(function_code), plaintext, ciphertext,
+ text_length, key, ctr, GCM_CTR_WIDTH);
+ if (rc)
+ return rc;
+
+ /* mac */
+ rc = s390_gcm_authenticate_intermediate(ciphertext, text_length, aad,
+ aad_length, subkey, tag);
+ if (rc)
+ return rc;
+ }
+ } else {
+ if ((text_length > 0) || (aad_length % AES_BLOCK_SIZE))
+ laad = 1;
+ else
+ laad = 0;
+
+ bulk = (text_length / AES_BLOCK_SIZE) * AES_BLOCK_SIZE;
+ text_length %= AES_BLOCK_SIZE;
+
+ if (bulk || aad_length) {
+ in = (function_code % 2) ? ciphertext : plaintext;
+ out = (function_code % 2) ? plaintext : ciphertext;
+
+ rc = s390_aes_gcm(function_code,
+ in, out, bulk, key,
+ NULL, 0, // j0, j0_length not used here
+ ctr, GCM_CTR_WIDTH,
+ aad, aad_length, subkey,
+ tag, tag_length, laad, 0);
+ if (rc)
+ return rc;
+ }
+ if (text_length) {
+ rc = s390_gcm_last_intermediate(function_code,
+ plaintext + bulk, text_length,
+ ciphertext + bulk, ctr, NULL,
+ 0, tag, tag_length, key, subkey);
+ if (rc)
+ return rc;
+ }
+ }
return 0;
}
@@ -423,16 +563,32 @@ static inline int s390_gcm_last(unsigned int function_code, unsigned char *icb,
unsigned char *key, unsigned char *subkey)
{
unsigned char tmp_tag[AES_BLOCK_SIZE];
+ unsigned char tmp_icb[AES_BLOCK_SIZE];
int rc;
- /* generate authentication tag */
- memcpy(tmp_tag, tag, tag_length);
- rc = s390_gcm_authenticate_last(aad_length, ciph_length, subkey, tmp_tag);
- if (rc)
- return rc;
+ /* don't modify icb buffer */
+ memcpy(tmp_icb, icb, sizeof(tmp_icb));
- /* encrypt tag */
- return s390_aes_ctr(UNDIRECTED_FC(function_code), tmp_tag, tag, tag_length,
- key, icb, GCM_CTR_WIDTH);
+ if (!msa8_switch) {
+
+ /* generate authentication tag */
+ memcpy(tmp_tag, tag, tag_length);
+ rc = s390_gcm_authenticate_last(aad_length, ciph_length, subkey, tmp_tag);
+ if (rc)
+ return rc;
+
+ /* encrypt tag */
+ return s390_aes_ctr(UNDIRECTED_FC(function_code), tmp_tag, tag, tag_length,
+ key, tmp_icb, GCM_CTR_WIDTH);
+
+ } else {
+
+ return s390_aes_gcm(function_code,
+ NULL, NULL, ciph_length,
+ key, tmp_icb, GCM_CTR_WIDTH,
+ NULL, 0,
+ NULL, aad_length, subkey,
+ tag, tag_length, 1, 1);
+ }
}
#endif
diff --git a/src/include/s390_rsa.h b/src/include/s390_rsa.h
index 5d30cfc..1cca24d 100644
--- a/src/include/s390_rsa.h
+++ b/src/include/s390_rsa.h
@@ -16,7 +16,6 @@
#include <openssl/bn.h>
#include <asm/zcrypt.h>
-#include <semaphore.h>
#include "ica_api.h"
typedef struct ica_rsa_modexpo ica_rsa_modexpo_t;
@@ -41,7 +40,5 @@ unsigned int rsa_key_generate_crt(ica_adapter_handle_t deviceHandle,
unsigned int rsa_crt_sw(ica_rsa_modexpo_crt_t * pCrt);
unsigned int rsa_mod_mult_sw(ica_rsa_modmult_t * pMul);
unsigned int rsa_mod_expo_sw(ica_rsa_modexpo_t *pMex);
-
-sem_t openssl_crypto_lock_mtx;
#endif
diff --git a/src/include/s390_sha.h b/src/include/s390_sha.h
index 1cec7be..270faeb 100644
--- a/src/include/s390_sha.h
+++ b/src/include/s390_sha.h
@@ -44,6 +44,25 @@ static unsigned char SHA_512_DEFAULT_IV[] = {
0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19,
0x13, 0x7e, 0x21, 0x79 };
+static unsigned char SHA_3_DEFAULT_IV[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
typedef struct {
unsigned int hw_function_code;
unsigned int hash_length;
@@ -57,7 +76,13 @@ static const SHA_CONSTANTS sha_constants[] = {
{S390_CRYPTO_SHA_256, 28, 32, 64, SHA_224_DEFAULT_IV},
{S390_CRYPTO_SHA_256, 32, 32, 64, SHA_256_DEFAULT_IV},
{S390_CRYPTO_SHA_512, 48, 64, 128, SHA_384_DEFAULT_IV},
- {S390_CRYPTO_SHA_512, 64, 64, 128, SHA_512_DEFAULT_IV}
+ {S390_CRYPTO_SHA_512, 64, 64, 128, SHA_512_DEFAULT_IV},
+ {S390_CRYPTO_SHA_3_224, 28, 200, 144, SHA_3_DEFAULT_IV},
+ {S390_CRYPTO_SHA_3_256, 32, 200, 136, SHA_3_DEFAULT_IV},
+ {S390_CRYPTO_SHA_3_384, 48, 200, 104, SHA_3_DEFAULT_IV},
+ {S390_CRYPTO_SHA_3_512, 64, 200, 72, SHA_3_DEFAULT_IV},
+ {S390_CRYPTO_SHAKE_128, 0, 200, 168, SHA_3_DEFAULT_IV},
+ {S390_CRYPTO_SHAKE_256, 0, 200, 136, SHA_3_DEFAULT_IV}
};
int s390_sha1(unsigned char *iv, unsigned char *input_data,
@@ -82,8 +107,51 @@ int s390_sha512(unsigned char *iv, unsigned char *input_data,
unsigned int message_part, uint64_t *running_length_lo,
uint64_t *running_length_hi);
+int s390_sha3_224(unsigned char *iv, unsigned char *input_data,
+ unsigned int input_length, unsigned char *output_data,
+ unsigned int message_part, uint64_t *running_length);
+
+int s390_sha3_256(unsigned char *iv, unsigned char *input_data,
+ unsigned int input_length, unsigned char *output_data,
+ unsigned int message_part, uint64_t *running_length);
+
+int s390_sha3_384(unsigned char *iv, unsigned char *input_data,
+ uint64_t input_length, unsigned char *output_data,
+ unsigned int message_part, uint64_t *running_length_lo,
+ uint64_t *running_length_hi);
+
+int s390_sha3_512(unsigned char *iv, unsigned char *input_data,
+ uint64_t input_length, unsigned char *output_data,
+ unsigned int message_part, uint64_t *running_length_lo,
+ uint64_t *running_length_hi);
+
+int s390_shake_128(unsigned char *iv, unsigned char *input_data,
+ uint64_t input_length, unsigned char *output_data, unsigned int output_length,
+ unsigned int message_part, uint64_t *running_length_lo,
+ uint64_t *running_length_hi);
+
+int s390_shake_256(unsigned char *iv, unsigned char *input_data,
+ uint64_t input_length, unsigned char *output_data, unsigned int output_length,
+ unsigned int message_part, uint64_t *running_length_lo,
+ uint64_t *running_length_hi);
+
+int s390_shake_hw(unsigned char *iv, unsigned char *input_data,
+ uint64_t input_length, unsigned char *output_data, unsigned int output_length,
+ unsigned int message_part, uint64_t *running_length_lo,
+ uint64_t *running_length_hi, kimd_functions_t sha_function);
+
+static inline int is_shake(unsigned int n)
+{
+ return (n >= SHAKE_128 && n <= SHAKE_256 ? 1 : 0);
+}
+
+static inline int is_sha3(unsigned int n)
+{
+ return (n >= SHA_3_224 && n <= SHA_3_512 ? 1 : 0);
+}
+
static inline int s390_sha_hw(unsigned char *iv, unsigned char *input_data,
- uint64_t input_length, unsigned char *output_data,
+ uint64_t input_length, unsigned char *output_data, unsigned int output_length,
unsigned int message_part, uint64_t *running_length_lo,
uint64_t *running_length_hi, kimd_functions_t sha_function)
{
@@ -94,15 +162,15 @@ static inline int s390_sha_hw(unsigned char *iv, unsigned char *input_data,
int complete_blocks_length = 0;
unsigned char *default_iv = sha_constants[sha_function].default_iv;
- unsigned int hash_length = sha_constants[sha_function].hash_length;
+ unsigned int hash_length = output_length;
unsigned int vector_length = sha_constants[sha_function].vector_length;
unsigned int hw_function_code
= sha_constants[sha_function].hw_function_code;
- /* A internal buffer for the SHA hash and stream bit length. For SHA512
- * this can be at most 128 byte for the hash plus 16 byte for the
- /* A internal buffer for the SHA hash and stream bit length. For SHA512
+ /* An internal buffer for the SHA hash and stream bit length. For SHA3/SHAKE
+ * this can be at most 200 bytes for the parmblock plus 16 bytes for the
+ * stream length. */
- unsigned char shabuff[128 + 16];
+ unsigned char shabuff[200+16];
if (input_length) {
remnant = input_length % sha_constants[sha_function].block_length;
@@ -128,8 +196,14 @@ static inline int s390_sha_hw(unsigned char *iv, unsigned char *input_data,
return EINVAL;
if (complete_blocks_length) {
- rc = s390_kimd(hw_function_code, shabuff, input_data,
- complete_blocks_length);
+ if (is_shake(sha_function))
+ rc = s390_kimd_shake(hw_function_code, shabuff, output_data,
+ output_length, input_data,
+ complete_blocks_length);
+ else
+ rc = s390_kimd(hw_function_code, shabuff, input_data,
+ complete_blocks_length);
+
if (rc > 0) {
/* Check for overflow in sum_lo */
sum_lo += rc;
@@ -158,14 +232,29 @@ static inline int s390_sha_hw(unsigned char *iv, unsigned char *input_data,
memcpy(shabuff + vector_length,
(unsigned char *)&sum_lo, sizeof(sum_lo));
}
- rc = s390_klmd(hw_function_code, shabuff,
- input_data + complete_blocks_length, remnant);
+
+ if (is_shake(sha_function))
+ rc = s390_klmd_shake(hw_function_code, shabuff, output_data,
+ output_length,
+ input_data + complete_blocks_length, remnant);
+ else
+ rc = s390_klmd(hw_function_code, shabuff,
+ input_data + complete_blocks_length, remnant);
+
if (rc > 0)
rc = 0;
}
if (rc == 0) {
- memcpy((void *)output_data, shabuff, hash_length);
+
+ /**
+ * Here we copy the correct final hash to the caller provided buffer.
+ * But not for SHAKE. In this case s390_klmd_shake already copied the output
+ * (that may be longer than shabuff!) directly to output_data.
+ */
+ if (!is_shake(sha_function))
+ memcpy((void *)output_data, shabuff, hash_length);
+
if (message_part != SHA_MSG_PART_FINAL &&
message_part != SHA_MSG_PART_ONLY) {
memcpy((void *)iv, shabuff, vector_length);