@@ -34,6 +34,54 @@ static const unsigned int bytes_per_fpu_avx2 = 34 * 1024;
static const unsigned int bytes_per_fpu_avx = 30 * 1024;
static const unsigned int bytes_per_fpu_ssse3 = 26 * 1024;
+asmlinkage void sha1_transform_ssse3(struct sha1_state *state,
+ const u8 *data, int blocks);
+
+asmlinkage void sha1_transform_avx(struct sha1_state *state,
+ const u8 *data, int blocks);
+
+asmlinkage void sha1_transform_avx2(struct sha1_state *state,
+ const u8 *data, int blocks);
+
+#ifdef CONFIG_AS_SHA1_NI
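+/* "rounds" is the number of 64-byte blocks, despite the name. */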
+asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
+ int rounds);
+#endif
+
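+/*
+ * Wrappers that pair each assembly transform with kernel_fpu_begin()
+ * and kernel_fpu_end(). Callers chunk their input to at most
+ * bytes_per_fpu_* bytes per call, bounding how long preemption stays
+ * disabled inside each FPU section.
+ */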
+static void fpu_sha1_transform_ssse3(struct sha1_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha1_transform_ssse3(state, data, blocks);
+ kernel_fpu_end();
+}
+
+static void fpu_sha1_transform_avx(struct sha1_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha1_transform_avx(state, data, blocks);
+ kernel_fpu_end();
+}
+
+static void fpu_sha1_transform_avx2(struct sha1_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha1_transform_avx2(state, data, blocks);
+ kernel_fpu_end();
+}
+
+#ifdef CONFIG_AS_SHA1_NI
+static void fpu_sha1_transform_shani(struct sha1_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha1_ni_transform(state, data, blocks);
+ kernel_fpu_end();
+}
+#endif
+
static int using_x86_ssse3;
static int using_x86_avx;
static int using_x86_avx2;
@@ -60,9 +108,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
while (len) {
unsigned int chunk = min(len, bytes_per_fpu);
- kernel_fpu_begin();
sha1_base_do_update(desc, data, chunk, sha1_xform);
- kernel_fpu_end();
len -= chunk;
data += chunk;
@@ -81,36 +127,29 @@ static int sha1_finup(struct shash_desc *desc, const u8 *data,
while (len) {
unsigned int chunk = min(len, bytes_per_fpu);
- kernel_fpu_begin();
sha1_base_do_update(desc, data, chunk, sha1_xform);
- kernel_fpu_end();
len -= chunk;
data += chunk;
}
- kernel_fpu_begin();
sha1_base_do_finalize(desc, sha1_xform);
- kernel_fpu_end();
return sha1_base_finish(desc, out);
}
-asmlinkage void sha1_transform_ssse3(struct sha1_state *state,
- const u8 *data, int blocks);
-
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha1_update(desc, data, len, bytes_per_fpu_ssse3,
- sha1_transform_ssse3);
+ fpu_sha1_transform_ssse3);
}
static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha1_finup(desc, data, len, bytes_per_fpu_ssse3, out,
- sha1_transform_ssse3);
+ fpu_sha1_transform_ssse3);
}
/* Add padding and return the message digest. */
@@ -143,21 +182,18 @@ static void unregister_sha1_ssse3(void)
}
}
-asmlinkage void sha1_transform_avx(struct sha1_state *state,
- const u8 *data, int blocks);
-
static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha1_update(desc, data, len, bytes_per_fpu_avx,
- sha1_transform_avx);
+ fpu_sha1_transform_avx);
}
static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha1_finup(desc, data, len, bytes_per_fpu_avx, out,
- sha1_transform_avx);
+ fpu_sha1_transform_avx);
}
static int sha1_avx_final(struct shash_desc *desc, u8 *out)
@@ -191,17 +227,14 @@ static void unregister_sha1_avx(void)
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */
-asmlinkage void sha1_transform_avx2(struct sha1_state *state,
- const u8 *data, int blocks);
-
static void sha1_apply_transform_avx2(struct sha1_state *state,
const u8 *data, int blocks)
{
/* Select the optimal transform based on data block size */
if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE)
- sha1_transform_avx2(state, data, blocks);
+ fpu_sha1_transform_avx2(state, data, blocks);
else
- sha1_transform_avx(state, data, blocks);
+ fpu_sha1_transform_avx(state, data, blocks);
}
static int sha1_avx2_update(struct shash_desc *desc, const u8 *data,
@@ -248,21 +281,18 @@ static void unregister_sha1_avx2(void)
}
#ifdef CONFIG_AS_SHA1_NI
-asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
- int rounds);
-
static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha1_update(desc, data, len, bytes_per_fpu_shani,
- sha1_ni_transform);
+ fpu_sha1_transform_shani);
}
static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha1_finup(desc, data, len, bytes_per_fpu_shani, out,
- sha1_ni_transform);
+ fpu_sha1_transform_shani);
}
static int sha1_ni_final(struct shash_desc *desc, u8 *out)
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -51,6 +51,51 @@ static const unsigned int bytes_per_fpu_ssse3 = 11 * 1024;
asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
const u8 *data, int blocks);
+asmlinkage void sha256_transform_avx(struct sha256_state *state,
+ const u8 *data, int blocks);
+
+asmlinkage void sha256_transform_rorx(struct sha256_state *state,
+ const u8 *data, int blocks);
+
+#ifdef CONFIG_AS_SHA256_NI
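+/* As for SHA-1, "rounds" is the number of 64-byte blocks. */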
+asmlinkage void sha256_ni_transform(struct sha256_state *digest,
+ const u8 *data, int rounds);
+#endif
+
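+/*
+ * FPU-section wrappers, as in the SHA-1 glue: callers limit each
+ * call to at most bytes_per_fpu_* bytes.
+ */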
+static void fpu_sha256_transform_ssse3(struct sha256_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha256_transform_ssse3(state, data, blocks);
+ kernel_fpu_end();
+}
+
+static void fpu_sha256_transform_avx(struct sha256_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha256_transform_avx(state, data, blocks);
+ kernel_fpu_end();
+}
+
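+/*
+ * The AVX2 code path is sha256_transform_rorx(), named for its use
+ * of the RORX (BMI2) instruction.
+ */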
+static void fpu_sha256_transform_avx2(struct sha256_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha256_transform_rorx(state, data, blocks);
+ kernel_fpu_end();
+}
+
+#ifdef CONFIG_AS_SHA256_NI
+static void fpu_sha256_transform_shani(struct sha256_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha256_ni_transform(state, data, blocks);
+ kernel_fpu_end();
+}
+#endif
+
static int using_x86_ssse3;
static int using_x86_avx;
static int using_x86_avx2;
@@ -77,9 +122,7 @@ static int _sha256_update(struct shash_desc *desc, const u8 *data,
while (len) {
unsigned int chunk = min(len, bytes_per_fpu);
- kernel_fpu_begin();
sha256_base_do_update(desc, data, chunk, sha256_xform);
- kernel_fpu_end();
len -= chunk;
data += chunk;
@@ -98,17 +141,13 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
while (len) {
unsigned int chunk = min(len, bytes_per_fpu);
- kernel_fpu_begin();
sha256_base_do_update(desc, data, chunk, sha256_xform);
- kernel_fpu_end();
len -= chunk;
data += chunk;
}
- kernel_fpu_begin();
sha256_base_do_finalize(desc, sha256_xform);
- kernel_fpu_end();
return sha256_base_finish(desc, out);
}
@@ -117,14 +156,14 @@ static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return _sha256_update(desc, data, len, bytes_per_fpu_ssse3,
- sha256_transform_ssse3);
+ fpu_sha256_transform_ssse3);
}
static int sha256_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha256_finup(desc, data, len, bytes_per_fpu_ssse3,
- out, sha256_transform_ssse3);
+ out, fpu_sha256_transform_ssse3);
}
/* Add padding and return the message digest. */
@@ -179,14 +218,14 @@ static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return _sha256_update(desc, data, len, bytes_per_fpu_avx,
- sha256_transform_avx);
+ fpu_sha256_transform_avx);
}
static int sha256_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha256_finup(desc, data, len, bytes_per_fpu_avx,
- out, sha256_transform_avx);
+ out, fpu_sha256_transform_avx);
}
static int sha256_avx_final(struct shash_desc *desc, u8 *out)
@@ -240,14 +279,14 @@ static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return _sha256_update(desc, data, len, bytes_per_fpu_avx2,
- sha256_transform_rorx);
+ fpu_sha256_transform_avx2);
}
static int sha256_avx2_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha256_finup(desc, data, len, bytes_per_fpu_avx2,
- out, sha256_transform_rorx);
+ out, fpu_sha256_transform_avx2);
}
static int sha256_avx2_final(struct shash_desc *desc, u8 *out)
@@ -302,14 +341,14 @@ static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return _sha256_update(desc, data, len, bytes_per_fpu_shani,
- sha256_ni_transform);
+ fpu_sha256_transform_shani);
}
static int sha256_ni_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha256_finup(desc, data, len, bytes_per_fpu_shani,
- out, sha256_ni_transform);
+ out, fpu_sha256_transform_shani);
}
static int sha256_ni_final(struct shash_desc *desc, u8 *out)
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -47,6 +47,36 @@ static const unsigned int bytes_per_fpu_ssse3 = 17 * 1024;
asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
const u8 *data, int blocks);
+asmlinkage void sha512_transform_avx(struct sha512_state *state,
+ const u8 *data, int blocks);
+
+asmlinkage void sha512_transform_rorx(struct sha512_state *state,
+ const u8 *data, int blocks);
+
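+/*
+ * FPU-section wrappers, as in the SHA-1 and SHA-256 glue: callers
+ * limit each call to at most bytes_per_fpu_* bytes.
+ */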
+static void fpu_sha512_transform_ssse3(struct sha512_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha512_transform_ssse3(state, data, blocks);
+ kernel_fpu_end();
+}
+
+static void fpu_sha512_transform_avx(struct sha512_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha512_transform_avx(state, data, blocks);
+ kernel_fpu_end();
+}
+
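+/*
+ * As with SHA-256, the AVX2 code path is the RORX (BMI2)
+ * implementation, sha512_transform_rorx().
+ */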
+static void fpu_sha512_transform_avx2(struct sha512_state *state,
+ const u8 *data, int blocks)
+{
+ kernel_fpu_begin();
+ sha512_transform_rorx(state, data, blocks);
+ kernel_fpu_end();
+}
+
static int using_x86_ssse3;
static int using_x86_avx;
static int using_x86_avx2;
@@ -70,9 +100,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
while (len) {
unsigned int chunk = min(len, bytes_per_fpu);
- kernel_fpu_begin();
sha512_base_do_update(desc, data, chunk, sha512_xform);
- kernel_fpu_end();
len -= chunk;
data += chunk;
@@ -91,17 +119,13 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
while (len) {
unsigned int chunk = min(len, bytes_per_fpu);
- kernel_fpu_begin();
sha512_base_do_update(desc, data, chunk, sha512_xform);
- kernel_fpu_end();
len -= chunk;
data += chunk;
}
- kernel_fpu_begin();
sha512_base_do_finalize(desc, sha512_xform);
- kernel_fpu_end();
return sha512_base_finish(desc, out);
}
@@ -110,14 +134,14 @@ static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_update(desc, data, len, bytes_per_fpu_ssse3,
- sha512_transform_ssse3);
+ fpu_sha512_transform_ssse3);
}
static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha512_finup(desc, data, len, bytes_per_fpu_ssse3,
- out, sha512_transform_ssse3);
+ out, fpu_sha512_transform_ssse3);
}
/* Add padding and return the message digest. */
@@ -172,14 +196,14 @@ static int sha512_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_update(desc, data, len, bytes_per_fpu_avx,
- sha512_transform_avx);
+ fpu_sha512_transform_avx);
}
static int sha512_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha512_finup(desc, data, len, bytes_per_fpu_avx,
- out, sha512_transform_avx);
+ out, fpu_sha512_transform_avx);
}
/* Add padding and return the message digest. */
@@ -234,14 +258,14 @@ static int sha512_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_update(desc, data, len, bytes_per_fpu_avx2,
- sha512_transform_rorx);
+ fpu_sha512_transform_avx2);
}
static int sha512_avx2_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha512_finup(desc, data, len, bytes_per_fpu_avx2,
- out, sha512_transform_rorx);
+ out, fpu_sha512_transform_avx2);
}
/* Add padding and return the message digest. */