Mirror of https://github.com/hashcat/hashcat (synced 2024-11-24 14:27:14 +01:00)
Commit dad03e394d
1) SIMD code for all attack modes

The macro vector_accessible() was not refactored and was missing completely. I had to rename the variables rules_cnt, combs_cnt and bfs_cnt to il_cnt, which was a good thing anyway since with the new SIMD code they all behave the same way.

2) SIMD code for attack mode 0

With the new SIMD code, apply_rules_vect() has to return u32, not u32x. This has a massive impact on all *_a0 kernels; I've rewritten most of them. Deep testing using test.sh is still required.

Some kernels need more fixes:

- Some are essentially incompatible, like m10400, and they still use the old check_* includes; we should get rid of those since they are no longer necessary now that we have simd.c
- Some have a chance but require additional effort, like m11500. The commented-out "#define NEW_SIMD_CODE" can be used to find them

This change can have a negative impact on -a0 performance for devices that require vectorization, which is mostly CPUs. New GPUs are all scalar, so they won't be hurt by this. This change also proves that there is no way to efficiently vectorize kernel rules with the new SIMD code, but it enables the addition of rule functions like @ that we have been missing for a long time. This is a TODO.
986 lines
30 KiB
OpenCL C
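A note on point 2) of the commit message above: rule application cannot be vectorized efficiently because every rule produces a data-dependent result length per candidate, so with the new SIMD code each lane gets its own scalar pass, and a scalar length is what apply_rules_vect() now returns (u32 instead of u32x). Below is a minimal toy sketch of that per-lane pattern in plain C; VECT_SIZE, the buffer layout and apply_rule_toy() are assumptions made for illustration, not hashcat's actual code:

  #define VECT_SIZE 4                         /* assumed vector width */

  typedef unsigned int u32;

  /* toy "rule": append one '$' byte and return the new (data-dependent) length */
  static u32 apply_rule_toy (u32 buf[16], const u32 len)
  {
    buf[len / 4] |= (u32) '$' << ((len % 4) * 8);

    return len + 1;
  }

  /* rules stay scalar: one pass per lane, results packed into vectors afterwards */
  static void apply_rules_per_lane (u32 base[VECT_SIZE][16], const u32 pw_len, u32 out_len[VECT_SIZE])
  {
    for (int lane = 0; lane < VECT_SIZE; lane++)
    {
      out_len[lane] = apply_rule_toy (base[lane], pw_len);
    }
  }
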
/**
 * Authors.....: Jens Steube <jens.steube@gmail.com>
 *               Gabriele Gristina <matrix@hashcat.net>
 *
 * License.....: MIT
 */

#define _LOTUS8_

#include "include/constants.h"
#include "include/kernel_vendor.h"

#define DGST_R0 0
#define DGST_R1 1
#define DGST_R2 2
#define DGST_R3 3

#include "include/kernel_functions.c"

#undef _SHA1_

#include "OpenCL/types_ocl.c"
#include "OpenCL/common.c"

#define COMPARE_S "OpenCL/check_single_comp4.c"
#define COMPARE_M "OpenCL/check_multi_comp4.c"

__constant char lotus64_table[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/";

__constant u32 lotus_magic_table[256] =
{
  0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
  0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
  0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
  0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
  0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
  0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
  0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
  0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
  0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
  0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
  0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
  0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
  0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
  0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
  0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
  0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
  0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
  0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
  0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
  0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
  0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
  0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
  0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
  0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
  0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
  0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
  0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
  0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
  0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
  0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
  0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
  0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
};

#define BOX(S,i) (S)[(i)]

#define uint_to_hex_upper8(i) l_bin2asc[(i)]
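
/*
 * lotus_mix: the Lotus/Domino mixing step. Runs 18 rounds over the 48-byte
 * working buffer in[0..11]; within a round every byte is chained through the
 * 256-entry lotus_magic_table S-box (p is the running chaining byte, s counts
 * down from 48).
 */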
static void lotus_mix (u32 *in, __local u32 *s_lotus_magic_table)
{
  u32 p = 0;

  for (int i = 0; i < 18; i++)
  {
    u32 s = 48;

    #pragma unroll 12
    for (int j = 0; j < 12; j++)
    {
      u32 tmp_in  = in[j];
      u32 tmp_out = 0;

      p = (p + s--) & 0xff; p = ((tmp_in >>  0) & 0xff) ^ BOX (s_lotus_magic_table, p); tmp_out |= p <<  0;
      p = (p + s--) & 0xff; p = ((tmp_in >>  8) & 0xff) ^ BOX (s_lotus_magic_table, p); tmp_out |= p <<  8;
      p = (p + s--) & 0xff; p = ((tmp_in >> 16) & 0xff) ^ BOX (s_lotus_magic_table, p); tmp_out |= p << 16;
      p = (p + s--) & 0xff; p = ((tmp_in >> 24) & 0xff) ^ BOX (s_lotus_magic_table, p); tmp_out |= p << 24;

      in[j] = tmp_out;
    }
  }
}

static void lotus_transform_password (u32 in[4], u32 out[4], __local u32 *s_lotus_magic_table)
{
  u32 t = out[3] >> 24;

  u32 c;

  #pragma unroll 4
  for (int i = 0; i < 4; i++)
  {
    t ^= (in[i] >>  0) & 0xff; c = BOX (s_lotus_magic_table, t); out[i] ^= c <<  0; t = ((out[i] >>  0) & 0xff);
    t ^= (in[i] >>  8) & 0xff; c = BOX (s_lotus_magic_table, t); out[i] ^= c <<  8; t = ((out[i] >>  8) & 0xff);
    t ^= (in[i] >> 16) & 0xff; c = BOX (s_lotus_magic_table, t); out[i] ^= c << 16; t = ((out[i] >> 16) & 0xff);
    t ^= (in[i] >> 24) & 0xff; c = BOX (s_lotus_magic_table, t); out[i] ^= c << 24; t = ((out[i] >> 24) & 0xff);
  }
}
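
/*
 * pad: Lotus-style padding of a 16-byte block. The pad byte is (16 - len)
 * and fills the remaining bytes of w[0..3]; the switch selects which words
 * are partially or fully overwritten for the given length.
 */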
static void pad (u32 w[4], const u32 len)
{
  const u32 val = 16 - len;

  const u32 mask1 = val << 24;

  const u32 mask2 = val << 16
                  | val << 24;

  const u32 mask3 = val <<  8
                  | val << 16
                  | val << 24;

  const u32 mask4 = val <<  0
                  | val <<  8
                  | val << 16
                  | val << 24;

  switch (len)
  {
    case  0: w[0]  = mask4;
             w[1]  = mask4;
             w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  1: w[0] |= mask3;
             w[1]  = mask4;
             w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  2: w[0] |= mask2;
             w[1]  = mask4;
             w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  3: w[0] |= mask1;
             w[1]  = mask4;
             w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  4: w[1]  = mask4;
             w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  5: w[1] |= mask3;
             w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  6: w[1] |= mask2;
             w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  7: w[1] |= mask1;
             w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  8: w[2]  = mask4;
             w[3]  = mask4;
             break;
    case  9: w[2] |= mask3;
             w[3]  = mask4;
             break;
    case 10: w[2] |= mask2;
             w[3]  = mask4;
             break;
    case 11: w[2] |= mask1;
             w[3]  = mask4;
             break;
    case 12: w[3]  = mask4;
             break;
    case 13: w[3] |= mask3;
             break;
    case 14: w[3] |= mask2;
             break;
    case 15: w[3] |= mask1;
             break;
  }
}

static void mdtransform_norecalc (u32 state[4], u32 block[4], __local u32 *s_lotus_magic_table)
{
  u32 x[12];

  x[ 0] = state[0];
  x[ 1] = state[1];
  x[ 2] = state[2];
  x[ 3] = state[3];
  x[ 4] = block[0];
  x[ 5] = block[1];
  x[ 6] = block[2];
  x[ 7] = block[3];
  x[ 8] = state[0] ^ block[0];
  x[ 9] = state[1] ^ block[1];
  x[10] = state[2] ^ block[2];
  x[11] = state[3] ^ block[3];

  lotus_mix (x, s_lotus_magic_table);

  state[0] = x[0];
  state[1] = x[1];
  state[2] = x[2];
  state[3] = x[3];
}

static void mdtransform (u32 state[4], u32 checksum[4], u32 block[4], __local u32 *s_lotus_magic_table)
{
  mdtransform_norecalc (state, block, s_lotus_magic_table);

  lotus_transform_password (block, checksum, s_lotus_magic_table);
}
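
/*
 * domino_big_md: core Domino message digest. Consumes the input in 16-byte
 * blocks, updating the state and a running checksum per block (mdtransform),
 * processes the final (already padded) block, and then folds the checksum in
 * with one extra mdtransform_norecalc.
 */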
static void domino_big_md (const u32 saved_key[16], const u32 size, u32 state[4], __local u32 *s_lotus_magic_table)
{
  u32 checksum[4];

  checksum[0] = 0;
  checksum[1] = 0;
  checksum[2] = 0;
  checksum[3] = 0;

  u32 block[4];

  block[0] = 0;
  block[1] = 0;
  block[2] = 0;
  block[3] = 0;

  u32 curpos;
  u32 idx;

  for (curpos = 0, idx = 0; curpos + 16 < size; curpos += 16, idx += 4)
  {
    block[0] = saved_key[idx + 0];
    block[1] = saved_key[idx + 1];
    block[2] = saved_key[idx + 2];
    block[3] = saved_key[idx + 3];

    mdtransform (state, checksum, block, s_lotus_magic_table);
  }

  u32 left = size - curpos;

  block[0] = saved_key[idx + 0];
  block[1] = saved_key[idx + 1];
  block[2] = saved_key[idx + 2];
  block[3] = saved_key[idx + 3];

  mdtransform (state, checksum, block, s_lotus_magic_table);

  mdtransform_norecalc (state, checksum, s_lotus_magic_table);
}

static void sha1_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[5])
{
  u32 A = digest[0];
  u32 B = digest[1];
  u32 C = digest[2];
  u32 D = digest[3];
  u32 E = digest[4];

  u32 w0_t = w0[0];
  u32 w1_t = w0[1];
  u32 w2_t = w0[2];
  u32 w3_t = w0[3];
  u32 w4_t = w1[0];
  u32 w5_t = w1[1];
  u32 w6_t = w1[2];
  u32 w7_t = w1[3];
  u32 w8_t = w2[0];
  u32 w9_t = w2[1];
  u32 wa_t = w2[2];
  u32 wb_t = w2[3];
  u32 wc_t = w3[0];
  u32 wd_t = w3[1];
  u32 we_t = w3[2];
  u32 wf_t = w3[3];

  #undef K
  #define K SHA1C00

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  #undef K
  #define K SHA1C01

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  #undef K
  #define K SHA1C02

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  #undef K
  #define K SHA1C03

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);

  digest[0] += A;
  digest[1] += B;
  digest[2] += C;
  digest[3] += D;
  digest[4] += E;
}

static void hmac_sha1_pad (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[5], u32 opad[5])
{
  w0[0] = w0[0] ^ 0x36363636;
  w0[1] = w0[1] ^ 0x36363636;
  w0[2] = w0[2] ^ 0x36363636;
  w0[3] = w0[3] ^ 0x36363636;
  w1[0] = w1[0] ^ 0x36363636;
  w1[1] = w1[1] ^ 0x36363636;
  w1[2] = w1[2] ^ 0x36363636;
  w1[3] = w1[3] ^ 0x36363636;
  w2[0] = w2[0] ^ 0x36363636;
  w2[1] = w2[1] ^ 0x36363636;
  w2[2] = w2[2] ^ 0x36363636;
  w2[3] = w2[3] ^ 0x36363636;
  w3[0] = w3[0] ^ 0x36363636;
  w3[1] = w3[1] ^ 0x36363636;
  w3[2] = w3[2] ^ 0x36363636;
  w3[3] = w3[3] ^ 0x36363636;

  ipad[0] = SHA1M_A;
  ipad[1] = SHA1M_B;
  ipad[2] = SHA1M_C;
  ipad[3] = SHA1M_D;
  ipad[4] = SHA1M_E;

  sha1_transform (w0, w1, w2, w3, ipad);

  w0[0] = w0[0] ^ 0x6a6a6a6a;
  w0[1] = w0[1] ^ 0x6a6a6a6a;
  w0[2] = w0[2] ^ 0x6a6a6a6a;
  w0[3] = w0[3] ^ 0x6a6a6a6a;
  w1[0] = w1[0] ^ 0x6a6a6a6a;
  w1[1] = w1[1] ^ 0x6a6a6a6a;
  w1[2] = w1[2] ^ 0x6a6a6a6a;
  w1[3] = w1[3] ^ 0x6a6a6a6a;
  w2[0] = w2[0] ^ 0x6a6a6a6a;
  w2[1] = w2[1] ^ 0x6a6a6a6a;
  w2[2] = w2[2] ^ 0x6a6a6a6a;
  w2[3] = w2[3] ^ 0x6a6a6a6a;
  w3[0] = w3[0] ^ 0x6a6a6a6a;
  w3[1] = w3[1] ^ 0x6a6a6a6a;
  w3[2] = w3[2] ^ 0x6a6a6a6a;
  w3[3] = w3[3] ^ 0x6a6a6a6a;

  opad[0] = SHA1M_A;
  opad[1] = SHA1M_B;
  opad[2] = SHA1M_C;
  opad[3] = SHA1M_D;
  opad[4] = SHA1M_E;

  sha1_transform (w0, w1, w2, w3, opad);
}

static void hmac_sha1_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[5], u32 opad[5], u32 digest[5])
{
  digest[0] = ipad[0];
  digest[1] = ipad[1];
  digest[2] = ipad[2];
  digest[3] = ipad[3];
  digest[4] = ipad[4];

  sha1_transform (w0, w1, w2, w3, digest);

  w0[0] = digest[0];
  w0[1] = digest[1];
  w0[2] = digest[2];
  w0[3] = digest[3];
  w1[0] = digest[4];
  w1[1] = 0x80000000;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = (64 + 20) * 8;

  digest[0] = opad[0];
  digest[1] = opad[1];
  digest[2] = opad[2];
  digest[3] = opad[3];
  digest[4] = opad[4];

  sha1_transform (w0, w1, w2, w3, digest);
}

static void base64_encode (u8 *base64_hash, const u32 len, const u8 *base64_plain)
{
  u8 *out_ptr = (u8 *) base64_hash;
  u8 *in_ptr  = (u8 *) base64_plain;

  u32 i;

  for (i = 0; i < len; i += 3)
  {
    char out_val0 = lotus64_table [                            ((in_ptr[0] >> 2) & 0x3f)];
    char out_val1 = lotus64_table [((in_ptr[0] << 4) & 0x30) | ((in_ptr[1] >> 4) & 0x0f)];
    char out_val2 = lotus64_table [((in_ptr[1] << 2) & 0x3c) | ((in_ptr[2] >> 6) & 0x03)];
    char out_val3 = lotus64_table [                            ((in_ptr[2] >> 0) & 0x3f)];

    out_ptr[0] = out_val0 & 0x7f;
    out_ptr[1] = out_val1 & 0x7f;
    out_ptr[2] = out_val2 & 0x7f;
    out_ptr[3] = out_val3 & 0x7f;

    in_ptr  += 3;
    out_ptr += 4;
  }
}
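
/*
 * lotus6_base64_encode: builds the 22-character Lotus 6 digest string
 * "(G<base64>)". Copies the 5 salt bytes (with the +4 adjustment on byte 3)
 * and the first 11 digest bytes into base64_plain, encodes it with the Lotus
 * base64 alphabet and wraps the result in "(G" ... ")".
 */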
static void lotus6_base64_encode (u8 base64_hash[24], const u32 salt0, const u32 salt1, u32 a, u32 b, u32 c)
{
  uchar4 salt0c = as_uchar4 (salt0);
  uchar4 salt1c = as_uchar4 (salt1);

  uchar4 ac;
  uchar4 bc;
  uchar4 cc;

  ac = as_uchar4 (a);
  bc = as_uchar4 (b);
  cc = as_uchar4 (c);

  u8 tmp[24]; // size 22 (=pw_len) is needed but base64 needs size divisible by 4

  /*
   * Copy $salt.$digest to a tmp buffer
   */

  u8 base64_plain[16];

  base64_plain[ 0] = salt0c.s0;
  base64_plain[ 1] = salt0c.s1;
  base64_plain[ 2] = salt0c.s2;
  base64_plain[ 3] = salt0c.s3;
  base64_plain[ 3] -= -4; // dont ask!
  base64_plain[ 4] = salt1c.s0;
  base64_plain[ 5] = ac.s0;
  base64_plain[ 6] = ac.s1;
  base64_plain[ 7] = ac.s2;
  base64_plain[ 8] = ac.s3;
  base64_plain[ 9] = bc.s0;
  base64_plain[10] = bc.s1;
  base64_plain[11] = bc.s2;
  base64_plain[12] = bc.s3;
  base64_plain[13] = cc.s0;
  base64_plain[14] = cc.s1;
  base64_plain[15] = cc.s2;

  /*
   * base64 encode the $salt.$digest string
   */

  base64_encode (tmp + 2, 14, base64_plain);

  base64_hash[ 0] = '(';
  base64_hash[ 1] = 'G';
  base64_hash[ 2] = tmp[ 2];
  base64_hash[ 3] = tmp[ 3];
  base64_hash[ 4] = tmp[ 4];
  base64_hash[ 5] = tmp[ 5];
  base64_hash[ 6] = tmp[ 6];
  base64_hash[ 7] = tmp[ 7];
  base64_hash[ 8] = tmp[ 8];
  base64_hash[ 9] = tmp[ 9];
  base64_hash[10] = tmp[10];
  base64_hash[11] = tmp[11];
  base64_hash[12] = tmp[12];
  base64_hash[13] = tmp[13];
  base64_hash[14] = tmp[14];
  base64_hash[15] = tmp[15];
  base64_hash[16] = tmp[16];
  base64_hash[17] = tmp[17];
  base64_hash[18] = tmp[18];
  base64_hash[19] = tmp[19];
  base64_hash[20] = tmp[20];
  base64_hash[21] = ')';
}
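
/*
 * m09100_init: Lotus Notes/Domino 8 (-m 9100). Computes the inner Lotus 6
 * hash (SEC_pwddigest_V2) of the password: domino_big_md over the padded
 * password, then a second domino_big_md over salt + '(' + the upper-hex
 * encoded digest. The result is base64 encoded into the 22-character
 * "(G...)" string, which becomes the PBKDF2-HMAC-SHA1 password: ipad/opad
 * and the first HMAC block over the salt are precomputed and stored in tmps
 * for the loop kernel.
 */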
__kernel void m09100_init (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global lotus8_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global wpa_t *wpa_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * sbox
   */

  __local u32 s_lotus_magic_table[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    s_lotus_magic_table[i] = lotus_magic_table[i];
  }

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'A' - 10 + i0) << 8
                 | ((i1 < 10) ? '0' + i1 : 'A' - 10 + i1) << 0;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 w[16];

  w[ 0] = pws[gid].i[ 0];
  w[ 1] = pws[gid].i[ 1];
  w[ 2] = pws[gid].i[ 2];
  w[ 3] = pws[gid].i[ 3];

  w[ 4] = pws[gid].i[ 4];
  w[ 5] = pws[gid].i[ 5];
  w[ 6] = pws[gid].i[ 6];
  w[ 7] = pws[gid].i[ 7];

  w[ 8] = pws[gid].i[ 8];
  w[ 9] = pws[gid].i[ 9];
  w[10] = pws[gid].i[10];
  w[11] = pws[gid].i[11];

  w[12] = pws[gid].i[12];
  w[13] = pws[gid].i[13];
  w[14] = pws[gid].i[14];
  w[15] = pws[gid].i[15];

  /**
   * pad
   */

  u32 pw_len = pws[gid].pw_len;

  if (pw_len < 16)
  {
    pad (&w[ 0], pw_len & 0xf);
  }
  else if (pw_len < 32)
  {
    pad (&w[ 4], pw_len & 0xf);
  }
  else if (pw_len < 48)
  {
    pad (&w[ 8], pw_len & 0xf);
  }
  else if (pw_len < 64)
  {
    pad (&w[12], pw_len & 0xf);
  }

  /**
   * salt
   */

  u32 salt_len = salt_bufs[salt_pos].salt_len;

  u32 salt_buf0[4];

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  u32 salt_buf1[4];

  salt_buf1[0] = 0x01000000;
  salt_buf1[1] = 0x00000080;
  salt_buf1[2] = 0;
  salt_buf1[3] = 0;

  u32 salt_buf2[4];

  salt_buf2[0] = 0;
  salt_buf2[1] = 0;
  salt_buf2[2] = 0;
  salt_buf2[3] = 0;

  u32 salt_buf3[4];

  salt_buf3[0] = 0;
  salt_buf3[1] = 0;
  salt_buf3[2] = 0;
  salt_buf3[3] = 0;

  const u32 salt0 = salt_buf0[0];
  const u32 salt1 = (salt_buf0[1] & 0xff) | ('(' << 8);

  /**
   * Lotus 6 hash - SEC_pwddigest_V2
   */

  u32 w_tmp[16];

  w_tmp[ 0] = w[ 0];
  w_tmp[ 1] = w[ 1];
  w_tmp[ 2] = w[ 2];
  w_tmp[ 3] = w[ 3];
  w_tmp[ 4] = w[ 4];
  w_tmp[ 5] = w[ 5];
  w_tmp[ 6] = w[ 6];
  w_tmp[ 7] = w[ 7];
  w_tmp[ 8] = w[ 8];
  w_tmp[ 9] = w[ 9];
  w_tmp[10] = w[10];
  w_tmp[11] = w[11];
  w_tmp[12] = w[12];
  w_tmp[13] = w[13];
  w_tmp[14] = w[14];
  w_tmp[15] = w[15];

  u32 state[4];

  state[0] = 0;
  state[1] = 0;
  state[2] = 0;
  state[3] = 0;

  domino_big_md (w_tmp, pw_len, state, s_lotus_magic_table);

  const u32 w0_t = uint_to_hex_upper8 ((state[0] >>  0) & 255) <<  0
                 | uint_to_hex_upper8 ((state[0] >>  8) & 255) << 16;
  const u32 w1_t = uint_to_hex_upper8 ((state[0] >> 16) & 255) <<  0
                 | uint_to_hex_upper8 ((state[0] >> 24) & 255) << 16;
  const u32 w2_t = uint_to_hex_upper8 ((state[1] >>  0) & 255) <<  0
                 | uint_to_hex_upper8 ((state[1] >>  8) & 255) << 16;
  const u32 w3_t = uint_to_hex_upper8 ((state[1] >> 16) & 255) <<  0
                 | uint_to_hex_upper8 ((state[1] >> 24) & 255) << 16;
  const u32 w4_t = uint_to_hex_upper8 ((state[2] >>  0) & 255) <<  0
                 | uint_to_hex_upper8 ((state[2] >>  8) & 255) << 16;
  const u32 w5_t = uint_to_hex_upper8 ((state[2] >> 16) & 255) <<  0
                 | uint_to_hex_upper8 ((state[2] >> 24) & 255) << 16;
  const u32 w6_t = uint_to_hex_upper8 ((state[3] >>  0) & 255) <<  0
                 | uint_to_hex_upper8 ((state[3] >>  8) & 255) << 16;

  const u32 pade = 0x0e0e0e0e;

  w_tmp[ 0] = salt0;
  w_tmp[ 1] = salt1      | w0_t << 16;
  w_tmp[ 2] = w0_t >> 16 | w1_t << 16;
  w_tmp[ 3] = w1_t >> 16 | w2_t << 16;
  w_tmp[ 4] = w2_t >> 16 | w3_t << 16;
  w_tmp[ 5] = w3_t >> 16 | w4_t << 16;
  w_tmp[ 6] = w4_t >> 16 | w5_t << 16;
  w_tmp[ 7] = w5_t >> 16 | w6_t << 16;
  w_tmp[ 8] = w6_t >> 16 | pade << 16;
  w_tmp[ 9] = pade;
  w_tmp[10] = pade;
  w_tmp[11] = pade;
  w_tmp[12] = 0;
  w_tmp[13] = 0;
  w_tmp[14] = 0;
  w_tmp[15] = 0;

  state[0] = 0;
  state[1] = 0;
  state[2] = 0;
  state[3] = 0;

  domino_big_md (w_tmp, 34, state, s_lotus_magic_table);

  u32 a = state[0];
  u32 b = state[1];
  u32 c = state[2];

  /**
   * Base64 encode
   */

  pw_len = 22;

  u8 base64_hash[22];

  lotus6_base64_encode (base64_hash, salt_buf0[0], salt_buf0[1], a, b, c);

  /**
   * PBKDF2 - HMACSHA1 - 1st iteration
   */

  u32 w0[4];
  u32 w1[4];
  u32 w2[4];
  u32 w3[4];

  w0[0] = (base64_hash[ 0] << 24) | (base64_hash[ 1] << 16) | (base64_hash[ 2] << 8) | base64_hash[ 3];
  w0[1] = (base64_hash[ 4] << 24) | (base64_hash[ 5] << 16) | (base64_hash[ 6] << 8) | base64_hash[ 7];
  w0[2] = (base64_hash[ 8] << 24) | (base64_hash[ 9] << 16) | (base64_hash[10] << 8) | base64_hash[11];
  w0[3] = (base64_hash[12] << 24) | (base64_hash[13] << 16) | (base64_hash[14] << 8) | base64_hash[15];
  w1[0] = (base64_hash[16] << 24) | (base64_hash[17] << 16) | (base64_hash[18] << 8) | base64_hash[19];
  w1[1] = (base64_hash[20] << 24) | (base64_hash[21] << 16);
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  /**
   * pads
   */

  u32 ipad[5];
  u32 opad[5];

  hmac_sha1_pad (w0, w1, w2, w3, ipad, opad);

  tmps[gid].ipad[0] = ipad[0];
  tmps[gid].ipad[1] = ipad[1];
  tmps[gid].ipad[2] = ipad[2];
  tmps[gid].ipad[3] = ipad[3];
  tmps[gid].ipad[4] = ipad[4];

  tmps[gid].opad[0] = opad[0];
  tmps[gid].opad[1] = opad[1];
  tmps[gid].opad[2] = opad[2];
  tmps[gid].opad[3] = opad[3];
  tmps[gid].opad[4] = opad[4];

  w0[0] = salt_buf0[0];
  w0[1] = salt_buf0[1];
  w0[2] = salt_buf0[2];
  w0[3] = salt_buf0[3];
  w1[0] = salt_buf1[0];
  w1[1] = salt_buf1[1];
  w1[2] = salt_buf1[2];
  w1[3] = salt_buf1[3];
  w2[0] = salt_buf2[0];
  w2[1] = salt_buf2[1];
  w2[2] = salt_buf2[2];
  w2[3] = salt_buf2[3];
  w3[0] = salt_buf3[0];
  w3[1] = salt_buf3[1];
  w3[2] = salt_buf3[2];
  //w3[3] = salt_buf3[3];

  w0[0] = swap32 (w0[0]);
  w0[1] = swap32 (w0[1]);
  w0[2] = swap32 (w0[2]);
  w0[3] = swap32 (w0[3]);
  w1[0] = swap32 (w1[0]);
  w1[1] = swap32 (w1[1]);
  w1[2] = swap32 (w1[2]);
  w1[3] = swap32 (w1[3]);
  w2[0] = swap32 (w2[0]);
  w2[1] = swap32 (w2[1]);
  w2[2] = swap32 (w2[2]);
  w2[3] = swap32 (w2[3]);
  w3[0] = swap32 (w3[0]);
  w3[1] = swap32 (w3[1]);
  w3[2] = swap32 (w3[2]);
  w3[3] = (64 + salt_len + 4) * 8;

  u32 dgst[5];

  hmac_sha1_run (w0, w1, w2, w3, ipad, opad, dgst);

  tmps[gid].dgst[0] = dgst[0];
  tmps[gid].dgst[1] = dgst[1];
  tmps[gid].dgst[2] = dgst[2];
  tmps[gid].dgst[3] = dgst[3];
  tmps[gid].dgst[4] = dgst[4];

  tmps[gid].out[0] = dgst[0];
  tmps[gid].out[1] = dgst[1];
  tmps[gid].out[2] = dgst[2];
  tmps[gid].out[3] = dgst[3];
  tmps[gid].out[4] = dgst[4];
}
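
/*
 * m09100_loop: standard PBKDF2-HMAC-SHA1 inner loop. Each iteration re-HMACs
 * the previous digest and XORs it into the accumulated output; dgst and out
 * are carried in tmps between kernel invocations.
 */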
__kernel void m09100_loop (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global lotus8_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global wpa_t *wpa_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32 ipad[5];
  u32 opad[5];

  ipad[0] = tmps[gid].ipad[0];
  ipad[1] = tmps[gid].ipad[1];
  ipad[2] = tmps[gid].ipad[2];
  ipad[3] = tmps[gid].ipad[3];
  ipad[4] = tmps[gid].ipad[4];

  opad[0] = tmps[gid].opad[0];
  opad[1] = tmps[gid].opad[1];
  opad[2] = tmps[gid].opad[2];
  opad[3] = tmps[gid].opad[3];
  opad[4] = tmps[gid].opad[4];

  u32 dgst[5];
  u32 out[5];

  dgst[0] = tmps[gid].dgst[0];
  dgst[1] = tmps[gid].dgst[1];
  dgst[2] = tmps[gid].dgst[2];
  dgst[3] = tmps[gid].dgst[3];
  dgst[4] = tmps[gid].dgst[4];

  out[0] = tmps[gid].out[0];
  out[1] = tmps[gid].out[1];
  out[2] = tmps[gid].out[2];
  out[3] = tmps[gid].out[3];
  out[4] = tmps[gid].out[4];

  for (u32 j = 0; j < loop_cnt; j++)
  {
    u32 w0[4];
    u32 w1[4];
    u32 w2[4];
    u32 w3[4];

    w0[0] = dgst[0];
    w0[1] = dgst[1];
    w0[2] = dgst[2];
    w0[3] = dgst[3];
    w1[0] = dgst[4];
    w1[1] = 0x80000000;
    w1[2] = 0;
    w1[3] = 0;
    w2[0] = 0;
    w2[1] = 0;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = (64 + 20) * 8;

    hmac_sha1_run (w0, w1, w2, w3, ipad, opad, dgst);

    out[0] ^= dgst[0];
    out[1] ^= dgst[1];
    out[2] ^= dgst[2];
    out[3] ^= dgst[3];
    out[4] ^= dgst[4];
  }

  tmps[gid].dgst[0] = dgst[0];
  tmps[gid].dgst[1] = dgst[1];
  tmps[gid].dgst[2] = dgst[2];
  tmps[gid].dgst[3] = dgst[3];
  tmps[gid].dgst[4] = dgst[4];

  tmps[gid].out[0] = out[0];
  tmps[gid].out[1] = out[1];
  tmps[gid].out[2] = out[2];
  tmps[gid].out[3] = out[3];
  tmps[gid].out[4] = out[4];
}
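
/*
 * m09100_comp: exposes the first two 32-bit words of the PBKDF2 output as
 * the comparable digest (r2/r3 are zero) and defers to the standard
 * multi-hash compare include.
 */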
__kernel void m09100_comp (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global lotus8_tmp_t *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global wpa_t *wpa_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 lid = get_local_id (0);

  /**
   * digest
   */

  const u32 r0 = tmps[gid].out[DGST_R0];
  const u32 r1 = tmps[gid].out[DGST_R1];
  const u32 r2 = 0;
  const u32 r3 = 0;

  #define il_pos 0

  #include COMPARE_M
}