path: root/xmrstak/backend/amd/amd_gpu/opencl
author     xmr-stak-devs <email@example.com>                 2018-03-25 13:21:57 +0100
committer  fireice-uk <fireice-uk@users.noreply.github.com>  2018-03-25 13:28:40 +0100
commit     1e7911e653a267ffd71199cdf7afaf1cfed5bad0 (patch)
tree       29efb953d9851b352298369104bf777fa3bf34e1 /xmrstak/backend/amd/amd_gpu/opencl
parent     5014bdda628f64ab780d02de371bac4997573d10 (diff)
download   xmr-stak-1e7911e653a267ffd71199cdf7afaf1cfed5bad0.zip
           xmr-stak-1e7911e653a267ffd71199cdf7afaf1cfed5bad0.tar.gz
XMR-Stak 2.3.0 RC
Co-authored-by: psychocrypt <psychocryptHPC@gmail.com>
Co-authored-by: fireice-uk <fireice-uk@users.noreply.github.com>
Co-authored-by: Lee Clagett <code@leeclagett.com>
Co-authored-by: curie-kief <curie-kief@users.noreply.github.com>
Diffstat (limited to 'xmrstak/backend/amd/amd_gpu/opencl')
-rw-r--r--  xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl  273
1 file changed, 260 insertions(+), 13 deletions(-)
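
Note on the recurring index change in the hunks below: the per-hash scratchpad offset is now derived from MEMORY (the scratchpad size in bytes) instead of ITERATIONS. A minimal C sketch of the intent, assuming each scratchpad element is a 16-byte uint4 and using an illustrative helper name that is not part of the commit:

    #include <stddef.h>

    /* Offset of thread gIdx into a flat array of uint4 scratchpad elements.
     * MEMORY >> 4 == MEMORY / sizeof(uint4). For classic cryptonight
     * (2 MiB scratchpad, 0x80000 iterations) the old ITERATIONS >> 2 term
     * happened to yield the same value; for cryptonight_heavy (4 MiB
     * scratchpad) only the MEMORY-based form is correct.
     */
    static size_t scratchpad_offset_uint4(size_t gIdx, size_t memory_bytes)
    {
        return gIdx * (memory_bytes >> 4);
    }
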
diff --git a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
index 9383b04..7a36357 100644
--- a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
+++ b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
@@ -433,8 +433,18 @@ inline ulong getIdx()
#endif
}
+inline uint4 mix_and_propagate(__local uint4 xin[8][WORKSIZE])
+{
+ return xin[(get_local_id(1)) % 8][get_local_id(0)] ^ xin[(get_local_id(1) + 1) % 8][get_local_id(0)];
+}
+
__attribute__((reqd_work_group_size(WORKSIZE, 8, 1)))
-__kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ulong *states, ulong Threads)
+__kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ulong *states, ulong Threads
+// cryptonight_heavy
+#if (ALGO == 4)
+ , uint version
+#endif
+)
{
ulong State[25];
uint ExpandedKey1[40];
@@ -464,11 +474,11 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
- Scratchpad += gIdx * (ITERATIONS >> 2);
+ Scratchpad += gIdx * (MEMORY >> 4);
#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
#elif(STRIDED_INDEX==2)
- Scratchpad += get_group_id(0) * (ITERATIONS >> 2) * WORKSIZE + MEM_CHUNK * get_local_id(0);
+ Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
((ulong8 *)State)[0] = vload8(0, input);
@@ -507,13 +517,41 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
}
mem_fence(CLK_LOCAL_MEM_FENCE);
+
+// cryptonight_heavy
+#if (ALGO == 4)
+ if(version >= 3)
+ {
+ __local uint4 xin[8][WORKSIZE];
+
+ /* Left-over threads also perform this loop.
+  * Their results are ignored.
+  */
+ for(size_t i=0; i < 16; i++)
+ {
+ #pragma unroll
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey1)[j]);
+ barrier(CLK_LOCAL_MEM_FENCE);
+ xin[get_local_id(1)][get_local_id(0)] = text;
+ barrier(CLK_LOCAL_MEM_FENCE);
+ text = mix_and_propagate(xin);
+ }
+ }
+#endif
+
#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
#endif
{
+ int iterations = MEMORY >> 7;
+#if (ALGO == 4)
+ if(version < 3)
+ iterations >>= 1;
+#endif
#pragma unroll 2
- for(int i = 0; i < (ITERATIONS >> 5); ++i)
+ for(int i = 0; i < iterations; ++i)
{
#pragma unroll
for(int j = 0; j < 10; ++j)
@@ -525,8 +563,22 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
mem_fence(CLK_GLOBAL_MEM_FENCE);
}
+#define VARIANT1_1(p) \
+ uint table = 0x75310U; \
+ uint index = (((p).s2 >> 26) & 12) | (((p).s2 >> 23) & 2); \
+ (p).s2 ^= ((table >> index) & 0x30U) << 24
+
+#define VARIANT1_2(p) ((uint2 *)&(p))[0] ^= tweak1_2
+
+#define VARIANT1_INIT() \
+ tweak1_2 = as_uint2(input[4]); \
+ tweak1_2.s0 >>= 24; \
+ tweak1_2.s0 |= tweak1_2.s1 << 8; \
+ tweak1_2.s1 = get_global_id(0); \
+ tweak1_2 ^= as_uint2(states[24])
+
__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
-__kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Threads)
+__kernel void cn1_monero(__global uint4 *Scratchpad, __global ulong *states, ulong Threads, __global ulong *input)
{
ulong a[2], b[2];
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
@@ -544,6 +596,7 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
barrier(CLK_LOCAL_MEM_FENCE);
+ uint2 tweak1_2;
uint4 b_x;
#if(COMP_MODE==1)
// do not use early return here
@@ -552,11 +605,11 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
{
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
- Scratchpad += gIdx * (ITERATIONS >> 2);
+ Scratchpad += gIdx * (MEMORY >> 4);
#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
#elif(STRIDED_INDEX==2)
- Scratchpad += get_group_id(0) * (ITERATIONS >> 2) * WORKSIZE + MEM_CHUNK * get_local_id(0);
+ Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
a[0] = states[0] ^ states[4];
@@ -565,6 +618,7 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
b[1] = states[3] ^ states[7];
b_x = ((uint4 *)b)[0];
+ VARIANT1_INIT();
}
mem_fence(CLK_LOCAL_MEM_FENCE);
@@ -581,9 +635,10 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
((uint4 *)c)[0] = Scratchpad[IDX((a[0] & MASK) >> 4)];
((uint4 *)c)[0] = AES_Round(AES0, AES1, AES2, AES3, ((uint4 *)c)[0], ((uint4 *)a)[0]);
- //b_x ^= ((uint4 *)c)[0];
- Scratchpad[IDX((a[0] & MASK) >> 4)] = b_x ^ ((uint4 *)c)[0];
+ b_x ^= ((uint4 *)c)[0];
+ VARIANT1_1(b_x);
+ Scratchpad[IDX((a[0] & MASK) >> 4)] = b_x;
uint4 tmp;
tmp = Scratchpad[IDX((c[0] & MASK) >> 4)];
@@ -591,18 +646,129 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
a[1] += c[0] * as_ulong2(tmp).s0;
a[0] += mul_hi(c[0], as_ulong2(tmp).s0);
+ VARIANT1_2(a[1]);
Scratchpad[IDX((c[0] & MASK) >> 4)] = ((uint4 *)a)[0];
+ VARIANT1_2(a[1]);
+
+ ((uint4 *)a)[0] ^= tmp;
+
+ b_x = ((uint4 *)c)[0];
+ }
+ }
+ mem_fence(CLK_GLOBAL_MEM_FENCE);
+}
+
+__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
+__kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Threads
+// cryptonight_heavy
+#if (ALGO == 4)
+ , uint version
+#endif
+)
+{
+ ulong a[2], b[2];
+ __local uint AES0[256], AES1[256], AES2[256], AES3[256];
+
+ const ulong gIdx = getIdx();
+
+ for(int i = get_local_id(0); i < 256; i += WORKSIZE)
+ {
+ const uint tmp = AES0_C[i];
+ AES0[i] = tmp;
+ AES1[i] = rotate(tmp, 8U);
+ AES2[i] = rotate(tmp, 16U);
+ AES3[i] = rotate(tmp, 24U);
+ }
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ uint4 b_x;
+#if(COMP_MODE==1)
+ // do not use early return here
+ if(gIdx < Threads)
+#endif
+ {
+ states += 25 * gIdx;
+#if(STRIDED_INDEX==0)
+ Scratchpad += gIdx * (MEMORY >> 4);
+#elif(STRIDED_INDEX==1)
+ Scratchpad += gIdx;
+#elif(STRIDED_INDEX==2)
+ Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
+#endif
+
+ a[0] = states[0] ^ states[4];
+ b[0] = states[2] ^ states[6];
+ a[1] = states[1] ^ states[5];
+ b[1] = states[3] ^ states[7];
+
+ b_x = ((uint4 *)b)[0];
+ }
+
+ mem_fence(CLK_LOCAL_MEM_FENCE);
+
+#if(COMP_MODE==1)
+ // do not use early return here
+ if(gIdx < Threads)
+#endif
+ {
+ ulong idx0 = a[0];
+ ulong mask = MASK;
+
+ int iterations = ITERATIONS;
+#if (ALGO == 4)
+ if(version < 3)
+ {
+ iterations <<= 1;
+ mask -= 0x200000;
+ }
+#endif
+ #pragma unroll 8
+ for(int i = 0; i < iterations; ++i)
+ {
+ ulong c[2];
+
+ ((uint4 *)c)[0] = Scratchpad[IDX((idx0 & mask) >> 4)];
+ ((uint4 *)c)[0] = AES_Round(AES0, AES1, AES2, AES3, ((uint4 *)c)[0], ((uint4 *)a)[0]);
+ //b_x ^= ((uint4 *)c)[0];
+
+ Scratchpad[IDX((idx0 & mask) >> 4)] = b_x ^ ((uint4 *)c)[0];
+
+ uint4 tmp;
+ tmp = Scratchpad[IDX((c[0] & mask) >> 4)];
+
+ a[1] += c[0] * as_ulong2(tmp).s0;
+ a[0] += mul_hi(c[0], as_ulong2(tmp).s0);
+
+ Scratchpad[IDX((c[0] & mask) >> 4)] = ((uint4 *)a)[0];
((uint4 *)a)[0] ^= tmp;
+ idx0 = a[0];
b_x = ((uint4 *)c)[0];
+// cryptonight_heavy
+#if (ALGO == 4)
+ if(version >= 3)
+ {
+ long n = *((__global long*)(Scratchpad + (IDX((idx0 & mask) >> 4))));
+ int d = ((__global int*)(Scratchpad + (IDX((idx0 & mask) >> 4))))[2];
+ long q = n / (d | 0x5);
+ *((__global long*)(Scratchpad + (IDX((idx0 & mask) >> 4)))) = n ^ q;
+ idx0 = d ^ q;
+ }
+#endif
}
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
}
__attribute__((reqd_work_group_size(WORKSIZE, 8, 1)))
-__kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global uint *Branch0, __global uint *Branch1, __global uint *Branch2, __global uint *Branch3, ulong Threads)
+__kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global uint *Branch0, __global uint *Branch1, __global uint *Branch2, __global uint *Branch3, ulong Threads
+// cryptonight_heavy
+#if (ALGO == 4)
+ , uint version
+#endif
+ )
{
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
uint ExpandedKey2[40];
@@ -631,11 +797,11 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
{
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
- Scratchpad += gIdx * (ITERATIONS >> 2);
+ Scratchpad += gIdx * (MEMORY >> 4);
#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
#elif(STRIDED_INDEX==2)
- Scratchpad += get_group_id(0) * (ITERATIONS >> 2) * WORKSIZE + MEM_CHUNK * get_local_id(0);
+ Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
#if defined(__Tahiti__) || defined(__Pitcairn__)
@@ -655,13 +821,67 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
barrier(CLK_LOCAL_MEM_FENCE);
+#if (ALGO == 4)
+ __local uint4 xin[8][WORKSIZE];
+#endif
+
#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
#endif
{
+ int iterations = MEMORY >> 7;
+#if (ALGO == 4)
+ if(version < 3)
+ {
+ iterations >>= 1;
+ #pragma unroll 2
+ for(int i = 0; i < iterations; ++i)
+ {
+ text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
+
+ #pragma unroll 10
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+ }
+ }
+ else
+ {
+ #pragma unroll 2
+ for(int i = 0; i < iterations; ++i)
+ {
+ text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
+
+ #pragma unroll 10
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+ xin[get_local_id(1)][get_local_id(0)] = text;
+ barrier(CLK_LOCAL_MEM_FENCE);
+ text = mix_and_propagate(xin);
+ }
+
+ #pragma unroll 2
+ for(int i = 0; i < iterations; ++i)
+ {
+ text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
+
+ #pragma unroll 10
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+ xin[get_local_id(1)][get_local_id(0)] = text;
+ barrier(CLK_LOCAL_MEM_FENCE);
+ text = mix_and_propagate(xin);
+ }
+ }
+#else
#pragma unroll 2
- for(int i = 0; i < (ITERATIONS >> 5); ++i)
+ for(int i = 0; i < iterations; ++i)
{
text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
@@ -669,7 +889,34 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
for(int j = 0; j < 10; ++j)
text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
}
+#endif
+ }
+
+// cryptonight_heavy
+#if (ALGO == 4)
+ if(version >= 3)
+ {
+ /* Left-over threads also perform this loop.
+  * Their results are ignored.
+  */
+ for(size_t i=0; i < 16; i++)
+ {
+ #pragma unroll
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+ barrier(CLK_LOCAL_MEM_FENCE);
+ xin[get_local_id(1)][get_local_id(0)] = text;
+ barrier(CLK_LOCAL_MEM_FENCE);
+ text = mix_and_propagate(xin);
+ }
+ }
+#endif
+#if(COMP_MODE==1)
+ // do not use early return here
+ if(gIdx < Threads)
+#endif
+ {
vstore2(as_ulong2(text), get_local_id(1) + 4, states);
}
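
For reference, the extra per-iteration step that the new cn1 kernel runs for cryptonight_heavy (ALGO == 4, version >= 3) can be sketched as plain scalar C; scratchpad, idx0 and mask mirror the kernel variables, the IDX() striding is ignored, and the OR with 0x5 only guarantees a non-zero divisor. This is an illustration, not code from the commit:

    #include <stdint.h>
    #include <string.h>

    /* One cryptonight_heavy mixing step on a 16-byte scratchpad line:
     * read a 64-bit value n and a 32-bit value d from the line selected by
     * idx0, divide, XOR the quotient back into the line, and derive the
     * next index from d and the quotient.
     */
    static uint64_t heavy_mix_step(uint8_t *scratchpad, uint64_t idx0, uint64_t mask)
    {
        uint8_t *line = scratchpad + ((idx0 & mask) >> 4) * 16;
        int64_t n;
        int32_t d;
        memcpy(&n, line, sizeof n);      /* bytes 0..7 of the line  */
        memcpy(&d, line + 8, sizeof d);  /* bytes 8..11 of the line */
        int64_t q = n / (int64_t)(d | 0x5);
        n ^= q;
        memcpy(line, &n, sizeof n);
        return (uint64_t)((int64_t)d ^ q);  /* becomes the new idx0 */
    }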