Diffstat (limited to 'xmrstak/backend/amd/amd_gpu')
-rw-r--r--  xmrstak/backend/amd/amd_gpu/gpu.cpp                355
-rw-r--r--  xmrstak/backend/amd/amd_gpu/gpu.hpp                  9
-rw-r--r--  xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl  432
-rw-r--r--  xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl    76
4 files changed, 703 insertions(+), 169 deletions(-)
diff --git a/xmrstak/backend/amd/amd_gpu/gpu.cpp b/xmrstak/backend/amd/amd_gpu/gpu.cpp
index d9bc962..8d0fd32 100644
--- a/xmrstak/backend/amd/amd_gpu/gpu.cpp
+++ b/xmrstak/backend/amd/amd_gpu/gpu.cpp
@@ -15,6 +15,7 @@
#include "xmrstak/backend/cryptonight.hpp"
#include "xmrstak/jconf.hpp"
+#include "xmrstak/picosha2/picosha2.hpp"
#include <stdio.h>
#include <string.h>
@@ -25,8 +26,41 @@
#include <regex>
#include <cassert>
+#include <fstream>
+#include <sstream>
+#include <vector>
+#include <string>
+#include <iostream>
+
+#if defined _MSC_VER
+#include <direct.h>
+#elif defined __GNUC__
+#include <sys/types.h>
+#include <sys/stat.h>
+#endif
+
+
+
#ifdef _WIN32
#include <windows.h>
+#include <Shlobj.h>
+
+static inline void create_directory(std::string dirname)
+{
+ _mkdir(dirname.data());
+}
+
+static inline std::string get_home()
+{
+ char path[MAX_PATH + 1];
+ // get folder "appdata\local"
+ if (SHGetSpecialFolderPathA(HWND_DESKTOP, path, CSIDL_LOCAL_APPDATA, FALSE))
+ {
+ return path;
+ }
+ else
+ return ".";
+}
static inline void port_sleep(size_t sec)
{
@@ -34,6 +68,22 @@ static inline void port_sleep(size_t sec)
}
#else
#include <unistd.h>
+#include <pwd.h>
+
+static inline void create_directory(std::string dirname)
+{
+ mkdir(dirname.data(), 0744);
+}
+
+static inline std::string get_home()
+{
+ const char *home = ".";
+
+ if ((home = getenv("HOME")) == nullptr)
+ home = getpwuid(getuid())->pw_dir;
+
+ return home;
+}
static inline void port_sleep(size_t sec)
{
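
Both platform branches end up exposing the same two helpers, so the cache-directory handling added later in InitOpenCL stays platform-neutral. A minimal stand-alone sketch of the POSIX path (names suffixed _sketch are illustrative, not part of the patch):

    // Sketch: build the OpenCL cache directory with helpers equivalent to the ones above.
    #include <cstdlib>
    #include <pwd.h>
    #include <string>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>

    static std::string get_home_sketch()
    {
        const char* home = std::getenv("HOME");
        if(home == nullptr)
        {
            const passwd* pw = getpwuid(getuid()); // fall back to the passwd database
            home = (pw != nullptr) ? pw->pw_dir : ".";
        }
        return home;
    }

    int main()
    {
        const std::string cache_dir = get_home_sketch() + "/.openclcache";
        mkdir(cache_dir.c_str(), 0744); // EEXIST simply means the directory already exists
        return 0;
    }
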
@@ -84,6 +134,7 @@ const char* err_to_str(cl_int ret)
return "CL_MISALIGNED_SUB_BUFFER_OFFSET";
case CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST:
return "CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST";
+#ifdef CL_VERSION_1_2
case CL_COMPILE_PROGRAM_FAILURE:
return "CL_COMPILE_PROGRAM_FAILURE";
case CL_LINKER_NOT_AVAILABLE:
@@ -94,6 +145,7 @@ const char* err_to_str(cl_int ret)
return "CL_DEVICE_PARTITION_FAILED";
case CL_KERNEL_ARG_INFO_NOT_AVAILABLE:
return "CL_KERNEL_ARG_INFO_NOT_AVAILABLE";
+#endif
case CL_INVALID_VALUE:
return "CL_INVALID_VALUE";
case CL_INVALID_DEVICE_TYPE:
@@ -164,6 +216,7 @@ const char* err_to_str(cl_int ret)
return "CL_INVALID_GLOBAL_WORK_SIZE";
case CL_INVALID_PROPERTY:
return "CL_INVALID_PROPERTY";
+#ifdef CL_VERSION_1_2
case CL_INVALID_IMAGE_DESCRIPTOR:
return "CL_INVALID_IMAGE_DESCRIPTOR";
case CL_INVALID_COMPILER_OPTIONS:
@@ -172,6 +225,7 @@ const char* err_to_str(cl_int ret)
return "CL_INVALID_LINKER_OPTIONS";
case CL_INVALID_DEVICE_PARTITION_COUNT:
return "CL_INVALID_DEVICE_PARTITION_COUNT";
+#endif
#if defined(CL_VERSION_2_0) && !defined(CONF_ENFORCE_OpenCL_1_2)
case CL_INVALID_PIPE_SIZE:
return "CL_INVALID_PIPE_SIZE";
@@ -252,21 +306,9 @@ size_t InitOpenCLGpu(cl_context opencl_ctx, GpuContext* ctx, const char* source_
return ERR_OCL_API;
}
- size_t hashMemSize;
- int threadMemMask;
- int hasIterations;
- if(::jconf::inst()->IsCurrencyMonero())
- {
- hashMemSize = MONERO_MEMORY;
- threadMemMask = MONERO_MASK;
- hasIterations = MONERO_ITER;
- }
- else
- {
- hashMemSize = AEON_MEMORY;
- threadMemMask = AEON_MASK;
- hasIterations = AEON_ITER;
- }
+ size_t hashMemSize = cn_select_memory(::jconf::inst()->GetMiningAlgo());
+ int threadMemMask = cn_select_mask(::jconf::inst()->GetMiningAlgo());
+ int hashIterations = cn_select_iter(::jconf::inst()->GetMiningAlgo());
size_t g_thd = ctx->rawIntensity;
ctx->ExtraBuffers[0] = clCreateBuffer(opencl_ctx, CL_MEM_READ_WRITE, hashMemSize * g_thd, NULL, &ret);
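
The cn_select_* helpers replace the removed Monero/AEON branch with a per-algorithm lookup. A hedged sketch of the idea, using the constants the old branch encoded (the enum and function names here are illustrative, not the project's exact definitions):

    // Sketch: per-algorithm scratchpad size, iteration count and address mask.
    #include <cstddef>

    enum xmrstak_algo_sketch { algo_cryptonight, algo_cryptonight_lite };

    constexpr std::size_t cn_select_memory_sketch(xmrstak_algo_sketch a)
    { return a == algo_cryptonight_lite ? (1u << 20) : (2u << 20); }   // AEON 1 MiB, Monero 2 MiB

    constexpr int cn_select_iter_sketch(xmrstak_algo_sketch a)
    { return a == algo_cryptonight_lite ? 0x40000 : 0x80000; }

    constexpr int cn_select_mask_sketch(xmrstak_algo_sketch a)
    { return a == algo_cryptonight_lite ? 0xFFFF0 : 0x1FFFF0; }

    static_assert(cn_select_memory_sketch(algo_cryptonight) == (2u << 20), "Monero scratchpad");
    static_assert(cn_select_memory_sketch(algo_cryptonight_lite) == (1u << 20), "AEON scratchpad");
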
@@ -323,60 +365,162 @@ size_t InitOpenCLGpu(cl_context opencl_ctx, GpuContext* ctx, const char* source_
return ERR_OCL_API;
}
- ctx->Program = clCreateProgramWithSource(opencl_ctx, 1, (const char**)&source_code, NULL, &ret);
- if(ret != CL_SUCCESS)
+ std::vector<char> devNameVec(1024);
+ if((ret = clGetDeviceInfo(ctx->DeviceID, CL_DEVICE_NAME, devNameVec.size(), devNameVec.data(), NULL)) != CL_SUCCESS)
{
- printer::inst()->print_msg(L1,"Error %s when calling clCreateProgramWithSource on the contents of cryptonight.cl", err_to_str(ret));
+ printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get CL_DEVICE_NAME for device %u.", err_to_str(ret),ctx->deviceIdx );
return ERR_OCL_API;
}
- char options[256];
- snprintf(options, sizeof(options),
- "-DITERATIONS=%d -DMASK=%d -DWORKSIZE=%llu -DSTRIDED_INDEX=%d",
- hasIterations, threadMemMask, int_port(ctx->workSize), ctx->stridedIndex ? 1 : 0);
- ret = clBuildProgram(ctx->Program, 1, &ctx->DeviceID, options, NULL, NULL);
- if(ret != CL_SUCCESS)
+ auto miner_algo = ::jconf::inst()->GetMiningAlgo();
+
+ char options[512];
+ snprintf(options, sizeof(options),
+ "-DITERATIONS=%d -DMASK=%d -DWORKSIZE=%llu -DSTRIDED_INDEX=%d -DMEM_CHUNK_EXPONENT=%d -DCOMP_MODE=%d -DMEMORY=%llu -DALGO=%d",
+ hashIterations, threadMemMask, int_port(ctx->workSize), ctx->stridedIndex, int(1u<<ctx->memChunk), ctx->compMode ? 1 : 0,
+ int_port(hashMemSize), int(miner_algo));
+ /* create a hash for the compile time cache
+ * used data:
+ * - source code
+ * - device name
+ * - compile parameters
+ */
+ std::string src_str(source_code);
+ src_str += options;
+ src_str += devNameVec.data();
+ std::string hash_hex_str;
+ picosha2::hash256_hex_string(src_str, hash_hex_str);
+
+ std::string cache_file = get_home() + "/.openclcache/" + hash_hex_str + ".openclbin";
+ std::ifstream clBinFile(cache_file, std::ofstream::in | std::ofstream::binary);
+ if(!clBinFile.good())
{
- size_t len;
- printer::inst()->print_msg(L1,"Error %s when calling clBuildProgram.", err_to_str(ret));
+ printer::inst()->print_msg(L1,"OpenCL device %u - Precompiled code %s not found. Compiling ...",ctx->deviceIdx, cache_file.c_str());
+ ctx->Program = clCreateProgramWithSource(opencl_ctx, 1, (const char**)&source_code, NULL, &ret);
+ if(ret != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clCreateProgramWithSource on the OpenCL miner code", err_to_str(ret));
+ return ERR_OCL_API;
+ }
- if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_LOG, 0, NULL, &len)) != CL_SUCCESS)
+ ret = clBuildProgram(ctx->Program, 1, &ctx->DeviceID, options, NULL, NULL);
+ if(ret != CL_SUCCESS)
{
- printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for length of build log output.", err_to_str(ret));
+ size_t len;
+ printer::inst()->print_msg(L1,"Error %s when calling clBuildProgram.", err_to_str(ret));
+
+ if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_LOG, 0, NULL, &len)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for length of build log output.", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+
+ char* BuildLog = (char*)malloc(len + 1);
+ BuildLog[0] = '\0';
+
+ if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_LOG, len, BuildLog, NULL)) != CL_SUCCESS)
+ {
+ free(BuildLog);
+ printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for build log.", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+
+ printer::inst()->print_str("Build log:\n");
+ std::cerr<<BuildLog<<std::endl;
+
+ free(BuildLog);
return ERR_OCL_API;
}
- char* BuildLog = (char*)malloc(len + 1);
- BuildLog[0] = '\0';
+ cl_uint num_devices;
+ clGetProgramInfo(ctx->Program, CL_PROGRAM_NUM_DEVICES, sizeof(cl_uint), &num_devices,NULL);
+
- if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_LOG, len, BuildLog, NULL)) != CL_SUCCESS)
+ std::vector<cl_device_id> devices_ids(num_devices);
+ clGetProgramInfo(ctx->Program, CL_PROGRAM_DEVICES, sizeof(cl_device_id)* devices_ids.size(), devices_ids.data(),NULL);
+ int dev_id = 0;
+ /* Search for the gpu within the program context.
+ * The id can be different to ctx->DeviceID.
+ */
+ for(auto & ocl_device : devices_ids)
{
- free(BuildLog);
- printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for build log.", err_to_str(ret));
+ if(ocl_device == ctx->DeviceID)
+ break;
+ dev_id++;
+ }
+
+ cl_build_status status;
+ do
+ {
+ if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_STATUS, sizeof(cl_build_status), &status, NULL)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for status of build.", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+ port_sleep(1);
+ }
+ while(status == CL_BUILD_IN_PROGRESS);
+
+ std::vector<size_t> binary_sizes(num_devices);
+ clGetProgramInfo (ctx->Program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t) * binary_sizes.size(), binary_sizes.data(), NULL);
+
+ std::vector<char*> all_programs(num_devices);
+ std::vector<std::vector<char>> program_storage;
+
+ int p_id = 0;
+ size_t mem_size = 0;
+ // create memory structure to query all OpenCL program binaries
+ for(auto & p : all_programs)
+ {
+ program_storage.emplace_back(std::vector<char>(binary_sizes[p_id]));
+ all_programs[p_id] = program_storage[p_id].data();
+ mem_size += binary_sizes[p_id];
+ p_id++;
+ }
+
+ if((ret = clGetProgramInfo(ctx->Program, CL_PROGRAM_BINARIES, num_devices * sizeof(char*), all_programs.data(), NULL)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clGetProgramInfo.", err_to_str(ret));
return ERR_OCL_API;
}
-
- printer::inst()->print_str("Build log:\n");
- std::cerr<<BuildLog<<std::endl;
- free(BuildLog);
- return ERR_OCL_API;
+ std::ofstream file_stream;
+ std::cout<<get_home() + "/.openclcache/" + hash_hex_str + ".openclbin"<<std::endl;
+ file_stream.open(cache_file, std::ofstream::out | std::ofstream::binary);
+ file_stream.write(all_programs[dev_id], binary_sizes[dev_id]);
+ file_stream.close();
+ printer::inst()->print_msg(L1, "OpenCL device %u - Precompiled code stored in file %s",ctx->deviceIdx, cache_file.c_str());
}
-
- cl_build_status status;
- do
+ else
{
- if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_STATUS, sizeof(cl_build_status), &status, NULL)) != CL_SUCCESS)
+ printer::inst()->print_msg(L1, "OpenCL device %u - Loading precompiled code from file %s",ctx->deviceIdx, cache_file.c_str());
+ std::ostringstream ss;
+ ss << clBinFile.rdbuf();
+ std::string s = ss.str();
+
+ size_t bin_size = s.size();
+ auto data_ptr = s.data();
+
+ cl_int clStatus;
+ ctx->Program = clCreateProgramWithBinary(
+ opencl_ctx, 1, &ctx->DeviceID, &bin_size,
+ (const unsigned char **)&data_ptr, &clStatus, &ret
+ );
+ if(ret != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clCreateProgramWithBinary. Try to delete file %s", err_to_str(ret), cache_file.c_str());
+ return ERR_OCL_API;
+ }
+ ret = clBuildProgram(ctx->Program, 1, &ctx->DeviceID, NULL, NULL, NULL);
+ if(ret != CL_SUCCESS)
{
- printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for status of build.", err_to_str(ret));
+ printer::inst()->print_msg(L1,"Error %s when calling clBuildProgram. Try to delete file %s", err_to_str(ret), cache_file.c_str());
return ERR_OCL_API;
}
- port_sleep(1);
}
- while(status == CL_BUILD_IN_PROGRESS);
- const char *KernelNames[] = { "cn0", "cn1", "cn2", "Blake", "Groestl", "JH", "Skein" };
- for(int i = 0; i < 7; ++i)
+ const char *KernelNames[] = { "cn0", "cn1", "cn2", "Blake", "Groestl", "JH", "Skein", "cn1_monero" };
+ for(int i = 0; i < 8; ++i)
{
ctx->Kernels[i] = clCreateKernel(ctx->Program, KernelNames[i], &ret);
if(ret != CL_SUCCESS)
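
The cache key built in the hunk above is a SHA-256 over the concatenation of kernel source, compile options and device name, so changing any option or updating to a driver that reports a different device string invalidates the cached binary. A condensed host-side sketch of the key derivation (the helper name is hypothetical; picosha2 is the header the patch adds at the top of the file):

    // Sketch: derive the .openclbin cache path from source + options + device name.
    #include "xmrstak/picosha2/picosha2.hpp"
    #include <string>

    static std::string cache_file_for(const std::string& source_code,
                                      const std::string& options,
                                      const std::string& device_name,
                                      const std::string& home)
    {
        std::string key = source_code + options + device_name;
        std::string hash_hex;
        picosha2::hash256_hex_string(key, hash_hex);   // same call used above
        return home + "/.openclcache/" + hash_hex + ".openclbin";
    }

If the file exists it is handed to clCreateProgramWithBinary; otherwise the program is built from source and the binary returned by clGetProgramInfo(CL_PROGRAM_BINARIES) for the matching device is written out under that name.
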
@@ -487,7 +631,7 @@ std::vector<GpuContext> getAMDDevices(int index)
printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get the device vendor name for device %u.", err_to_str(clStatus), k);
continue;
}
-
+
std::string devVendor(devVendorVec.data());
if( devVendor.find("Advanced Micro Devices") != std::string::npos || devVendor.find("AMD") != std::string::npos)
{
@@ -518,13 +662,13 @@ std::vector<GpuContext> getAMDDevices(int index)
printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get CL_DEVICE_NAME for device %u.", err_to_str(clStatus), k);
continue;
}
- printer::inst()->print_msg(L0,"Found OpenCL GPU %s.",ctx.name.c_str());
// if environment variable GPU_SINGLE_ALLOC_PERCENT is not set we can not allocate the full memory
ctx.deviceIdx = k;
ctx.freeMem = std::min(ctx.freeMem, maxMem);
ctx.name = std::string(devNameVec.data());
ctx.DeviceID = device_list[k];
+ printer::inst()->print_msg(L0,"Found OpenCL GPU %s.",ctx.name.c_str());
ctxVec.push_back(ctx);
}
}
@@ -549,6 +693,8 @@ int getAMDPlatformIdx()
clStatus = clGetPlatformIDs(numPlatforms, platforms, NULL);
int platformIndex = -1;
+ // Mesa OpenCL is the fallback if no AMD or Apple OpenCL is found
+ int mesaPlatform = -1;
if(clStatus == CL_SUCCESS)
{
@@ -559,13 +705,29 @@ int getAMDPlatformIdx()
clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR, infoSize, platformNameVec.data(), NULL);
std::string platformName(platformNameVec.data());
- if( platformName.find("Advanced Micro Devices") != std::string::npos || platformName.find("Apple") != std::string::npos)
+ if( platformName.find("Advanced Micro Devices") != std::string::npos ||
+ platformName.find("Apple") != std::string::npos ||
+ platformName.find("Mesa") != std::string::npos
+ )
{
- platformIndex = i;
+
printer::inst()->print_msg(L0,"Found AMD platform index id = %i, name = %s",i , platformName.c_str());
- break;
+ if(platformName.find("Mesa") != std::string::npos)
+ mesaPlatform = i;
+ else
+ {
+ // exit if AMD or Apple platform is found
+ platformIndex = i;
+ break;
+ }
}
}
+ // fall back to Mesa OpenCL
+ if(platformIndex == -1 && mesaPlatform != -1)
+ {
+ printer::inst()->print_msg(L0,"No AMD platform found, selecting Mesa as the OpenCL platform");
+ platformIndex = mesaPlatform;
+ }
}
else
printer::inst()->print_msg(L1,"WARNING: %s when calling clGetPlatformIDs for platform information.", err_to_str(clStatus));
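
The selection rule in the hunk above is: take the first AMD or Apple platform immediately, and remember a Mesa platform only as a last resort. A compact sketch of that priority (function name hypothetical):

    // Sketch: platform priority used above — AMD/Apple win outright, Mesa is the fallback.
    #include <string>
    #include <vector>

    static int pick_platform_sketch(const std::vector<std::string>& names)
    {
        int mesa = -1;
        for(int i = 0; i < static_cast<int>(names.size()); ++i)
        {
            if(names[i].find("Advanced Micro Devices") != std::string::npos ||
               names[i].find("Apple") != std::string::npos)
                return i;                                  // preferred vendor: stop here
            if(names[i].find("Mesa") != std::string::npos)
                mesa = i;                                  // keep as fallback only
        }
        return mesa;                                       // -1 if nothing usable was found
    }
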
@@ -694,8 +856,18 @@ size_t InitOpenCL(GpuContext* ctx, size_t num_gpus, size_t platform_idx)
source_code = std::regex_replace(source_code, std::regex("XMRSTAK_INCLUDE_BLAKE256"), blake256CL);
source_code = std::regex_replace(source_code, std::regex("XMRSTAK_INCLUDE_GROESTL256"), groestl256CL);
+ // create a directory for the OpenCL compile cache
+ create_directory(get_home() + "/.openclcache");
+
for(int i = 0; i < num_gpus; ++i)
{
+ if(ctx[i].stridedIndex == 2 && (ctx[i].rawIntensity % ctx[i].workSize) != 0)
+ {
+ size_t reduced_intensity = (ctx[i].rawIntensity / ctx[i].workSize) * ctx[i].workSize;
+ ctx[i].rawIntensity = reduced_intensity;
+ printer::inst()->print_msg(L0, "WARNING AMD: gpu %d intensity is not a multiple of 'worksize', automatically reducing intensity to %d", ctx[i].deviceIdx, int(reduced_intensity));
+ }
+
if((ret = InitOpenCLGpu(opencl_ctx, &ctx[i], source_code.c_str())) != ERR_SUCCESS)
{
return ret;
@@ -705,7 +877,7 @@ size_t InitOpenCL(GpuContext* ctx, size_t num_gpus, size_t platform_idx)
return ERR_SUCCESS;
}
-size_t XMRSetJob(GpuContext* ctx, uint8_t* input, size_t input_len, uint64_t target)
+size_t XMRSetJob(GpuContext* ctx, uint8_t* input, size_t input_len, uint64_t target, xmrstak_algo miner_algo, uint32_t version)
{
cl_int ret;
@@ -750,29 +922,65 @@ size_t XMRSetJob(GpuContext* ctx, uint8_t* input, size_t input_len, uint64_t tar
return(ERR_OCL_API);
}
- // CN2 Kernel
+ if(miner_algo == cryptonight_heavy)
+ {
+ // version
+ if ((ret = clSetKernelArg(ctx->Kernels[0], 4, sizeof(cl_uint), &version)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1, "Error %s when calling clSetKernelArg for kernel 0, argument 4.", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+ }
+
+ // CN1 Kernel
+
+ /// @todo only activate if currency is monero
+ int cn_kernel_offset = 0;
+ if(miner_algo == cryptonight_monero && version >= 7)
+ {
+ cn_kernel_offset = 6;
+ }
// Scratchpads
- if((ret = clSetKernelArg(ctx->Kernels[1], 0, sizeof(cl_mem), ctx->ExtraBuffers + 0)) != CL_SUCCESS)
+ if((ret = clSetKernelArg(ctx->Kernels[1 + cn_kernel_offset], 0, sizeof(cl_mem), ctx->ExtraBuffers + 0)) != CL_SUCCESS)
{
printer::inst()->print_msg(L1,"Error %s when calling clSetKernelArg for kernel 1, argument 0.", err_to_str(ret));
return ERR_OCL_API;
}
// States
- if((ret = clSetKernelArg(ctx->Kernels[1], 1, sizeof(cl_mem), ctx->ExtraBuffers + 1)) != CL_SUCCESS)
+ if((ret = clSetKernelArg(ctx->Kernels[1 + cn_kernel_offset], 1, sizeof(cl_mem), ctx->ExtraBuffers + 1)) != CL_SUCCESS)
{
printer::inst()->print_msg(L1,"Error %s when calling clSetKernelArg for kernel 1, argument 1.", err_to_str(ret));
return ERR_OCL_API;
}
// Threads
- if((ret = clSetKernelArg(ctx->Kernels[1], 2, sizeof(cl_ulong), &numThreads)) != CL_SUCCESS)
+ if((ret = clSetKernelArg(ctx->Kernels[1 + cn_kernel_offset], 2, sizeof(cl_ulong), &numThreads)) != CL_SUCCESS)
{
printer::inst()->print_msg(L1,"Error %s when calling clSetKernelArg for kernel 1, argument 2.", err_to_str(ret));
return(ERR_OCL_API);
}
+ if(miner_algo == cryptonight_monero && version >= 7)
+ {
+ // Input
+ if ((ret = clSetKernelArg(ctx->Kernels[1 + cn_kernel_offset], 3, sizeof(cl_mem), &ctx->InputBuffer)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1, "Error %s when calling clSetKernelArg for kernel 1, argument 3 (input buffer).", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+ }
+ else if(miner_algo == cryptonight_heavy)
+ {
+ // version
+ if ((ret = clSetKernelArg(ctx->Kernels[1], 3, sizeof(cl_uint), &version)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1, "Error %s when calling clSetKernelArg for kernel 1, argument 3 (version).", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+ }
+
// CN3 Kernel
// Scratchpads
if((ret = clSetKernelArg(ctx->Kernels[2], 0, sizeof(cl_mem), ctx->ExtraBuffers + 0)) != CL_SUCCESS)
@@ -823,6 +1031,16 @@ size_t XMRSetJob(GpuContext* ctx, uint8_t* input, size_t input_len, uint64_t tar
return(ERR_OCL_API);
}
+ if(miner_algo == cryptonight_heavy)
+ {
+ // version
+ if ((ret = clSetKernelArg(ctx->Kernels[2], 7, sizeof(cl_uint), &version)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1, "Error %s when calling clSetKernelArg for kernel 2, argument 7.", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+ }
+
for(int i = 0; i < 4; ++i)
{
// States
@@ -857,7 +1075,7 @@ size_t XMRSetJob(GpuContext* ctx, uint8_t* input, size_t input_len, uint64_t tar
return ERR_SUCCESS;
}
-size_t XMRRunJob(GpuContext* ctx, cl_uint* HashOutput)
+size_t XMRRunJob(GpuContext* ctx, cl_uint* HashOutput, xmrstak_algo miner_algo, uint32_t version)
{
cl_int ret;
cl_uint zero = 0;
@@ -866,10 +1084,15 @@ size_t XMRRunJob(GpuContext* ctx, cl_uint* HashOutput)
size_t g_intensity = ctx->rawIntensity;
size_t w_size = ctx->workSize;
- // round up to next multiple of w_size
- size_t g_thd = ((g_intensity + w_size - 1u) / w_size) * w_size;
- // number of global threads must be a multiple of the work group size (w_size)
- assert(g_thd%w_size == 0);
+ size_t g_thd = g_intensity;
+
+ if(ctx->compMode)
+ {
+ // round up to next multiple of w_size
+ g_thd = ((g_intensity + w_size - 1u) / w_size) * w_size;
+ // number of global threads must be a multiple of the work group size (w_size)
+ assert(g_thd%w_size == 0);
+ }
for(int i = 2; i < 6; ++i)
{
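
With compMode enabled the global thread count is padded up to the next multiple of the work-group size (the opposite direction of the intensity clamp in InitOpenCL), so the kernels must guard against the extra threads — which is exactly what the COMP_MODE==1 checks in cryptonight.cl do. A sketch of the rounding:

    // Sketch: round the global size up to a multiple of the work-group size.
    #include <cassert>
    #include <cstddef>

    static std::size_t round_up_sketch(std::size_t g_intensity, std::size_t w_size)
    {
        const std::size_t g_thd = ((g_intensity + w_size - 1u) / w_size) * w_size;
        assert(g_thd % w_size == 0);     // e.g. 1000 threads with w_size 16 become 1008
        return g_thd;
    }
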
@@ -905,7 +1128,13 @@ size_t XMRRunJob(GpuContext* ctx, cl_uint* HashOutput)
}*/
size_t tmpNonce = ctx->Nonce;
- if((ret = clEnqueueNDRangeKernel(ctx->CommandQueues, ctx->Kernels[1], 1, &tmpNonce, &g_thd, &w_size, 0, NULL, NULL)) != CL_SUCCESS)
+ /// @todo only activate if currency is monero
+ int cn_kernel_offset = 0;
+ if(miner_algo == cryptonight_monero && version >= 7)
+ {
+ cn_kernel_offset = 6;
+ }
+ if((ret = clEnqueueNDRangeKernel(ctx->CommandQueues, ctx->Kernels[1 + cn_kernel_offset], 1, &tmpNonce, &g_thd, &w_size, 0, NULL, NULL)) != CL_SUCCESS)
{
printer::inst()->print_msg(L1,"Error %s when calling clEnqueueNDRangeKernel for kernel %d.", err_to_str(ret), 1);
return ERR_OCL_API;
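
XMRSetJob and XMRRunJob now carry the mining algorithm and the block major version so the host can pick the right kernel variant per job. A hypothetical caller (the results[] layout — count in the last slot, nonces before it — is assumed here, not shown in this diff):

    // Sketch: driving one job with the extended signatures.
    #include "xmrstak/backend/amd/amd_gpu/gpu.hpp"
    #include <cstddef>
    #include <cstdint>

    void run_once_sketch(GpuContext* ctx, uint8_t* blob, size_t blob_len,
                         uint64_t target, xmrstak_algo algo, uint32_t block_version)
    {
        cl_uint results[0x100] = { 0 };
        if(XMRSetJob(ctx, blob, blob_len, target, algo, block_version) != ERR_SUCCESS)
            return;
        XMRRunJob(ctx, results, algo, block_version);
        // assumed convention: results[0xFF] holds the number of nonces found
    }
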
diff --git a/xmrstak/backend/amd/amd_gpu/gpu.hpp b/xmrstak/backend/amd/amd_gpu/gpu.hpp
index c17bac1..a387b15 100644
--- a/xmrstak/backend/amd/amd_gpu/gpu.hpp
+++ b/xmrstak/backend/amd/amd_gpu/gpu.hpp
@@ -1,6 +1,7 @@
#pragma once
#include "xmrstak/misc/console.hpp"
+#include "xmrstak/jconf.hpp"
#if defined(__APPLE__)
#include <OpenCL/cl.h>
@@ -25,6 +26,8 @@ struct GpuContext
size_t rawIntensity;
size_t workSize;
int stridedIndex;
+ int memChunk;
+ int compMode;
/*Output vars*/
cl_device_id DeviceID;
@@ -33,7 +36,7 @@ struct GpuContext
cl_mem OutputBuffer;
cl_mem ExtraBuffers[6];
cl_program Program;
- cl_kernel Kernels[7];
+ cl_kernel Kernels[8];
size_t freeMem;
int computeUnits;
std::string name;
@@ -47,7 +50,7 @@ int getAMDPlatformIdx();
std::vector<GpuContext> getAMDDevices(int index);
size_t InitOpenCL(GpuContext* ctx, size_t num_gpus, size_t platform_idx);
-size_t XMRSetJob(GpuContext* ctx, uint8_t* input, size_t input_len, uint64_t target);
-size_t XMRRunJob(GpuContext* ctx, cl_uint* HashOutput);
+size_t XMRSetJob(GpuContext* ctx, uint8_t* input, size_t input_len, uint64_t target, xmrstak_algo miner_algo, uint32_t version);
+size_t XMRRunJob(GpuContext* ctx, cl_uint* HashOutput, xmrstak_algo miner_algo, uint32_t version);
diff --git a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
index 255fcbb..7a36357 100644
--- a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
+++ b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
@@ -14,6 +14,11 @@ R"===(
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+/* For Mesa clover support */
+#ifdef cl_clang_storage_class_specifiers
+# pragma OPENCL EXTENSION cl_clang_storage_class_specifiers : enable
+#endif
+
#ifdef cl_amd_media_ops
#pragma OPENCL EXTENSION cl_amd_media_ops : enable
#else
@@ -399,7 +404,7 @@ static const __constant uchar rcon[8] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x
void AESExpandKey256(uint *keybuf)
{
//#pragma unroll 4
- for(uint c = 8, i = 1; c < 60; ++c)
+ for(uint c = 8, i = 1; c < 40; ++c)
{
// For 256-bit keys, an sbox permutation is done every other 4th uint generated, AND every 8th
uint t = ((!(c & 7)) || ((c & 7) == 4)) ? SubWord(keybuf[c - 1]) : keybuf[c - 1];
@@ -411,21 +416,42 @@ void AESExpandKey256(uint *keybuf)
}
}
+#define MEM_CHUNK (1<<MEM_CHUNK_EXPONENT)
+
#if(STRIDED_INDEX==0)
# define IDX(x) (x)
-#else
+#elif(STRIDED_INDEX==1)
# define IDX(x) ((x) * (Threads))
+#elif(STRIDED_INDEX==2)
+# define IDX(x) (((x) % MEM_CHUNK) + ((x) / MEM_CHUNK) * WORKSIZE * MEM_CHUNK)
#endif
+inline ulong getIdx()
+{
+#if(STRIDED_INDEX==0 || STRIDED_INDEX==1 || STRIDED_INDEX==2)
+ return get_global_id(0) - get_global_offset(0);
+#endif
+}
+
+inline uint4 mix_and_propagate(__local uint4 xin[8][WORKSIZE])
+{
+ return xin[(get_local_id(1)) % 8][get_local_id(0)] ^ xin[(get_local_id(1) + 1) % 8][get_local_id(0)];
+}
+
__attribute__((reqd_work_group_size(WORKSIZE, 8, 1)))
-__kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ulong *states, ulong Threads)
+__kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ulong *states, ulong Threads
+// cryptonight_heavy
+#if (ALGO == 4)
+ , uint version
+#endif
+)
{
ulong State[25];
- uint ExpandedKey1[256];
+ uint ExpandedKey1[40];
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
uint4 text;
- const ulong gIdx = get_global_id(0) - get_global_offset(0);
+ const ulong gIdx = getIdx();
for(int i = get_local_id(1) * WORKSIZE + get_local_id(0);
i < 256;
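
The hunk above introduces three scratchpad layouts: STRIDED_INDEX==0 keeps each thread's scratchpad contiguous, ==1 interleaves element i of every thread, and ==2 interleaves in chunks of 2^MEM_CHUNK_EXPONENT 16-byte entries per work group. The same index arithmetic written as host-side C++ (for a single thread; the kernel additionally offsets the Scratchpad base pointer per thread or per group):

    // Sketch: the three IDX() layouts, as scalar index arithmetic.
    #include <cstddef>

    static std::size_t idx_linear(std::size_t x)                              // STRIDED_INDEX == 0
    { return x; }

    static std::size_t idx_interleaved(std::size_t x, std::size_t threads)    // STRIDED_INDEX == 1
    { return x * threads; }

    static std::size_t idx_chunked(std::size_t x, std::size_t mem_chunk,
                                   std::size_t worksize)                      // STRIDED_INDEX == 2
    { return (x % mem_chunk) + (x / mem_chunk) * worksize * mem_chunk; }
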
@@ -439,16 +465,20 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
}
barrier(CLK_LOCAL_MEM_FENCE);
-
+
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
- Scratchpad += gIdx * (ITERATIONS >> 2);
-#else
+ Scratchpad += gIdx * (MEMORY >> 4);
+#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
+#elif(STRIDED_INDEX==2)
+ Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
((ulong8 *)State)[0] = vload8(0, input);
@@ -470,9 +500,10 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
-
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
#pragma unroll
for(int i = 0; i < 25; ++i) states[i] = State[i];
@@ -486,12 +517,41 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
}
mem_fence(CLK_LOCAL_MEM_FENCE);
+
+// cryptonight_heavy
+#if (ALGO == 4)
+ if(version >= 3)
+ {
+ __local uint4 xin[8][WORKSIZE];
+
+ /* Left-over threads also perform this loop;
+ * their results are ignored.
+ */
+ for(size_t i=0; i < 16; i++)
+ {
+ #pragma unroll
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey1)[j]);
+ barrier(CLK_LOCAL_MEM_FENCE);
+ xin[get_local_id(1)][get_local_id(0)] = text;
+ barrier(CLK_LOCAL_MEM_FENCE);
+ text = mix_and_propagate(xin);
+ }
+ }
+#endif
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
+ int iterations = MEMORY >> 7;
+#if (ALGO == 4)
+ if(version < 3)
+ iterations >>= 1;
+#endif
#pragma unroll 2
- for(int i = 0; i < (ITERATIONS >> 5); ++i)
+ for(int i = 0; i < iterations; ++i)
{
#pragma unroll
for(int j = 0; j < 10; ++j)
@@ -503,13 +563,27 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
mem_fence(CLK_GLOBAL_MEM_FENCE);
}
+#define VARIANT1_1(p) \
+ uint table = 0x75310U; \
+ uint index = (((p).s2 >> 26) & 12) | (((p).s2 >> 23) & 2); \
+ (p).s2 ^= ((table >> index) & 0x30U) << 24
+
+#define VARIANT1_2(p) ((uint2 *)&(p))[0] ^= tweak1_2
+
+#define VARIANT1_INIT() \
+ tweak1_2 = as_uint2(input[4]); \
+ tweak1_2.s0 >>= 24; \
+ tweak1_2.s0 |= tweak1_2.s1 << 8; \
+ tweak1_2.s1 = get_global_id(0); \
+ tweak1_2 ^= as_uint2(states[24])
+
__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
-__kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Threads)
+__kernel void cn1_monero(__global uint4 *Scratchpad, __global ulong *states, ulong Threads, __global ulong *input)
{
ulong a[2], b[2];
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
- const ulong gIdx = get_global_id(0) - get_global_offset(0);
+ const ulong gIdx = getIdx();
for(int i = get_local_id(0); i < 256; i += WORKSIZE)
{
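
The VARIANT1_* macros implement the monero v7 tweak: tweak1_2 is derived from the block template and states[24], VARIANT1_2 xors it into the value written back to the scratchpad, and VARIANT1_1 rewrites two bits of the top byte of the stored word through a nibble table packed into 0x75310. A scalar sketch of VARIANT1_1 for a single 32-bit word:

    // Sketch: scalar equivalent of the VARIANT1_1 table trick above.
    #include <cstdint>

    static uint32_t variant1_1_sketch(uint32_t w)
    {
        const uint32_t table = 0x75310U;
        const uint32_t index = ((w >> 26) & 12) | ((w >> 23) & 2); // picks a 4-bit table entry
        return w ^ (((table >> index) & 0x30U) << 24);             // flips bits 28/29 per table
    }
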
@@ -522,16 +596,20 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
barrier(CLK_LOCAL_MEM_FENCE);
+ uint2 tweak1_2;
uint4 b_x;
-
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
- Scratchpad += gIdx * (ITERATIONS >> 2);
-#else
+ Scratchpad += gIdx * (MEMORY >> 4);
+#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
+#elif(STRIDED_INDEX==2)
+ Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
a[0] = states[0] ^ states[4];
@@ -540,12 +618,15 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
b[1] = states[3] ^ states[7];
b_x = ((uint4 *)b)[0];
+ VARIANT1_INIT();
}
mem_fence(CLK_LOCAL_MEM_FENCE);
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
#pragma unroll 8
for(int i = 0; i < ITERATIONS; ++i)
@@ -554,9 +635,10 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
((uint4 *)c)[0] = Scratchpad[IDX((a[0] & MASK) >> 4)];
((uint4 *)c)[0] = AES_Round(AES0, AES1, AES2, AES3, ((uint4 *)c)[0], ((uint4 *)a)[0]);
- //b_x ^= ((uint4 *)c)[0];
- Scratchpad[IDX((a[0] & MASK) >> 4)] = b_x ^ ((uint4 *)c)[0];
+ b_x ^= ((uint4 *)c)[0];
+ VARIANT1_1(b_x);
+ Scratchpad[IDX((a[0] & MASK) >> 4)] = b_x;
uint4 tmp;
tmp = Scratchpad[IDX((c[0] & MASK) >> 4)];
@@ -564,25 +646,136 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
a[1] += c[0] * as_ulong2(tmp).s0;
a[0] += mul_hi(c[0], as_ulong2(tmp).s0);
+ VARIANT1_2(a[1]);
Scratchpad[IDX((c[0] & MASK) >> 4)] = ((uint4 *)a)[0];
+ VARIANT1_2(a[1]);
+
+ ((uint4 *)a)[0] ^= tmp;
+
+ b_x = ((uint4 *)c)[0];
+ }
+ }
+ mem_fence(CLK_GLOBAL_MEM_FENCE);
+}
+
+__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
+__kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Threads
+// cryptonight_heavy
+#if (ALGO == 4)
+ , uint version
+#endif
+)
+{
+ ulong a[2], b[2];
+ __local uint AES0[256], AES1[256], AES2[256], AES3[256];
+
+ const ulong gIdx = getIdx();
+
+ for(int i = get_local_id(0); i < 256; i += WORKSIZE)
+ {
+ const uint tmp = AES0_C[i];
+ AES0[i] = tmp;
+ AES1[i] = rotate(tmp, 8U);
+ AES2[i] = rotate(tmp, 16U);
+ AES3[i] = rotate(tmp, 24U);
+ }
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ uint4 b_x;
+#if(COMP_MODE==1)
+ // do not use early return here
+ if(gIdx < Threads)
+#endif
+ {
+ states += 25 * gIdx;
+#if(STRIDED_INDEX==0)
+ Scratchpad += gIdx * (MEMORY >> 4);
+#elif(STRIDED_INDEX==1)
+ Scratchpad += gIdx;
+#elif(STRIDED_INDEX==2)
+ Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
+#endif
+
+ a[0] = states[0] ^ states[4];
+ b[0] = states[2] ^ states[6];
+ a[1] = states[1] ^ states[5];
+ b[1] = states[3] ^ states[7];
+
+ b_x = ((uint4 *)b)[0];
+ }
+
+ mem_fence(CLK_LOCAL_MEM_FENCE);
+
+#if(COMP_MODE==1)
+ // do not use early return here
+ if(gIdx < Threads)
+#endif
+ {
+ ulong idx0 = a[0];
+ ulong mask = MASK;
+
+ int iterations = ITERATIONS;
+#if (ALGO == 4)
+ if(version < 3)
+ {
+ iterations <<= 1;
+ mask -= 0x200000;
+ }
+#endif
+ #pragma unroll 8
+ for(int i = 0; i < iterations; ++i)
+ {
+ ulong c[2];
+
+ ((uint4 *)c)[0] = Scratchpad[IDX((idx0 & mask) >> 4)];
+ ((uint4 *)c)[0] = AES_Round(AES0, AES1, AES2, AES3, ((uint4 *)c)[0], ((uint4 *)a)[0]);
+ //b_x ^= ((uint4 *)c)[0];
+
+ Scratchpad[IDX((idx0 & mask) >> 4)] = b_x ^ ((uint4 *)c)[0];
+
+ uint4 tmp;
+ tmp = Scratchpad[IDX((c[0] & mask) >> 4)];
+
+ a[1] += c[0] * as_ulong2(tmp).s0;
+ a[0] += mul_hi(c[0], as_ulong2(tmp).s0);
+
+ Scratchpad[IDX((c[0] & mask) >> 4)] = ((uint4 *)a)[0];
((uint4 *)a)[0] ^= tmp;
+ idx0 = a[0];
b_x = ((uint4 *)c)[0];
+// cryptonight_heavy
+#if (ALGO == 4)
+ if(version >= 3)
+ {
+ long n = *((__global long*)(Scratchpad + (IDX((idx0 & mask) >> 4))));
+ int d = ((__global int*)(Scratchpad + (IDX((idx0 & mask) >> 4))))[2];
+ long q = n / (d | 0x5);
+ *((__global long*)(Scratchpad + (IDX((idx0 & mask) >> 4)))) = n ^ q;
+ idx0 = d ^ q;
+ }
+#endif
}
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
}
__attribute__((reqd_work_group_size(WORKSIZE, 8, 1)))
-__kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global uint *Branch0, __global uint *Branch1, __global uint *Branch2, __global uint *Branch3, ulong Threads)
+__kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global uint *Branch0, __global uint *Branch1, __global uint *Branch2, __global uint *Branch3, ulong Threads
+// cryptonight_heavy
+#if (ALGO == 4)
+ , uint version
+#endif
+ )
{
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
- uint ExpandedKey2[256];
+ uint ExpandedKey2[40];
ulong State[25];
uint4 text;
- const ulong gIdx = get_global_id(0) - get_global_offset(0);
+ const ulong gIdx = getIdx();
for(int i = get_local_id(1) * WORKSIZE + get_local_id(0);
i < 256;
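
For cryptonight-heavy (version >= 3) the main loop in the hunk above gains an extra step: a signed 64-bit division over data read from the scratchpad, whose result is xored back into the scratchpad and also seeds the next read index. A scalar sketch of that step, operating on one 16-byte scratchpad entry:

    // Sketch: the cryptonight-heavy division step from the cn1 loop above.
    #include <cstdint>
    #include <cstring>

    static uint64_t heavy_div_step_sketch(uint8_t* entry /* 16-byte scratchpad cell */)
    {
        int64_t n;
        int32_t d;
        std::memcpy(&n, entry, sizeof n);         // first 8 bytes of the cell
        std::memcpy(&d, entry + 8, sizeof d);     // third 32-bit word of the cell
        const int64_t q = n / (d | 0x5);          // | 0x5 keeps the divisor non-zero
        n ^= q;
        std::memcpy(entry, &n, sizeof n);         // mixed value goes back into the scratchpad
        return static_cast<uint64_t>(d ^ q);      // becomes the next index seed (idx0)
    }
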
@@ -597,14 +790,18 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
barrier(CLK_LOCAL_MEM_FENCE);
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
- Scratchpad += gIdx * (ITERATIONS >> 2);
-#else
+ Scratchpad += gIdx * (MEMORY >> 4);
+#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
+#elif(STRIDED_INDEX==2)
+ Scratchpad += get_group_id(0) * (MEMORY >> 4) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
#if defined(__Tahiti__) || defined(__Pitcairn__)
@@ -624,26 +821,111 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
barrier(CLK_LOCAL_MEM_FENCE);
+#if (ALGO == 4)
+ __local uint4 xin[8][WORKSIZE];
+#endif
+
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
+ int iterations = MEMORY >> 7;
+#if (ALGO == 4)
+ if(version < 3)
+ {
+ iterations >>= 1;
+ #pragma unroll 2
+ for(int i = 0; i < iterations; ++i)
+ {
+ text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
+
+ #pragma unroll 10
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+ }
+ }
+ else
+ {
+ #pragma unroll 2
+ for(int i = 0; i < iterations; ++i)
+ {
+ text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
+
+ #pragma unroll 10
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+ xin[get_local_id(1)][get_local_id(0)] = text;
+ barrier(CLK_LOCAL_MEM_FENCE);
+ text = mix_and_propagate(xin);
+ }
+
+ #pragma unroll 2
+ for(int i = 0; i < iterations; ++i)
+ {
+ text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
+
+ #pragma unroll 10
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+ xin[get_local_id(1)][get_local_id(0)] = text;
+ barrier(CLK_LOCAL_MEM_FENCE);
+ text = mix_and_propagate(xin);
+ }
+ }
+#else
#pragma unroll 2
- for(int i = 0; i < (ITERATIONS >> 5); ++i)
+ for(int i = 0; i < iterations; ++i)
{
text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
+ #pragma unroll 10
+ for(int j = 0; j < 10; ++j)
+ text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+ }
+#endif
+ }
+
+// cryptonight_heavy
+#if (ALGO == 4)
+ if(version >= 3)
+ {
+ /* Left-over threads also perform this loop;
+ * their results are ignored.
+ */
+ for(size_t i=0; i < 16; i++)
+ {
#pragma unroll
for(int j = 0; j < 10; ++j)
text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
+ barrier(CLK_LOCAL_MEM_FENCE);
+ xin[get_local_id(1)][get_local_id(0)] = text;
+ barrier(CLK_LOCAL_MEM_FENCE);
+ text = mix_and_propagate(xin);
}
+ }
+#endif
+#if(COMP_MODE==1)
+ // do not use early return here
+ if(gIdx < Threads)
+#endif
+ {
vstore2(as_ulong2(text), get_local_id(1) + 4, states);
}
barrier(CLK_GLOBAL_MEM_FENCE);
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
if(!get_local_id(1))
{
@@ -653,21 +935,11 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
for(int i = 0; i < 25; ++i) states[i] = State[i];
- switch(State[0] & 3)
- {
- case 0:
- Branch0[atomic_inc(Branch0 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 1:
- Branch1[atomic_inc(Branch1 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 2:
- Branch2[atomic_inc(Branch2 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 3:
- Branch3[atomic_inc(Branch3 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- }
+ ulong StateSwitch = State[0] & 3;
+ __global uint *destinationBranch1 = StateSwitch == 0 ? Branch0 : Branch1;
+ __global uint *destinationBranch2 = StateSwitch == 2 ? Branch2 : Branch3;
+ __global uint *destinationBranch = StateSwitch < 2 ? destinationBranch1 : destinationBranch2;
+ destinationBranch[atomic_inc(destinationBranch + Threads)] = gIdx;
}
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
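
The removed switch and the new ternary chain select the same branch buffer; the rewrite just avoids divergent control flow around the atomic_inc. A small check of the mapping for all four values of State[0] & 3:

    // Sketch: the branchless selection above agrees with the old switch for every case.
    #include <cassert>
    #include <cstdint>

    static int select_branch_sketch(uint64_t state0)
    {
        const uint64_t s = state0 & 3;
        const int b1 = (s == 0) ? 0 : 1;   // destinationBranch1: Branch0 or Branch1
        const int b2 = (s == 2) ? 2 : 3;   // destinationBranch2: Branch2 or Branch3
        return (s < 2) ? b1 : b2;
    }

    int main()
    {
        for(uint64_t s = 0; s < 4; ++s)
            assert(select_branch_sketch(s) == static_cast<int>(s)); // 0,1,2,3 map to themselves
        return 0;
    }
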
@@ -704,8 +976,7 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
for(uint i = 0; i < 4; ++i)
{
- if(i < 3) t[0] += 0x40UL;
- else t[0] += 0x08UL;
+ t[0] += i < 3 ? 0x40UL : 0x08UL;
t[2] = t[0] ^ t[1];
@@ -715,8 +986,7 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
h = m ^ p;
- if(i < 2) t[1] = 0x3000000000000000UL;
- else t[1] = 0xB000000000000000UL;
+ t[1] = i < 2 ? 0x3000000000000000UL : 0xB000000000000000UL;
}
t[0] = 0x08UL;
@@ -744,6 +1014,27 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
#define SWAP8(x) as_ulong(as_uchar8(x).s76543210)
+#define JHXOR \
+ h0h ^= input[0]; \
+ h0l ^= input[1]; \
+ h1h ^= input[2]; \
+ h1l ^= input[3]; \
+ h2h ^= input[4]; \
+ h2l ^= input[5]; \
+ h3h ^= input[6]; \
+ h3l ^= input[7]; \
+\
+ E8; \
+\
+ h4h ^= input[0]; \
+ h4l ^= input[1]; \
+ h5h ^= input[2]; \
+ h5l ^= input[3]; \
+ h6h ^= input[4]; \
+ h6l ^= input[5]; \
+ h7h ^= input[6]; \
+ h7l ^= input[7]
+
__kernel void JH(__global ulong *states, __global uint *BranchBuf, __global uint *output, ulong Target, ulong Threads)
{
const uint idx = get_global_id(0) - get_global_offset(0);
@@ -757,46 +1048,27 @@ __kernel void JH(__global ulong *states, __global uint *BranchBuf, __global uint
sph_u64 h4h = 0x754D2E7F8996A371UL, h4l = 0x62E27DF70849141DUL, h5h = 0x948F2476F7957627UL, h5l = 0x6C29804757B6D587UL, h6h = 0x6C0D8EAC2D275E5CUL, h6l = 0x0F7A0557C6508451UL, h7h = 0xEA12247067D3E47BUL, h7l = 0x69D71CD313ABE389UL;
sph_u64 tmp;
- for(int i = 0; i < 5; ++i)
+ for(int i = 0; i < 3; ++i)
{
ulong input[8];
- if(i < 3)
- {
- for(int x = 0; x < 8; ++x) input[x] = (states[(i << 3) + x]);
- }
- else if(i == 3)
- {
- input[0] = (states[24]);
- input[1] = 0x80UL;
- for(int x = 2; x < 8; ++x) input[x] = 0x00UL;
- }
- else
- {
- input[7] = 0x4006000000000000UL;
-
- for(int x = 0; x < 7; ++x) input[x] = 0x00UL;
- }
-
- h0h ^= input[0];
- h0l ^= input[1];
- h1h ^= input[2];
- h1l ^= input[3];
- h2h ^= input[4];
- h2l ^= input[5];
- h3h ^= input[6];
- h3l ^= input[7];
-
- E8;
-
- h4h ^= input[0];
- h4l ^= input[1];
- h5h ^= input[2];
- h5l ^= input[3];
- h6h ^= input[4];
- h6l ^= input[5];
- h7h ^= input[6];
- h7l ^= input[7];
+ const int shifted = i << 3;
+ for(int x = 0; x < 8; ++x) input[x] = (states[shifted + x]);
+ JHXOR;
+ }
+ {
+ ulong input[8];
+ input[0] = (states[24]);
+ input[1] = 0x80UL;
+ #pragma unroll 6
+ for(int x = 2; x < 8; ++x) input[x] = 0x00UL;
+ JHXOR;
+ }
+ {
+ ulong input[8];
+ for(int x = 0; x < 7; ++x) input[x] = 0x00UL;
+ input[7] = 0x4006000000000000UL;
+ JHXOR;
}
//output[0] = h6h;
@@ -832,6 +1104,7 @@ __kernel void Blake(__global ulong *states, __global uint *BranchBuf, __global u
((uint8 *)h)[0] = vload8(0U, c_IV256);
+ #pragma unroll 4
for(uint i = 0, bitlen = 0; i < 4; ++i)
{
if(i < 3)
@@ -907,6 +1180,7 @@ __kernel void Groestl(__global ulong *states, __global uint *BranchBuf, __global
State[7] = 0x0001000000000000UL;
+ #pragma unroll 4
for(uint i = 0; i < 4; ++i)
{
ulong H[8], M[8];
diff --git a/xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl b/xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl
index 868757b..279b652 100644
--- a/xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl
+++ b/xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl
@@ -3,6 +3,7 @@ R"===(
#define WOLF_SKEIN_CL
// Vectorized Skein implementation macros and functions by Wolf
+// Updated by taisel
#define SKEIN_KS_PARITY 0x1BD11BDAA9FC1A22
@@ -22,11 +23,11 @@ static const __constant ulong SKEIN512_256_IV[8] =
0xC36FBAF9393AD185UL, 0x3EEDBA1833EDFC13UL
};
-#define SKEIN_INJECT_KEY(p, s) do { \
+#define SKEIN_INJECT_KEY(p, s, q) do { \
p += h; \
- p.s5 += t[s % 3]; \
- p.s6 += t[(s + 1) % 3]; \
- p.s7 += s; \
+ p.s5 += t[s]; \
+ p.s6 += t[select(s + 1U, 0U, s == 2U)]; \
+ p.s7 += q; \
} while(0)
ulong SKEIN_ROT(const uint2 x, const uint y)
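
SKEIN_INJECT_KEY now takes the reduced tweak index s (always 0, 1 or 2) separately from the subkey number q, so the two modulo operations of the old macro disappear; select(s + 1U, 0U, s == 2U) is just (s + 1) % 3 written without a division. A quick equivalence check:

    // Sketch: the new index arguments reproduce the old t[s % 3] / t[(s + 1) % 3] pattern.
    #include <cassert>

    int main()
    {
        for(unsigned s = 0; s < 3; ++s)
        {
            const unsigned new_i1 = (s == 2U) ? 0U : s + 1U;  // OpenCL select(s+1, 0, s==2)
            assert(s == s % 3 && new_i1 == (s + 1) % 3);
        }
        return 0;
    }
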
@@ -35,55 +36,55 @@ ulong SKEIN_ROT(const uint2 x, const uint y)
else return(as_ulong(amd_bitalign(x.s10, x, 32 - (y - 32))));
}
-void SkeinMix8(ulong4 *pv0, ulong4 *pv1, const uint rc0, const uint rc1, const uint rc2, const uint rc3)
+void SkeinMix8(ulong4 *pv0, ulong4 *pv1, const ulong4 rc)
{
*pv0 += *pv1;
- (*pv1).s0 = SKEIN_ROT(as_uint2((*pv1).s0), rc0);
- (*pv1).s1 = SKEIN_ROT(as_uint2((*pv1).s1), rc1);
- (*pv1).s2 = SKEIN_ROT(as_uint2((*pv1).s2), rc2);
- (*pv1).s3 = SKEIN_ROT(as_uint2((*pv1).s3), rc3);
+ (*pv1).s0 = SKEIN_ROT(as_uint2((*pv1).s0), rc.s0);
+ (*pv1).s1 = SKEIN_ROT(as_uint2((*pv1).s1), rc.s1);
+ (*pv1).s2 = SKEIN_ROT(as_uint2((*pv1).s2), rc.s2);
+ (*pv1).s3 = SKEIN_ROT(as_uint2((*pv1).s3), rc.s3);
*pv1 ^= *pv0;
}
-ulong8 SkeinEvenRound(ulong8 p, const ulong8 h, const ulong *t, const uint s)
+ulong8 SkeinEvenRound(ulong8 p, const ulong8 h, const ulong *t, const uint s, const uint q)
{
- SKEIN_INJECT_KEY(p, s);
+ SKEIN_INJECT_KEY(p, s, q);
ulong4 pv0 = p.even, pv1 = p.odd;
- SkeinMix8(&pv0, &pv1, 46, 36, 19, 37);
+ SkeinMix8(&pv0, &pv1, (ulong4)(46, 36, 19, 37));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 33, 27, 14, 42);
+ SkeinMix8(&pv0, &pv1, (ulong4)(33, 27, 14, 42));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 17, 49, 36, 39);
+ SkeinMix8(&pv0, &pv1, (ulong4)(17, 49, 36, 39));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 44, 9, 54, 56);
+ SkeinMix8(&pv0, &pv1, (ulong4)(44, 9, 54, 56));
return(shuffle2(pv0, pv1, (ulong8)(1, 4, 2, 7, 3, 6, 0, 5)));
}
-ulong8 SkeinOddRound(ulong8 p, const ulong8 h, const ulong *t, const uint s)
+ulong8 SkeinOddRound(ulong8 p, const ulong8 h, const ulong *t, const uint s, const uint q)
{
- SKEIN_INJECT_KEY(p, s);
+ SKEIN_INJECT_KEY(p, s, q);
ulong4 pv0 = p.even, pv1 = p.odd;
- SkeinMix8(&pv0, &pv1, 39, 30, 34, 24);
+ SkeinMix8(&pv0, &pv1, (ulong4)(39, 30, 34, 24));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 13, 50, 10, 17);
+ SkeinMix8(&pv0, &pv1, (ulong4)(13, 50, 10, 17));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 25, 29, 39, 43);
+ SkeinMix8(&pv0, &pv1, (ulong4)(25, 29, 39, 43));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 8, 35, 56, 22);
+ SkeinMix8(&pv0, &pv1, (ulong4)(8, 35, 56, 22));
return(shuffle2(pv0, pv1, (ulong8)(1, 4, 2, 7, 3, 6, 0, 5)));
}
@@ -92,20 +93,47 @@ ulong8 Skein512Block(ulong8 p, ulong8 h, ulong h8, const ulong *t)
#pragma unroll
for(int i = 0; i < 18; ++i)
{
- p = SkeinEvenRound(p, h, t, i);
+ p = SkeinEvenRound(p, h, t, 0U, i);
++i;
ulong tmp = h.s0;
h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
h.s7 = h8;
h8 = tmp;
- p = SkeinOddRound(p, h, t, i);
+ p = SkeinOddRound(p, h, t, 1U, i);
+ ++i;
+ tmp = h.s0;
+ h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
+ h.s7 = h8;
+ h8 = tmp;
+ p = SkeinEvenRound(p, h, t, 2U, i);
+ ++i;
+ tmp = h.s0;
+ h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
+ h.s7 = h8;
+ h8 = tmp;
+ p = SkeinOddRound(p, h, t, 0U, i);
+ ++i;
+ tmp = h.s0;
+ h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
+ h.s7 = h8;
+ h8 = tmp;
+ p = SkeinEvenRound(p, h, t, 1U, i);
+ ++i;
+ tmp = h.s0;
+ h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
+ h.s7 = h8;
+ h8 = tmp;
+ p = SkeinOddRound(p, h, t, 2U, i);
tmp = h.s0;
h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
h.s7 = h8;
h8 = tmp;
}
- SKEIN_INJECT_KEY(p, 18);
+ p += h;
+ p.s5 += t[0];
+ p.s6 += t[1];
+ p.s7 += 18;
return(p);
}
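
The manual unroll in Skein512Block pairs every subkey number q (0..17) with its reduced index s = q % 3, which is what the constant 0U/1U/2U arguments passed to SkeinEvenRound/SkeinOddRound spell out; the trailing injection is subkey 18, whose reduced index is again 0, hence t[0] and t[1]. A tiny sketch of that pairing:

    // Sketch: subkey number vs. reduced tweak index used by the unrolled schedule above.
    #include <cstdio>

    int main()
    {
        for(unsigned q = 0; q < 18; ++q)
            std::printf("subkey %2u uses t[%u] and t[%u]\n", q, q % 3, (q % 3 + 1) % 3);
        std::printf("final injection: subkey 18 uses t[0] and t[1]\n");
        return 0;
    }
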