Diffstat (limited to 'xmrstak/backend')
-rw-r--r--  xmrstak/backend/amd/amd_gpu/gpu.cpp               | 263
-rw-r--r--  xmrstak/backend/amd/amd_gpu/gpu.hpp               |   2
-rw-r--r--  xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl | 169
-rw-r--r--  xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl  |  76
-rw-r--r--  xmrstak/backend/amd/autoAdjust.hpp                |  45
-rw-r--r--  xmrstak/backend/amd/config.tpl                    |  24
-rw-r--r--  xmrstak/backend/amd/jconf.cpp                     |  45
-rw-r--r--  xmrstak/backend/amd/jconf.hpp                     |   4
-rw-r--r--  xmrstak/backend/amd/minethd.cpp                   |   6
-rw-r--r--  xmrstak/backend/cpu/autoAdjust.hpp                |  33
-rw-r--r--  xmrstak/backend/cpu/config.tpl                    |   5
-rw-r--r--  xmrstak/backend/cpu/crypto/cryptonight_aesni.h    |  13
-rw-r--r--  xmrstak/backend/cpu/crypto/cryptonight_common.cpp |  11
-rw-r--r--  xmrstak/backend/cpu/minethd.cpp                   |  22
-rw-r--r--  xmrstak/backend/nvidia/config.tpl                 |   3
-rw-r--r--  xmrstak/backend/nvidia/minethd.cpp                |  45
-rw-r--r--  xmrstak/backend/nvidia/minethd.hpp                |  10
-rw-r--r--  xmrstak/backend/nvidia/nvcc_code/cuda_core.cu     |  12
-rw-r--r--  xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu    |  14
19 files changed, 592 insertions(+), 210 deletions(-)
diff --git a/xmrstak/backend/amd/amd_gpu/gpu.cpp b/xmrstak/backend/amd/amd_gpu/gpu.cpp
index d9bc962..79afa00 100644
--- a/xmrstak/backend/amd/amd_gpu/gpu.cpp
+++ b/xmrstak/backend/amd/amd_gpu/gpu.cpp
@@ -15,6 +15,7 @@
#include "xmrstak/backend/cryptonight.hpp"
#include "xmrstak/jconf.hpp"
+#include "xmrstak/picosha2/picosha2.hpp"
#include <stdio.h>
#include <string.h>
@@ -25,8 +26,41 @@
#include <regex>
#include <cassert>
+#include <fstream>
+#include <sstream>
+#include <vector>
+#include <string>
+#include <iostream>
+
+#if defined _MSC_VER
+#include <direct.h>
+#elif defined __GNUC__
+#include <sys/types.h>
+#include <sys/stat.h>
+#endif
+
+
+
#ifdef _WIN32
#include <windows.h>
+#include <Shlobj.h>
+
+static inline void create_directory(std::string dirname)
+{
+ _mkdir(dirname.data());
+}
+
+static inline std::string get_home()
+{
+ char path[MAX_PATH + 1];
+ // get folder "appdata\local"
+ if (SHGetSpecialFolderPathA(HWND_DESKTOP, path, CSIDL_LOCAL_APPDATA, FALSE))
+ {
+ return path;
+ }
+ else
+ return ".";
+}
static inline void port_sleep(size_t sec)
{
@@ -34,6 +68,22 @@ static inline void port_sleep(size_t sec)
}
#else
#include <unistd.h>
+#include <pwd.h>
+
+static inline void create_directory(std::string dirname)
+{
+ mkdir(dirname.data(), 0744);
+}
+
+static inline std::string get_home()
+{
+ const char *home = ".";
+
+ if ((home = getenv("HOME")) == nullptr)
+ home = getpwuid(getuid())->pw_dir;
+
+ return home;
+}
static inline void port_sleep(size_t sec)
{
@@ -84,6 +134,7 @@ const char* err_to_str(cl_int ret)
return "CL_MISALIGNED_SUB_BUFFER_OFFSET";
case CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST:
return "CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST";
+#ifdef CL_VERSION_1_2
case CL_COMPILE_PROGRAM_FAILURE:
return "CL_COMPILE_PROGRAM_FAILURE";
case CL_LINKER_NOT_AVAILABLE:
@@ -94,6 +145,7 @@ const char* err_to_str(cl_int ret)
return "CL_DEVICE_PARTITION_FAILED";
case CL_KERNEL_ARG_INFO_NOT_AVAILABLE:
return "CL_KERNEL_ARG_INFO_NOT_AVAILABLE";
+#endif
case CL_INVALID_VALUE:
return "CL_INVALID_VALUE";
case CL_INVALID_DEVICE_TYPE:
@@ -164,6 +216,7 @@ const char* err_to_str(cl_int ret)
return "CL_INVALID_GLOBAL_WORK_SIZE";
case CL_INVALID_PROPERTY:
return "CL_INVALID_PROPERTY";
+#ifdef CL_VERSION_1_2
case CL_INVALID_IMAGE_DESCRIPTOR:
return "CL_INVALID_IMAGE_DESCRIPTOR";
case CL_INVALID_COMPILER_OPTIONS:
@@ -172,6 +225,7 @@ const char* err_to_str(cl_int ret)
return "CL_INVALID_LINKER_OPTIONS";
case CL_INVALID_DEVICE_PARTITION_COUNT:
return "CL_INVALID_DEVICE_PARTITION_COUNT";
+#endif
#if defined(CL_VERSION_2_0) && !defined(CONF_ENFORCE_OpenCL_1_2)
case CL_INVALID_PIPE_SIZE:
return "CL_INVALID_PIPE_SIZE";
@@ -323,57 +377,157 @@ size_t InitOpenCLGpu(cl_context opencl_ctx, GpuContext* ctx, const char* source_
return ERR_OCL_API;
}
- ctx->Program = clCreateProgramWithSource(opencl_ctx, 1, (const char**)&source_code, NULL, &ret);
- if(ret != CL_SUCCESS)
+ std::vector<char> devNameVec(1024);
+ if((ret = clGetDeviceInfo(ctx->DeviceID, CL_DEVICE_NAME, devNameVec.size(), devNameVec.data(), NULL)) != CL_SUCCESS)
{
- printer::inst()->print_msg(L1,"Error %s when calling clCreateProgramWithSource on the contents of cryptonight.cl", err_to_str(ret));
+ printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get CL_DEVICE_NAME for device %u.", err_to_str(ret),ctx->deviceIdx );
return ERR_OCL_API;
}
char options[256];
- snprintf(options, sizeof(options),
- "-DITERATIONS=%d -DMASK=%d -DWORKSIZE=%llu -DSTRIDED_INDEX=%d",
- hasIterations, threadMemMask, int_port(ctx->workSize), ctx->stridedIndex ? 1 : 0);
- ret = clBuildProgram(ctx->Program, 1, &ctx->DeviceID, options, NULL, NULL);
- if(ret != CL_SUCCESS)
+ snprintf(options, sizeof(options),
+ "-DITERATIONS=%d -DMASK=%d -DWORKSIZE=%llu -DSTRIDED_INDEX=%d -DMEM_CHUNK_EXPONENT=%d -DCOMP_MODE=%d",
+ hasIterations, threadMemMask, int_port(ctx->workSize), ctx->stridedIndex, int(ctx->memChunk), ctx->compMode ? 1 : 0);
+
+ /* create a hash for the compile time cache
+ * used data:
+ * - source code
+ * - device name
+ * - compile parameters
+ */
+ std::string src_str(source_code);
+ src_str += options;
+ src_str += devNameVec.data();
+ std::string hash_hex_str;
+ picosha2::hash256_hex_string(src_str, hash_hex_str);
+
+ std::string cache_file = get_home() + "/.openclcache/" + hash_hex_str + ".openclbin";
+ std::ifstream clBinFile(cache_file, std::ofstream::in | std::ofstream::binary);
+ if(!clBinFile.good())
{
- size_t len;
- printer::inst()->print_msg(L1,"Error %s when calling clBuildProgram.", err_to_str(ret));
+ printer::inst()->print_msg(L1,"WARNING: OpenCL device %u - OpenCL binary %s not found.",ctx->deviceIdx, cache_file.c_str());
+ ctx->Program = clCreateProgramWithSource(opencl_ctx, 1, (const char**)&source_code, NULL, &ret);
+ if(ret != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clCreateProgramWithSource on the OpenCL miner code", err_to_str(ret));
+ return ERR_OCL_API;
+ }
- if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_LOG, 0, NULL, &len)) != CL_SUCCESS)
+ ret = clBuildProgram(ctx->Program, 1, &ctx->DeviceID, options, NULL, NULL);
+ if(ret != CL_SUCCESS)
{
- printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for length of build log output.", err_to_str(ret));
+ size_t len;
+ printer::inst()->print_msg(L1,"Error %s when calling clBuildProgram.", err_to_str(ret));
+
+ if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_LOG, 0, NULL, &len)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for length of build log output.", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+
+ char* BuildLog = (char*)malloc(len + 1);
+ BuildLog[0] = '\0';
+
+ if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_LOG, len, BuildLog, NULL)) != CL_SUCCESS)
+ {
+ free(BuildLog);
+ printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for build log.", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+
+ printer::inst()->print_str("Build log:\n");
+ std::cerr<<BuildLog<<std::endl;
+
+ free(BuildLog);
return ERR_OCL_API;
}
- char* BuildLog = (char*)malloc(len + 1);
- BuildLog[0] = '\0';
+ cl_uint num_devices;
+ clGetProgramInfo(ctx->Program, CL_PROGRAM_NUM_DEVICES, sizeof(cl_uint), &num_devices,NULL);
- if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_LOG, len, BuildLog, NULL)) != CL_SUCCESS)
+
+ std::vector<cl_device_id> devices_ids(num_devices);
+ clGetProgramInfo(ctx->Program, CL_PROGRAM_DEVICES, sizeof(cl_device_id)* devices_ids.size(), devices_ids.data(),NULL);
+ int dev_id = 0;
+ /* Search for the gpu within the program context.
+ * The id can be different to ctx->DeviceID.
+ */
+ for(auto & ocl_device : devices_ids)
{
- free(BuildLog);
- printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for build log.", err_to_str(ret));
+ if(ocl_device == ctx->DeviceID)
+ break;
+ dev_id++;
+ }
+
+ cl_build_status status;
+ do
+ {
+ if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_STATUS, sizeof(cl_build_status), &status, NULL)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for status of build.", err_to_str(ret));
+ return ERR_OCL_API;
+ }
+ port_sleep(1);
+ }
+ while(status == CL_BUILD_IN_PROGRESS);
+
+ std::vector<size_t> binary_sizes(num_devices);
+ clGetProgramInfo (ctx->Program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t) * binary_sizes.size(), binary_sizes.data(), NULL);
+
+ std::vector<char*> all_programs(num_devices);
+ std::vector<std::vector<char>> program_storage;
+
+ int p_id = 0;
+ size_t mem_size = 0;
+ // create memory structure to query all OpenCL program binaries
+ for(auto & p : all_programs)
+ {
+ program_storage.emplace_back(std::vector<char>(binary_sizes[p_id]));
+ all_programs[p_id] = program_storage[p_id].data();
+ mem_size += binary_sizes[p_id];
+ p_id++;
+ }
+
+ if((ret = clGetProgramInfo(ctx->Program, CL_PROGRAM_BINARIES, num_devices * sizeof(char*), all_programs.data(), NULL)) != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clGetProgramInfo.", err_to_str(ret));
return ERR_OCL_API;
}
-
- printer::inst()->print_str("Build log:\n");
- std::cerr<<BuildLog<<std::endl;
- free(BuildLog);
- return ERR_OCL_API;
+ std::ofstream file_stream;
+ std::cout<<cache_file<<std::endl;
+ file_stream.open(cache_file, std::ofstream::out | std::ofstream::binary);
+ file_stream.write(all_programs[dev_id], binary_sizes[dev_id]);
+ file_stream.close();
+ printer::inst()->print_msg(L1, "OpenCL device %u - OpenCL binary file stored in file %s.",ctx->deviceIdx, cache_file.c_str());
}
-
- cl_build_status status;
- do
+ else
{
- if((ret = clGetProgramBuildInfo(ctx->Program, ctx->DeviceID, CL_PROGRAM_BUILD_STATUS, sizeof(cl_build_status), &status, NULL)) != CL_SUCCESS)
+ printer::inst()->print_msg(L1, "OpenCL device %u - Load OpenCL binary file %s",ctx->deviceIdx, cache_file.c_str());
+ std::ostringstream ss;
+ ss << clBinFile.rdbuf();
+ std::string s = ss.str();
+
+ size_t bin_size = s.size();
+ auto data_ptr = s.data();
+
+ cl_int clStatus;
+ ctx->Program = clCreateProgramWithBinary(
+ opencl_ctx, 1, &ctx->DeviceID, &bin_size,
+ (const unsigned char **)&data_ptr, &clStatus, &ret
+ );
+ if(ret != CL_SUCCESS)
{
- printer::inst()->print_msg(L1,"Error %s when calling clGetProgramBuildInfo for status of build.", err_to_str(ret));
+ printer::inst()->print_msg(L1,"Error %s when calling clCreateProgramWithBinary. Try to delete file %s", err_to_str(ret), cache_file.c_str());
+ return ERR_OCL_API;
+ }
+ ret = clBuildProgram(ctx->Program, 1, &ctx->DeviceID, NULL, NULL, NULL);
+ if(ret != CL_SUCCESS)
+ {
+ printer::inst()->print_msg(L1,"Error %s when calling clBuildProgram. Try to delete file %s", err_to_str(ret), cache_file.c_str());
return ERR_OCL_API;
}
- port_sleep(1);
}
- while(status == CL_BUILD_IN_PROGRESS);
const char *KernelNames[] = { "cn0", "cn1", "cn2", "Blake", "Groestl", "JH", "Skein" };
for(int i = 0; i < 7; ++i)
@@ -487,7 +641,7 @@ std::vector<GpuContext> getAMDDevices(int index)
printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get the device vendor name for device %u.", err_to_str(clStatus), k);
continue;
}
-
+
std::string devVendor(devVendorVec.data());
if( devVendor.find("Advanced Micro Devices") != std::string::npos || devVendor.find("AMD") != std::string::npos)
{
@@ -518,13 +672,13 @@ std::vector<GpuContext> getAMDDevices(int index)
printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get CL_DEVICE_NAME for device %u.", err_to_str(clStatus), k);
continue;
}
- printer::inst()->print_msg(L0,"Found OpenCL GPU %s.",ctx.name.c_str());
// if environment variable GPU_SINGLE_ALLOC_PERCENT is not set we can not allocate the full memory
ctx.deviceIdx = k;
ctx.freeMem = std::min(ctx.freeMem, maxMem);
ctx.name = std::string(devNameVec.data());
ctx.DeviceID = device_list[k];
+ printer::inst()->print_msg(L0,"Found OpenCL GPU %s.",ctx.name.c_str());
ctxVec.push_back(ctx);
}
}
@@ -549,6 +703,8 @@ int getAMDPlatformIdx()
clStatus = clGetPlatformIDs(numPlatforms, platforms, NULL);
int platformIndex = -1;
+ // Mesa OpenCL is the fallback if no AMD or Apple OpenCL is found
+ int mesaPlatform = -1;
if(clStatus == CL_SUCCESS)
{
@@ -559,13 +715,29 @@ int getAMDPlatformIdx()
clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR, infoSize, platformNameVec.data(), NULL);
std::string platformName(platformNameVec.data());
- if( platformName.find("Advanced Micro Devices") != std::string::npos || platformName.find("Apple") != std::string::npos)
+ if( platformName.find("Advanced Micro Devices") != std::string::npos ||
+ platformName.find("Apple") != std::string::npos ||
+ platformName.find("Mesa") != std::string::npos
+ )
{
- platformIndex = i;
+
printer::inst()->print_msg(L0,"Found AMD platform index id = %i, name = %s",i , platformName.c_str());
- break;
+ if(platformName.find("Mesa") != std::string::npos)
+ mesaPlatform = i;
+ else
+ {
+ // exit if AMD or Apple platform is found
+ platformIndex = i;
+ break;
+ }
}
}
+ // fall back to Mesa OpenCL
+ if(platformIndex == -1 && mesaPlatform != -1)
+ {
+ printer::inst()->print_msg(L0,"No AMD platform found select Mesa as OpenCL platform");
+ platformIndex = mesaPlatform;
+ }
}
else
printer::inst()->print_msg(L1,"WARNING: %s when calling clGetPlatformIDs for platform information.", err_to_str(clStatus));
@@ -694,8 +866,18 @@ size_t InitOpenCL(GpuContext* ctx, size_t num_gpus, size_t platform_idx)
source_code = std::regex_replace(source_code, std::regex("XMRSTAK_INCLUDE_BLAKE256"), blake256CL);
source_code = std::regex_replace(source_code, std::regex("XMRSTAK_INCLUDE_GROESTL256"), groestl256CL);
+ // create a directory for the OpenCL compile cache
+ create_directory(get_home() + "/.openclcache");
+
for(int i = 0; i < num_gpus; ++i)
{
+ if(ctx[i].stridedIndex == 2 && (ctx[i].rawIntensity % ctx[i].workSize) != 0)
+ {
+ size_t reduced_intensity = (ctx[i].rawIntensity / ctx[i].workSize) * ctx[i].workSize;
+ ctx[i].rawIntensity = reduced_intensity;
+ printer::inst()->print_msg(L0, "WARNING AMD: gpu %d intensity is not a multiple of 'worksize', auto reduce intensity to %d", ctx[i].deviceIdx, int(reduced_intensity));
+ }
+
if((ret = InitOpenCLGpu(opencl_ctx, &ctx[i], source_code.c_str())) != ERR_SUCCESS)
{
return ret;
@@ -866,10 +1048,15 @@ size_t XMRRunJob(GpuContext* ctx, cl_uint* HashOutput)
size_t g_intensity = ctx->rawIntensity;
size_t w_size = ctx->workSize;
- // round up to next multiple of w_size
- size_t g_thd = ((g_intensity + w_size - 1u) / w_size) * w_size;
- // number of global threads must be a multiple of the work group size (w_size)
- assert(g_thd%w_size == 0);
+ size_t g_thd = g_intensity;
+
+ if(ctx->compMode)
+ {
+ // round up to next multiple of w_size
+ g_thd = ((g_intensity + w_size - 1u) / w_size) * w_size;
+ // number of global threads must be a multiple of the work group size (w_size)
+ assert(g_thd%w_size == 0);
+ }
for(int i = 2; i < 6; ++i)
{
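
The cache logic above names each binary after a SHA-256 over the kernel source, the compile options, and the device name, so changing any of them forces a rebuild. A minimal sketch of that keying scheme, assuming only the picosha2 header the patch already includes; home, source, options and devName stand in for the values gpu.cpp computes:

    #include <string>
    #include "xmrstak/picosha2/picosha2.hpp"

    // Sketch of the cache key used above: SHA-256 over source code, compile
    // options and device name; any change to one of them forces a rebuild.
    std::string cacheFilePath(const std::string& home, const std::string& source,
                              const std::string& options, const std::string& devName)
    {
        std::string key = source + options + devName;
        std::string hashHex;
        picosha2::hash256_hex_string(key, hashHex); // 64 hex characters
        return home + "/.openclcache/" + hashHex + ".openclbin";
    }
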
diff --git a/xmrstak/backend/amd/amd_gpu/gpu.hpp b/xmrstak/backend/amd/amd_gpu/gpu.hpp
index c17bac1..8fb7168 100644
--- a/xmrstak/backend/amd/amd_gpu/gpu.hpp
+++ b/xmrstak/backend/amd/amd_gpu/gpu.hpp
@@ -25,6 +25,8 @@ struct GpuContext
size_t rawIntensity;
size_t workSize;
int stridedIndex;
+ int memChunk;
+ int compMode;
/*Output vars*/
cl_device_id DeviceID;
diff --git a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
index 255fcbb..9383b04 100644
--- a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
+++ b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
@@ -14,6 +14,11 @@ R"===(
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+/* For Mesa clover support */
+#ifdef cl_clang_storage_class_specifiers
+# pragma OPENCL EXTENSION cl_clang_storage_class_specifiers : enable
+#endif
+
#ifdef cl_amd_media_ops
#pragma OPENCL EXTENSION cl_amd_media_ops : enable
#else
@@ -399,7 +404,7 @@ static const __constant uchar rcon[8] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x
void AESExpandKey256(uint *keybuf)
{
//#pragma unroll 4
- for(uint c = 8, i = 1; c < 60; ++c)
+ for(uint c = 8, i = 1; c < 40; ++c)
{
// For 256-bit keys, an sbox permutation is done every other 4th uint generated, AND every 8th
uint t = ((!(c & 7)) || ((c & 7) == 4)) ? SubWord(keybuf[c - 1]) : keybuf[c - 1];
@@ -411,21 +416,32 @@ void AESExpandKey256(uint *keybuf)
}
}
+#define MEM_CHUNK (1<<MEM_CHUNK_EXPONENT)
+
#if(STRIDED_INDEX==0)
# define IDX(x) (x)
-#else
+#elif(STRIDED_INDEX==1)
# define IDX(x) ((x) * (Threads))
+#elif(STRIDED_INDEX==2)
+# define IDX(x) (((x) % MEM_CHUNK) + ((x) / MEM_CHUNK) * WORKSIZE * MEM_CHUNK)
#endif
+inline ulong getIdx()
+{
+#if(STRIDED_INDEX==0 || STRIDED_INDEX==1 || STRIDED_INDEX==2)
+ return get_global_id(0) - get_global_offset(0);
+#endif
+}
+
__attribute__((reqd_work_group_size(WORKSIZE, 8, 1)))
__kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ulong *states, ulong Threads)
{
ulong State[25];
- uint ExpandedKey1[256];
+ uint ExpandedKey1[40];
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
uint4 text;
- const ulong gIdx = get_global_id(0) - get_global_offset(0);
+ const ulong gIdx = getIdx();
for(int i = get_local_id(1) * WORKSIZE + get_local_id(0);
i < 256;
@@ -439,16 +455,20 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
}
barrier(CLK_LOCAL_MEM_FENCE);
-
+
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
Scratchpad += gIdx * (ITERATIONS >> 2);
-#else
+#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
+#elif(STRIDED_INDEX==2)
+ Scratchpad += get_group_id(0) * (ITERATIONS >> 2) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
((ulong8 *)State)[0] = vload8(0, input);
@@ -470,9 +490,10 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
-
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
#pragma unroll
for(int i = 0; i < 25; ++i) states[i] = State[i];
@@ -486,9 +507,10 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
}
mem_fence(CLK_LOCAL_MEM_FENCE);
-
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
#pragma unroll 2
for(int i = 0; i < (ITERATIONS >> 5); ++i)
@@ -509,7 +531,7 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
ulong a[2], b[2];
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
- const ulong gIdx = get_global_id(0) - get_global_offset(0);
+ const ulong gIdx = getIdx();
for(int i = get_local_id(0); i < 256; i += WORKSIZE)
{
@@ -523,15 +545,18 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
barrier(CLK_LOCAL_MEM_FENCE);
uint4 b_x;
-
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
Scratchpad += gIdx * (ITERATIONS >> 2);
-#else
+#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
+#elif(STRIDED_INDEX==2)
+ Scratchpad += get_group_id(0) * (ITERATIONS >> 2) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
a[0] = states[0] ^ states[4];
@@ -544,8 +569,10 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
mem_fence(CLK_LOCAL_MEM_FENCE);
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
#pragma unroll 8
for(int i = 0; i < ITERATIONS; ++i)
@@ -578,11 +605,11 @@ __attribute__((reqd_work_group_size(WORKSIZE, 8, 1)))
__kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global uint *Branch0, __global uint *Branch1, __global uint *Branch2, __global uint *Branch3, ulong Threads)
{
__local uint AES0[256], AES1[256], AES2[256], AES3[256];
- uint ExpandedKey2[256];
+ uint ExpandedKey2[40];
ulong State[25];
uint4 text;
- const ulong gIdx = get_global_id(0) - get_global_offset(0);
+ const ulong gIdx = getIdx();
for(int i = get_local_id(1) * WORKSIZE + get_local_id(0);
i < 256;
@@ -597,14 +624,18 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
barrier(CLK_LOCAL_MEM_FENCE);
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
states += 25 * gIdx;
#if(STRIDED_INDEX==0)
Scratchpad += gIdx * (ITERATIONS >> 2);
-#else
+#elif(STRIDED_INDEX==1)
Scratchpad += gIdx;
+#elif(STRIDED_INDEX==2)
+ Scratchpad += get_group_id(0) * (ITERATIONS >> 2) * WORKSIZE + MEM_CHUNK * get_local_id(0);
#endif
#if defined(__Tahiti__) || defined(__Pitcairn__)
@@ -624,15 +655,17 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
barrier(CLK_LOCAL_MEM_FENCE);
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
#pragma unroll 2
for(int i = 0; i < (ITERATIONS >> 5); ++i)
{
text ^= Scratchpad[IDX((i << 3) + get_local_id(1))];
- #pragma unroll
+ #pragma unroll 10
for(int j = 0; j < 10; ++j)
text = AES_Round(AES0, AES1, AES2, AES3, text, ((uint4 *)ExpandedKey2)[j]);
}
@@ -642,8 +675,10 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
barrier(CLK_GLOBAL_MEM_FENCE);
+#if(COMP_MODE==1)
// do not use early return here
if(gIdx < Threads)
+#endif
{
if(!get_local_id(1))
{
@@ -653,21 +688,11 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
for(int i = 0; i < 25; ++i) states[i] = State[i];
- switch(State[0] & 3)
- {
- case 0:
- Branch0[atomic_inc(Branch0 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 1:
- Branch1[atomic_inc(Branch1 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 2:
- Branch2[atomic_inc(Branch2 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 3:
- Branch3[atomic_inc(Branch3 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- }
+ ulong StateSwitch = State[0] & 3;
+ __global uint *destinationBranch1 = StateSwitch == 0 ? Branch0 : Branch1;
+ __global uint *destinationBranch2 = StateSwitch == 2 ? Branch2 : Branch3;
+ __global uint *destinationBranch = StateSwitch < 2 ? destinationBranch1 : destinationBranch2;
+ destinationBranch[atomic_inc(destinationBranch + Threads)] = gIdx;
}
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
@@ -704,8 +729,7 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
for(uint i = 0; i < 4; ++i)
{
- if(i < 3) t[0] += 0x40UL;
- else t[0] += 0x08UL;
+ t[0] += i < 3 ? 0x40UL : 0x08UL;
t[2] = t[0] ^ t[1];
@@ -715,8 +739,7 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
h = m ^ p;
- if(i < 2) t[1] = 0x3000000000000000UL;
- else t[1] = 0xB000000000000000UL;
+ t[1] = i < 2 ? 0x3000000000000000UL : 0xB000000000000000UL;
}
t[0] = 0x08UL;
@@ -744,6 +767,27 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
#define SWAP8(x) as_ulong(as_uchar8(x).s76543210)
+#define JHXOR \
+ h0h ^= input[0]; \
+ h0l ^= input[1]; \
+ h1h ^= input[2]; \
+ h1l ^= input[3]; \
+ h2h ^= input[4]; \
+ h2l ^= input[5]; \
+ h3h ^= input[6]; \
+ h3l ^= input[7]; \
+\
+ E8; \
+\
+ h4h ^= input[0]; \
+ h4l ^= input[1]; \
+ h5h ^= input[2]; \
+ h5l ^= input[3]; \
+ h6h ^= input[4]; \
+ h6l ^= input[5]; \
+ h7h ^= input[6]; \
+ h7l ^= input[7]
+
__kernel void JH(__global ulong *states, __global uint *BranchBuf, __global uint *output, ulong Target, ulong Threads)
{
const uint idx = get_global_id(0) - get_global_offset(0);
@@ -757,46 +801,27 @@ __kernel void JH(__global ulong *states, __global uint *BranchBuf, __global uint
sph_u64 h4h = 0x754D2E7F8996A371UL, h4l = 0x62E27DF70849141DUL, h5h = 0x948F2476F7957627UL, h5l = 0x6C29804757B6D587UL, h6h = 0x6C0D8EAC2D275E5CUL, h6l = 0x0F7A0557C6508451UL, h7h = 0xEA12247067D3E47BUL, h7l = 0x69D71CD313ABE389UL;
sph_u64 tmp;
- for(int i = 0; i < 5; ++i)
+ for(int i = 0; i < 3; ++i)
{
ulong input[8];
- if(i < 3)
- {
- for(int x = 0; x < 8; ++x) input[x] = (states[(i << 3) + x]);
- }
- else if(i == 3)
- {
- input[0] = (states[24]);
- input[1] = 0x80UL;
- for(int x = 2; x < 8; ++x) input[x] = 0x00UL;
- }
- else
- {
- input[7] = 0x4006000000000000UL;
-
- for(int x = 0; x < 7; ++x) input[x] = 0x00UL;
- }
-
- h0h ^= input[0];
- h0l ^= input[1];
- h1h ^= input[2];
- h1l ^= input[3];
- h2h ^= input[4];
- h2l ^= input[5];
- h3h ^= input[6];
- h3l ^= input[7];
-
- E8;
-
- h4h ^= input[0];
- h4l ^= input[1];
- h5h ^= input[2];
- h5l ^= input[3];
- h6h ^= input[4];
- h6l ^= input[5];
- h7h ^= input[6];
- h7l ^= input[7];
+ const int shifted = i << 3;
+ for(int x = 0; x < 8; ++x) input[x] = (states[shifted + x]);
+ JHXOR;
+ }
+ {
+ ulong input[8];
+ input[0] = (states[24]);
+ input[1] = 0x80UL;
+ #pragma unroll 6
+ for(int x = 2; x < 8; ++x) input[x] = 0x00UL;
+ JHXOR;
+ }
+ {
+ ulong input[8];
+ for(int x = 0; x < 7; ++x) input[x] = 0x00UL;
+ input[7] = 0x4006000000000000UL;
+ JHXOR;
}
//output[0] = h6h;
@@ -832,6 +857,7 @@ __kernel void Blake(__global ulong *states, __global uint *BranchBuf, __global u
((uint8 *)h)[0] = vload8(0U, c_IV256);
+ #pragma unroll 4
for(uint i = 0, bitlen = 0; i < 4; ++i)
{
if(i < 3)
@@ -907,6 +933,7 @@ __kernel void Groestl(__global ulong *states, __global uint *BranchBuf, __global
State[7] = 0x0001000000000000UL;
+ #pragma unroll 4
for(uint i = 0; i < 4; ++i)
{
ulong H[8], M[8];
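
The IDX() macro above selects between three scratchpad layouts: linear per thread (0), fully interleaved across threads (1), and chunked per work group (2). A host-side sketch of the same index arithmetic; workSize and memChunkExp are assumptions standing in for the kernel's WORKSIZE and MEM_CHUNK defines:

    #include <cstddef>

    // Logical scratchpad element x -> physical uint4 offset, mirroring IDX().
    size_t idx_linear(size_t x)                  { return x; }           // STRIDED_INDEX==0
    size_t idx_strided(size_t x, size_t threads) { return x * threads; } // STRIDED_INDEX==1

    // STRIDED_INDEX==2: elements are grouped into chunks of 2^memChunkExp,
    // and the chunks of all work-items in a group are interleaved.
    size_t idx_chunked(size_t x, size_t workSize, size_t memChunkExp)
    {
        const size_t memChunk = size_t(1) << memChunkExp;
        return (x % memChunk) + (x / memChunk) * workSize * memChunk;
    }
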
diff --git a/xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl b/xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl
index 868757b..279b652 100644
--- a/xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl
+++ b/xmrstak/backend/amd/amd_gpu/opencl/wolf-skein.cl
@@ -3,6 +3,7 @@ R"===(
#define WOLF_SKEIN_CL
// Vectorized Skein implementation macros and functions by Wolf
+// Updated by taisel
#define SKEIN_KS_PARITY 0x1BD11BDAA9FC1A22
@@ -22,11 +23,11 @@ static const __constant ulong SKEIN512_256_IV[8] =
0xC36FBAF9393AD185UL, 0x3EEDBA1833EDFC13UL
};
-#define SKEIN_INJECT_KEY(p, s) do { \
+#define SKEIN_INJECT_KEY(p, s, q) do { \
p += h; \
- p.s5 += t[s % 3]; \
- p.s6 += t[(s + 1) % 3]; \
- p.s7 += s; \
+ p.s5 += t[s]; \
+ p.s6 += t[select(s + 1U, 0U, s == 2U)]; \
+ p.s7 += q; \
} while(0)
ulong SKEIN_ROT(const uint2 x, const uint y)
@@ -35,55 +36,55 @@ ulong SKEIN_ROT(const uint2 x, const uint y)
else return(as_ulong(amd_bitalign(x.s10, x, 32 - (y - 32))));
}
-void SkeinMix8(ulong4 *pv0, ulong4 *pv1, const uint rc0, const uint rc1, const uint rc2, const uint rc3)
+void SkeinMix8(ulong4 *pv0, ulong4 *pv1, const ulong4 rc)
{
*pv0 += *pv1;
- (*pv1).s0 = SKEIN_ROT(as_uint2((*pv1).s0), rc0);
- (*pv1).s1 = SKEIN_ROT(as_uint2((*pv1).s1), rc1);
- (*pv1).s2 = SKEIN_ROT(as_uint2((*pv1).s2), rc2);
- (*pv1).s3 = SKEIN_ROT(as_uint2((*pv1).s3), rc3);
+ (*pv1).s0 = SKEIN_ROT(as_uint2((*pv1).s0), rc.s0);
+ (*pv1).s1 = SKEIN_ROT(as_uint2((*pv1).s1), rc.s1);
+ (*pv1).s2 = SKEIN_ROT(as_uint2((*pv1).s2), rc.s2);
+ (*pv1).s3 = SKEIN_ROT(as_uint2((*pv1).s3), rc.s3);
*pv1 ^= *pv0;
}
-ulong8 SkeinEvenRound(ulong8 p, const ulong8 h, const ulong *t, const uint s)
+ulong8 SkeinEvenRound(ulong8 p, const ulong8 h, const ulong *t, const uint s, const uint q)
{
- SKEIN_INJECT_KEY(p, s);
+ SKEIN_INJECT_KEY(p, s, q);
ulong4 pv0 = p.even, pv1 = p.odd;
- SkeinMix8(&pv0, &pv1, 46, 36, 19, 37);
+ SkeinMix8(&pv0, &pv1, (ulong4)(46, 36, 19, 37));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 33, 27, 14, 42);
+ SkeinMix8(&pv0, &pv1, (ulong4)(33, 27, 14, 42));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 17, 49, 36, 39);
+ SkeinMix8(&pv0, &pv1, (ulong4)(17, 49, 36, 39));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 44, 9, 54, 56);
+ SkeinMix8(&pv0, &pv1, (ulong4)(44, 9, 54, 56));
return(shuffle2(pv0, pv1, (ulong8)(1, 4, 2, 7, 3, 6, 0, 5)));
}
-ulong8 SkeinOddRound(ulong8 p, const ulong8 h, const ulong *t, const uint s)
+ulong8 SkeinOddRound(ulong8 p, const ulong8 h, const ulong *t, const uint s, const uint q)
{
- SKEIN_INJECT_KEY(p, s);
+ SKEIN_INJECT_KEY(p, s, q);
ulong4 pv0 = p.even, pv1 = p.odd;
- SkeinMix8(&pv0, &pv1, 39, 30, 34, 24);
+ SkeinMix8(&pv0, &pv1, (ulong4)(39, 30, 34, 24));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 13, 50, 10, 17);
+ SkeinMix8(&pv0, &pv1, (ulong4)(13, 50, 10, 17));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 25, 29, 39, 43);
+ SkeinMix8(&pv0, &pv1, (ulong4)(25, 29, 39, 43));
pv0 = shuffle(pv0, (ulong4)(1, 2, 3, 0));
pv1 = shuffle(pv1, (ulong4)(0, 3, 2, 1));
- SkeinMix8(&pv0, &pv1, 8, 35, 56, 22);
+ SkeinMix8(&pv0, &pv1, (ulong4)(8, 35, 56, 22));
return(shuffle2(pv0, pv1, (ulong8)(1, 4, 2, 7, 3, 6, 0, 5)));
}
@@ -92,20 +93,47 @@ ulong8 Skein512Block(ulong8 p, ulong8 h, ulong h8, const ulong *t)
#pragma unroll
for(int i = 0; i < 18; ++i)
{
- p = SkeinEvenRound(p, h, t, i);
+ p = SkeinEvenRound(p, h, t, 0U, i);
++i;
ulong tmp = h.s0;
h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
h.s7 = h8;
h8 = tmp;
- p = SkeinOddRound(p, h, t, i);
+ p = SkeinOddRound(p, h, t, 1U, i);
+ ++i;
+ tmp = h.s0;
+ h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
+ h.s7 = h8;
+ h8 = tmp;
+ p = SkeinEvenRound(p, h, t, 2U, i);
+ ++i;
+ tmp = h.s0;
+ h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
+ h.s7 = h8;
+ h8 = tmp;
+ p = SkeinOddRound(p, h, t, 0U, i);
+ ++i;
+ tmp = h.s0;
+ h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
+ h.s7 = h8;
+ h8 = tmp;
+ p = SkeinEvenRound(p, h, t, 1U, i);
+ ++i;
+ tmp = h.s0;
+ h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
+ h.s7 = h8;
+ h8 = tmp;
+ p = SkeinOddRound(p, h, t, 2U, i);
tmp = h.s0;
h = shuffle(h, (ulong8)(1, 2, 3, 4, 5, 6, 7, 0));
h.s7 = h8;
h8 = tmp;
}
- SKEIN_INJECT_KEY(p, 18);
+ p += h;
+ p.s5 += t[0];
+ p.s6 += t[1];
+ p.s7 += 18;
return(p);
}
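
Skein512Block above unrolls six rounds per loop iteration so the key-injection indices become the compile-time constants 0,1,2,0,1,2 instead of an s % 3 computed every round. A quick host-side check (a sketch, not miner code) that the unrolled sequence matches the original modulo arithmetic:

    #include <cassert>

    int main()
    {
        // The unrolled rounds pass s = 0,1,2,0,1,2 explicitly; the old macro
        // computed t[s % 3] and t[(s + 1) % 3] per round.
        const unsigned unrolled[6] = { 0, 1, 2, 0, 1, 2 };
        for (unsigned i = 0; i < 18; ++i)
            assert(unrolled[i % 6] == i % 3);
        // select(s + 1U, 0U, s == 2U) is the branch-free form of (s + 1) % 3
        for (unsigned s = 0; s < 3; ++s)
            assert((s == 2 ? 0u : s + 1) == (s + 1) % 3);
        return 0;
    }
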
diff --git a/xmrstak/backend/amd/autoAdjust.hpp b/xmrstak/backend/amd/autoAdjust.hpp
index 0bc5239..8950105 100644
--- a/xmrstak/backend/amd/autoAdjust.hpp
+++ b/xmrstak/backend/amd/autoAdjust.hpp
@@ -94,14 +94,22 @@ private:
}
std::string conf;
- int i = 0;
for(auto& ctx : devVec)
{
/* 1000 is a magic selected limit; the reason is that more than 2GiB of memory
* slows down the memory performance because of TLB cache misses
*/
size_t maxThreads = 1000u;
- if(ctx.name.compare("gfx901") == 0)
+ if(
+ ctx.name.compare("gfx901") == 0 ||
+ ctx.name.compare("gfx904") == 0 ||
+ // APU
+ ctx.name.compare("gfx902") == 0 ||
+ // UNKNOWN
+ ctx.name.compare("gfx900") == 0 ||
+ ctx.name.compare("gfx903") == 0 ||
+ ctx.name.compare("gfx905") == 0
+ )
{
/* Increase the number of threads for AMD VEGA gpus.
* Limit the number of threads based on the issue: https://github.com/fireice-uk/xmr-stak/issues/5#issuecomment-339425089
@@ -109,6 +117,9 @@ private:
*/
maxThreads = 2024u;
}
+ // increase all intensity limits by two for aeon
+ if(!::jconf::inst()->IsCurrencyMonero())
+ maxThreads *= 2u;
// keep 128MiB memory free (value is randomly chosen)
size_t availableMem = ctx.freeMem - (128u * byteToMiB);
@@ -118,14 +129,28 @@ private:
size_t possibleIntensity = std::min( maxThreads , maxIntensity );
// map intensity to a multiple of the compute unit count, 8 is the number of threads per work group
size_t intensity = (possibleIntensity / (8 * ctx.computeUnits)) * ctx.computeUnits * 8;
- conf += std::string(" // gpu: ") + ctx.name + " memory:" + std::to_string(availableMem / byteToMiB) + "\n";
- conf += std::string(" // compute units: ") + std::to_string(ctx.computeUnits) + "\n";
- // set 8 threads per block (this is a good value for the most gpus)
- conf += std::string(" { \"index\" : ") + std::to_string(ctx.deviceIdx) + ",\n" +
- " \"intensity\" : " + std::to_string(intensity) + ", \"worksize\" : " + std::to_string(8) + ",\n" +
- " \"affine_to_cpu\" : false, \"strided_index\" : true\n"
- " },\n";
- ++i;
+ // if the intensity is 0, the smallest multiple of the compute unit count already exceeds the possible intensity
+ if (intensity == 0)
+ {
+ printer::inst()->print_msg(L0, "WARNING: Auto detected intensity unexpectedly low. Try to set the environment variable GPU_SINGLE_ALLOC_PERCENT.");
+ intensity = possibleIntensity;
+ }
+ if (intensity != 0)
+ {
+ conf += std::string(" // gpu: ") + ctx.name + " memory:" + std::to_string(availableMem / byteToMiB) + "\n";
+ conf += std::string(" // compute units: ") + std::to_string(ctx.computeUnits) + "\n";
+ // set 8 threads per block (this is a good value for the most gpus)
+ conf += std::string(" { \"index\" : ") + std::to_string(ctx.deviceIdx) + ",\n" +
+ " \"intensity\" : " + std::to_string(intensity) + ", \"worksize\" : " + std::to_string(8) + ",\n" +
+ " \"affine_to_cpu\" : false, \"strided_index\" : 1, \"mem_chunk\" : 2,\n"
+ " \"comp_mode\" : true\n" +
+ " },\n";
+ }
+ else
+ {
+ printer::inst()->print_msg(L0, "WARNING: Ignore gpu %s, %s MiB free memory is not enough to suggest settings.", ctx.name.c_str(), std::to_string(availableMem / byteToMiB).c_str());
+ }
}
configTpl.replace("PLATFORMINDEX",std::to_string(platformIndex));
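
The suggested intensity above is the possible intensity rounded down to a multiple of 8 * computeUnits (eight work-items per work group). The same calculation with made-up device numbers:

    #include <cstdio>

    int main()
    {
        // Hypothetical device: 14 compute units, memory for 1790 threads.
        const size_t computeUnits = 14, possibleIntensity = 1790;
        // Round down to a multiple of 8 * computeUnits (8 threads per work group).
        size_t intensity = (possibleIntensity / (8 * computeUnits)) * computeUnits * 8;
        std::printf("suggested intensity: %zu\n", intensity); // 15 * 112 = 1680
        return 0;
    }
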
diff --git a/xmrstak/backend/amd/config.tpl b/xmrstak/backend/amd/config.tpl
index af662f8..28855f0 100644
--- a/xmrstak/backend/amd/config.tpl
+++ b/xmrstak/backend/amd/config.tpl
@@ -1,17 +1,29 @@
R"===(
/*
* GPU configuration. You should play around with intensity and worksize as the fastest settings will vary.
- * index - GPU index number usually starts from 0
- * intensity - Number of parallel GPU threads (nothing to do with CPU threads)
- * worksize - Number of local GPU threads (nothing to do with CPU threads)
+ * index - GPU index number usually starts from 0
+ * intensity - Number of parallel GPU threads (nothing to do with CPU threads)
+ * worksize - Number of local GPU threads (nothing to do with CPU threads)
* affine_to_cpu - This will affine the thread to a CPU. This can make a GPU miner play along nicer with a CPU miner.
* strided_index - switch memory pattern used for the scratch pad memory
- * true = use 16byte contiguous memory per thread, the next memory block has offset of intensity blocks
- * false = use a contiguous block of memory per thread
+ * 2 = chunked memory, chunk size is controlled by 'mem_chunk'
+ * required: intensity must be a multiple of worksize
+ * 1 or true = use 16byte contiguous memory per thread, the next memory block has offset of intensity blocks
+ * 0 or false = use a contiguous block of memory per thread
+ * mem_chunk - range 0 to 18: sets the number of elements (16 byte each) per chunk
+ * this value is only used if 'strided_index' == 2
+ * the element count is computed as 2 to the power of 'mem_chunk', e.g. 4 means a chunk of 16 elements (256 byte)
+ * comp_mode - Compatibility mode: enables/disables the automatic guard around the compute kernel which allows
+ * using an intensity that is not a multiple of the worksize.
+ * If set to false and the intensity is not a multiple of the worksize, the miner can crash;
+ * in this case set the intensity to a multiple of the worksize or activate comp_mode.
* "gpu_threads_conf" :
* [
- * { "index" : 0, "intensity" : 1000, "worksize" : 8, "affine_to_cpu" : false, "strided_index" : true },
+ * { "index" : 0, "intensity" : 1000, "worksize" : 8, "affine_to_cpu" : false, "strided_index" : true, "mem_chunk" : 2, "comp_mode" : true },
* ],
+ * If you do not wish to mine with your AMD GPU(s) then use:
+ * "gpu_threads_conf" :
+ * null,
*/
"gpu_threads_conf" : [
diff --git a/xmrstak/backend/amd/jconf.cpp b/xmrstak/backend/amd/jconf.cpp
index 07afb19..93ba709 100644
--- a/xmrstak/backend/amd/jconf.cpp
+++ b/xmrstak/backend/amd/jconf.cpp
@@ -56,9 +56,10 @@ struct configVal {
Type iType;
};
-//Same order as in configEnum, as per comment above
+// Same order as in configEnum, as per comment above
+// kNullType means any type
configVal oConfigValues[] = {
- { aGpuThreadsConf, "gpu_threads_conf", kArrayType },
+ { aGpuThreadsConf, "gpu_threads_conf", kNullType },
{ iPlatformIdx, "platform_index", kNumberType }
};
@@ -68,6 +69,8 @@ inline bool checkType(Type have, Type want)
{
if(want == have)
return true;
+ else if(want == kNullType)
+ return true;
else if(want == kTrueType && have == kFalseType)
return true;
else if(want == kFalseType && have == kTrueType)
@@ -103,14 +106,17 @@ bool jconf::GetThreadConfig(size_t id, thd_cfg &cfg)
if(!oThdConf.IsObject())
return false;
- const Value *idx, *intensity, *w_size, *aff, *stridedIndex;
+ const Value *idx, *intensity, *w_size, *aff, *stridedIndex, *memChunk, *compMode;
idx = GetObjectMember(oThdConf, "index");
intensity = GetObjectMember(oThdConf, "intensity");
w_size = GetObjectMember(oThdConf, "worksize");
aff = GetObjectMember(oThdConf, "affine_to_cpu");
stridedIndex = GetObjectMember(oThdConf, "strided_index");
+ memChunk = GetObjectMember(oThdConf, "mem_chunk");
+ compMode = GetObjectMember(oThdConf, "comp_mode");
- if(idx == nullptr || intensity == nullptr || w_size == nullptr || aff == nullptr || stridedIndex == nullptr)
+ if(idx == nullptr || intensity == nullptr || w_size == nullptr || aff == nullptr || memChunk == nullptr ||
+ stridedIndex == nullptr || compMode == nullptr)
return false;
if(!idx->IsUint64() || !intensity->IsUint64() || !w_size->IsUint64())
@@ -119,13 +125,38 @@ bool jconf::GetThreadConfig(size_t id, thd_cfg &cfg)
if(!aff->IsUint64() && !aff->IsBool())
return false;
- if(!stridedIndex->IsBool())
+ if(!stridedIndex->IsBool() && !stridedIndex->IsNumber())
+ {
+ printer::inst()->print_msg(L0, "ERROR: strided_index must be a bool or a number");
+ return false;
+ }
+
+ if(stridedIndex->IsBool())
+ cfg.stridedIndex = stridedIndex->GetBool() ? 1 : 0;
+ else
+ cfg.stridedIndex = (int)stridedIndex->GetInt64();
+
+ if(cfg.stridedIndex > 2)
+ {
+ printer::inst()->print_msg(L0, "ERROR: strided_index must be smaller than 2");
+ return false;
+ }
+
+ if(!memChunk->IsUint64() || memChunk->GetInt64() > 18)
+ {
+ printer::inst()->print_msg(L0, "ERROR: mem_chunk must be a number not greater than 18");
+ return false;
+ }
+
+ cfg.memChunk = (int)memChunk->GetInt64();
+
+ if(!compMode->IsBool())
return false;
cfg.index = idx->GetUint64();
- cfg.intensity = intensity->GetUint64();
cfg.w_size = w_size->GetUint64();
- cfg.stridedIndex = stridedIndex->GetBool();
+ cfg.intensity = intensity->GetUint64();
+ cfg.compMode = compMode->GetBool();
if(aff->IsNumber())
cfg.cpu_aff = aff->GetInt64();
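
strided_index now accepts the legacy booleans as well as the numeric modes 0-2. A condensed sketch of the accepted inputs and the resulting internal mode (stridedIndexMode is a hypothetical helper, not part of jconf.cpp):

    #include <cassert>

    // Map a parsed strided_index setting to the internal mode, mirroring the
    // rules above: bools keep their legacy meaning, numbers pass through, and
    // anything above 2 is rejected.
    int stridedIndexMode(bool isBool, bool boolVal, long long numVal, bool& ok)
    {
        int mode = isBool ? (boolVal ? 1 : 0) : int(numVal);
        ok = mode >= 0 && mode <= 2;
        return mode;
    }

    int main()
    {
        bool ok;
        assert(stridedIndexMode(true,  true,  0, ok) == 1 && ok); // "strided_index" : true
        assert(stridedIndexMode(true,  false, 0, ok) == 0 && ok); // "strided_index" : false
        assert(stridedIndexMode(false, false, 2, ok) == 2 && ok); // "strided_index" : 2
        stridedIndexMode(false, false, 3, ok);                    // rejected
        assert(!ok);
        return 0;
    }
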
diff --git a/xmrstak/backend/amd/jconf.hpp b/xmrstak/backend/amd/jconf.hpp
index ee1882a..580b69f 100644
--- a/xmrstak/backend/amd/jconf.hpp
+++ b/xmrstak/backend/amd/jconf.hpp
@@ -26,7 +26,9 @@ public:
size_t intensity;
size_t w_size;
long long cpu_aff;
- bool stridedIndex;
+ int stridedIndex;
+ int memChunk;
+ bool compMode;
};
size_t GetThreadCount();
diff --git a/xmrstak/backend/amd/minethd.cpp b/xmrstak/backend/amd/minethd.cpp
index e83527c..8dfbce5 100644
--- a/xmrstak/backend/amd/minethd.cpp
+++ b/xmrstak/backend/amd/minethd.cpp
@@ -97,6 +97,8 @@ bool minethd::init_gpus()
vGpuData[i].rawIntensity = cfg.intensity;
vGpuData[i].workSize = cfg.w_size;
vGpuData[i].stridedIndex = cfg.stridedIndex;
+ vGpuData[i].memChunk = cfg.memChunk;
+ vGpuData[i].compMode = cfg.compMode;
}
return InitOpenCL(vGpuData.data(), n, jconf::inst()->GetPlatformIdx()) == ERR_SUCCESS;
@@ -139,7 +141,7 @@ std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_wor
if(cfg.cpu_aff >= 0)
{
#if defined(__APPLE__)
- printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
+ printer::inst()->print_msg(L1, "WARNING on macOS thread affinity is only advisory.");
#endif
printer::inst()->print_msg(L1, "Starting AMD GPU thread %d, affinity: %d.", i, (int)cfg.cpu_aff);
@@ -245,7 +247,7 @@ void minethd::work_main()
if ( (*((uint64_t*)(bResult + 24))) < oWork.iTarget)
executor::inst()->push_event(ex_event(job_result(oWork.sJobID, results[i], bResult, iThreadNo), oWork.iPoolId));
else
- executor::inst()->push_event(ex_event("AMD Invalid Result", oWork.iPoolId));
+ executor::inst()->push_event(ex_event("AMD Invalid Result", pGpuCtx->deviceIdx, oWork.iPoolId));
}
iCount += pGpuCtx->rawIntensity;
diff --git a/xmrstak/backend/cpu/autoAdjust.hpp b/xmrstak/backend/cpu/autoAdjust.hpp
index 7bdb14e..db805ec 100644
--- a/xmrstak/backend/cpu/autoAdjust.hpp
+++ b/xmrstak/backend/cpu/autoAdjust.hpp
@@ -33,25 +33,21 @@ class autoAdjust
{
public:
- size_t hashMemSize;
- size_t halfHashMemSize;
-
- autoAdjust()
+ bool printConfig()
{
+ size_t hashMemSizeKB;
+ size_t halfHashMemSizeKB;
+
if(::jconf::inst()->IsCurrencyMonero())
{
- hashMemSize = MONERO_MEMORY;
- halfHashMemSize = hashMemSize / 2u;
+ hashMemSizeKB = MONERO_MEMORY / 1024u;
+ halfHashMemSizeKB = hashMemSizeKB / 2u;
}
else
{
- hashMemSize = AEON_MEMORY;
- halfHashMemSize = hashMemSize / 2u;
+ hashMemSizeKB = AEON_MEMORY / 1024u;
+ halfHashMemSizeKB = hashMemSizeKB / 2u;
}
- }
-
- bool printConfig()
- {
configEditor configTpl{};
@@ -63,9 +59,10 @@ public:
std::string conf;
- if(!detectL3Size() || L3KB_size < halfHashMemSize || L3KB_size > (halfHashMemSize * 100u))
+
+ if(!detectL3Size() || L3KB_size < halfHashMemSizeKB || L3KB_size > (halfHashMemSizeKB * 2048u))
{
- if(L3KB_size < halfHashMemSize || L3KB_size > (halfHashMemSize * 100))
+ if(L3KB_size < halfHashMemSizeKB || L3KB_size > (halfHashMemSizeKB * 2048))
printer::inst()->print_msg(L0, "Autoconf failed: L3 size sanity check failed - %u KB.", L3KB_size);
conf += std::string(" { \"low_power_mode\" : false, \"no_prefetch\" : true, \"affine_to_cpu\" : false },\n");
@@ -88,7 +85,7 @@ public:
if(L3KB_size <= 0)
break;
- double_mode = L3KB_size / hashMemSize > (int32_t)(corecnt-i);
+ double_mode = L3KB_size / hashMemSizeKB > (int32_t)(corecnt-i);
conf += std::string(" { \"low_power_mode\" : ");
conf += std::string(double_mode ? "true" : "false");
@@ -107,9 +104,9 @@ public:
aff_id++;
if(double_mode)
- L3KB_size -= hashMemSize * 2u;
+ L3KB_size -= hashMemSizeKB * 2u;
else
- L3KB_size -= hashMemSize;
+ L3KB_size -= hashMemSizeKB;
}
}
@@ -142,7 +139,7 @@ private:
}
L3KB_size = ((get_masked(cpu_info[1], 31, 22) + 1) * (get_masked(cpu_info[1], 21, 12) + 1) *
- (get_masked(cpu_info[1], 11, 0) + 1) * (cpu_info[2] + 1)) / halfHashMemSize;
+ (get_masked(cpu_info[1], 11, 0) + 1) * (cpu_info[2] + 1)) / 1024;
return true;
}
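
With the sizes converted to KiB, the autoconfig budgets the L3 cache per thread: a double-hash thread consumes two scratchpads, and double mode is chosen while the leftover cache exceeds one scratchpad per remaining core. A sketch of that loop with assumed numbers (10 MiB of L3, Monero's 2 MiB scratchpad):

    #include <cstdio>

    int main()
    {
        const int hashMemSizeKB = 2048; // Monero scratchpad in KiB
        const int corecnt = 4;
        int L3KB_size = 10240;          // assumed 10 MiB of L3 cache
        for (int i = 0; i < corecnt && L3KB_size > 0; ++i)
        {
            // double hash only while the leftover cache exceeds one
            // scratchpad per remaining core
            bool double_mode = L3KB_size / hashMemSizeKB > (corecnt - i);
            std::printf("thread %d: low_power_mode=%s\n", i,
                        double_mode ? "true" : "false");
            L3KB_size -= double_mode ? hashMemSizeKB * 2 : hashMemSizeKB;
        }
        return 0;
    }
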
diff --git a/xmrstak/backend/cpu/config.tpl b/xmrstak/backend/cpu/config.tpl
index b21a22d..cb4b950 100644
--- a/xmrstak/backend/cpu/config.tpl
+++ b/xmrstak/backend/cpu/config.tpl
@@ -2,7 +2,7 @@ R"===(
/*
* Thread configuration for each thread. Make sure it matches the number above.
* low_power_mode - This can either be a boolean (true or false), or a number between 1 to 5. When set to true,
- this mode will double the cache usage, and double the single thread performance. It will
+ * this mode will double the cache usage, and double the single thread performance. It will
* consume much less power (as less cores are working), but will max out at around 80-85% of
* the maximum performance. When set to a number N greater than 1, this mode will increase the
* cache usage and single thread performance by N times.
@@ -24,6 +24,9 @@ R"===(
* { "low_power_mode" : false, "no_prefetch" : true, "affine_to_cpu" : 0 },
* { "low_power_mode" : false, "no_prefetch" : true, "affine_to_cpu" : 1 },
* ],
+ * If you do not wish to mine with your CPU(s) then use:
+ * "cpu_threads_conf" :
+ * null,
*/
"cpu_threads_conf" :
diff --git a/xmrstak/backend/cpu/crypto/cryptonight_aesni.h b/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
index 9b6e1dc..e4ccbc3 100644
--- a/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
+++ b/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
@@ -317,10 +317,9 @@ void cryptonight_hash(const void* input, size_t len, void* output, cryptonight_c
_mm_store_si128((__m128i *)&l0[idx0 & MASK], _mm_xor_si128(bx0, cx));
idx0 = _mm_cvtsi128_si64(cx);
- bx0 = cx;
-
if(PREFETCH)
_mm_prefetch((const char*)&l0[idx0 & MASK], _MM_HINT_T0);
+ bx0 = cx;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*)&l0[idx0 & MASK])[0];
@@ -329,15 +328,15 @@ void cryptonight_hash(const void* input, size_t len, void* output, cryptonight_c
lo = _umul128(idx0, cl, &hi);
al0 += hi;
- ah0 += lo;
((uint64_t*)&l0[idx0 & MASK])[0] = al0;
+ al0 ^= cl;
+ if(PREFETCH)
+ _mm_prefetch((const char*)&l0[al0 & MASK], _MM_HINT_T0);
+ ah0 += lo;
((uint64_t*)&l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
- al0 ^= cl;
- idx0 = al0;
- if(PREFETCH)
- _mm_prefetch((const char*)&l0[idx0 & MASK], _MM_HINT_T0);
+ idx0 = al0;
}
// Optim - 90% time boundary
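
The reordering above issues each _mm_prefetch as soon as the address it depends on is known, so the following independent ALU work overlaps with the memory access. The pattern reduced to its core (a sketch; the scratchpad stores of the real loop are omitted):

    #include <immintrin.h>
    #include <cstdint>

    // Compute the next scratchpad index first, issue the prefetch, then let
    // the remaining independent work overlap with the memory access.
    uint64_t next_index(uint64_t& al0, uint64_t& ah0, uint64_t cl, uint64_t ch,
                        uint64_t hi, uint64_t lo, const uint8_t* l0, uint64_t MASK)
    {
        al0 += hi;
        al0 ^= cl;                                                // next index
        _mm_prefetch((const char*)&l0[al0 & MASK], _MM_HINT_T0);  // issue early
        ah0 += lo;                                                // overlaps with
        ah0 ^= ch;                                                // the prefetch
        return al0;
    }
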
diff --git a/xmrstak/backend/cpu/crypto/cryptonight_common.cpp b/xmrstak/backend/cpu/crypto/cryptonight_common.cpp
index 8b2207d..1026b04 100644
--- a/xmrstak/backend/cpu/crypto/cryptonight_common.cpp
+++ b/xmrstak/backend/cpu/crypto/cryptonight_common.cpp
@@ -31,6 +31,7 @@ extern "C"
#include "cryptonight.h"
#include "cryptonight_aesni.h"
#include "xmrstak/backend/cryptonight.hpp"
+#include "xmrstak/misc/console.hpp"
#include "xmrstak/jconf.hpp"
#include <stdio.h>
#include <stdlib.h>
@@ -73,6 +74,8 @@ void do_skein_hash(const void* input, size_t len, char* output) {
void (* const extra_hashes[4])(const void *, size_t, char *) = {do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash};
#ifdef _WIN32
+#include "xmrstak/misc/uac.hpp"
+
BOOL bRebootDesirable = FALSE; //If VirtualAlloc fails, suggest a reboot
BOOL AddPrivilege(TCHAR* pszPrivilege)
@@ -176,13 +179,16 @@ size_t cryptonight_init(size_t use_fast_mem, size_t use_mlock, alloc_msg* msg)
if(AddPrivilege(TEXT("SeLockMemoryPrivilege")) == 0)
{
+ printer::inst()->print_msg(L0, "Elevating because we need to set up fast memory privileges.");
+ RequestElevation();
+
if(AddLargePageRights())
{
msg->warning = "Added SeLockMemoryPrivilege to the current account. You need to reboot for it to work";
bRebootDesirable = TRUE;
}
else
- msg->warning = "Obtaning SeLockMemoryPrivilege failed.";
+ msg->warning = "Obtaining SeLockMemoryPrivilege failed.";
return 0;
}
@@ -247,6 +253,9 @@ cryptonight_ctx* cryptonight_alloc_ctx(size_t use_fast_mem, size_t use_mlock, al
#elif defined(__FreeBSD__)
ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER | MAP_PREFAULT_READ, -1, 0);
+#elif defined(__OpenBSD__)
+ ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
#else
ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, 0, 0);
diff --git a/xmrstak/backend/cpu/minethd.cpp b/xmrstak/backend/cpu/minethd.cpp
index 143b66f..cef4f8e 100644
--- a/xmrstak/backend/cpu/minethd.cpp
+++ b/xmrstak/backend/cpu/minethd.cpp
@@ -73,7 +73,16 @@ namespace cpu
bool minethd::thd_setaffinity(std::thread::native_handle_type h, uint64_t cpu_id)
{
#if defined(_WIN32)
- return SetThreadAffinityMask(h, 1ULL << cpu_id) != 0;
+ // we can only pin up to 64 threads
+ if(cpu_id < 64)
+ {
+ return SetThreadAffinityMask(h, 1ULL << cpu_id) != 0;
+ }
+ else
+ {
+ printer::inst()->print_msg(L0, "WARNING: Windows supports only affinity up to 63.");
+ return false;
+ }
#elif defined(__APPLE__)
thread_port_t mach_thread;
thread_affinity_policy_data_t policy = { static_cast<integer_t>(cpu_id) };
@@ -84,6 +93,8 @@ bool minethd::thd_setaffinity(std::thread::native_handle_type h, uint64_t cpu_id
CPU_ZERO(&mn);
CPU_SET(cpu_id, &mn);
return pthread_setaffinity_np(h, sizeof(cpuset_t), &mn) == 0;
+#elif defined(__OpenBSD__)
+ printer::inst()->print_msg(L0,"WARNING: thread pinning is not supported under OpenBSD.");
+ return true;
#else
cpu_set_t mn;
CPU_ZERO(&mn);
@@ -307,7 +318,7 @@ std::vector<iBackend*> minethd::thread_starter(uint32_t threadOffset, miner_work
if(cfg.iCpuAff >= 0)
{
#if defined(__APPLE__)
- printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
+ printer::inst()->print_msg(L1, "WARNING on macOS thread affinity is only advisory.");
#endif
printer::inst()->print_msg(L1, "Starting %dx thread, affinity: %d.", cfg.iMultiway, (int)cfg.iCpuAff);
@@ -437,12 +448,13 @@ void minethd::work_main()
globalStates::inst().calc_start_nonce(result.iNonce, oWork.bNiceHash, nonce_chunk);
}
- *piNonce = ++result.iNonce;
+ *piNonce = result.iNonce;
hash_fun(oWork.bWorkBlob, oWork.iWorkSize, result.bResult, ctx);
if (*piHashVal < oWork.iTarget)
executor::inst()->push_event(ex_event(result, oWork.iPoolId));
+ result.iNonce++;
std::this_thread::yield();
}
@@ -626,7 +638,7 @@ void minethd::multiway_work_main(cn_hash_fun_multi hash_fun_multi)
}
for (size_t i = 0; i < N; i++)
- *piNonce[i] = ++iNonce;
+ *piNonce[i] = iNonce++;
hash_fun_multi(bWorkBlob, oWork.iWorkSize, bHashOut, ctx);
@@ -634,7 +646,7 @@ void minethd::multiway_work_main(cn_hash_fun_multi hash_fun_multi)
{
if (*piHashVal[i] < oWork.iTarget)
{
- executor::inst()->push_event(ex_event(job_result(oWork.sJobID, iNonce - N + 1 + i, bHashOut + 32 * i, iThreadNo), oWork.iPoolId));
+ executor::inst()->push_event(ex_event(job_result(oWork.sJobID, iNonce - N + i, bHashOut + 32 * i, iThreadNo), oWork.iPoolId));
}
}
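
The nonce fix above makes the hashed nonce and the reported nonce agree: the work blob receives the current counter value and the counter is incremented afterwards, so multiway lane i can recover its nonce as iNonce - N + i. A minimal check of that accounting:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const int N = 4;         // multiway lanes
        uint32_t iNonce = 1000;
        uint32_t lane[N];
        for (int i = 0; i < N; ++i)
            lane[i] = iNonce++;  // post-increment: lane i hashes nonce 1000 + i
        // after hashing, lane i's nonce is recovered as iNonce - N + i
        for (int i = 0; i < N; ++i)
            assert(lane[i] == iNonce - N + i);
        return 0;
    }
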
diff --git a/xmrstak/backend/nvidia/config.tpl b/xmrstak/backend/nvidia/config.tpl
index 5479172..f489956 100644
--- a/xmrstak/backend/nvidia/config.tpl
+++ b/xmrstak/backend/nvidia/config.tpl
@@ -26,6 +26,9 @@ R"===(
* "affine_to_cpu" : false, "sync_mode" : 3,
* },
* ],
+ * If you do not wish to mine with your nVidia GPU(s) then use:
+ * "gpu_threads_conf" :
+ * null,
*/
"gpu_threads_conf" :
diff --git a/xmrstak/backend/nvidia/minethd.cpp b/xmrstak/backend/nvidia/minethd.cpp
index 5564596..867a998 100644
--- a/xmrstak/backend/nvidia/minethd.cpp
+++ b/xmrstak/backend/nvidia/minethd.cpp
@@ -80,14 +80,22 @@ minethd::minethd(miner_work& pWork, size_t iNo, const jconf::thd_cfg& cfg)
ctx.syncMode = cfg.syncMode;
this->affinity = cfg.cpu_aff;
- std::unique_lock<std::mutex> lck(thd_aff_set);
- std::future<void> order_guard = order_fix.get_future();
+ std::future<void> numa_guard = numa_promise.get_future();
+ thread_work_guard = thread_work_promise.get_future();
oWorkThd = std::thread(&minethd::work_main, this);
- order_guard.wait();
+ /* Wait until the gpu memory is initialized and numa cpu memory is pinned.
+ * The startup time is reduced if the memory is initialized in sequential order
+ * without concurrent threads (CUDA driver is less occupied).
+ */
+ numa_guard.wait();
+}
- if(affinity >= 0) //-1 means no affinity
+void minethd::start_mining()
+{
+ thread_work_promise.set_value();
+ if(this->affinity >= 0) //-1 means no affinity
if(!cpu::minethd::thd_setaffinity(oWorkThd.native_handle(), affinity))
printer::inst()->print_msg(L1, "WARNING setting affinity failed.");
}
@@ -166,7 +174,7 @@ std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_wor
if(cfg.cpu_aff >= 0)
{
#if defined(__APPLE__)
- printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
+ printer::inst()->print_msg(L1, "WARNING on macOS thread affinity is only advisory.");
#endif
printer::inst()->print_msg(L1, "Starting NVIDIA GPU thread %d, affinity: %d.", i, (int)cfg.cpu_aff);
@@ -179,6 +187,11 @@ std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_wor
}
+ for (i = 0; i < n; i++)
+ {
+ static_cast<minethd*>((*pvThreads)[i])->start_mining();
+ }
+
return pvThreads;
}
@@ -208,10 +221,18 @@ void minethd::work_main()
if(affinity >= 0) //-1 means no affinity
bindMemoryToNUMANode(affinity);
- order_fix.set_value();
- std::unique_lock<std::mutex> lck(thd_aff_set);
- lck.release();
+ if(cuda_get_deviceinfo(&ctx) != 0 || cryptonight_extra_cpu_init(&ctx) != 1)
+ {
+ printer::inst()->print_msg(L0, "Setup failed for GPU %d. Exitting.\n", (int)iThreadNo);
+ std::exit(0);
+ }
+
+ // numa memory bind and gpu memory is initialized
+ numa_promise.set_value();
+
std::this_thread::yield();
+ // wait until all NVIDIA devices are initialized
+ thread_work_guard.wait();
uint64_t iCount = 0;
cryptonight_ctx* cpu_ctx;
@@ -221,12 +242,6 @@ void minethd::work_main()
globalStates::inst().iConsumeCnt++;
- if(cuda_get_deviceinfo(&ctx) != 0 || cryptonight_extra_cpu_init(&ctx) != 1)
- {
- printer::inst()->print_msg(L0, "Setup failed for GPU %d. Exitting.\n", (int)iThreadNo);
- std::exit(0);
- }
-
bool mineMonero = strcmp_i(::jconf::inst()->GetCurrency(), "monero");
while (bQuit == 0)
@@ -287,7 +302,7 @@ void minethd::work_main()
if ( (*((uint64_t*)(bResult + 24))) < oWork.iTarget)
executor::inst()->push_event(ex_event(job_result(oWork.sJobID, foundNonce[i], bResult, iThreadNo), oWork.iPoolId));
else
- executor::inst()->push_event(ex_event("NVIDIA Invalid Result", oWork.iPoolId));
+ executor::inst()->push_event(ex_event("NVIDIA Invalid Result", ctx.device_id, oWork.iPoolId));
}
iCount += h_per_round;
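
The promise/future pair above splits startup into two phases: each GPU initializes its memory sequentially (the constructor blocks on numa_promise), and the hot loops start only once every device is up (work_main blocks on thread_work_guard until start_mining fires). A minimal sketch of that handshake; Worker and its members are placeholders, not the miner's classes:

    #include <future>
    #include <thread>
    #include <vector>
    #include <cstdio>

    struct Worker
    {
        std::promise<void> ready;   // set by the worker after slow init
        std::promise<void> go;      // set by main to start the hot loop
        std::future<void>  goFut;
        std::thread th;

        void run()
        {
            std::printf("init (sequential)\n"); // e.g. GPU memory allocation
            ready.set_value();                  // unblock main's wait below
            goFut.wait();                       // block until every worker exists
            std::printf("mining\n");
        }
    };

    int main()
    {
        std::vector<Worker> w(2);
        for (auto& x : w)                       // phase 1: serialized init
        {
            x.goFut = x.go.get_future();
            auto r  = x.ready.get_future();
            x.th = std::thread(&Worker::run, &x);
            r.wait();                           // next worker starts after this one
        }
        for (auto& x : w) x.go.set_value();     // phase 2: start all at once
        for (auto& x : w) x.th.join();
        return 0;
    }
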
diff --git a/xmrstak/backend/nvidia/minethd.hpp b/xmrstak/backend/nvidia/minethd.hpp
index d13c868..fcd24fa 100644
--- a/xmrstak/backend/nvidia/minethd.hpp
+++ b/xmrstak/backend/nvidia/minethd.hpp
@@ -32,7 +32,8 @@ private:
typedef void (*cn_hash_fun)(const void*, size_t, void*, cryptonight_ctx*);
minethd(miner_work& pWork, size_t iNo, const jconf::thd_cfg& cfg);
-
+ void start_mining();
+
void work_main();
void consume_work();
@@ -44,8 +45,11 @@ private:
static miner_work oGlobalWork;
miner_work oWork;
- std::promise<void> order_fix;
- std::mutex thd_aff_set;
+ std::promise<void> numa_promise;
+ std::promise<void> thread_work_promise;
+
+ // block thread until all NVIDIA GPUs are initialized
+ std::future<void> thread_work_guard;
std::thread oWorkThd;
int64_t affinity;
diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu b/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
index 15a6f36..cc97274 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
@@ -74,24 +74,36 @@ __device__ __forceinline__ uint64_t cuda_mul128( uint64_t multiplier, uint64_t m
template< typename T >
__device__ __forceinline__ T loadGlobal64( T * const addr )
{
+#if (__CUDA_ARCH__ < 700)
T x;
asm volatile( "ld.global.cg.u64 %0, [%1];" : "=l"( x ) : "l"( addr ) );
return x;
+#else
+ return *addr;
+#endif
}
template< typename T >
__device__ __forceinline__ T loadGlobal32( T * const addr )
{
+#if (__CUDA_ARCH__ < 700)
T x;
asm volatile( "ld.global.cg.u32 %0, [%1];" : "=r"( x ) : "l"( addr ) );
return x;
+#else
+ return *addr;
+#endif
}
template< typename T >
__device__ __forceinline__ void storeGlobal32( T* addr, T const & val )
{
+#if (__CUDA_ARCH__ < 700)
asm volatile( "st.global.cg.u32 [%0], %1;" : : "l"( addr ), "r"( val ) );
+#else
+ *addr = val;
+#endif
}
template<size_t ITERATIONS, uint32_t THREAD_SHIFT>
diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
index d865e13..92259db 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
@@ -204,7 +204,13 @@ extern "C" int cryptonight_extra_cpu_init(nvid_ctx* ctx)
break;
};
- CUDA_CHECK(ctx->device_id, cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
+ const int gpuArch = ctx->device_arch[0] * 10 + ctx->device_arch[1];
+
+ /* Disable L1 cache for GPUs before Volta.
+ * L1 speed is increased and latency reduced with Volta.
+ */
+ if(gpuArch < 70)
+ CUDA_CHECK(ctx->device_id, cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
size_t hashMemSize;
if(::jconf::inst()->IsCurrencyMonero())
@@ -441,6 +447,12 @@ extern "C" int cuda_get_deviceinfo(nvid_ctx* ctx)
maxMemUsage = size_t(1024u) * byteToMiB;
}
+ if(props.multiProcessorCount <= 6)
+ {
+ // limit memory usage for low end devices to reduce the number of threads
+ maxMemUsage = size_t(1024u) * byteToMiB;
+ }
+
int* tmp;
cudaError_t err;
// a device must be selected to get the right memory usage later on