Diffstat (limited to 'xmrstak/backend')
24 files changed, 726 insertions, 204 deletions
diff --git a/xmrstak/backend/amd/amd_gpu/gpu.cpp b/xmrstak/backend/amd/amd_gpu/gpu.cpp
index 15b8457..d9bc962 100644
--- a/xmrstak/backend/amd/amd_gpu/gpu.cpp
+++ b/xmrstak/backend/amd/amd_gpu/gpu.cpp
@@ -332,7 +332,8 @@ size_t InitOpenCLGpu(cl_context opencl_ctx, GpuContext* ctx, const char* source_
     char options[256];
     snprintf(options, sizeof(options),
-        "-DITERATIONS=%d -DMASK=%d -DWORKSIZE=%llu", hasIterations, threadMemMask, int_port(ctx->workSize));
+        "-DITERATIONS=%d -DMASK=%d -DWORKSIZE=%llu -DSTRIDED_INDEX=%d",
+        hasIterations, threadMemMask, int_port(ctx->workSize), ctx->stridedIndex ? 1 : 0);
     ret = clBuildProgram(ctx->Program, 1, &ctx->DeviceID, options, NULL, NULL);
     if(ret != CL_SUCCESS)
     {
@@ -448,68 +449,85 @@ uint32_t getNumPlatforms()
 std::vector<GpuContext> getAMDDevices(int index)
 {
     std::vector<GpuContext> ctxVec;
-    cl_platform_id * platforms = NULL;
+    std::vector<cl_platform_id> platforms;
+    std::vector<cl_device_id> device_list;
+
     cl_int clStatus;
     cl_uint num_devices;
-    cl_device_id *device_list = NULL;
-
     uint32_t numPlatforms = getNumPlatforms();
-    if(numPlatforms)
+    if(numPlatforms == 0)
+        return ctxVec;
+
+    platforms.resize(numPlatforms);
+    if((clStatus = clGetPlatformIDs(numPlatforms, platforms.data(), NULL)) != CL_SUCCESS)
+    {
+        printer::inst()->print_msg(L1,"WARNING: %s when calling clGetPlatformIDs for platform information.", err_to_str(clStatus));
+        return ctxVec;
+    }
+
+    if((clStatus = clGetDeviceIDs( platforms[index], CL_DEVICE_TYPE_GPU, 0, NULL, &num_devices)) != CL_SUCCESS)
+    {
+        printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceIDs for the number of devices.", err_to_str(clStatus));
+        return ctxVec;
+    }
+
+    device_list.resize(num_devices);
+    if((clStatus = clGetDeviceIDs( platforms[index], CL_DEVICE_TYPE_GPU, num_devices, device_list.data(), NULL)) != CL_SUCCESS)
+    {
+        printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceIDs for device information.", err_to_str(clStatus));
+        return ctxVec;
+    }
+
+    for (size_t k = 0; k < num_devices; k++)
     {
-        platforms = (cl_platform_id *) malloc(sizeof(cl_platform_id) * numPlatforms);
-        clStatus = clGetPlatformIDs(numPlatforms, platforms, NULL);
-        if(clStatus == CL_SUCCESS)
+        std::vector<char> devVendorVec(1024);
+        if((clStatus = clGetDeviceInfo(device_list[k], CL_DEVICE_VENDOR, devVendorVec.size(), devVendorVec.data(), NULL)) != CL_SUCCESS)
         {
-            clStatus = clGetDeviceIDs( platforms[index], CL_DEVICE_TYPE_GPU, 0, NULL, &num_devices);
-            if(clStatus == CL_SUCCESS)
+            printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get the device vendor name for device %u.", err_to_str(clStatus), k);
+            continue;
+        }
+
+        std::string devVendor(devVendorVec.data());
+        if( devVendor.find("Advanced Micro Devices") != std::string::npos || devVendor.find("AMD") != std::string::npos)
+        {
+            GpuContext ctx;
+            std::vector<char> devNameVec(1024);
+            size_t maxMem;
+
+            if((clStatus = clGetDeviceInfo(device_list[k], CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(int), &(ctx.computeUnits), NULL)) != CL_SUCCESS)
+            {
+                printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get CL_DEVICE_MAX_COMPUTE_UNITS for device %u.", err_to_str(clStatus), k);
+                continue;
+            }
+
+            if((clStatus = clGetDeviceInfo(device_list[k], CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(size_t), &(maxMem), NULL)) != CL_SUCCESS)
             {
-                device_list = (cl_device_id *) malloc(sizeof(cl_device_id)*num_devices);
-                clStatus = clGetDeviceIDs( platforms[index], CL_DEVICE_TYPE_GPU, num_devices, device_list, NULL);
-                if(clStatus == CL_SUCCESS)
-                {
-                    for (int k = 0; k < num_devices; k++)
-                    {
-                        cl_int clError;
-                        std::vector<char> devVendorVec(1024);
-                        clError = clGetDeviceInfo(device_list[k], CL_DEVICE_VENDOR, devVendorVec.size(), devVendorVec.data(), NULL);
-                        if(clStatus == CL_SUCCESS)
-                        {
-                            std::string devVendor(devVendorVec.data());
-                            if( devVendor.find("Advanced Micro Devices") != std::string::npos)
-                            {
-                                GpuContext ctx;
-                                ctx.deviceIdx = k;
-                                clError = clGetDeviceInfo(device_list[k], CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(int), &(ctx.computeUnits), NULL);
-                                size_t maxMem;
-                                clError = clGetDeviceInfo(device_list[k], CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(size_t), &(maxMem), NULL);
-                                clError = clGetDeviceInfo(device_list[k], CL_DEVICE_GLOBAL_MEM_SIZE, sizeof(size_t), &(ctx.freeMem), NULL);
-                                // if environment variable GPU_SINGLE_ALLOC_PERCENT is not set we can not allocate the full memory
-                                ctx.freeMem = std::min(ctx.freeMem, maxMem);
-                                std::vector<char> devNameVec(1024);
-                                clError = clGetDeviceInfo(device_list[k], CL_DEVICE_NAME, devNameVec.size(), devNameVec.data(), NULL);
-                                ctx.name = std::string(devNameVec.data());
-                                printer::inst()->print_msg(L0,"Found OpenCL GPU %s.",ctx.name.c_str());
-                                ctx.DeviceID = device_list[k];
-                                ctxVec.push_back(ctx);
-                            }
-                        }
-                        else
-                            printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get the device vendor name.", err_to_str(clStatus));
-                    }
-                }
-                else
-                    printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceIDs for device information.", err_to_str(clStatus));
-                free(device_list);
+                printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get CL_DEVICE_MAX_MEM_ALLOC_SIZE for device %u.", err_to_str(clStatus), k);
+                continue;
             }
-            else
-                printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceIDs for of devices.", err_to_str(clStatus));
+
+            if((clStatus = clGetDeviceInfo(device_list[k], CL_DEVICE_GLOBAL_MEM_SIZE, sizeof(size_t), &(ctx.freeMem), NULL)) != CL_SUCCESS)
+            {
+                printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get CL_DEVICE_GLOBAL_MEM_SIZE for device %u.", err_to_str(clStatus), k);
+                continue;
+            }
+
+            if((clStatus = clGetDeviceInfo(device_list[k], CL_DEVICE_NAME, devNameVec.size(), devNameVec.data(), NULL)) != CL_SUCCESS)
+            {
+                printer::inst()->print_msg(L1,"WARNING: %s when calling clGetDeviceInfo to get CL_DEVICE_NAME for device %u.", err_to_str(clStatus), k);
+                continue;
+            }
+
+            // if environment variable GPU_SINGLE_ALLOC_PERCENT is not set we can not allocate the full memory
+            ctx.deviceIdx = k;
+            ctx.freeMem = std::min(ctx.freeMem, maxMem);
+            ctx.name = std::string(devNameVec.data());
+            printer::inst()->print_msg(L0,"Found OpenCL GPU %s.",ctx.name.c_str());
+            ctx.DeviceID = device_list[k];
+            ctxVec.push_back(ctx);
         }
-        else
-            printer::inst()->print_msg(L1,"WARNING: %s when calling clGetPlatformIDs for platform information.", err_to_str(clStatus));
     }
-
-    free(platforms);
     return ctxVec;
 }
@@ -541,7 +559,7 @@ int getAMDPlatformIdx()
         clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR, infoSize, platformNameVec.data(), NULL);
         std::string platformName(platformNameVec.data());
-        if( platformName.find("Advanced Micro Devices") != std::string::npos)
+        if( platformName.find("Advanced Micro Devices") != std::string::npos || platformName.find("Apple") != std::string::npos)
         {
             platformIndex = i;
             printer::inst()->print_msg(L0,"Found AMD platform index id = %i, name = %s",i , platformName.c_str());
diff --git a/xmrstak/backend/amd/amd_gpu/gpu.hpp b/xmrstak/backend/amd/amd_gpu/gpu.hpp
index 123de01..c17bac1 100644
--- a/xmrstak/backend/amd/amd_gpu/gpu.hpp
+++ b/xmrstak/backend/amd/amd_gpu/gpu.hpp
@@ -9,6 +9,7 @@
 #endif
 
 #include <stdint.h>
+#include <string>
 #include <vector>
 
 #define ERR_SUCCESS (0)
@@ -23,6 +24,7 @@ struct GpuContext
     size_t deviceIdx;
     size_t rawIntensity;
     size_t workSize;
+    int stridedIndex;
 
     /*Output vars*/
     cl_device_id DeviceID;
diff --git a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
index 966199b..255fcbb 100644
--- a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
+++ b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
@@ -411,7 +411,11 @@ void AESExpandKey256(uint *keybuf)
     }
 }
 
-#define IDX(x) (x)
+#if(STRIDED_INDEX==0)
+#   define IDX(x) (x)
+#else
+#   define IDX(x) ((x) * (Threads))
+#endif
 
 __attribute__((reqd_work_group_size(WORKSIZE, 8, 1)))
 __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ulong *states, ulong Threads)
@@ -440,7 +444,12 @@ __kernel void cn0(__global ulong *input, __global uint4 *Scratchpad, __global ul
     if(gIdx < Threads)
     {
         states += 25 * gIdx;
+
+#if(STRIDED_INDEX==0)
         Scratchpad += gIdx * (ITERATIONS >> 2);
+#else
+        Scratchpad += gIdx;
+#endif
 
         ((ulong8 *)State)[0] = vload8(0, input);
         State[8] = input[8];
@@ -519,7 +528,11 @@ __kernel void cn1(__global uint4 *Scratchpad, __global ulong *states, ulong Thre
     if(gIdx < Threads)
     {
         states += 25 * gIdx;
+#if(STRIDED_INDEX==0)
         Scratchpad += gIdx * (ITERATIONS >> 2);
+#else
+        Scratchpad += gIdx;
+#endif
 
         a[0] = states[0] ^ states[4];
         b[0] = states[2] ^ states[6];
@@ -588,7 +601,11 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
     if(gIdx < Threads)
     {
         states += 25 * gIdx;
+#if(STRIDED_INDEX==0)
         Scratchpad += gIdx * (ITERATIONS >> 2);
+#else
+        Scratchpad += gIdx;
+#endif
 
 #if defined(__Tahiti__) || defined(__Pitcairn__)
 
diff --git a/xmrstak/backend/amd/amd_gpu/opencl/wolf-aes.cl b/xmrstak/backend/amd/amd_gpu/opencl/wolf-aes.cl
index 996944b..81e1644 100644
--- a/xmrstak/backend/amd/amd_gpu/opencl/wolf-aes.cl
+++ b/xmrstak/backend/amd/amd_gpu/opencl/wolf-aes.cl
@@ -74,15 +74,29 @@ static const __constant uint AES0_C[256] =
 
 #define BYTE(x, y) (amd_bfe((x), (y) << 3U, 8U))
 
-uint4 AES_Round(const __local uint *AES0, const __local uint *AES1, const __local uint *AES2, const __local uint *AES3, const uint4 X, const uint4 key)
+uint4 AES_Round(const __local uint *AES0, const __local uint *AES1, const __local uint *AES2, const __local uint *AES3, const uint4 X, uint4 key)
 {
-    uint4 Y;
-    Y.s0 = AES0[BYTE(X.s0, 0)] ^ AES1[BYTE(X.s1, 1)] ^ AES2[BYTE(X.s2, 2)] ^ AES3[BYTE(X.s3, 3)];
-    Y.s1 = AES0[BYTE(X.s1, 0)] ^ AES1[BYTE(X.s2, 1)] ^ AES2[BYTE(X.s3, 2)] ^ AES3[BYTE(X.s0, 3)];
-    Y.s2 = AES0[BYTE(X.s2, 0)] ^ AES1[BYTE(X.s3, 1)] ^ AES2[BYTE(X.s0, 2)] ^ AES3[BYTE(X.s1, 3)];
-    Y.s3 = AES0[BYTE(X.s3, 0)] ^ AES1[BYTE(X.s0, 1)] ^ AES2[BYTE(X.s1, 2)] ^ AES3[BYTE(X.s2, 3)];
-    Y ^= key;
-    return(Y);
+    key.s0 ^= AES0[BYTE(X.s0, 0)];
+    key.s1 ^= AES0[BYTE(X.s1, 0)];
+    key.s2 ^= AES0[BYTE(X.s2, 0)];
+    key.s3 ^= AES0[BYTE(X.s3, 0)];
+
+    key.s0 ^= AES2[BYTE(X.s2, 2)];
+    key.s1 ^= AES2[BYTE(X.s3, 2)];
+    key.s2 ^= AES2[BYTE(X.s0, 2)];
+    key.s3 ^= AES2[BYTE(X.s1, 2)];
+
+    key.s0 ^= AES1[BYTE(X.s1, 1)];
+    key.s1 ^= AES1[BYTE(X.s2, 1)];
+    key.s2 ^= AES1[BYTE(X.s3, 1)];
+    key.s3 ^= AES1[BYTE(X.s0, 1)];
+
+    key.s0 ^= AES3[BYTE(X.s3, 3)];
+    key.s1 ^= AES3[BYTE(X.s0, 3)];
+    key.s2 ^= AES3[BYTE(X.s1, 3)];
+    key.s3 ^= AES3[BYTE(X.s2, 3)];
+
+    return key;
 }
 
 #endif
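Editor's note - a minimal host-side sketch (not part of the patch) of the two scratchpad layouts the IDX macro above selects; scratchpad_cell, threads and cellsPerThread (= ITERATIONS >> 2 uint4 cells per work-item) are hypothetical names used only for illustration:

    // Returns the linear uint4-cell index that work-item gIdx uses for its
    // logical cell x under both addressing modes.
    size_t scratchpad_cell(size_t gIdx, size_t x, size_t threads, size_t cellsPerThread, bool strided)
    {
        if(!strided)
            return gIdx * cellsPerThread + x; // Scratchpad += gIdx * (ITERATIONS >> 2); IDX(x) = (x)
        return gIdx + x * threads;            // Scratchpad += gIdx;                     IDX(x) = (x) * (Threads)
    }

With strided indexing, neighbouring work-items touch neighbouring 16 byte cells, which is the memory pattern the new strided_index option exposes.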
diff --git a/xmrstak/backend/amd/autoAdjust.hpp b/xmrstak/backend/amd/autoAdjust.hpp
index 0b91212..0bc5239 100644
--- a/xmrstak/backend/amd/autoAdjust.hpp
+++ b/xmrstak/backend/amd/autoAdjust.hpp
@@ -123,7 +123,7 @@ private:
             // set 8 threads per block (this is a good value for the most gpus)
             conf += std::string("  { \"index\" : ") + std::to_string(ctx.deviceIdx) + ",\n" +
                 "    \"intensity\" : " + std::to_string(intensity) + ", \"worksize\" : " + std::to_string(8) + ",\n" +
-                "    \"affine_to_cpu\" : false, \n" +
+                "    \"affine_to_cpu\" : false, \"strided_index\" : true\n"
                 "  },\n";
             ++i;
         }
diff --git a/xmrstak/backend/amd/config.tpl b/xmrstak/backend/amd/config.tpl
index a93859c..af662f8 100644
--- a/xmrstak/backend/amd/config.tpl
+++ b/xmrstak/backend/amd/config.tpl
@@ -5,9 +5,12 @@ R"===(
  * intensity     - Number of parallel GPU threads (nothing to do with CPU threads)
  * worksize      - Number of local GPU threads (nothing to do with CPU threads)
  * affine_to_cpu - This will affine the thread to a CPU. This can make a GPU miner play along nicer with a CPU miner.
+ * strided_index - switch the memory pattern used for the scratchpad memory
+ *                 true  = use 16 byte contiguous memory per thread; the next memory block has an offset of intensity blocks
+ *                 false = use a contiguous block of memory per thread
  * "gpu_threads_conf" :
  * [
- *     { "index" : 0, "intensity" : 1000, "worksize" : 8, "affine_to_cpu" : false },
+ *     { "index" : 0, "intensity" : 1000, "worksize" : 8, "affine_to_cpu" : false, "strided_index" : true },
 * ],
 */
diff --git a/xmrstak/backend/amd/jconf.cpp b/xmrstak/backend/amd/jconf.cpp
index 0617aeb..07afb19 100644
--- a/xmrstak/backend/amd/jconf.cpp
+++ b/xmrstak/backend/amd/jconf.cpp
@@ -103,13 +103,14 @@ bool jconf::GetThreadConfig(size_t id, thd_cfg &cfg)
     if(!oThdConf.IsObject())
         return false;
 
-    const Value *idx, *intensity, *w_size, *aff;
+    const Value *idx, *intensity, *w_size, *aff, *stridedIndex;
     idx = GetObjectMember(oThdConf, "index");
     intensity = GetObjectMember(oThdConf, "intensity");
     w_size = GetObjectMember(oThdConf, "worksize");
     aff = GetObjectMember(oThdConf, "affine_to_cpu");
+    stridedIndex = GetObjectMember(oThdConf, "strided_index");
 
-    if(idx == nullptr || intensity == nullptr || w_size == nullptr || aff == nullptr)
+    if(idx == nullptr || intensity == nullptr || w_size == nullptr || aff == nullptr || stridedIndex == nullptr)
         return false;
 
     if(!idx->IsUint64() || !intensity->IsUint64() || !w_size->IsUint64())
@@ -118,9 +119,13 @@ bool jconf::GetThreadConfig(size_t id, thd_cfg &cfg)
     if(!aff->IsUint64() && !aff->IsBool())
         return false;
 
+    if(!stridedIndex->IsBool())
+        return false;
+
     cfg.index = idx->GetUint64();
     cfg.intensity = intensity->GetUint64();
     cfg.w_size = w_size->GetUint64();
+    cfg.stridedIndex = stridedIndex->GetBool();
 
     if(aff->IsNumber())
         cfg.cpu_aff = aff->GetInt64();
diff --git a/xmrstak/backend/amd/jconf.hpp b/xmrstak/backend/amd/jconf.hpp
index da024a4..ee1882a 100644
--- a/xmrstak/backend/amd/jconf.hpp
+++ b/xmrstak/backend/amd/jconf.hpp
@@ -26,6 +26,7 @@ public:
         size_t intensity;
         size_t w_size;
         long long cpu_aff;
+        bool stridedIndex;
     };
 
     size_t GetThreadCount();
diff --git a/xmrstak/backend/amd/minethd.cpp b/xmrstak/backend/amd/minethd.cpp
index c1399e0..103688f 100644
--- a/xmrstak/backend/amd/minethd.cpp
+++ b/xmrstak/backend/amd/minethd.cpp
@@ -96,6 +96,7 @@ bool minethd::init_gpus()
         vGpuData[i].deviceIdx = cfg.index;
         vGpuData[i].rawIntensity = cfg.intensity;
         vGpuData[i].workSize = cfg.w_size;
+        vGpuData[i].stridedIndex = cfg.stridedIndex;
     }
 
     return InitOpenCL(vGpuData.data(), n, jconf::inst()->GetPlatformIdx()) == ERR_SUCCESS;
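Editor's note - with the changes above, a generated amd.txt thread entry follows the template shown in config.tpl, e.g.:

    "gpu_threads_conf" : [
        { "index" : 0, "intensity" : 1000, "worksize" : 8, "affine_to_cpu" : false, "strided_index" : true },
    ],

jconf::GetThreadConfig now rejects a thread entry whose strided_index member is missing or not a boolean.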
diff --git a/xmrstak/backend/cpu/config.tpl b/xmrstak/backend/cpu/config.tpl
index 990a31d..b21a22d 100644
--- a/xmrstak/backend/cpu/config.tpl
+++ b/xmrstak/backend/cpu/config.tpl
@@ -1,9 +1,11 @@
 R"===(
 /*
  * Thread configuration for each thread. Make sure it matches the number above.
- * low_power_mode - This mode will double the cache usage, and double the single thread performance. It will
+ * low_power_mode - This can either be a boolean (true or false), or a number between 1 and 5. When set to true,
+ *                  this mode will double the cache usage, and double the single thread performance. It will
  *                  consume much less power (as less cores are working), but will max out at around 80-85% of
- *                  the maximum performance.
+ *                  the maximum performance. When set to a number N greater than 1, this mode will increase the
+ *                  cache usage and single thread performance by N times.
  *
  * no_prefetch - Some sytems can gain up to extra 5% here, but sometimes it will have no difference or make
  *               things slower.
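Editor's note - under the extended template above, a cpu.txt thread entry can request any multiway level; an illustrative pair of entries (values are examples, not recommendations):

    "cpu_threads_conf" : [
        { "low_power_mode" : true, "no_prefetch" : true, "affine_to_cpu" : 0 }, // boolean true == 2 hashes per thread
        { "low_power_mode" : 5, "no_prefetch" : true, "affine_to_cpu" : 1 },    // number N == N hashes per thread
    ],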
diff --git a/xmrstak/backend/cpu/crypto/cryptonight_aesni.h b/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
index 2a6a769..9b6e1dc 100644
--- a/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
+++ b/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
@@ -353,19 +353,19 @@ void cryptonight_hash(const void* input, size_t len, void* output, cryptonight_c
 // to fit temporary vars for two contexts. Function will read len*2 from input and write 64 bytes to output
 // We are still limited by L3 cache, so doubling will only work with CPUs where we have more than 2MB to core (Xeons)
 template<size_t MASK, size_t ITERATIONS, size_t MEM, bool SOFT_AES, bool PREFETCH>
-void cryptonight_double_hash(const void* input, size_t len, void* output, cryptonight_ctx* __restrict ctx0, cryptonight_ctx* __restrict ctx1)
+void cryptonight_double_hash(const void* input, size_t len, void* output, cryptonight_ctx** ctx)
 {
-    keccak((const uint8_t *)input, len, ctx0->hash_state, 200);
-    keccak((const uint8_t *)input+len, len, ctx1->hash_state, 200);
+    keccak((const uint8_t *)input, len, ctx[0]->hash_state, 200);
+    keccak((const uint8_t *)input+len, len, ctx[1]->hash_state, 200);
 
     // Optim - 99% time boundary
-    cn_explode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx0->hash_state, (__m128i*)ctx0->long_state);
-    cn_explode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx1->hash_state, (__m128i*)ctx1->long_state);
+    cn_explode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[0]->hash_state, (__m128i*)ctx[0]->long_state);
+    cn_explode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[1]->hash_state, (__m128i*)ctx[1]->long_state);
 
-    uint8_t* l0 = ctx0->long_state;
-    uint64_t* h0 = (uint64_t*)ctx0->hash_state;
-    uint8_t* l1 = ctx1->long_state;
-    uint64_t* h1 = (uint64_t*)ctx1->hash_state;
+    uint8_t* l0 = ctx[0]->long_state;
+    uint64_t* h0 = (uint64_t*)ctx[0]->hash_state;
+    uint8_t* l1 = ctx[1]->long_state;
+    uint64_t* h1 = (uint64_t*)ctx[1]->hash_state;
 
     uint64_t axl0 = h0[0] ^ h0[4];
     uint64_t axh0 = h0[1] ^ h0[5];
@@ -444,13 +444,305 @@ void cryptonight_double_hash(const void* input, size_t len, void* output, crypto
     }
 
     // Optim - 90% time boundary
-    cn_implode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx0->long_state, (__m128i*)ctx0->hash_state);
-    cn_implode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx1->long_state, (__m128i*)ctx1->hash_state);
+    cn_implode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[0]->long_state, (__m128i*)ctx[0]->hash_state);
+    cn_implode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[1]->long_state, (__m128i*)ctx[1]->hash_state);
 
     // Optim - 99% time boundary
-    keccakf((uint64_t*)ctx0->hash_state, 24);
-    extra_hashes[ctx0->hash_state[0] & 3](ctx0->hash_state, 200, (char*)output);
-    keccakf((uint64_t*)ctx1->hash_state, 24);
-    extra_hashes[ctx1->hash_state[0] & 3](ctx1->hash_state, 200, (char*)output + 32);
+    keccakf((uint64_t*)ctx[0]->hash_state, 24);
+    extra_hashes[ctx[0]->hash_state[0] & 3](ctx[0]->hash_state, 200, (char*)output);
+    keccakf((uint64_t*)ctx[1]->hash_state, 24);
+    extra_hashes[ctx[1]->hash_state[0] & 3](ctx[1]->hash_state, 200, (char*)output + 32);
+}
+
+#define CN_STEP1(a, b, c, l, ptr, idx) \
+    a = _mm_xor_si128(a, c); \
+    idx = _mm_cvtsi128_si64(a); \
+    ptr = (__m128i *)&l[idx & MASK]; \
+    if(PREFETCH) \
+        _mm_prefetch((const char*)ptr, _MM_HINT_T0); \
+    c = _mm_load_si128(ptr)
+
+#define CN_STEP2(a, b, c, l, ptr, idx) \
+    if(SOFT_AES) \
+        c = soft_aesenc(c, a); \
+    else \
+        c = _mm_aesenc_si128(c, a); \
+    b = _mm_xor_si128(b, c); \
+    _mm_store_si128(ptr, b)
+
+#define CN_STEP3(a, b, c, l, ptr, idx) \
+    idx = _mm_cvtsi128_si64(c); \
+    ptr = (__m128i *)&l[idx & MASK]; \
+    if(PREFETCH) \
+        _mm_prefetch((const char*)ptr, _MM_HINT_T0); \
+    b = _mm_load_si128(ptr)
+
+#define CN_STEP4(a, b, c, l, ptr, idx) \
+    lo = _umul128(idx, _mm_cvtsi128_si64(b), &hi); \
+    a = _mm_add_epi64(a, _mm_set_epi64x(lo, hi)); \
+    _mm_store_si128(ptr, a)
+
+// This lovelier creation will do 3 cn hashes at a time.
+template<size_t MASK, size_t ITERATIONS, size_t MEM, bool SOFT_AES, bool PREFETCH>
+void cryptonight_triple_hash(const void* input, size_t len, void* output, cryptonight_ctx** ctx)
+{
+    for (size_t i = 0; i < 3; i++)
+    {
+        keccak((const uint8_t *)input + len * i, len, ctx[i]->hash_state, 200);
+        cn_explode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[i]->hash_state, (__m128i*)ctx[i]->long_state);
+    }
+
+    uint8_t* l0 = ctx[0]->long_state;
+    uint64_t* h0 = (uint64_t*)ctx[0]->hash_state;
+    uint8_t* l1 = ctx[1]->long_state;
+    uint64_t* h1 = (uint64_t*)ctx[1]->hash_state;
+    uint8_t* l2 = ctx[2]->long_state;
+    uint64_t* h2 = (uint64_t*)ctx[2]->hash_state;
+
+    __m128i ax0 = _mm_set_epi64x(h0[1] ^ h0[5], h0[0] ^ h0[4]);
+    __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
+    __m128i ax1 = _mm_set_epi64x(h1[1] ^ h1[5], h1[0] ^ h1[4]);
+    __m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
+    __m128i ax2 = _mm_set_epi64x(h2[1] ^ h2[5], h2[0] ^ h2[4]);
+    __m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
+    __m128i cx0 = _mm_set_epi64x(0, 0);
+    __m128i cx1 = _mm_set_epi64x(0, 0);
+    __m128i cx2 = _mm_set_epi64x(0, 0);
+
+    for (size_t i = 0; i < ITERATIONS/2; i++)
+    {
+        uint64_t idx0, idx1, idx2, hi, lo;
+        __m128i *ptr0, *ptr1, *ptr2;
+
+        // EVEN ROUND
+        CN_STEP1(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP1(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP1(ax2, bx2, cx2, l2, ptr2, idx2);
+
+        CN_STEP2(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP2(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP2(ax2, bx2, cx2, l2, ptr2, idx2);
+
+        CN_STEP3(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP3(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP3(ax2, bx2, cx2, l2, ptr2, idx2);
+
+        CN_STEP4(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP4(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP4(ax2, bx2, cx2, l2, ptr2, idx2);
+
+        // ODD ROUND
+        CN_STEP1(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP1(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP1(ax2, cx2, bx2, l2, ptr2, idx2);
+
+        CN_STEP2(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP2(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP2(ax2, cx2, bx2, l2, ptr2, idx2);
+
+        CN_STEP3(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP3(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP3(ax2, cx2, bx2, l2, ptr2, idx2);
+
+        CN_STEP4(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP4(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP4(ax2, cx2, bx2, l2, ptr2, idx2);
+    }
+
+    for (size_t i = 0; i < 3; i++)
+    {
+        cn_implode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[i]->long_state, (__m128i*)ctx[i]->hash_state);
+        keccakf((uint64_t*)ctx[i]->hash_state, 24);
+        extra_hashes[ctx[i]->hash_state[0] & 3](ctx[i]->hash_state, 200, (char*)output + 32 * i);
+    }
+}
+
+// This even lovelier creation will do 4 cn hashes at a time.
+template<size_t MASK, size_t ITERATIONS, size_t MEM, bool SOFT_AES, bool PREFETCH>
+void cryptonight_quad_hash(const void* input, size_t len, void* output, cryptonight_ctx** ctx)
+{
+    for (size_t i = 0; i < 4; i++)
+    {
+        keccak((const uint8_t *)input + len * i, len, ctx[i]->hash_state, 200);
+        cn_explode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[i]->hash_state, (__m128i*)ctx[i]->long_state);
+    }
+
+    uint8_t* l0 = ctx[0]->long_state;
+    uint64_t* h0 = (uint64_t*)ctx[0]->hash_state;
+    uint8_t* l1 = ctx[1]->long_state;
+    uint64_t* h1 = (uint64_t*)ctx[1]->hash_state;
+    uint8_t* l2 = ctx[2]->long_state;
+    uint64_t* h2 = (uint64_t*)ctx[2]->hash_state;
+    uint8_t* l3 = ctx[3]->long_state;
+    uint64_t* h3 = (uint64_t*)ctx[3]->hash_state;
+
+    __m128i ax0 = _mm_set_epi64x(h0[1] ^ h0[5], h0[0] ^ h0[4]);
+    __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
+    __m128i ax1 = _mm_set_epi64x(h1[1] ^ h1[5], h1[0] ^ h1[4]);
+    __m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
+    __m128i ax2 = _mm_set_epi64x(h2[1] ^ h2[5], h2[0] ^ h2[4]);
+    __m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
+    __m128i ax3 = _mm_set_epi64x(h3[1] ^ h3[5], h3[0] ^ h3[4]);
+    __m128i bx3 = _mm_set_epi64x(h3[3] ^ h3[7], h3[2] ^ h3[6]);
+    __m128i cx0 = _mm_set_epi64x(0, 0);
+    __m128i cx1 = _mm_set_epi64x(0, 0);
+    __m128i cx2 = _mm_set_epi64x(0, 0);
+    __m128i cx3 = _mm_set_epi64x(0, 0);
+
+    for (size_t i = 0; i < ITERATIONS/2; i++)
+    {
+        uint64_t idx0, idx1, idx2, idx3, hi, lo;
+        __m128i *ptr0, *ptr1, *ptr2, *ptr3;
+
+        // EVEN ROUND
+        CN_STEP1(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP1(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP1(ax2, bx2, cx2, l2, ptr2, idx2);
+        CN_STEP1(ax3, bx3, cx3, l3, ptr3, idx3);
+
+        CN_STEP2(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP2(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP2(ax2, bx2, cx2, l2, ptr2, idx2);
+        CN_STEP2(ax3, bx3, cx3, l3, ptr3, idx3);
+
+        CN_STEP3(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP3(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP3(ax2, bx2, cx2, l2, ptr2, idx2);
+        CN_STEP3(ax3, bx3, cx3, l3, ptr3, idx3);
+
+        CN_STEP4(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP4(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP4(ax2, bx2, cx2, l2, ptr2, idx2);
+        CN_STEP4(ax3, bx3, cx3, l3, ptr3, idx3);
+
+        // ODD ROUND
+        CN_STEP1(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP1(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP1(ax2, cx2, bx2, l2, ptr2, idx2);
+        CN_STEP1(ax3, cx3, bx3, l3, ptr3, idx3);
+
+        CN_STEP2(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP2(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP2(ax2, cx2, bx2, l2, ptr2, idx2);
+        CN_STEP2(ax3, cx3, bx3, l3, ptr3, idx3);
+
+        CN_STEP3(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP3(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP3(ax2, cx2, bx2, l2, ptr2, idx2);
+        CN_STEP3(ax3, cx3, bx3, l3, ptr3, idx3);
+
+        CN_STEP4(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP4(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP4(ax2, cx2, bx2, l2, ptr2, idx2);
+        CN_STEP4(ax3, cx3, bx3, l3, ptr3, idx3);
+    }
+
+    for (size_t i = 0; i < 4; i++)
+    {
+        cn_implode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[i]->long_state, (__m128i*)ctx[i]->hash_state);
+        keccakf((uint64_t*)ctx[i]->hash_state, 24);
+        extra_hashes[ctx[i]->hash_state[0] & 3](ctx[i]->hash_state, 200, (char*)output + 32 * i);
+    }
+}
+
+// This most lovely creation will do 5 cn hashes at a time.
+template<size_t MASK, size_t ITERATIONS, size_t MEM, bool SOFT_AES, bool PREFETCH>
+void cryptonight_penta_hash(const void* input, size_t len, void* output, cryptonight_ctx** ctx)
+{
+    for (size_t i = 0; i < 5; i++)
+    {
+        keccak((const uint8_t *)input + len * i, len, ctx[i]->hash_state, 200);
+        cn_explode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[i]->hash_state, (__m128i*)ctx[i]->long_state);
+    }
+
+    uint8_t* l0 = ctx[0]->long_state;
+    uint64_t* h0 = (uint64_t*)ctx[0]->hash_state;
+    uint8_t* l1 = ctx[1]->long_state;
+    uint64_t* h1 = (uint64_t*)ctx[1]->hash_state;
+    uint8_t* l2 = ctx[2]->long_state;
+    uint64_t* h2 = (uint64_t*)ctx[2]->hash_state;
+    uint8_t* l3 = ctx[3]->long_state;
+    uint64_t* h3 = (uint64_t*)ctx[3]->hash_state;
+    uint8_t* l4 = ctx[4]->long_state;
+    uint64_t* h4 = (uint64_t*)ctx[4]->hash_state;
+
+    __m128i ax0 = _mm_set_epi64x(h0[1] ^ h0[5], h0[0] ^ h0[4]);
+    __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
+    __m128i ax1 = _mm_set_epi64x(h1[1] ^ h1[5], h1[0] ^ h1[4]);
+    __m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
+    __m128i ax2 = _mm_set_epi64x(h2[1] ^ h2[5], h2[0] ^ h2[4]);
+    __m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
+    __m128i ax3 = _mm_set_epi64x(h3[1] ^ h3[5], h3[0] ^ h3[4]);
+    __m128i bx3 = _mm_set_epi64x(h3[3] ^ h3[7], h3[2] ^ h3[6]);
+    __m128i ax4 = _mm_set_epi64x(h4[1] ^ h4[5], h4[0] ^ h4[4]);
+    __m128i bx4 = _mm_set_epi64x(h4[3] ^ h4[7], h4[2] ^ h4[6]);
+    __m128i cx0 = _mm_set_epi64x(0, 0);
+    __m128i cx1 = _mm_set_epi64x(0, 0);
+    __m128i cx2 = _mm_set_epi64x(0, 0);
+    __m128i cx3 = _mm_set_epi64x(0, 0);
+    __m128i cx4 = _mm_set_epi64x(0, 0);
+
+    for (size_t i = 0; i < ITERATIONS/2; i++)
+    {
+        uint64_t idx0, idx1, idx2, idx3, idx4, hi, lo;
+        __m128i *ptr0, *ptr1, *ptr2, *ptr3, *ptr4;
+
+        // EVEN ROUND
+        CN_STEP1(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP1(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP1(ax2, bx2, cx2, l2, ptr2, idx2);
+        CN_STEP1(ax3, bx3, cx3, l3, ptr3, idx3);
+        CN_STEP1(ax4, bx4, cx4, l4, ptr4, idx4);
+
+        CN_STEP2(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP2(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP2(ax2, bx2, cx2, l2, ptr2, idx2);
+        CN_STEP2(ax3, bx3, cx3, l3, ptr3, idx3);
+        CN_STEP2(ax4, bx4, cx4, l4, ptr4, idx4);
+
+        CN_STEP3(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP3(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP3(ax2, bx2, cx2, l2, ptr2, idx2);
+        CN_STEP3(ax3, bx3, cx3, l3, ptr3, idx3);
+        CN_STEP3(ax4, bx4, cx4, l4, ptr4, idx4);
+
+        CN_STEP4(ax0, bx0, cx0, l0, ptr0, idx0);
+        CN_STEP4(ax1, bx1, cx1, l1, ptr1, idx1);
+        CN_STEP4(ax2, bx2, cx2, l2, ptr2, idx2);
+        CN_STEP4(ax3, bx3, cx3, l3, ptr3, idx3);
+        CN_STEP4(ax4, bx4, cx4, l4, ptr4, idx4);
+
+        // ODD ROUND
+        CN_STEP1(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP1(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP1(ax2, cx2, bx2, l2, ptr2, idx2);
+        CN_STEP1(ax3, cx3, bx3, l3, ptr3, idx3);
+        CN_STEP1(ax4, cx4, bx4, l4, ptr4, idx4);
+
+        CN_STEP2(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP2(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP2(ax2, cx2, bx2, l2, ptr2, idx2);
+        CN_STEP2(ax3, cx3, bx3, l3, ptr3, idx3);
+        CN_STEP2(ax4, cx4, bx4, l4, ptr4, idx4);
+
+        CN_STEP3(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP3(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP3(ax2, cx2, bx2, l2, ptr2, idx2);
+        CN_STEP3(ax3, cx3, bx3, l3, ptr3, idx3);
+        CN_STEP3(ax4, cx4, bx4, l4, ptr4, idx4);
+
+        CN_STEP4(ax0, cx0, bx0, l0, ptr0, idx0);
+        CN_STEP4(ax1, cx1, bx1, l1, ptr1, idx1);
+        CN_STEP4(ax2, cx2, bx2, l2, ptr2, idx2);
+        CN_STEP4(ax3, cx3, bx3, l3, ptr3, idx3);
+        CN_STEP4(ax4, cx4, bx4, l4, ptr4, idx4);
+    }
+
+    for (size_t i = 0; i < 5; i++)
+    {
+        cn_implode_scratchpad<MEM, SOFT_AES, PREFETCH>((__m128i*)ctx[i]->long_state, (__m128i*)ctx[i]->hash_state);
+        keccakf((uint64_t*)ctx[i]->hash_state, 24);
+        extra_hashes[ctx[i]->hash_state[0] & 3](ctx[i]->hash_state, 200, (char*)output + 32 * i);
+    }
+}
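Editor's note - the CN_STEP macros above split one CryptoNight memory-hard iteration into four phases so the 3-, 4- and 5-way functions can interleave the scratchpad accesses of all lanes behind the two prefetches. A comment-level sketch of a single lane (an illustration, not part of the patch):

    // One inner-loop iteration for one lane; a, b, c are 128-bit SSE
    // registers, l is the 2 MiB scratchpad, MASK selects a 16 byte cell.
    //   CN_STEP1: a ^= c; idx = a.lo; ptr = &l[idx & MASK]; prefetch; c = *ptr;
    //   CN_STEP2: c = AESENC(c, a);   b ^= c;              *ptr = b;
    //   CN_STEP3: idx = c.lo; ptr = &l[idx & MASK]; prefetch; b = *ptr;
    //   CN_STEP4: (hi,lo) = 64x64-bit multiply of idx and b.lo;
    //             a.lo += hi; a.hi += lo; *ptr = a;
    // The EVEN/ODD rounds swap the b and c arguments, so the register holding
    // the freshly loaded cell alternates - a two-iteration unroll of the
    // classic loop, matching cryptonight_double_hash.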
diff --git a/xmrstak/backend/cpu/jconf.cpp b/xmrstak/backend/cpu/jconf.cpp
index 2ded8c0..6e709bd 100644
--- a/xmrstak/backend/cpu/jconf.cpp
+++ b/xmrstak/backend/cpu/jconf.cpp
@@ -116,7 +116,10 @@ bool jconf::GetThreadConfig(size_t id, thd_cfg &cfg)
     if(mode == nullptr || no_prefetch == nullptr || aff == nullptr)
         return false;
 
-    if(!mode->IsBool() || !no_prefetch->IsBool())
+    if(!mode->IsBool() && !mode->IsNumber())
+        return false;
+
+    if(!no_prefetch->IsBool())
         return false;
 
     if(!aff->IsNumber() && !aff->IsBool())
@@ -125,7 +128,11 @@ bool jconf::GetThreadConfig(size_t id, thd_cfg &cfg)
     if(aff->IsNumber() && aff->GetInt64() < 0)
         return false;
 
-    cfg.bDoubleMode = mode->GetBool();
+    if(mode->IsNumber())
+        cfg.iMultiway = (int)mode->GetInt64();
+    else
+        cfg.iMultiway = mode->GetBool() ? 2 : 1;
+
     cfg.bNoPrefetch = no_prefetch->GetBool();
 
     if(aff->IsNumber())
diff --git a/xmrstak/backend/cpu/jconf.hpp b/xmrstak/backend/cpu/jconf.hpp
index f843ed4..e98ed16 100644
--- a/xmrstak/backend/cpu/jconf.hpp
+++ b/xmrstak/backend/cpu/jconf.hpp
@@ -22,7 +22,7 @@ public:
     bool parse_config(const char* sFilename = params::inst().configFileCPU.c_str());
 
     struct thd_cfg {
-        bool bDoubleMode;
+        int iMultiway;
         bool bNoPrefetch;
         long long iCpuAff;
     };
diff --git a/xmrstak/backend/cpu/minethd.cpp b/xmrstak/backend/cpu/minethd.cpp
index cbb01f9..1c0e491 100644
--- a/xmrstak/backend/cpu/minethd.cpp
+++ b/xmrstak/backend/cpu/minethd.cpp
@@ -92,7 +92,7 @@ bool minethd::thd_setaffinity(std::thread::native_handle_type h, uint64_t cpu_id
 #endif
 }
 
-minethd::minethd(miner_work& pWork, size_t iNo, bool double_work, bool no_prefetch, int64_t affinity)
+minethd::minethd(miner_work& pWork, size_t iNo, int iMultiway, bool no_prefetch, int64_t affinity)
 {
     this->backendType = iBackend::CPU;
     oWork = pWork;
@@ -105,10 +105,25 @@ minethd::minethd(miner_work& pWork, size_t iNo, bool double_work, bool no_prefet
     std::unique_lock<std::mutex> lck(thd_aff_set);
     std::future<void> order_guard = order_fix.get_future();
 
-    if(double_work)
+    switch (iMultiway)
+    {
+    case 5:
+        oWorkThd = std::thread(&minethd::penta_work_main, this);
+        break;
+    case 4:
+        oWorkThd = std::thread(&minethd::quad_work_main, this);
+        break;
+    case 3:
+        oWorkThd = std::thread(&minethd::triple_work_main, this);
+        break;
+    case 2:
         oWorkThd = std::thread(&minethd::double_work_main, this);
-    else
+        break;
+    case 1:
+    default:
         oWorkThd = std::thread(&minethd::work_main, this);
+        break;
+    }
 
     order_guard.wait();
 
@@ -154,6 +169,7 @@ cryptonight_ctx* minethd::minethd_alloc_ctx()
     return nullptr; //Should never happen
 }
 
+static constexpr size_t MAX_N = 5;
 bool minethd::self_test()
 {
     alloc_msg msg = { 0 };
@@ -191,14 +207,15 @@ bool minethd::self_test()
     if(res == 0 && fatal)
         return false;
 
-    cryptonight_ctx *ctx0, *ctx1;
-    if((ctx0 = minethd_alloc_ctx()) == nullptr)
-        return false;
-
-    if((ctx1 = minethd_alloc_ctx()) == nullptr)
+    cryptonight_ctx *ctx[MAX_N] = {0};
+    for (int i = 0; i < MAX_N; i++)
     {
-        cryptonight_free_ctx(ctx0);
-        return false;
+        if ((ctx[i] = minethd_alloc_ctx()) == nullptr)
+        {
+            for (int j = 0; j < i; j++)
+                cryptonight_free_ctx(ctx[j]);
+            return false;
+        }
     }
 
     bool bResult = true;
 
@@ -206,31 +223,52 @@ bool minethd::self_test()
     bool mineMonero = ::jconf::inst()->IsCurrencyMonero();
     if(mineMonero)
     {
-        unsigned char out[64];
+        unsigned char out[32 * MAX_N];
         cn_hash_fun hashf;
-        cn_hash_fun_dbl hashdf;
-
+        cn_hash_fun_multi hashf_multi;
 
         hashf = func_selector(::jconf::inst()->HaveHardwareAes(), false, mineMonero);
-        hashf("This is a test", 14, out, ctx0);
+        hashf("This is a test", 14, out, ctx[0]);
         bResult = memcmp(out, "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05", 32) == 0;
 
         hashf = func_selector(::jconf::inst()->HaveHardwareAes(), true, mineMonero);
-        hashf("This is a test", 14, out, ctx0);
+        hashf("This is a test", 14, out, ctx[0]);
         bResult &= memcmp(out, "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05", 32) == 0;
 
-        hashdf = func_dbl_selector(::jconf::inst()->HaveHardwareAes(), false, mineMonero);
-        hashdf("The quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy log", 43, out, ctx0, ctx1);
+        hashf_multi = func_multi_selector(2, ::jconf::inst()->HaveHardwareAes(), false, mineMonero);
+        hashf_multi("The quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy log", 43, out, ctx);
         bResult &= memcmp(out, "\x3e\xbb\x7f\x9f\x7d\x27\x3d\x7c\x31\x8d\x86\x94\x77\x55\x0c\xc8\x00\xcf\xb1\x1b\x0c\xad\xb7\xff\xbd\xf6\xf8\x9f\x3a\x47\x1c\x59"
-            "\xb4\x77\xd5\x02\xe4\xd8\x48\x7f\x42\xdf\xe3\x8e\xed\x73\x81\x7a\xda\x91\xb7\xe2\x63\xd2\x91\x71\xb6\x5c\x44\x3a\x01\x2a\x41\x22", 64) == 0;
+                "\xb4\x77\xd5\x02\xe4\xd8\x48\x7f\x42\xdf\xe3\x8e\xed\x73\x81\x7a\xda\x91\xb7\xe2\x63\xd2\x91\x71\xb6\x5c\x44\x3a\x01\x2a\x41\x22", 64) == 0;
 
-        hashdf = func_dbl_selector(::jconf::inst()->HaveHardwareAes(), true, mineMonero);
-        hashdf("The quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy log", 43, out, ctx0, ctx1);
+        hashf_multi = func_multi_selector(2, ::jconf::inst()->HaveHardwareAes(), true, mineMonero);
+        hashf_multi("The quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy log", 43, out, ctx);
         bResult &= memcmp(out, "\x3e\xbb\x7f\x9f\x7d\x27\x3d\x7c\x31\x8d\x86\x94\x77\x55\x0c\xc8\x00\xcf\xb1\x1b\x0c\xad\xb7\xff\xbd\xf6\xf8\x9f\x3a\x47\x1c\x59"
-            "\xb4\x77\xd5\x02\xe4\xd8\x48\x7f\x42\xdf\xe3\x8e\xed\x73\x81\x7a\xda\x91\xb7\xe2\x63\xd2\x91\x71\xb6\x5c\x44\x3a\x01\x2a\x41\x22", 64) == 0;
+                "\xb4\x77\xd5\x02\xe4\xd8\x48\x7f\x42\xdf\xe3\x8e\xed\x73\x81\x7a\xda\x91\xb7\xe2\x63\xd2\x91\x71\xb6\x5c\x44\x3a\x01\x2a\x41\x22", 64) == 0;
+
+        hashf_multi = func_multi_selector(3, ::jconf::inst()->HaveHardwareAes(), false, mineMonero);
+        hashf_multi("This is a testThis is a testThis is a test", 14, out, ctx);
+        bResult &= memcmp(out, "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05", 96) == 0;
+
+        hashf_multi = func_multi_selector(4, ::jconf::inst()->HaveHardwareAes(), false, mineMonero);
+        hashf_multi("This is a testThis is a testThis is a testThis is a test", 14, out, ctx);
+        bResult &= memcmp(out, "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05", 128) == 0;
+
+        hashf_multi = func_multi_selector(5, ::jconf::inst()->HaveHardwareAes(), false, mineMonero);
+        hashf_multi("This is a testThis is a testThis is a testThis is a testThis is a test", 14, out, ctx);
+        bResult &= memcmp(out, "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05"
+                "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05", 160) == 0;
     }
-    cryptonight_free_ctx(ctx0);
-    cryptonight_free_ctx(ctx1);
+
+    for (int i = 0; i < MAX_N; i++)
+        cryptonight_free_ctx(ctx[i]);
 
     if(!bResult)
         printer::inst()->print_msg(L0,
@@ -272,12 +310,12 @@ std::vector<iBackend*> minethd::thread_starter(uint32_t threadOffset, miner_work
                 printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
 #endif
 
-            printer::inst()->print_msg(L1, "Starting %s thread, affinity: %d.", cfg.bDoubleMode ? "double" : "single", (int)cfg.iCpuAff);
+            printer::inst()->print_msg(L1, "Starting %dx thread, affinity: %d.", cfg.iMultiway, (int)cfg.iCpuAff);
         }
         else
"double" : "single"); + printer::inst()->print_msg(L1, "Starting %dx thread, no affinity.", cfg.iMultiway); - minethd* thd = new minethd(pWork, i + threadOffset, cfg.bDoubleMode, cfg.bNoPrefetch, cfg.iCpuAff); + minethd* thd = new minethd(pWork, i + threadOffset, cfg.iMultiway, cfg.bNoPrefetch, cfg.iCpuAff); pvThreads.push_back(thd); } @@ -326,7 +364,7 @@ minethd::cn_hash_fun minethd::func_selector(bool bHaveAes, bool bNoPrefetch, boo // define aeon settings #if defined(CONF_NO_AEON) || defined(CONF_NO_MONERO) - // ignore 3rd bit if only on currency is active + // ignore 3rd bit if only one currency is active digit.set(2, 0); #else digit.set(2, !mineMonero); @@ -416,22 +454,34 @@ void minethd::work_main() cryptonight_free_ctx(ctx); } -minethd::cn_hash_fun_dbl minethd::func_dbl_selector(bool bHaveAes, bool bNoPrefetch, bool mineMonero) +minethd::cn_hash_fun_multi minethd::func_multi_selector(size_t N, bool bHaveAes, bool bNoPrefetch, bool mineMonero) { // We have two independent flag bits in the functions // therefore we will build a binary digit and select the // function as a two digit binary - // Digit order SOFT_AES, NO_PREFETCH, MINER_ALGO + // Digit order SOFT_AES, NO_PREFETCH - static const cn_hash_fun_dbl func_table[] = { - /* there will be 8 function entries if `CONF_NO_MONERO` and `CONF_NO_AEON` - * is not defined. If one is defined there will be 4 entries. + static const cn_hash_fun_multi func_table[] = { + /* there will be 8*(MAX_N-1) function entries if `CONF_NO_MONERO` and `CONF_NO_AEON` + * is not defined. If one is defined there will be 4*(MAX_N-1) entries. */ #ifndef CONF_NO_MONERO cryptonight_double_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, false, false>, cryptonight_double_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, false, true>, cryptonight_double_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, false>, - cryptonight_double_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, true> + cryptonight_double_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, true>, + cryptonight_triple_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, false, false>, + cryptonight_triple_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, false, true>, + cryptonight_triple_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, false>, + cryptonight_triple_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, true>, + cryptonight_quad_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, false, false>, + cryptonight_quad_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, false, true>, + cryptonight_quad_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, false>, + cryptonight_quad_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, true>, + cryptonight_penta_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, false, false>, + cryptonight_penta_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, false, true>, + cryptonight_penta_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, false>, + cryptonight_penta_hash<MONERO_MASK, MONERO_ITER, MONERO_MEMORY, true, true> #endif #if (!defined(CONF_NO_AEON)) && (!defined(CONF_NO_MONERO)) // comma will be added only if Monero and Aeon is build @@ -441,33 +491,71 @@ minethd::cn_hash_fun_dbl minethd::func_dbl_selector(bool bHaveAes, bool bNoPrefe cryptonight_double_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, false, false>, cryptonight_double_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, false, true>, cryptonight_double_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, true, false>, - cryptonight_double_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, true, true> + cryptonight_double_hash<AEON_MASK, AEON_ITER, 
+        cryptonight_triple_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, false, false>,
+        cryptonight_triple_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, false, true>,
+        cryptonight_triple_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, true, false>,
+        cryptonight_triple_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, true, true>,
+        cryptonight_quad_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, false, false>,
+        cryptonight_quad_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, false, true>,
+        cryptonight_quad_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, true, false>,
+        cryptonight_quad_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, true, true>,
+        cryptonight_penta_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, false, false>,
+        cryptonight_penta_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, false, true>,
+        cryptonight_penta_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, true, false>,
+        cryptonight_penta_hash<AEON_MASK, AEON_ITER, AEON_MEMORY, true, true>
 #endif
     };
 
-    std::bitset<3> digit;
+    std::bitset<2> digit;
     digit.set(0, !bNoPrefetch);
     digit.set(1, !bHaveAes);
 
     // define aeon settings
 #if defined(CONF_NO_AEON) || defined(CONF_NO_MONERO)
-    // ignore 3rd bit if only on currency is active
-    digit.set(2, 0);
+    // ignore miner algo if only one currency is active
+    size_t miner_algo_base = 0;
 #else
-    digit.set(2, !mineMonero);
+    size_t miner_algo_base = mineMonero ? 0 : 4*(MAX_N-1);
 #endif
 
-    return func_table[digit.to_ulong()];
+    N = (N<2) ? 2 : (N>MAX_N) ? MAX_N : N;
+    return func_table[miner_algo_base + 4*(N-2) + digit.to_ulong()];
 }
 
-uint32_t* minethd::prep_double_work(uint8_t bDoubleWorkBlob[sizeof(miner_work::bWorkBlob) * 2])
+void minethd::double_work_main()
 {
-    memcpy(bDoubleWorkBlob, oWork.bWorkBlob, oWork.iWorkSize);
-    memcpy(bDoubleWorkBlob + oWork.iWorkSize, oWork.bWorkBlob, oWork.iWorkSize);
-    return (uint32_t*)(bDoubleWorkBlob + oWork.iWorkSize + 39);
+    multiway_work_main<2>(func_multi_selector(2, ::jconf::inst()->HaveHardwareAes(), bNoPrefetch, ::jconf::inst()->IsCurrencyMonero()));
 }
 
-void minethd::double_work_main()
+void minethd::triple_work_main()
+{
+    multiway_work_main<3>(func_multi_selector(3, ::jconf::inst()->HaveHardwareAes(), bNoPrefetch, ::jconf::inst()->IsCurrencyMonero()));
+}
+
+void minethd::quad_work_main()
+{
+    multiway_work_main<4>(func_multi_selector(4, ::jconf::inst()->HaveHardwareAes(), bNoPrefetch, ::jconf::inst()->IsCurrencyMonero()));
+}
+
+void minethd::penta_work_main()
+{
+    multiway_work_main<5>(func_multi_selector(5, ::jconf::inst()->HaveHardwareAes(), bNoPrefetch, ::jconf::inst()->IsCurrencyMonero()));
+}
+
+template<size_t N>
+void minethd::prep_multiway_work(uint8_t *bWorkBlob, uint32_t **piNonce)
+{
+    for (size_t i = 0; i < N; i++)
+    {
+        memcpy(bWorkBlob + oWork.iWorkSize * i, oWork.bWorkBlob, oWork.iWorkSize);
+        if (i > 0)
+            piNonce[i] = (uint32_t*)(bWorkBlob + oWork.iWorkSize * i + 39);
+    }
+}
+
+template<size_t N>
+void minethd::multiway_work_main(cn_hash_fun_multi hash_fun_multi)
 {
     if(affinity >= 0) //-1 means no affinity
         bindMemoryToNUMANode(affinity);
@@ -477,31 +565,26 @@ void minethd::double_work_main()
     lck.release();
     std::this_thread::yield();
 
-    cn_hash_fun_dbl hash_fun;
-    cryptonight_ctx* ctx0;
-    cryptonight_ctx* ctx1;
+    cryptonight_ctx *ctx[MAX_N];
     uint64_t iCount = 0;
-    uint64_t *piHashVal0, *piHashVal1;
-    uint32_t *piNonce0, *piNonce1;
-    uint8_t bDoubleHashOut[64];
-    uint8_t bDoubleWorkBlob[sizeof(miner_work::bWorkBlob) * 2];
+    uint64_t *piHashVal[MAX_N];
+    uint32_t *piNonce[MAX_N];
+    uint8_t bHashOut[MAX_N * 32];
+    uint8_t bWorkBlob[sizeof(miner_work::bWorkBlob) * MAX_N];
     uint32_t iNonce;
     job_result res;
 
-    hash_fun = func_dbl_selector(::jconf::inst()->HaveHardwareAes(), bNoPrefetch, ::jconf::inst()->IsCurrencyMonero());
-    ctx0 = minethd_alloc_ctx();
-    ctx1 = minethd_alloc_ctx();
-
-    piHashVal0 = (uint64_t*)(bDoubleHashOut + 24);
-    piHashVal1 = (uint64_t*)(bDoubleHashOut + 32 + 24);
-    piNonce0 = (uint32_t*)(bDoubleWorkBlob + 39);
+    for (size_t i = 0; i < N; i++)
+    {
+        ctx[i] = minethd_alloc_ctx();
+        piHashVal[i] = (uint64_t*)(bHashOut + 32 * i + 24);
+        piNonce[i] = (i == 0) ? (uint32_t*)(bWorkBlob + 39) : nullptr;
+    }
 
     if(!oWork.bStall)
-        piNonce1 = prep_double_work(bDoubleWorkBlob);
-    else
-        piNonce1 = nullptr;
+        prep_multiway_work<N>(bWorkBlob, piNonce);
 
-    globalStates::inst().inst().iConsumeCnt++;
+    globalStates::inst().iConsumeCnt++;
 
     while (bQuit == 0)
     {
@@ -515,55 +598,57 @@ void minethd::double_work_main()
             std::this_thread::sleep_for(std::chrono::milliseconds(100));
 
             consume_work();
-            piNonce1 = prep_double_work(bDoubleWorkBlob);
+            prep_multiway_work<N>(bWorkBlob, piNonce);
             continue;
         }
 
-        size_t nonce_ctr = 0;
-        constexpr size_t nonce_chunk = 4096; //Needs to be a power of 2
+        constexpr uint32_t nonce_chunk = 4096;
+        int64_t nonce_ctr = 0;
 
         assert(sizeof(job_result::sJobID) == sizeof(pool_job::sJobID));
 
         if(oWork.bNiceHash)
-            iNonce = *piNonce0;
+            iNonce = *piNonce[0];
 
         while (globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
         {
-            if ((iCount & 0x7) == 0) //Store stats every 16 hashes
+            if ((iCount++ & 0x7) == 0) //Store stats every 8*N hashes
             {
                 using namespace std::chrono;
                 uint64_t iStamp = time_point_cast<milliseconds>(high_resolution_clock::now()).time_since_epoch().count();
-                iHashCount.store(iCount, std::memory_order_relaxed);
+                iHashCount.store(iCount * N, std::memory_order_relaxed);
                 iTimestamp.store(iStamp, std::memory_order_relaxed);
             }
-            iCount += 2;
-
-
-            if((nonce_ctr++ & (nonce_chunk/2 - 1)) == 0)
+
+            nonce_ctr -= N;
+            if(nonce_ctr <= 0)
             {
                 globalStates::inst().calc_start_nonce(iNonce, oWork.bNiceHash, nonce_chunk);
+                nonce_ctr = nonce_chunk;
             }
 
-            *piNonce0 = ++iNonce;
-            *piNonce1 = ++iNonce;
+            for (size_t i = 0; i < N; i++)
+                *piNonce[i] = ++iNonce;
 
-            hash_fun(bDoubleWorkBlob, oWork.iWorkSize, bDoubleHashOut, ctx0, ctx1);
+            hash_fun_multi(bWorkBlob, oWork.iWorkSize, bHashOut, ctx);
 
-            if (*piHashVal0 < oWork.iTarget)
-                executor::inst()->push_event(ex_event(job_result(oWork.sJobID, iNonce-1, bDoubleHashOut, iThreadNo), oWork.iPoolId));
-
-            if (*piHashVal1 < oWork.iTarget)
-                executor::inst()->push_event(ex_event(job_result(oWork.sJobID, iNonce, bDoubleHashOut + 32, iThreadNo), oWork.iPoolId));
+            for (size_t i = 0; i < N; i++)
+            {
+                if (*piHashVal[i] < oWork.iTarget)
+                {
+                    executor::inst()->push_event(ex_event(job_result(oWork.sJobID, iNonce - N + 1 + i, bHashOut + 32 * i, iThreadNo), oWork.iPoolId));
+                }
+            }
 
             std::this_thread::yield();
        }
 
        consume_work();
-       piNonce1 = prep_double_work(bDoubleWorkBlob);
+       prep_multiway_work<N>(bWorkBlob, piNonce);
    }
 
-    cryptonight_free_ctx(ctx0);
-    cryptonight_free_ctx(ctx1);
+    for (int i = 0; i < N; i++)
+        cryptonight_free_ctx(ctx[i]);
 }
 
 } // namespace cpu
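Editor's note - a worked example of the func_table indexing above (illustration only). Per currency, the table stores four N=2 entries, then four entries each for N=3, 4 and 5, each group ordered by the two flag bits:

    // bit 0 = !bNoPrefetch, bit 1 = !bHaveAes
    // Example: N = 3, hardware AES available, prefetch enabled, mining Monero:
    //   digit           = (!bHaveAes << 1) | !bNoPrefetch = 0b01 = 1
    //   miner_algo_base = 0                       // Monero half of the table
    //   index           = 0 + 4*(3-2) + 1 = 5
    // func_table[5] == cryptonight_triple_hash<..., SOFT_AES = false, PREFETCH = true>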
diff --git a/xmrstak/backend/cpu/minethd.hpp b/xmrstak/backend/cpu/minethd.hpp
index 5520d9e..0433d0d 100644
--- a/xmrstak/backend/cpu/minethd.hpp
+++ b/xmrstak/backend/cpu/minethd.hpp
@@ -29,16 +29,24 @@ public:
     static cryptonight_ctx* minethd_alloc_ctx();
 
 private:
+    typedef void (*cn_hash_fun_multi)(const void*, size_t, void*, cryptonight_ctx**);
+    static cn_hash_fun_multi func_multi_selector(size_t N, bool bHaveAes, bool bNoPrefetch, bool mineMonero);
 
-    typedef void (*cn_hash_fun_dbl)(const void*, size_t, void*, cryptonight_ctx* __restrict, cryptonight_ctx* __restrict);
-    static cn_hash_fun_dbl func_dbl_selector(bool bHaveAes, bool bNoPrefetch, bool mineMonero);
+    minethd(miner_work& pWork, size_t iNo, int iMultiway, bool no_prefetch, int64_t affinity);
 
-    minethd(miner_work& pWork, size_t iNo, bool double_work, bool no_prefetch, int64_t affinity);
+    template<size_t N>
+    void multiway_work_main(cn_hash_fun_multi hash_fun_multi);
+
+    template<size_t N>
+    void prep_multiway_work(uint8_t *bWorkBlob, uint32_t **piNonce);
 
     void work_main();
     void double_work_main();
+    void triple_work_main();
+    void quad_work_main();
+    void penta_work_main();
+
     void consume_work();
-    uint32_t* prep_double_work(uint8_t bDoubleWorkBlob[sizeof(miner_work::bWorkBlob) * 2]);
 
     uint64_t iJobNo;
diff --git a/xmrstak/backend/nvidia/autoAdjust.hpp b/xmrstak/backend/nvidia/autoAdjust.hpp
index c6a7dca..be7d1ce 100644
--- a/xmrstak/backend/nvidia/autoAdjust.hpp
+++ b/xmrstak/backend/nvidia/autoAdjust.hpp
@@ -95,7 +95,7 @@ private:
             conf += std::string("  { \"index\" : ") + std::to_string(ctx.device_id) + ",\n" +
                 "    \"threads\" : " + std::to_string(ctx.device_threads) + ", \"blocks\" : " + std::to_string(ctx.device_blocks) + ",\n" +
                 "    \"bfactor\" : " + std::to_string(ctx.device_bfactor) + ", \"bsleep\" :  " + std::to_string(ctx.device_bsleep) + ",\n" +
-                "    \"affine_to_cpu\" : false,\n" +
+                "    \"affine_to_cpu\" : false, \"sync_mode\" : 3,\n" +
                 "  },\n";
         }
     }
diff --git a/xmrstak/backend/nvidia/config.tpl b/xmrstak/backend/nvidia/config.tpl
index 99dc023..5479172 100644
--- a/xmrstak/backend/nvidia/config.tpl
+++ b/xmrstak/backend/nvidia/config.tpl
@@ -9,6 +9,12 @@ R"===(
  * bsleep        - Insert a delay of X microseconds between kernel launches.
  *                 Increase if you want to reduce GPU lag. Recommended setting on GUI systems - 100
  * affine_to_cpu - This will affine the thread to a CPU. This can make a GPU miner play along nicer with a CPU miner.
+ * sync_mode     - method used to synchronize the device
+ *                 documentation: http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g69e73c7dda3fc05306ae7c811a690fac
+ *                 0 = cudaDeviceScheduleAuto
+ *                 1 = cudaDeviceScheduleSpin - create a high load on one cpu thread per gpu
+ *                 2 = cudaDeviceScheduleYield
+ *                 3 = cudaDeviceScheduleBlockingSync (default)
 *
 * On the first run the miner will look at your system and suggest a basic configuration that will work,
 * you can try to tweak it from there to get the best performance.
@@ -16,7 +22,9 @@ R"===(
 * A filled out configuration should look like this:
 * "gpu_threads_conf" :
 * [
- *     { "index" : 0, "threads" : 17, "blocks" : 60, "bfactor" : 0, "bsleep" :  0, "affine_to_cpu" : false},
+ *     { "index" : 0, "threads" : 17, "blocks" : 60, "bfactor" : 0, "bsleep" :  0,
+ *       "affine_to_cpu" : false, "sync_mode" : 3,
+ *     },
 * ],
 */
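Editor's note - a filled-out nvidia.txt thread entry matching the new template would read, e.g.:

    "gpu_threads_conf" : [
        { "index" : 0, "threads" : 17, "blocks" : 60, "bfactor" : 0, "bsleep" : 0,
          "affine_to_cpu" : false, "sync_mode" : 3,
        },
    ],

sync_mode 3 (blocking sync) keeps CPU load low; sync_mode 1 (spin) can shave latency at the cost of one busy CPU thread per GPU, as the template notes.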
diff --git a/xmrstak/backend/nvidia/jconf.cpp b/xmrstak/backend/nvidia/jconf.cpp
index 4208145..46c5726 100644
--- a/xmrstak/backend/nvidia/jconf.cpp
+++ b/xmrstak/backend/nvidia/jconf.cpp
@@ -123,16 +123,17 @@ bool jconf::GetGPUThreadConfig(size_t id, thd_cfg &cfg)
     if(!oThdConf.IsObject())
         return false;
 
-    const Value *gid, *blocks, *threads, *bfactor, *bsleep, *aff;
+    const Value *gid, *blocks, *threads, *bfactor, *bsleep, *aff, *syncMode;
     gid = GetObjectMember(oThdConf, "index");
     blocks = GetObjectMember(oThdConf, "blocks");
     threads = GetObjectMember(oThdConf, "threads");
     bfactor = GetObjectMember(oThdConf, "bfactor");
     bsleep = GetObjectMember(oThdConf, "bsleep");
     aff = GetObjectMember(oThdConf, "affine_to_cpu");
+    syncMode = GetObjectMember(oThdConf, "sync_mode");
 
     if(gid == nullptr || blocks == nullptr || threads == nullptr ||
-        bfactor == nullptr || bsleep == nullptr || aff == nullptr)
+        bfactor == nullptr || bsleep == nullptr || aff == nullptr || syncMode == nullptr)
     {
         return false;
     }
@@ -155,11 +156,17 @@ bool jconf::GetGPUThreadConfig(size_t id, thd_cfg &cfg)
     if(!aff->IsUint64() && !aff->IsBool())
         return false;
 
+    if(!syncMode->IsNumber() || syncMode->GetInt() < 0 || syncMode->GetInt() > 3)
+    {
+        printer::inst()->print_msg(L0, "Error NVIDIA: sync_mode is not a number or out of range (range: 0 <= sync_mode < 4).");
+        return false;
+    }
     cfg.id = gid->GetInt();
     cfg.blocks = blocks->GetInt();
     cfg.threads = threads->GetInt();
     cfg.bfactor = bfactor->GetInt();
     cfg.bsleep = bsleep->GetInt();
+    cfg.syncMode = syncMode->GetInt();
 
     if(aff->IsNumber())
         cfg.cpu_aff = aff->GetInt();
diff --git a/xmrstak/backend/nvidia/jconf.hpp b/xmrstak/backend/nvidia/jconf.hpp
index b09a162..7f60f1d 100644
--- a/xmrstak/backend/nvidia/jconf.hpp
+++ b/xmrstak/backend/nvidia/jconf.hpp
@@ -28,6 +28,7 @@ public:
         bool bDoubleMode;
         bool bNoPrefetch;
         int32_t cpu_aff;
+        int syncMode;
 
         long long iCpuAff;
     };
diff --git a/xmrstak/backend/nvidia/minethd.cpp b/xmrstak/backend/nvidia/minethd.cpp
index 9eab1c0..6e628fd 100644
--- a/xmrstak/backend/nvidia/minethd.cpp
+++ b/xmrstak/backend/nvidia/minethd.cpp
@@ -77,6 +77,7 @@ minethd::minethd(miner_work& pWork, size_t iNo, const jconf::thd_cfg& cfg)
     ctx.device_threads = (int)cfg.threads;
     ctx.device_bfactor = (int)cfg.bfactor;
     ctx.device_bsleep = (int)cfg.bsleep;
+    ctx.syncMode = cfg.syncMode;
     this->affinity = cfg.cpu_aff;
 
     std::unique_lock<std::mutex> lck(thd_aff_set);
diff --git a/xmrstak/backend/nvidia/nvcc_code/cryptonight.hpp b/xmrstak/backend/nvidia/nvcc_code/cryptonight.hpp
index 1b63379..afbdbaf 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cryptonight.hpp
+++ b/xmrstak/backend/nvidia/nvcc_code/cryptonight.hpp
@@ -11,7 +11,8 @@ typedef struct {
     int device_blocks;
     int device_threads;
     int device_bfactor;
-    int device_bsleep;
+    int device_bsleep;
+    int syncMode;
 
     uint32_t *d_input;
     uint32_t inputlen;
diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu b/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
index a92fa8c..0b175b5 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
@@ -167,10 +167,10 @@ __forceinline__ __device__ uint32_t shuffle(volatile uint32_t* ptr,const uint32_
 #endif
 }
 
+template<size_t ITERATIONS, uint32_t THREAD_SHIFT, uint32_t MASK>
 #ifdef XMR_STAK_THREADS
 __launch_bounds__( XMR_STAK_THREADS * 4 )
 #endif
-template<size_t ITERATIONS, uint32_t THREAD_SHIFT, uint32_t MASK>
 __global__ void cryptonight_core_gpu_phase2( int threads, int bfactor, int partidx, uint32_t * d_long_state, uint32_t * d_ctx_a, uint32_t * d_ctx_b )
 {
     __shared__ uint32_t sharedMemory[1024];
@@ -327,18 +327,22 @@ void cryptonight_core_gpu_hash(nvid_ctx* ctx)
 
     for ( int i = 0; i < partcount; i++ )
     {
-        CUDA_CHECK_KERNEL(ctx->device_id, cryptonight_core_gpu_phase2<ITERATIONS,THREAD_SHIFT,MASK><<<
-            grid,
-            block4,
-            block4.x * sizeof(uint32_t) * static_cast< int >( ctx->device_arch[0] < 3 )
-        >>>(
-            ctx->device_blocks*ctx->device_threads,
-            ctx->device_bfactor,
-            i,
-            ctx->d_long_state,
-            ctx->d_ctx_a,
-            ctx->d_ctx_b
-        ));
+        CUDA_CHECK_MSG_KERNEL(
+            ctx->device_id,
+            "\n**suggestion: Try to increase the value of the attribute 'bfactor' or \nreduce 'threads' in the NVIDIA config file.**",
+            cryptonight_core_gpu_phase2<ITERATIONS,THREAD_SHIFT,MASK><<<
+                grid,
+                block4,
+                block4.x * sizeof(uint32_t) * static_cast< int >( ctx->device_arch[0] < 3 )
+            >>>(
+                ctx->device_blocks*ctx->device_threads,
+                ctx->device_bfactor,
+                i,
+                ctx->d_long_state,
+                ctx->d_ctx_a,
+                ctx->d_ctx_b
+            )
+        );
 
         if ( partcount > 1 && ctx->device_bsleep > 0) compat_usleep( ctx->device_bsleep );
     }
diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_device.hpp b/xmrstak/backend/nvidia/nvcc_code/cuda_device.hpp
index 078c165..563bb3b 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_device.hpp
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_device.hpp
@@ -9,22 +9,41 @@
 /** execute and check a CUDA api command
  *
  * @param id gpu id (thread id)
+ * @param msg message string which should be added to the error message
  * @param ... CUDA api command
  */
-#define CUDA_CHECK(id, ...) { \
-    cudaError_t error = __VA_ARGS__; \
-    if(error!=cudaSuccess){ \
-        std::cerr << "[CUDA] Error gpu " << id << ": <" << __FILE__ << ">:" << __LINE__ << std::endl; \
-        throw std::runtime_error(std::string("[CUDA] Error: ") + std::string(cudaGetErrorString(error))); \
-    } \
-} \
+#define CUDA_CHECK_MSG(id, msg, ...) { \
+    cudaError_t error = __VA_ARGS__; \
+    if(error!=cudaSuccess){ \
+        std::cerr << "[CUDA] Error gpu " << id << ": <" << __FILE__ << ">:" << __LINE__; \
+        std::cerr << msg << std::endl; \
+        throw std::runtime_error(std::string("[CUDA] Error: ") + std::string(cudaGetErrorString(error))); \
+    } \
+} \
 ( (void) 0 )
 
+/** execute and check a CUDA api command
+ *
+ * @param id gpu id (thread id)
+ * @param ... CUDA api command
+ */
+#define CUDA_CHECK(id, ...) CUDA_CHECK_MSG(id, "", __VA_ARGS__)
+
 /** execute and check a CUDA kernel
  *
  * @param id gpu id (thread id)
  * @param ... CUDA kernel call
  */
-#define CUDA_CHECK_KERNEL(id, ...) \
-    __VA_ARGS__; \
+#define CUDA_CHECK_KERNEL(id, ...) \
+    __VA_ARGS__; \
     CUDA_CHECK(id, cudaGetLastError())
+
+/** execute and check a CUDA kernel
+ *
+ * @param id gpu id (thread id)
+ * @param msg message string which should be added to the error message
+ * @param ... CUDA kernel call
+ */
+#define CUDA_CHECK_MSG_KERNEL(id, msg, ...) \
+    __VA_ARGS__; \
+    CUDA_CHECK_MSG(id, msg, cudaGetLastError())
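Editor's note - CUDA_CHECK_MSG_KERNEL expands to the kernel launch followed by a checked cudaGetLastError(), so the hint string rides along with the CUDA error text. The phase2 launch above, for instance, expands roughly to (shmem stands in for the launch's shared-memory size expression):

    cryptonight_core_gpu_phase2<ITERATIONS,THREAD_SHIFT,MASK><<<grid, block4, shmem>>>( /* ... */ );
    CUDA_CHECK_MSG(ctx->device_id,
        "\n**suggestion: Try to increase the value of the attribute 'bfactor' or \nreduce 'threads' in the NVIDIA config file.**",
        cudaGetLastError());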
diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
index 5501d8d..492201d 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
@@ -189,7 +189,22 @@ extern "C" int cryptonight_extra_cpu_init(nvid_ctx* ctx)
     }
 
     cudaDeviceReset();
-    cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
+    switch(ctx->syncMode)
+    {
+    case 0:
+        cudaSetDeviceFlags(cudaDeviceScheduleAuto);
+        break;
+    case 1:
+        cudaSetDeviceFlags(cudaDeviceScheduleSpin);
+        break;
+    case 2:
+        cudaSetDeviceFlags(cudaDeviceScheduleYield);
+        break;
+    case 3:
+        cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
+        break;
+
+    };
     cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
 
     size_t hashMemSize;
@@ -203,7 +218,6 @@ extern "C" int cryptonight_extra_cpu_init(nvid_ctx* ctx)
     }
 
     size_t wsize = ctx->device_blocks * ctx->device_threads;
-    CUDA_CHECK(ctx->device_id, cudaMalloc(&ctx->d_long_state, hashMemSize * wsize));
     CUDA_CHECK(ctx->device_id, cudaMalloc(&ctx->d_ctx_state, 50 * sizeof(uint32_t) * wsize));
     CUDA_CHECK(ctx->device_id, cudaMalloc(&ctx->d_ctx_key1, 40 * sizeof(uint32_t) * wsize));
     CUDA_CHECK(ctx->device_id, cudaMalloc(&ctx->d_ctx_key2, 40 * sizeof(uint32_t) * wsize));
@@ -213,6 +227,10 @@ extern "C" int cryptonight_extra_cpu_init(nvid_ctx* ctx)
     CUDA_CHECK(ctx->device_id, cudaMalloc(&ctx->d_input, 21 * sizeof (uint32_t ) ));
     CUDA_CHECK(ctx->device_id, cudaMalloc(&ctx->d_result_count, sizeof (uint32_t ) ));
     CUDA_CHECK(ctx->device_id, cudaMalloc(&ctx->d_result_nonce, 10 * sizeof (uint32_t ) ));
+    CUDA_CHECK_MSG(
+        ctx->device_id,
+        "\n**suggestion: Try to reduce the value of the attribute 'threads' in the NVIDIA config file.**",
+        cudaMalloc(&ctx->d_long_state, hashMemSize * wsize));
 
     return 1;
 }
@@ -239,7 +257,11 @@ extern "C" void cryptonight_extra_cpu_final(nvid_ctx* ctx, uint32_t startNonce,
     CUDA_CHECK(ctx->device_id, cudaMemset( ctx->d_result_nonce, 0xFF, 10 * sizeof (uint32_t ) ));
     CUDA_CHECK(ctx->device_id, cudaMemset( ctx->d_result_count, 0, sizeof (uint32_t ) ));
 
-    CUDA_CHECK_KERNEL(ctx->device_id, cryptonight_extra_gpu_final<<<grid, block >>>( wsize, target, ctx->d_result_count, ctx->d_result_nonce, ctx->d_ctx_state ));
+    CUDA_CHECK_MSG_KERNEL(
+        ctx->device_id,
+        "\n**suggestion: Try to increase the value of the attribute 'bfactor' in the NVIDIA config file.**",
+        cryptonight_extra_gpu_final<<<grid, block >>>( wsize, target, ctx->d_result_count, ctx->d_result_nonce, ctx->d_ctx_state )
+    );
 
     CUDA_CHECK(ctx->device_id, cudaMemcpy( rescount, ctx->d_result_count, sizeof (uint32_t ), cudaMemcpyDeviceToHost ));
     CUDA_CHECK(ctx->device_id, cudaMemcpy( resnonce, ctx->d_result_nonce, 10 * sizeof (uint32_t ), cudaMemcpyDeviceToHost ));
@@ -380,6 +402,10 @@ extern "C" int cuda_get_deviceinfo(nvid_ctx* ctx)
         */
         ctx->device_blocks = props.multiProcessorCount *
             ( props.major < 3 ? 2 : 3 );
+
+        // increase bfactor for low end devices to avoid the miner being killed by the OS
+        if(props.multiProcessorCount < 6)
+            ctx->device_bfactor += 2;
     }
 
     if(ctx->device_threads == -1)
     {