Diffstat (limited to 'xmrstak')
-rw-r--r--  xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl | 104
-rw-r--r--  xmrstak/backend/amd/autoAdjust.hpp                |  11
-rw-r--r--  xmrstak/backend/amd/config.tpl                    |   3
-rw-r--r--  xmrstak/backend/amd/minethd.cpp                   |   4
-rw-r--r--  xmrstak/backend/cpu/autoAdjust.hpp                |  33
-rw-r--r--  xmrstak/backend/cpu/config.tpl                    |   3
-rw-r--r--  xmrstak/backend/cpu/crypto/cryptonight_aesni.h    |  13
-rw-r--r--  xmrstak/backend/cpu/minethd.cpp                   |  13
-rw-r--r--  xmrstak/backend/nvidia/config.tpl                 |   3
-rw-r--r--  xmrstak/backend/nvidia/minethd.cpp                |   4
-rw-r--r--  xmrstak/misc/executor.cpp                         |  31
-rw-r--r--  xmrstak/misc/executor.hpp                         |   1
-rw-r--r--  xmrstak/misc/uac.cpp                              |   2
-rw-r--r--  xmrstak/net/msgstruct.hpp                         |   5
14 files changed, 126 insertions, 104 deletions
diff --git a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
index 255fcbb..ec05712 100644
--- a/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
+++ b/xmrstak/backend/amd/amd_gpu/opencl/cryptonight.cl
@@ -653,21 +653,11 @@ __kernel void cn2(__global uint4 *Scratchpad, __global ulong *states, __global u
for(int i = 0; i < 25; ++i) states[i] = State[i];
- switch(State[0] & 3)
- {
- case 0:
- Branch0[atomic_inc(Branch0 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 1:
- Branch1[atomic_inc(Branch1 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 2:
- Branch2[atomic_inc(Branch2 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- case 3:
- Branch3[atomic_inc(Branch3 + Threads)] = get_global_id(0) - get_global_offset(0);
- break;
- }
+ ulong StateSwitch = State[0] & 3;
+ __global uint *destinationBranch1 = StateSwitch == 0 ? Branch0 : Branch1;
+ __global uint *destinationBranch2 = StateSwitch == 2 ? Branch2 : Branch3;
+ __global uint *destinationBranch = StateSwitch < 2 ? destinationBranch1 : destinationBranch2;
+ destinationBranch[atomic_inc(destinationBranch + Threads)] = gIdx;
}
}
mem_fence(CLK_GLOBAL_MEM_FENCE);
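
The rewritten tail of cn2 replaces the four-way switch with two selects: the low two bits of State[0] choose the destination branch buffer without divergent control flow. A minimal host-side C++ sketch of the same selection logic follows; the buffer names mirror the kernel arguments, and the atomic_inc bookkeeping is left out.

    #include <cstdint>

    // Host-side illustration of the kernel's branchless buffer selection: the
    // low two bits of State[0] pick one of four branch buffers via selects
    // instead of a switch. The atomic slot counter at Branch + Threads is
    // omitted here.
    static uint32_t* pick_branch(uint64_t state0,
                                 uint32_t* Branch0, uint32_t* Branch1,
                                 uint32_t* Branch2, uint32_t* Branch3)
    {
        const uint64_t sel = state0 & 3;                      // two-bit selector
        uint32_t* lowPair  = (sel == 0) ? Branch0 : Branch1;  // candidates 0 and 1
        uint32_t* highPair = (sel == 2) ? Branch2 : Branch3;  // candidates 2 and 3
        return (sel < 2) ? lowPair : highPair;                // final pick
    }
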
@@ -704,8 +694,7 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
for(uint i = 0; i < 4; ++i)
{
- if(i < 3) t[0] += 0x40UL;
- else t[0] += 0x08UL;
+ t[0] += i < 3 ? 0x40UL : 0x08UL;
t[2] = t[0] ^ t[1];
@@ -715,8 +704,7 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
h = m ^ p;
- if(i < 2) t[1] = 0x3000000000000000UL;
- else t[1] = 0xB000000000000000UL;
+ t[1] = i < 2 ? 0x3000000000000000UL : 0xB000000000000000UL;
}
t[0] = 0x08UL;
@@ -744,6 +732,27 @@ __kernel void Skein(__global ulong *states, __global uint *BranchBuf, __global u
#define SWAP8(x) as_ulong(as_uchar8(x).s76543210)
+#define JHXOR \
+ h0h ^= input[0]; \
+ h0l ^= input[1]; \
+ h1h ^= input[2]; \
+ h1l ^= input[3]; \
+ h2h ^= input[4]; \
+ h2l ^= input[5]; \
+ h3h ^= input[6]; \
+ h3l ^= input[7]; \
+\
+ E8; \
+\
+ h4h ^= input[0]; \
+ h4l ^= input[1]; \
+ h5h ^= input[2]; \
+ h5l ^= input[3]; \
+ h6h ^= input[4]; \
+ h6l ^= input[5]; \
+ h7h ^= input[6]; \
+ h7l ^= input[7]
+
__kernel void JH(__global ulong *states, __global uint *BranchBuf, __global uint *output, ulong Target, ulong Threads)
{
const uint idx = get_global_id(0) - get_global_offset(0);
@@ -757,46 +766,27 @@ __kernel void JH(__global ulong *states, __global uint *BranchBuf, __global uint
sph_u64 h4h = 0x754D2E7F8996A371UL, h4l = 0x62E27DF70849141DUL, h5h = 0x948F2476F7957627UL, h5l = 0x6C29804757B6D587UL, h6h = 0x6C0D8EAC2D275E5CUL, h6l = 0x0F7A0557C6508451UL, h7h = 0xEA12247067D3E47BUL, h7l = 0x69D71CD313ABE389UL;
sph_u64 tmp;
- for(int i = 0; i < 5; ++i)
+ for(int i = 0; i < 3; ++i)
{
ulong input[8];
- if(i < 3)
- {
- for(int x = 0; x < 8; ++x) input[x] = (states[(i << 3) + x]);
- }
- else if(i == 3)
- {
- input[0] = (states[24]);
- input[1] = 0x80UL;
- for(int x = 2; x < 8; ++x) input[x] = 0x00UL;
- }
- else
- {
- input[7] = 0x4006000000000000UL;
-
- for(int x = 0; x < 7; ++x) input[x] = 0x00UL;
- }
-
- h0h ^= input[0];
- h0l ^= input[1];
- h1h ^= input[2];
- h1l ^= input[3];
- h2h ^= input[4];
- h2l ^= input[5];
- h3h ^= input[6];
- h3l ^= input[7];
-
- E8;
-
- h4h ^= input[0];
- h4l ^= input[1];
- h5h ^= input[2];
- h5l ^= input[3];
- h6h ^= input[4];
- h6l ^= input[5];
- h7h ^= input[6];
- h7l ^= input[7];
+ const int shifted = i << 3;
+ for(int x = 0; x < 8; ++x) input[x] = (states[shifted + x]);
+ JHXOR;
+ }
+ {
+ ulong input[8];
+ input[0] = (states[24]);
+ input[1] = 0x80UL;
+ #pragma unroll 6
+ for(int x = 2; x < 8; ++x) input[x] = 0x00UL;
+ JHXOR;
+ }
+ {
+ ulong input[8];
+ for(int x = 0; x < 7; ++x) input[x] = 0x00UL;
+ input[7] = 0x4006000000000000UL;
+ JHXOR;
}
//output[0] = h6h;
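
The JH kernel now folds the absorb step into the JHXOR macro and splits the old five-iteration loop into three full state blocks plus two explicit padding blocks. A rough C++ sketch of that structure, with h standing in for the h0h..h7l lanes and e8 for the E8 permutation (both placeholders, not the real JH):

    #include <cstdint>

    // Illustration only: h[0..7] stands in for h0h..h3l (front half), h[8..15]
    // for h4h..h7l (back half), and e8 for the E8 permutation.
    static void jh_xor(uint64_t h[16], const uint64_t input[8], void (*e8)(uint64_t*))
    {
        for (int x = 0; x < 8; ++x) h[x] ^= input[x];      // XOR in before E8
        e8(h);                                             // permutation
        for (int x = 0; x < 8; ++x) h[8 + x] ^= input[x];  // XOR in after E8
    }

    static void jh_absorb(uint64_t h[16], const uint64_t states[25], void (*e8)(uint64_t*))
    {
        uint64_t input[8];
        for (int i = 0; i < 3; ++i)                        // three full state blocks
        {
            for (int x = 0; x < 8; ++x) input[x] = states[(i << 3) + x];
            jh_xor(h, input, e8);
        }
        input[0] = states[24];                             // last word + 0x80 padding
        input[1] = 0x80UL;
        for (int x = 2; x < 8; ++x) input[x] = 0x00UL;
        jh_xor(h, input, e8);
        for (int x = 0; x < 7; ++x) input[x] = 0x00UL;     // final length block
        input[7] = 0x4006000000000000UL;
        jh_xor(h, input, e8);
    }
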
@@ -832,6 +822,7 @@ __kernel void Blake(__global ulong *states, __global uint *BranchBuf, __global u
((uint8 *)h)[0] = vload8(0U, c_IV256);
+ #pragma unroll 4
for(uint i = 0, bitlen = 0; i < 4; ++i)
{
if(i < 3)
@@ -907,6 +898,7 @@ __kernel void Groestl(__global ulong *states, __global uint *BranchBuf, __global
State[7] = 0x0001000000000000UL;
+ #pragma unroll 4
for(uint i = 0; i < 4; ++i)
{
ulong H[8], M[8];
diff --git a/xmrstak/backend/amd/autoAdjust.hpp b/xmrstak/backend/amd/autoAdjust.hpp
index 0bc5239..511a712 100644
--- a/xmrstak/backend/amd/autoAdjust.hpp
+++ b/xmrstak/backend/amd/autoAdjust.hpp
@@ -101,7 +101,16 @@ private:
* slowing down the memory performance because of TLB cache misses
*/
size_t maxThreads = 1000u;
- if(ctx.name.compare("gfx901") == 0)
+ if(
+ ctx.name.compare("gfx901") == 0 ||
+ ctx.name.compare("gfx904") == 0 ||
+ // APU
+ ctx.name.compare("gfx902") == 0 ||
+ // UNKNOWN
+ ctx.name.compare("gfx900") == 0 ||
+ ctx.name.compare("gfx903") == 0 ||
+ ctx.name.compare("gfx905") == 0
+ )
{
/* Increase the number of threads for AMD VEGA gpus.
* Limit the number of threads based on the issue: https://github.com/fireice-uk/xmr-stak/issues/5#issuecomment-339425089
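
The auto-adjust now treats a whole family of gfx9xx device names as VEGA-class and raises the thread limit for them. A sketch of the same check written as a name-list lookup; ctx.name is assumed to hold the same device string as in the original, and the raised value below is illustrative because the actual assignment sits in the elided body of the if:

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <string>

    // VEGA-class device names matched in autoAdjust.hpp; the raised limit
    // returned here is a placeholder, not the value from the elided body.
    static std::size_t max_threads_for(const std::string& deviceName)
    {
        static const std::array<const char*, 6> vegaNames = {
            "gfx900", "gfx901", "gfx902", "gfx903", "gfx904", "gfx905"
        };
        const bool isVega = std::find(vegaNames.begin(), vegaNames.end(), deviceName)
            != vegaNames.end();
        return isVega ? 2000u : 1000u;   // 1000u matches the default above; 2000u is illustrative
    }
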
diff --git a/xmrstak/backend/amd/config.tpl b/xmrstak/backend/amd/config.tpl
index af662f8..25b75a1 100644
--- a/xmrstak/backend/amd/config.tpl
+++ b/xmrstak/backend/amd/config.tpl
@@ -12,6 +12,9 @@ R"===(
* [
* { "index" : 0, "intensity" : 1000, "worksize" : 8, "affine_to_cpu" : false, "strided_index" : true },
* ],
+ * If you do not wish to mine with your AMD GPU(s) then use:
+ * "gpu_threads_conf" :
+ * null,
*/
"gpu_threads_conf" : [
diff --git a/xmrstak/backend/amd/minethd.cpp b/xmrstak/backend/amd/minethd.cpp
index e83527c..422c28c 100644
--- a/xmrstak/backend/amd/minethd.cpp
+++ b/xmrstak/backend/amd/minethd.cpp
@@ -139,7 +139,7 @@ std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_wor
if(cfg.cpu_aff >= 0)
{
#if defined(__APPLE__)
- printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
+ printer::inst()->print_msg(L1, "WARNING on macOS thread affinity is only advisory.");
#endif
printer::inst()->print_msg(L1, "Starting AMD GPU thread %d, affinity: %d.", i, (int)cfg.cpu_aff);
@@ -245,7 +245,7 @@ void minethd::work_main()
if ( (*((uint64_t*)(bResult + 24))) < oWork.iTarget)
executor::inst()->push_event(ex_event(job_result(oWork.sJobID, results[i], bResult, iThreadNo), oWork.iPoolId));
else
- executor::inst()->push_event(ex_event("AMD Invalid Result", oWork.iPoolId));
+ executor::inst()->push_event(ex_event("AMD Invalid Result", pGpuCtx->deviceIdx, oWork.iPoolId));
}
iCount += pGpuCtx->rawIntensity;
diff --git a/xmrstak/backend/cpu/autoAdjust.hpp b/xmrstak/backend/cpu/autoAdjust.hpp
index 7bdb14e..db805ec 100644
--- a/xmrstak/backend/cpu/autoAdjust.hpp
+++ b/xmrstak/backend/cpu/autoAdjust.hpp
@@ -33,25 +33,21 @@ class autoAdjust
{
public:
- size_t hashMemSize;
- size_t halfHashMemSize;
-
- autoAdjust()
+ bool printConfig()
{
+ size_t hashMemSizeKB;
+ size_t halfHashMemSizeKB;
+
if(::jconf::inst()->IsCurrencyMonero())
{
- hashMemSize = MONERO_MEMORY;
- halfHashMemSize = hashMemSize / 2u;
+ hashMemSizeKB = MONERO_MEMORY / 1024u;
+ halfHashMemSizeKB = hashMemSizeKB / 2u;
}
else
{
- hashMemSize = AEON_MEMORY;
- halfHashMemSize = hashMemSize / 2u;
+ hashMemSizeKB = AEON_MEMORY / 1024u;
+ halfHashMemSizeKB = hashMemSizeKB / 2u;
}
- }
-
- bool printConfig()
- {
configEditor configTpl{};
@@ -63,9 +59,10 @@ public:
std::string conf;
- if(!detectL3Size() || L3KB_size < halfHashMemSize || L3KB_size > (halfHashMemSize * 100u))
+
+ if(!detectL3Size() || L3KB_size < halfHashMemSizeKB || L3KB_size > (halfHashMemSizeKB * 2048u))
{
- if(L3KB_size < halfHashMemSize || L3KB_size > (halfHashMemSize * 100))
+ if(L3KB_size < halfHashMemSizeKB || L3KB_size > (halfHashMemSizeKB * 2048))
printer::inst()->print_msg(L0, "Autoconf failed: L3 size sanity check failed - %u KB.", L3KB_size);
conf += std::string(" { \"low_power_mode\" : false, \"no_prefetch\" : true, \"affine_to_cpu\" : false },\n");
@@ -88,7 +85,7 @@ public:
if(L3KB_size <= 0)
break;
- double_mode = L3KB_size / hashMemSize > (int32_t)(corecnt-i);
+ double_mode = L3KB_size / hashMemSizeKB > (int32_t)(corecnt-i);
conf += std::string(" { \"low_power_mode\" : ");
conf += std::string(double_mode ? "true" : "false");
@@ -107,9 +104,9 @@ public:
aff_id++;
if(double_mode)
- L3KB_size -= hashMemSize * 2u;
+ L3KB_size -= hashMemSizeKB * 2u;
else
- L3KB_size -= hashMemSize;
+ L3KB_size -= hashMemSizeKB;
}
}
@@ -142,7 +139,7 @@ private:
}
L3KB_size = ((get_masked(cpu_info[1], 31, 22) + 1) * (get_masked(cpu_info[1], 21, 12) + 1) *
- (get_masked(cpu_info[1], 11, 0) + 1) * (cpu_info[2] + 1)) / halfHashMemSize;
+ (get_masked(cpu_info[1], 11, 0) + 1) * (cpu_info[2] + 1)) / 1024;
return true;
}
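
The CPU auto-adjust now keeps the scratchpad size in KiB so it shares a unit with the L3 size reported by detectL3Size(), and the per-core budget is subtracted in that same unit. A compact C++ sketch of the thread-planning loop under those units; MONERO_MEMORY, AEON_MEMORY and corecnt are as in the original, and the generated config is reduced to a printf:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Plan mining threads against the L3 budget, working in KiB throughout as
    // the reworked autoAdjust does. hashMemBytes would be MONERO_MEMORY or
    // AEON_MEMORY in the real code.
    static void plan_threads(int32_t l3KiB, std::size_t hashMemBytes, uint32_t corecnt)
    {
        const int32_t hashMemKiB = (int32_t)(hashMemBytes / 1024u);
        for (uint32_t i = 0; i < corecnt; ++i)
        {
            if (l3KiB <= 0)
                break;
            // Enough spare cache for two scratchpads on this core? -> low_power_mode
            const bool doubleMode = l3KiB / hashMemKiB > (int32_t)(corecnt - i);
            std::printf("thread %u: low_power_mode=%s\n", (unsigned)i, doubleMode ? "true" : "false");
            l3KiB -= doubleMode ? hashMemKiB * 2 : hashMemKiB;
        }
    }
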
diff --git a/xmrstak/backend/cpu/config.tpl b/xmrstak/backend/cpu/config.tpl
index b21a22d..fc4acb9 100644
--- a/xmrstak/backend/cpu/config.tpl
+++ b/xmrstak/backend/cpu/config.tpl
@@ -24,6 +24,9 @@ R"===(
* { "low_power_mode" : false, "no_prefetch" : true, "affine_to_cpu" : 0 },
* { "low_power_mode" : false, "no_prefetch" : true, "affine_to_cpu" : 1 },
* ],
+ * If you do not wish to mine with your CPU(s) then use:
+ * "cpu_threads_conf" :
+ * null,
*/
"cpu_threads_conf" :
diff --git a/xmrstak/backend/cpu/crypto/cryptonight_aesni.h b/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
index 9b6e1dc..e4ccbc3 100644
--- a/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
+++ b/xmrstak/backend/cpu/crypto/cryptonight_aesni.h
@@ -317,10 +317,9 @@ void cryptonight_hash(const void* input, size_t len, void* output, cryptonight_c
_mm_store_si128((__m128i *)&l0[idx0 & MASK], _mm_xor_si128(bx0, cx));
idx0 = _mm_cvtsi128_si64(cx);
- bx0 = cx;
-
if(PREFETCH)
_mm_prefetch((const char*)&l0[idx0 & MASK], _MM_HINT_T0);
+ bx0 = cx;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*)&l0[idx0 & MASK])[0];
@@ -329,15 +328,15 @@ void cryptonight_hash(const void* input, size_t len, void* output, cryptonight_c
lo = _umul128(idx0, cl, &hi);
al0 += hi;
- ah0 += lo;
((uint64_t*)&l0[idx0 & MASK])[0] = al0;
+ al0 ^= cl;
+ if(PREFETCH)
+ _mm_prefetch((const char*)&l0[al0 & MASK], _MM_HINT_T0);
+ ah0 += lo;
((uint64_t*)&l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
- al0 ^= cl;
- idx0 = al0;
- if(PREFETCH)
- _mm_prefetch((const char*)&l0[idx0 & MASK], _MM_HINT_T0);
+ idx0 = al0;
}
// Optim - 90% time boundary
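
The reorder in the AES-NI main loop issues each prefetch as soon as the next scratchpad address is known (right after idx0 is extracted from cx, and again right after al0 ^= cl) instead of after the dependent stores. A tiny C++ sketch of that prefetch-early pattern, with placeholder arithmetic standing in for the real round function:

    #include <immintrin.h>
    #include <cstdint>

    // Prefetch the next scratchpad line as soon as its address is known, then
    // overlap independent work with the fetch. 'scratchpad' and MASK mirror the
    // l0/MASK naming in cryptonight_aesni.h; the arithmetic is a stand-in.
    template <uint64_t MASK>
    inline uint64_t round_step(const uint8_t* scratchpad, uint64_t idx, uint64_t mixin)
    {
        const uint64_t next = idx ^ mixin;                        // address known here
        _mm_prefetch((const char*)&scratchpad[next & MASK], _MM_HINT_T0);
        const uint64_t busyWork = mixin * 0x9E3779B97F4A7C15ULL;  // placeholder work in flight
        return next + busyWork;
    }
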
diff --git a/xmrstak/backend/cpu/minethd.cpp b/xmrstak/backend/cpu/minethd.cpp
index b41e4a9..1d9165e 100644
--- a/xmrstak/backend/cpu/minethd.cpp
+++ b/xmrstak/backend/cpu/minethd.cpp
@@ -73,7 +73,16 @@ namespace cpu
bool minethd::thd_setaffinity(std::thread::native_handle_type h, uint64_t cpu_id)
{
#if defined(_WIN32)
- return SetThreadAffinityMask(h, 1ULL << cpu_id) != 0;
+ // we can only pin up to 64 threads
+ if(cpu_id < 64)
+ {
+ return SetThreadAffinityMask(h, 1ULL << cpu_id) != 0;
+ }
+ else
+ {
+ printer::inst()->print_msg(L0, "WARNING: Windows supports only affinity up to 63.");
+ return false;
+ }
#elif defined(__APPLE__)
thread_port_t mach_thread;
thread_affinity_policy_data_t policy = { static_cast<integer_t>(cpu_id) };
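
SetThreadAffinityMask() takes a single machine-word mask, so it can only address the first 64 logical CPUs of the thread's processor group, and shifting 1ULL by 64 or more would be undefined behaviour anyway; the new guard rejects larger IDs. A minimal Windows-only sketch of the same check, with the printer call replaced by fprintf:

    #include <cstdint>
    #include <cstdio>
    #ifdef _WIN32
    #include <windows.h>

    // Pin a thread to one logical CPU. The affinity mask is a single machine
    // word, so only CPU IDs 0..63 can be encoded; larger IDs are rejected
    // before the shift.
    static bool pin_thread(HANDLE h, uint64_t cpu_id)
    {
        if (cpu_id >= 64)
        {
            std::fprintf(stderr, "WARNING: affinity above CPU 63 is not supported by this call.\n");
            return false;
        }
        return SetThreadAffinityMask(h, 1ULL << cpu_id) != 0;
    }
    #endif
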
@@ -309,7 +318,7 @@ std::vector<iBackend*> minethd::thread_starter(uint32_t threadOffset, miner_work
if(cfg.iCpuAff >= 0)
{
#if defined(__APPLE__)
- printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
+ printer::inst()->print_msg(L1, "WARNING on macOS thread affinity is only advisory.");
#endif
printer::inst()->print_msg(L1, "Starting %dx thread, affinity: %d.", cfg.iMultiway, (int)cfg.iCpuAff);
diff --git a/xmrstak/backend/nvidia/config.tpl b/xmrstak/backend/nvidia/config.tpl
index 5479172..f489956 100644
--- a/xmrstak/backend/nvidia/config.tpl
+++ b/xmrstak/backend/nvidia/config.tpl
@@ -26,6 +26,9 @@ R"===(
* "affine_to_cpu" : false, "sync_mode" : 3,
* },
* ],
+ * If you do not wish to mine with your nVidia GPU(s) then use:
+ * "gpu_threads_conf" :
+ * null,
*/
"gpu_threads_conf" :
diff --git a/xmrstak/backend/nvidia/minethd.cpp b/xmrstak/backend/nvidia/minethd.cpp
index 5564596..9fd08fb 100644
--- a/xmrstak/backend/nvidia/minethd.cpp
+++ b/xmrstak/backend/nvidia/minethd.cpp
@@ -166,7 +166,7 @@ std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_wor
if(cfg.cpu_aff >= 0)
{
#if defined(__APPLE__)
- printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
+ printer::inst()->print_msg(L1, "WARNING on macOS thread affinity is only advisory.");
#endif
printer::inst()->print_msg(L1, "Starting NVIDIA GPU thread %d, affinity: %d.", i, (int)cfg.cpu_aff);
@@ -287,7 +287,7 @@ void minethd::work_main()
if ( (*((uint64_t*)(bResult + 24))) < oWork.iTarget)
executor::inst()->push_event(ex_event(job_result(oWork.sJobID, foundNonce[i], bResult, iThreadNo), oWork.iPoolId));
else
- executor::inst()->push_event(ex_event("NVIDIA Invalid Result", oWork.iPoolId));
+ executor::inst()->push_event(ex_event("NVIDIA Invalid Result", ctx.device_id, oWork.iPoolId));
}
iCount += h_per_round;
diff --git a/xmrstak/misc/executor.cpp b/xmrstak/misc/executor.cpp
index a3088a5..055739b 100644
--- a/xmrstak/misc/executor.cpp
+++ b/xmrstak/misc/executor.cpp
@@ -594,7 +594,7 @@ void executor::ex_main()
break;
case EV_GPU_RES_ERROR:
- log_result_error(std::string(ev.oGpuError.error_str));
+ log_result_error(std::string(ev.oGpuError.error_str + std::string(" GPU ID ") + std::to_string(ev.oGpuError.idx)));
break;
case EV_PERF_TICK:
@@ -761,6 +761,7 @@ void executor::hashrate_report(std::string& out)
else
out.append(1, '\n');
+ double fTotalCur[3] = { 0.0, 0.0, 0.0};
for (i = 0; i < nthd; i++)
{
double fHps[3];
@@ -775,10 +776,14 @@ void executor::hashrate_report(std::string& out)
out.append(hps_format(fHps[0], num, sizeof(num))).append(" |");
out.append(hps_format(fHps[1], num, sizeof(num))).append(" |");
out.append(hps_format(fHps[2], num, sizeof(num))).append(1, ' ');
-
- fTotal[0] += fHps[0];
- fTotal[1] += fHps[1];
- fTotal[2] += fHps[2];
+
+ fTotal[0] += (std::isnormal(fHps[0])) ? fHps[0] : 0.0;
+ fTotal[1] += (std::isnormal(fHps[1])) ? fHps[1] : 0.0;
+ fTotal[2] += (std::isnormal(fHps[2])) ? fHps[2] : 0.0;
+
+ fTotalCur[0] += (std::isnormal(fHps[0])) ? fHps[0] : 0.0;
+ fTotalCur[1] += (std::isnormal(fHps[1])) ? fHps[1] : 0.0;
+ fTotalCur[2] += (std::isnormal(fHps[2])) ? fHps[2] : 0.0;
if((i & 0x1) == 1) //Odd i's
out.append("|\n");
@@ -786,21 +791,25 @@ void executor::hashrate_report(std::string& out)
if((i & 0x1) == 1) //We had odd number of threads
out.append("|\n");
-
- if(nthd != 1)
- out.append("-----------------------------------------------------\n");
- else
- out.append("---------------------------\n");
+
+ out.append("Totals (").append(name).append("): ");
+ out.append(hps_format(fTotalCur[0], num, sizeof(num)));
+ out.append(hps_format(fTotalCur[1], num, sizeof(num)));
+ out.append(hps_format(fTotalCur[2], num, sizeof(num)));
+ out.append(" H/s\n");
+
+ out.append("-----------------------------------------------------------------\n");
}
}
- out.append("Totals: ");
+ out.append("Totals (ALL): ");
out.append(hps_format(fTotal[0], num, sizeof(num)));
out.append(hps_format(fTotal[1], num, sizeof(num)));
out.append(hps_format(fTotal[2], num, sizeof(num)));
out.append(" H/s\nHighest: ");
out.append(hps_format(fHighestHps, num, sizeof(num)));
out.append(" H/s\n");
+ out.append("-----------------------------------------------------------------\n");
}
char* time_format(char* buf, size_t len, std::chrono::system_clock::time_point time)
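
The hashrate report now keeps a per-backend running total (fTotalCur) next to the grand total and passes every sample through std::isnormal, so NaN, infinite, zero or subnormal measurements contribute nothing. A small C++ sketch of that accumulation; hps_format and the table layout are left out:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Sum per-thread hashrates into backend and global totals, treating samples
    // for which std::isnormal is false (NaN, inf, zero, subnormal) as 0.0.
    static void accumulate(const std::vector<double>& threadHps,
                           double& backendTotal, double& grandTotal)
    {
        for (double hps : threadHps)
        {
            const double v = std::isnormal(hps) ? hps : 0.0;
            backendTotal += v;
            grandTotal   += v;
        }
    }

    int main()
    {
        double backend = 0.0, all = 0.0;
        accumulate({55.2, NAN, 61.0}, backend, all);   // the NaN sample contributes nothing
        std::printf("Totals (CPU): %.1f H/s | Totals (ALL): %.1f H/s\n", backend, all);
    }
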
diff --git a/xmrstak/misc/executor.hpp b/xmrstak/misc/executor.hpp
index c2caa39..fbaa265 100644
--- a/xmrstak/misc/executor.hpp
+++ b/xmrstak/misc/executor.hpp
@@ -177,7 +177,6 @@ private:
iPoolCallTimes.clear();
tPoolConnTime = std::chrono::system_clock::now();
iPoolHashes = 0;
- iPoolDiff = 0;
}
double fHighestHps = 0.0;
diff --git a/xmrstak/misc/uac.cpp b/xmrstak/misc/uac.cpp
index 5e8d08a..ad9d394 100644
--- a/xmrstak/misc/uac.cpp
+++ b/xmrstak/misc/uac.cpp
@@ -1,5 +1,3 @@
-#pragma once
-
#ifdef _WIN32
#include "xmrstak/misc/console.hpp"
#include "xmrstak/params.hpp"
diff --git a/xmrstak/net/msgstruct.hpp b/xmrstak/net/msgstruct.hpp
index a5affc8..8c4bdbe 100644
--- a/xmrstak/net/msgstruct.hpp
+++ b/xmrstak/net/msgstruct.hpp
@@ -66,8 +66,9 @@ struct sock_err
// Unlike socket errors, GPU errors are read-only strings
struct gpu_res_err
{
+ size_t idx; // GPU index
const char* error_str;
- gpu_res_err(const char* error_str) : error_str(error_str) {}
+ gpu_res_err(const char* error_str, size_t idx) : error_str(error_str), idx(idx) {}
};
enum ex_event_name { EV_INVALID_VAL, EV_SOCK_READY, EV_SOCK_ERROR, EV_GPU_RES_ERROR,
@@ -99,7 +100,7 @@ struct ex_event
};
ex_event() { iName = EV_INVALID_VAL; iPoolId = 0;}
- ex_event(const char* gpu_err, size_t id) : iName(EV_GPU_RES_ERROR), iPoolId(id), oGpuError(gpu_err) {}
+ ex_event(const char* gpu_err, size_t gpu_idx, size_t id) : iName(EV_GPU_RES_ERROR), iPoolId(id), oGpuError(gpu_err, gpu_idx) {}
ex_event(std::string&& err, bool silent, size_t id) : iName(EV_SOCK_ERROR), iPoolId(id), oSocketError(std::move(err), silent) { }
ex_event(job_result dat, size_t id) : iName(EV_MINER_HAVE_RESULT), iPoolId(id), oJobResult(dat) {}
ex_event(pool_job dat, size_t id) : iName(EV_POOL_HAVE_JOB), iPoolId(id), oPoolJob(dat) {}
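
With the device index stored inside gpu_res_err, the EV_GPU_RES_ERROR handler can append it to the logged message. A stripped-down C++ sketch of the struct and of that formatting; log_result_error below is a stand-in for the real logger:

    #include <cstddef>
    #include <iostream>
    #include <string>

    // Read-only GPU error plus the index of the device that produced it,
    // mirroring gpu_res_err in msgstruct.hpp.
    struct gpu_res_err
    {
        std::size_t idx;        // GPU index
        const char* error_str;
        gpu_res_err(const char* error_str, std::size_t idx) : idx(idx), error_str(error_str) {}
    };

    static void log_result_error(const std::string& msg) { std::cout << msg << '\n'; }

    int main()
    {
        gpu_res_err err("AMD Invalid Result", 1);
        // Same shape as the EV_GPU_RES_ERROR case in executor.cpp
        log_result_error(std::string(err.error_str) + " GPU ID " + std::to_string(err.idx));
    }
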