summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorpsychocrypt <psychocrypt@users.noreply.github.com>2017-09-27 16:22:34 +0200
committerpsychocrypt <psychocrypt@users.noreply.github.com>2017-09-30 23:46:08 +0200
commitb1e92092bb5c93863d869a2d6b6e057918a77b2f (patch)
treed8a4e434159a12af6542c8302fb26d4860ac2a6d
parentef551cc032716d8cb21ecfdff73bb6c2bc581740 (diff)
downloadxmr-stak-b1e92092bb5c93863d869a2d6b6e057918a77b2f.zip
xmr-stak-b1e92092bb5c93863d869a2d6b6e057918a77b2f.tar.gz
fix singleton issues on windows
- pass all singletons into the backend libraries - move some cpu config features from the cpu config file to the global config.txt - add global jconf to the exported singleton list
-rw-r--r--Environment.hpp42
-rw-r--r--backend/BackendConnector.cpp11
-rw-r--r--backend/GlobalStates.cpp4
-rw-r--r--backend/GlobalStates.hpp25
-rw-r--r--backend/Plugin.hpp9
-rw-r--r--backend/amd/minethd.cpp26
-rw-r--r--backend/amd/minethd.h3
-rw-r--r--backend/cpu/autoAdjust.hpp9
-rw-r--r--backend/cpu/config.tpl61
-rw-r--r--backend/cpu/jconf.cpp94
-rw-r--r--backend/cpu/jconf.h18
-rw-r--r--backend/cpu/minethd.cpp53
-rw-r--r--backend/cpu/minethd.h4
-rw-r--r--backend/miner_work.h1
-rw-r--r--backend/nvidia/minethd.cpp27
-rw-r--r--backend/nvidia/minethd.h5
-rw-r--r--cli/cli-miner.cpp2
-rw-r--r--config.txt91
-rw-r--r--console.cpp2
-rw-r--r--console.h8
-rw-r--r--executor.cpp6
-rw-r--r--jconf.cpp92
-rw-r--r--jconf.h16
23 files changed, 330 insertions, 279 deletions
diff --git a/Environment.hpp b/Environment.hpp
new file mode 100644
index 0000000..9f887a1
--- /dev/null
+++ b/Environment.hpp
@@ -0,0 +1,42 @@
+#pragma once
+
+class printer;
+class jconf;
+
+namespace xmrstak
+{
+
+class GlobalStates;
+
+struct Environment
+{
+
+	// Process-wide singleton holding pointers to the global objects that must
+	// be shared across the dynamically loaded backend libraries (on Windows
+	// each DLL gets its own copy of function-local statics, so the main
+	// binary's instance is copied into each backend via operator=).
+	static Environment& inst()
+	{
+		static Environment env;
+		return env;
+	}
+
+	Environment& operator=(const Environment& env)
+	{
+		this->pPrinter = env.pPrinter;
+		this->pGlobalStates = env.pGlobalStates;
+		this->pJconfConfig = env.pJconfConfig;
+		return *this;
+	}
+
+	// Initialize ALL pointer members; leaving pJconfConfig out of the init
+	// list would make it indeterminate and reading it before assignment is UB.
+	Environment() : pPrinter(nullptr), pGlobalStates(nullptr), pJconfConfig(nullptr)
+	{
+	}
+
+	printer* pPrinter;
+	GlobalStates* pGlobalStates;
+	jconf* pJconfConfig;
+};
+
+} // namespace xmrstak
diff --git a/backend/BackendConnector.cpp b/backend/BackendConnector.cpp
index f1b71b4..194cb5c 100644
--- a/backend/BackendConnector.cpp
+++ b/backend/BackendConnector.cpp
@@ -43,6 +43,7 @@
#include <cstdlib>
#include "Plugin.hpp"
+#include "../Environment.hpp"
namespace xmrstak
{
@@ -56,15 +57,15 @@ bool BackendConnector::self_test()
std::vector<IBackend*>* BackendConnector::thread_starter(miner_work& pWork)
{
- GlobalStates::iGlobalJobNo = 0;
- GlobalStates::iConsumeCnt = 0;
+ GlobalStates::inst().iGlobalJobNo = 0;
+ GlobalStates::inst().iConsumeCnt = 0;
std::vector<IBackend*>* pvThreads = new std::vector<IBackend*>;
#ifndef CONF_NO_CUDA
Plugin nvidiaPlugin("NVIDIA", "xmrstak_cuda_backend");
- std::vector<IBackend*>* nvidiaThreads = nvidiaPlugin.startBackend(static_cast<uint32_t>(pvThreads->size()), pWork);
+ std::vector<IBackend*>* nvidiaThreads = nvidiaPlugin.startBackend(static_cast<uint32_t>(pvThreads->size()), pWork, Environment::inst());
pvThreads->insert(std::end(*pvThreads), std::begin(*nvidiaThreads), std::end(*nvidiaThreads));
if(nvidiaThreads->size() == 0)
printer::inst()->print_msg(L0, "WARNING: backend NVIDIA disabled.");
@@ -72,7 +73,7 @@ std::vector<IBackend*>* BackendConnector::thread_starter(miner_work& pWork)
#ifndef CONF_NO_OPENCL
Plugin amdPlugin("AMD", "xmrstak_opencl_backend");
- std::vector<IBackend*>* amdThreads = amdPlugin.startBackend(static_cast<uint32_t>(pvThreads->size()), pWork);
+ std::vector<IBackend*>* amdThreads = amdPlugin.startBackend(static_cast<uint32_t>(pvThreads->size()), pWork, Environment::inst());
pvThreads->insert(std::end(*pvThreads), std::begin(*amdThreads), std::end(*amdThreads));
if(amdThreads->size() == 0)
printer::inst()->print_msg(L0, "WARNING: backend AMD disabled.");
@@ -85,7 +86,7 @@ std::vector<IBackend*>* BackendConnector::thread_starter(miner_work& pWork)
printer::inst()->print_msg(L0, "WARNING: backend CPU disabled.");
#endif
- GlobalStates::iThreadCount = pvThreads->size();
+ GlobalStates::inst().iThreadCount = pvThreads->size();
return pvThreads;
}
diff --git a/backend/GlobalStates.cpp b/backend/GlobalStates.cpp
index 99356b5..5251da8 100644
--- a/backend/GlobalStates.cpp
+++ b/backend/GlobalStates.cpp
@@ -33,10 +33,6 @@
namespace xmrstak
{
-std::atomic<uint64_t> GlobalStates::iGlobalJobNo;
-std::atomic<uint64_t> GlobalStates::iConsumeCnt; //Threads get jobs as they are initialized
-miner_work GlobalStates::oGlobalWork;
-uint64_t GlobalStates::iThreadCount = 0;
void GlobalStates::switch_work(miner_work& pWork)
{
diff --git a/backend/GlobalStates.hpp b/backend/GlobalStates.hpp
index 293009d..a9818ba 100644
--- a/backend/GlobalStates.hpp
+++ b/backend/GlobalStates.hpp
@@ -1,6 +1,7 @@
#pragma once
#include <atomic>
#include "miner_work.h"
+#include "../Environment.hpp"
namespace xmrstak
{
@@ -8,12 +9,26 @@ namespace xmrstak
struct GlobalStates
{
- static void switch_work(miner_work& pWork);
+ static inline GlobalStates& inst()
+ {
+ auto& env = Environment::inst();
+ if(env.pGlobalStates == nullptr)
+ env.pGlobalStates = new GlobalStates;
+ return *env.pGlobalStates;
+ }
- static miner_work oGlobalWork;
- static std::atomic<uint64_t> iGlobalJobNo;
- static std::atomic<uint64_t> iConsumeCnt;
- static uint64_t iThreadCount;
+ void switch_work(miner_work& pWork);
+
+ miner_work oGlobalWork;
+ std::atomic<uint64_t> iGlobalJobNo;
+ std::atomic<uint64_t> iConsumeCnt;
+ uint64_t iThreadCount;
+
+ private:
+
+ GlobalStates() : iThreadCount(0)
+ {
+ }
};
diff --git a/backend/Plugin.hpp b/backend/Plugin.hpp
index 4c85375..9ba9716 100644
--- a/backend/Plugin.hpp
+++ b/backend/Plugin.hpp
@@ -5,6 +5,7 @@
#include <string>
#include "IBackend.hpp"
#include <iostream>
+#include "../Environment.hpp"
#ifndef USE_PRECOMPILED_HEADERS
#ifdef WIN32
@@ -33,7 +34,7 @@ struct Plugin
return;
}
#else
- libBackend = dlopen((std::string("lib") + libName + ".so").c_str(), RTLD_LAZY);
+ libBackend = dlopen((std::string("./lib") + libName + ".so").c_str(), RTLD_LAZY);
if(!libBackend)
{
std::cerr << "WARNING: "<< m_backendName <<" cannot load backend library: " << dlerror() << std::endl;
@@ -59,7 +60,7 @@ struct Plugin
#endif
}
- std::vector<IBackend*>* startBackend(uint32_t threadOffset, miner_work& pWork)
+ std::vector<IBackend*>* startBackend(uint32_t threadOffset, miner_work& pWork, Environment& env)
{
if(fn_starterBackend == nullptr)
{
@@ -68,12 +69,12 @@ struct Plugin
return pvThreads;
}
- return fn_starterBackend(threadOffset, pWork);
+ return fn_starterBackend(threadOffset, pWork, env);
}
std::string m_backendName;
- typedef std::vector<IBackend*>* (*starterBackend_t)(uint32_t threadOffset, miner_work& pWork);
+ typedef std::vector<IBackend*>* (*starterBackend_t)(uint32_t threadOffset, miner_work& pWork, Environment& env);
starterBackend_t fn_starterBackend;
diff --git a/backend/amd/minethd.cpp b/backend/amd/minethd.cpp
index a7b84a7..0821c0e 100644
--- a/backend/amd/minethd.cpp
+++ b/backend/amd/minethd.cpp
@@ -33,12 +33,13 @@
#include "../../console.h"
#include "../../crypto/cryptonight_aesni.h"
#include "../cpu/minethd.h"
-#include "../cpu/jconf.h"
+#include "../../jconf.h"
#include "../../executor.h"
#include "minethd.h"
#include "../../jconf.h"
#include "../../crypto/cryptonight.h"
+#include "../../Environment.hpp"
#include "amd_gpu/gpu.h"
@@ -64,8 +65,9 @@ extern "C" {
#ifdef WIN32
__declspec(dllexport)
#endif
-std::vector<IBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork)
+std::vector<IBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, Environment& env)
{
+ Environment::inst() = env;
return amd::minethd::thread_starter(threadOffset, pWork);
}
} // extern "C"
@@ -154,19 +156,19 @@ void minethd::switch_work(miner_work& pWork)
// faster than threads can consume them. This should never happen in real life.
// Pool cant physically send jobs faster than every 250ms or so due to net latency.
- while (GlobalStates::iConsumeCnt.load(std::memory_order_seq_cst) < GlobalStates::iThreadCount)
+ while (GlobalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < GlobalStates::inst().iThreadCount)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
- GlobalStates::oGlobalWork = pWork;
- GlobalStates::iConsumeCnt.store(0, std::memory_order_seq_cst);
- GlobalStates::iGlobalJobNo++;
+ GlobalStates::inst().oGlobalWork = pWork;
+ GlobalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
+ GlobalStates::inst().iGlobalJobNo++;
}
void minethd::consume_work()
{
- memcpy(&oWork, &GlobalStates::oGlobalWork, sizeof(miner_work));
+ memcpy(&oWork, &GlobalStates::inst().oGlobalWork, sizeof(miner_work));
iJobNo++;
- GlobalStates::iConsumeCnt++;
+ GlobalStates::inst().iConsumeCnt++;
}
@@ -176,9 +178,9 @@ void minethd::work_main()
cryptonight_ctx* cpu_ctx;
cpu_ctx = cpu::minethd::minethd_alloc_ctx();
- cn_hash_fun hash_fun = cpu::minethd::func_selector(cpu::jconf::inst()->HaveHardwareAes(), true /*bNoPrefetch*/);
+ cn_hash_fun hash_fun = cpu::minethd::func_selector(::jconf::inst()->HaveHardwareAes(), true /*bNoPrefetch*/);
- GlobalStates::iConsumeCnt++;
+ GlobalStates::inst().iConsumeCnt++;
while (bQuit == 0)
{
@@ -188,7 +190,7 @@ void minethd::work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -200,7 +202,7 @@ void minethd::work_main()
uint32_t target = oWork.iTarget32;
XMRSetJob(pGpuCtx, oWork.bWorkBlob, oWork.iWorkSize, target);
- while(GlobalStates::iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while(GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
cl_uint results[0x100];
memset(results,0,sizeof(cl_uint)*(0x100));
diff --git a/backend/amd/minethd.h b/backend/amd/minethd.h
index 349dcfd..4fb3b13 100644
--- a/backend/amd/minethd.h
+++ b/backend/amd/minethd.h
@@ -3,6 +3,7 @@
#include <atomic>
#include "./jconf.h"
#include "../IBackend.hpp"
+#include "../../Environment.hpp"
#include "amd_gpu/gpu.h"
@@ -30,7 +31,7 @@ private:
// Bottom 24 bits allow for an hour of work at 4000 H/s
inline uint32_t calc_start_nonce(uint32_t resume)
{
- return reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::iThreadCount * resume));
+ return reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume));
}
void work_main();
diff --git a/backend/cpu/autoAdjust.hpp b/backend/cpu/autoAdjust.hpp
index e7f35a9..092f085 100644
--- a/backend/cpu/autoAdjust.hpp
+++ b/backend/cpu/autoAdjust.hpp
@@ -1,6 +1,7 @@
#pragma once
#include "jconf.h"
#include "../../console.h"
+#include "../../jconf.h"
#include "../../ConfigEditor.hpp"
#include <string>
@@ -108,14 +109,14 @@ private:
int32_t cpu_info[4];
char cpustr[13] = {0};
- xmrstak::cpu::jconf::cpuid(0, 0, cpu_info);
+ ::jconf::cpuid(0, 0, cpu_info);
memcpy(cpustr, &cpu_info[1], 4);
memcpy(cpustr+4, &cpu_info[3], 4);
memcpy(cpustr+8, &cpu_info[2], 4);
if(strcmp(cpustr, "GenuineIntel") == 0)
{
- xmrstak::cpu::jconf::cpuid(4, 3, cpu_info);
+ ::jconf::cpuid(4, 3, cpu_info);
if(get_masked(cpu_info[0], 7, 5) != 3)
{
@@ -130,11 +131,11 @@ private:
}
else if(strcmp(cpustr, "AuthenticAMD") == 0)
{
- xmrstak::cpu::jconf::cpuid(0x80000006, 0, cpu_info);
+ ::jconf::cpuid(0x80000006, 0, cpu_info);
L3KB_size = get_masked(cpu_info[3], 31, 18) * 512;
- xmrstak::cpu::jconf::cpuid(1, 0, cpu_info);
+ ::jconf::cpuid(1, 0, cpu_info);
if(get_masked(cpu_info[0], 11, 8) < 0x17) //0x17h is Zen
old_amd = true;
diff --git a/backend/cpu/config.tpl b/backend/cpu/config.tpl
index 7d57340..990a31d 100644
--- a/backend/cpu/config.tpl
+++ b/backend/cpu/config.tpl
@@ -29,65 +29,4 @@ R"===(
CPUCONFIG
],
-/*
- * LARGE PAGE SUPPORT
- * Large pages need a properly set up OS. It can be difficult if you are not used to systems administration,
- * but the performance results are worth the trouble - you will get around 20% boost. Slow memory mode is
- * meant as a backup, you won't get stellar results there. If you are running into trouble, especially
- * on Windows, please read the common issues in the README.
- *
- * By default we will try to allocate large pages. This means you need to "Run As Administrator" on Windows.
- * You need to edit your system's group policies to enable locking large pages. Here are the steps from MSDN
- *
- * 1. On the Start menu, click Run. In the Open box, type gpedit.msc.
- * 2. On the Local Group Policy Editor console, expand Computer Configuration, and then expand Windows Settings.
- * 3. Expand Security Settings, and then expand Local Policies.
- * 4. Select the User Rights Assignment folder.
- * 5. The policies will be displayed in the details pane.
- * 6. In the pane, double-click Lock pages in memory.
- * 7. In the Local Security Setting – Lock pages in memory dialog box, click Add User or Group.
- * 8. In the Select Users, Service Accounts, or Groups dialog box, add an account that you will run the miner on
- * 9. Reboot for change to take effect.
- *
- * Windows also tends to fragment memory a lot. If you are running on a system with 4-8GB of RAM you might need
- * to switch off all the auto-start applications and reboot to have a large enough chunk of contiguous memory.
- *
- * On Linux you will need to configure large page support "sudo sysctl -w vm.nr_hugepages=128" and increase your
- * ulimit -l. To do do this you need to add following lines to /etc/security/limits.conf - "* soft memlock 262144"
- * and "* hard memlock 262144". You can also do it Windows-style and simply run-as-root, but this is NOT
- * recommended for security reasons.
- *
- * Memory locking means that the kernel can't swap out the page to disk - something that is unlikely to happen on a
- * command line system that isn't starved of memory. I haven't observed any difference on a CLI Linux system between
- * locked and unlocked memory. If that is your setup see option "no_mlck".
- */
-
-/*
- * use_slow_memory defines our behavior with regards to large pages. There are three possible options here:
- * always - Don't even try to use large pages. Always use slow memory.
- * warn - We will try to use large pages, but fall back to slow memory if that fails.
- * no_mlck - This option is only relevant on Linux, where we can use large pages without locking memory.
- * It will never use slow memory, but it won't attempt to mlock
- * never - If we fail to allocate large pages we will print an error and exit.
- */
-"use_slow_memory" : "warn",
-
-/*
- * NiceHash mode
- * nicehash_nonce - Limit the nonce to 3 bytes as required by nicehash. This cuts all the safety margins, and
- * if a block isn't found within 30 minutes then you might run into nonce collisions. Number
- * of threads in this mode is hard-limited to 32.
- */
-"nicehash_nonce" : false,
-
-/*
- * Manual hardware AES override
- *
- * Some VMs don't report AES capability correctly. You can set this value to true to enforce hardware AES or
- * to false to force disable AES or null to let the miner decide if AES is used.
- *
- * WARNING: setting this to true on a CPU that doesn't support hardware AES will crash the miner.
- */
-"aes_override" : null,
-
)==="
diff --git a/backend/cpu/jconf.cpp b/backend/cpu/jconf.cpp
index e919096..021d607 100644
--- a/backend/cpu/jconf.cpp
+++ b/backend/cpu/jconf.cpp
@@ -50,7 +50,7 @@ using namespace rapidjson;
/*
* This enum needs to match index in oConfigValues, otherwise we will get a runtime error
*/
-enum configEnum { aCpuThreadsConf, sUseSlowMem, bNiceHashMode, bAesOverride };
+enum configEnum { aCpuThreadsConf, sUseSlowMem };
struct configVal {
configEnum iName;
@@ -61,10 +61,7 @@ struct configVal {
// Same order as in configEnum, as per comment above
// kNullType means any type
configVal oConfigValues[] = {
- { aCpuThreadsConf, "cpu_threads_conf", kNullType },
- { sUseSlowMem, "use_slow_memory", kStringType },
- { bNiceHashMode, "nicehash_nonce", kTrueType },
- { bAesOverride, "aes_override", kNullType }
+ { aCpuThreadsConf, "cpu_threads_conf", kNullType }
};
constexpr size_t iConfigCnt = (sizeof(oConfigValues)/sizeof(oConfigValues[0]));
@@ -141,21 +138,6 @@ bool jconf::GetThreadConfig(size_t id, thd_cfg &cfg)
return true;
}
-jconf::slow_mem_cfg jconf::GetSlowMemSetting()
-{
- const char* opt = prv->configValues[sUseSlowMem]->GetString();
-
- if(strcasecmp(opt, "always") == 0)
- return always_use;
- else if(strcasecmp(opt, "no_mlck") == 0)
- return no_mlck;
- else if(strcasecmp(opt, "warn") == 0)
- return print_warning;
- else if(strcasecmp(opt, "never") == 0)
- return never_use;
- else
- return unknown_value;
-}
size_t jconf::GetThreadCount()
{
@@ -165,54 +147,12 @@ size_t jconf::GetThreadCount()
return 0;
}
-bool jconf::NeedsAutoconf()
-{
- return !prv->configValues[aCpuThreadsConf]->IsArray();
-}
-
-bool jconf::NiceHashMode()
-{
- return prv->configValues[bNiceHashMode]->GetBool();
-}
-
-void jconf::cpuid(uint32_t eax, int32_t ecx, int32_t val[4])
-{
- memset(val, 0, sizeof(int32_t)*4);
-
-#ifdef _WIN32
- __cpuidex(val, eax, ecx);
-#else
- __cpuid_count(eax, ecx, val[0], val[1], val[2], val[3]);
-#endif
-}
-
-bool jconf::check_cpu_features()
-{
- constexpr int AESNI_BIT = 1 << 25;
- constexpr int SSE2_BIT = 1 << 26;
- int32_t cpu_info[4];
- bool bHaveSse2;
-
- cpuid(1, 0, cpu_info);
-
- bHaveAes = (cpu_info[2] & AESNI_BIT) != 0;
- bHaveSse2 = (cpu_info[3] & SSE2_BIT) != 0;
-
- return bHaveSse2;
-}
-
bool jconf::parse_config(const char* sFilename)
{
FILE * pFile;
char * buffer;
size_t flen;
- if(!check_cpu_features())
- {
- printer::inst()->print_msg(L0, "CPU support of SSE2 is required.");
- return false;
- }
-
pFile = fopen(sFilename, "rb");
if (pFile == NULL)
{
@@ -310,36 +250,6 @@ bool jconf::parse_config(const char* sFilename)
}
}
- if(NiceHashMode() && GetThreadCount() >= 32)
- {
- printer::inst()->print_msg(L0, "You need to use less than 32 threads in NiceHash mode.");
- return false;
- }
-
- if(GetSlowMemSetting() == unknown_value)
- {
- printer::inst()->print_msg(L0,
- "Invalid config file. use_slow_memory must be \"always\", \"no_mlck\", \"warn\" or \"never\"");
- return false;
- }
-
-#ifdef _WIN32
- if(GetSlowMemSetting() == no_mlck)
- {
- printer::inst()->print_msg(L0, "On Windows large pages need mlock. Please use another option.");
- return false;
- }
-#endif // _WIN32
-
- //if(NeedsAutoconf())
- // return true;
-
- if(prv->configValues[bAesOverride]->IsBool())
- bHaveAes = prv->configValues[bAesOverride]->GetBool();
-
- if(!bHaveAes)
- printer::inst()->print_msg(L0, "Your CPU doesn't support hardware AES. Don't expect high hashrates.");
-
return true;
}
diff --git a/backend/cpu/jconf.h b/backend/cpu/jconf.h
index 39fe6d9..9b46552 100644
--- a/backend/cpu/jconf.h
+++ b/backend/cpu/jconf.h
@@ -24,35 +24,19 @@ public:
long long iCpuAff;
};
- enum slow_mem_cfg {
- always_use,
- no_mlck,
- print_warning,
- never_use,
- unknown_value
- };
-
size_t GetThreadCount();
bool GetThreadConfig(size_t id, thd_cfg &cfg);
bool NeedsAutoconf();
- slow_mem_cfg GetSlowMemSetting();
-
- bool NiceHashMode();
+
- inline bool HaveHardwareAes() { return bHaveAes; }
-
- static void cpuid(uint32_t eax, int32_t ecx, int32_t val[4]);
private:
jconf();
static jconf* oInst;
- bool check_cpu_features();
struct opaque_private;
opaque_private* prv;
-
- bool bHaveAes;
};
} // namespace cpu
diff --git a/backend/cpu/minethd.cpp b/backend/cpu/minethd.cpp
index e6d1bd6..7991d86 100644
--- a/backend/cpu/minethd.cpp
+++ b/backend/cpu/minethd.cpp
@@ -31,6 +31,7 @@
#include "../IBackend.hpp"
#include "../GlobalStates.hpp"
#include "../../ConfigEditor.hpp"
+#include "../../jconf.h"
#include "../../executor.h"
#include "minethd.h"
@@ -129,21 +130,21 @@ cryptonight_ctx* minethd::minethd_alloc_ctx()
cryptonight_ctx* ctx;
alloc_msg msg = { 0 };
- switch (jconf::inst()->GetSlowMemSetting())
+ switch (::jconf::inst()->GetSlowMemSetting())
{
- case jconf::never_use:
+ case ::jconf::never_use:
ctx = cryptonight_alloc_ctx(1, 1, &msg);
if (ctx == NULL)
printer::inst()->print_msg(L0, "MEMORY ALLOC FAILED: %s", msg.warning);
return ctx;
- case jconf::no_mlck:
+ case ::jconf::no_mlck:
ctx = cryptonight_alloc_ctx(1, 0, &msg);
if (ctx == NULL)
printer::inst()->print_msg(L0, "MEMORY ALLOC FAILED: %s", msg.warning);
return ctx;
- case jconf::print_warning:
+ case ::jconf::print_warning:
ctx = cryptonight_alloc_ctx(1, 1, &msg);
if (msg.warning != NULL)
printer::inst()->print_msg(L0, "MEMORY ALLOC FAILED: %s", msg.warning);
@@ -151,10 +152,10 @@ cryptonight_ctx* minethd::minethd_alloc_ctx()
ctx = cryptonight_alloc_ctx(0, 0, NULL);
return ctx;
- case jconf::always_use:
+ case ::jconf::always_use:
return cryptonight_alloc_ctx(0, 0, NULL);
- case jconf::unknown_value:
+ case ::jconf::unknown_value:
return NULL; //Shut up compiler
}
@@ -167,27 +168,27 @@ bool minethd::self_test()
size_t res;
bool fatal = false;
- switch (jconf::inst()->GetSlowMemSetting())
+ switch (::jconf::inst()->GetSlowMemSetting())
{
- case jconf::never_use:
+ case ::jconf::never_use:
res = cryptonight_init(1, 1, &msg);
fatal = true;
break;
- case jconf::no_mlck:
+ case ::jconf::no_mlck:
res = cryptonight_init(1, 0, &msg);
fatal = true;
break;
- case jconf::print_warning:
+ case ::jconf::print_warning:
res = cryptonight_init(1, 1, &msg);
break;
- case jconf::always_use:
+ case ::jconf::always_use:
res = cryptonight_init(0, 0, &msg);
break;
- case jconf::unknown_value:
+ case ::jconf::unknown_value:
default:
return false; //Shut up compiler
}
@@ -214,20 +215,20 @@ bool minethd::self_test()
cn_hash_fun hashf;
cn_hash_fun_dbl hashdf;
- hashf = func_selector(jconf::inst()->HaveHardwareAes(), false);
+ hashf = func_selector(::jconf::inst()->HaveHardwareAes(), false);
hashf("This is a test", 14, out, ctx0);
bResult = memcmp(out, "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05", 32) == 0;
- hashf = func_selector(jconf::inst()->HaveHardwareAes(), true);
+ hashf = func_selector(::jconf::inst()->HaveHardwareAes(), true);
hashf("This is a test", 14, out, ctx0);
bResult &= memcmp(out, "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05", 32) == 0;
- hashdf = func_dbl_selector(jconf::inst()->HaveHardwareAes(), false);
+ hashdf = func_dbl_selector(::jconf::inst()->HaveHardwareAes(), false);
hashdf("The quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy log", 43, out, ctx0, ctx1);
bResult &= memcmp(out, "\x3e\xbb\x7f\x9f\x7d\x27\x3d\x7c\x31\x8d\x86\x94\x77\x55\x0c\xc8\x00\xcf\xb1\x1b\x0c\xad\xb7\xff\xbd\xf6\xf8\x9f\x3a\x47\x1c\x59"
"\xb4\x77\xd5\x02\xe4\xd8\x48\x7f\x42\xdf\xe3\x8e\xed\x73\x81\x7a\xda\x91\xb7\xe2\x63\xd2\x91\x71\xb6\x5c\x44\x3a\x01\x2a\x41\x22", 64) == 0;
- hashdf = func_dbl_selector(jconf::inst()->HaveHardwareAes(), true);
+ hashdf = func_dbl_selector(::jconf::inst()->HaveHardwareAes(), true);
hashdf("The quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy log", 43, out, ctx0, ctx1);
bResult &= memcmp(out, "\x3e\xbb\x7f\x9f\x7d\x27\x3d\x7c\x31\x8d\x86\x94\x77\x55\x0c\xc8\x00\xcf\xb1\x1b\x0c\xad\xb7\xff\xbd\xf6\xf8\x9f\x3a\x47\x1c\x59"
"\xb4\x77\xd5\x02\xe4\xd8\x48\x7f\x42\xdf\xe3\x8e\xed\x73\x81\x7a\xda\x91\xb7\xe2\x63\xd2\x91\x71\xb6\x5c\x44\x3a\x01\x2a\x41\x22", 64) == 0;
@@ -284,9 +285,9 @@ std::vector<IBackend*> minethd::thread_starter(uint32_t threadOffset, miner_work
void minethd::consume_work()
{
// Copy the shared global job into this thread's local work and signal
// consumption. Use a single inst() call: inst() already returns the
// GlobalStates reference, chaining .inst().inst() is redundant and
// inconsistent with the amd/nvidia backends in this same commit.
- memcpy(&oWork, &GlobalStates::oGlobalWork, sizeof(miner_work));
+ memcpy(&oWork, &GlobalStates::inst().oGlobalWork, sizeof(miner_work));
iJobNo++;
- GlobalStates::iConsumeCnt++;
+ GlobalStates::inst().iConsumeCnt++;
}
minethd::cn_hash_fun minethd::func_selector(bool bHaveAes, bool bNoPrefetch)
@@ -336,12 +337,12 @@ void minethd::work_main()
uint32_t* piNonce;
job_result result;
- hash_fun = func_selector(jconf::inst()->HaveHardwareAes(), bNoPrefetch);
+ hash_fun = func_selector(::jconf::inst()->HaveHardwareAes(), bNoPrefetch);
ctx = minethd_alloc_ctx();
piHashVal = (uint64_t*)(result.bResult + 24);
piNonce = (uint32_t*)(oWork.bWorkBlob + 39);
- GlobalStates::iConsumeCnt++;
+ GlobalStates::inst().inst().iConsumeCnt++;
while (bQuit == 0)
{
@@ -351,7 +352,7 @@ void minethd::work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (GlobalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -366,7 +367,7 @@ void minethd::work_main()
assert(sizeof(job_result::sJobID) == sizeof(pool_job::sJobID));
memcpy(result.sJobID, oWork.sJobID, sizeof(job_result::sJobID));
- while(GlobalStates::iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while(GlobalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
if ((iCount & 0xF) == 0) //Store stats every 16 hashes
{
@@ -430,7 +431,7 @@ void minethd::double_work_main()
uint32_t iNonce;
job_result res;
- hash_fun = func_dbl_selector(jconf::inst()->HaveHardwareAes(), bNoPrefetch);
+ hash_fun = func_dbl_selector(::jconf::inst()->HaveHardwareAes(), bNoPrefetch);
ctx0 = minethd_alloc_ctx();
ctx1 = minethd_alloc_ctx();
@@ -439,7 +440,7 @@ void minethd::double_work_main()
piNonce0 = (uint32_t*)(bDoubleWorkBlob + 39);
piNonce1 = nullptr;
- GlobalStates::iConsumeCnt++;
+ GlobalStates::inst().inst().iConsumeCnt++;
while (bQuit == 0)
{
@@ -449,7 +450,7 @@ void minethd::double_work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (GlobalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -466,7 +467,7 @@ void minethd::double_work_main()
assert(sizeof(job_result::sJobID) == sizeof(pool_job::sJobID));
- while (GlobalStates::iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (GlobalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
if ((iCount & 0x7) == 0) //Store stats every 16 hashes
{
diff --git a/backend/cpu/minethd.h b/backend/cpu/minethd.h
index 44b216c..40383cf 100644
--- a/backend/cpu/minethd.h
+++ b/backend/cpu/minethd.h
@@ -40,13 +40,13 @@ private:
// Bottom 22 bits allow for an hour of work at 1000 H/s
inline uint32_t calc_start_nonce(uint32_t resume)
{
- return reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::iThreadCount * resume));
+ return reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume));
}
// Limited version of the nonce calc above
inline uint32_t calc_nicehash_nonce(uint32_t start, uint32_t resume)
{
- return start | ( ( reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::iThreadCount * resume)) >> 4u ) );
+ return start | ( ( reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume)) >> 4u ) );
}
void work_main();
diff --git a/backend/miner_work.h b/backend/miner_work.h
index b922f4a..8349fda 100644
--- a/backend/miner_work.h
+++ b/backend/miner_work.h
@@ -5,6 +5,7 @@
#include <cstdint>
#include <climits>
#include <iostream>
+#include <cassert>
namespace xmrstak
{
diff --git a/backend/nvidia/minethd.cpp b/backend/nvidia/minethd.cpp
index d62728d..554ea69 100644
--- a/backend/nvidia/minethd.cpp
+++ b/backend/nvidia/minethd.cpp
@@ -30,12 +30,12 @@
#include "../../console.h"
#include "../../crypto/cryptonight_aesni.h"
#include "../cpu/minethd.h"
-#include "../cpu/jconf.h"
#include "../../executor.h"
#include "minethd.h"
#include "../../jconf.h"
#include "../../crypto/cryptonight.h"
+#include "../../Environment.hpp"
#include "autoAdjust.hpp"
@@ -85,7 +85,7 @@ bool minethd::self_test()
bool bResult = true;
ctx0 = new cryptonight_ctx;
- if(cpu::jconf::inst()->HaveHardwareAes())
+ if(::jconf::inst()->HaveHardwareAes())
{
//cryptonight_hash_ctx("This is a test", 14, out, ctx0);
bResult = memcmp(out, "\xa0\x84\xf0\x1d\x14\x37\xa0\x9c\x69\x85\x40\x1b\x60\xd4\x35\x54\xae\x10\x58\x02\xc5\xf5\xd8\xa9\xb3\x25\x36\x49\xc0\xbe\x66\x05", 32) == 0;
@@ -110,8 +110,9 @@ extern "C"
#ifdef WIN32
__declspec(dllexport)
#endif
-std::vector<IBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork)
+std::vector<IBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, Environment& env)
{
+ Environment::inst() = env;
return nvidia::minethd::thread_starter(threadOffset, pWork);
}
} // extern "C"
@@ -173,19 +174,19 @@ void minethd::switch_work(miner_work& pWork)
// faster than threads can consume them. This should never happen in real life.
// Pool cant physically send jobs faster than every 250ms or so due to net latency.
- while (GlobalStates::iConsumeCnt.load(std::memory_order_seq_cst) < GlobalStates::iThreadCount)
+ while (GlobalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < GlobalStates::inst().iThreadCount)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
- GlobalStates::oGlobalWork = pWork;
- GlobalStates::iConsumeCnt.store(0, std::memory_order_seq_cst);
- GlobalStates::iGlobalJobNo++;
+ GlobalStates::inst().oGlobalWork = pWork;
+ GlobalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
+ GlobalStates::inst().iGlobalJobNo++;
}
void minethd::consume_work()
{
- memcpy(&oWork, &GlobalStates::oGlobalWork, sizeof(miner_work));
+ memcpy(&oWork, &GlobalStates::inst().oGlobalWork, sizeof(miner_work));
iJobNo++;
- GlobalStates::iConsumeCnt++;
+ GlobalStates::inst().iConsumeCnt++;
}
void minethd::work_main()
@@ -194,9 +195,9 @@ void minethd::work_main()
uint32_t iNonce;
cryptonight_ctx* cpu_ctx;
cpu_ctx = cpu::minethd::minethd_alloc_ctx();
- cn_hash_fun hash_fun = cpu::minethd::func_selector(cpu::jconf::inst()->HaveHardwareAes(), true /*bNoPrefetch*/);
+ cn_hash_fun hash_fun = cpu::minethd::func_selector(::jconf::inst()->HaveHardwareAes(), true /*bNoPrefetch*/);
- GlobalStates::iConsumeCnt++;
+ GlobalStates::inst().iConsumeCnt++;
if(/*cuda_get_deviceinfo(&ctx) != 1 ||*/ cryptonight_extra_cpu_init(&ctx) != 1)
{
@@ -212,7 +213,7 @@ void minethd::work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -224,7 +225,7 @@ void minethd::work_main()
assert(sizeof(job_result::sJobID) == sizeof(pool_job::sJobID));
- while(GlobalStates::iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while(GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
uint32_t foundNonce[10];
diff --git a/backend/nvidia/minethd.h b/backend/nvidia/minethd.h
index a4b095e..9f3993e 100644
--- a/backend/nvidia/minethd.h
+++ b/backend/nvidia/minethd.h
@@ -7,6 +7,7 @@
#include "../../jconf.h"
#include "./jconf.h"
#include "../IBackend.hpp"
+#include "../../Environment.hpp"
#include <iostream>
namespace xmrstak
@@ -33,13 +34,13 @@ private:
// Bottom 22 bits allow for an hour of work at 1000 H/s
inline uint32_t calc_start_nonce(uint32_t resume)
{
- return reverseBits<uint32_t>(iThreadNo + GlobalStates::iThreadCount * resume);
+ return reverseBits<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume);
}
// Limited version of the nonce calc above
inline uint32_t calc_nicehash_nonce(uint32_t start, uint32_t resume)
{
- return start | ( ( reverseBits(iThreadNo + GlobalStates::iThreadCount * resume) >> 4u ) );
+ return start | ( ( reverseBits(iThreadNo + GlobalStates::inst().iThreadCount * resume) >> 4u ) );
}
void work_main();
diff --git a/cli/cli-miner.cpp b/cli/cli-miner.cpp
index 3129f34..e5c5863 100644
--- a/cli/cli-miner.cpp
+++ b/cli/cli-miner.cpp
@@ -195,7 +195,7 @@ void do_benchmark()
std::this_thread::sleep_for(std::chrono::seconds(60));
oWork = xmrstak::miner_work();
- xmrstak::GlobalStates::switch_work(oWork);
+ xmrstak::GlobalStates::inst().switch_work(oWork);
double fTotalHps = 0.0;
for (uint32_t i = 0; i < pvThreads->size(); i++)
diff --git a/config.txt b/config.txt
index 781f505..888b655 100644
--- a/config.txt
+++ b/config.txt
@@ -1,17 +1,4 @@
/*
- * TLS Settings
- * If you need real security, make sure tls_secure_algo is enabled (otherwise MITM attack can downgrade encryption
- * to trivially breakable stuff like DES and MD5), and verify the server's fingerprint through a trusted channel.
- *
- * use_tls - This option will make us connect using Transport Layer Security.
- * tls_secure_algo - Use only secure algorithms. This will make us quit with an error if we can't negotiate a secure algo.
- * tls_fingerprint - Server's SHA256 fingerprint. If this string is non-empty then we will check the server's cert against it.
- */
-"use_tls" : false,
-"tls_secure_algo" : true,
-"tls_fingerprint" : "",
-
-/*
* pool_address - Pool address should be in the form "pool.supportxmr.com:3333". Only stratum pools are supported.
* wallet_address - Your wallet, or pool login.
* pool_password - Can be empty in most cases or "x".
@@ -47,11 +34,11 @@
* performance monitors, there is very little reason to spew out pages of text instead of concise reports.
* Press 'h' (hashrate), 'r' (results) or 'c' (connection) to print reports.
*
- * verbose_level - 0 - Don't print anything.
+ * verbose_level - 0 - Don't print anything.
* 1 - Print intro, connection event, disconnect event
* 2 - All of level 1, and new job (block) event if the difficulty is different from the last job
* 3 - All of level 1, and new job (block) event in all cases, result submission event.
- * 4 - All of level 3, and automatic hashrate report printing
+ * 4 - All of level 3, and automatic hashrate report printing
*/
"verbose_level" : 3,
@@ -64,6 +51,80 @@
"h_print_time" : 60,
/*
+ * Manual hardware AES override
+ *
+ * Some VMs don't report AES capability correctly. You can set this value to true to enforce hardware AES,
+ * to false to force-disable AES, or to null to let the miner decide whether AES is used.
+ *
+ * WARNING: setting this to true on a CPU that doesn't support hardware AES will crash the miner.
+ */
+"aes_override" : null,
+
+/*
+ * LARGE PAGE SUPPORT
+ * Large pages need a properly set up OS. It can be difficult if you are not used to systems administration,
+ * but the performance results are worth the trouble - you will get around 20% boost. Slow memory mode is
+ * meant as a backup, you won't get stellar results there. If you are running into trouble, especially
+ * on Windows, please read the common issues in the README.
+ *
+ * By default we will try to allocate large pages. This means you need to "Run As Administrator" on Windows.
+ * You need to edit your system's group policies to enable locking large pages. Here are the steps from MSDN
+ *
+ * 1. On the Start menu, click Run. In the Open box, type gpedit.msc.
+ * 2. On the Local Group Policy Editor console, expand Computer Configuration, and then expand Windows Settings.
+ * 3. Expand Security Settings, and then expand Local Policies.
+ * 4. Select the User Rights Assignment folder.
+ * 5. The policies will be displayed in the details pane.
+ * 6. In the pane, double-click Lock pages in memory.
+ * 7. In the Local Security Setting – Lock pages in memory dialog box, click Add User or Group.
+ * 8. In the Select Users, Service Accounts, or Groups dialog box, add an account that you will run the miner on
+ * 9. Reboot for change to take effect.
+ *
+ * Windows also tends to fragment memory a lot. If you are running on a system with 4-8GB of RAM you might need
+ * to switch off all the auto-start applications and reboot to have a large enough chunk of contiguous memory.
+ *
+ * On Linux you will need to configure large page support "sudo sysctl -w vm.nr_hugepages=128" and increase your
+ * ulimit -l. To do this you need to add the following lines to /etc/security/limits.conf - "* soft memlock 262144"
+ * and "* hard memlock 262144". You can also do it Windows-style and simply run-as-root, but this is NOT
+ * recommended for security reasons.
+ *
+ * Memory locking means that the kernel can't swap out the page to disk - something that is unlikely to happen on a
+ * command line system that isn't starved of memory. I haven't observed any difference on a CLI Linux system between
+ * locked and unlocked memory. If that is your setup see option "no_mlck".
+ */
+
+/*
+ * use_slow_memory defines our behavior with regards to large pages. There are three possible options here:
+ * always - Don't even try to use large pages. Always use slow memory.
+ * warn - We will try to use large pages, but fall back to slow memory if that fails.
+ * no_mlck - This option is only relevant on Linux, where we can use large pages without locking memory.
+ * It will never use slow memory, but it won't attempt to mlock
+ * never - If we fail to allocate large pages we will print an error and exit.
+ */
+"use_slow_memory" : "warn",
+
+/*
+ * NiceHash mode
+ * nicehash_nonce - Limit the nonce to 3 bytes as required by nicehash. This cuts all the safety margins, and
+ * if a block isn't found within 30 minutes then you might run into nonce collisions. Number
+ * of threads in this mode is hard-limited to 32.
+ */
+"nicehash_nonce" : false,
+
+/*
+ * TLS Settings
+ * If you need real security, make sure tls_secure_algo is enabled (otherwise MITM attack can downgrade encryption
+ * to trivially breakable stuff like DES and MD5), and verify the server's fingerprint through a trusted channel.
+ *
+ * use_tls - This option will make us connect using Transport Layer Security.
+ * tls_secure_algo - Use only secure algorithms. This will make us quit with an error if we can't negotiate a secure algo.
+ * tls_fingerprint - Server's SHA256 fingerprint. If this string is non-empty then we will check the server's cert against it.
+ */
+"use_tls" : false,
+"tls_secure_algo" : true,
+"tls_fingerprint" : "",
+
+/*
* Daemon mode
*
* If you are running the process in the background and you don't need the keyboard reports, set this to true.
diff --git a/console.cpp b/console.cpp
index c6b7d4d..0c73b1d 100644
--- a/console.cpp
+++ b/console.cpp
@@ -151,8 +151,6 @@ inline void comp_localtime(const time_t* ctime, tm* stime)
#endif // __WIN32
}
-printer* printer::oInst = nullptr;
-
printer::printer()
{
verbose_level = LINF;
diff --git a/console.h b/console.h
index 51efaef..ac2ed3c 100644
--- a/console.h
+++ b/console.h
@@ -1,5 +1,6 @@
#pragma once
#include <mutex>
+#include "Environment.hpp"
enum out_colours { K_RED, K_GREEN, K_BLUE, K_YELLOW, K_CYAN, K_MAGENTA, K_WHITE, K_NONE };
@@ -24,8 +25,10 @@ class printer
public:
static inline printer* inst()
{
- if (oInst == nullptr) oInst = new printer;
- return oInst;
+ auto& env = xmrstak::Environment::inst();
+ if(env.pPrinter == nullptr)
+ env.pPrinter = new printer;
+ return env.pPrinter;
};
inline void set_verbose_level(size_t level) { verbose_level = (verbosity)level; }
@@ -35,7 +38,6 @@ public:
private:
printer();
- static printer* oInst;
std::mutex print_mutex;
verbosity verbose_level;
diff --git a/executor.cpp b/executor.cpp
index 99f7ad5..0dd9cd7 100644
--- a/executor.cpp
+++ b/executor.cpp
@@ -122,7 +122,7 @@ void executor::sched_reconnect()
rt, int_port(iReconnectAttempts));
auto work = xmrstak::miner_work();
- xmrstak::GlobalStates::switch_work(work);
+ xmrstak::GlobalStates::inst().switch_work(work);
push_timed_event(ex_event(EV_RECONNECT, usr_pool_id), rt);
}
@@ -240,7 +240,7 @@ void executor::on_pool_have_job(size_t pool_id, pool_job& oPoolJob)
oWork.iTarget32 = oPoolJob.iTarget32;
- xmrstak::GlobalStates::switch_work(oWork);
+ xmrstak::GlobalStates::inst().switch_work(oWork);
if(pool_id == dev_pool_id)
return;
@@ -362,7 +362,7 @@ void executor::on_switch_pool(size_t pool_id)
oWork.iTarget32 = oPoolJob.iTarget32;
- xmrstak::GlobalStates::switch_work(oWork);
+ xmrstak::GlobalStates::inst().switch_work(oWork);
if(dev_pool->is_running())
push_timed_event(ex_event(EV_DEV_POOL_EXIT), 5);
diff --git a/jconf.cpp b/jconf.cpp
index f5fb3bc..00c34f1 100644
--- a/jconf.cpp
+++ b/jconf.cpp
@@ -48,7 +48,7 @@ using namespace rapidjson;
enum configEnum {
bTlsMode, bTlsSecureAlgo, sTlsFingerprint, sPoolAddr, sWalletAddr, sPoolPwd,
iCallTimeout, iNetRetry, iGiveUpLimit, iVerboseLevel, iAutohashTime,
- bDaemonMode, sOutputFile, iHttpdPort, bPreferIpv4 };
+ bDaemonMode, sOutputFile, iHttpdPort, bPreferIpv4, bNiceHashMode, bAesOverride, sUseSlowMem };
struct configVal {
configEnum iName;
@@ -73,7 +73,10 @@ configVal oConfigValues[] = {
{ bDaemonMode, "daemon_mode", kTrueType },
{ sOutputFile, "output_file", kStringType },
{ iHttpdPort, "httpd_port", kNumberType },
- { bPreferIpv4, "prefer_ipv4", kTrueType }
+ { bPreferIpv4, "prefer_ipv4", kTrueType },
+ { bNiceHashMode, "nicehash_nonce", kTrueType },
+ { bAesOverride, "aes_override", kNullType },
+ { sUseSlowMem, "use_slow_memory", kStringType }
};
constexpr size_t iConfigCnt = (sizeof(oConfigValues)/sizeof(oConfigValues[0]));
@@ -102,8 +105,6 @@ struct jconf::opaque_private
}
};
-jconf* jconf::oInst = nullptr;
-
jconf::jconf()
{
prv = new opaque_private();
@@ -184,12 +185,66 @@ const char* jconf::GetOutputFile()
return prv->configValues[sOutputFile]->GetString();
}
+bool jconf::NiceHashMode()
+{
+ return prv->configValues[bNiceHashMode]->GetBool();
+}
+
+
+void jconf::cpuid(uint32_t eax, int32_t ecx, int32_t val[4])
+{
+ memset(val, 0, sizeof(int32_t)*4);
+
+#ifdef _WIN32
+ __cpuidex(val, eax, ecx);
+#else
+ __cpuid_count(eax, ecx, val[0], val[1], val[2], val[3]);
+#endif
+}
+
+bool jconf::check_cpu_features()
+{
+ constexpr int AESNI_BIT = 1 << 25;
+ constexpr int SSE2_BIT = 1 << 26;
+ int32_t cpu_info[4];
+ bool bHaveSse2;
+
+ cpuid(1, 0, cpu_info);
+
+ bHaveAes = (cpu_info[2] & AESNI_BIT) != 0;
+ bHaveSse2 = (cpu_info[3] & SSE2_BIT) != 0;
+
+ return bHaveSse2;
+}
+
+jconf::slow_mem_cfg jconf::GetSlowMemSetting()
+{
+ const char* opt = prv->configValues[sUseSlowMem]->GetString();
+
+ if(strcasecmp(opt, "always") == 0)
+ return always_use;
+ else if(strcasecmp(opt, "no_mlck") == 0)
+ return no_mlck;
+ else if(strcasecmp(opt, "warn") == 0)
+ return print_warning;
+ else if(strcasecmp(opt, "never") == 0)
+ return never_use;
+ else
+ return unknown_value;
+}
+
bool jconf::parse_config(const char* sFilename)
{
FILE * pFile;
char * buffer;
size_t flen;
+ if(!check_cpu_features())
+ {
+ printer::inst()->print_msg(L0, "CPU support of SSE2 is required.");
+ return false;
+ }
+
pFile = fopen(sFilename, "rb");
if (pFile == NULL)
{
@@ -310,8 +365,37 @@ bool jconf::parse_config(const char* sFilename)
}
#endif // CONF_NO_TLS
+ /* \todo check in the cpu backend if we have more than 32 workers;
+ * keep in mind that we have changed the way the nonce is calculated (reverse thread index)
+ if(NiceHashMode() && GetThreadCount() >= 32)
+ {
+ printer::inst()->print_msg(L0, "You need to use less than 32 threads in NiceHash mode.");
+ return false;
+ }
+ */
+
+ if(prv->configValues[bAesOverride]->IsBool())
+ bHaveAes = prv->configValues[bAesOverride]->GetBool();
+
+ if(!bHaveAes)
+ printer::inst()->print_msg(L0, "Your CPU doesn't support hardware AES. Don't expect high hashrates.");
printer::inst()->set_verbose_level(prv->configValues[iVerboseLevel]->GetUint64());
+ if(GetSlowMemSetting() == unknown_value)
+ {
+ printer::inst()->print_msg(L0,
+ "Invalid config file. use_slow_memory must be \"always\", \"no_mlck\", \"warn\" or \"never\"");
+ return false;
+ }
+
+#ifdef _WIN32
+ if(GetSlowMemSetting() == no_mlck)
+ {
+ printer::inst()->print_msg(L0, "On Windows large pages need mlock. Please use another option.");
+ return false;
+ }
+#endif // _WIN32
+
return true;
}
diff --git a/jconf.h b/jconf.h
index dd6a010..2865652 100644
--- a/jconf.h
+++ b/jconf.h
@@ -1,14 +1,17 @@
#pragma once
#include <stdlib.h>
#include <string>
+#include "Environment.hpp"
class jconf
{
public:
static jconf* inst()
{
- if (oInst == nullptr) oInst = new jconf;
- return oInst;
+ auto& env = xmrstak::Environment::inst();
+ if(env.pJconfConfig == nullptr)
+ env.pJconfConfig = new jconf;
+ return env.pJconfConfig;
};
bool parse_config(const char* sFilename);
@@ -51,9 +54,16 @@ public:
bool PreferIpv4();
+ bool NiceHashMode();
+
+ inline bool HaveHardwareAes() { return bHaveAes; }
+
+ static void cpuid(uint32_t eax, int32_t ecx, int32_t val[4]);
+
+ slow_mem_cfg GetSlowMemSetting();
+
private:
jconf();
- static jconf* oInst;
bool check_cpu_features();
struct opaque_private;
OpenPOWER on IntegriCloud