author     psychocrypt <psychocrypt@users.noreply.github.com>  2017-09-29 22:06:09 +0200
committer  psychocrypt <psychocrypt@users.noreply.github.com>  2017-09-30 23:46:08 +0200
commit     855af1cf65de1fd3795de3c9a859fd9242625a84 (patch)
tree       51368c6f1d6cd401969f4beedcbcccc21d055153 /xmrstak/backend
parent     8babae3156430f5aa6b804f7c352ffb178097963 (diff)
download   xmr-stak-855af1cf65de1fd3795de3c9a859fd9242625a84.zip
           xmr-stak-855af1cf65de1fd3795de3c9a859fd9242625a84.tar.gz
fix compile
Diffstat (limited to 'xmrstak/backend')
-rw-r--r--  xmrstak/backend/amd/autoAdjust.hpp        |   2
-rw-r--r--  xmrstak/backend/amd/minethd.cpp           |  28
-rw-r--r--  xmrstak/backend/amd/minethd.hpp           |  16
-rw-r--r--  xmrstak/backend/backendConnector.cpp      |  18
-rw-r--r--  xmrstak/backend/backendConnector.hpp      |   2
-rw-r--r--  xmrstak/backend/cpu/autoAdjust.hpp        |   2
-rw-r--r--  xmrstak/backend/cpu/autoAdjustHwloc.hpp   |   2
-rw-r--r--  xmrstak/backend/cpu/minethd.cpp           |  41
-rw-r--r--  xmrstak/backend/cpu/minethd.hpp           |  21
-rw-r--r--  xmrstak/backend/globalStates.cpp          |   2
-rw-r--r--  xmrstak/backend/globalStates.hpp          |  14
-rw-r--r--  xmrstak/backend/iBackend.hpp              |  37
-rw-r--r--  xmrstak/backend/miner_work.hpp            |  21
-rw-r--r--  xmrstak/backend/nvidia/autoAdjust.hpp     |   2
-rw-r--r--  xmrstak/backend/nvidia/jconf.cpp          |   2
-rw-r--r--  xmrstak/backend/nvidia/minethd.cpp        |  30
-rw-r--r--  xmrstak/backend/nvidia/minethd.hpp        |  24
-rw-r--r--  xmrstak/backend/plugin.hpp                |  12
18 files changed, 126 insertions(+), 150 deletions(-)
diff --git a/xmrstak/backend/amd/autoAdjust.hpp b/xmrstak/backend/amd/autoAdjust.hpp
index 84541ae..c936e30 100644
--- a/xmrstak/backend/amd/autoAdjust.hpp
+++ b/xmrstak/backend/amd/autoAdjust.hpp
@@ -76,7 +76,7 @@ private:
#include "./config.tpl"
;
- ConfigEditor configTpl{};
+ configEditor configTpl{};
configTpl.set( std::string(tpl) );
std::string conf;
diff --git a/xmrstak/backend/amd/minethd.cpp b/xmrstak/backend/amd/minethd.cpp
index b2f5620..2b7ce77 100644
--- a/xmrstak/backend/amd/minethd.cpp
+++ b/xmrstak/backend/amd/minethd.cpp
@@ -63,9 +63,9 @@ extern "C" {
#ifdef WIN32
__declspec(dllexport)
#endif
-std::vector<IBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, Environment& env)
+std::vector<iBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, environment& env)
{
- Environment::inst() = env;
+ environment::inst() = env;
return amd::minethd::thread_starter(threadOffset, pWork);
}
} // extern "C"
@@ -91,11 +91,11 @@ bool minethd::init_gpus()
std::vector<GpuContext> minethd::vGpuData;
-std::vector<IBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
+std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
{
- std::vector<IBackend*>* pvThreads = new std::vector<IBackend*>();
+ std::vector<iBackend*>* pvThreads = new std::vector<iBackend*>();
- if(!ConfigEditor::file_exist(Params::inst().configFileAMD))
+ if(!configEditor::file_exist(Params::inst().configFileAMD))
{
autoAdjust adjust;
if(!adjust.printConfig())
@@ -148,19 +148,19 @@ void minethd::switch_work(miner_work& pWork)
// faster than threads can consume them. This should never happen in real life.
// Pool cant physically send jobs faster than every 250ms or so due to net latency.
- while (GlobalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < GlobalStates::inst().iThreadCount)
+ while (globalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < globalStates::inst().iThreadCount)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
- GlobalStates::inst().oGlobalWork = pWork;
- GlobalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
- GlobalStates::inst().iGlobalJobNo++;
+ globalStates::inst().oGlobalWork = pWork;
+ globalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
+ globalStates::inst().iGlobalJobNo++;
}
void minethd::consume_work()
{
- memcpy(&oWork, &GlobalStates::inst().oGlobalWork, sizeof(miner_work));
+ memcpy(&oWork, &globalStates::inst().oGlobalWork, sizeof(miner_work));
iJobNo++;
- GlobalStates::inst().iConsumeCnt++;
+ globalStates::inst().iConsumeCnt++;
}
@@ -172,7 +172,7 @@ void minethd::work_main()
cpu_ctx = cpu::minethd::minethd_alloc_ctx();
cn_hash_fun hash_fun = cpu::minethd::func_selector(::jconf::inst()->HaveHardwareAes(), true /*bNoPrefetch*/);
- GlobalStates::inst().iConsumeCnt++;
+ globalStates::inst().iConsumeCnt++;
while (bQuit == 0)
{
@@ -182,7 +182,7 @@ void minethd::work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -194,7 +194,7 @@ void minethd::work_main()
uint32_t target = oWork.iTarget32;
XMRSetJob(pGpuCtx, oWork.bWorkBlob, oWork.iWorkSize, target);
- while(GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while(globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
cl_uint results[0x100];
memset(results,0,sizeof(cl_uint)*(0x100));
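The switch_work/consume_work pair in this file (and its twins in the cpu and nvidia backends) is a small polling handshake: the producer waits until iConsumeCnt shows that every worker copied the previous job, publishes the new work, resets the counter and bumps iGlobalJobNo; each worker polls iGlobalJobNo and counts itself back in via consume_work. A condensed, self-contained sketch of that pattern (JobBoard, publish and worker_poll are illustrative names, not part of the commit):

#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

// Condensed sketch of the iConsumeCnt / iGlobalJobNo handshake shown above.
struct JobBoard
{
	std::atomic<uint64_t> iGlobalJobNo{0}; // incremented once per published job
	std::atomic<uint64_t> iConsumeCnt{0};  // workers that copied the current job
	uint64_t iThreadCount = 0;             // set once after all backends started
	int oGlobalWork = 0;                   // stands in for miner_work

	// Producer side (switch_work): wait for all consumers, then publish.
	void publish(int newWork)
	{
		while(iConsumeCnt.load(std::memory_order_seq_cst) < iThreadCount)
			std::this_thread::sleep_for(std::chrono::milliseconds(100));

		oGlobalWork = newWork;
		iConsumeCnt.store(0, std::memory_order_seq_cst);
		iGlobalJobNo++;
	}
};

// Worker side (work_main + consume_work): poll for a new job number,
// copy the work, then report that it was consumed.
inline void worker_poll(JobBoard& board, uint64_t& iJobNo, int& oWork)
{
	while(board.iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
		std::this_thread::sleep_for(std::chrono::milliseconds(100));

	oWork = board.oGlobalWork;
	iJobNo++;
	board.iConsumeCnt++;
}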
diff --git a/xmrstak/backend/amd/minethd.hpp b/xmrstak/backend/amd/minethd.hpp
index 7e71082..21c2dd9 100644
--- a/xmrstak/backend/amd/minethd.hpp
+++ b/xmrstak/backend/amd/minethd.hpp
@@ -2,6 +2,8 @@
#include "amd_gpu/gpu.hpp"
#include "jconf.hpp"
+#include "xmrstak/backend/cpu/crypto/cryptonight.h"
+#include "xmrstak/backend/miner_work.hpp"
#include "xmrstak/backend/iBackend.hpp"
#include "xmrstak/misc/environment.hpp"
@@ -13,27 +15,18 @@ namespace xmrstak
namespace amd
{
-class minethd : public IBackend
+class minethd : public iBackend
{
public:
static void switch_work(miner_work& pWork);
- static std::vector<IBackend*>* thread_starter(uint32_t threadOffset, miner_work& pWork);
+ static std::vector<iBackend*>* thread_starter(uint32_t threadOffset, miner_work& pWork);
static bool init_gpus();
private:
typedef void (*cn_hash_fun)(const void*, size_t, void*, cryptonight_ctx*);
minethd(miner_work& pWork, size_t iNo, GpuContext* ctx);
-
- // We use the top 8 bits of the nonce for thread and resume
- // This allows us to resume up to 64 threads 4 times before
- // we get nonce collisions
- // Bottom 24 bits allow for an hour of work at 4000 H/s
- inline uint32_t calc_start_nonce(uint32_t resume)
- {
- return reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume));
- }
void work_main();
void double_work_main();
@@ -45,7 +38,6 @@ private:
miner_work oWork;
std::thread oWorkThd;
- uint8_t iThreadNo;
bool bQuit;
bool bNoPrefetch;
diff --git a/xmrstak/backend/backendConnector.cpp b/xmrstak/backend/backendConnector.cpp
index d07b54d..e0ac85a 100644
--- a/xmrstak/backend/backendConnector.cpp
+++ b/xmrstak/backend/backendConnector.cpp
@@ -56,19 +56,19 @@ bool BackendConnector::self_test()
return true;
}
-std::vector<IBackend*>* BackendConnector::thread_starter(miner_work& pWork)
+std::vector<iBackend*>* BackendConnector::thread_starter(miner_work& pWork)
{
- GlobalStates::inst().iGlobalJobNo = 0;
- GlobalStates::inst().iConsumeCnt = 0;
+ globalStates::inst().iGlobalJobNo = 0;
+ globalStates::inst().iConsumeCnt = 0;
- std::vector<IBackend*>* pvThreads = new std::vector<IBackend*>;
+ std::vector<iBackend*>* pvThreads = new std::vector<iBackend*>;
#ifndef CONF_NO_CUDA
if(Params::inst().useNVIDIA)
{
- Plugin nvidiaPlugin("NVIDIA", "xmrstak_cuda_backend");
- std::vector<IBackend*>* nvidiaThreads = nvidiaPlugin.startBackend(static_cast<uint32_t>(pvThreads->size()), pWork, Environment::inst());
+ plugin nvidiaplugin("NVIDIA", "xmrstak_cuda_backend");
+ std::vector<iBackend*>* nvidiaThreads = nvidiaplugin.startBackend(static_cast<uint32_t>(pvThreads->size()), pWork, environment::inst());
pvThreads->insert(std::end(*pvThreads), std::begin(*nvidiaThreads), std::end(*nvidiaThreads));
if(nvidiaThreads->size() == 0)
printer::inst()->print_msg(L0, "WARNING: backend NVIDIA disabled.");
@@ -78,8 +78,8 @@ std::vector<IBackend*>* BackendConnector::thread_starter(miner_work& pWork)
#ifndef CONF_NO_OPENCL
if(Params::inst().useAMD)
{
- Plugin amdPlugin("AMD", "xmrstak_opencl_backend");
- std::vector<IBackend*>* amdThreads = amdPlugin.startBackend(static_cast<uint32_t>(pvThreads->size()), pWork, Environment::inst());
+ plugin amdplugin("AMD", "xmrstak_opencl_backend");
+ std::vector<iBackend*>* amdThreads = amdplugin.startBackend(static_cast<uint32_t>(pvThreads->size()), pWork, environment::inst());
pvThreads->insert(std::end(*pvThreads), std::begin(*amdThreads), std::end(*amdThreads));
if(amdThreads->size() == 0)
printer::inst()->print_msg(L0, "WARNING: backend AMD disabled.");
@@ -96,7 +96,7 @@ std::vector<IBackend*>* BackendConnector::thread_starter(miner_work& pWork)
}
#endif
- GlobalStates::inst().iThreadCount = pvThreads->size();
+ globalStates::inst().iThreadCount = pvThreads->size();
return pvThreads;
}
diff --git a/xmrstak/backend/backendConnector.hpp b/xmrstak/backend/backendConnector.hpp
index f573abc..da3dc77 100644
--- a/xmrstak/backend/backendConnector.hpp
+++ b/xmrstak/backend/backendConnector.hpp
@@ -14,7 +14,7 @@ namespace xmrstak
struct BackendConnector
{
- static std::vector<IBackend*>* thread_starter(miner_work& pWork);
+ static std::vector<iBackend*>* thread_starter(miner_work& pWork);
static bool self_test();
};
diff --git a/xmrstak/backend/cpu/autoAdjust.hpp b/xmrstak/backend/cpu/autoAdjust.hpp
index f686224..639c27b 100644
--- a/xmrstak/backend/cpu/autoAdjust.hpp
+++ b/xmrstak/backend/cpu/autoAdjust.hpp
@@ -39,7 +39,7 @@ public:
bool printConfig()
{
- ConfigEditor configTpl{};
+ configEditor configTpl{};
// load the template of the backend config into a char variable
const char *tpl =
diff --git a/xmrstak/backend/cpu/autoAdjustHwloc.hpp b/xmrstak/backend/cpu/autoAdjustHwloc.hpp
index 055a7f5..8431956 100644
--- a/xmrstak/backend/cpu/autoAdjustHwloc.hpp
+++ b/xmrstak/backend/cpu/autoAdjustHwloc.hpp
@@ -37,7 +37,7 @@ public:
hwloc_topology_load(topology);
std::string conf;
- ConfigEditor configTpl{};
+ configEditor configTpl{};
// load the template of the backend config into a char variable
const char *tpl =
diff --git a/xmrstak/backend/cpu/minethd.cpp b/xmrstak/backend/cpu/minethd.cpp
index 6037161..d786bee 100644
--- a/xmrstak/backend/cpu/minethd.cpp
+++ b/xmrstak/backend/cpu/minethd.cpp
@@ -21,23 +21,19 @@
*
*/
-#include <assert.h>
-#include <cmath>
-#include <chrono>
-#include <cstring>
-#include <thread>
-#include <bitset>
+#include "crypto/cryptonight_aesni.h"
+
#include "xmrstak/misc/console.hpp"
#include "xmrstak/backend/iBackend.hpp"
#include "xmrstak/backend//globalStates.hpp"
#include "xmrstak/misc/configEditor.hpp"
#include "xmrstak/params.hpp"
-#include "xmrstak/jconf.hpp"
+#include "jconf.hpp"
#include "xmrstak/misc/executor.hpp"
#include "minethd.hpp"
#include "xmrstak/jconf.hpp"
-#include "xmrstak/backend/crypto/cryptonight_aesni.h"
+
#include "hwlocMemory.hpp"
#include "xmrstak/backend/miner_work.hpp"
@@ -47,6 +43,13 @@
# include "autoAdjust.hpp"
#endif
+#include <assert.h>
+#include <cmath>
+#include <chrono>
+#include <cstring>
+#include <thread>
+#include <bitset>
+
#ifdef _WIN32
#include <windows.h>
@@ -244,11 +247,11 @@ bool minethd::self_test()
return bResult;
}
-std::vector<IBackend*> minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
+std::vector<iBackend*> minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
{
- std::vector<IBackend*> pvThreads;
+ std::vector<iBackend*> pvThreads;
- if(!ConfigEditor::file_exist(Params::inst().configFileCPU))
+ if(!configEditor::file_exist(Params::inst().configFileCPU))
{
autoAdjust adjust;
if(!adjust.printConfig())
@@ -286,9 +289,9 @@ std::vector<IBackend*> minethd::thread_starter(uint32_t threadOffset, miner_work
void minethd::consume_work()
{
- memcpy(&oWork, &GlobalStates::inst().inst().oGlobalWork, sizeof(miner_work));
+ memcpy(&oWork, &globalStates::inst().inst().oGlobalWork, sizeof(miner_work));
iJobNo++;
- GlobalStates::inst().inst().iConsumeCnt++;
+ globalStates::inst().inst().iConsumeCnt++;
}
minethd::cn_hash_fun minethd::func_selector(bool bHaveAes, bool bNoPrefetch)
@@ -343,7 +346,7 @@ void minethd::work_main()
piHashVal = (uint64_t*)(result.bResult + 24);
piNonce = (uint32_t*)(oWork.bWorkBlob + 39);
- GlobalStates::inst().inst().iConsumeCnt++;
+ globalStates::inst().inst().iConsumeCnt++;
while (bQuit == 0)
{
@@ -353,7 +356,7 @@ void minethd::work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (globalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -368,7 +371,7 @@ void minethd::work_main()
assert(sizeof(job_result::sJobID) == sizeof(pool_job::sJobID));
memcpy(result.sJobID, oWork.sJobID, sizeof(job_result::sJobID));
- while(GlobalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while(globalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
if ((iCount & 0xF) == 0) //Store stats every 16 hashes
{
@@ -441,7 +444,7 @@ void minethd::double_work_main()
piNonce0 = (uint32_t*)(bDoubleWorkBlob + 39);
piNonce1 = nullptr;
- GlobalStates::inst().inst().iConsumeCnt++;
+ globalStates::inst().inst().iConsumeCnt++;
while (bQuit == 0)
{
@@ -451,7 +454,7 @@ void minethd::double_work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (globalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -468,7 +471,7 @@ void minethd::double_work_main()
assert(sizeof(job_result::sJobID) == sizeof(pool_job::sJobID));
- while (GlobalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (globalStates::inst().inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
if ((iCount & 0x7) == 0) //Store stats every 16 hashes
{
diff --git a/xmrstak/backend/cpu/minethd.hpp b/xmrstak/backend/cpu/minethd.hpp
index 1dcca86..a091ee8 100644
--- a/xmrstak/backend/cpu/minethd.hpp
+++ b/xmrstak/backend/cpu/minethd.hpp
@@ -3,7 +3,6 @@
#include "crypto/cryptonight.h"
#include "xmrstak/backend/miner_work.hpp"
#include "xmrstak/backend/iBackend.hpp"
-#include "xmrstak/backend/globalStates.hpp"
#include <iostream>
#include <thread>
@@ -16,10 +15,10 @@ namespace xmrstak
namespace cpu
{
-class minethd : public IBackend
+class minethd : public iBackend
{
public:
- static std::vector<IBackend*> thread_starter(uint32_t threadOffset, miner_work& pWork);
+ static std::vector<iBackend*> thread_starter(uint32_t threadOffset, miner_work& pWork);
static bool self_test();
typedef void (*cn_hash_fun)(const void*, size_t, void*, cryptonight_ctx*);
@@ -36,21 +35,6 @@ private:
minethd(miner_work& pWork, size_t iNo, bool double_work, bool no_prefetch, int64_t affinity);
- // We use the top 10 bits of the nonce for thread and resume
- // This allows us to resume up to 128 threads 4 times before
- // we get nonce collisions
- // Bottom 22 bits allow for an hour of work at 1000 H/s
- inline uint32_t calc_start_nonce(uint32_t resume)
- {
- return reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume));
- }
-
- // Limited version of the nonce calc above
- inline uint32_t calc_nicehash_nonce(uint32_t start, uint32_t resume)
- {
- return start | ( ( reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume)) >> 4u ) );
- }
-
void work_main();
void double_work_main();
void consume_work();
@@ -65,7 +49,6 @@ private:
std::mutex work_thd_mtx;
std::thread oWorkThd;
- uint8_t iThreadNo;
int64_t affinity;
bool bQuit;
diff --git a/xmrstak/backend/globalStates.cpp b/xmrstak/backend/globalStates.cpp
index ef582c1..9104040 100644
--- a/xmrstak/backend/globalStates.cpp
+++ b/xmrstak/backend/globalStates.cpp
@@ -34,7 +34,7 @@ namespace xmrstak
{
-void GlobalStates::switch_work(miner_work& pWork)
+void globalStates::switch_work(miner_work& pWork)
{
// iConsumeCnt is a basic lock-like polling mechanism just in case we happen to push work
// faster than threads can consume them. This should never happen in real life.
diff --git a/xmrstak/backend/globalStates.hpp b/xmrstak/backend/globalStates.hpp
index 58248bd..73ccf74 100644
--- a/xmrstak/backend/globalStates.hpp
+++ b/xmrstak/backend/globalStates.hpp
@@ -9,15 +9,15 @@
namespace xmrstak
{
-struct GlobalStates
+struct globalStates
{
- static inline GlobalStates& inst()
+ static inline globalStates& inst()
{
- auto& env = Environment::inst();
- if(env.pGlobalStates == nullptr)
- env.pGlobalStates = new GlobalStates;
- return *env.pGlobalStates;
+ auto& env = environment::inst();
+ if(env.pglobalStates == nullptr)
+ env.pglobalStates = new globalStates;
+ return *env.pglobalStates;
}
void switch_work(miner_work& pWork);
@@ -29,7 +29,7 @@ struct GlobalStates
private:
- GlobalStates() : iThreadCount(0)
+ globalStates() : iThreadCount(0)
{
}
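The inst() accessor above lazily creates the singleton through the shared environment object; the exported xmrstak_start_backend entry points (see the amd and nvidia minethd.cpp hunks) copy the host's environment into the loaded library, so the main binary and the backend plugins end up using the same globalStates instance. A simplified, self-contained sketch of the pattern (the static-local environment here is an illustration; the real class lives in xmrstak/misc/environment.hpp):

#include <cstdint>

struct globalStates;

// Sketch of a lazily populated environment that owns cross-backend singletons.
struct environment
{
	static environment& inst()
	{
		static environment env; // one per process in this sketch
		return env;
	}

	globalStates* pglobalStates = nullptr;
};

struct globalStates
{
	static globalStates& inst()
	{
		auto& env = environment::inst();
		if(env.pglobalStates == nullptr)
			env.pglobalStates = new globalStates;
		return *env.pglobalStates;
	}

	uint64_t iThreadCount;

private:
	globalStates() : iThreadCount(0)
	{
	}
};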
diff --git a/xmrstak/backend/iBackend.hpp b/xmrstak/backend/iBackend.hpp
index 5037028..0be8f0a 100644
--- a/xmrstak/backend/iBackend.hpp
+++ b/xmrstak/backend/iBackend.hpp
@@ -1,18 +1,51 @@
#pragma once
+#include "xmrstak/backend/globalStates.hpp"
+
#include <atomic>
#include <cstdint>
+#include <climits>
namespace xmrstak
{
+ // only allowed for unsigned value \todo add static assert
+ template<typename T>
+ T reverseBits(T value)
+ {
+ /* init with value (to get LSB) */
+ T result = value;
+ /* extra shift needed at end */
+ int s = sizeof(T) * CHAR_BIT - 1;
+ for (value >>= 1; value; value >>= 1)
+ {
+ result <<= 1;
+ result |= value & 1;
+ s--;
+ }
+ /* shift when values highest bits are zero */
+ result <<= s;
+ return result;
+ }
- struct IBackend
+ struct iBackend
{
+ inline uint32_t calc_start_nonce(uint32_t resume)
+ {
+ return reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + globalStates::inst().iThreadCount * resume));
+ }
+
+ // Limited version of the nonce calc above
+ inline uint32_t calc_nicehash_nonce(uint32_t start, uint32_t resume)
+ {
+ return start | ( calc_start_nonce(resume) >> 8u );
+ }
+
std::atomic<uint64_t> iHashCount;
std::atomic<uint64_t> iTimestamp;
+ uint32_t iThreadNo;
- IBackend() : iHashCount(0), iTimestamp(0)
+ iBackend() : iHashCount(0), iTimestamp(0)
{
}
};
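With reverseBits and the nonce helpers now shared through iBackend, every backend derives its start nonce the same way: iThreadNo + iThreadCount * resume is bit-reversed, so the thread/resume index lands in the top bits of the nonce and each thread gets a long, collision-free run in the low bits. A small standalone illustration (the main function and the printed values are illustrative, not part of the commit):

#include <climits>
#include <cstdint>
#include <cstdio>

// Bit reversal as in the hunk above; only meaningful for unsigned types.
template<typename T>
T reverseBits(T value)
{
	T result = value;                 // start with the LSB already in place
	int s = sizeof(T) * CHAR_BIT - 1; // shifts still owed at the end
	for(value >>= 1; value; value >>= 1)
	{
		result <<= 1;
		result |= value & 1;
		s--;
	}
	result <<= s;                     // account for the high zero bits
	return result;
}

int main()
{
	const uint32_t iThreadCount = 4;
	// With 4 threads and resume 0 the start nonces are 0x00000000, 0x80000000,
	// 0x40000000, 0xC0000000: each thread owns a 2^30-wide slice of the space.
	for(uint32_t iThreadNo = 0; iThreadNo < iThreadCount; ++iThreadNo)
	{
		for(uint32_t resume = 0; resume < 2; ++resume)
		{
			uint32_t start = reverseBits<uint32_t>(iThreadNo + iThreadCount * resume);
			std::printf("thread %u resume %u -> 0x%08X\n",
			            (unsigned)iThreadNo, (unsigned)resume, (unsigned)start);
		}
	}
	return 0;
}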
diff --git a/xmrstak/backend/miner_work.hpp b/xmrstak/backend/miner_work.hpp
index c471546..e9f9e07 100644
--- a/xmrstak/backend/miner_work.hpp
+++ b/xmrstak/backend/miner_work.hpp
@@ -4,31 +4,12 @@
#include <atomic>
#include <mutex>
#include <cstdint>
-#include <climits>
#include <iostream>
#include <cassert>
+#include <cstring>
namespace xmrstak
{
- // only allowed for unsigned value \todo add static assert
- template<typename T>
- T reverseBits(T value)
- {
- /* init with value (to get LSB) */
- T result = value;
- /* extra shift needed at end */
- int s = sizeof(T) * CHAR_BIT - 1;
- for (value >>= 1; value; value >>= 1)
- {
- result <<= 1;
- result |= value & 1;
- s--;
- }
- /* shift when values highest bits are zero */
- result <<= s;
- return result;
- }
-
struct miner_work
{
char sJobID[64];
diff --git a/xmrstak/backend/nvidia/autoAdjust.hpp b/xmrstak/backend/nvidia/autoAdjust.hpp
index 4107510..2354dec 100644
--- a/xmrstak/backend/nvidia/autoAdjust.hpp
+++ b/xmrstak/backend/nvidia/autoAdjust.hpp
@@ -83,7 +83,7 @@ private:
#include "./config.tpl"
;
- ConfigEditor configTpl{};
+ configEditor configTpl{};
configTpl.set( std::string(tpl) );
constexpr size_t byte2mib = 1024u * 1024u;
diff --git a/xmrstak/backend/nvidia/jconf.cpp b/xmrstak/backend/nvidia/jconf.cpp
index 7a94d19..971ff05 100644
--- a/xmrstak/backend/nvidia/jconf.cpp
+++ b/xmrstak/backend/nvidia/jconf.cpp
@@ -22,7 +22,7 @@
*/
#include "jconf.hpp"
-#include "xmrstak/miscjext.hpp"
+#include "xmrstak/misc/jext.hpp"
#include "xmrstak/misc/console.hpp"
#include <stdio.h>
diff --git a/xmrstak/backend/nvidia/minethd.cpp b/xmrstak/backend/nvidia/minethd.cpp
index 7718d34..2e3ef01 100644
--- a/xmrstak/backend/nvidia/minethd.cpp
+++ b/xmrstak/backend/nvidia/minethd.cpp
@@ -26,7 +26,7 @@
#include "xmrstak/misc/console.hpp"
#include "xmrstak/backend/cpu/crypto/cryptonight_aesni.h"
#include "xmrstak/backend/cpu/crypto/cryptonight.h"
-#include "xmrstak/backend/cpu//cpu/minethd.hpp"
+#include "xmrstak/backend/cpu/minethd.hpp"
#include "xmrstak/params.hpp"
#include "xmrstak/misc/executor.hpp"
#include "xmrstak/jconf.hpp"
@@ -110,18 +110,18 @@ extern "C"
#ifdef WIN32
__declspec(dllexport)
#endif
-std::vector<IBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, Environment& env)
+std::vector<iBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, environment& env)
{
- Environment::inst() = env;
+ environment::inst() = env;
return nvidia::minethd::thread_starter(threadOffset, pWork);
}
} // extern "C"
-std::vector<IBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
+std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
{
- std::vector<IBackend*>* pvThreads = new std::vector<IBackend*>();
+ std::vector<iBackend*>* pvThreads = new std::vector<iBackend*>();
- if(!ConfigEditor::file_exist(Params::inst().configFileNVIDIA))
+ if(!configEditor::file_exist(Params::inst().configFileNVIDIA))
{
autoAdjust adjust;
if(!adjust.printConfig())
@@ -174,19 +174,19 @@ void minethd::switch_work(miner_work& pWork)
// faster than threads can consume them. This should never happen in real life.
// Pool cant physically send jobs faster than every 250ms or so due to net latency.
- while (GlobalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < GlobalStates::inst().iThreadCount)
+ while (globalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < globalStates::inst().iThreadCount)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
- GlobalStates::inst().oGlobalWork = pWork;
- GlobalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
- GlobalStates::inst().iGlobalJobNo++;
+ globalStates::inst().oGlobalWork = pWork;
+ globalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
+ globalStates::inst().iGlobalJobNo++;
}
void minethd::consume_work()
{
- memcpy(&oWork, &GlobalStates::inst().oGlobalWork, sizeof(miner_work));
+ memcpy(&oWork, &globalStates::inst().oGlobalWork, sizeof(miner_work));
iJobNo++;
- GlobalStates::inst().iConsumeCnt++;
+ globalStates::inst().iConsumeCnt++;
}
void minethd::work_main()
@@ -197,7 +197,7 @@ void minethd::work_main()
cpu_ctx = cpu::minethd::minethd_alloc_ctx();
cn_hash_fun hash_fun = cpu::minethd::func_selector(::jconf::inst()->HaveHardwareAes(), true /*bNoPrefetch*/);
- GlobalStates::inst().iConsumeCnt++;
+ globalStates::inst().iConsumeCnt++;
if(/*cuda_get_deviceinfo(&ctx) != 1 ||*/ cryptonight_extra_cpu_init(&ctx) != 1)
{
@@ -213,7 +213,7 @@ void minethd::work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -225,7 +225,7 @@ void minethd::work_main()
assert(sizeof(job_result::sJobID) == sizeof(pool_job::sJobID));
- while(GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while(globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
uint32_t foundNonce[10];
diff --git a/xmrstak/backend/nvidia/minethd.hpp b/xmrstak/backend/nvidia/minethd.hpp
index ecf189a..657ee6a 100644
--- a/xmrstak/backend/nvidia/minethd.hpp
+++ b/xmrstak/backend/nvidia/minethd.hpp
@@ -2,9 +2,9 @@
#include "xmrstak/jconf.hpp"
#include "jconf.hpp"
-#include "nvcc_code/cryptonight.h"
+#include "nvcc_code/cryptonight.hpp"
-#include "xmrstak/bakcend/cpu/crypto/cryptonight.h"
+#include "xmrstak/backend/cpu/crypto/cryptonight.h"
#include "xmrstak/backend/iBackend.hpp"
#include "xmrstak/misc/environment.hpp"
@@ -19,12 +19,12 @@ namespace xmrstak
namespace nvidia
{
-class minethd : public IBackend
+class minethd : public iBackend
{
public:
static void switch_work(miner_work& pWork);
- static std::vector<IBackend*>* thread_starter(uint32_t threadOffset, miner_work& pWork);
+ static std::vector<iBackend*>* thread_starter(uint32_t threadOffset, miner_work& pWork);
static bool self_test();
private:
@@ -32,21 +32,6 @@ private:
minethd(miner_work& pWork, size_t iNo, const jconf::thd_cfg& cfg);
- // We use the top 10 bits of the nonce for thread and resume
- // This allows us to resume up to 128 threads 4 times before
- // we get nonce collisions
- // Bottom 22 bits allow for an hour of work at 1000 H/s
- inline uint32_t calc_start_nonce(uint32_t resume)
- {
- return reverseBits<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume);
- }
-
- // Limited version of the nonce calc above
- inline uint32_t calc_nicehash_nonce(uint32_t start, uint32_t resume)
- {
- return start | ( ( reverseBits(iThreadNo + GlobalStates::inst().iThreadCount * resume) >> 4u ) );
- }
-
void work_main();
void consume_work();
@@ -59,7 +44,6 @@ private:
miner_work oWork;
std::thread oWorkThd;
- uint8_t iThreadNo;
nvid_ctx ctx;
diff --git a/xmrstak/backend/plugin.hpp b/xmrstak/backend/plugin.hpp
index 38f8e53..7a3e6f5 100644
--- a/xmrstak/backend/plugin.hpp
+++ b/xmrstak/backend/plugin.hpp
@@ -23,10 +23,10 @@
namespace xmrstak
{
-struct Plugin
+struct plugin
{
- Plugin(const std::string backendName, const std::string libName) : fn_starterBackend(nullptr), m_backendName(backendName)
+ plugin(const std::string backendName, const std::string libName) : fn_starterBackend(nullptr), m_backendName(backendName)
{
#ifdef WIN32
libBackend = LoadLibrary(TEXT((libName + ".dll").c_str()));
@@ -62,11 +62,11 @@ struct Plugin
#endif
}
- std::vector<IBackend*>* startBackend(uint32_t threadOffset, miner_work& pWork, Environment& env)
+ std::vector<iBackend*>* startBackend(uint32_t threadOffset, miner_work& pWork, environment& env)
{
if(fn_starterBackend == nullptr)
{
- std::vector<IBackend*>* pvThreads = new std::vector<IBackend*>();
+ std::vector<iBackend*>* pvThreads = new std::vector<iBackend*>();
std::cerr << "WARNING: " << m_backendName << " Backend disabled"<< std::endl;
return pvThreads;
}
@@ -76,7 +76,7 @@ struct Plugin
std::string m_backendName;
- typedef std::vector<IBackend*>* (*starterBackend_t)(uint32_t threadOffset, miner_work& pWork, Environment& env);
+ typedef std::vector<iBackend*>* (*starterBackend_t)(uint32_t threadOffset, miner_work& pWork, environment& env);
starterBackend_t fn_starterBackend;
@@ -86,7 +86,7 @@ struct Plugin
void *libBackend;
#endif
-/* \todo add unload to destructor and change usage of Plugin that libs keeped open until the miner endss
+/* \todo add unload to destructor and change usage of plugin that libs keeped open until the miner endss
#ifdef WIN32
FreeLibrary(libBackend);
#else