path: root/xmrstak/backend/amd/minethd.cpp
author     psychocrypt <psychocrypt@users.noreply.github.com>  2017-09-29 22:06:09 +0200
committer  psychocrypt <psychocrypt@users.noreply.github.com>  2017-09-30 23:46:08 +0200
commit     855af1cf65de1fd3795de3c9a859fd9242625a84 (patch)
tree       51368c6f1d6cd401969f4beedcbcccc21d055153 /xmrstak/backend/amd/minethd.cpp
parent     8babae3156430f5aa6b804f7c352ffb178097963 (diff)
download   xmr-stak-855af1cf65de1fd3795de3c9a859fd9242625a84.zip
           xmr-stak-855af1cf65de1fd3795de3c9a859fd9242625a84.tar.gz
fix compile
Diffstat (limited to 'xmrstak/backend/amd/minethd.cpp')
-rw-r--r--  xmrstak/backend/amd/minethd.cpp  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/xmrstak/backend/amd/minethd.cpp b/xmrstak/backend/amd/minethd.cpp
index b2f5620..2b7ce77 100644
--- a/xmrstak/backend/amd/minethd.cpp
+++ b/xmrstak/backend/amd/minethd.cpp
@@ -63,9 +63,9 @@ extern "C" {
#ifdef WIN32
__declspec(dllexport)
#endif
-std::vector<IBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, Environment& env)
+std::vector<iBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, environment& env)
{
- Environment::inst() = env;
+ environment::inst() = env;
return amd::minethd::thread_starter(threadOffset, pWork);
}
} // extern "C"
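For context: the entry point in the hunk above is exported with C linkage (and __declspec(dllexport) on Windows) and copies the caller's environment into the module's own singleton before starting the AMD threads; the commit only renames the types involved (IBackend -> iBackend, Environment -> environment), the export mechanism itself is unchanged. Below is a minimal, self-contained sketch of that export pattern, using simplified stand-ins for iBackend, environment and miner_work — an illustration under those assumptions, not the project's real implementation.

// Reduced sketch of the exported backend entry point shown above. The type
// names follow the diff, but their bodies are placeholder assumptions so the
// example compiles on its own.
#include <cstdint>
#include <cstdio>
#include <vector>

struct miner_work { };        // stand-in for the real job type

struct iBackend               // minimal backend interface
{
    virtual ~iBackend() = default;
};

// The real environment collects the process-wide singletons; a single int
// stands in for that state here.
struct environment
{
    static environment& inst()
    {
        static environment env;
        return env;
    }
    int someGlobalState = 0;
};

#ifdef _WIN32
#define BACKEND_EXPORT __declspec(dllexport)
#else
#define BACKEND_EXPORT
#endif

extern "C" {

// C linkage gives the symbol a stable, unmangled name so the host binary can
// look it up at runtime. Copying `env` into the library's own singleton lets
// the plugin and the host agree on the same global state, since each shared
// object would otherwise get its own static instance.
BACKEND_EXPORT std::vector<iBackend*>* xmrstak_start_backend(
    uint32_t threadOffset, miner_work& pWork, environment& env)
{
    environment::inst() = env;
    (void)threadOffset;
    (void)pWork;
    // The real function forwards to amd::minethd::thread_starter(); the sketch
    // just returns an empty thread list.
    return new std::vector<iBackend*>();
}

} // extern "C"

int main()
{
    // Host side: pass our environment into the (here: same-binary) backend.
    miner_work work;
    std::vector<iBackend*>* threads =
        xmrstak_start_backend(0, work, environment::inst());
    std::printf("backend returned %zu thread(s)\n", threads->size());
    delete threads;
    return 0;
}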
@@ -91,11 +91,11 @@ bool minethd::init_gpus()
std::vector<GpuContext> minethd::vGpuData;
-std::vector<IBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
+std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
{
- std::vector<IBackend*>* pvThreads = new std::vector<IBackend*>();
+ std::vector<iBackend*>* pvThreads = new std::vector<iBackend*>();
- if(!ConfigEditor::file_exist(Params::inst().configFileAMD))
+ if(!configEditor::file_exist(Params::inst().configFileAMD))
{
autoAdjust adjust;
if(!adjust.printConfig())
@@ -148,19 +148,19 @@ void minethd::switch_work(miner_work& pWork)
// faster than threads can consume them. This should never happen in real life.
// Pool cant physically send jobs faster than every 250ms or so due to net latency.
- while (GlobalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < GlobalStates::inst().iThreadCount)
+ while (globalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < globalStates::inst().iThreadCount)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
- GlobalStates::inst().oGlobalWork = pWork;
- GlobalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
- GlobalStates::inst().iGlobalJobNo++;
+ globalStates::inst().oGlobalWork = pWork;
+ globalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
+ globalStates::inst().iGlobalJobNo++;
}
void minethd::consume_work()
{
- memcpy(&oWork, &GlobalStates::inst().oGlobalWork, sizeof(miner_work));
+ memcpy(&oWork, &globalStates::inst().oGlobalWork, sizeof(miner_work));
iJobNo++;
- GlobalStates::inst().iConsumeCnt++;
+ globalStates::inst().iConsumeCnt++;
}
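For context: switch_work() waits until every worker has copied the previous job (tracked by iConsumeCnt), then publishes the new work, resets the counter and bumps iGlobalJobNo; the workers' wait loops, including the ones in work_main() further down in this diff, watch that counter and call consume_work() when it changes. A minimal, self-contained sketch of this handshake with a single worker thread follows; miner_work and globalStates are simplified stand-ins, and the wait loop uses an acquire load where the diff uses memory_order_relaxed, so the sketch itself is free of formal data races.

// Sketch of the publish/consume handshake from switch_work()/consume_work().
// Names mirror the diff; the struct bodies are simplified placeholders.
#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

struct miner_work { uint64_t iBlockHeight = 0; };   // stand-in for the real job type

struct globalStates
{
    static globalStates& inst() { static globalStates g; return g; }

    miner_work oGlobalWork;
    std::atomic<uint64_t> iConsumeCnt{0};
    std::atomic<uint64_t> iGlobalJobNo{0};
    uint64_t iThreadCount = 1;
};

// Producer: wait until every worker has copied the previous job, then publish
// the new one and signal it by bumping the global job number.
void switch_work(const miner_work& pWork)
{
    while(globalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) <
          globalStates::inst().iThreadCount)
        std::this_thread::sleep_for(std::chrono::milliseconds(10));

    globalStates::inst().oGlobalWork = pWork;
    globalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
    globalStates::inst().iGlobalJobNo++;
}

// Worker: copy the shared job locally and report that it has been consumed.
void consume_work(miner_work& oWork, uint64_t& iJobNo)
{
    oWork = globalStates::inst().oGlobalWork;
    iJobNo++;
    globalStates::inst().iConsumeCnt++;
}

int main()
{
    miner_work oWork;
    uint64_t iJobNo = 0;

    // Like work_main(), the worker announces itself as ready once at startup.
    globalStates::inst().iConsumeCnt++;

    std::thread worker([&]
    {
        // The diff polls with memory_order_relaxed; acquire is used here so the
        // following copy of oGlobalWork is formally ordered after the publish.
        while(globalStates::inst().iGlobalJobNo.load(std::memory_order_acquire) == iJobNo)
            std::this_thread::sleep_for(std::chrono::milliseconds(10));

        consume_work(oWork, iJobNo);
        std::printf("worker picked up job %llu at height %llu\n",
            (unsigned long long)iJobNo, (unsigned long long)oWork.iBlockHeight);
    });

    miner_work newWork;
    newWork.iBlockHeight = 1234;
    switch_work(newWork);

    worker.join();
    return 0;
}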
@@ -172,7 +172,7 @@ void minethd::work_main()
cpu_ctx = cpu::minethd::minethd_alloc_ctx();
cn_hash_fun hash_fun = cpu::minethd::func_selector(::jconf::inst()->HaveHardwareAes(), true /*bNoPrefetch*/);
- GlobalStates::inst().iConsumeCnt++;
+ globalStates::inst().iConsumeCnt++;
while (bQuit == 0)
{
@@ -182,7 +182,7 @@ void minethd::work_main()
either because of network latency, or a socket problem. Since we are
raison d'etre of this software it us sensible to just wait until we have something*/
- while (GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while (globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
consume_work();
@@ -194,7 +194,7 @@ void minethd::work_main()
uint32_t target = oWork.iTarget32;
XMRSetJob(pGpuCtx, oWork.bWorkBlob, oWork.iWorkSize, target);
- while(GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+ while(globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
{
cl_uint results[0x100];
memset(results,0,sizeof(cl_uint)*(0x100));