| author | psychocrypt <psychocrypt@users.noreply.github.com> | 2017-09-29 22:06:09 +0200 |
|---|---|---|
| committer | psychocrypt <psychocrypt@users.noreply.github.com> | 2017-09-30 23:46:08 +0200 |
| commit | 855af1cf65de1fd3795de3c9a859fd9242625a84 (patch) | |
| tree | 51368c6f1d6cd401969f4beedcbcccc21d055153 /xmrstak/backend/amd | |
| parent | 8babae3156430f5aa6b804f7c352ffb178097963 (diff) | |
| download | xmr-stak-855af1cf65de1fd3795de3c9a859fd9242625a84.zip xmr-stak-855af1cf65de1fd3795de3c9a859fd9242625a84.tar.gz | |
fix compile
Diffstat (limited to 'xmrstak/backend/amd')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | xmrstak/backend/amd/autoAdjust.hpp | 2 |
| -rw-r--r-- | xmrstak/backend/amd/minethd.cpp | 28 |
| -rw-r--r-- | xmrstak/backend/amd/minethd.hpp | 16 |
3 files changed, 19 insertions, 27 deletions
diff --git a/xmrstak/backend/amd/autoAdjust.hpp b/xmrstak/backend/amd/autoAdjust.hpp
index 84541ae..c936e30 100644
--- a/xmrstak/backend/amd/autoAdjust.hpp
+++ b/xmrstak/backend/amd/autoAdjust.hpp
@@ -76,7 +76,7 @@ private:
 		#include "./config.tpl"
 	;
 
-	ConfigEditor configTpl{};
+	configEditor configTpl{};
 	configTpl.set( std::string(tpl) );
 	std::string conf;
diff --git a/xmrstak/backend/amd/minethd.cpp b/xmrstak/backend/amd/minethd.cpp
index b2f5620..2b7ce77 100644
--- a/xmrstak/backend/amd/minethd.cpp
+++ b/xmrstak/backend/amd/minethd.cpp
@@ -63,9 +63,9 @@ extern "C" {
 #ifdef WIN32
 __declspec(dllexport)
 #endif
-std::vector<IBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, Environment& env)
+std::vector<iBackend*>* xmrstak_start_backend(uint32_t threadOffset, miner_work& pWork, environment& env)
 {
-	Environment::inst() = env;
+	environment::inst() = env;
 	return amd::minethd::thread_starter(threadOffset, pWork);
 }
 } // extern "C"
@@ -91,11 +91,11 @@ bool minethd::init_gpus()
 
 std::vector<GpuContext> minethd::vGpuData;
 
-std::vector<IBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
+std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_work& pWork)
 {
-	std::vector<IBackend*>* pvThreads = new std::vector<IBackend*>();
+	std::vector<iBackend*>* pvThreads = new std::vector<iBackend*>();
 
-	if(!ConfigEditor::file_exist(Params::inst().configFileAMD))
+	if(!configEditor::file_exist(Params::inst().configFileAMD))
 	{
 		autoAdjust adjust;
 		if(!adjust.printConfig())
@@ -148,19 +148,19 @@ void minethd::switch_work(miner_work& pWork)
 	// faster than threads can consume them. This should never happen in real life.
 	// Pool cant physically send jobs faster than every 250ms or so due to net latency.
 
-	while (GlobalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < GlobalStates::inst().iThreadCount)
+	while (globalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < globalStates::inst().iThreadCount)
 		std::this_thread::sleep_for(std::chrono::milliseconds(100));
 
-	GlobalStates::inst().oGlobalWork = pWork;
-	GlobalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
-	GlobalStates::inst().iGlobalJobNo++;
+	globalStates::inst().oGlobalWork = pWork;
+	globalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
+	globalStates::inst().iGlobalJobNo++;
 }
 
 void minethd::consume_work()
 {
-	memcpy(&oWork, &GlobalStates::inst().oGlobalWork, sizeof(miner_work));
+	memcpy(&oWork, &globalStates::inst().oGlobalWork, sizeof(miner_work));
 	iJobNo++;
-	GlobalStates::inst().iConsumeCnt++;
+	globalStates::inst().iConsumeCnt++;
 }
 
@@ -172,7 +172,7 @@ void minethd::work_main()
 	cpu_ctx = cpu::minethd::minethd_alloc_ctx();
 	cn_hash_fun hash_fun = cpu::minethd::func_selector(::jconf::inst()->HaveHardwareAes(), true /*bNoPrefetch*/);
 
-	GlobalStates::inst().iConsumeCnt++;
+	globalStates::inst().iConsumeCnt++;
 
 	while (bQuit == 0)
 	{
@@ -182,7 +182,7 @@ void minethd::work_main()
 			either because of network latency, or a socket problem. Since we are
 			raison d'etre of this software it us sensible to just wait until we have something*/
 
-			while (GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+			while (globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
 				std::this_thread::sleep_for(std::chrono::milliseconds(100));
 
 			consume_work();
@@ -194,7 +194,7 @@ void minethd::work_main()
 		uint32_t target = oWork.iTarget32;
 		XMRSetJob(pGpuCtx, oWork.bWorkBlob, oWork.iWorkSize, target);
 
-		while(GlobalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
+		while(globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
 		{
 			cl_uint results[0x100];
 			memset(results,0,sizeof(cl_uint)*(0x100));
diff --git a/xmrstak/backend/amd/minethd.hpp b/xmrstak/backend/amd/minethd.hpp
index 7e71082..21c2dd9 100644
--- a/xmrstak/backend/amd/minethd.hpp
+++ b/xmrstak/backend/amd/minethd.hpp
@@ -2,6 +2,8 @@
 
 #include "amd_gpu/gpu.hpp"
 #include "jconf.hpp"
+#include "xmrstak/backend/cpu/crypto/cryptonight.h"
+#include "xmrstak/backend/miner_work.hpp"
 #include "xmrstak/backend/iBackend.hpp"
 #include "xmrstak/misc/environment.hpp"
 
@@ -13,27 +15,18 @@ namespace xmrstak
 namespace amd
 {
 
-class minethd : public IBackend
+class minethd : public iBackend
 {
 public:
 	static void switch_work(miner_work& pWork);
-	static std::vector<IBackend*>* thread_starter(uint32_t threadOffset, miner_work& pWork);
+	static std::vector<iBackend*>* thread_starter(uint32_t threadOffset, miner_work& pWork);
 	static bool init_gpus();
 
 private:
 	typedef void (*cn_hash_fun)(const void*, size_t, void*, cryptonight_ctx*);
 
 	minethd(miner_work& pWork, size_t iNo, GpuContext* ctx);
-
-	// We use the top 8 bits of the nonce for thread and resume
-	// This allows us to resume up to 64 threads 4 times before
-	// we get nonce collisions
-	// Bottom 24 bits allow for an hour of work at 4000 H/s
-	inline uint32_t calc_start_nonce(uint32_t resume)
-	{
-		return reverseBits<uint32_t>(static_cast<uint32_t>(iThreadNo + GlobalStates::inst().iThreadCount * resume));
-	}
 
 	void work_main();
 	void double_work_main();
@@ -45,7 +38,6 @@ private:
 	miner_work oWork;
 	std::thread oWorkThd;
 
-	uint8_t iThreadNo;
 	bool bQuit;
 	bool bNoPrefetch;
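The hunks above are case-only renames (IBackend → iBackend, Environment → environment, GlobalStates → globalStates, ConfigEditor → configEditor), apparently bringing the AMD backend in line with identifiers defined elsewhere in the tree so it compiles again. As a rough illustration of the access pattern these call sites rely on, below is a minimal, hypothetical sketch of a globalStates-style singleton; the member names mirror the diff, but the types and layout are assumptions, not the real xmr-stak header.

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical, simplified stand-in for xmr-stak's globalStates singleton.
// Member names follow the hunks above; everything else is illustrative only.
struct globalStates
{
	static globalStates& inst()
	{
		static globalStates obj; // one process-wide instance, as the inst() calls in the diff suggest
		return obj;
	}

	std::atomic<uint64_t> iGlobalJobNo{0}; // bumped when a new job arrives
	std::atomic<uint64_t> iConsumeCnt{0};  // counts threads that picked up the job
	uint64_t iThreadCount = 0;             // total worker threads
};

// Usage in the style of the renamed call sites in minethd.cpp above.
int main()
{
	globalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
	globalStates::inst().iGlobalJobNo++;
	return 0;
}
```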