Diffstat (limited to 'xmrstak/backend/nvidia')
-rw-r--r--  xmrstak/backend/nvidia/minethd.cpp  | 30
-rw-r--r--  xmrstak/backend/nvidia/minethd.hpp  |  3
2 files changed, 5 insertions(+), 28 deletions(-)
diff --git a/xmrstak/backend/nvidia/minethd.cpp b/xmrstak/backend/nvidia/minethd.cpp
index 5d4782e..a21e525 100644
--- a/xmrstak/backend/nvidia/minethd.cpp
+++ b/xmrstak/backend/nvidia/minethd.cpp
@@ -195,27 +195,6 @@ std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_wor
return pvThreads;
}
-void minethd::switch_work(miner_work& pWork)
-{
- // iConsumeCnt is a basic lock-like polling mechanism just in case we happen to push work
- // faster than threads can consume it. This should never happen in real life.
- // The pool can't physically send jobs faster than every 250 ms or so due to network latency.
-
- while (globalStates::inst().iConsumeCnt.load(std::memory_order_seq_cst) < globalStates::inst().iThreadCount)
- std::this_thread::sleep_for(std::chrono::milliseconds(100));
-
- globalStates::inst().oGlobalWork = pWork;
- globalStates::inst().iConsumeCnt.store(0, std::memory_order_seq_cst);
- globalStates::inst().iGlobalJobNo++;
-}
-
-void minethd::consume_work()
-{
- memcpy(&oWork, &globalStates::inst().oGlobalWork, sizeof(miner_work));
- iJobNo++;
- globalStates::inst().iConsumeCnt++;
-}
-
void minethd::work_main()
{
if(affinity >= 0) //-1 means no affinity
@@ -244,8 +223,6 @@ void minethd::work_main()
uint32_t iNonce;
- globalStates::inst().iConsumeCnt++;
-
uint8_t version = 0;
size_t lastPoolId = 0;
@@ -261,7 +238,7 @@ void minethd::work_main()
while (globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) == iJobNo)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
- consume_work();
+ globalStates::inst().consume_work(oWork, iJobNo);
continue;
}
uint8_t new_version = oWork.getVersion();
@@ -299,6 +276,9 @@ void minethd::work_main()
{
globalStates::inst().calc_start_nonce(iNonce, oWork.bNiceHash, h_per_round * 16);
}
+ // check whether the job is still valid; there is a small possibility that the job was switched
+ if(globalStates::inst().iGlobalJobNo.load(std::memory_order_relaxed) != iJobNo)
+ break;
uint32_t foundNonce[10];
uint32_t foundCount;
@@ -337,7 +317,7 @@ void minethd::work_main()
std::this_thread::yield();
}
- consume_work();
+ globalStates::inst().consume_work(oWork, iJobNo);
}
}
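
The removed per-backend switch_work()/consume_work() pair is replaced by a single shared helper on globalStates; its definition lives outside this diff. A minimal sketch of the pattern, reconstructed from the deleted minethd::consume_work body above, follows. The class name GlobalJobState and the publish_work() helper are hypothetical stand-ins; only the consume_work(oWork, iJobNo) shape matches the call sites in the hunks.

// Self-contained sketch of the publish/consume pattern this commit centralizes.
// GlobalJobState and publish_work are hypothetical; the real globalStates may
// synchronize differently.
#include <atomic>
#include <cstdint>
#include <mutex>

struct miner_work { uint8_t bWorkBlob[128]; /* trimmed for the sketch */ };

class GlobalJobState
{
	std::mutex jobMutex;                   // guards oGlobalWork during copies
	miner_work oGlobalWork{};
	std::atomic<uint64_t> iGlobalJobNo{0};

public:
	// Producer side (replaces the deleted minethd::switch_work): publish the
	// job under the lock, then bump the counter so polling threads see the change.
	void publish_work(const miner_work& pWork)
	{
		std::lock_guard<std::mutex> lk(jobMutex);
		oGlobalWork = pWork;
		iGlobalJobNo.fetch_add(1, std::memory_order_release);
	}

	// Consumer side (matches the new call sites in work_main): take a private
	// copy of the job and record which job number this thread now holds. The
	// deleted code incremented a local counter instead; either works with the
	// equality polls in work_main().
	void consume_work(miner_work& threadWork, uint64_t& threadJobNo)
	{
		std::lock_guard<std::mutex> lk(jobMutex);
		threadWork = oGlobalWork;
		threadJobNo = iGlobalJobNo.load(std::memory_order_relaxed);
	}

	uint64_t current_job_no() const
	{
		return iGlobalJobNo.load(std::memory_order_acquire);
	}
};

Compared with the deleted code, this shape also drops the iConsumeCnt back-pressure loop: consumers no longer acknowledge jobs, they simply copy the latest one, which is why the iConsumeCnt++ lines disappear from work_main().
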
diff --git a/xmrstak/backend/nvidia/minethd.hpp b/xmrstak/backend/nvidia/minethd.hpp
index ad541bf..d4ae038 100644
--- a/xmrstak/backend/nvidia/minethd.hpp
+++ b/xmrstak/backend/nvidia/minethd.hpp
@@ -24,7 +24,6 @@ class minethd : public iBackend
{
public:
- static void switch_work(miner_work& pWork);
static std::vector<iBackend*>* thread_starter(uint32_t threadOffset, miner_work& pWork);
static bool self_test();
@@ -35,14 +34,12 @@ private:
void start_mining();
void work_main();
- void consume_work();
static std::atomic<uint64_t> iGlobalJobNo;
static std::atomic<uint64_t> iConsumeCnt;
static uint64_t iThreadCount;
uint64_t iJobNo;
- static miner_work oGlobalWork;
miner_work oWork;
std::promise<void> numa_promise;
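
On the consumer side, the added iGlobalJobNo check in work_main() introduces an early-exit between nonce reservation and the GPU round. A condensed, hypothetical loop using the GlobalJobState sketch above shows why the ordering matters: a job switch can land after the nonce range is reserved but before hashing starts, and without the check the thread would hash stale work.

// Condensed consumer loop mirroring the structure of work_main();
// do_gpu_round() is a stand-in for the real CUDA kernel invocation.
void work_loop(GlobalJobState& gs)
{
	miner_work oWork{};
	uint64_t iJobNo = 0;
	gs.consume_work(oWork, iJobNo);

	for(;;)
	{
		while(gs.current_job_no() == iJobNo)
		{
			uint32_t iNonce = 0; // a calc_start_nonce equivalent reserves a range here

			// The check added by this commit: a new job may have been published
			// while the range was reserved; bail out rather than hash stale work.
			if(gs.current_job_no() != iJobNo)
				break;

			// do_gpu_round(oWork, iNonce); // hash the reserved range
			(void)iNonce;
		}

		// Job changed: pull the new one and start over.
		gs.consume_work(oWork, iJobNo);
	}
}
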