author    fireice-uk <fireice-uk@users.noreply.github.com>    2017-10-30 17:44:01 +0000
committer GitHub <noreply@github.com>    2017-10-30 17:44:01 +0000
commit    69e6013cee02299e4732b02a83025e7011b12f5e (patch)
tree      ec9ec94c659827be9efa4344d3c1132d61856635 /xmrstak
parent    83a15dc1e011146a5955bb46f2990bef820465d7 (diff)
parent    3aa04f2871e96dd14c89f044ac30a9bbb42c07af (diff)
Merge pull request #74 from psychocrypt/topic-vegaAutoSuggestion
tune VEGA auto suggestion
Diffstat (limited to 'xmrstak')
-rw-r--r--  xmrstak/backend/amd/autoAdjust.hpp  |  18
1 file changed, 15 insertions, 3 deletions
diff --git a/xmrstak/backend/amd/autoAdjust.hpp b/xmrstak/backend/amd/autoAdjust.hpp
index 8f9aa50..41dd11a 100644
--- a/xmrstak/backend/amd/autoAdjust.hpp
+++ b/xmrstak/backend/amd/autoAdjust.hpp
@@ -97,13 +97,25 @@ private:
int i = 0;
for(auto& ctx : devVec)
{
+ /* 1000 is an empirically chosen limit: using more than 2GiB of memory
+ * slows down memory performance because of TLB cache misses.
+ */
+ size_t maxThreads = 1000u;
+ if(ctx.name.compare("gfx901") == 0)
+ {
+ /* Increase the number of threads for AMD VEGA GPUs.
+ * Limit the number of threads based on https://github.com/fireice-uk/xmr-stak/issues/5#issuecomment-339425089
+ * to avoid out-of-memory errors.
+ */
+ maxThreads = 2024u;
+ }
+
// keep 128MiB memory free (value is randomly chosen)
size_t availableMem = ctx.freeMem - (128u * byteToMiB);
// 224byte extra memory is used per thread for meta data
size_t perThread = hashMemSize + 224u;
- size_t max_intensity = availableMem / perThread;
- // 1000 is a magic selected limit \todo select max intensity depending of the gpu type
- size_t possibleIntensity = std::min( size_t(1000u) , max_intensity );
+ size_t maxIntensity = availableMem / perThread;
+ size_t possibleIntensity = std::min( maxThreads , maxIntensity );
// map intensity to a multiple of the compute unit count, 8 is the number of threads per work group
size_t intensity = (possibleIntensity / (8 * ctx.computeUnits)) * ctx.computeUnits * 8;
conf += std::string(" // gpu: ") + ctx.name + " memory:" + std::to_string(availableMem / byteToMiB) + "\n";
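
Below is a minimal standalone sketch of the intensity auto-adjust arithmetic shown in the hunk above. The device values (free memory, compute units) and the 2 MiB hashMemSize (assumed to be the CryptoNight scratchpad size) are hypothetical, chosen only to illustrate how the thread cap and the rounding to a multiple of 8 * computeUnits interact; it is not the project's actual implementation.

// intensity_sketch.cpp -- illustrative only, values are assumptions
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

int main()
{
	constexpr size_t byteToMiB = 1024u * 1024u;
	// Assumed CryptoNight scratchpad size per hash
	constexpr size_t hashMemSize = 2u * byteToMiB;

	// Hypothetical VEGA-class device: 8 GiB reported free, 64 compute units
	std::string name = "gfx901";
	size_t freeMem = 8192u * byteToMiB;
	uint32_t computeUnits = 64u;

	// Default cap of 1000 threads; VEGA (gfx901) is allowed up to 2024
	size_t maxThreads = 1000u;
	if(name.compare("gfx901") == 0)
		maxThreads = 2024u;

	// Keep 128 MiB free and account for 224 bytes of per-thread metadata
	size_t availableMem = freeMem - (128u * byteToMiB);
	size_t perThread = hashMemSize + 224u;
	size_t maxIntensity = availableMem / perThread;
	size_t possibleIntensity = std::min(maxThreads, maxIntensity);

	// Round down to a multiple of (8 * computeUnits); 8 threads per work group
	size_t intensity = (possibleIntensity / (8u * computeUnits)) * computeUnits * 8u;

	std::cout << "intensity for " << name << ": " << intensity << "\n";
	return 0;
}

With these assumed numbers the memory-derived limit far exceeds the thread cap, so possibleIntensity is 2024 and the final intensity rounds down to 1536 (the largest multiple of 512 not above 2024), which is the kind of VEGA suggestion this patch targets.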