Diffstat (limited to 'xmrstak/backend/amd/autoAdjust.hpp')
-rw-r--r--  xmrstak/backend/amd/autoAdjust.hpp | 55
1 file changed, 36 insertions(+), 19 deletions(-)
diff --git a/xmrstak/backend/amd/autoAdjust.hpp b/xmrstak/backend/amd/autoAdjust.hpp
index 0bc5239..ea057a0 100644
--- a/xmrstak/backend/amd/autoAdjust.hpp
+++ b/xmrstak/backend/amd/autoAdjust.hpp
@@ -83,25 +83,25 @@ private:
constexpr size_t byteToMiB = 1024u * 1024u;
- size_t hashMemSize;
- if(::jconf::inst()->IsCurrencyMonero())
- {
- hashMemSize = MONERO_MEMORY;
- }
- else
- {
- hashMemSize = AEON_MEMORY;
- }
+ size_t hashMemSize = cn_select_memory(::jconf::inst()->GetMiningAlgo());
std::string conf;
- int i = 0;
for(auto& ctx : devVec)
{
/* 1000 is a magic selected limit; the reason is that more than 2GiB of memory
 * slows down the memory performance because of TLB cache misses
 */
size_t maxThreads = 1000u;
- if(ctx.name.compare("gfx901") == 0)
+ if(
+ ctx.name.compare("gfx901") == 0 ||
+ ctx.name.compare("gfx904") == 0 ||
+ // APU
+ ctx.name.compare("gfx902") == 0 ||
+ // UNKNOWN
+ ctx.name.compare("gfx900") == 0 ||
+ ctx.name.compare("gfx903") == 0 ||
+ ctx.name.compare("gfx905") == 0
+ )
{
/* Increase the number of threads for AMD VEGA gpus.
* Limit the number of threads based on the issue: https://github.com/fireice-uk/xmr-stak/issues/5#issuecomment-339425089
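For readers unfamiliar with the helper introduced in this hunk: below is a minimal sketch of what a cn_select_memory-style lookup boils down to, assuming the usual scratchpad sizes (2 MiB per hash for CryptoNight, 1 MiB for CryptoNight-Lite/aeon). The enum and function names are illustrative stand-ins, not the project's exact definitions.

#include <cstddef>

// Illustrative stand-ins; the real algorithm enum and cn_select_memory live elsewhere in xmr-stak.
enum xmrstak_algo_sketch { cryptonight, cryptonight_lite };

// Assumed scratchpad sizes: 2 MiB per hash for CryptoNight, 1 MiB for CryptoNight-Lite (aeon).
constexpr size_t cn_select_memory_sketch(xmrstak_algo_sketch algo)
{
	return algo == cryptonight_lite ? 1024u * 1024u : 2u * 1024u * 1024u;
}

static_assert(cn_select_memory_sketch(cryptonight) == 2u * 1024u * 1024u, "CryptoNight uses a 2 MiB scratchpad");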
@@ -109,6 +109,9 @@ private:
*/
maxThreads = 2024u;
}
+ // double all intensity limits for aeon (cryptonight_lite)
+ if(::jconf::inst()->GetMiningAlgo() == cryptonight_lite)
+ maxThreads *= 2u;
// keep 128MiB memory free (value is randomly chosen)
size_t availableMem = ctx.freeMem - (128u * byteToMiB);
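To make the arithmetic concrete, here is a worked sketch of the derivation that starts with availableMem above and continues with the rounding in the next hunk, under assumed numbers (a Vega-class card with 56 compute units and 4 GiB reported free). Treating maxIntensity as availableMem / hashMemSize is an assumption; the real code may add a small per-thread overhead to the divisor.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main()
{
	constexpr size_t byteToMiB = 1024u * 1024u;
	// Assumed inputs: CryptoNight scratchpad, a card reporting 4 GiB free, 56 compute units.
	const size_t hashMemSize  = 2u * byteToMiB;
	const size_t freeMem      = 4096u * byteToMiB;
	const size_t computeUnits = 56u;
	const size_t maxThreads   = 2024u;                             // Vega limit from the patch

	const size_t availableMem      = freeMem - (128u * byteToMiB); // keep 128 MiB free
	const size_t maxIntensity      = availableMem / hashMemSize;   // assumption: no per-thread overhead
	const size_t possibleIntensity = std::min(maxThreads, maxIntensity);
	// round down to a multiple of 8 * computeUnits (8 threads per work group)
	const size_t intensity = (possibleIntensity / (8u * computeUnits)) * computeUnits * 8u;

	std::printf("intensity = %zu\n", intensity);                   // 1792 under these assumptions
	return 0;
}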
@@ -118,14 +121,28 @@ private:
size_t possibleIntensity = std::min( maxThreads , maxIntensity );
// map intensity to a multiple of the compute unit count, 8 is the number of threads per work group
size_t intensity = (possibleIntensity / (8 * ctx.computeUnits)) * ctx.computeUnits * 8;
- conf += std::string(" // gpu: ") + ctx.name + " memory:" + std::to_string(availableMem / byteToMiB) + "\n";
- conf += std::string(" // compute units: ") + std::to_string(ctx.computeUnits) + "\n";
- // set 8 threads per block (this is a good value for the most gpus)
- conf += std::string(" { \"index\" : ") + std::to_string(ctx.deviceIdx) + ",\n" +
- " \"intensity\" : " + std::to_string(intensity) + ", \"worksize\" : " + std::to_string(8) + ",\n" +
- " \"affine_to_cpu\" : false, \"strided_index\" : true\n"
- " },\n";
- ++i;
+ // If the intensity is 0, the rounding granularity (8 * compute units) is larger than the possible intensity
+ if (intensity == 0)
+ {
+ printer::inst()->print_msg(L0, "WARNING: Auto detected intensity unexpectedly low. Try to set the environment variable GPU_SINGLE_ALLOC_PERCENT.");
+ intensity = possibleIntensity;
+
+ }
+ if (intensity != 0)
+ {
+ conf += std::string(" // gpu: ") + ctx.name + " memory:" + std::to_string(availableMem / byteToMiB) + "\n";
+ conf += std::string(" // compute units: ") + std::to_string(ctx.computeUnits) + "\n";
+ // set 8 threads per block (this is a good value for the most gpus)
+ conf += std::string(" { \"index\" : ") + std::to_string(ctx.deviceIdx) + ",\n" +
+ " \"intensity\" : " + std::to_string(intensity) + ", \"worksize\" : " + std::to_string(8) + ",\n" +
+ " \"affine_to_cpu\" : false, \"strided_index\" : 1, \"mem_chunk\" : 2,\n"
+ " \"comp_mode\" : true\n" +
+ " },\n";
+ }
+ else
+ {
+ printer::inst()->print_msg(L0, "WARNING: Ignoring gpu %s, %s MiB of free memory is not enough to suggest settings.", ctx.name.c_str(), std::to_string(availableMem / byteToMiB).c_str());
+ }
}
configTpl.replace("PLATFORMINDEX",std::to_string(platformIndex));
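Finally, a small sketch of the zero-intensity fallback added in the last hunk: on a device with many compute units but little usable memory, rounding down to a multiple of 8 * computeUnits can collapse to 0, in which case the patch falls back to the unrounded possibleIntensity (and prints the GPU_SINGLE_ALLOC_PERCENT hint). The numbers below are illustrative only.

#include <cstddef>
#include <cstdio>

int main()
{
	// Assumed device: 64 compute units but only enough memory for 400 threads.
	const size_t computeUnits      = 64u;
	const size_t possibleIntensity = 400u;            // less than 8 * 64 = 512

	size_t intensity = (possibleIntensity / (8u * computeUnits)) * computeUnits * 8u; // rounds to 0
	if(intensity == 0)
		intensity = possibleIntensity;                // fallback added by the patch

	std::printf("intensity = %zu\n", intensity);      // 400
	return 0;
}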