Diffstat (limited to 'xmrstak/backend/nvidia')
 xmrstak/backend/nvidia/config.tpl              |  3 +++
 xmrstak/backend/nvidia/minethd.cpp             |  4 ++--
 xmrstak/backend/nvidia/nvcc_code/cuda_core.cu  | 12 ++++++++++++
 xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu | 14 +++++++++++++-
 4 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/xmrstak/backend/nvidia/config.tpl b/xmrstak/backend/nvidia/config.tpl
index 5479172..f489956 100644
--- a/xmrstak/backend/nvidia/config.tpl
+++ b/xmrstak/backend/nvidia/config.tpl
@@ -26,6 +26,9 @@ R"===(
* "affine_to_cpu" : false, "sync_mode" : 3,
* },
* ],
+ * If you do not wish to mine with your NVIDIA GPU(s), then use:
+ * "gpu_threads_conf" :
+ * null,
*/
"gpu_threads_conf" :
diff --git a/xmrstak/backend/nvidia/minethd.cpp b/xmrstak/backend/nvidia/minethd.cpp
index 5564596..9fd08fb 100644
--- a/xmrstak/backend/nvidia/minethd.cpp
+++ b/xmrstak/backend/nvidia/minethd.cpp
@@ -166,7 +166,7 @@ std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_wor
if(cfg.cpu_aff >= 0)
{
#if defined(__APPLE__)
- printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
+ printer::inst()->print_msg(L1, "WARNING on macOS thread affinity is only advisory.");
#endif
printer::inst()->print_msg(L1, "Starting NVIDIA GPU thread %d, affinity: %d.", i, (int)cfg.cpu_aff);
@@ -287,7 +287,7 @@ void minethd::work_main()
if ( (*((uint64_t*)(bResult + 24))) < oWork.iTarget)
executor::inst()->push_event(ex_event(job_result(oWork.sJobID, foundNonce[i], bResult, iThreadNo), oWork.iPoolId));
else
- executor::inst()->push_event(ex_event("NVIDIA Invalid Result", oWork.iPoolId));
+ executor::inst()->push_event(ex_event("NVIDIA Invalid Result", ctx.device_id, oWork.iPoolId));
}
iCount += h_per_round;
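
For readers skimming the hunk above: a result is a valid share when the last 8 bytes of the 32-byte hash, read as a little-endian uint64_t, fall below the pool target; that is what the (*((uint64_t*)(bResult + 24))) < oWork.iTarget test does. A minimal standalone sketch of the same check (the function name is hypothetical, and memcpy is used to avoid the unaligned cast):

#include <cstdint>
#include <cstring>

// Hypothetical restatement of the share test in work_main(): the high
// 8 bytes (24..31) of the 32-byte hash are compared against the target.
bool hash_meets_target(const uint8_t result[32], uint64_t target)
{
    uint64_t high;
    std::memcpy(&high, result + 24, sizeof(high)); // little-endian read
    return high < target; // smaller value == harder share
}
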
diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu b/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
index 15a6f36..cc97274 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_core.cu
@@ -74,24 +74,36 @@ __device__ __forceinline__ uint64_t cuda_mul128( uint64_t multiplier, uint64_t m
template< typename T >
__device__ __forceinline__ T loadGlobal64( T * const addr )
{
+#if (__CUDA_ARCH__ < 700)
T x;
asm volatile( "ld.global.cg.u64 %0, [%1];" : "=l"( x ) : "l"( addr ) );
return x;
+#else
+ return *addr;
+#endif
}
template< typename T >
__device__ __forceinline__ T loadGlobal32( T * const addr )
{
+#if (__CUDA_ARCH__ < 700)
T x;
asm volatile( "ld.global.cg.u32 %0, [%1];" : "=r"( x ) : "l"( addr ) );
return x;
+#else
+ return *addr;
+#endif
}
template< typename T >
__device__ __forceinline__ void storeGlobal32( T* addr, T const & val )
{
+#if (__CUDA_ARCH__ < 700)
asm volatile( "st.global.cg.u32 [%0], %1;" : : "l"( addr ), "r"( val ) );
+#else
+ *addr = val;
+#endif
}
template<size_t ITERATIONS, uint32_t THREAD_SHIFT>
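
The qualifier being gated above, .cg (cache-global), keeps loads and stores in L2 and bypasses L1. That is the right choice on pre-Volta GPUs, where L1 is slower and higher-latency (per the comment in cuda_extra.cu below), while Volta (compute capability 7.0, hence the __CUDA_ARCH__ < 700 test) has an improved L1 that makes plain loads preferable. A minimal standalone sketch of the same pattern, with a hypothetical demo kernel:

#include <cstdint>

// Same pattern as the hunk above: pre-Volta parts issue an L2-only
// (.cg, cache-global) load via inline PTX; Volta and newer use a plain
// load so the improved L1 can serve repeat accesses.
__device__ __forceinline__ uint32_t load_cg(const uint32_t* addr)
{
#if (__CUDA_ARCH__ < 700)
    uint32_t x;
    asm volatile("ld.global.cg.u32 %0, [%1];" : "=r"(x) : "l"(addr));
    return x;
#else
    return *addr;
#endif
}

// Hypothetical demo kernel: streams one word per thread through the
// arch-dependent load path.
__global__ void copy_cg(const uint32_t* in, uint32_t* out, size_t n)
{
    size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = load_cg(in + i);
}
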
diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
index d865e13..92259db 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
@@ -204,7 +204,13 @@ extern "C" int cryptonight_extra_cpu_init(nvid_ctx* ctx)
break;
};
- CUDA_CHECK(ctx->device_id, cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
+ const int gpuArch = ctx->device_arch[0] * 10 + ctx->device_arch[1];
+
+ /* Disable L1 cache for GPUs before Volta.
+ * L1 speed is increased and latency reduced with Volta.
+ */
+ if(gpuArch < 70)
+ CUDA_CHECK(ctx->device_id, cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
size_t hashMemSize;
if(::jconf::inst()->IsCurrencyMonero())
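
For context on the guarded call: cudaDeviceSetCacheConfig(cudaFuncCachePreferL1) hints the runtime to size the per-SM on-chip split in favor of L1 over shared memory. A hedged standalone sketch of the same gate (the function name is hypothetical; error reporting added for illustration):

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical standalone version of the gate above: only pre-Volta
// devices (arch < 70) ask for the larger-L1 cache split.
void prefer_l1_if_pre_volta(int device_id, int gpu_arch)
{
    if (gpu_arch >= 70)
        return; // Volta+: keep the default; see the comment in the hunk

    cudaSetDevice(device_id);
    cudaError_t err = cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
    if (err != cudaSuccess)
        std::fprintf(stderr, "cudaDeviceSetCacheConfig failed: %s\n",
                     cudaGetErrorString(err));
}
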
@@ -441,6 +447,12 @@ extern "C" int cuda_get_deviceinfo(nvid_ctx* ctx)
maxMemUsage = size_t(1024u) * byteToMiB;
}
+ if(props.multiProcessorCount <= 6)
+ {
+ // limit memory usage for low-end devices to reduce the number of threads
+ maxMemUsage = size_t(1024u) * byteToMiB;
+ }
+
int* tmp;
cudaError_t err;
// a device must be selected to get the right memory usage later on
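
As a footnote on the values used in both hunks: device_arch and multiProcessorCount mirror fields of the CUDA runtime's cudaDeviceProp. A hedged sketch, assuming that mapping (the helper name is hypothetical), of how the two-digit architecture number and the SM count could be read directly:

#include <cuda_runtime.h>

// Hypothetical helper: rebuilds the gpuArch number used above (major * 10
// + minor, e.g. 70 for Volta) and reports the SM count checked against 6.
int gpu_arch_and_sms(int device_id, int* sm_count)
{
    cudaDeviceProp props;
    if (cudaGetDeviceProperties(&props, device_id) != cudaSuccess)
        return -1; // unknown device; skip arch-specific tuning
    if (sm_count != nullptr)
        *sm_count = props.multiProcessorCount;
    return props.major * 10 + props.minor;
}
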