Diffstat (limited to 'xmrstak/backend/nvidia')
-rw-r--r--  xmrstak/backend/nvidia/config.tpl              | 3 +++
-rw-r--r--  xmrstak/backend/nvidia/minethd.cpp             | 4 ++--
-rw-r--r--  xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu | 6 ++++++
3 files changed, 11 insertions, 2 deletions
diff --git a/xmrstak/backend/nvidia/config.tpl b/xmrstak/backend/nvidia/config.tpl
index 5479172..f489956 100644
--- a/xmrstak/backend/nvidia/config.tpl
+++ b/xmrstak/backend/nvidia/config.tpl
@@ -26,6 +26,9 @@ R"===(
  * "affine_to_cpu" : false, "sync_mode" : 3,
  * },
  * ],
+ * If you do not wish to mine with your nVidia GPU(s) then use:
+ * "gpu_threads_conf" :
+ * null,
  */
 
 "gpu_threads_conf" :
diff --git a/xmrstak/backend/nvidia/minethd.cpp b/xmrstak/backend/nvidia/minethd.cpp
index 5564596..9fd08fb 100644
--- a/xmrstak/backend/nvidia/minethd.cpp
+++ b/xmrstak/backend/nvidia/minethd.cpp
@@ -166,7 +166,7 @@ std::vector<iBackend*>* minethd::thread_starter(uint32_t threadOffset, miner_wor
 		if(cfg.cpu_aff >= 0)
 		{
 #if defined(__APPLE__)
-			printer::inst()->print_msg(L1, "WARNING on MacOS thread affinity is only advisory.");
+			printer::inst()->print_msg(L1, "WARNING on macOS thread affinity is only advisory.");
 #endif
 
 			printer::inst()->print_msg(L1, "Starting NVIDIA GPU thread %d, affinity: %d.", i, (int)cfg.cpu_aff);
@@ -287,7 +287,7 @@ void minethd::work_main()
 				if ( (*((uint64_t*)(bResult + 24))) < oWork.iTarget)
 					executor::inst()->push_event(ex_event(job_result(oWork.sJobID, foundNonce[i], bResult, iThreadNo), oWork.iPoolId));
 				else
-					executor::inst()->push_event(ex_event("NVIDIA Invalid Result", oWork.iPoolId));
+					executor::inst()->push_event(ex_event("NVIDIA Invalid Result", ctx.device_id, oWork.iPoolId));
 			}
 
 			iCount += h_per_round;
diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
index d865e13..d5d0039 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
@@ -441,6 +441,12 @@ extern "C" int cuda_get_deviceinfo(nvid_ctx* ctx)
 		maxMemUsage = size_t(1024u) * byteToMiB;
 	}
 
+	if(props.multiProcessorCount <= 6)
+	{
+		// limit memory usage for low end devices to reduce the number of threads
+		maxMemUsage = size_t(1024u) * byteToMiB;
+	}
+
 	int* tmp;
 	cudaError_t err;
 	// a device must be selected to get the right memory usage later on
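Note on the first minethd.cpp hunk: it only corrects the spelling of macOS in the warning text, but the warning itself reflects a real platform limit. macOS offers no hard CPU pinning; Mach affinity tags are merely a scheduling hint, which is why the message says "only advisory". The snippet below is a sketch of how such an advisory tag can be set, assuming the Mach thread_policy_set() route; xmr-stak's actual affinity helper and its choice of tag may differ. It compiles only on macOS.

#include <mach/mach.h>
#include <mach/thread_act.h>
#include <mach/thread_policy.h>
#include <pthread.h>
#include <cstdio>

// Group the calling thread under an affinity tag. The kernel treats this as a
// hint (threads sharing a tag are co-scheduled for cache locality); it is not
// a binding to a specific core.
static bool set_affinity_hint(int tag)
{
	thread_affinity_policy_data_t policy = { static_cast<integer_t>(tag) };
	thread_port_t mach_thread = pthread_mach_thread_np(pthread_self());
	kern_return_t kr = thread_policy_set(mach_thread, THREAD_AFFINITY_POLICY,
	                                     reinterpret_cast<thread_policy_t>(&policy),
	                                     THREAD_AFFINITY_POLICY_COUNT);
	return kr == KERN_SUCCESS;
}

int main()
{
	// Mirror the miner's message: even on success the placement is advisory.
	std::printf("WARNING on macOS thread affinity is only advisory.\n");
	set_affinity_hint(1);
	return 0;
}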
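The second minethd.cpp hunk adds ctx.device_id to the "NVIDIA Invalid Result" event, so a bad result can be attributed to a specific GPU rather than only to a pool. The types below are hypothetical stand-ins, not xmr-stak's real ex_event or executor; they only illustrate why carrying the device id is useful when the error is reported.

#include <cstdint>
#include <cstddef>
#include <iostream>
#include <string>

// Hypothetical, simplified stand-in for the event payload: the point shown is
// that the GPU id now travels alongside the pool id.
struct invalid_result_event
{
	std::string reason;
	std::uint32_t device_id; // corresponds to ctx.device_id in the patch
	std::size_t pool_id;
};

// Hypothetical consumer: with the device id attached, the log line can name
// the card that produced the invalid share.
static void report(const invalid_result_event& ev)
{
	std::cout << ev.reason << " (GPU " << ev.device_id
	          << ", pool " << ev.pool_id << ")\n";
}

int main()
{
	report({ "NVIDIA Invalid Result", 1, 0 });
	return 0;
}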
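To make the cuda_extra.cu change concrete: cuda_get_deviceinfo() now clamps the memory budget to 1024 MiB whenever the device reports at most 6 streaming multiprocessors, so fewer hashing threads are derived for low-end cards. Below is a minimal standalone sketch of that check using only the public CUDA runtime API; the starting budget taken from totalGlobalMem is an assumption for illustration, since the real code computes maxMemUsage inside xmr-stak's nvid_ctx handling before this cap is applied. Build with nvcc, or with a host compiler linked against cudart.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstddef>

int main()
{
	const std::size_t byteToMiB = std::size_t(1024u) * 1024u; // same unit as cuda_extra.cu

	int deviceCount = 0;
	if(cudaGetDeviceCount(&deviceCount) != cudaSuccess)
		return 1;

	for(int dev = 0; dev < deviceCount; ++dev)
	{
		cudaDeviceProp props;
		if(cudaGetDeviceProperties(&props, dev) != cudaSuccess)
			continue;

		// Assumed starting point for the sketch: the whole device memory.
		std::size_t maxMemUsage = props.totalGlobalMem;

		if(props.multiProcessorCount <= 6)
		{
			// The rule added by this commit: low end devices (<= 6 SMs)
			// are capped at 1 GiB to reduce the number of threads.
			maxMemUsage = std::size_t(1024u) * byteToMiB;
		}

		std::printf("GPU %d: %d SMs, memory budget %zu MiB\n",
		            dev, props.multiProcessorCount, maxMemUsage / byteToMiB);
	}
	return 0;
}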