From c8770b98934dc99b90c811cfa32d0f89bbd74a05 Mon Sep 17 00:00:00 2001
From: psychocrypt
Date: Sun, 24 Nov 2019 20:31:05 +0100
Subject: [PATCH] NVIDIA: fix auto adjustment

fix #2564 - add the dataset size to the maximum allowed memory usage
---
 xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
index b3f2f903d..b38b39676 100644
--- a/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
+++ b/xmrstak/backend/nvidia/nvcc_code/cuda_extra.cu
@@ -322,6 +322,12 @@ extern "C" int cuda_get_deviceinfo(nvid_ctx* ctx)
 		hashMemSize = std::max(hashMemSize, algo.Mem());
 	}
 
+	const size_t dataset_size = getRandomXDatasetSize();
+	/* increase maxMemUsage by the dataset because the upper limits are
+	 * only for the scratchpad and do not take the randomX dataset into account.
+	 */
+	maxMemUsage += dataset_size;
+
 #ifdef WIN32
 	/* We use in windows bfactor (split slow kernel into smaller parts) to avoid
 	 * that windows is killing long running kernel.
@@ -346,7 +352,6 @@ extern "C" int cuda_get_deviceinfo(nvid_ctx* ctx)
 
 	size_t availableMem = freeMemory - (128u * byteToMiB) - 200u;
 	size_t limitedMemory = std::min(availableMem, maxMemUsage);
-	const size_t dataset_size = getRandomXDatasetSize();
 	if(limitedMemory <= dataset_size)
 		limitedMemory = 0;
 	else