fix: cap parallelism to 4 for cuda to avoid OOM

r0.3
Meng Zhang 2023-10-20 00:32:04 -07:00
parent a9f1829a52
commit 144dceae41
1 changed files with 2 additions and 2 deletions

View File

@ -116,8 +116,8 @@ std::shared_ptr<TextInferenceEngine> create_engine(
const size_t num_cpus = std::thread::hardware_concurrency();
if (loader.device == ctranslate2::Device::CUDA) {
    // When device is cuda, set parallelism to the number of threads.
loader.num_replicas_per_device = num_cpus;
    // When device is cuda, set parallelism to the number of threads, capped at 4 to avoid VRAM OOM.
loader.num_replicas_per_device = std::min<int32_t>(num_cpus, 4);
} else if (loader.device == ctranslate2::Device::CPU){
// When device is cpu, adjust the number based on threads per replica.
// https://github.com/OpenNMT/CTranslate2/blob/master/src/utils.cc#L77