fix: cap parallelism to 4 for cuda to avoid oom (#601)

r0.4
Meng Zhang 2023-10-20 00:54:53 -07:00 committed by GitHub
parent 7877d300ab
commit 5a822c03b6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 2 additions and 2 deletions

View File

@ -116,8 +116,8 @@ std::shared_ptr<TextInferenceEngine> create_engine(
const size_t num_cpus = std::thread::hardware_concurrency();
if (loader.device == ctranslate2::Device::CUDA) {
// When device is cuda, set parallelism to be number of thread.
loader.num_replicas_per_device = num_cpus;
// When device is cuda, set parallelism to be number of thread, capped to 4 to avoid VRAM oom.
loader.num_replicas_per_device = std::min<int32_t>(num_cpus, 4);
} else if (loader.device == ctranslate2::Device::CPU){
// When device is cpu, adjust the number based on threads per replica.
// https://github.com/OpenNMT/CTranslate2/blob/master/src/utils.cc#L77