From c4bb13935d890b9b3bc8c7904f89d72239f6d811 Mon Sep 17 00:00:00 2001
From: "Wang, Yi"
Date: Fri, 26 May 2023 22:47:51 +0800
Subject: [PATCH] no_cuda does not take effect in non distributed environment (#23795)

Signed-off-by: Wang, Yi
---
 src/transformers/training_args.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index 36258c1508f99d..63876e053ad44e 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -1684,7 +1684,9 @@ def _setup_devices(self) -> "torch.device":
             )
             device = torch.device("mps")
             self._n_gpu = 1
-
+        elif self.no_cuda:
+            device = torch.device("cpu")
+            self._n_gpu = 0
         else:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
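
With this change applied, passing `no_cuda=True` to `TrainingArguments` forces CPU
execution even in a single-process (non-distributed) run, where the flag was
previously ignored because only the final `else` branch was reached. A minimal
sketch of exercising the fixed branch follows; the `output_dir` value is a
placeholder and not part of the patch:

    # Verify that no_cuda forces CPU in a non-distributed run.
    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="out",  # placeholder path, not part of the patch
        no_cuda=True,      # request CPU even if CUDA/MPS is available
    )

    # Accessing args.device triggers _setup_devices; before this patch a
    # machine with a visible GPU could still be selected here, with the
    # new elif branch no_cuda wins.
    print(args.device)  # -> device(type='cpu')
    print(args.n_gpu)   # -> 0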