From b41c7a1c8b965a166116f49fec6f063b4a53289d Mon Sep 17 00:00:00 2001
From: "Wang, Yi"
Date: Fri, 26 May 2023 19:07:00 -0400
Subject: [PATCH] no_cuda does not take effect in non-distributed environment

Signed-off-by: Wang, Yi
---
 src/transformers/training_args.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index 36258c1508f99d..bcf4e4fe0fc5fd 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -1685,6 +1685,9 @@ def _setup_devices(self) -> "torch.device":
             device = torch.device("mps")
             self._n_gpu = 1
+        elif self.no_cuda:
+            device = torch.device("cpu")
+            self._n_gpu = 0
         else:
             # if n_gpu is > 1 we'll use nn.DataParallel.
             # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
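
Note: a minimal sketch of the behavior this patch addresses (assuming a single-process,
non-distributed run on a machine where CUDA is available; "out" is a placeholder output
directory, not part of the patch). Per the subject line, before this change `no_cuda=True`
did not take effect in such an environment; with the added branch, device resolution
should fall through to CPU:

    from transformers import TrainingArguments

    # Hypothetical usage: no launcher (torchrun/accelerate), single process.
    args = TrainingArguments(output_dir="out", no_cuda=True)

    # Accessing `device` triggers _setup_devices; with this patch the new
    # `elif self.no_cuda` branch is taken instead of the CUDA path.
    print(args.device)  # expected after the patch: device(type='cpu')
    print(args.n_gpu)   # expected after the patch: 0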