From d45b3119aff3ff9b1f4e30204f8ad8155ab3903b Mon Sep 17 00:00:00 2001
From: dmartins
Date: Fri, 27 Sep 2024 09:08:47 +0200
Subject: [PATCH] fix: cuda imports

---
 detectron2/engine/train_loop.py  | 4 ++--
 tests/layers/test_blocks.py      | 2 +-
 tests/modeling/test_model_e2e.py | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/detectron2/engine/train_loop.py b/detectron2/engine/train_loop.py
index 738a69de94..7830e3a64f 100644
--- a/detectron2/engine/train_loop.py
+++ b/detectron2/engine/train_loop.py
@@ -469,7 +469,7 @@ def __init__(
         )
 
         if grad_scaler is None:
-            from torch.cuda.amp import GradScaler
+            from torch.amp import GradScaler
 
             grad_scaler = GradScaler()
         self.grad_scaler = grad_scaler
@@ -482,7 +482,7 @@ def run_step(self):
         """
         assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
         assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
-        from torch.cuda.amp import autocast
+        from torch.amp import autocast
 
         start = time.perf_counter()
         data = next(self._data_loader_iter)
diff --git a/tests/layers/test_blocks.py b/tests/layers/test_blocks.py
index 5a0488adbf..aa39e717df 100644
--- a/tests/layers/test_blocks.py
+++ b/tests/layers/test_blocks.py
@@ -24,7 +24,7 @@ def test_aspp(self):
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
     def test_frozen_batchnorm_fp16(self):
-        from torch.cuda.amp import autocast
+        from torch.amp import autocast
 
         C = 10
         input = torch.rand(1, C, 10, 10).cuda()
diff --git a/tests/modeling/test_model_e2e.py b/tests/modeling/test_model_e2e.py
index 8c07e6856d..d7689f7ce1 100644
--- a/tests/modeling/test_model_e2e.py
+++ b/tests/modeling/test_model_e2e.py
@@ -155,7 +155,7 @@ def test_roiheads_inf_nan_data(self):
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
     def test_autocast(self):
-        from torch.cuda.amp import autocast
+        from torch.amp import autocast
 
         inputs = [{"image": torch.rand(3, 100, 100)}]
         self.model.eval()
@@ -195,7 +195,7 @@ def test_inf_nan_data(self):
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
     def test_autocast(self):
-        from torch.cuda.amp import autocast
+        from torch.amp import autocast
 
         inputs = [{"image": torch.rand(3, 100, 100)}]
         self.model.eval()
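
Note: below is a minimal, illustrative sketch of how the torch.amp entry points touched by this patch are typically used after the migration. The toy model, optimizer, and loss are placeholders rather than detectron2 code; the relevant detail is that torch.amp.autocast expects a device_type argument and torch.amp.GradScaler can be parameterized by device, whereas the old torch.cuda.amp variants implied "cuda".

import torch
from torch.amp import GradScaler, autocast

# Placeholder model/optimizer, only to make the sketch self-contained.
model = torch.nn.Linear(10, 2).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = GradScaler("cuda")  # torch.amp.GradScaler takes the device name

data = torch.rand(4, 10).cuda()
target = torch.rand(4, 2).cuda()

optimizer.zero_grad()
# torch.amp.autocast requires device_type; torch.cuda.amp.autocast assumed "cuda".
with autocast("cuda", dtype=torch.float16):
    loss = torch.nn.functional.mse_loss(model(data), target)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()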