
Commit

fix-deprecated-warning
Replaced torch.cuda.amp.autocast with torch.amp.autocast("cuda",...).
Gulin7 committed Nov 27, 2024
1 parent 36ff817 commit 99d807a
Showing 9 changed files with 32 additions and 26 deletions.
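
For context, the change boils down to one API move: `torch.cuda.amp.autocast(...)` is deprecated in favor of the device-agnostic `torch.amp.autocast(device_type, ...)`, which takes the device type ("cuda" here) as its first argument. Below is a minimal sketch of the before/after pattern, assuming PyTorch 2.4 or later and an available CUDA device; the model, optimizer, and data are hypothetical placeholders, not code from this repository.

import torch
from torch.amp import autocast, GradScaler  # new device-agnostic entry points

# Deprecated spelling that this commit replaces:
#     from torch.cuda.amp import autocast
#     with autocast(enabled=with_amp): ...

# Hypothetical model, optimizer, and data, for illustration only.
model = torch.nn.Linear(8, 2).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = torch.nn.CrossEntropyLoss()
scaler = GradScaler("cuda")  # torch.amp.GradScaler also accepts the device type

x = torch.randn(4, 8, device="cuda")
y = torch.randint(0, 2, (4,), device="cuda")

optimizer.zero_grad()
with autocast("cuda", enabled=True):  # device type is now the first argument
    y_pred = model(x)
    loss = criterion(y_pred, y)
scaler.scale(loss).backward()  # scaled backward pass, as in the diffs below
scaler.step(optimizer)
scaler.update()

Note that most files here keep importing `GradScaler` from `torch.cuda.amp` and only migrate `autocast`; `examples/transformers/main.py` switches both to `torch.amp`.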
7 changes: 4 additions & 3 deletions examples/cifar10/main.py
@@ -7,7 +7,8 @@
 import torch.nn as nn
 import torch.optim as optim
 import utils
-from torch.cuda.amp import autocast, GradScaler
+from torch.cuda.amp import GradScaler
+from torch.amp import autocast

 import ignite
 import ignite.distributed as idist
@@ -299,7 +300,7 @@ def train_step(engine, batch):

     model.train()

-    with autocast(enabled=with_amp):
+    with autocast("cuda", enabled=with_amp):
         y_pred = model(x)
         loss = criterion(y_pred, y)

@@ -355,7 +356,7 @@ def evaluate_step(engine: Engine, batch):
         x = x.to(device, non_blocking=True)
         y = y.to(device, non_blocking=True)

-    with autocast(enabled=with_amp):
+    with autocast("cuda", enabled=with_amp):
         output = model(x)
     return output, y

5 changes: 3 additions & 2 deletions examples/cifar100_amp_benchmark/benchmark_torch_cuda_amp.py
@@ -1,6 +1,7 @@
 import fire
 import torch
-from torch.cuda.amp import autocast, GradScaler
+from torch.cuda.amp import GradScaler
+from torch.amp import autocast
 from torch.nn import CrossEntropyLoss
 from torch.optim import SGD
 from torchvision.models import wide_resnet50_2
@@ -34,7 +35,7 @@ def train_step(engine, batch):
     optimizer.zero_grad()

     # Runs the forward pass with autocasting.
-    with autocast():
+    with autocast("cuda"):
         y_pred = model(x)
         loss = criterion(y_pred, y)

5 changes: 3 additions & 2 deletions examples/cifar10_qat/main.py
@@ -6,7 +6,8 @@
 import torch.nn as nn
 import torch.optim as optim
 import utils
-from torch.cuda.amp import autocast, GradScaler
+from torch.cuda.amp import GradScaler
+from torch.amp import autocast

 import ignite
 import ignite.distributed as idist
@@ -283,7 +284,7 @@ def train_step(engine, batch):

     model.train()

-    with autocast(enabled=with_amp):
+    with autocast("cuda", enabled=with_amp):
         y_pred = model(x)
         loss = criterion(y_pred, y)

5 changes: 3 additions & 2 deletions examples/notebooks/CycleGAN_with_torch_cuda_amp.ipynb
@@ -887,7 +887,7 @@
    "id": "JE8dLeEfIl_Z"
   },
   "source": [
-   "We will use [`torch.cuda.amp.autocast`](https://pytorch.org/docs/master/amp.html#torch.cuda.amp.autocast) and [`torch.cuda.amp.GradScaler`](https://pytorch.org/docs/master/amp.html#torch.cuda.amp.GradScaler) to perform automatic mixed precision training. Our code follows a [typical mixed precision training example](https://pytorch.org/docs/master/notes/amp_examples.html#typical-mixed-precision-training)."
+   "We will use [`torch.amp.autocast`](https://pytorch.org/docs/master/amp.html#torch.amp.autocast) and [`torch.cuda.amp.GradScaler`](https://pytorch.org/docs/master/amp.html#torch.cuda.amp.GradScaler) to perform automatic mixed precision training. Our code follows a [typical mixed precision training example](https://pytorch.org/docs/master/notes/amp_examples.html#typical-mixed-precision-training)."
   ]
  },
  {
@@ -896,7 +896,8 @@
    "id": "vrJls4p-FRcA"
   },
   "source": [
-   "from torch.cuda.amp import autocast, GradScaler\n",
+   "from torch.cuda.amp import GradScaler\n",
+   "from torch.amp import autocast\n",
    "\n",
    "from ignite.utils import convert_tensor\n",
    "import torch.nn.functional as F\n",
7 changes: 4 additions & 3 deletions examples/references/classification/imagenet/main.py
@@ -6,7 +6,8 @@
 import torch

 try:
-    from torch.cuda.amp import autocast, GradScaler
+    from torch.cuda.amp import GradScaler
+    from torch.amp import autocast
 except ImportError:
     raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.6.0")

@@ -144,7 +145,7 @@ def create_trainer(model, optimizer, criterion, train_sampler, config, logger, w
     def training_step(engine, batch):
         model.train()
         x, y = prepare_batch(batch, device=device, non_blocking=True)
-        with autocast(enabled=with_amp):
+        with autocast("cuda", enabled=with_amp):
             y_pred = model(x)
             y_pred = model_output_transform(y_pred)
             loss = criterion(y_pred, y) / accumulation_steps
@@ -235,7 +236,7 @@ def create_evaluator(model, metrics, config, with_clearml, tag="val"):
     @torch.no_grad()
     def evaluate_step(engine, batch):
         model.eval()
-        with autocast(enabled=with_amp):
+        with autocast("cuda", enabled=with_amp):
             x, y = prepare_batch(batch, device=config.device, non_blocking=True)
             y_pred = model(x)
             y_pred = model_output_transform(y_pred)
7 changes: 4 additions & 3 deletions examples/references/segmentation/pascal_voc2012/main.py
@@ -6,7 +6,8 @@
 import torch

 try:
-    from torch.cuda.amp import autocast, GradScaler
+    from torch.cuda.amp import GradScaler
+    from torch.amp import autocast
 except ImportError:
     raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.6.0")

@@ -191,7 +192,7 @@ def create_trainer(model, optimizer, criterion, train_sampler, config, logger, w
     def forward_pass(batch):
         model.train()
         x, y = prepare_batch(batch, device=device, non_blocking=True)
-        with autocast(enabled=with_amp):
+        with autocast("cuda", enabled=with_amp):
             y_pred = model(x)
             y_pred = model_output_transform(y_pred)
             loss = criterion(y_pred, y) / accumulation_steps
@@ -272,7 +273,7 @@ def create_evaluator(model, metrics, config, with_clearml, tag="val"):
     @torch.no_grad()
     def evaluate_step(engine, batch):
         model.eval()
-        with autocast(enabled=with_amp):
+        with autocast("cuda", enabled=with_amp):
             x, y = prepare_batch(batch, device=config.device, non_blocking=True)
             y_pred = model(x)
             y_pred = model_output_transform(y_pred)
6 changes: 3 additions & 3 deletions examples/transformers/main.py
@@ -7,7 +7,7 @@
 import torch.nn as nn
 import torch.optim as optim
 import utils
-from torch.cuda.amp import autocast, GradScaler
+from torch.amp import autocast, GradScaler

 import ignite
 import ignite.distributed as idist
@@ -309,7 +309,7 @@ def train_step(engine, batch):

     model.train()

-    with autocast(enabled=with_amp):
+    with autocast("cuda", enabled=with_amp):
         y_pred = model(input_batch)
         loss = criterion(y_pred, labels)

@@ -373,7 +373,7 @@ def evaluate_step(engine, batch):
     input_batch = {k: v.to(device, non_blocking=True, dtype=torch.long) for k, v in batch[0].items()}
     labels = labels.to(device, non_blocking=True, dtype=torch.float)

-    with autocast(enabled=with_amp):
+    with autocast("cuda", enabled=with_amp):
         output = model(input_batch)
     return output, labels

8 changes: 4 additions & 4 deletions ignite/engine/__init__.py
@@ -185,7 +185,7 @@ def supervised_training_step_amp(
     """

     try:
-        from torch.cuda.amp import autocast
+        from torch.amp import autocast
     except ImportError:
         raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")

@@ -200,7 +200,7 @@ def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[to
         optimizer.zero_grad()
         model.train()
         x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
-        with autocast(enabled=True):
+        with autocast("cuda", enabled=True):
             output = model_fn(model, x)
             y_pred = model_transform(output)
             loss = loss_fn(y_pred, y)
@@ -726,15 +726,15 @@ def supervised_evaluation_step_amp(
         Added `model_fn` to customize model's application on the sample
     """
     try:
-        from torch.cuda.amp import autocast
+        from torch.amp import autocast
     except ImportError:
         raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")

     def evaluate_step(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
         model.eval()
         with torch.no_grad():
             x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
-            with autocast(enabled=True):
+            with autocast("cuda", enabled=True):
                 output = model_fn(model, x)
                 y_pred = model_transform(output)
                 return output_transform(x, y, y_pred)
8 changes: 4 additions & 4 deletions tests/ignite/engine/test_create_supervised.py
@@ -447,7 +447,7 @@ def test_create_supervised_trainer_apex_error():
 def mock_torch_cuda_amp_module():
     with patch.dict(
         "sys.modules",
-        {"torch.cuda.amp": None, "torch.cuda.amp.grad_scaler": None, "torch.cuda.amp.autocast_mode": None},
+        {"torch.cuda.amp": None, "torch.cuda.amp.grad_scaler": None, "torch.amp.autocast_mode": None},
     ):
         yield torch

@@ -631,7 +631,7 @@ def test_create_supervised_evaluator():

     # older versions didn't have the autocast method so we skip the test for older builds
     if Version(torch.__version__) >= Version("1.6.0"):
-        with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
+        with mock.patch("torch.amp.autocast") as mock_torch_cuda_amp_module:
             _test_create_evaluation_step_amp(mock_torch_cuda_amp_module)


@@ -641,7 +641,7 @@ def test_create_supervised_evaluator_on_cpu():

     # older versions didn't have the autocast method so we skip the test for older builds
     if Version(torch.__version__) >= Version("1.6.0"):
-        with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
+        with mock.patch("torch.amp.autocast") as mock_torch_cuda_amp_module:
             _test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu")
             _test_create_evaluation_step_amp(mock_torch_cuda_amp_module, evaluator_device="cpu")

@@ -652,7 +652,7 @@ def test_create_supervised_evaluator_traced_on_cpu():

     # older versions didn't have the autocast method so we skip the test for older builds
     if Version(torch.__version__) >= Version("1.6.0"):
-        with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
+        with mock.patch("torch.amp.autocast") as mock_torch_cuda_amp_module:
             _test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu", trace=True)
_test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu", trace=True)


