Merge branch 'master' of github.com:pytorch/ignite
guptaaryan16 committed Jan 10, 2024
2 parents eb89948 + cbe80d2 commit b936db6
Showing 10 changed files with 27 additions and 23 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/pytorch-version-tests.yml
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 45
     strategy:
-      max-parallel: 10
+      max-parallel: 7
       fail-fast: false
       matrix:
         python-version: [3.8, 3.9, "3.10"]
Binary file added assets/logo/ignite_logo_mixed_light.png
1 change: 1 addition & 0 deletions assets/logo/ignite_logo_mixed_light.svg
8 changes: 7 additions & 1 deletion ignite/handlers/lr_finder.py
@@ -161,13 +161,19 @@ def _log_lr_and_loss(self, trainer: Engine, output_transform: Callable, smooth_f
                     raise ValueError(
                         "if output of the engine is torch.Tensor, then "
                         "it must be 0d torch.Tensor or 1d torch.Tensor with 1 element, "
-                        f"but got torch.Tensor of shape {loss.shape}"
+                        f"but got torch.Tensor of shape {loss.shape}."
                     )
             else:
                 raise TypeError(
                     "output of the engine should be of type float or 0d torch.Tensor "
                     "or 1d torch.Tensor with 1 element, "
                     f"but got output of type {type(loss).__name__}"
+                    "You may wish to use the output_transform kwarg with the attach method e.g.\n"
+                    """
+                    lr_finder = FastaiLRFinder()
+                    with lr_finder.attach(trainer, output_transform=lambda x:x["train_loss"]) as trainer_with_lr_finder:
+                        trainer_with_lr_finder.run(dataloader_train)
+                    """
                 )
         loss = idist.all_reduce(loss)
         lr = self._lr_schedule.get_param()
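The snippet embedded in the new error message can be expanded into something runnable. Below is a minimal sketch of the intended usage (the model, data, and the "train_loss" dict key are illustrative, not from the diff; note that FastaiLRFinder.attach also expects a to_save mapping, which the abbreviated snippet in the message leaves out):

    import torch
    from torch import nn
    from torch.utils.data import DataLoader, TensorDataset

    from ignite.engine import Engine
    from ignite.handlers import FastaiLRFinder

    model = nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    criterion = nn.MSELoss()

    def train_step(engine, batch):
        x, y = batch
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
        # A dict output would trigger the TypeError above unless output_transform
        # tells the LR finder how to extract a scalar loss from it.
        return {"train_loss": loss.item()}

    trainer = Engine(train_step)
    dataloader_train = DataLoader(TensorDataset(torch.randn(64, 10), torch.randn(64, 1)), batch_size=8)

    lr_finder = FastaiLRFinder()
    to_save = {"model": model, "optimizer": optimizer}
    with lr_finder.attach(trainer, to_save=to_save, output_transform=lambda x: x["train_loss"]) as trainer_with_lr_finder:
        trainer_with_lr_finder.run(dataloader_train)
    print(lr_finder.lr_suggestion())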
2 changes: 1 addition & 1 deletion ignite/handlers/param_scheduler.py
@@ -729,7 +729,7 @@ def load_state_dict(self, state_dict: Mapping) -> None:
         for s, sd in zip(self.schedulers, sds):
             s.load_state_dict(sd)
         super(ConcatScheduler, self).load_state_dict(state_dict)
-        self._setup_scheduler()
+        self._current_scheduler = self.schedulers[self._scheduler_index]

     def _setup_scheduler(self) -> None:
         self._current_scheduler = self.schedulers[self._scheduler_index]
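The paired test change later in this diff shows the intent: _setup_scheduler() does more than pick the active scheduler, and calling it from load_state_dict clobbered the partially consumed _current_duration that had just been restored. A minimal sketch of the round trip the fix repairs (private attributes appear here only to mirror the test's assertions):

    import torch
    from torch import nn

    from ignite.handlers import ConcatScheduler, CosineAnnealingScheduler, LinearCyclicalScheduler

    optimizer = torch.optim.SGD(nn.Linear(2, 1).parameters(), lr=0.1)
    scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
    scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
    concat = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10])

    for _ in range(5):
        concat(engine=None)  # consume 5 of scheduler_1's 10 steps

    state = concat.state_dict()
    for _ in range(20):
        concat(engine=None)  # advance well into scheduler_2

    concat.load_state_dict(state)
    assert concat._current_duration == 5  # restored, no longer reset to 10
    assert concat._current_scheduler is scheduler_1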
5 changes: 2 additions & 3 deletions tests/ignite/distributed/comp_models/test_base.py
@@ -4,9 +4,6 @@
 from ignite.distributed.comp_models.base import _SerialModel, _torch_version_le_112, ComputationModel


-@pytest.mark.skipif(
-    _torch_version_le_112 and torch.backends.mps.is_available(), reason="Temporary skip if MPS is available"
-)
 def test_serial_model():
     _SerialModel.create_from_backend()
     model = _SerialModel.create_from_context()
@@ -19,6 +16,8 @@ def test_serial_model():
     assert model.get_node_rank() == 0
     if torch.cuda.is_available():
         assert model.device().type == "cuda"
+    elif _torch_version_le_112 and torch.backends.mps.is_available():
+        assert model.device().type == "mps"
     else:
         assert model.device().type == "cpu"
     assert model.backend() is None
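Rather than skipping these tests wholesale when MPS is present, the suite now asserts the device fallback explicitly. A hedged sketch of the order the tests encode, using a public guard in place of the private _torch_version_le_112 flag:

    import torch

    import ignite.distributed as idist

    # cuda first, then mps (when the torch build exposes a usable MPS backend), then cpu
    mps_available = getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available()

    device = idist.device()
    if torch.cuda.is_available():
        assert device.type == "cuda"
    elif mps_available:
        assert device.type == "mps"
    else:
        assert device.type == "cpu"
    print(device)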
8 changes: 2 additions & 6 deletions tests/ignite/distributed/test_auto.py
@@ -12,7 +12,6 @@

 import ignite.distributed as idist
 from ignite.distributed.auto import auto_dataloader, auto_model, auto_optim, DistributedProxySampler
-from ignite.distributed.comp_models.base import _torch_version_le_112


 class DummyDS(Dataset):
@@ -180,16 +179,13 @@ def _test_auto_model_optimizer(ws, device):
     assert optimizer.backward_passes_per_step == backward_passes_per_step


-@pytest.mark.skipif(
-    _torch_version_le_112 and torch.backends.mps.is_available(), reason="Temporary skip if MPS is available"
-)
 def test_auto_methods_no_dist():
     _test_auto_dataloader(1, 1, batch_size=1)
     _test_auto_dataloader(1, 1, batch_size=10, num_workers=2)
     _test_auto_dataloader(1, 1, batch_size=10, sampler_name="WeightedRandomSampler")
     _test_auto_dataloader(1, 1, batch_size=10, sampler_name="DistributedSampler")

-    _test_auto_model_optimizer(1, "cuda" if torch.cuda.is_available() else "cpu")
+    device = idist.device()
+    _test_auto_model_optimizer(1, device)


 @pytest.mark.distributed
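For context, the single-process path this test exercises: with no distributed backend initialized, the auto_* helpers degrade to plain objects placed on idist.device(). A minimal sketch (the toy dataset and model are illustrative):

    import torch
    from torch import nn
    from torch.utils.data import TensorDataset

    import ignite.distributed as idist

    ds = TensorDataset(torch.randn(16, 4), torch.randn(16, 1))
    loader = idist.auto_dataloader(ds, batch_size=4)  # an ordinary DataLoader when world_size == 1
    model = idist.auto_model(nn.Linear(4, 1))  # moved to idist.device()
    optimizer = idist.auto_optim(torch.optim.SGD(model.parameters(), lr=0.1))
    print(idist.device(), type(loader).__name__)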
6 changes: 1 addition & 5 deletions tests/ignite/distributed/test_launcher.py
@@ -8,7 +8,6 @@
 from packaging.version import Version

 import ignite.distributed as idist
-from ignite.distributed.comp_models.base import _torch_version_le_112
 from ignite.distributed.utils import has_hvd_support, has_native_dist_support, has_xla_support


@@ -258,11 +257,8 @@ def test_idist_parallel_n_procs_native(init_method, backend, get_fixed_dirname,


 @pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
-@pytest.mark.skipif(
-    _torch_version_le_112 and torch.backends.mps.is_available(), reason="Temporary skip if MPS is available"
-)
 def test_idist_parallel_no_dist():
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    device = idist.device()
     with idist.Parallel(backend=None) as parallel:
         parallel.run(_test_func, ws=1, device=device, backend=None, true_init_method=None)
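A minimal sketch of the serial path under test (the task function and its message argument are illustrative): Parallel(backend=None) invokes the callable once in the current process, passing the local rank first, and idist.device() now supplies the device that the deleted expression hard-coded:

    import ignite.distributed as idist

    def task(local_rank, message):
        # serial case: local_rank is 0 and no backend is set
        print(local_rank, message, idist.device(), idist.backend())

    with idist.Parallel(backend=None) as parallel:
        parallel.run(task, "hello")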
8 changes: 4 additions & 4 deletions tests/ignite/distributed/utils/test_serial.py
@@ -1,4 +1,3 @@
-import pytest
 import torch

 import ignite.distributed as idist
@@ -15,13 +14,12 @@
 )


-@pytest.mark.skipif(
-    _torch_version_le_112 and torch.backends.mps.is_available(), reason="Temporary skip if MPS is available"
-)
 def test_no_distrib(capsys):
     assert idist.backend() is None
     if torch.cuda.is_available():
         assert idist.device().type == "cuda"
+    elif _torch_version_le_112 and torch.backends.mps.is_available():
+        assert idist.device().type == "mps"
     else:
         assert idist.device().type == "cpu"
     assert idist.get_rank() == 0
@@ -43,6 +41,8 @@ def test_no_distrib(capsys):
     assert "ignite.distributed.utils INFO: backend: None" in out[-1]
     if torch.cuda.is_available():
         assert "ignite.distributed.utils INFO: device: cuda" in out[-1]
+    elif _torch_version_le_112 and torch.backends.mps.is_available():
+        assert "ignite.distributed.utils INFO: device: mps" in out[-1]
     else:
         assert "ignite.distributed.utils INFO: device: cpu" in out[-1]
     assert "ignite.distributed.utils INFO: rank: 0" in out[-1]
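For reference, the log lines the capsys assertions grep for come from idist.show_config(). A hedged sketch (the exact prefix depends on how logging is configured; the format below only approximates what the test sets up):

    import logging

    import ignite.distributed as idist

    logging.basicConfig(level=logging.INFO, format="%(name)s %(levelname)s: %(message)s")
    idist.show_config()  # e.g. "ignite.distributed.utils INFO: backend: None"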
10 changes: 8 additions & 2 deletions tests/ignite/handlers/test_param_scheduler.py
@@ -284,18 +284,24 @@ def test_concat_scheduler_state_dict():
     scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
     durations = [10]
     concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=False)

+    steps = 0
+    for i in range(5):
+        concat_scheduler(engine=None)
+        steps += 1

     state_dict = concat_scheduler.state_dict()

     assert state_dict["durations"] == durations
-    assert state_dict["_current_duration"] == durations[0]
+    assert state_dict["_current_duration"] == durations[0] - steps
     assert state_dict["_scheduler_index"] == 0

+    for _ in range(20):
+        concat_scheduler(None, None)

     concat_scheduler.load_state_dict(state_dict)
     assert concat_scheduler.durations == durations
-    assert concat_scheduler._current_duration == durations[0]
+    assert concat_scheduler._current_duration == durations[0] - steps
     assert id(concat_scheduler._current_scheduler) == id(scheduler_1)

     with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
