From 478c79d8144095bb1b7e3fb4d87271881cf5a05b Mon Sep 17 00:00:00 2001
From: vfdev
Date: Fri, 10 Nov 2023 21:48:38 +0000
Subject: [PATCH] Fixing CI for pytorch version tests

---
 tests/ignite/metrics/test_accuracy.py              | 3 +++
 tests/ignite/metrics/test_classification_report.py | 3 +++
 tests/ignite/metrics/test_metric.py                | 3 +++
 3 files changed, 9 insertions(+)

diff --git a/tests/ignite/metrics/test_accuracy.py b/tests/ignite/metrics/test_accuracy.py
index 0b7fee3bf60..a7954e6afa3 100644
--- a/tests/ignite/metrics/test_accuracy.py
+++ b/tests/ignite/metrics/test_accuracy.py
@@ -2,6 +2,7 @@
 
 import pytest
 import torch
+from packaging.version import Version
 from sklearn.metrics import accuracy_score
 
 import ignite.distributed as idist
@@ -550,6 +551,7 @@ def update(_, i):
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
     device = idist.device()
     _test_distrib_multilabel_input_NHW(device)
@@ -561,6 +563,7 @@ def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
     device = idist.device()
     _test_distrib_multilabel_input_NHW(device)
diff --git a/tests/ignite/metrics/test_classification_report.py b/tests/ignite/metrics/test_classification_report.py
index b132daf1330..87e328c8051 100644
--- a/tests/ignite/metrics/test_classification_report.py
+++ b/tests/ignite/metrics/test_classification_report.py
@@ -3,6 +3,7 @@
 
 import pytest
 import torch
+from packaging.version import Version
 
 import ignite.distributed as idist
 from ignite.engine import Engine
@@ -161,6 +162,7 @@ def update(engine, i):
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
     device = idist.device()
     _test_integration_multiclass(device, True)
@@ -171,6 +173,7 @@ def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_gloo_cpu_or_gpu(local_rank, distributed_context_single_node_gloo):
     device = idist.device()
     _test_integration_multiclass(device, True)
diff --git a/tests/ignite/metrics/test_metric.py b/tests/ignite/metrics/test_metric.py
index b0ffc1df3a2..970ff3582e0 100644
--- a/tests/ignite/metrics/test_metric.py
+++ b/tests/ignite/metrics/test_metric.py
@@ -1,5 +1,6 @@
 import numbers
 import os
+from packaging.version import Version
 from typing import Dict, List
 from unittest.mock import MagicMock
 
@@ -710,6 +711,7 @@ def _test_creating_on_xla_fails(device):
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
     device = idist.device()
     _test_distrib_sync_all_reduce_decorator(device)
@@ -722,6 +724,7 @@ def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
     device = idist.device()
     _test_distrib_sync_all_reduce_decorator(device)
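
Note: every hunk above applies the same version-gated skip pattern. The sketch below is a minimal, self-contained illustration of that pattern, not part of the patch; the test name and body are placeholders, and it assumes `packaging` is installed in the test environment (it is a dependency of recent pytest releases, but pinning it explicitly is safer).

import pytest
import torch
from packaging.version import Version

# Parsing with packaging.version avoids lexicographic string pitfalls:
# "1.10.0" < "1.7.0" is True for plain strings but False once parsed.
@pytest.mark.skipif(
    Version(torch.__version__) < Version("1.7.0"),
    reason="Skip if torch < 1.7.0",
)
def test_needs_torch_1_7():
    # Placeholder body; the real tests in this patch exercise
    # ignite.distributed collectives that require torch >= 1.7.0.
    assert torch.__version__ is not None

Version() also handles local and pre-release torch builds such as "2.1.0+cu118" or "1.13.0a0", which would break naive string comparison.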