From e5d3db20e9c2ddb76f88642409e527239943c983 Mon Sep 17 00:00:00 2001 From: Pavel Iakubovskii Date: Sun, 28 May 2023 15:42:19 +0200 Subject: [PATCH] Bump timm from 0.6.13 to 0.9.2 (#768) * Create Makefile, simplify checks run * Fix new timm configs API * Add timm table generation step * Update timm table in docs * Update timm version to 0.9.2 * Bump smp version 0.3.3 * Update ubuntu runner --- .github/workflows/tests.yml | 42 +- .pre-commit-config.yaml | 2 +- Makefile | 25 ++ README.md | 19 +- docs/conf.py | 67 +-- docs/encoders.rst | 2 +- docs/encoders_timm.rst | 404 ++++++++---------- requirements.txt | 2 +- segmentation_models_pytorch/__version__.py | 2 +- .../encoders/__init__.py | 6 +- .../encoders/mix_transformer.py | 2 +- .../encoders/timm_efficientnet.py | 73 ++-- 12 files changed, 323 insertions(+), 323 deletions(-) create mode 100644 Makefile diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e5ab2adc..b656304c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,8 +11,9 @@ on: branches: [ master ] jobs: - test: - runs-on: ubuntu-18.04 + + lint: + runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} @@ -23,28 +24,27 @@ jobs: run: | python -m pip install --upgrade pip pip install torch==1.9.0+cpu torchvision==0.10.0+cpu -f https://download.pytorch.org/whl/torch_stable.html - pip install .[test] - - name: Run Tests - run: python -m pytest -s tests + make install_dev - name: Run Flake8 - run: flake8 --config=.flake8 + run: make flake8 + - name: Run Black + run: make black - check_code_formatting: - name: Check code formatting with Black + test: runs-on: ubuntu-latest - strategy: - matrix: - python-version: [3.8] + needs: [lint] steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Set up Python + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: - python-version: ${{ matrix.python-version }} - - name: Update pip - run: python -m pip install --upgrade pip - - name: Install Black - run: pip install black==22.3.0 - - name: Run Black - run: black --config=pyproject.toml --check . 
+ python-version: 3.7 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install torch==1.9.0+cpu torchvision==0.10.0+cpu -f https://download.pytorch.org/whl/torch_stable.html + make install_dev + - name: Run Flake8 + run: make flake8 + - name: Run Tests + run: make test diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b4749e04..799eaa52 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ repos: hooks: - id: black args: [ --config=pyproject.toml ] - - repo: https://gitlab.com/pycqa/flake8 + - repo: https://github.com/pycqa/flake8.git rev: 4.0.1 hooks: - id: flake8 diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..2e9d1294 --- /dev/null +++ b/Makefile @@ -0,0 +1,25 @@ +.PHONY: test + +.venv: + python3 -m venv .venv + +install_dev: .venv + .venv/bin/pip install -e .[test] + .venv/bin/pre-commit install + +test: .venv + .venv/bin/pytest -p no:cacheprovider tests/ + +table: + .venv/bin/python misc/generate_table.py + +table_timm: + .venv/bin/python misc/generate_table_timm.py + +black: .venv + .venv/bin/black ./segmentation_models_pytorch --config=pyproject.toml --check + +flake8: .venv + .venv/bin/flake8 ./segmentation_models_pytorch --config=.flake8 + +all: black flake8 test diff --git a/README.md b/README.md index f1e51b37..95d24ee2 100644 --- a/README.md +++ b/README.md @@ -469,25 +469,22 @@ $ pip install git+https://github.com/qubvel/segmentation_models.pytorch ### 🤝 Contributing -##### Install linting and formatting pre-commit hooks -```bash -pip install pre-commit black==22.3.0 flake8==4.0.1 -pre-commit install -``` +#### Install SMP -##### Run tests ```bash -pytest -p no:cacheprovider +make install_dev # create .venv, install SMP in dev mode ``` -##### Run tests in docker +#### Run tests and code checks + ```bash -$ docker build -f docker/Dockerfile.dev -t smp:dev . && docker run --rm smp:dev pytest -p no:cacheprovider +make all # run flake8, black, tests ``` -##### Generate table with encoders (in case you add a new encoder) +#### Update table with encoders + ```bash -$ docker build -f docker/Dockerfile.dev -t smp:dev . && docker run --rm smp:dev python misc/generate_table.py +make table # generate table with encoders and print to stdout ``` ### 📝 Citing diff --git a/docs/conf.py b/docs/conf.py index 588c0f21..dda5d88e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -18,20 +18,24 @@ import re import sys import datetime -sys.path.append('..') + +sys.path.append("..") # -- Project information ----------------------------------------------------- -project = 'Segmentation Models' -copyright = '{}, Pavel Iakubovskii'.format(datetime.datetime.now().year) -author = 'Pavel Iakubovskii' +project = "Segmentation Models" +copyright = "{}, Pavel Iakubovskii".format(datetime.datetime.now().year) +author = "Pavel Iakubovskii" + def get_version(): - sys.path.append('../segmentation_models_pytorch') + sys.path.append("../segmentation_models_pytorch") from __version__ import __version__ as version + sys.path.pop(-1) return version + version = get_version() # -- General configuration --------------------------------------------------- @@ -41,16 +45,16 @@ def get_version(): # ones. 
extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.coverage', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', - 'sphinx.ext.mathjax', - 'autodocsumm', + "sphinx.ext.autodoc", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", + "sphinx.ext.mathjax", + "autodocsumm", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -65,12 +69,14 @@ def get_version(): # import sphinx_rtd_theme + html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # import karma_sphinx_theme # html_theme = "karma_sphinx_theme" import faculty_sphinx_theme + html_theme = "faculty_sphinx_theme" # import catalyst_sphinx_theme @@ -82,7 +88,7 @@ def get_version(): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # -- Extension configuration ------------------------------------------------- @@ -92,37 +98,40 @@ def get_version(): napoleon_numpy_docstring = False autodoc_mock_imports = [ - 'torch', - 'tqdm', - 'numpy', - 'timm', - 'cv2', - 'PIL', - 'pretrainedmodels', - 'torchvision', - 'efficientnet-pytorch', - 'segmentation_models_pytorch.encoders', - 'segmentation_models_pytorch.utils', + "torch", + "tqdm", + "numpy", + "timm", + "cv2", + "PIL", + "pretrainedmodels", + "torchvision", + "efficientnet-pytorch", + "segmentation_models_pytorch.encoders", + "segmentation_models_pytorch.utils", # 'segmentation_models_pytorch.base', ] -autoclass_content = 'both' -autodoc_typehints = 'description' +autoclass_content = "both" +autodoc_typehints = "description" # --- Work around to make autoclass signatures not (*args, **kwargs) ---------- -class FakeSignature(): + +class FakeSignature: def __getattribute__(self, *args): raise ValueError + def f(app, obj, bound_method): if "__new__" in obj.__name__: obj.__signature__ = FakeSignature() + def setup(app): - app.connect('autodoc-before-process-signature', f) + app.connect("autodoc-before-process-signature", f) # Custom configuration -------------------------------------------------------- -autodoc_member_order = 'bysource' +autodoc_member_order = "bysource" diff --git a/docs/encoders.rst b/docs/encoders.rst index 32587c8d..d64607b8 100644 --- a/docs/encoders.rst +++ b/docs/encoders.rst @@ -249,7 +249,7 @@ EfficientNet +------------------------+--------------------------------------+-------------+ | timm-efficientnet-b8 | imagenet / advprop | 84M | +------------------------+--------------------------------------+-------------+ -| timm-efficientnet-l2 | noisy-student | 474M | +| timm-efficientnet-l2 | noisy-student / noisy-student-475 | 474M | +------------------------+--------------------------------------+-------------+ | timm-efficientnet-lite0| imagenet | 4M | +------------------------+--------------------------------------+-------------+ diff --git a/docs/encoders_timm.rst b/docs/encoders_timm.rst index 35595565..3441fdca 100644 --- a/docs/encoders_timm.rst +++ b/docs/encoders_timm.rst @@ -19,7 +19,15 @@ Total number of encoders: 549 +----------------------------------+------------------+ | Encoder name | Support dilation | 
+==================================+==================+ -| adv_inception_v3 | | +| SelecSls42 | | ++----------------------------------+------------------+ +| SelecSls42b | | ++----------------------------------+------------------+ +| SelecSls60 | | ++----------------------------------+------------------+ +| SelecSls60b | | ++----------------------------------+------------------+ +| SelecSls84 | | +----------------------------------+------------------+ | bat_resnext26ts | ✅ | +----------------------------------+------------------+ @@ -27,6 +35,50 @@ Total number of encoders: 549 +----------------------------------+------------------+ | botnet50ts_256 | ✅ | +----------------------------------+------------------+ +| coatnet_0_224 | | ++----------------------------------+------------------+ +| coatnet_0_rw_224 | | ++----------------------------------+------------------+ +| coatnet_1_224 | | ++----------------------------------+------------------+ +| coatnet_1_rw_224 | | ++----------------------------------+------------------+ +| coatnet_2_224 | | ++----------------------------------+------------------+ +| coatnet_2_rw_224 | | ++----------------------------------+------------------+ +| coatnet_3_224 | | ++----------------------------------+------------------+ +| coatnet_3_rw_224 | | ++----------------------------------+------------------+ +| coatnet_4_224 | | ++----------------------------------+------------------+ +| coatnet_5_224 | | ++----------------------------------+------------------+ +| coatnet_bn_0_rw_224 | | ++----------------------------------+------------------+ +| coatnet_nano_cc_224 | | ++----------------------------------+------------------+ +| coatnet_nano_rw_224 | | ++----------------------------------+------------------+ +| coatnet_pico_rw_224 | | ++----------------------------------+------------------+ +| coatnet_rmlp_0_rw_224 | | ++----------------------------------+------------------+ +| coatnet_rmlp_1_rw2_224 | | ++----------------------------------+------------------+ +| coatnet_rmlp_1_rw_224 | | ++----------------------------------+------------------+ +| coatnet_rmlp_2_rw_224 | | ++----------------------------------+------------------+ +| coatnet_rmlp_2_rw_384 | | ++----------------------------------+------------------+ +| coatnet_rmlp_3_rw_224 | | ++----------------------------------+------------------+ +| coatnet_rmlp_nano_rw_224 | | ++----------------------------------+------------------+ +| coatnext_nano_rw_224 | | ++----------------------------------+------------------+ | cs3darknet_focus_l | ✅ | +----------------------------------+------------------+ | cs3darknet_focus_m | ✅ | @@ -63,17 +115,13 @@ Total number of encoders: 549 +----------------------------------+------------------+ | densenet121 | | +----------------------------------+------------------+ -| densenet121d | | -+----------------------------------+------------------+ | densenet161 | | +----------------------------------+------------------+ | densenet169 | | +----------------------------------+------------------+ | densenet201 | | +----------------------------------+------------------+ -| densenet264 | | -+----------------------------------+------------------+ -| densenet264d_iabn | | +| densenet264d | | +----------------------------------+------------------+ | densenetblur121d | | +----------------------------------+------------------+ @@ -119,6 +167,8 @@ Total number of encoders: 549 +----------------------------------+------------------+ | dpn131 | | 
+----------------------------------+------------------+ +| dpn48b | | ++----------------------------------+------------------+ | dpn68 | | +----------------------------------+------------------+ | dpn68b | | @@ -247,8 +297,6 @@ Total number of encoders: 549 +----------------------------------+------------------+ | efficientnetv2_xl | ✅ | +----------------------------------+------------------+ -| ens_adv_inception_resnet_v2 | | -+----------------------------------+------------------+ | ese_vovnet19b_dw | | +----------------------------------+------------------+ | ese_vovnet19b_slim | | @@ -263,8 +311,6 @@ Total number of encoders: 549 +----------------------------------+------------------+ | ese_vovnet99b | | +----------------------------------+------------------+ -| ese_vovnet99b_iabn | | -+----------------------------------+------------------+ | fbnetc_100 | ✅ | +----------------------------------+------------------+ | fbnetv3_b | ✅ | @@ -295,52 +341,6 @@ Total number of encoders: 549 +----------------------------------+------------------+ | ghostnet_130 | | +----------------------------------+------------------+ -| gluon_inception_v3 | | -+----------------------------------+------------------+ -| gluon_resnet101_v1b | ✅ | -+----------------------------------+------------------+ -| gluon_resnet101_v1c | ✅ | -+----------------------------------+------------------+ -| gluon_resnet101_v1d | ✅ | -+----------------------------------+------------------+ -| gluon_resnet101_v1s | ✅ | -+----------------------------------+------------------+ -| gluon_resnet152_v1b | ✅ | -+----------------------------------+------------------+ -| gluon_resnet152_v1c | ✅ | -+----------------------------------+------------------+ -| gluon_resnet152_v1d | ✅ | -+----------------------------------+------------------+ -| gluon_resnet152_v1s | ✅ | -+----------------------------------+------------------+ -| gluon_resnet18_v1b | ✅ | -+----------------------------------+------------------+ -| gluon_resnet34_v1b | ✅ | -+----------------------------------+------------------+ -| gluon_resnet50_v1b | ✅ | -+----------------------------------+------------------+ -| gluon_resnet50_v1c | ✅ | -+----------------------------------+------------------+ -| gluon_resnet50_v1d | ✅ | -+----------------------------------+------------------+ -| gluon_resnet50_v1s | ✅ | -+----------------------------------+------------------+ -| gluon_resnext101_32x4d | ✅ | -+----------------------------------+------------------+ -| gluon_resnext101_64x4d | ✅ | -+----------------------------------+------------------+ -| gluon_resnext50_32x4d | ✅ | -+----------------------------------+------------------+ -| gluon_senet154 | ✅ | -+----------------------------------+------------------+ -| gluon_seresnext101_32x4d | ✅ | -+----------------------------------+------------------+ -| gluon_seresnext101_64x4d | ✅ | -+----------------------------------+------------------+ -| gluon_seresnext50_32x4d | ✅ | -+----------------------------------+------------------+ -| gluon_xception65 | ✅ | -+----------------------------------+------------------+ | halo2botnet50ts_256 | ✅ | +----------------------------------+------------------+ | halonet26t | ✅ | @@ -369,6 +369,8 @@ Total number of encoders: 549 +----------------------------------+------------------+ | hrnet_w18_small_v2 | | +----------------------------------+------------------+ +| hrnet_w18_ssld | | ++----------------------------------+------------------+ | hrnet_w30 | | 
+----------------------------------+------------------+ | hrnet_w32 | | @@ -379,15 +381,9 @@ Total number of encoders: 549 +----------------------------------+------------------+ | hrnet_w48 | | +----------------------------------+------------------+ -| hrnet_w64 | | -+----------------------------------+------------------+ -| ig_resnext101_32x16d | ✅ | +| hrnet_w48_ssld | | +----------------------------------+------------------+ -| ig_resnext101_32x32d | ✅ | -+----------------------------------+------------------+ -| ig_resnext101_32x48d | ✅ | -+----------------------------------+------------------+ -| ig_resnext101_32x8d | ✅ | +| hrnet_w64 | | +----------------------------------+------------------+ | inception_resnet_v2 | | +----------------------------------+------------------+ @@ -431,6 +427,76 @@ Total number of encoders: 549 +----------------------------------+------------------+ | legacy_seresnext50_32x4d | | +----------------------------------+------------------+ +| legacy_xception | | ++----------------------------------+------------------+ +| maxvit_base_tf_224 | | ++----------------------------------+------------------+ +| maxvit_base_tf_384 | | ++----------------------------------+------------------+ +| maxvit_base_tf_512 | | ++----------------------------------+------------------+ +| maxvit_large_tf_224 | | ++----------------------------------+------------------+ +| maxvit_large_tf_384 | | ++----------------------------------+------------------+ +| maxvit_large_tf_512 | | ++----------------------------------+------------------+ +| maxvit_nano_rw_256 | | ++----------------------------------+------------------+ +| maxvit_pico_rw_256 | | ++----------------------------------+------------------+ +| maxvit_rmlp_base_rw_224 | | ++----------------------------------+------------------+ +| maxvit_rmlp_base_rw_384 | | ++----------------------------------+------------------+ +| maxvit_rmlp_nano_rw_256 | | ++----------------------------------+------------------+ +| maxvit_rmlp_pico_rw_256 | | ++----------------------------------+------------------+ +| maxvit_rmlp_small_rw_224 | | ++----------------------------------+------------------+ +| maxvit_rmlp_small_rw_256 | | ++----------------------------------+------------------+ +| maxvit_rmlp_tiny_rw_256 | | ++----------------------------------+------------------+ +| maxvit_small_tf_224 | | ++----------------------------------+------------------+ +| maxvit_small_tf_384 | | ++----------------------------------+------------------+ +| maxvit_small_tf_512 | | ++----------------------------------+------------------+ +| maxvit_tiny_pm_256 | | ++----------------------------------+------------------+ +| maxvit_tiny_rw_224 | | ++----------------------------------+------------------+ +| maxvit_tiny_rw_256 | | ++----------------------------------+------------------+ +| maxvit_tiny_tf_224 | | ++----------------------------------+------------------+ +| maxvit_tiny_tf_384 | | ++----------------------------------+------------------+ +| maxvit_tiny_tf_512 | | ++----------------------------------+------------------+ +| maxvit_xlarge_tf_224 | | ++----------------------------------+------------------+ +| maxvit_xlarge_tf_384 | | ++----------------------------------+------------------+ +| maxvit_xlarge_tf_512 | | ++----------------------------------+------------------+ +| maxxvit_rmlp_nano_rw_256 | | ++----------------------------------+------------------+ +| maxxvit_rmlp_small_rw_256 | | ++----------------------------------+------------------+ +| 
maxxvit_rmlp_tiny_rw_256 | | ++----------------------------------+------------------+ +| maxxvitv2_nano_rw_256 | | ++----------------------------------+------------------+ +| maxxvitv2_rmlp_base_rw_224 | | ++----------------------------------+------------------+ +| maxxvitv2_rmlp_base_rw_384 | | ++----------------------------------+------------------+ +| maxxvitv2_rmlp_large_rw_224 | | ++----------------------------------+------------------+ | mixnet_l | ✅ | +----------------------------------+------------------+ | mixnet_m | ✅ | @@ -473,10 +539,6 @@ Total number of encoders: 549 +----------------------------------+------------------+ | mobilenetv3_large_100 | ✅ | +----------------------------------+------------------+ -| mobilenetv3_large_100_miil | ✅ | -+----------------------------------+------------------+ -| mobilenetv3_large_100_miil_in21k | ✅ | -+----------------------------------+------------------+ | mobilenetv3_rw | ✅ | +----------------------------------+------------------+ | mobilenetv3_small_050 | ✅ | @@ -501,22 +563,10 @@ Total number of encoders: 549 +----------------------------------+------------------+ | mobilevitv2_150 | ✅ | +----------------------------------+------------------+ -| mobilevitv2_150_384_in22ft1k | ✅ | -+----------------------------------+------------------+ -| mobilevitv2_150_in22ft1k | ✅ | -+----------------------------------+------------------+ | mobilevitv2_175 | ✅ | +----------------------------------+------------------+ -| mobilevitv2_175_384_in22ft1k | ✅ | -+----------------------------------+------------------+ -| mobilevitv2_175_in22ft1k | ✅ | -+----------------------------------+------------------+ | mobilevitv2_200 | ✅ | +----------------------------------+------------------+ -| mobilevitv2_200_384_in22ft1k | ✅ | -+----------------------------------+------------------+ -| mobilevitv2_200_in22ft1k | ✅ | -+----------------------------------+------------------+ | nasnetalarge | | +----------------------------------+------------------+ | nf_ecaresnet101 | ✅ | @@ -577,6 +627,8 @@ Total number of encoders: 549 +----------------------------------+------------------+ | regnetx_004 | ✅ | +----------------------------------+------------------+ +| regnetx_004_tv | ✅ | ++----------------------------------+------------------+ | regnetx_006 | ✅ | +----------------------------------+------------------+ | regnetx_008 | ✅ | @@ -605,29 +657,39 @@ Total number of encoders: 549 +----------------------------------+------------------+ | regnety_008 | ✅ | +----------------------------------+------------------+ +| regnety_008_tv | ✅ | ++----------------------------------+------------------+ | regnety_016 | ✅ | +----------------------------------+------------------+ | regnety_032 | ✅ | +----------------------------------+------------------+ | regnety_040 | ✅ | +----------------------------------+------------------+ -| regnety_040s_gn | ✅ | +| regnety_040_sgn | ✅ | +----------------------------------+------------------+ | regnety_064 | ✅ | +----------------------------------+------------------+ | regnety_080 | ✅ | +----------------------------------+------------------+ +| regnety_080_tv | ✅ | ++----------------------------------+------------------+ | regnety_120 | ✅ | +----------------------------------+------------------+ +| regnety_1280 | ✅ | ++----------------------------------+------------------+ | regnety_160 | ✅ | +----------------------------------+------------------+ +| regnety_2560 | ✅ | ++----------------------------------+------------------+ | regnety_320 | ✅ 
| +----------------------------------+------------------+ +| regnety_640 | ✅ | ++----------------------------------+------------------+ | regnetz_005 | ✅ | +----------------------------------+------------------+ | regnetz_040 | ✅ | +----------------------------------+------------------+ -| regnetz_040h | ✅ | +| regnetz_040_h | ✅ | +----------------------------------+------------------+ | regnetz_b16 | ✅ | +----------------------------------+------------------+ @@ -663,6 +725,8 @@ Total number of encoders: 549 +----------------------------------+------------------+ | res2net101_26w_4s | ✅ | +----------------------------------+------------------+ +| res2net101d | ✅ | ++----------------------------------+------------------+ | res2net50_14w_8s | ✅ | +----------------------------------+------------------+ | res2net50_26w_4s | ✅ | @@ -673,6 +737,8 @@ Total number of encoders: 549 +----------------------------------+------------------+ | res2net50_48w_2s | ✅ | +----------------------------------+------------------+ +| res2net50d | ✅ | ++----------------------------------+------------------+ | res2next50 | ✅ | +----------------------------------+------------------+ | resnest101e | ✅ | @@ -693,16 +759,24 @@ Total number of encoders: 549 +----------------------------------+------------------+ | resnet101 | ✅ | +----------------------------------+------------------+ +| resnet101c | ✅ | ++----------------------------------+------------------+ | resnet101d | ✅ | +----------------------------------+------------------+ +| resnet101s | ✅ | ++----------------------------------+------------------+ | resnet10t | ✅ | +----------------------------------+------------------+ | resnet14t | ✅ | +----------------------------------+------------------+ | resnet152 | ✅ | +----------------------------------+------------------+ +| resnet152c | ✅ | ++----------------------------------+------------------+ | resnet152d | ✅ | +----------------------------------+------------------+ +| resnet152s | ✅ | ++----------------------------------+------------------+ | resnet18 | ✅ | +----------------------------------+------------------+ | resnet18d | ✅ | @@ -729,8 +803,12 @@ Total number of encoders: 549 +----------------------------------+------------------+ | resnet50_gn | ✅ | +----------------------------------+------------------+ +| resnet50c | ✅ | ++----------------------------------+------------------+ | resnet50d | ✅ | +----------------------------------+------------------+ +| resnet50s | ✅ | ++----------------------------------+------------------+ | resnet50t | ✅ | +----------------------------------+------------------+ | resnet51q | ✅ | @@ -739,6 +817,8 @@ Total number of encoders: 549 +----------------------------------+------------------+ | resnetaa101d | ✅ | +----------------------------------+------------------+ +| resnetaa34d | ✅ | ++----------------------------------+------------------+ | resnetaa50 | ✅ | +----------------------------------+------------------+ | resnetaa50d | ✅ | @@ -769,36 +849,22 @@ Total number of encoders: 549 +----------------------------------+------------------+ | resnetv2_101d | ✅ | +----------------------------------+------------------+ -| resnetv2_101x1_bitm | ✅ | -+----------------------------------+------------------+ -| resnetv2_101x1_bitm_in21k | ✅ | +| resnetv2_101x1_bit | ✅ | +----------------------------------+------------------+ -| resnetv2_101x3_bitm | ✅ | -+----------------------------------+------------------+ -| resnetv2_101x3_bitm_in21k | ✅ | +| resnetv2_101x3_bit | ✅ | 
+----------------------------------+------------------+ | resnetv2_152 | ✅ | +----------------------------------+------------------+ | resnetv2_152d | ✅ | +----------------------------------+------------------+ -| resnetv2_152x2_bit_teacher | ✅ | -+----------------------------------+------------------+ -| resnetv2_152x2_bit_teacher_384 | ✅ | -+----------------------------------+------------------+ -| resnetv2_152x2_bitm | ✅ | -+----------------------------------+------------------+ -| resnetv2_152x2_bitm_in21k | ✅ | -+----------------------------------+------------------+ -| resnetv2_152x4_bitm | ✅ | +| resnetv2_152x2_bit | ✅ | +----------------------------------+------------------+ -| resnetv2_152x4_bitm_in21k | ✅ | +| resnetv2_152x4_bit | ✅ | +----------------------------------+------------------+ | resnetv2_50 | ✅ | +----------------------------------+------------------+ | resnetv2_50d | ✅ | +----------------------------------+------------------+ -| resnetv2_50d_evob | ✅ | -+----------------------------------+------------------+ | resnetv2_50d_evos | ✅ | +----------------------------------+------------------+ | resnetv2_50d_frn | ✅ | @@ -807,15 +873,13 @@ Total number of encoders: 549 +----------------------------------+------------------+ | resnetv2_50t | ✅ | +----------------------------------+------------------+ -| resnetv2_50x1_bit_distilled | ✅ | -+----------------------------------+------------------+ -| resnetv2_50x1_bitm | ✅ | +| resnetv2_50x1_bit | ✅ | +----------------------------------+------------------+ -| resnetv2_50x1_bitm_in21k | ✅ | +| resnetv2_50x3_bit | ✅ | +----------------------------------+------------------+ -| resnetv2_50x3_bitm | ✅ | +| resnext101_32x16d | ✅ | +----------------------------------+------------------+ -| resnetv2_50x3_bitm_in21k | ✅ | +| resnext101_32x32d | ✅ | +----------------------------------+------------------+ | resnext101_32x4d | ✅ | +----------------------------------+------------------+ @@ -829,35 +893,29 @@ Total number of encoders: 549 +----------------------------------+------------------+ | resnext50d_32x4d | ✅ | +----------------------------------+------------------+ -| rexnet_100 | | -+----------------------------------+------------------+ -| rexnet_130 | | +| rexnet_100 | ✅ | +----------------------------------+------------------+ -| rexnet_150 | | +| rexnet_130 | ✅ | +----------------------------------+------------------+ -| rexnet_200 | | +| rexnet_150 | ✅ | +----------------------------------+------------------+ -| rexnetr_100 | | +| rexnet_200 | ✅ | +----------------------------------+------------------+ -| rexnetr_130 | | +| rexnet_300 | ✅ | +----------------------------------+------------------+ -| rexnetr_150 | | +| rexnetr_100 | ✅ | +----------------------------------+------------------+ -| rexnetr_200 | | +| rexnetr_130 | ✅ | +----------------------------------+------------------+ -| sebotnet33ts_256 | ✅ | -+----------------------------------+------------------+ -| sehalonet33ts | ✅ | +| rexnetr_150 | ✅ | +----------------------------------+------------------+ -| selecsls42 | | +| rexnetr_200 | ✅ | +----------------------------------+------------------+ -| selecsls42b | | +| rexnetr_300 | ✅ | +----------------------------------+------------------+ -| selecsls60 | | -+----------------------------------+------------------+ -| selecsls60b | | +| sebotnet33ts_256 | ✅ | +----------------------------------+------------------+ -| selecsls84 | | +| sehalonet33ts | ✅ | +----------------------------------+------------------+ | 
semnasnet_050 | ✅ | +----------------------------------+------------------+ @@ -867,8 +925,6 @@ Total number of encoders: 549 +----------------------------------+------------------+ | semnasnet_140 | ✅ | +----------------------------------+------------------+ -| semobilevit_s | ✅ | -+----------------------------------+------------------+ | senet154 | ✅ | +----------------------------------+------------------+ | seresnet101 | ✅ | @@ -897,6 +953,8 @@ Total number of encoders: 549 +----------------------------------+------------------+ | seresnext101_32x8d | ✅ | +----------------------------------+------------------+ +| seresnext101_64x4d | ✅ | ++----------------------------------+------------------+ | seresnext101d_32x8d | ✅ | +----------------------------------+------------------+ | seresnext26d_32x4d | ✅ | @@ -923,82 +981,24 @@ Total number of encoders: 549 +----------------------------------+------------------+ | spnasnet_100 | ✅ | +----------------------------------+------------------+ -| ssl_resnet18 | ✅ | -+----------------------------------+------------------+ -| ssl_resnet50 | ✅ | -+----------------------------------+------------------+ -| ssl_resnext101_32x16d | ✅ | -+----------------------------------+------------------+ -| ssl_resnext101_32x4d | ✅ | -+----------------------------------+------------------+ -| ssl_resnext101_32x8d | ✅ | -+----------------------------------+------------------+ -| ssl_resnext50_32x4d | ✅ | -+----------------------------------+------------------+ -| swsl_resnet18 | ✅ | -+----------------------------------+------------------+ -| swsl_resnet50 | ✅ | -+----------------------------------+------------------+ -| swsl_resnext101_32x16d | ✅ | -+----------------------------------+------------------+ -| swsl_resnext101_32x4d | ✅ | -+----------------------------------+------------------+ -| swsl_resnext101_32x8d | ✅ | -+----------------------------------+------------------+ -| swsl_resnext50_32x4d | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_b0 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b0_ap | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_b0_ns | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_b1 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b1_ap | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_b1_ns | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_b2 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b2_ap | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_b2_ns | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_b3 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b3_ap | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_b3_ns | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_b4 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b4_ap | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_b4_ns | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_b5 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b5_ap | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_b5_ns | ✅ | 
-+----------------------------------+------------------+ | tf_efficientnet_b6 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b6_ap | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_b6_ns | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_b7 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b7_ap | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_b7_ns | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_b8 | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_b8_ap | ✅ | -+----------------------------------+------------------+ | tf_efficientnet_cc_b0_4e | ✅ | +----------------------------------+------------------+ | tf_efficientnet_cc_b0_8e | ✅ | @@ -1011,9 +1011,7 @@ Total number of encoders: 549 +----------------------------------+------------------+ | tf_efficientnet_es | ✅ | +----------------------------------+------------------+ -| tf_efficientnet_l2_ns | ✅ | -+----------------------------------+------------------+ -| tf_efficientnet_l2_ns_475 | ✅ | +| tf_efficientnet_l2 | ✅ | +----------------------------------+------------------+ | tf_efficientnet_lite0 | ✅ | +----------------------------------+------------------+ @@ -1035,27 +1033,11 @@ Total number of encoders: 549 +----------------------------------+------------------+ | tf_efficientnetv2_l | ✅ | +----------------------------------+------------------+ -| tf_efficientnetv2_l_in21ft1k | ✅ | -+----------------------------------+------------------+ -| tf_efficientnetv2_l_in21k | ✅ | -+----------------------------------+------------------+ | tf_efficientnetv2_m | ✅ | +----------------------------------+------------------+ -| tf_efficientnetv2_m_in21ft1k | ✅ | -+----------------------------------+------------------+ -| tf_efficientnetv2_m_in21k | ✅ | -+----------------------------------+------------------+ | tf_efficientnetv2_s | ✅ | +----------------------------------+------------------+ -| tf_efficientnetv2_s_in21ft1k | ✅ | -+----------------------------------+------------------+ -| tf_efficientnetv2_s_in21k | ✅ | -+----------------------------------+------------------+ -| tf_efficientnetv2_xl_in21ft1k | ✅ | -+----------------------------------+------------------+ -| tf_efficientnetv2_xl_in21k | ✅ | -+----------------------------------+------------------+ -| tf_inception_v3 | | +| tf_efficientnetv2_xl | ✅ | +----------------------------------+------------------+ | tf_mixnet_l | ✅ | +----------------------------------+------------------+ @@ -1085,18 +1067,6 @@ Total number of encoders: 549 +----------------------------------+------------------+ | tinynet_e | ✅ | +----------------------------------+------------------+ -| tv_densenet121 | | -+----------------------------------+------------------+ -| tv_resnet101 | ✅ | -+----------------------------------+------------------+ -| tv_resnet152 | ✅ | -+----------------------------------+------------------+ -| tv_resnet34 | ✅ | -+----------------------------------+------------------+ -| tv_resnet50 | ✅ | -+----------------------------------+------------------+ -| tv_resnext50_32x4d | ✅ | -+----------------------------------+------------------+ | vovnet39a | | +----------------------------------+------------------+ | vovnet57a | | @@ -1105,8 +1075,6 @@ Total number of encoders: 549 +----------------------------------+------------------+ | wide_resnet50_2 | ✅ | 
+----------------------------------+------------------+ -| xception | | -+----------------------------------+------------------+ | xception41 | ✅ | +----------------------------------+------------------+ | xception41p | ✅ | diff --git a/requirements.txt b/requirements.txt index 9dc118f2..9798841e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ torchvision>=0.5.0 pretrainedmodels==0.7.4 efficientnet-pytorch==0.7.1 -timm==0.6.13 +timm==0.9.2 tqdm pillow diff --git a/segmentation_models_pytorch/__version__.py b/segmentation_models_pytorch/__version__.py index e9768f57..432c841f 100644 --- a/segmentation_models_pytorch/__version__.py +++ b/segmentation_models_pytorch/__version__.py @@ -1,3 +1,3 @@ -VERSION = (0, 3, 2) +VERSION = (0, 3, 3) __version__ = ".".join(map(str, VERSION)) diff --git a/segmentation_models_pytorch/encoders/__init__.py b/segmentation_models_pytorch/encoders/__init__.py index 2a3ff5c0..7551153f 100644 --- a/segmentation_models_pytorch/encoders/__init__.py +++ b/segmentation_models_pytorch/encoders/__init__.py @@ -101,7 +101,7 @@ def get_preprocessing_params(encoder_name, pretrained="imagenet"): encoder_name = encoder_name[3:] if not timm.models.is_model_pretrained(encoder_name): raise ValueError(f"{encoder_name} does not have pretrained weights and preprocessing parameters") - settings = timm.models.get_pretrained_cfg(encoder_name) + settings = timm.models.get_pretrained_cfg(encoder_name).__dict__ else: all_settings = encoders[encoder_name]["pretrained_settings"] if pretrained not in all_settings.keys(): @@ -111,8 +111,8 @@ def get_preprocessing_params(encoder_name, pretrained="imagenet"): formatted_settings = {} formatted_settings["input_space"] = settings.get("input_space", "RGB") formatted_settings["input_range"] = list(settings.get("input_range", [0, 1])) - formatted_settings["mean"] = list(settings.get("mean")) - formatted_settings["std"] = list(settings.get("std")) + formatted_settings["mean"] = list(settings["mean"]) + formatted_settings["std"] = list(settings["std"]) return formatted_settings diff --git a/segmentation_models_pytorch/encoders/mix_transformer.py b/segmentation_models_pytorch/encoders/mix_transformer.py index d211e2ca..1995ca1f 100644 --- a/segmentation_models_pytorch/encoders/mix_transformer.py +++ b/segmentation_models_pytorch/encoders/mix_transformer.py @@ -8,7 +8,7 @@ import torch.nn as nn from functools import partial -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +from timm.layers import DropPath, to_2tuple, trunc_normal_ class Mlp(nn.Module): diff --git a/segmentation_models_pytorch/encoders/timm_efficientnet.py b/segmentation_models_pytorch/encoders/timm_efficientnet.py index 1af1824d..0c9cd52d 100644 --- a/segmentation_models_pytorch/encoders/timm_efficientnet.py +++ b/segmentation_models_pytorch/encoders/timm_efficientnet.py @@ -5,7 +5,7 @@ from timm.models.efficientnet import EfficientNet from timm.models.efficientnet import decode_arch_def, round_channels, default_cfgs -from timm.models.layers.activations import Swish +from timm.layers.activations import Swish from ._base import EncoderMixin @@ -158,9 +158,9 @@ def __init__( def prepare_settings(settings): return { - "mean": settings["mean"], - "std": settings["std"], - "url": settings["url"], + "mean": settings.mean, + "std": settings.std, + "url": settings.url, "input_range": (0, 1), "input_space": "RGB", } @@ -170,9 +170,9 @@ def prepare_settings(settings): "timm-efficientnet-b0": { "encoder": EfficientNetEncoder, "pretrained_settings": { - 
"imagenet": prepare_settings(default_cfgs["tf_efficientnet_b0"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b0_ap"]), - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b0_ns"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b0"].cfgs["in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b0"].cfgs["ap_in1k"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b0"].cfgs["ns_jft_in1k"]), }, "params": { "out_channels": (3, 32, 24, 40, 112, 320), @@ -185,9 +185,9 @@ def prepare_settings(settings): "timm-efficientnet-b1": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b1"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b1_ap"]), - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b1_ns"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b1"].cfgs["in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b1"].cfgs["ap_in1k"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b1"].cfgs["ns_jft_in1k"]), }, "params": { "out_channels": (3, 32, 24, 40, 112, 320), @@ -200,9 +200,9 @@ def prepare_settings(settings): "timm-efficientnet-b2": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b2"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b2_ap"]), - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b2_ns"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b2"].cfgs["in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b2"].cfgs["ap_in1k"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b2"].cfgs["ns_jft_in1k"]), }, "params": { "out_channels": (3, 32, 24, 48, 120, 352), @@ -215,9 +215,9 @@ def prepare_settings(settings): "timm-efficientnet-b3": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b3"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b3_ap"]), - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b3_ns"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b3"].cfgs["in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b3"].cfgs["ap_in1k"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b3"].cfgs["ns_jft_in1k"]), }, "params": { "out_channels": (3, 40, 32, 48, 136, 384), @@ -230,9 +230,9 @@ def prepare_settings(settings): "timm-efficientnet-b4": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b4"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b4_ap"]), - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b4_ns"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b4"].cfgs["in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b4"].cfgs["ap_in1k"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b4"].cfgs["ns_jft_in1k"]), }, "params": { "out_channels": (3, 48, 32, 56, 160, 448), @@ -245,9 +245,9 @@ def prepare_settings(settings): "timm-efficientnet-b5": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b5"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b5_ap"]), - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b5_ns"]), + "imagenet": 
prepare_settings(default_cfgs["tf_efficientnet_b5"].cfgs["in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b5"].cfgs["ap_in1k"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b5"].cfgs["ns_jft_in1k"]), }, "params": { "out_channels": (3, 48, 40, 64, 176, 512), @@ -260,9 +260,9 @@ def prepare_settings(settings): "timm-efficientnet-b6": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b6"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b6_ap"]), - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b6_ns"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b6"].cfgs["aa_in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b6"].cfgs["ap_in1k"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b6"].cfgs["ns_jft_in1k"]), }, "params": { "out_channels": (3, 56, 40, 72, 200, 576), @@ -275,9 +275,9 @@ def prepare_settings(settings): "timm-efficientnet-b7": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b7"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b7_ap"]), - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b7_ns"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b7"].cfgs["aa_in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b7"].cfgs["ap_in1k"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_b7"].cfgs["ns_jft_in1k"]), }, "params": { "out_channels": (3, 64, 48, 80, 224, 640), @@ -290,8 +290,8 @@ def prepare_settings(settings): "timm-efficientnet-b8": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b8"]), - "advprop": prepare_settings(default_cfgs["tf_efficientnet_b8_ap"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_b8"].cfgs["ra_in1k"]), + "advprop": prepare_settings(default_cfgs["tf_efficientnet_b8"].cfgs["ap_in1k"]), }, "params": { "out_channels": (3, 72, 56, 88, 248, 704), @@ -304,7 +304,8 @@ def prepare_settings(settings): "timm-efficientnet-l2": { "encoder": EfficientNetEncoder, "pretrained_settings": { - "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_l2_ns"]), + "noisy-student": prepare_settings(default_cfgs["tf_efficientnet_l2"].cfgs["ns_jft_in1k"]), + "noisy-student-475": prepare_settings(default_cfgs["tf_efficientnet_l2"].cfgs["ns_jft_in1k_475"]), }, "params": { "out_channels": (3, 136, 104, 176, 480, 1376), @@ -317,7 +318,7 @@ def prepare_settings(settings): "timm-tf_efficientnet_lite0": { "encoder": EfficientNetLiteEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite0"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite0"].cfgs["in1k"]), }, "params": { "out_channels": (3, 32, 24, 40, 112, 320), @@ -330,7 +331,7 @@ def prepare_settings(settings): "timm-tf_efficientnet_lite1": { "encoder": EfficientNetLiteEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite1"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite1"].cfgs["in1k"]), }, "params": { "out_channels": (3, 32, 24, 40, 112, 320), @@ -343,7 +344,7 @@ def prepare_settings(settings): "timm-tf_efficientnet_lite2": { "encoder": EfficientNetLiteEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite2"]), + "imagenet": 
prepare_settings(default_cfgs["tf_efficientnet_lite2"].cfgs["in1k"]), }, "params": { "out_channels": (3, 32, 24, 48, 120, 352), @@ -356,7 +357,7 @@ def prepare_settings(settings): "timm-tf_efficientnet_lite3": { "encoder": EfficientNetLiteEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite3"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite3"].cfgs["in1k"]), }, "params": { "out_channels": (3, 32, 32, 48, 136, 384), @@ -369,7 +370,7 @@ def prepare_settings(settings): "timm-tf_efficientnet_lite4": { "encoder": EfficientNetLiteEncoder, "pretrained_settings": { - "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite4"]), + "imagenet": prepare_settings(default_cfgs["tf_efficientnet_lite4"].cfgs["in1k"]), }, "params": { "out_channels": (3, 32, 32, 56, 160, 448),
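
The timm_efficientnet.py hunks above switch to timm 0.9's tag-keyed pretrained configs: `default_cfgs` entries are no longer flat dicts keyed by model-name suffixes such as `_ap` or `_ns`, but `DefaultCfg` objects holding per-tag `PretrainedCfg` entries, which is why `prepare_settings` now reads attributes (`settings.mean`, `settings.url`) instead of dict keys. A minimal sketch of looking up a config under the new API, assuming timm 0.9.x is installed:

```python
from timm.models.efficientnet import default_cfgs

# Tag-keyed lookup introduced in timm 0.9: one entry per model family,
# with per-weight tags such as "in1k", "ap_in1k", "ns_jft_in1k".
cfg = default_cfgs["tf_efficientnet_b0"].cfgs["in1k"]

print(cfg.url)   # checkpoint download URL
print(cfg.mean)  # normalization mean used for this checkpoint
print(cfg.std)   # normalization std used for this checkpoint
```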
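
The same API change motivates the `.__dict__` added in `get_preprocessing_params`: `timm.models.get_pretrained_cfg()` now returns a `PretrainedCfg` dataclass rather than a plain dict. A hedged usage sketch of the resulting public helpers; the `tu-resnet34` encoder name is only an illustrative choice:

```python
import segmentation_models_pytorch as smp

# Preprocessing parameters resolved through timm's PretrainedCfg for a
# "tu-" (timm universal) encoder; keys mirror formatted_settings above.
params = smp.encoders.get_preprocessing_params("tu-resnet34", pretrained="imagenet")
# {'input_space': 'RGB', 'input_range': [0, 1], 'mean': [...], 'std': [...]}

# Or obtain a ready-to-use callable for normalizing input images.
preprocess_input = smp.encoders.get_preprocessing_fn("tu-resnet34", pretrained="imagenet")
```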
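
The layer helpers also moved from `timm.models.layers` to `timm.layers`, which is what the `mix_transformer.py` and `timm_efficientnet.py` import hunks track. Since this repository pins `timm==0.9.2`, the new path is imported directly; for code that must still support older timm releases, a guarded import is one option (a sketch, not part of this patch):

```python
try:
    # timm >= 0.9: helpers live in the top-level timm.layers package
    from timm.layers import DropPath, to_2tuple, trunc_normal_
except ImportError:
    # timm < 0.9: helpers were under timm.models.layers
    from timm.models.layers import DropPath, to_2tuple, trunc_normal_
```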
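
With the tag-keyed configs, `timm-efficientnet-l2` now exposes both `noisy-student` and `noisy-student-475` weights, as reflected in the updated `docs/encoders.rst` row. A minimal usage sketch; the `in_channels` and `classes` values are chosen only for illustration:

```python
import segmentation_models_pytorch as smp

model = smp.Unet(
    encoder_name="timm-efficientnet-l2",
    encoder_weights="noisy-student-475",  # weights key added by this change
    in_channels=3,
    classes=1,
)
```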
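
The regenerated `docs/encoders_timm.rst` table lists the timm models usable through the `tu-` prefix; the "Support dilation" column marks encoders that can run with a reduced output stride, which decoders such as DeepLabV3+ rely on. A hedged sketch, assuming the `encoder_output_stride` argument of `DeepLabV3Plus` and an encoder marked with ✅ in that column:

```python
import segmentation_models_pytorch as smp

# Any name from the table works with the "tu-" prefix; only encoders marked
# with ✅ in "Support dilation" can honor a dilated stride-16/8 backbone.
model = smp.DeepLabV3Plus(
    encoder_name="tu-resnet50d",
    encoder_weights="imagenet",
    encoder_output_stride=16,
    classes=1,
)
```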