From 5afca5fd019ad4c336539407060b69dbf576da47 Mon Sep 17 00:00:00 2001 From: John Reese Date: Sun, 15 May 2022 12:53:03 -0700 Subject: [PATCH] apply import merging for fbcode (11 of 11) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary: Applies new import merging and sorting from µsort v1.0. When merging imports, µsort will make a best-effort to move associated comments to match merged elements, but there are known limitations due to the dynamic nature of Python and developer tooling. These changes should not produce any dangerous runtime changes, but may require touch-ups to satisfy linters and other tooling. Note that µsort uses case-insensitive, lexicographical sorting, which results in a different ordering compared to isort. This provides a more consistent sorting order, matching the case-insensitive order used when sorting import statements by module name, and ensures that "frog", "FROG", and "Frog" always sort next to each other. For details on µsort's sorting and merging semantics, see the user guide: https://usort.readthedocs.io/en/stable/guide.html#sorting Reviewed By: lisroach Differential Revision: D36402260 fbshipit-source-id: 7cb52f09b740ccc580e61e6d1787d27381a8ce00 --- hubconf.py | 2 +- pytorchvideo/data/__init__.py | 2 +- pytorchvideo/data/epic_kitchen/epic_kitchen_dataset.py | 2 +- pytorchvideo/data/kinetics.py | 2 +- pytorchvideo/data/ucf101.py | 2 +- pytorchvideo/layers/__init__.py | 2 +- pytorchvideo/models/__init__.py | 6 +++--- pytorchvideo/models/csn.py | 2 +- pytorchvideo/models/hub/__init__.py | 2 +- pytorchvideo/models/hub/vision_transformers.py | 2 +- pytorchvideo/transforms/augmix.py | 2 +- pytorchvideo_trainer/pytorchvideo_trainer/module/byol.py | 2 +- .../pytorchvideo_trainer/module/moco_v2.py | 2 +- .../pytorchvideo_trainer/module/video_classification.py | 2 +- tests/test_data_dataset_manifest_utils.py | 2 +- tests/test_data_domsev_dataset.py | 8 ++++---- 
tests/test_data_epic_kitchen_dataset.py | 4 ++-- tests/test_data_labeled_video_dataset.py | 2 +- tests/test_layers_nonlocal_net.py | 2 +- tests/test_models_head.py | 6 +++--- tests/test_models_masked_multistream.py | 4 ++-- tests/test_models_resnet.py | 6 +++--- tests/test_models_stem.py | 2 +- tests/test_transforms.py | 2 +- 24 files changed, 35 insertions(+), 35 deletions(-) diff --git a/hubconf.py b/hubconf.py index e4b72285..d22fdb99 100644 --- a/hubconf.py +++ b/hubconf.py @@ -14,9 +14,9 @@ slow_r50, slow_r50_detection, slowfast_16x8_r101_50_50, + slowfast_r101, slowfast_r50, slowfast_r50_detection, - slowfast_r101, x3d_l, x3d_m, x3d_s, diff --git a/pytorchvideo/data/__init__.py b/pytorchvideo/data/__init__.py index 4199e66a..f7316dc8 100644 --- a/pytorchvideo/data/__init__.py +++ b/pytorchvideo/data/__init__.py @@ -13,6 +13,6 @@ from .epic_kitchen_recognition import EpicKitchenRecognition # noqa from .hmdb51 import Hmdb51 # noqa from .kinetics import Kinetics # noqa -from .labeled_video_dataset import LabeledVideoDataset, labeled_video_dataset # noqa +from .labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset # noqa from .ssv2 import SSv2 from .ucf101 import Ucf101 # noqa diff --git a/pytorchvideo/data/epic_kitchen/epic_kitchen_dataset.py b/pytorchvideo/data/epic_kitchen/epic_kitchen_dataset.py index 299a6c6a..6077517b 100644 --- a/pytorchvideo/data/epic_kitchen/epic_kitchen_dataset.py +++ b/pytorchvideo/data/epic_kitchen/epic_kitchen_dataset.py @@ -7,12 +7,12 @@ import torch from pytorchvideo.data.dataset_manifest_utils import ( EncodedVideoInfo, + get_seconds_from_hms_time, VideoClipInfo, VideoDataset, VideoDatasetType, VideoFrameInfo, VideoInfo, - get_seconds_from_hms_time, ) from pytorchvideo.data.frame_video import FrameVideo from pytorchvideo.data.utils import DataclassFieldCaster, load_dataclass_dict_from_csv diff --git a/pytorchvideo/data/kinetics.py b/pytorchvideo/data/kinetics.py index 5de2c4ce..6cdb3646 100644 --- 
a/pytorchvideo/data/kinetics.py +++ b/pytorchvideo/data/kinetics.py @@ -5,7 +5,7 @@ import torch from pytorchvideo.data.clip_sampling import ClipSampler -from .labeled_video_dataset import LabeledVideoDataset, labeled_video_dataset +from .labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset """ diff --git a/pytorchvideo/data/ucf101.py b/pytorchvideo/data/ucf101.py index dcb04e04..a6453c8d 100644 --- a/pytorchvideo/data/ucf101.py +++ b/pytorchvideo/data/ucf101.py @@ -5,7 +5,7 @@ import torch from pytorchvideo.data.clip_sampling import ClipSampler -from .labeled_video_dataset import LabeledVideoDataset, labeled_video_dataset +from .labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset """ diff --git a/pytorchvideo/layers/__init__.py b/pytorchvideo/layers/__init__.py index 20326bb3..bc0715e0 100644 --- a/pytorchvideo/layers/__init__.py +++ b/pytorchvideo/layers/__init__.py @@ -3,7 +3,7 @@ from .attention import Mlp, MultiScaleAttention, MultiScaleBlock from .attention_torchscript import ScriptableMultiScaleBlock from .drop_path import DropPath -from .fusion import ConcatFusion, ReduceFusion, make_fusion_layer +from .fusion import ConcatFusion, make_fusion_layer, ReduceFusion from .mlp import make_multilayer_perceptron from .positional_encoding import PositionalEncoding, SpatioTemporalClsPositionalEncoding from .positional_encoding_torchscript import ( diff --git a/pytorchvideo/models/__init__.py b/pytorchvideo/models/__init__.py index f99f3c66..f8a8c4a4 100644 --- a/pytorchvideo/models/__init__.py +++ b/pytorchvideo/models/__init__.py @@ -1,10 +1,10 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
from .csn import create_csn -from .head import ResNetBasicHead, create_res_basic_head +from .head import create_res_basic_head, ResNetBasicHead from .masked_multistream import ( - LSTM, LearnMaskedDefault, + LSTM, MaskedMultiPathWay, MaskedSequential, MaskedTemporalPooling, @@ -14,6 +14,6 @@ from .net import MultiPathWayWithFuse, Net from .resnet import BottleneckBlock, create_bottleneck_block, create_resnet from .slowfast import create_slowfast -from .stem import ResNetBasicStem, create_conv_patch_embed, create_res_basic_stem +from .stem import create_conv_patch_embed, create_res_basic_stem, ResNetBasicStem from .vision_transformers import create_multiscale_vision_transformers from .weight_init import init_net_weights diff --git a/pytorchvideo/models/csn.py b/pytorchvideo/models/csn.py index 381800ac..4a154c09 100644 --- a/pytorchvideo/models/csn.py +++ b/pytorchvideo/models/csn.py @@ -5,7 +5,7 @@ import torch import torch.nn as nn from pytorchvideo.models.head import create_res_basic_head -from pytorchvideo.models.resnet import Net, create_bottleneck_block, create_res_stage +from pytorchvideo.models.resnet import create_bottleneck_block, create_res_stage, Net from pytorchvideo.models.stem import create_res_basic_stem diff --git a/pytorchvideo/models/hub/__init__.py b/pytorchvideo/models/hub/__init__.py index fca30471..218ead33 100644 --- a/pytorchvideo/models/hub/__init__.py +++ b/pytorchvideo/models/hub/__init__.py @@ -6,9 +6,9 @@ from .resnet import c2d_r50, i3d_r50, slow_r50, slow_r50_detection from .slowfast import ( slowfast_16x8_r101_50_50, + slowfast_r101, slowfast_r50, slowfast_r50_detection, - slowfast_r101, ) from .vision_transformers import mvit_base_16, mvit_base_16x4, mvit_base_32x3 from .x3d import x3d_l, x3d_m, x3d_s, x3d_xs diff --git a/pytorchvideo/models/hub/vision_transformers.py b/pytorchvideo/models/hub/vision_transformers.py index 3131257d..48ae623e 100644 --- a/pytorchvideo/models/hub/vision_transformers.py +++ 
b/pytorchvideo/models/hub/vision_transformers.py @@ -7,7 +7,7 @@ create_multiscale_vision_transformers, ) -from .utils import MODEL_ZOO_ROOT_DIR, hub_model_builder +from .utils import hub_model_builder, MODEL_ZOO_ROOT_DIR checkpoint_paths = { diff --git a/pytorchvideo/transforms/augmix.py b/pytorchvideo/transforms/augmix.py index 4c31e01f..d3f235c8 100644 --- a/pytorchvideo/transforms/augmix.py +++ b/pytorchvideo/transforms/augmix.py @@ -5,11 +5,11 @@ import torch from pytorchvideo.transforms.augmentations import ( _AUGMENTATION_MAX_LEVEL, - AugmentTransform, _decreasing_int_to_arg, _decreasing_to_arg, _increasing_magnitude_to_arg, _increasing_randomly_negate_to_arg, + AugmentTransform, ) from pytorchvideo.transforms.transforms import OpSampler diff --git a/pytorchvideo_trainer/pytorchvideo_trainer/module/byol.py b/pytorchvideo_trainer/pytorchvideo_trainer/module/byol.py index 1ff10e12..e4d726fd 100644 --- a/pytorchvideo_trainer/pytorchvideo_trainer/module/byol.py +++ b/pytorchvideo_trainer/pytorchvideo_trainer/module/byol.py @@ -10,7 +10,7 @@ from omegaconf import MISSING from pytorchvideo.models.resnet import create_resnet from pytorchvideo.models.weight_init import init_net_weights -from pytorchvideo_trainer.module.ssl_helper import SSLBaseModule, create_mlp_util +from pytorchvideo_trainer.module.ssl_helper import create_mlp_util, SSLBaseModule from pytorchvideo_trainer.module.video_classification import ( Batch, BatchKey, diff --git a/pytorchvideo_trainer/pytorchvideo_trainer/module/moco_v2.py b/pytorchvideo_trainer/pytorchvideo_trainer/module/moco_v2.py index 3ea06054..fdda0b6d 100644 --- a/pytorchvideo_trainer/pytorchvideo_trainer/module/moco_v2.py +++ b/pytorchvideo_trainer/pytorchvideo_trainer/module/moco_v2.py @@ -12,7 +12,7 @@ from omegaconf import MISSING from pytorchvideo.models.resnet import create_resnet from pytorchvideo.models.weight_init import init_net_weights -from pytorchvideo_trainer.module.ssl_helper import SSLBaseModule, create_mlp_util +from 
pytorchvideo_trainer.module.ssl_helper import create_mlp_util, SSLBaseModule from pytorchvideo_trainer.module.video_classification import ( Batch, BatchKey, diff --git a/pytorchvideo_trainer/pytorchvideo_trainer/module/video_classification.py b/pytorchvideo_trainer/pytorchvideo_trainer/module/video_classification.py index e601486f..b5aa1783 100644 --- a/pytorchvideo_trainer/pytorchvideo_trainer/module/video_classification.py +++ b/pytorchvideo_trainer/pytorchvideo_trainer/module/video_classification.py @@ -26,7 +26,7 @@ from omegaconf import MISSING, OmegaConf from pytorch_lightning.utilities import rank_zero_info from pytorchvideo_trainer.datamodule.transforms import MixVideoBatchWrapper -from pytorchvideo_trainer.module.lr_policy import LRSchedulerConf, get_epoch_lr, set_lr +from pytorchvideo_trainer.module.lr_policy import get_epoch_lr, LRSchedulerConf, set_lr from pytorchvideo_trainer.module.optimizer import construct_optimizer from torch import nn from torch.optim.lr_scheduler import _LRScheduler diff --git a/tests/test_data_dataset_manifest_utils.py b/tests/test_data_dataset_manifest_utils.py index 7293091a..4019e18a 100644 --- a/tests/test_data_dataset_manifest_utils.py +++ b/tests/test_data_dataset_manifest_utils.py @@ -9,7 +9,7 @@ VideoFrameInfo, VideoInfo, ) -from utils import MOCK_VIDEO_IDS, MOCK_VIDEO_INFOS, get_flat_video_frames +from utils import get_flat_video_frames, MOCK_VIDEO_IDS, MOCK_VIDEO_INFOS class TestDatasetManifestUtils(unittest.TestCase): diff --git a/tests/test_data_domsev_dataset.py b/tests/test_data_domsev_dataset.py index 863a4d1c..12ddaff8 100644 --- a/tests/test_data_domsev_dataset.py +++ b/tests/test_data_domsev_dataset.py @@ -10,17 +10,17 @@ from parameterized import parameterized from pytorchvideo.data.dataset_manifest_utils import VideoClipInfo, VideoDatasetType from pytorchvideo.data.domsev import ( - DomsevVideoDataset, - LabelData, _get_overlap_for_time_range_pair, _seconds_to_frame_index, + DomsevVideoDataset, + LabelData, ) 
from pytorchvideo.data.utils import save_dataclass_objs_to_headered_csv from utils import ( - MOCK_VIDEO_IDS, - MOCK_VIDEO_INFOS, get_encoded_video_infos, get_flat_video_frames, + MOCK_VIDEO_IDS, + MOCK_VIDEO_INFOS, ) diff --git a/tests/test_data_epic_kitchen_dataset.py b/tests/test_data_epic_kitchen_dataset.py index f681e9ec..4100c522 100644 --- a/tests/test_data_epic_kitchen_dataset.py +++ b/tests/test_data_epic_kitchen_dataset.py @@ -12,10 +12,10 @@ from pytorchvideo.data.epic_kitchen import ActionData, EpicKitchenDataset from pytorchvideo.data.utils import save_dataclass_objs_to_headered_csv from utils import ( - MOCK_VIDEO_IDS, - MOCK_VIDEO_INFOS, get_encoded_video_infos, get_flat_video_frames, + MOCK_VIDEO_IDS, + MOCK_VIDEO_INFOS, ) diff --git a/tests/test_data_labeled_video_dataset.py b/tests/test_data_labeled_video_dataset.py index 7f6771a8..f899a6f9 100644 --- a/tests/test_data_labeled_video_dataset.py +++ b/tests/test_data_labeled_video_dataset.py @@ -22,8 +22,8 @@ from pytorchvideo.data import Hmdb51 from pytorchvideo.data.clip_sampling import make_clip_sampler from pytorchvideo.data.labeled_video_dataset import ( - LabeledVideoDataset, labeled_video_dataset, + LabeledVideoDataset, ) from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths from pytorchvideo.data.utils import MultiProcessSampler, thwc_to_cthw diff --git a/tests/test_layers_nonlocal_net.py b/tests/test_layers_nonlocal_net.py index d3075aa1..0f35aa55 100644 --- a/tests/test_layers_nonlocal_net.py +++ b/tests/test_layers_nonlocal_net.py @@ -6,7 +6,7 @@ import numpy as np import torch -from pytorchvideo.layers.nonlocal_net import NonLocal, create_nonlocal +from pytorchvideo.layers.nonlocal_net import create_nonlocal, NonLocal from torch import nn diff --git a/tests/test_models_head.py b/tests/test_models_head.py index d597d7d3..e9e98087 100644 --- a/tests/test_models_head.py +++ b/tests/test_models_head.py @@ -6,12 +6,12 @@ import numpy as np import torch from 
pytorchvideo.models.head import ( - ResNetBasicHead, - ResNetRoIHead, - SequencePool, create_res_basic_head, create_res_roi_pooling_head, create_vit_basic_head, + ResNetBasicHead, + ResNetRoIHead, + SequencePool, ) from torch import nn from torchvision.ops import RoIAlign diff --git a/tests/test_models_masked_multistream.py b/tests/test_models_masked_multistream.py index 292b6a9b..fa97b487 100644 --- a/tests/test_models_masked_multistream.py +++ b/tests/test_models_masked_multistream.py @@ -5,10 +5,10 @@ import torch import torch.nn -from pytorchvideo.layers import PositionalEncoding, make_multilayer_perceptron +from pytorchvideo.layers import make_multilayer_perceptron, PositionalEncoding from pytorchvideo.models.masked_multistream import ( - LSTM, LearnMaskedDefault, + LSTM, MaskedSequential, MaskedTemporalPooling, TransposeMultiheadAttention, diff --git a/tests/test_models_resnet.py b/tests/test_models_resnet.py index cbd271ca..a95c2f42 100644 --- a/tests/test_models_resnet.py +++ b/tests/test_models_resnet.py @@ -10,9 +10,6 @@ from pytorchvideo.models.net import Net from pytorchvideo.models.resnet import ( BottleneckBlock, - ResBlock, - ResStage, - SeparableBottleneckBlock, create_acoustic_bottleneck_block, create_acoustic_resnet, create_bottleneck_block, @@ -20,6 +17,9 @@ create_res_stage, create_resnet, create_resnet_with_roi_head, + ResBlock, + ResStage, + SeparableBottleneckBlock, ) from pytorchvideo.models.stem import ResNetBasicStem from torch import nn diff --git a/tests/test_models_stem.py b/tests/test_models_stem.py index 5056bf8a..28b6d1d1 100644 --- a/tests/test_models_stem.py +++ b/tests/test_models_stem.py @@ -7,9 +7,9 @@ import torch from pytorchvideo.layers.convolutions import ConvReduce3D from pytorchvideo.models.stem import ( - ResNetBasicStem, create_acoustic_res_basic_stem, create_res_basic_stem, + ResNetBasicStem, ) from torch import nn diff --git a/tests/test_transforms.py b/tests/test_transforms.py index 6b9a5fa6..285bf9d3 100644 --- 
a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -10,6 +10,7 @@ from pytorchvideo.transforms import ( ApplyTransformToKey, AugMix, + create_video_transform, CutMix, MixUp, MixVideo, @@ -22,7 +23,6 @@ ShortSideScale, UniformCropVideo, UniformTemporalSubsample, - create_video_transform, ) from pytorchvideo.transforms.functional import ( clip_boxes_to_image,