apply import merging for fbcode (11 of 11)
Summary:
Applies new import merging and sorting from µsort v1.0.

When merging imports, µsort will make a best effort to move associated
comments to match merged elements, but there are known limitations due to
the dynamic nature of Python and developer tooling. These changes should
not produce any dangerous runtime changes, but may require touch-ups to
satisfy linters and other tooling.
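
For illustration only, a hypothetical before/after of the merging behavior
described above (these exact lines are not taken from this diff); the
associated "# noqa" comment is carried onto the merged statement:

    # before: two from-imports of the same module
    from .labeled_video_dataset import labeled_video_dataset  # noqa
    from .labeled_video_dataset import LabeledVideoDataset

    # after µsort merges and sorts the two imports
    from .labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset  # noqa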

Note that µsort uses case-insensitive, lexicographical sorting, which
results in a different ordering compared to isort. This provides a more
consistent sorting order, matching the case-insensitive order used when
sorting import statements by module name, and ensures that "frog", "FROG",
and "Frog" always sort next to each other.

For details on µsort's sorting and merging semantics, see the user guide:
https://usort.readthedocs.io/en/stable/guide.html#sorting
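
A minimal sketch of this ordering in plain Python, assuming str.casefold as
the sort key (an approximation; µsort's actual key may differ):

    names = ["FROG", "Frog", "frog", "LabeledVideoDataset", "labeled_video_dataset"]

    # Case-sensitive sort splits the case variants apart (uppercase sorts first).
    print(sorted(names))
    # ['FROG', 'Frog', 'LabeledVideoDataset', 'frog', 'labeled_video_dataset']

    # Case-insensitive sort keeps "frog", "FROG", "Frog" together and places
    # labeled_video_dataset before LabeledVideoDataset, matching the
    # reordering applied throughout the files below.
    print(sorted(names, key=str.casefold))
    # ['FROG', 'Frog', 'frog', 'labeled_video_dataset', 'LabeledVideoDataset']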

Reviewed By: lisroach

Differential Revision: D36402260

fbshipit-source-id: 7cb52f09b740ccc580e61e6d1787d27381a8ce00
amyreese authored and facebook-github-bot committed May 15, 2022
1 parent 825e8aa commit 5afca5f
Showing 24 changed files with 35 additions and 35 deletions.
2 changes: 1 addition & 1 deletion hubconf.py
@@ -14,9 +14,9 @@
slow_r50,
slow_r50_detection,
slowfast_16x8_r101_50_50,
+    slowfast_r101,
slowfast_r50,
slowfast_r50_detection,
-    slowfast_r101,
x3d_l,
x3d_m,
x3d_s,
2 changes: 1 addition & 1 deletion pytorchvideo/data/__init__.py
@@ -13,6 +13,6 @@
from .epic_kitchen_recognition import EpicKitchenRecognition # noqa
from .hmdb51 import Hmdb51 # noqa
from .kinetics import Kinetics # noqa
-from .labeled_video_dataset import LabeledVideoDataset, labeled_video_dataset  # noqa
+from .labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset  # noqa
from .ssv2 import SSv2
from .ucf101 import Ucf101 # noqa
2 changes: 1 addition & 1 deletion pytorchvideo/data/epic_kitchen/epic_kitchen_dataset.py
@@ -7,12 +7,12 @@
import torch
from pytorchvideo.data.dataset_manifest_utils import (
EncodedVideoInfo,
+    get_seconds_from_hms_time,
VideoClipInfo,
VideoDataset,
VideoDatasetType,
VideoFrameInfo,
VideoInfo,
-    get_seconds_from_hms_time,
)
from pytorchvideo.data.frame_video import FrameVideo
from pytorchvideo.data.utils import DataclassFieldCaster, load_dataclass_dict_from_csv
2 changes: 1 addition & 1 deletion pytorchvideo/data/kinetics.py
@@ -5,7 +5,7 @@
import torch
from pytorchvideo.data.clip_sampling import ClipSampler

-from .labeled_video_dataset import LabeledVideoDataset, labeled_video_dataset
+from .labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset


"""
2 changes: 1 addition & 1 deletion pytorchvideo/data/ucf101.py
@@ -5,7 +5,7 @@
import torch
from pytorchvideo.data.clip_sampling import ClipSampler

-from .labeled_video_dataset import LabeledVideoDataset, labeled_video_dataset
+from .labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset


"""
2 changes: 1 addition & 1 deletion pytorchvideo/layers/__init__.py
@@ -3,7 +3,7 @@
from .attention import Mlp, MultiScaleAttention, MultiScaleBlock
from .attention_torchscript import ScriptableMultiScaleBlock
from .drop_path import DropPath
-from .fusion import ConcatFusion, ReduceFusion, make_fusion_layer
+from .fusion import ConcatFusion, make_fusion_layer, ReduceFusion
from .mlp import make_multilayer_perceptron
from .positional_encoding import PositionalEncoding, SpatioTemporalClsPositionalEncoding
from .positional_encoding_torchscript import (
6 changes: 3 additions & 3 deletions pytorchvideo/models/__init__.py
@@ -1,10 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

from .csn import create_csn
-from .head import ResNetBasicHead, create_res_basic_head
+from .head import create_res_basic_head, ResNetBasicHead
from .masked_multistream import (
-    LSTM,
LearnMaskedDefault,
+    LSTM,
MaskedMultiPathWay,
MaskedSequential,
MaskedTemporalPooling,
@@ -14,6 +14,6 @@
from .net import MultiPathWayWithFuse, Net
from .resnet import BottleneckBlock, create_bottleneck_block, create_resnet
from .slowfast import create_slowfast
-from .stem import ResNetBasicStem, create_conv_patch_embed, create_res_basic_stem
+from .stem import create_conv_patch_embed, create_res_basic_stem, ResNetBasicStem
from .vision_transformers import create_multiscale_vision_transformers
from .weight_init import init_net_weights
2 changes: 1 addition & 1 deletion pytorchvideo/models/csn.py
@@ -5,7 +5,7 @@
import torch
import torch.nn as nn
from pytorchvideo.models.head import create_res_basic_head
-from pytorchvideo.models.resnet import Net, create_bottleneck_block, create_res_stage
+from pytorchvideo.models.resnet import create_bottleneck_block, create_res_stage, Net
from pytorchvideo.models.stem import create_res_basic_stem


2 changes: 1 addition & 1 deletion pytorchvideo/models/hub/__init__.py
@@ -6,9 +6,9 @@
from .resnet import c2d_r50, i3d_r50, slow_r50, slow_r50_detection
from .slowfast import (
slowfast_16x8_r101_50_50,
+    slowfast_r101,
slowfast_r50,
slowfast_r50_detection,
-    slowfast_r101,
)
from .vision_transformers import mvit_base_16, mvit_base_16x4, mvit_base_32x3
from .x3d import x3d_l, x3d_m, x3d_s, x3d_xs
2 changes: 1 addition & 1 deletion pytorchvideo/models/hub/vision_transformers.py
@@ -7,7 +7,7 @@
create_multiscale_vision_transformers,
)

-from .utils import MODEL_ZOO_ROOT_DIR, hub_model_builder
+from .utils import hub_model_builder, MODEL_ZOO_ROOT_DIR


checkpoint_paths = {
2 changes: 1 addition & 1 deletion pytorchvideo/transforms/augmix.py
@@ -5,11 +5,11 @@
import torch
from pytorchvideo.transforms.augmentations import (
_AUGMENTATION_MAX_LEVEL,
-    AugmentTransform,
_decreasing_int_to_arg,
_decreasing_to_arg,
_increasing_magnitude_to_arg,
_increasing_randomly_negate_to_arg,
+    AugmentTransform,
)
from pytorchvideo.transforms.transforms import OpSampler

2 changes: 1 addition & 1 deletion pytorchvideo_trainer/pytorchvideo_trainer/module/byol.py
@@ -10,7 +10,7 @@
from omegaconf import MISSING
from pytorchvideo.models.resnet import create_resnet
from pytorchvideo.models.weight_init import init_net_weights
-from pytorchvideo_trainer.module.ssl_helper import SSLBaseModule, create_mlp_util
+from pytorchvideo_trainer.module.ssl_helper import create_mlp_util, SSLBaseModule
from pytorchvideo_trainer.module.video_classification import (
Batch,
BatchKey,
@@ -12,7 +12,7 @@
from omegaconf import MISSING
from pytorchvideo.models.resnet import create_resnet
from pytorchvideo.models.weight_init import init_net_weights
-from pytorchvideo_trainer.module.ssl_helper import SSLBaseModule, create_mlp_util
+from pytorchvideo_trainer.module.ssl_helper import create_mlp_util, SSLBaseModule
from pytorchvideo_trainer.module.video_classification import (
Batch,
BatchKey,
@@ -26,7 +26,7 @@
from omegaconf import MISSING, OmegaConf
from pytorch_lightning.utilities import rank_zero_info
from pytorchvideo_trainer.datamodule.transforms import MixVideoBatchWrapper
-from pytorchvideo_trainer.module.lr_policy import LRSchedulerConf, get_epoch_lr, set_lr
+from pytorchvideo_trainer.module.lr_policy import get_epoch_lr, LRSchedulerConf, set_lr
from pytorchvideo_trainer.module.optimizer import construct_optimizer
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler
2 changes: 1 addition & 1 deletion tests/test_data_dataset_manifest_utils.py
@@ -9,7 +9,7 @@
VideoFrameInfo,
VideoInfo,
)
-from utils import MOCK_VIDEO_IDS, MOCK_VIDEO_INFOS, get_flat_video_frames
+from utils import get_flat_video_frames, MOCK_VIDEO_IDS, MOCK_VIDEO_INFOS


class TestDatasetManifestUtils(unittest.TestCase):
8 changes: 4 additions & 4 deletions tests/test_data_domsev_dataset.py
@@ -10,17 +10,17 @@
from parameterized import parameterized
from pytorchvideo.data.dataset_manifest_utils import VideoClipInfo, VideoDatasetType
from pytorchvideo.data.domsev import (
-    DomsevVideoDataset,
-    LabelData,
_get_overlap_for_time_range_pair,
_seconds_to_frame_index,
+    DomsevVideoDataset,
+    LabelData,
)
from pytorchvideo.data.utils import save_dataclass_objs_to_headered_csv
from utils import (
-    MOCK_VIDEO_IDS,
-    MOCK_VIDEO_INFOS,
get_encoded_video_infos,
get_flat_video_frames,
+    MOCK_VIDEO_IDS,
+    MOCK_VIDEO_INFOS,
)


4 changes: 2 additions & 2 deletions tests/test_data_epic_kitchen_dataset.py
@@ -12,10 +12,10 @@
from pytorchvideo.data.epic_kitchen import ActionData, EpicKitchenDataset
from pytorchvideo.data.utils import save_dataclass_objs_to_headered_csv
from utils import (
-    MOCK_VIDEO_IDS,
-    MOCK_VIDEO_INFOS,
get_encoded_video_infos,
get_flat_video_frames,
+    MOCK_VIDEO_IDS,
+    MOCK_VIDEO_INFOS,
)


2 changes: 1 addition & 1 deletion tests/test_data_labeled_video_dataset.py
@@ -22,8 +22,8 @@
from pytorchvideo.data import Hmdb51
from pytorchvideo.data.clip_sampling import make_clip_sampler
from pytorchvideo.data.labeled_video_dataset import (
-    LabeledVideoDataset,
labeled_video_dataset,
+    LabeledVideoDataset,
)
from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths
from pytorchvideo.data.utils import MultiProcessSampler, thwc_to_cthw
2 changes: 1 addition & 1 deletion tests/test_layers_nonlocal_net.py
@@ -6,7 +6,7 @@

import numpy as np
import torch
-from pytorchvideo.layers.nonlocal_net import NonLocal, create_nonlocal
+from pytorchvideo.layers.nonlocal_net import create_nonlocal, NonLocal
from torch import nn


6 changes: 3 additions & 3 deletions tests/test_models_head.py
@@ -6,12 +6,12 @@
import numpy as np
import torch
from pytorchvideo.models.head import (
-    ResNetBasicHead,
-    ResNetRoIHead,
-    SequencePool,
create_res_basic_head,
create_res_roi_pooling_head,
create_vit_basic_head,
+    ResNetBasicHead,
+    ResNetRoIHead,
+    SequencePool,
)
from torch import nn
from torchvision.ops import RoIAlign
4 changes: 2 additions & 2 deletions tests/test_models_masked_multistream.py
@@ -5,10 +5,10 @@

import torch
import torch.nn
-from pytorchvideo.layers import PositionalEncoding, make_multilayer_perceptron
+from pytorchvideo.layers import make_multilayer_perceptron, PositionalEncoding
from pytorchvideo.models.masked_multistream import (
-    LSTM,
LearnMaskedDefault,
+    LSTM,
MaskedSequential,
MaskedTemporalPooling,
TransposeMultiheadAttention,
6 changes: 3 additions & 3 deletions tests/test_models_resnet.py
@@ -10,16 +10,16 @@
from pytorchvideo.models.net import Net
from pytorchvideo.models.resnet import (
BottleneckBlock,
-    ResBlock,
-    ResStage,
-    SeparableBottleneckBlock,
create_acoustic_bottleneck_block,
create_acoustic_resnet,
create_bottleneck_block,
create_res_block,
create_res_stage,
create_resnet,
create_resnet_with_roi_head,
+    ResBlock,
+    ResStage,
+    SeparableBottleneckBlock,
)
from pytorchvideo.models.stem import ResNetBasicStem
from torch import nn
2 changes: 1 addition & 1 deletion tests/test_models_stem.py
@@ -7,9 +7,9 @@
import torch
from pytorchvideo.layers.convolutions import ConvReduce3D
from pytorchvideo.models.stem import (
-    ResNetBasicStem,
create_acoustic_res_basic_stem,
create_res_basic_stem,
+    ResNetBasicStem,
)
from torch import nn

2 changes: 1 addition & 1 deletion tests/test_transforms.py
@@ -10,6 +10,7 @@
from pytorchvideo.transforms import (
ApplyTransformToKey,
AugMix,
+    create_video_transform,
CutMix,
MixUp,
MixVideo,
@@ -22,7 +23,6 @@
ShortSideScale,
UniformCropVideo,
UniformTemporalSubsample,
-    create_video_transform,
)
from pytorchvideo.transforms.functional import (
clip_boxes_to_image,
