[ruff] Doc-string should start at the first line [4/n] (pyg-team#8333)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
rusty1s and pre-commit-ci[bot] authored Nov 6, 2023
1 parent 4535cf9 commit 62b386f
Showing 54 changed files with 118 additions and 111 deletions.
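For context: this commit enforces ruff's pydocstyle rule D212 ("Multi-line docstring summary should start at the first line"), which is removed from the ignore list in pyproject.toml below. A minimal before/after sketch of the pattern, using a hypothetical load_graph helper that is not part of this diff:

# Hypothetical example, not from the PyG codebase.

# Before (what D212 flags once enforced): the summary sits on the
# second line of the docstring.
def load_graph_before(path):
    """
    Load a graph from disk.

    Args:
        path (str): Path to the input file.
    """


# After (the style this commit applies): the summary starts on the same
# line as the opening quotes; the rest of the docstring is unchanged.
def load_graph_after(path):
    """Load a graph from disk.

    Args:
        path (str): Path to the input file.
    """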
3 changes: 1 addition & 2 deletions examples/contrib/pgm_explainer_graph_classification.py
@@ -1,5 +1,4 @@
-"""
-This is an example of using the PGM explainer algorithm
+"""This is an example of using the PGM explainer algorithm
on a graph classification task
"""
import os.path as osp
3 changes: 1 addition & 2 deletions examples/contrib/pgm_explainer_node_classification.py
@@ -1,5 +1,4 @@
-"""
-This is an example of using the PGM explainer algorithm
+"""This is an example of using the PGM explainer algorithm
on a node classification task
"""
import os.path as osp
3 changes: 1 addition & 2 deletions examples/equilibrium_median.py
@@ -1,5 +1,4 @@
-r"""
-Replicates the experiment from `"Deep Graph Infomax"
+r"""Replicates the experiment from `"Deep Graph Infomax"
<https://arxiv.org/abs/1809.10341>`_ to try and teach
`EquilibriumAggregation` to learn to take the median of
a set of numbers
3 changes: 1 addition & 2 deletions examples/multi_gpu/distributed_sampling_xpu.py
@@ -1,5 +1,4 @@
-"""
-Distributed GAT training, targeting XPU devices.
+"""Distributed GAT training, targeting XPU devices.
PVC has 2 tiles, each reports itself as a separate
device. DDP approach allows us to employ both tiles.
3 changes: 1 addition & 2 deletions examples/multi_gpu/multinode_multigpu_papers100m_gcn.py
@@ -1,5 +1,4 @@
-"""
-To run:
+"""To run:
srun -l -N<num_nodes> --ntasks-per-node=<ngpu_per_node> \
--container-name=cont --container-image=<image_url> \
--container-mounts=/ogb-papers100m/:/workspace/dataset
3 changes: 1 addition & 2 deletions examples/randlanet_classification.py
@@ -1,5 +1,4 @@
-"""
-An adaptation of RandLA-Net to the classification task, which was not
+"""An adaptation of RandLA-Net to the classification task, which was not
addressed in the paper:
RandLA-Net: Efficient Semantic Segmentation of Large-Scale Point Clouds
Reference: https://arxiv.org/abs/1911.11236
3 changes: 1 addition & 2 deletions examples/randlanet_segmentation.py
@@ -1,5 +1,4 @@
-"""
-An implementation of RandLA-Net based on the paper:
+"""An implementation of RandLA-Net based on the paper:
RandLA-Net: Efficient Semantic Segmentation of Large-Scale Point Clouds
Reference: https://arxiv.org/abs/1911.11236
"""
3 changes: 1 addition & 2 deletions graphgym/custom_graphgym/encoder/example.py
@@ -9,8 +9,7 @@

@register_node_encoder('example')
class ExampleNodeEncoder(torch.nn.Module):
-"""
-Provides an encoder for integer node features.
+"""Provides an encoder for integer node features.
Args:
num_classes (int): The number of classes for the embedding mapping to
1 change: 0 additions & 1 deletion pyproject.toml
@@ -131,7 +131,6 @@ ignore = [
"D107", # TODO: Don't ignore "Missing docstring in __init__"
"D200", # Ignore "One-line docstring should fit on one line"
"D205", # Ignore "1 blank line required between summary line and description"
-"D212", # Ignore "Multi-line docstring summary should start at the first line"
"D415", # Ignore "First line should end with a period, question mark, or exclamation point"

]
8 changes: 4 additions & 4 deletions torch_geometric/data/feature_store.py
@@ -1,7 +1,7 @@
-r"""
-This class defines the abstraction for a backend-agnostic feature store. The
-goal of the feature store is to abstract away all node and edge feature memory
-management so that varying implementations can allow for independent scale-out.
+r"""This class defines the abstraction for a backend-agnostic feature store.
+The goal of the feature store is to abstract away all node and edge feature
+memory management so that varying implementations can allow for independent
+scale-out.
This particular feature store abstraction makes a few key assumptions:
* The features we care about storing are node and edge features of a graph.
3 changes: 1 addition & 2 deletions torch_geometric/data/graph_store.py
@@ -1,5 +1,4 @@
-r"""
-This class defines the abstraction for a backend-agnostic graph store. The
+r"""This class defines the abstraction for a backend-agnostic graph store. The
goal of the graph store is to abstract away all graph edge index memory
management so that varying implementations can allow for independent scale-out.
18 changes: 6 additions & 12 deletions torch_geometric/graphgym/config.py
@@ -21,8 +21,7 @@


def set_cfg(cfg):
-r"""
-This function sets the default config value.
+r"""This function sets the default config value.
1) Note that for an experiment, only part of the arguments will be used
The remaining unused arguments won't affect anything.
So feel free to register any argument in graphgym.contrib.config
@@ -476,8 +475,7 @@ def assert_cfg(cfg):


def dump_cfg(cfg):
-r"""
-Dumps the config to the output directory specified in
+r"""Dumps the config to the output directory specified in
:obj:`cfg.out_dir`
Args:
@@ -490,8 +488,7 @@ def dump_cfg(cfg):


def load_cfg(cfg, args):
-r"""
-Load configurations from file system and command line
+r"""Load configurations from file system and command line
Args:
cfg (CfgNode): Configuration node
@@ -509,8 +506,7 @@ def makedirs_rm_exist(dir):


def get_fname(fname):
-r"""
-Extract filename from file name path
+r"""Extract filename from file name path
Args:
fname (str): Filename for the yaml format configuration file
@@ -524,8 +520,7 @@ def get_fname(fname):


def set_out_dir(out_dir, fname):
-r"""
-Create the directory for full experiment run
+r"""Create the directory for full experiment run
Args:
out_dir (str): Directory for output, specified in :obj:`cfg.out_dir`
@@ -541,8 +536,7 @@ def set_out_dir(out_dir, fname):


def set_run_dir(out_dir):
-r"""
-Create the directory for each random seed experiment run
+r"""Create the directory for each random seed experiment run
Args:
out_dir (str): Directory for output, specified in :obj:`cfg.out_dir`
3 changes: 1 addition & 2 deletions torch_geometric/graphgym/init.py
@@ -2,8 +2,7 @@


def init_weights(m):
-r"""
-Performs weight initialization
+r"""Performs weight initialization
Args:
m (nn.Module): PyTorch module
20 changes: 6 additions & 14 deletions torch_geometric/graphgym/loader.py
@@ -49,8 +49,7 @@ def planetoid_dataset(name: str) -> Callable:


def load_pyg(name, dataset_dir):
-"""
-Load PyG dataset objects. (More PyG datasets will be supported)
+"""Load PyG dataset objects. (More PyG datasets will be supported)
Args:
name (str): dataset name
@@ -101,9 +100,7 @@ def set_dataset_attr(dataset, name, value, size):


def load_ogb(name, dataset_dir):
-r"""
-Load OGB dataset objects.
+r"""Load OGB dataset objects.
Args:
@@ -173,9 +170,7 @@ def load_ogb(name, dataset_dir):


def load_dataset():
-r"""
-Load dataset objects.
+r"""Load dataset objects.
Returns: PyG dataset object
@@ -200,8 +195,7 @@ def load_dataset():


def set_dataset_info(dataset):
-r"""
-Set global dataset information
+r"""Set global dataset information
Args:
dataset: PyG dataset object
@@ -233,8 +227,7 @@ def set_dataset_info(dataset):


def create_dataset():
-r"""
-Create dataset object
+r"""Create dataset object
Returns: PyG dataset object
@@ -311,8 +304,7 @@ def get_loader(dataset, sampler, batch_size, shuffle=True):


def create_loader():
-"""
-Create data loader object
+"""Create data loader object
Returns: List of PyTorch data loaders
3 changes: 1 addition & 2 deletions torch_geometric/graphgym/logger.py
@@ -15,8 +15,7 @@


def set_printing():
-"""
-Set up printing options
+"""Set up printing options
"""
logging.root.handlers = []
3 changes: 1 addition & 2 deletions torch_geometric/graphgym/loss.py
@@ -6,8 +6,7 @@


def compute_loss(pred, true):
-"""
-Compute loss and prediction score
+"""Compute loss and prediction score
Args:
pred (torch.tensor): Unnormalized prediction
6 changes: 2 additions & 4 deletions torch_geometric/graphgym/models/transform.py
@@ -4,8 +4,7 @@


def create_link_label(pos_edge_index, neg_edge_index):
-"""
-Create labels for link prediction, based on positive and negative edges
+"""Create labels for link prediction, based on positive and negative edges
Args:
pos_edge_index (torch.tensor): Positive edge index [2, num_edges]
@@ -22,8 +21,7 @@ def create_link_label(pos_edge_index, neg_edge_index):


def neg_sampling_transform(data):
-"""
-Do negative sampling for link prediction tasks
+"""Do negative sampling for link prediction tasks
Args:
data (torch_geometric.data): Input data object
9 changes: 3 additions & 6 deletions torch_geometric/graphgym/utils/agg_runs.py
@@ -43,8 +43,7 @@ def join_list(l1, l2):


def agg_dict_list(dict_list):
-"""
-Aggregate a list of dictionaries: mean + std
+"""Aggregate a list of dictionaries: mean + std
Args:
dict_list: list of dictionaries
@@ -80,8 +79,7 @@ def rm_keys(dict, keys):


def agg_runs(dir, metric_best='auto'):
-r"""
-Aggregate over different random seeds of a single experiment
+r"""Aggregate over different random seeds of a single experiment
Args:
dir (str): Directory of the results, containing 1 experiment
@@ -161,8 +159,7 @@ def agg_runs(dir, metric_best='auto'):


def agg_batch(dir, metric_best='auto'):
-r"""
-Aggregate across results from multiple experiments via grid search
+r"""Aggregate across results from multiple experiments via grid search
Args:
dir (str): Directory of the results, containing multiple experiments
3 changes: 2 additions & 1 deletion torch_geometric/nn/aggr/base.py
@@ -68,7 +68,8 @@ def forward(
dim: int = -2,
max_num_elements: Optional[int] = None,
) -> Tensor:
-r"""
+r"""Forward pass.
Args:
x (torch.Tensor): The source tensor.
index (torch.Tensor, optional): The indices of elements for
3 changes: 1 addition & 2 deletions torch_geometric/nn/aggr/equilibrium.py
@@ -48,8 +48,7 @@ def forward(self, x: Tensor, y: Tensor, index: Optional[Tensor],


class MomentumOptimizer(torch.nn.Module):
-r"""
-Provides an inner loop optimizer for the implicitly defined output
+r"""Provides an inner loop optimizer for the implicitly defined output
layer. It is based on an unrolled Nesterov momentum algorithm.
Args:
2 changes: 1 addition & 1 deletion torch_geometric/nn/attention/performer.py
@@ -145,7 +145,7 @@ def __init__(
self.dropout = torch.nn.Dropout(dropout)

def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor:
-r"""
+r"""Forward pass.
Args:
x (torch.Tensor): Node feature tensor
3 changes: 1 addition & 2 deletions torch_geometric/nn/conv/han_conv.py
@@ -31,8 +31,7 @@ def group(


class HANConv(MessagePassing):
-r"""
-The Heterogenous Graph Attention Operator from the
+r"""The Heterogenous Graph Attention Operator from the
`"Heterogenous Graph Attention Network"
<https://arxiv.org/pdf/1903.07293.pdf>`_ paper.
3 changes: 2 additions & 1 deletion torch_geometric/nn/dense/dense_gat_conv.py
@@ -55,7 +55,8 @@ def reset_parameters(self):

def forward(self, x: Tensor, adj: Tensor, mask: Optional[Tensor] = None,
add_loop: bool = True):
-r"""
+r"""Forward pass.
Args:
x (torch.Tensor): Node feature tensor
:math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`, with
3 changes: 2 additions & 1 deletion torch_geometric/nn/dense/dense_gcn_conv.py
@@ -39,7 +39,8 @@ def reset_parameters(self):

def forward(self, x: Tensor, adj: Tensor, mask: OptTensor = None,
add_loop: bool = True) -> Tensor:
-r"""
+r"""Forward pass.
Args:
x (torch.Tensor): Node feature tensor
:math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`, with
3 changes: 2 additions & 1 deletion torch_geometric/nn/dense/dense_gin_conv.py
@@ -32,7 +32,8 @@ def reset_parameters(self):

def forward(self, x: Tensor, adj: Tensor, mask: Optional[Tensor] = None,
add_loop: bool = True) -> Tensor:
-r"""
+r"""Forward pass.
Args:
x (torch.Tensor): Node feature tensor
:math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`, with
3 changes: 2 additions & 1 deletion torch_geometric/nn/dense/dense_graph_conv.py
@@ -33,7 +33,8 @@ def reset_parameters(self):

def forward(self, x: Tensor, adj: Tensor,
mask: Optional[Tensor] = None) -> Tensor:
-r"""
+r"""Forward pass.
Args:
x (torch.Tensor): Node feature tensor
:math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`, with
3 changes: 2 additions & 1 deletion torch_geometric/nn/dense/dense_sage_conv.py
@@ -41,7 +41,8 @@ def reset_parameters(self):

def forward(self, x: Tensor, adj: Tensor,
mask: OptTensor = None) -> Tensor:
-r"""
+r"""Forward pass.
Args:
x (torch.Tensor): Node feature tensor
:math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`, with
3 changes: 2 additions & 1 deletion torch_geometric/nn/dense/dmon_pool.py
@@ -81,7 +81,8 @@ def forward(
adj: Tensor,
mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
-r"""
+r"""Forward pass.
Args:
x (torch.Tensor): Node feature tensor
:math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`, with