Fix internal lint errors (tensorflow#6937)
haoyuz authored and tfboyd committed May 31, 2019
1 parent ba41541 commit 7546a9e
Showing 5 changed files with 16 additions and 16 deletions.
1 change: 1 addition & 0 deletions official/recommendation/ncf_common.py
@@ -335,6 +335,7 @@ def xla_validator(flag_dict):
help=flags_core.help_wrap(
"If True, we use a custom training loop for keras."))


def convert_to_softmax_logits(logits):
'''Convert the logits returned by the base model to softmax logits.
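Note: the body of convert_to_softmax_logits is truncated in the hunk above. As a rough, hypothetical sketch of the idea described in its docstring (the actual implementation is not shown here), prepending a column of zeros to the raw logits makes a two-way softmax reproduce the sigmoid probabilities of the base model:

import tensorflow as tf

def convert_to_softmax_logits_sketch(logits):
  # logits: a [batch, 1] tensor of raw scores from the base model.
  # softmax([0, x]) along the last axis is [1 - sigmoid(x), sigmoid(x)], so a
  # zero column turns sigmoid-style logits into 2-class softmax logits.
  return tf.concat([tf.zeros_like(logits), logits], axis=1)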
2 changes: 1 addition & 1 deletion official/recommendation/ncf_keras_main.py
@@ -353,7 +353,7 @@ def step_fn(inputs):
train_loss += train_step()
time_callback.on_batch_end(step+epoch*num_train_steps)
logging.info("Done training epoch %s, epoch loss=%s.",
- epoch+1, train_loss/num_train_steps)
+ epoch+1, train_loss/num_train_steps)
eval_input_iterator.initialize()
hr_sum = 0
hr_count = 0
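The hunk above sits inside the custom Keras training loop that the flag help text in ncf_common.py refers to. A minimal, self-contained sketch of that pattern (toy model and data, illustrative only, not the repository's code):

from absl import logging
import tensorflow as tf

def run_custom_loop(num_epochs=2, num_train_steps=10, batch_size=32):
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  optimizer = tf.keras.optimizers.SGD(0.01)
  loss_fn = tf.keras.losses.MeanSquaredError()
  features = tf.random.normal([batch_size, 4])
  labels = tf.random.normal([batch_size, 1])

  def train_step():
    # One manual optimizer update; returns the scalar loss for accumulation.
    with tf.GradientTape() as tape:
      loss = loss_fn(labels, model(features))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

  for epoch in range(num_epochs):
    train_loss = 0.0
    for step in range(num_train_steps):
      train_loss += train_step()
    # Per-epoch reporting, as in the logging.info call shown in the diff.
    logging.info("Done training epoch %s, epoch loss=%s.",
                 epoch + 1, train_loss / num_train_steps)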
24 changes: 11 additions & 13 deletions official/resnet/keras/keras_imagenet_benchmark.py
@@ -603,9 +603,7 @@ def benchmark_xla_8_gpu_fp16_tweaked(self):
self._run_and_report_benchmark()

def benchmark_xla_8_gpu_fp16_cloning_tweaked(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs, fp16, and
cloning.
"""
"""Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning."""
self._setup()

FLAGS.num_gpus = 8
@@ -623,8 +621,9 @@ def benchmark_xla_8_gpu_fp16_cloning_tweaked(self):
self._run_and_report_benchmark()

def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs and fp16. Delay
performance measurement for stable performance on 96 vCPU platforms.
"""Test with manual config tuning, XLA, 8 GPUs and fp16.
Delay performance measurement for stable performance on 96 vCPU platforms.
"""
self._setup()

@@ -643,9 +642,9 @@ def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
self._run_and_report_benchmark()

def benchmark_xla_8_gpu_fp16_cloning_tweaked_delay_measure(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs, fp16, and
cloning. Delay performance measurement for stable performance on 96 vCPU
platforms.
"""Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning.
Delay performance measurement for stable performance on 96 vCPU platforms.
"""
self._setup()

@@ -821,9 +820,9 @@ def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
self._run_and_report_benchmark()

def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
"""Test Keras model in legacy graph mode with manual config tuning, XLA,
8 GPUs and fp16. Delay performance measurement for stable performance
on 96 vCPU platforms.
"""Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.
Delay performance measurement for stable performance on 96 vCPU platforms.
"""
self._setup()

@@ -841,8 +840,7 @@ def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
self._run_and_report_benchmark()

def benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next(self):
"""Test Keras model in legacy graph mode with manual config tuning, XLA,
8 GPUs and fp16.
"""Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.
This test also enables get_next_as_optional.
"""
3 changes: 2 additions & 1 deletion official/transformer/v2/data_pipeline.py
@@ -251,7 +251,8 @@ def _read_and_batch_from_files(
([max_length], [max_length]), drop_remainder=True)
else:
# Group and batch such that each batch has examples of similar length.
- # TODO: _batch_examples might need to do something special for num_replicas.
+ # TODO(xunkai): _batch_examples might need to do something special for
+ # num_replicas.
dataset = _batch_examples(dataset, batch_size, max_length)

dataset = dataset.repeat(repeat)
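The _batch_examples call above groups examples of similar length before batching them (hence the TODO about num_replicas). A hypothetical sketch of that kind of length-bucketed batching with tf.data follows; the helper name and bucketing scheme are assumptions, not the actual _batch_examples implementation:

import tensorflow as tf

def batch_by_length(dataset, batch_size, max_length, num_buckets=8):
  # dataset yields (inputs, targets) pairs of variable-length 1-D int tensors.
  bucket_width = max_length // num_buckets + 1

  def key_func(inputs, targets):
    # Bucket each example by the length of its longer sequence.
    seq_len = tf.maximum(tf.size(inputs), tf.size(targets))
    return tf.cast(seq_len // bucket_width, tf.int64)

  def reduce_func(unused_key, window):
    # Pad every example in the window to a common length and batch it.
    return window.padded_batch(batch_size, padded_shapes=([None], [None]))

  return dataset.apply(
      tf.data.experimental.group_by_window(
          key_func, reduce_func, window_size=batch_size))

Whether such a helper needs to be replica-aware, as the TODO suggests, depends on how the per-replica batch size is derived from the global one.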
2 changes: 1 addition & 1 deletion official/transformer/v2/transformer_main.py
@@ -25,7 +25,7 @@
import os
import tempfile

- from absl import app as absl_app
+ from absl import app as absl_app  # pylint: disable=unused-import
from absl import flags
import tensorflow as tf

