Commit: Remove uses of deprecated APIs
peastman committed Mar 28, 2019
1 parent bcbe3e1, commit 2aa5ccc
Showing 29 changed files with 442 additions and 428 deletions.
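The changes below repeat a small set of deprecated-to-current TensorFlow substitutions. A minimal sketch of the mapping, assuming TF 1.13-era APIs (illustrative only, not code from the commit):

import tensorflow as tf

x = tf.constant([2.0, 4.0])
y = tf.constant([1.0, 2.0])

quotient = tf.math.divide(x, y)  # replaces the deprecated tf.div(x, y)
as_float = tf.cast(tf.constant([1, 0]), tf.float32)  # replaces tf.to_float(...)
as_int = tf.cast(tf.constant([1.5, 0.5]), tf.int32)  # replaces tf.to_int32(...)
# In tf.test.TestCase subclasses, self.session() replaces the deprecated
# self.test_session().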
10 changes: 5 additions & 5 deletions contrib/atomicconv/models/atomicnet_ops.py
@@ -80,8 +80,8 @@ def AtomicNNLayer(tensor, size, weights, biases, name=None):
"""

if len(tensor.get_shape()) != 2:
raise ValueError('Dense layer input must be 2D, not %dD' %
len(tensor.get_shape()))
raise ValueError(
'Dense layer input must be 2D, not %dD' % len(tensor.get_shape()))
with tf.name_scope(name, 'fully_connected', [tensor, weights, biases]):
return tf.nn.xw_plus_b(tensor, weights, biases)
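The hunk above only rewraps the ValueError call; the layer itself is a plain affine transform via tf.nn.xw_plus_b. A tiny self-contained equivalent with toy shapes (not taken from the commit):

import tensorflow as tf

x = tf.ones([4, 8])  # batch of 4 examples, 8 features
w = tf.ones([8, 3])  # weight matrix
b = tf.zeros([3])  # bias vector
out = tf.nn.xw_plus_b(x, w, b)  # same result as tf.matmul(x, w) + b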

@@ -111,8 +111,8 @@ def gather_neighbors(X, nbr_indices, B, N, M, d):
example_tensors = tf.unstack(X, axis=0)
example_nbrs = tf.unstack(nbr_indices, axis=0)
all_nbr_coords = []
for example, (example_tensor,
example_nbr) in enumerate(zip(example_tensors, example_nbrs)):
for example, (example_tensor, example_nbr) in enumerate(
zip(example_tensors, example_nbrs)):
nbr_coords = tf.gather(example_tensor, example_nbr)
all_nbr_coords.append(nbr_coords)
neighbors = tf.stack(all_nbr_coords)
@@ -149,7 +149,7 @@ def DistanceTensor(X, Nbrs, boxsize, B, N, M, d):
nbrs_tensors = tf.unstack(nbrs, axis=1)
for nbr, nbr_tensor in enumerate(nbrs_tensors):
_D = tf.subtract(nbr_tensor, atom_tensor)
_D = tf.subtract(_D, boxsize * tf.round(tf.div(_D, boxsize)))
_D = tf.subtract(_D, boxsize * tf.round(tf.math.divide(_D, boxsize)))
D.append(_D)
else:
for atom, atom_tensor in enumerate(atom_tensors):
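The tf.div call replaced in DistanceTensor applies the minimum-image convention for periodic boxes: each displacement is wrapped by the nearest multiple of the box length. A small numeric check of the same formula (assumed values, NumPy for brevity):

import numpy as np

boxsize = 10.0
raw = 9.0  # separation along one axis
wrapped = raw - boxsize * np.round(raw / boxsize)
# wrapped == -1.0: the periodic image one box away is the closer one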
22 changes: 11 additions & 11 deletions contrib/atomicconv/models/legacy.py
@@ -278,7 +278,7 @@ def add_training_cost(self, graph, name_scopes, output, labels, weights):
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
gradient_cost = tf.div(
gradient_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
gradient_costs.append(gradient_cost)
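As the comment above explains, the mean cost is computed with an explicit sum and divide rather than tf.reduce_mean so the op stays on the GPU; only the division op changes in this commit. A standalone sketch of the updated pattern (stand-in values, TF 1.x style):

import tensorflow as tf

weighted_cost = tf.constant([0.5, 0.0, 1.5, 0.0])  # per-example weighted losses
batch_size = 4
gradient_cost = tf.math.divide(tf.reduce_sum(weighted_cost), batch_size)
# numerically the same as tf.reduce_mean(weighted_cost) here, kept as div/sum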

@@ -416,8 +416,8 @@ def add_example_weight_placeholders(self, graph, name_scopes):
feeding and fetching the same tensor.
"""
weights = []
placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
name_scopes)
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with placeholder_scope:
for task in range(self.n_tasks):
weights.append(
@@ -617,8 +617,8 @@ def add_label_placeholders(self, graph, name_scopes):
Placeholders are wrapped in identity ops to avoid the error caused by
feeding and fetching the same tensor.
"""
placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
name_scopes)
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
batch_size = self.batch_size
n_classes = self.n_classes
@@ -770,8 +770,8 @@ def add_label_placeholders(self, graph, name_scopes):
Placeholders are wrapped in identity ops to avoid the error caused by
feeding and fetching the same tensor.
"""
placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
name_scopes)
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
batch_size = self.batch_size
labels = []
@@ -859,8 +859,8 @@ def build(self, graph, name_scopes, training):
batch_size x n_features.
"""
n_features = self.n_features
placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
name_scopes)
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
with placeholder_scope:
self.mol_features = tf.placeholder(
@@ -906,8 +906,8 @@ def build(self, graph, name_scopes, training):
weight_init=tf.truncated_normal(
shape=[prev_layer_size, 1],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(value=bias_init_consts[i], shape=[1
]))))
bias_init=tf.constant(value=bias_init_consts[i],
shape=[1]))))
return output

def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
12 changes: 7 additions & 5 deletions contrib/one_shot_models/multitask_classifier.py
@@ -85,8 +85,9 @@ def __init__(self,
pad_batches=True,
verbose=True):

warnings.warn("MultitaskGraphClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
warnings.warn(
"MultitaskGraphClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
super(MultitaskGraphClassifier, self).__init__(
model_dir=logdir, verbose=verbose)
self.n_tasks = n_tasks
@@ -191,16 +192,17 @@ def add_training_loss(self, final_loss, logits):
task_label_vector = task_labels[task]
task_weight_vector = task_weights[task]
# Convert the labels into one-hot vector encodings.
one_hot_labels = tf.to_float(
tf.one_hot(tf.to_int32(tf.squeeze(task_label_vector)), 2))
one_hot_labels = tf.cast(
tf.one_hot(tf.cast(tf.squeeze(task_label_vector), tf.int32), 2),
tf.float32)
# Since we use tf.nn.softmax_cross_entropy_with_logits note that we pass in
# un-softmaxed logits rather than softmax outputs.
task_loss = loss_fn(logits[task], one_hot_labels, task_weight_vector)
task_losses.append(task_loss)
# It's ok to divide by just the batch_size rather than the number of nonzero
# examples (effect averages out)
total_loss = tf.add_n(task_losses)
total_loss = tf.div(total_loss, self.batch_size)
total_loss = tf.math.divide(total_loss, self.batch_size)
return total_loss

def add_softmax(self, outputs):
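The label-encoding hunk above swaps the deprecated tf.to_float and tf.to_int32 helpers for explicit tf.cast calls around tf.one_hot. A self-contained sketch of the same pattern with toy labels (not from the commit):

import tensorflow as tf

task_label_vector = tf.constant([[1.0], [0.0], [1.0]])
one_hot_labels = tf.cast(
    tf.one_hot(tf.cast(tf.squeeze(task_label_vector), tf.int32), 2),
    tf.float32)
# one_hot_labels evaluates to [[0, 1], [1, 0], [0, 1]] as float32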
15 changes: 8 additions & 7 deletions contrib/one_shot_models/multitask_regressor.py
@@ -44,8 +44,9 @@ def __init__(self,
pad_batches=True,
verbose=True):

warnings.warn("MultitaskGraphRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
warnings.warn(
"MultitaskGraphRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)

super(MultitaskGraphRegressor, self).__init__(
model_dir=logdir, verbose=verbose)
@@ -154,14 +155,13 @@ def add_training_loss(self, final_loss, outputs):
for task in range(self.n_tasks):
task_label_vector = task_labels[task]
task_weight_vector = task_weights[task]
task_loss = loss_fn(outputs[task],
tf.squeeze(task_label_vector),
task_loss = loss_fn(outputs[task], tf.squeeze(task_label_vector),
tf.squeeze(task_weight_vector))
task_losses.append(task_loss)
# It's ok to divide by just the batch_size rather than the number of nonzero
# examples (effect averages out)
total_loss = tf.add_n(task_losses)
total_loss = tf.div(total_loss, self.batch_size)
total_loss = tf.math.divide(total_loss, self.batch_size)
return total_loss

def fit(self,
@@ -222,8 +222,9 @@ class DTNNMultitaskGraphRegressor(MultitaskGraphRegressor):

def build(self):
# Create target inputs
warnings.warn("DTNNMultitaskGraphRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
warnings.warn(
"DTNNMultitaskGraphRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.label_placeholder = tf.placeholder(
dtype='float32', shape=(None, self.n_tasks), name="label_placeholder")
self.weight_placeholder = tf.placeholder(
12 changes: 7 additions & 5 deletions contrib/tensorflow_models/__init__.py
@@ -51,8 +51,9 @@ class TensorflowGraph(object):

def __init__(self, graph, session, name_scopes, output, labels, weights,
loss):
warnings.warn("TensorflowGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
warnings.warn(
"TensorflowGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.graph = graph
self.session = session
self.name_scopes = name_scopes
@@ -178,8 +179,9 @@ def __init__(self,
seed: int
If not none, is used as random seed for tensorflow.
"""
warnings.warn("TensorflowGraphModel is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
warnings.warn(
"TensorflowGraphModel is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)

# Save hyperparameters
self.n_tasks = n_tasks
@@ -278,7 +280,7 @@ def add_training_cost(self, graph, name_scopes, output, labels, weights):
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
gradient_cost = tf.div(
gradient_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
gradient_costs.append(gradient_cost)

8 changes: 5 additions & 3 deletions contrib/tensorflow_models/progressive_joint.py
@@ -12,6 +12,7 @@
from deepchem.metrics import from_one_hot
from deepchem.nn import model_ops


class ProgressiveJointRegressor(TensorflowMultiTaskRegressor):
"""Implements a progressive multitask neural network.
@@ -39,8 +40,9 @@ def __init__(self, n_tasks, n_features, alpha_init_stddevs=[.02], **kwargs):
alpha_init_stddevs: list
List of standard-deviations for alpha in adapter layers.
"""
warnings.warn("ProgressiveJointRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
warnings.warn(
"ProgressiveJointRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.alpha_init_stddevs = alpha_init_stddevs
super(ProgressiveJointRegressor, self).__init__(n_tasks, n_features,
**kwargs)
@@ -312,7 +314,7 @@ def add_training_costs(self, graph, name_scopes, output, labels, weights):
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
gradient_cost = tf.div(
gradient_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
gradient_costs.append(gradient_cost)

11 changes: 7 additions & 4 deletions contrib/tensorflow_models/progressive_multitask.py
@@ -12,6 +12,7 @@
from deepchem.metrics import from_one_hot
from deepchem.nn import model_ops


class ProgressiveMultitaskRegressor(TensorflowMultiTaskRegressor):
"""Implements a progressive multitask neural network.
@@ -39,8 +40,9 @@ def __init__(self, n_tasks, n_features, alpha_init_stddevs=[.02], **kwargs):
alpha_init_stddevs: list
List of standard-deviations for alpha in adapter layers.
"""
warnings.warn("ProgressiveMultitaskRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
warnings.warn(
"ProgressiveMultitaskRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.alpha_init_stddevs = alpha_init_stddevs
super(ProgressiveMultitaskRegressor, self).__init__(n_tasks, n_features,
**kwargs)
@@ -293,7 +295,7 @@ def add_training_costs(self, graph, name_scopes, output, labels, weights):
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
gradient_cost = tf.div(
gradient_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
gradient_costs.append(gradient_cost)

@@ -487,7 +489,8 @@ def add_task_training_costs(self, graph, name_scopes, outputs, labels,
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
task_cost = tf.div(tf.reduce_sum(weighted_cost), self.batch_size)
task_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
task_costs[task] = task_cost

return task_costs
8 changes: 4 additions & 4 deletions contrib/tensorflow_models/test_utils.py
@@ -66,7 +66,7 @@ def PrepareMask(self, features, mask):
return masked_features, mask_t

def Check(self, func, features, expected, axis=None, mask=None):
with self.test_session() as sess:
with self.session() as sess:
features, features_t = self.PrepareFeatures(features)
if mask is not None:
features, mask = self.PrepareMask(features, mask)
@@ -127,7 +127,7 @@ def testVarianceWithMask(self):
mask=[[1, 0], [0, 1]])

def testMoment(self):
with self.test_session() as sess:
with self.session() as sess:
features = np.random.random((3, 4, 5))
features_t = tf.constant(features, dtype=tf.float32)

@@ -168,7 +168,7 @@ def testMoment(self):
atol=1e-5)

def testSkewness(self):
with self.test_session() as sess:
with self.session() as sess:
features = np.random.random((3, 4, 5))
features_t = tf.constant(features, dtype=tf.float32)
self.assertAllClose(
@@ -183,7 +183,7 @@ def testSkewness(self):
atol=1e-5)

def testKurtosis(self):
with self.test_session() as sess:
with self.session() as sess:
features = np.random.random((3, 4, 5))
features_t = tf.constant(features, dtype=tf.float32)
self.assertAllClose(
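Every change in this test file is the same substitution: the deprecated self.test_session() context manager becomes self.session(). A minimal sketch of the updated idiom in a tf.test.TestCase (assumed TF 1.x graph-mode test, not from the repository):

import tensorflow as tf


class DivideTest(tf.test.TestCase):

  def test_divide(self):
    with self.session() as sess:
      result = sess.run(tf.math.divide(tf.constant(6.0), tf.constant(3.0)))
      self.assertAllClose(result, 2.0)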
13 changes: 7 additions & 6 deletions contrib/tensorflow_models/utils.py
@@ -36,8 +36,9 @@ def ParseCheckpoint(checkpoint):
Returns:
The path to an actual checkpoint file.
"""
warnings.warn("ParseCheckpoint is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
warnings.warn(
"ParseCheckpoint is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
with open(checkpoint) as f:
try:
cp = checkpoint_state_pb2.CheckpointState()
@@ -67,8 +68,8 @@ def Mask(t, mask):
if mask is None:
return t
if not t.get_shape()[:-1].is_compatible_with(mask.get_shape()):
raise ValueError('Shapes do not match: %s vs. %s' % (t.get_shape(),
mask.get_shape()))
raise ValueError(
'Shapes do not match: %s vs. %s' % (t.get_shape(), mask.get_shape()))
return tf.multiply(t, tf.expand_dims(mask, -1))
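The Mask helper shown above zeroes out padded entries by broadcasting the mask over the last axis. A quick illustrative use with toy shapes (not from the commit):

import tensorflow as tf

t = tf.ones([2, 3, 4])  # batch x atoms x features
mask = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
masked = tf.multiply(t, tf.expand_dims(mask, -1))  # padded atoms become zeros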


@@ -184,12 +185,12 @@ def Moment(k, tensor, standardize=False, reduction_indices=None, mask=None):

# compute the requested central moment
# note that mean is a raw moment, not a central moment
mean = tf.div(
mean = tf.math.divide(
tf.reduce_sum(tensor, axis=reduction_indices, keep_dims=True), divisor)
delta = tensor - mean
if mask is not None:
delta = Mask(delta, mask)
moment = tf.div(
moment = tf.math.divide(
tf.reduce_sum(
math_ops.pow(delta, k), axis=reduction_indices, keep_dims=True),
divisor)
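Moment computes the k-th central moment with the same explicit sum-and-divide pattern, and again only the division op changes. A rough NumPy analogue of the unmasked, unstandardized case (illustrative only):

import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0])
mean = x.sum() / x.size  # raw first moment
second_moment = ((x - mean) ** 2).sum() / x.size  # k = 2 gives the variance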
12 changes: 6 additions & 6 deletions contrib/vina_model/test_vina_model.py
@@ -46,7 +46,7 @@ def test_get_cells(self):
start = 0
stop = 4
nbr_cutoff = 1
with self.test_session() as sess:
with self.session() as sess:
ndim = 3
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim).eval()
assert len(cells.shape) == 2
@@ -71,7 +71,7 @@ def test_compute_neighbor_list(self):
# The number of cells which we should theoretically have
n_cells = int(((stop - start) / nbr_cutoff)**ndim)

with self.test_session() as sess:
with self.session() as sess:
coords = start + np.random.rand(N, ndim) * (stop - start)
coords = tf.stack(coords)
nbr_list = compute_neighbor_list(
@@ -90,7 +90,7 @@ def test_put_atoms_in_cells(self):
# The number of cells which we should theoretically have
n_cells = ((stop - start) / nbr_cutoff)**ndim

with self.test_session() as sess:
with self.session() as sess:
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
coords = np.random.rand(N, ndim)
_, atoms_in_cells = put_atoms_in_cells(coords, cells, N, n_cells, ndim, k)
@@ -113,7 +113,7 @@ def test_compute_neighbor_cells(self):
# TODO(rbharath): The test below only checks that shapes work out.
# Need to do a correctness implementation vs. a simple CPU impl.

with self.test_session() as sess:
with self.session() as sess:
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
nbr_cells = compute_neighbor_cells(cells, ndim, n_cells)
nbr_cells = nbr_cells.eval()
@@ -136,7 +136,7 @@ def test_compute_closest_neighbors(self):
# TODO(rbharath): The test below only checks that shapes work out.
# Need to do a correctness implementation vs. a simple CPU impl.

with self.test_session() as sess:
with self.session() as sess:
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
nbr_cells = compute_neighbor_cells(cells, ndim, n_cells)
coords = np.random.rand(N, ndim)
@@ -158,7 +158,7 @@ def test_get_cells_for_atoms(self):
# TODO(rbharath): The test below only checks that shapes work out.
# Need to do a correctness implementation vs. a simple CPU impl.

with self.test_session() as sess:
with self.session() as sess:
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
coords = np.random.rand(N, ndim)
cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
(Diff for the remaining changed files not shown.)