Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Correct some descriptions of annotations and spelling of class name #470

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ and less prone to user error.

### Changed

- Fixed the bug in `tfrs.layers.loss.SamplingProbablityCorrection` that logits
- Fixed the bug in `tfrs.layers.loss.SamplingProbabilityCorrection` that logits
should subtract the log of item probability.
- `tfrs.experimental.optimizers.CompositeOptimizer`: an optimizer that
composes multiple individual optimizers which can be applied to different
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ class DotInteraction(tf.keras.layers.Layer):
section 2.1.3. Sparse activations and dense activations are combined.
Dot interaction is applied to a batch of input Tensors [e1,...,e_k] of the
same dimension and the output is a batch of Tensors with all distinct pairwise
dot products of the form dot(e_i, e_j) for i <= j if self self_interaction is
dot products of the form dot(e_i, e_j) for i <= j if self_interaction is
True, otherwise dot(e_i, e_j) i < j.

Attributes:
Expand Down
3 changes: 2 additions & 1 deletion tensorflow_recommenders/layers/loss.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ def call(self, labels: tf.Tensor, logits: tf.Tensor,
return logits + duplicate * MIN_FLOAT


class SamplingProbablityCorrection(tf.keras.layers.Layer):
class SamplingProbabilityCorrection(tf.keras.layers.Layer):
"""Sampling probability correction."""

def __call__(self, logits: tf.Tensor,
Expand All @@ -156,3 +156,4 @@ def __call__(self, logits: tf.Tensor,

return logits - tf.math.log(
tf.clip_by_value(candidate_sampling_probability, 1e-6, 1.))

4 changes: 2 additions & 2 deletions tensorflow_recommenders/layers/loss_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,15 +112,15 @@ def test_sampling_probability_correction(self, random_seed):
logits = rng.uniform(size=shape).astype(np.float32)
probs = rng.uniform(size=shape[0]).astype(np.float32)

corrected_logits = loss.SamplingProbablityCorrection()(logits, probs)
corrected_logits = loss.SamplingProbabilityCorrection()(logits, probs)
corrected_logits = corrected_logits.numpy()

np.testing.assert_array_less(logits, corrected_logits)

# set some of the probabilities to 0
probs_with_zeros = probs * rng.choice([0., 1.], size=probs.shape)

corrected_logits_with_zeros = loss.SamplingProbablityCorrection()(
corrected_logits_with_zeros = loss.SamplingProbabilityCorrection()(
logits, probs_with_zeros)
corrected_logits_with_zeros = corrected_logits_with_zeros.numpy()

Expand Down
4 changes: 2 additions & 2 deletions tensorflow_recommenders/tasks/retrieval.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ def call(self,
Args:
query_embeddings: [num_queries, embedding_dim] tensor of query
representations.
candidate_embeddings: [num_queries, embedding_dim] tensor of candidate
candidate_embeddings: [num_candidates, embedding_dim] tensor of candidate
representations.
sample_weight: [num_queries] tensor of sample weights.
candidate_sampling_probability: Optional tensor of candidate sampling
Expand Down Expand Up @@ -150,7 +150,7 @@ def call(self,
scores = scores / self._temperature

if candidate_sampling_probability is not None:
scores = layers.loss.SamplingProbablityCorrection()(
scores = layers.loss.SamplingProbabilityCorrection()(
scores, candidate_sampling_probability)

if self._remove_accidental_hits:
Expand Down