Commit 78ac443

Rename embedding to word embedding for clarity when distinguishing between word embedding and upos emb

SecroLoL committed Jan 9, 2024
1 parent f4b3f53 commit 78ac443
Showing 1 changed file with 3 additions and 3 deletions.
stanza/models/lemma_classifier/base_model.py (6 changes: 3 additions & 3 deletions)
@@ -76,7 +76,7 @@ def load(filename, args=None):
             # TODO: refactor loading the pretrain (also done in the trainer)
             pt = load_pretrain(args['wordvec_pretrain_file'])
             emb_matrix = pt.emb
-            embeddings = nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix))
+            word_embeddings = nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix))
             vocab_map = { word.replace('\xa0', ' '): i for i, word in enumerate(pt.vocab) }
             vocab_size = emb_matrix.shape[0]
             embedding_dim = emb_matrix.shape[1]
@@ -88,7 +88,7 @@ def load(filename, args=None):
                 hidden_dim=saved_args['hidden_dim'],
                 output_dim=len(checkpoint['label_decoder']),
                 vocab_map=vocab_map,
-                pt_embedding=embeddings,
+                pt_embedding=word_embeddings,
                 label_decoder=checkpoint['label_decoder'],
                 charlm=True,
                 charlm_forward_file=saved_args['charlm_forward_file'],
@@ -100,7 +100,7 @@ def load(filename, args=None):
                 hidden_dim=saved_args['hidden_dim'],
                 output_dim=len(checkpoint['label_decoder']),
                 vocab_map=vocab_map,
-                pt_embedding=embeddings,
+                pt_embedding=word_embeddings,
                 label_decoder=checkpoint['label_decoder'])
         elif model_type is ModelType.TRANSFORMER:
             from stanza.models.lemma_classifier.transformer_baseline.model import LemmaClassifierWithTransformer
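
For context (not part of the commit), a minimal sketch of the distinction the rename is after: the classifier holds two separate embedding tables, a frozen one built from pretrained word vectors and a learned one for UPOS tags, so calling the first one word_embeddings keeps it from being confused with the UPOS embedding. The sizes below are made up for illustration and this is not the stanza implementation.

# Hypothetical sketch: two distinct embedding tables that the clearer names keep apart.
import numpy as np
import torch
import torch.nn as nn

# Stand-in for the pretrained word-vector matrix (pt.emb) that load_pretrain() provides above.
emb_matrix = np.random.rand(10000, 100).astype(np.float32)

# Frozen word embeddings built from the pretrained matrix, as in load().
word_embeddings = nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix))

# Separately learned UPOS-tag embeddings (17 universal POS tags plus a padding slot; illustrative).
upos_embeddings = nn.Embedding(num_embeddings=18, embedding_dim=20)

word_ids = torch.tensor([5, 42, 7])   # indices into the word vocabulary
upos_ids = torch.tensor([3, 11, 0])   # indices into the UPOS tag set

# The two lookups produce differently sized vectors and should not share a name.
print(word_embeddings(word_ids).shape)   # torch.Size([3, 100])
print(upos_embeddings(upos_ids).shape)   # torch.Size([3, 20])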
