From 0dafcf373de8be9653d5c146dfa03c22ce6bf75c Mon Sep 17 00:00:00 2001
From: Lengyue
Date: Wed, 20 Dec 2023 01:12:37 +0000
Subject: [PATCH] Reuse cache for different generation size

---
 tools/llama/generate.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/llama/generate.py b/tools/llama/generate.py
index e7b5cce7..c84a69be 100644
--- a/tools/llama/generate.py
+++ b/tools/llama/generate.py
@@ -163,7 +163,7 @@ def decode_n_tokens(
     **sampling_kwargs,
 ):
     previous_tokens = torch.zeros(
-        (model.config.num_codebooks + 1, num_new_tokens),
+        (model.config.num_codebooks + 1, model.config.max_seq_len),
         dtype=torch.int,
         device=cur_token.device,
     )
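
The change sizes the previous_tokens buffer by model.config.max_seq_len
instead of the per-call num_new_tokens, so its shape is identical no matter
how many tokens a given generation requests. With a static shape, compiled
or cached decode machinery (e.g. torch.compile traces) can be reused across
calls of different lengths rather than being rebuilt for each one. Below is
a minimal sketch of that fixed-allocation pattern, not the project's actual
code; NUM_CODEBOOKS, MAX_SEQ_LEN, and decode_window are stand-ins for values
the real function reads from model.config and its arguments.

    import torch

    # Stand-in constants; the patched code reads these from model.config.
    NUM_CODEBOOKS = 8
    MAX_SEQ_LEN = 2048

    # Allocate once at the maximum size so the tensor shape never depends
    # on the requested generation length.
    previous_tokens = torch.zeros(
        (NUM_CODEBOOKS + 1, MAX_SEQ_LEN),
        dtype=torch.int,
    )

    def decode_window(num_new_tokens: int) -> torch.Tensor:
        # Only the first num_new_tokens columns are written during a given
        # generation; because the backing buffer's shape is fixed, nothing
        # shape-dependent has to be recompiled when the length changes.
        return previous_tokens[:, :num_new_tokens]

The trade-off is a modest amount of extra memory (the buffer is always
max_seq_len wide) in exchange for stable tensor shapes across calls.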