Skip to content

Commit

Permalink
Fix using lm_bench/wwb with version w/o apply_chat_template (#1651)
Browse files Browse the repository at this point in the history
  • Loading branch information
sbalandi authored Jan 30, 2025
1 parent 40cb849 commit b10ebcf
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 3 deletions.
6 changes: 4 additions & 2 deletions tools/llm_bench/task/text_generation.py
Original file line number Diff line number Diff line change
Expand Up @@ -234,7 +234,8 @@ def run_text_generation_genai(input_text, num, model, tokenizer, args, iter_data
gen_config.rng_seed = args["seed"]
gen_config.num_beams = args["num_beams"]
gen_config.do_sample = False
gen_config.apply_chat_template = False
if hasattr(gen_config, 'apply_chat_template'):
gen_config.apply_chat_template = False
if args.get('draft_model', ''):
config_info = "Speculative decoding config: "
if args.get('num_assistant_tokens', None):
Expand Down Expand Up @@ -382,7 +383,8 @@ def run_text_generation_genai_with_stream(input_text, num, model, tokenizer, arg
gen_config.num_beams = args["num_beams"]
gen_config.do_sample = False
gen_config.ignore_eos = True
gen_config.apply_chat_template = False
if hasattr(gen_config, 'apply_chat_template'):
gen_config.apply_chat_template = False
enable_prompt_permutations = not args.get("disable_prompt_permutation", False)
if enable_prompt_permutations:
log.warning(
Expand Down
3 changes: 2 additions & 1 deletion tools/llm_bench/task/visual_language_generation.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,8 @@ def run_visual_language_generation_genai(
gen_config.max_new_tokens = max_gen_tokens
gen_config.num_beams = args["num_beams"]
gen_config.do_sample = False
gen_config.apply_chat_template = False
if hasattr(gen_config, 'apply_chat_template'):
gen_config.apply_chat_template = False
kwargs = {}
if len(images) >= 1:
kwargs["images"] = images[0]
Expand Down

0 comments on commit b10ebcf

Please sign in to comment.