Commit

fix files
justjais committed Jan 31, 2025
1 parent 0a8cac0 commit c562b2f
Showing 3 changed files with 6 additions and 13 deletions.
14 changes: 4 additions & 10 deletions scripts/evaluation/olsconfig.yaml
@@ -23,18 +23,12 @@ llm_providers:
     credentials_path: ols_api_key.txt
     models:
       - name: granite3-8b
-  # - name: my_rhoai
-  #   type: openai
-  #   url: "https://granite3-1-8b-wisdom-model-staging.apps.stage2-west.v2dz.p1.openshiftapps.com/v1"
-  #   credentials_path: rhoai_api_key.txt
-  #   models:
-  #     - name: granite3-1-8b
 ols_config:
   # max_workers: 1
   reference_content:
-    product_docs_index_path: "./vector_db/vector_db/aap_product_docs/2.5"
-    product_docs_index_id: aap-product-docs-2_5
-    embeddings_model_path: "./vector_db/embeddings_model"
+    # product_docs_index_path: "./vector_db/vector_db/aap_product_docs/2.5"
+    # product_docs_index_id: aap-product-docs-2_5
+    # embeddings_model_path: "./vector_db/embeddings_model"
   conversation_cache:
     type: memory
     memory:
@@ -59,4 +53,4 @@ dev_config:
   pyroscope_url: "https://pyroscope.pyroscope.svc.cluster.local:4040"
   # llm_params:
   #   temperature_override: 0
-  # k8s_auth_token: optional_token_when_no_available_kube_config
+  # k8s_auth_token: optional_token_when_no_available_kube_config
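With the commented-out my_rhoai provider block deleted and the reference-content paths commented out, the evaluation config no longer wires up a RAG index. A sketch of the resulting section is below; anything not visible in the hunks above (the provider's name, type, and URL) is an assumption, not taken from the commit:

```yaml
# Sketch of olsconfig.yaml after this commit. The provider entry's
# name is a placeholder; it is not shown in the diff.
llm_providers:
  - name: my_ols_provider        # assumed; not visible in the hunk
    credentials_path: ols_api_key.txt
    models:
      - name: granite3-8b
ols_config:
  reference_content:             # index paths commented out, so no RAG content is loaded
  conversation_cache:
    type: memory
```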
2 changes: 2 additions & 0 deletions scripts/evaluation/utils/response.py
@@ -34,6 +34,7 @@ def get_model_response(query, provider, model, mode, api_client=None):
     provider_config = config.config.llm_providers.providers[provider]
     model_config = provider_config.models[model]
     llm = VANILLA_MODEL[provider_config.type](model, provider_config).load()
+
     if mode == "ols_param":
         max_resp_tokens = model_config.parameters.max_tokens_for_response
         override_params = {
@@ -49,5 +50,6 @@
     prompt, prompt_input = GeneratePrompt(
         query, rag_chunks, [], BASIC_PROMPT
     ).generate_prompt(model)
+
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=True)
     return llm_chain(inputs=prompt_input)["text"].strip()
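For context, a hedged usage sketch of `get_model_response()` as it stands after this change; the provider and model names below are placeholders that would have to match entries in olsconfig.yaml:

```python
# Hypothetical call; provider/model names must match olsconfig.yaml entries.
from utils.response import get_model_response

answer = get_model_response(
    query="How do I install the automation controller?",
    provider="my_ols_provider",  # placeholder provider name
    model="granite3-8b",
    mode="ols_param",            # apply the OLS max_tokens_for_response override
)
print(answer)
```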
3 changes: 0 additions & 3 deletions scripts/evaluation/utils/similarity_score_llm.py
@@ -1,15 +1,12 @@
"""Similarity check by LLM."""

import logging
from time import sleep

from langchain_core.prompts.prompt import PromptTemplate

from .constants import MAX_RETRY_ATTEMPTS, TIME_TO_BREATH
from .prompts import ANSWER_SIMILARITY_PROMPT

logger = logging.getLogger(__name__)


class AnswerSimilarityScore:
"""Get similarity score generated by LLM."""
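The class body is collapsed in the view above; based on the imports the commit keeps (`sleep`, `MAX_RETRY_ATTEMPTS`, `TIME_TO_BREATH`), a retry loop along these lines is presumably what they support. This is an illustrative sketch, not the repository's code:

```python
# Illustrative retry helper; the constants are assumed values standing in
# for MAX_RETRY_ATTEMPTS and TIME_TO_BREATH imported from .constants.
from time import sleep

MAX_RETRY_ATTEMPTS = 3  # assumed
TIME_TO_BREATH = 5      # assumed, seconds


def call_with_retry(llm_call, *args, **kwargs):
    """Invoke an LLM call, pausing between attempts and re-raising at the end."""
    for attempt in range(1, MAX_RETRY_ATTEMPTS + 1):
        try:
            return llm_call(*args, **kwargs)
        except Exception:
            if attempt == MAX_RETRY_ATTEMPTS:
                raise
            sleep(TIME_TO_BREATH)  # give the model endpoint time to recover
```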
