This is part of my code.
First, can I use an Ollama LLM with Ragas? Keep in mind that my LLM runs locally on my machine, because I want to avoid external API calls.
If that is possible, is this the right way to implement it?
```python
import asyncio
import json

import ollama
from langchain_core.outputs import Generation, LLMResult
from ragas.llms import BaseRagasLLM

# evaluate, context_utilization, CustomHuggingFaceRagasEmbeddings, embedding_dataset
# and data_set are defined elsewhere in my script.


class OllamaRagasLLM(BaseRagasLLM):
    """Custom Ragas LLM class using Mistral via Ollama."""

    def __init__(self, run_config=None, **kwargs):
        # Initialize the parent class with run_config
        if run_config is None:
            # If no run_config is provided, fall back to a default one
            run_config = {"some_config_key": "default_value"}  # example, replace with actual config
        super().__init__(run_config=run_config, **kwargs)

    async def _call(self, prompt: str) -> str:
        response = ollama.chat(model="mistral", messages=[{"role": "user", "content": prompt}])
        return response['message']['content']

    async def generate_text(self, prompt, **kwargs) -> LLMResult:
        text = await self._call(prompt)
        return LLMResult(generations=[[Generation(text=text)]])

    async def agenerate_text(self, prompt, **kwargs) -> LLMResult:
        text = await self._call(prompt)
        return LLMResult(generations=[[Generation(text=text)]])


run_config = {"some_config_key": "default_value"}


# Assuming the 'evaluate' function requires 'run_config' to be passed
async def run():
    # Build the custom LLM with the provided run_config and return it for the evaluation below
    return OllamaRagasLLM(run_config=run_config)


def is_json(myjson):
    try:
        json.loads(myjson)
    except ValueError:
        return False
    return True


if __name__ == "__main__":
    llm = asyncio.run(run())

    embedding_dict = embedding_dataset
    ragas_embeddings = CustomHuggingFaceRagasEmbeddings(
        model_name='distilbert-base-uncased', custome_embedding=embedding_dict)

    # Define the evaluation metrics
    metrics = [context_utilization]

    # Run the evaluation, passing the custom LLM
    evaluation_report = evaluate(data_set, metrics=metrics, embeddings=ragas_embeddings,
                                 llm=llm, is_async=True)
```
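For comparison, would wrapping a LangChain chat model be the more idiomatic way to point Ragas at a local Ollama model, instead of subclassing BaseRagasLLM? This is only a minimal sketch of what I have in mind, assuming ChatOllama from langchain-community and LangchainLLMWrapper from ragas are the right pieces and that the "mistral" model has already been pulled locally:

```python
# Minimal sketch (assumes `ollama pull mistral` has been run and that
# ChatOllama / LangchainLLMWrapper are available in my installed versions)
from langchain_community.chat_models import ChatOllama
from ragas.llms import LangchainLLMWrapper

local_llm = LangchainLLMWrapper(ChatOllama(model="mistral"))
# evaluation_report = evaluate(data_set, metrics=metrics, embeddings=ragas_embeddings, llm=local_llm)
```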