
what is the problem #1890

Open
amin-kh96 opened this issue Jan 30, 2025 · 1 comment
Labels: answered 🤖 (The question has been answered. Will be closed automatically if no new comments) · question (Further information is requested)

Comments


amin-kh96 commented Jan 30, 2025

This is part of my code.
First, can I use an Ollama LLM with Ragas? Keep in mind that my LLM runs locally on my computer to avoid API calls.
If I want to use it, should I implement it this way or not?

import asyncio
import json

import ollama
from langchain_core.outputs import Generation, LLMResult
from ragas import evaluate
from ragas.llms import BaseRagasLLM
from ragas.metrics import context_utilization

# CustomHuggingFaceRagasEmbeddings, embedding_dataset and data_set come from
# other parts of my code (not shown here).


class OllamaRagasLLM(BaseRagasLLM):
    """Custom Ragas LLM class using Mistral via Ollama."""
    
    def __init__(self, run_config=None, **kwargs):
        # Initialize the parent class with run_config
        if run_config is None:
            # If no run_config is provided, you can define a default one
            run_config = {"some_config_key": "default_value"}  # Example, replace with actual config
        super().__init__(run_config=run_config, **kwargs)
    
    async def _call(self, prompt: str) -> str:
        response = ollama.chat(model="mistral", messages=[{"role": "user", "content": prompt}])
        return response['message']['content']

    async def generate_text(self, prompt, **kwargs) -> LLMResult:
        text = await self._call(prompt)
        return LLMResult(generations=[[Generation(text=text)]])

    async def agenerate_text(self, prompt, **kwargs) -> LLMResult:
        text = await self._call(prompt)
        return LLMResult(generations=[[Generation(text=text)]])

run_config = {"some_config_key": "default_value"}
# Assuming your 'evaluate' function requires 'run_config' to be passed
async def run():
    # Use the custom LLM class with the provided run_config and return it
    # so it can be used by evaluate() in the __main__ block below.
    llm = OllamaRagasLLM(run_config=run_config)
    return llm
#custom_llm = CustomRagasLLM(api_key=None, embedding_dataset=embedding_dataset)

# Define an async function to test the LLM
#async def test_llm_with_prompts():
    #results = []
    #for prompt in prompts:
        # Call the custom LLM with each prompt
        #result = await custom_llm.generate_text(prompt)
        #results.append(result)
        # Debugging: Print each result
        #print(f"LLM Output for prompt: {result[:900]}")  # Truncate to avoid overly large output
    #return results

def is_json(myjson):
    try:
        json_object = json.loads(myjson)
    except ValueError as e:
        return False
    return True


#async def test_llm_with_prompts():
    # results = []
    # for prompt in prompts:
    #     # Call the custom LLM with each prompt
    #     llm_result = await custom_llm.generate_text(prompt=prompt)
        
    #     # Extract the text from LLMResult
    #     generated_text = llm_result.generations[0][0].text  # Access the first generation
        
    #     results.append(generated_text)
        
    #     # Debugging: Print each result (truncate to avoid overly large output)
    #     print(f"LLM Output for prompt: {generated_text[:900]}")  # Truncate for display
    # return results

if __name__ == "__main__":

    # Ensure proper execution of async calls and keep the returned LLM instance
    llm = asyncio.run(run())
    #all_results = asyncio.run(test_llm_with_prompts())
    #print(f"Total Results: {len(all_results)}")  # Confirm all prompts were processed

    embedding_dict = embedding_dataset

    ragas_embeddings = CustomHuggingFaceRagasEmbeddings(model_name='distilbert-base-uncased', custome_embedding=embedding_dict)
    #custom_llm =CustomRagasLLM(api_key=None)

    # Define the evaluation metrics
    metrics = [context_utilization]


    # Run the evaluation
    evaluation_report = evaluate(data_set, metrics=metrics, embeddings=ragas_embeddings, llm=llm, is_async=True)  # Pass your custom LLM here
jjmachan (Member) commented Feb 4, 2025

hey @amin-kh96, #1170 support is not there as of now, sadly
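
For anyone landing here and looking for a workaround (not something confirmed in this thread): Ragas can typically consume any LangChain chat model through its LangchainLLMWrapper, and LangChain ships an Ollama chat model. A minimal sketch, assuming langchain_community and a reasonably recent ragas are installed; the model name "mistral" is taken from the snippet above, and exact import paths may differ between versions:

from langchain_community.chat_models import ChatOllama
from ragas.llms import LangchainLLMWrapper

# Wrap the locally served Ollama model so Ragas metrics can call it like any other LLM.
ollama_llm = LangchainLLMWrapper(ChatOllama(model="mistral"))

# It could then be passed to evaluate() in place of the hand-rolled OllamaRagasLLM:
# evaluation_report = evaluate(data_set, metrics=metrics, embeddings=ragas_embeddings, llm=ollama_llm)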
