Skip to content

Commit

Permalink
[CLEANUP]
Browse files Browse the repository at this point in the history
  • Loading branch information
Kye Gomez authored and Kye Gomez committed Jun 5, 2024
1 parent 8c90e26 commit c02d86c
Show file tree
Hide file tree
Showing 12 changed files with 758 additions and 59 deletions.
1 change: 0 additions & 1 deletion .env.example

This file was deleted.

18 changes: 18 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,24 @@ The idea for WhiteRock emerged during a pivotal event that underscored the ineff
## Install
`$ pip3 install whiterock`

## Usage
```python
from whiterock.main import WhiteRock
from whiterock.agents import due_diligence_agent, principal_investor

# Instantiate the WhiteRock class
whiterock = WhiteRock(
agents=[due_diligence_agent, principal_investor],
max_loops=5,
    phone_number="+16505188709",  # phone number the swarm will dial
phone_call_duration=160,
)

# Run the WhiteRock class
task = "Enter in your task"
whiterock.run(task)
```

## What It Does

WhiteRock is designed to transform the way venture capital operates by automating the entire investment process. Here’s how it works:
Expand Down
Binary file added chroma/chroma.sqlite3
Binary file not shown.
Empty file added errors.txt
Empty file.
14 changes: 14 additions & 0 deletions example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
"""Example entry point: run the WhiteRock VC swarm on a single task."""

from whiterock.main import WhiteRock
from whiterock.agents import due_diligence_agent, principal_investor

# Instantiate the WhiteRock swarm with its two-agent pipeline:
# due diligence first, then the principal-investor review.
whiterock = WhiteRock(
    agents=[due_diligence_agent, principal_investor],
    max_loops=5,
    # Number the swarm dials for the founder call.
    # NOTE(review): a second number (+19729719060) was left behind in a
    # stale comment here — confirm which one is current.
    phone_number="+16505188709",
    phone_call_duration=160,  # maximum call length in seconds
)

# Run the WhiteRock class on a single task description.
task = "Enter in your task"  # replace with the actual task text
whiterock.run(task)
57 changes: 0 additions & 57 deletions tools.py

This file was deleted.

87 changes: 87 additions & 0 deletions whiterock/agents.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import os

from dotenv import load_dotenv
from swarms import Agent

from swarms import OpenAIChat
from whiterock.memory import ChromaDB
from whiterock.prompts import (
PRINCIPAL_SYSTEM_PROMPT,
due_diligence_agent_system_prompt,
)

# Load API keys and other settings from a local .env file into the process
# environment.
load_dotenv()

# Long-term memory store; n_results=1 returns only the single best match.
# NOTE(review): `memory` is only referenced from commented-out Agent kwargs
# below — it is currently unused.
memory = ChromaDB(
    output_dir="whiterock",
    n_results=1,
)

# GROQ API key.
# NOTE(review): fetched but never used in this module — the model below is
# OpenAIChat, keyed by OPENAI_API_KEY.
groq_api_key = os.getenv("GROQ_API_KEY")

# Shared LLM used by both agents (despite the nearby GROQ references, this
# is an OpenAI-backed chat model).
model = OpenAIChat(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    max_tokens=3000,
    temperature=0.2,
)


# Display names for the two agents; indexed as [0] and [1] by the Agent
# definitions below — keep order in sync with system_prompts.
agent_names = [
    "Due Diligence Agent",
    "Principal Investor Agent",
]

# System prompts paired 1:1 with agent_names.
system_prompts = [
    due_diligence_agent_system_prompt(),
    PRINCIPAL_SYSTEM_PROMPT(),
]


# Agent 0: runs due diligence over the call transcript / conversation
# history (see WhiteRock.run). Dead commented-out kwargs removed.
due_diligence_agent = Agent(
    agent_name=agent_names[0],
    system_prompt=system_prompts[0],
    # NOTE(review): the description duplicates the entire system prompt; a
    # short summary string would be more appropriate.
    agent_description=system_prompts[0],
    llm=model,
    max_loops=1,
    autosave=True,  # persist agent state after each run
    state_save_file_type="json",
    # Derived from the display name, e.g. "due_diligence_agent.json".
    saved_state_path=f"{agent_names[0].lower().replace(' ', '_')}.json",
)


# Agent 1: principal investor; reviews the due-diligence output.
principal_investor = Agent(
    agent_name=agent_names[1],
    system_prompt=system_prompts[1],
    # NOTE(review): the description duplicates the entire system prompt; a
    # short summary string would be more appropriate.
    agent_description=system_prompts[1],
    llm=model,
    max_loops=1,
    autosave=True,  # persist agent state after each run
    state_save_file_type="json",
    # BUG FIX: this previously used agent_names[0], so both agents
    # autosaved to "due_diligence_agent.json" and clobbered each other's
    # state. Use this agent's own name instead.
    saved_state_path=f"{agent_names[1].lower().replace(' ', '_')}.json",
)
74 changes: 74 additions & 0 deletions whiterock/groq_llm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
from groq import Groq
from swarms import BaseLLM


class GroqChat(BaseLLM):
    """
    A client for interacting with the Groq chat-completions API.

    Args:
        groq_api_key (str, optional): API key for Groq. Defaults to None, in
            which case the Groq client falls back to its own configuration
            (e.g. the GROQ_API_KEY environment variable).
        system_prompt (str, optional): The system prompt to use for generating completions. Defaults to None.
        model_name (str, optional): The name of the model to use for generating completions. Defaults to "llama3-8b-8192".
        temperature (float, optional): The temperature parameter for generating completions. Defaults to 0.5.
        max_tokens (int, optional): The maximum number of tokens in the generated completions. Defaults to 1024.
        top_p (float, optional): The top-p parameter for generating completions. Defaults to 1.
        stop (str, optional): The stop sequence to use for generating completions. Defaults to None.
    """

    def __init__(
        self,
        groq_api_key: str = None,
        system_prompt: str = None,
        model_name: str = "llama3-8b-8192",
        temperature: float = 0.5,
        max_tokens: int = 1024,
        top_p: float = 1,
        stop: str = None,
        *args,
        **kwargs,
    ):
        # NOTE(review): BaseLLM.__init__ is not invoked here — confirm the
        # base class needs no initialization before relying on its features.
        self.groq_api_key = groq_api_key
        self.system_prompt = system_prompt
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.stop = stop

        # Any extra *args/**kwargs are forwarded to the Groq client
        # constructor (e.g. base_url, timeout).
        self.client = Groq(api_key=self.groq_api_key, *args, **kwargs)

    def run(self, task: str = None, *args, **kwargs):
        """
        Send ``task`` as the user message (with the configured system
        prompt) and return the assistant's reply text.

        Args:
            task (str, optional): The user prompt to complete.

        Returns:
            str: Content of the first completion choice.
        """
        output = self.client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": self.system_prompt,
                },
                {
                    "role": "user",
                    "content": task,
                },
            ],
            model=self.model_name,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            top_p=self.top_p,
            stop=self.stop,
            *args,
            **kwargs,
        )

        return output.choices[0].message.content


# Example usage. SECURITY: a real Groq API key was previously committed in
# this comment — it is exposed in git history and must be revoked. Load keys
# from the environment; never hard-code them.
#
# model = GroqChat(groq_api_key=os.getenv("GROQ_API_KEY"))
# out = model.run(task="What is the capital of France?")
# print(out)
115 changes: 115 additions & 0 deletions whiterock/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
from swarms import BaseSwarm, Agent
from typing import List
from swarms import Conversation
from whiterock.agents import principal_investor, due_diligence_agent
from whiterock.prompts import analyst_caller_agent
from whiterock.tools import call_api, fetch_transcription

class WhiteRock(BaseSwarm):
    """
    WhiteRock class represents a swarm of agents involved in a due diligence process.

    Each loop: place a phone call, transcribe it, run the due diligence
    agent over the accumulated history, then the principal investor agent.

    Args:
        agents (List[Agent]): Agents participating in the swarm, in order
            [due diligence agent, principal investor agent]. Defaults to
            None (the docstring previously claimed ``vc_agents``, which was
            incorrect).
        max_loops (int): Maximum number of loops the swarm will run. Defaults to 5.
        phone_number (str): Phone number used for making phone calls. Defaults to "+17866955339".
        phone_call_duration (int): Maximum duration of a phone call in seconds. Defaults to 160.
        vc_rules (str): Rules for the conversation. Defaults to None.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Attributes:
        agents (List[Agent]): List of agents participating in the swarm.
        max_loops (int): Maximum number of loops the swarm will run.
        phone_number (str): Phone number used for making phone calls.
        phone_call_duration (int): Maximum duration of a phone call in seconds.
        history (Conversation): Conversation history object.

    Methods:
        run(task: str, *args, **kwargs) -> Any: Runs the swarm and returns the output.
    """

    def __init__(
        self,
        agents: List[Agent] = None,
        max_loops: int = 5,
        phone_number: str = "+17866955339",
        phone_call_duration: int = 160,
        vc_rules: str = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.agents = agents
        self.max_loops = max_loops
        self.phone_number = phone_number
        self.phone_call_duration = phone_call_duration

        # Shared, auto-saved conversation history; every agent reads the
        # full history each turn via return_history_as_string().
        self.history = Conversation(
            time_enabled=True,
            auto_save=True,
            rules=vc_rules,
        )

    def run(self, task: str, *args, **kwargs):
        """
        Runs the swarm and returns the output.

        Args:
            task (str): The task to be performed by the swarm.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            Any: The output of the final principal-investor turn.
        """
        out = None

        for _ in range(self.max_loops):
            # Build the call prompt from the caller-agent prompt + task.
            call_prompt = f"{analyst_caller_agent()} {task}"

            # Place the call, then fetch its transcription.
            transcription = call_api(
                self.phone_number,
                max_duration=self.phone_call_duration,
                prompt=call_prompt,
            )
            out = fetch_transcription(
                transcription, max_duration=self.phone_call_duration
            )

            # NOTE(review): the raw transcription is logged under the
            # "Due Diligence Agent" role even though that agent has not
            # spoken yet — confirm this labeling is intended.
            self.history.add(
                role="Due Diligence Agent",
                content=out,
            )

            # Due Diligence Agent: analyze everything gathered so far.
            due_diligence_agent = self.agents[0]
            out = due_diligence_agent.run(
                self.history.return_history_as_string(), *args, **kwargs
            )
            print(out)
            self.history.add(
                role="Due Diligence Agent",
                content=out,
            )
            # Function call to mercury, add docs into a folder so rag can pick it up
            # Function call to rag, add perplexity agent to the rag agent so it can search the web for more information

            # Principal Investor Agent: review the due-diligence output.
            principal_investor_agent = self.agents[1]
            out = principal_investor_agent.run(
                self.history.return_history_as_string(), *args, **kwargs
            )
            print(out)
            self.history.add(
                role="Principal Investor Agent",
                content=out,
            )

        # Removed dead `loop` counter — the for-loop already bounds
        # iterations via max_loops.
        return out

Loading

0 comments on commit c02d86c

Please sign in to comment.