diff --git a/.env.example b/.env.example
deleted file mode 100644
index d90a499..0000000
--- a/.env.example
+++ /dev/null
@@ -1 +0,0 @@
-BLAND_API_KEY="sk-i6ka6bbbxna88roud95tn5tsbm91yjhk0g6a71tpd6zncpou8qa5hnq1a7lse8g769"
\ No newline at end of file
diff --git a/README.md b/README.md
index f945501..2d1512c 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,24 @@ The idea for WhiteRock emerged during a pivotal event that underscored the ineff
 ## Install
 `$ pip3 install whiterock`
 
+## Usage
+```python
+from whiterock.main import WhiteRock
+from whiterock.agents import due_diligence_agent, principal_investor
+
+# Instantiate the WhiteRock class
+whiterock = WhiteRock(
+    agents=[due_diligence_agent, principal_investor],
+    max_loops=5,
+    phone_number="+16505188709",  # or "+19729719060"
+    phone_call_duration=160,
+)
+
+# Run the WhiteRock class
+task = "Enter in your task"
+whiterock.run(task)
+```
+
 ## What It Does
 WhiteRock is designed to transform the way venture capital operates by automating the entire investment process. Here’s how it works:
 
diff --git a/chroma/chroma.sqlite3 b/chroma/chroma.sqlite3
new file mode 100644
index 0000000..860be32
Binary files /dev/null and b/chroma/chroma.sqlite3 differ
diff --git a/errors.txt b/errors.txt
new file mode 100644
index 0000000..e69de29
diff --git a/example.py b/example.py
new file mode 100644
index 0000000..ccb77cd
--- /dev/null
+++ b/example.py
@@ -0,0 +1,14 @@
+from whiterock.main import WhiteRock
+from whiterock.agents import due_diligence_agent, principal_investor
+
+# Instantiate the WhiteRock class
+whiterock = WhiteRock(
+    agents=[due_diligence_agent, principal_investor],
+    max_loops=5,
+    phone_number="+16505188709",  # or "+19729719060"
+    phone_call_duration=160,
+)
+
+# Run the WhiteRock class
+task = "Enter in your task"
+whiterock.run(task)
diff --git a/tools.py b/tools.py
deleted file mode 100644
index 0e6d9cd..0000000
--- a/tools.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-import requests
-from whiterock.prompts import analyst_caller_agent
-
-
-def call_api(
-    phone_number: str = "+17866955339",
-    prompt: str = None,
-    max_duration: int = 160,
-) -> None:
-    """
-    Makes an API call to create a new call using the Bland.ai API.
- """ - headers = {"Authorization": os.getenv("BLAND_API_KEY")} - - # Data - data = { - "phone_number": {phone_number}, - "from": None, - "task": prompt, - "model": "enhanced", - "language": "en", - "voice": "maya", - "voice_settings": {}, - "local_dialing": False, - "max_duration": max_duration, - "answered_by_enabled": False, - "wait_for_greeting": False, - "record": True, - "amd": False, - "interruption_threshold": 100, - "temperature": None, - "transfer_list": {"": ""}, - "metadata": {}, - "pronunciation_guide": [], - "start_time": None, - "request_data": {}, - "tools": [], - "webhook": None, - "calendly": {"url": "jdjd", "timezone": "America/Chicago"}, - } - - # API request - response = requests.post( - "https://api.bland.ai/v1/calls", json=data, headers=headers - ) - # Handle the response here - return response.json() - - -# Call the function -out = call_api( - prompt=analyst_caller_agent(), - phone_number=7866955339, - max_duration=160, -) -print(out) diff --git a/whiterock/agents.py b/whiterock/agents.py index e69de29..9c39125 100644 --- a/whiterock/agents.py +++ b/whiterock/agents.py @@ -0,0 +1,87 @@ +import os + +from dotenv import load_dotenv +from swarms import Agent + +from swarms import OpenAIChat +from whiterock.memory import ChromaDB +from whiterock.prompts import ( + PRINCIPAL_SYSTEM_PROMPT, + due_diligence_agent_system_prompt, +) + +# Load the environment variables +load_dotenv() + +# Memory +memory = ChromaDB( + output_dir="whiterock", + n_results=1, +) + +# GROQ API key +groq_api_key = os.getenv("GROQ_API_KEY") + +# GROQ LLM +model = OpenAIChat( + openai_api_key=os.getenv("OPENAI_API_KEY"), + max_tokens=3000, + temperature=0.2, +) + + +agent_names = [ + "Due Diligence Agent", + "Principal Investor Agent", +] + +system_prompts = [ + due_diligence_agent_system_prompt(), + PRINCIPAL_SYSTEM_PROMPT(), +] + + +due_diligence_agent = Agent( + agent_name=agent_names[0], + system_prompt=system_prompts[0], + agent_description=system_prompts[0], + llm=model, + max_loops=1, + autosave=True, + # dashboard=False, + # verbose=True, + # interactive=True, + # interactive=True, + state_save_file_type="json", + saved_state_path=f"{agent_names[0].lower().replace(' ', '_')}.json", + # docs_folder="data", # Folder of docs to parse and add to the agent's memory + # long_term_memory=memory, + # dynamic_temperature_enabled=True, + # pdf_path="docs/medical_papers.pdf", + # list_of_pdf=["docs/medical_papers.pdf", "docs/medical_papers_2.pdf"], + # docs=["docs/medicalx_papers.pdf", "docs/medical_papers_2.txt"], + # memory_chunk_size=2000, +) + + +principal_investor = Agent( + agent_name=agent_names[1], + system_prompt=system_prompts[1], + agent_description=system_prompts[1], + llm=model, + max_loops=1, + autosave=True, + # dashboard=False, + # verbose=True, + # interactive=True, + # interactive=True, + state_save_file_type="json", + saved_state_path=f"{agent_names[0].lower().replace(' ', '_')}.json", + # docs_folder="data", # Folder of docs to parse and add to the agent's memory + # long_term_memory=memory, + # dynamic_temperature_enabled=True, + # pdf_path="docs/medical_papers.pdf", + # list_of_pdf=["docs/medical_papers.pdf", "docs/medical_papers_2.pdf"], + # docs=["docs/medicalx_papers.pdf", "docs/medical_papers_2.txt"], + # memory_chunk_size=2000, +) diff --git a/whiterock/groq_llm.py b/whiterock/groq_llm.py new file mode 100644 index 0000000..0ecb2a6 --- /dev/null +++ b/whiterock/groq_llm.py @@ -0,0 +1,74 @@ +from groq import Groq +from swarms import BaseLLM + + +class GroqChat(BaseLLM): + """ + 
+    A client for interacting with the Groq API.
+
+    Args:
+        system_prompt (str, optional): The system prompt to use for generating completions. Defaults to None.
+        model_name (str, optional): The name of the model to use for generating completions. Defaults to "llama3-8b-8192".
+        temperature (float, optional): The temperature parameter for generating completions. Defaults to 0.5.
+        max_tokens (int, optional): The maximum number of tokens in the generated completions. Defaults to 1024.
+        top_p (float, optional): The top-p parameter for generating completions. Defaults to 1.
+        stop (str, optional): The stop sequence to use for generating completions. Defaults to None.
+    """
+
+    def __init__(
+        self,
+        groq_api_key: str = None,
+        system_prompt: str = None,
+        model_name: str = "llama3-8b-8192",
+        temperature: float = 0.5,
+        max_tokens: int = 1024,
+        top_p: float = 1,
+        stop: str = None,
+        *args,
+        **kwargs,
+    ):
+        self.groq_api_key = groq_api_key
+        self.system_prompt = system_prompt
+        self.model_name = model_name
+        self.temperature = temperature
+        self.max_tokens = max_tokens
+        self.top_p = top_p
+        self.stop = stop
+
+        self.client = Groq(api_key=self.groq_api_key, *args, **kwargs)
+
+    def run(self, task: str = None, *args, **kwargs):
+
+        output = self.client.chat.completions.create(
+            messages=[
+                {
+                    "role": "system",
+                    "content": self.system_prompt,
+                },
+                {
+                    "role": "user",
+                    "content": task,
+                },
+            ],
+            model=self.model_name,
+            temperature=self.temperature,
+            max_tokens=self.max_tokens,
+            top_p=self.top_p,
+            stop=self.stop,
+            *args,
+            **kwargs,
+        )
+
+        output = output.choices[0].message.content
+        return output
+
+
+# # Model
+# model = GroqChat(
+#     groq_api_key="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",
+#     system_prompt="Welcome to GroqChat! How can I assist you today?",
+# )
+
+# # Run
+# out = model.run(task="What is the capital of France?")
+# print(out)
diff --git a/whiterock/main.py b/whiterock/main.py
new file mode 100644
index 0000000..f444d1f
--- /dev/null
+++ b/whiterock/main.py
@@ -0,0 +1,115 @@
+from swarms import BaseSwarm, Agent
+from typing import List
+from swarms import Conversation
+from whiterock.agents import principal_investor, due_diligence_agent
+from whiterock.prompts import analyst_caller_agent
+from whiterock.tools import call_api, fetch_transcription
+
+class WhiteRock(BaseSwarm):
+    """
+    WhiteRock class represents a swarm of agents involved in a due diligence process.
+
+    Args:
+        agents (List[Agent]): List of agents participating in the swarm. Defaults to vc_agents.
+        max_loops (int): Maximum number of loops the swarm will run. Defaults to 5.
+        phone_number (str): Phone number used for making phone calls. Defaults to "+17866955339".
+        phone_call_duration (int): Maximum duration of a phone call in seconds. Defaults to 160.
+        vc_rules (str): Rules for the conversation. Defaults to None.
+        *args: Variable length argument list.
+        **kwargs: Arbitrary keyword arguments.
+
+    Attributes:
+        agents (List[Agent]): List of agents participating in the swarm.
+        max_loops (int): Maximum number of loops the swarm will run.
+        phone_number (str): Phone number used for making phone calls.
+        phone_call_duration (int): Maximum duration of a phone call in seconds.
+        history (Conversation): Conversation history object.
+
+    Methods:
+        run(task: str, *args, **kwargs) -> Any: Runs the swarm and returns the output.
+
+    """
+
+    def __init__(
+        self,
+        agents: List[Agent] = None,
+        max_loops: int = 5,
+        phone_number: str = "+17866955339",
+        phone_call_duration: int = 160,
+        vc_rules: str = None,
+        *args,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.agents = agents
+        self.max_loops = max_loops
+        self.phone_number = phone_number
+        self.phone_call_duration = phone_call_duration
+
+        # History
+        self.history = Conversation(
+            time_enabled=True,
+            auto_save=True,
+            rules=vc_rules,
+        )
+
+    def run(self, task: str, *args, **kwargs):
+        """
+        Runs the swarm and returns the output.
+
+        Args:
+            task (str): The task to be performed by the swarm.
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            Any: The output of the swarm.
+
+        """
+        loop = 0
+
+        for _ in range(self.max_loops):
+            # Combine strings into one
+            out = f"{analyst_caller_agent()} {task}"
+
+            call_id = call_api(
+                self.phone_number,
+                max_duration=self.phone_call_duration,
+                prompt=out,
+            )
+
+            out = fetch_transcription(call_id, max_duration=self.phone_call_duration)
+
+            self.history.add(
+                role="Due Diligence Agent",
+                content=out,
+            )
+
+            # Due Diligence Agent
+            due_diligence_agent = self.agents[0]
+            out = due_diligence_agent.run(
+                self.history.return_history_as_string(), *args, **kwargs
+            )
+            print(out)
+            self.history.add(
+                role="Due Diligence Agent",
+                content=out,
+            )
+            # Function call to mercury, add docs into a folder so rag can pick it up
+            # Function call to rag, add perplexity agent to the rag agent so it can search the web for more information
+
+            # Principal Investor Agent
+            principal_investor_agent = self.agents[1]
+            out = principal_investor_agent.run(
+                self.history.return_history_as_string(), *args, **kwargs
+            )
+            print(out)
+            self.history.add(
+                role="Principal Investor Agent",
+                content=out,
+            )
+
+            loop += 1
+
+        return out
+
diff --git a/whiterock/memory.py b/whiterock/memory.py
new file mode 100644
index 0000000..0f299b3
--- /dev/null
+++ b/whiterock/memory.py
@@ -0,0 +1,175 @@
+import logging
+import os
+import uuid
+from typing import Optional
+
+import chromadb
+from dotenv import load_dotenv
+
+from swarms.utils.data_to_text import data_to_text
+from swarms.utils.markdown_message import display_markdown_message
+from swarms.memory.base_vectordb import BaseVectorDatabase
+
+# Load environment variables
+load_dotenv()
+
+
+# Results storage using local ChromaDB
+class ChromaDB(BaseVectorDatabase):
+    """
+
+    ChromaDB database
+
+    Args:
+        metric (str): The similarity metric to use.
+        output (str): The name of the collection to store the results in.
+        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
+        n_results (int, optional): The number of results to retrieve. Defaults to 2.
+
+    Methods:
+        add: _description_
+        query: _description_
+
+    Examples:
+        >>> chromadb = ChromaDB(
+        >>>     metric="cosine",
+        >>>     output="results",
+        >>>     llm="gpt3",
+        >>>     openai_api_key=OPENAI_API_KEY,
+        >>> )
+        >>> chromadb.add(task, result, result_id)
+    """
+
+    def __init__(
+        self,
+        metric: str = "cosine",
+        output_dir: str = "swarms",
+        limit_tokens: Optional[int] = 1000,
+        n_results: int = 3,
+        docs_folder: str = None,
+        verbose: bool = False,
+        *args,
+        **kwargs,
+    ):
+        self.metric = metric
+        self.output_dir = output_dir
+        self.limit_tokens = limit_tokens
+        self.n_results = n_results
+        self.docs_folder = docs_folder
+        self.verbose = verbose
+
+        # Disable ChromaDB logging
+        if verbose:
+            logging.getLogger("chromadb").setLevel(logging.INFO)
+
+        # Create Chroma collection
+        chroma_persist_dir = "chroma"
+        chroma_client = chromadb.PersistentClient(
+            settings=chromadb.config.Settings(
+                persist_directory=chroma_persist_dir,
+            ),
+            *args,
+            **kwargs,
+        )
+
+        # Create ChromaDB client
+        self.client = chromadb.Client()
+
+        # Create Chroma collection
+        self.collection = chroma_client.get_or_create_collection(
+            name=output_dir,
+            metadata={"hnsw:space": metric},
+            *args,
+            **kwargs,
+        )
+        display_markdown_message(
+            "ChromaDB collection created:"
+            f" {self.collection.name} with metric: {self.metric} and"
+            f" output directory: {self.output_dir}"
+        )
+
+        # If docs
+        if docs_folder:
+            display_markdown_message(
+                f"Traversing directory: {docs_folder}"
+            )
+            self.traverse_directory()
+
+    def add(
+        self,
+        document: str,
+        *args,
+        **kwargs,
+    ):
+        """
+        Add a document to the ChromaDB collection.
+
+        Args:
+            document (str): The document to be added.
+            condition (bool, optional): The condition to check before adding the document. Defaults to True.
+
+        Returns:
+            str: The ID of the added document.
+        """
+        try:
+            doc_id = str(uuid.uuid4())
+            self.collection.add(
+                ids=[doc_id],
+                documents=[document],
+                *args,
+                **kwargs,
+            )
+            print("-----------------")
+            print("Document added successfully")
+            print("-----------------")
+            return doc_id
+        except Exception as e:
+            raise Exception(f"Failed to add document: {str(e)}")
+
+    def query(
+        self,
+        query_text: str,
+        *args,
+        **kwargs,
+    ):
+        """
+        Query documents from the ChromaDB collection.
+
+        Args:
+            query (str): The query string.
+            n_docs (int, optional): The number of documents to retrieve. Defaults to 1.
+
+        Returns:
+            dict: The retrieved documents.
+        """
+        try:
+            docs = self.collection.query(
+                query_texts=[query_text],
+                n_results=self.n_results,
+                *args,
+                **kwargs,
+            )["documents"]
+            return docs[0]
+        except Exception as e:
+            raise Exception(f"Failed to query documents: {str(e)}")
+
+    def traverse_directory(self):
+        """
+        Traverse through every file in the given directory and its subdirectories,
+        and return the paths of all files.
+        Parameters:
+        - directory_name (str): The name of the directory to traverse.
+        Returns:
+        - list: A list of paths to each file in the directory and its subdirectories.
+        """
+        added_to_db = False
+
+        for root, dirs, files in os.walk(self.docs_folder):
+            for file in files:
+                file = os.path.join(root, file)
+                _, ext = os.path.splitext(file)
+                data = data_to_text(file)
+                added_to_db = self.add(str(data))
+                print(f"{file} added to Database")
+
+        return added_to_db
diff --git a/whiterock/prompts.py b/whiterock/prompts.py
index a661e3f..a1a5d47 100644
--- a/whiterock/prompts.py
+++ b/whiterock/prompts.py
@@ -67,7 +67,9 @@ def analyst_caller_agent():
     ## Conclusion
     WhiteStone represents a pioneering step in the evolution of venture capital. By leveraging AI and automation, we aim to create a more efficient, scalable, and impactful investment process. Our journey is just beginning, and we are excited to continue pushing the boundaries of what’s possible in the world of venture capital.
 
-    Only speak Italian, don't speak English in any way.
+
+    Next steps:
+    A founder will pitch you on their startup. Analyze the founder, growth potential, and market fit to make an informed investment decision.
 
     """
     return task
@@ -216,3 +218,132 @@
     As the Due Diligence Agent for WhiteStone, your role is to meticulously analyze potential investment opportunities across AI, deeptech, manufacturing, energy, and impactful B2B SaaS sectors. By systematically assessing financials, growth metrics, market dynamics, and risks, you provide critical insights that drive informed investment decisions. Your reports should be thorough, data-driven, and clearly structured to support WhiteStone’s mission of making superior investment choices.
 
     """
+
+
+def PRINCIPAL_SYSTEM_PROMPT():
+    return """
+
+    ### System Prompt for WhiteStone VC Fund Principal Investor Agent
+
+    #### System Role:
+    You are the Principal Investor Agent for WhiteStone, a venture capital fund specializing in investments in AI, deeptech, manufacturing, energy, and impactful B2B SaaS. Your primary responsibility is to make the final investment decisions based on detailed analysis reports and your assessment of the startup founders' commitment and capabilities. Your goal is to select startups with stellar founders who demonstrate exceptional dedication and potential for success.
+
+    #### Objectives:
+    - Evaluate detailed analysis reports from the Due Diligence Agent.
+    - Assess the founders' commitment, vision, and capability to execute their business plans.
+    - Make informed investment decisions that align with WhiteStone's strategic goals.
+    - Justify each investment decision with clear, logical reasoning based on quantitative and qualitative factors.
+
+    #### Guidelines:
+    - Use comprehensive financial and market analysis to inform your decisions.
+    - Prioritize startups with committed, visionary founders who have a track record of execution.
+    - Ensure that investment decisions are data-driven, but also consider qualitative aspects such as leadership and team dynamics.
+    - Document your reasoning process thoroughly to provide transparency and accountability.
+
+    ### Detailed Reasoning Approach
+
+    1. **Review Detailed Analysis Reports**:
+       - **Objective**: To gain a comprehensive understanding of the startup's financial health, growth potential, market position, and associated risks.
+       - **Method**: Carefully review the analysis reports provided by the Due Diligence Agent, focusing on key financial metrics, growth indicators, market analysis, and risk assessment.
+       - **Reasoning**: A thorough review ensures that all critical aspects of the startup are considered before making an investment decision.
+
+    2. **Evaluate Founders' Commitment and Vision**:
+       - **Objective**: To assess the founders' dedication, vision, and ability to lead the startup to success.
+       - **Method**: Analyze the founders' background, track record, passion for the problem they are solving, and their long-term vision for the startup.
+       - **Reasoning**: Founders' commitment and vision are crucial for overcoming challenges and driving the startup towards success. Look for evidence of resilience, innovative thinking, and strong leadership.
+
+    3. **Financial and Growth Metrics Analysis**:
+       - **Objective**: To evaluate the startup's financial performance and growth trajectory.
+       - **Method**: Examine key financial metrics such as revenue growth rate, profitability, cash flow, and burn rate. Analyze growth metrics including customer acquisition cost (CAC), lifetime value (LTV), and monthly recurring revenue (MRR).
+       - **Reasoning**: Financial health and strong growth metrics are indicators of a startup's potential for scalability and profitability. Focus on startups with sustainable growth and sound financial management.
+
+    4. **Market Potential and Competitive Analysis**:
+       - **Objective**: To understand the market dynamics and competitive landscape.
+       - **Method**: Assess market size, growth rate, trends, and the competitive environment. Identify the startup's unique value proposition and competitive advantages.
+       - **Reasoning**: Investing in startups with significant market potential and a strong competitive position increases the likelihood of high returns. Consider the startup's ability to capture market share and sustain its competitive edge.
+
+    5. **Risk Assessment and Mitigation**:
+       - **Objective**: To identify potential risks and evaluate the startup's risk management strategies.
+       - **Method**: Analyze the identified risks, including regulatory, technological, market, and execution risks. Review the startup's risk mitigation plans.
+       - **Reasoning**: Understanding and managing risks is essential for making informed investment decisions. Prioritize startups with robust risk management frameworks and contingency plans.
+
+    6. **Final Decision and Documentation**:
+       - **Objective**: To make the final investment decision and document the rationale.
+       - **Method**: Synthesize the insights from the analysis reports and founder evaluation. Make the investment decision based on a balanced consideration of quantitative and qualitative factors. Document the decision-making process and provide a detailed justification.
+       - **Reasoning**: A well-documented decision-making process ensures transparency and accountability, facilitating better future evaluations and continuous improvement.
+
+    ### Multi-shot Examples of Data Analysis and Decision Making
+
+    #### Example 1: Financial and Growth Metrics Analysis
+
+    **Input**: Financial and growth metrics of an AI startup.
+
+    **Output**:
+    ```
+    **Investment Decision Report**
+
+    **Startup**: AI Innovations Ltd.
+
+    **Financial Metrics**:
+    - **Revenue Growth**: $1.5M (2019) → $2.8M (2020) → $5.6M (2021) → $11.2M (2022)
+    - **Growth Rate**: Average annual growth rate of 95%
+    - **Gross Margin**: 70%
+    - **EBITDA**: Positive since 2021, current EBITDA is $1.2M
+    - **Burn Rate**: $0.5M per month, well-managed
+
+    **Growth Metrics**:
+    - **MRR**: Increased from $100K (Jan 2022) to $250K (Dec 2022)
+    - **CAC**: $2,500
+    - **LTV**: $20,000
+    - **Churn Rate**: 5% annually
+
+    **Conclusion**: AI Innovations Ltd. demonstrates strong financial health and impressive growth metrics, indicating high scalability and potential profitability. The low churn rate and favorable LTV to CAC ratio further strengthen the investment case.
+    ```
+
+    #### Example 2: Evaluating Founders' Commitment and Vision
+
+    **Input**: Founders' background and vision for a deeptech startup.
+
+    **Output**:
+    ```
+    **Investment Decision Report**
+
+    **Startup**: DeepTech Pioneers
+
+    **Founders' Background**:
+    - **Founder 1**: PhD in Computer Science with 10 years of experience in AI research, previously led a successful AI startup acquired by a major tech firm.
+    - **Founder 2**: MBA with extensive experience in product management and business development, known for driving growth in early-stage startups.
+
+    **Vision and Commitment**:
+    - **Vision**: To revolutionize the AI industry with proprietary deep learning algorithms that significantly enhance performance and efficiency.
+    - **Commitment**: Demonstrated resilience through previous startup experiences, strong passion for AI innovation, and a clear, long-term vision for DeepTech Pioneers.
+
+    **Conclusion**: The founders of DeepTech Pioneers exhibit exceptional dedication and vision. Their strong academic and professional backgrounds, combined with their passion for AI, make them well-equipped to lead the startup to success.
+    ```
+
+    #### Example 3: Market Potential and Competitive Analysis
+
+    **Input**: Market data for a B2B SaaS company.
+
+    **Output**:
+    ```
+    **Investment Decision Report**
+
+    **Startup**: SaaS Solutions Inc.
+
+    **Market Analysis**:
+    - **Market Size**: $500M
+    - **Growth Rate**: 20% annually
+    - **Trends**: Increasing adoption of cloud-based solutions, growing demand for automation in business processes.
+
+    **Competitive Landscape**:
+    - **Main Competitors**: TechGiant Inc., InnovateTech, FutureAI
+    - **Competitive Advantage**: Unique AI-driven features, strong IP portfolio, strategic partnerships with industry leaders.
+
+    **Conclusion**: SaaS Solutions Inc. operates in a rapidly growing market with significant potential. The startup's unique value proposition and strong competitive position enhance its prospects for capturing market share and achieving sustainable growth.
+    ```
+
+    ### Summary
+
+    As the Principal Investor Agent for WhiteStone, your role is to make critical investment decisions based on detailed analysis reports and a thorough assessment of startup founders' commitment and vision. By systematically evaluating financial and growth metrics, market potential, and risks, you ensure that investment decisions are data-driven and well-justified. Prioritizing startups with stellar founders who demonstrate exceptional dedication and potential for success aligns with WhiteStone's strategic goals, ultimately driving superior investment outcomes.
+    """
diff --git a/whiterock/tools.py b/whiterock/tools.py
new file mode 100644
index 0000000..4a9e7eb
--- /dev/null
+++ b/whiterock/tools.py
@@ -0,0 +1,143 @@
+import time
+import os
+from typing import Optional
+
+import requests
+from dotenv import load_dotenv
+
+from whiterock.prompts import analyst_caller_agent
+
+load_dotenv()
+
+
+def fetch_bank_statements():
+    url = "https://api.mercury.com/api/v1/account/id/statements"
+
+    headers = {
+        "accept": "application/json",
+        "Authorization": f"Bearer {os.getenv('MERCURY_API_TOKEN')}",
+    }
+
+    response = requests.get(url, headers=headers)
+
+    return response.text
+
+
+def fetch_data_to_file(
+    file_path: str, token: Optional[str] = None
+) -> None:
+    url = "https://api.mercury.com/api/v1/accounts"
+
+    headers = {
+        "accept": "application/json",
+        "Authorization": f"Bearer {token or os.getenv('MERCURY_API_TOKEN')}",
+    }
+
+    response = requests.get(url, headers=headers)
+    print(response)
+
+    if response.status_code == 200:
+        with open(file_path, "w") as file:
+            file.write(response.text)
+        print("Data fetched and saved successfully.")
+    else:
+        print(
+            f"Failed to fetch data. Status code: {response.status_code}"
+        )
+
+
+# # Example usage
+# fetch_data_to_file("mercury_accounts.json")
+# fetch_data_to_file("mercury_accounts.json")
+
+
+def call_api(
+    phone_number: str = "+17866955339",
+    prompt: str = analyst_caller_agent(),
+    max_duration: int = 160,
+) -> str:
+    """
+    Makes an API call to create a new call using the Bland.ai API.
+    """
+    headers = {"Authorization": os.getenv("BLAND_API_KEY")}
+
+    # Data
+    # Data
+    data = {
+        "phone_number": phone_number,  # "+16693249705",
+        "from": None,
+        "task": prompt,
+        "model": "enhanced",
+        "language": "en",
+        "voice": "maya",
+        "voice_settings": {},
+        "local_dialing": False,
+        "max_duration": max_duration,
+        "answered_by_enabled": False,
+        "wait_for_greeting": False,
+        "record": False,
+        "amd": False,
+        "interruption_threshold": 100,
+        "temperature": None,
+        "transfer_list": {},
+        "metadata": {},
+        "pronunciation_guide": [],
+        "start_time": None,
+        "request_data": {},
+        "tools": [],
+        "webhook": None,
+        "calendly": {},
+    }
+
+    # API request
+    response = requests.post(
+        "https://api.bland.ai/v1/calls", json=data, headers=headers
+    )
+
+    output = response.json()
+    return output.get("call_id")
+
+
+def fetch_transcription(
+    call_id: str, max_duration: int = None
+) -> str:
+    time.sleep(max_duration or 0)
+    headers = {"Authorization": os.getenv("BLAND_API_KEY")}
+
+    # Fetch the transcript
+    url = f"https://api.bland.ai/v1/calls/{call_id}"
+
+    response = requests.get(url, headers=headers)
+
+    output = response.json()
+
+    # Get the transcript from the output
+    # "transcripts": [
+    #     {
+    #         "id": 7395694,
+    #         "created_at": "2024-04-27T23:51:28.568385+00:00",
+    #         "text": "Hello?",
+    #         "user": "user",
+    #         "c_id": "d9cce3f3-23cf-4fa7-b62c-8be8119b8715",
+    #         "status": null,
+    #         "transcript_id": null
+    #     },
+    transcript = output.get("transcripts")
+
+    # Create a loop to get all the transcripts
+    if transcript:
+        return "\n".join([t.get("text") for t in transcript])
+    else:
+        return "No transcripts found."
+
+
+# # Call the function
+# out = call_api(
+#     prompt=analyst_caller_agent(),
+#     phone_number="+17866955339",
+#     max_duration=160,
+# )
+
+# # Transcription
+# transcription = fetch_transcription(out)
+# print(transcription)