Merge branch 'main' of https://github.com/ronidas39/LLMtutorial
Showing 22 changed files with 337 additions and 21 deletions.
@@ -0,0 +1,21 @@
from langchain.agents import AgentType, initialize_agent
from langchain.tools import Tool
from langchain_openai import ChatOpenAI
import requests


def getPrice(input):
    # look up the asset on the CoinCap v2 API and return its USD price
    url = "https://api.coincap.io/v2/assets/" + input.lower()
    response = requests.get(url)
    price = response.json()["data"]["priceUsd"]
    return price


llm = ChatOpenAI(model="gpt-4", temperature=0.0)
apicall = Tool(
    name="getCryptoPrice",
    func=getPrice,
    description="use to get the price for any given crypto from user input"
)
tools = [apicall]
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
print(agent.run("what is the price of cardano"))
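The tool's HTTP call can be checked on its own before it is handed to the agent; a minimal sketch, assuming the public CoinCap v2 endpoint is reachable and the asset id (e.g. "cardano") is valid:

```python
import requests

def get_price(asset_id: str) -> str:
    # CoinCap v2 returns {"data": {"priceUsd": "...", ...}} for a known asset id
    response = requests.get(f"https://api.coincap.io/v2/assets/{asset_id.lower()}", timeout=10)
    response.raise_for_status()
    return response.json()["data"]["priceUsd"]

print(get_price("cardano"))  # prints the current USD price as a string
```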
@@ -1,28 +1,38 @@
from langchain_openai import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from langchain.tools import Tool
from openai import OpenAI
import requests
client = OpenAI()
import streamlit as st

response = client.images.generate(
    model="dall-e-3",
    prompt="a tattoo design using some motivation quotes",
    size="1024x1024",
    quality="hd",
    n=1,
)
client = OpenAI()

image_url = response.data[0].url

st.set_page_config(page_title="Design anything")
st.header("write anything you want to design")
input = st.text_input("enter your thoughts")

def genImage(input):
    # generate an image with DALL-E 3 from the user's prompt and return its URL
    response = client.images.generate(
        model="dall-e-3",
        prompt=input,
        size="1024x1024",
        quality="hd",
        n=1
    )
    url = response.data[0].url
    return url

# Use the requests library to fetch the image content
response = requests.get(image_url)
llm = ChatOpenAI(model="gpt-4", temperature=0.0)
design = Tool(
    name="generateImage",
    func=genImage,
    description="use to generate image from the generateImage tool"
)
tools = [design]
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
if st.button("Submit", type="primary"):
    if input is not None:
        response = agent.run(input)
        url = "https://" + response.split("https://")[1].replace(")", "")
        st.image(url, caption=input)

# Check if the request was successful
if response.status_code == 200:
    # Open a file in binary write mode
    with open("siamese_cat_image.png", "wb") as file:
        # Write the content of the response to the file
        file.write(response.content)
    print("Image successfully saved.")
else:
    print("Failed to fetch image.")
@@ -0,0 +1,30 @@
from langchain.agents import initialize_agent, AgentType, load_tools
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
import os
os.environ["SERPAPI_API_KEY"]="b80da0e0471a9b4d198b7accd612fce298addb1036c9cc73ee65c2307de4aee9"
tools = load_tools(["serpapi"])

ts = """
You are an intelligent search master and analyst who can search the internet using the serpapi tool and analyse any product to find the brand of the product, name of the product,
product description, price and rating between 1-5 based on your own analysis.
Take the input below delimited by triple backticks and use it to search and analyse using the serpapi tool
input: ```{input}```
then based on the input you format the output as JSON with the following keys:
brand_name
product_name
description
price
rating
"""
pt = ChatPromptTemplate.from_template(ts)

llm = ChatOpenAI(model="gpt-4", temperature=0.0)

agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)

pi = pt.format_messages(input="best luxury watch in uae")

pa_response = agent.run(pi)
print(type(pa_response))
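`agent.run` returns a plain string here, which is why the script only prints its type; if the model followed the JSON instruction in the prompt, that string can be loaded into a dict downstream. A sketch, assuming the keys named in the template are present:

```python
import json

try:
    product = json.loads(pa_response)  # works only if the model emitted bare JSON
    print(product["brand_name"], product["price"])
except json.JSONDecodeError:
    print("model did not return clean JSON:", pa_response)
```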
@@ -0,0 +1,29 @@
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain_openai import ChatOpenAI
import os
os.environ["SERPAPI_API_KEY"]="4132eb5fa197a54daf77381f7441a4b44c916b7e39e58cf4221f6797f29d083d"
tools = load_tools(["serpapi"])
llm = ChatOpenAI(model="gpt-4", temperature=0.0)
brand_name = ResponseSchema(name="brand_name", description="this is the brand of the product")
product_name = ResponseSchema(name="product_name", description="this is the product name")
description = ResponseSchema(name="description", description="this is a short description of the product")
product_price = ResponseSchema(name="price", description="this is a number, represents the price of the product")
product_rating = ResponseSchema(name="rating", description="this is a whole integer, this gives the rating between 1-10")
response_schema = [brand_name, product_name, description, product_price, product_rating]
output_parser = StructuredOutputParser.from_response_schemas(response_schema)
format_instruction = output_parser.get_format_instructions()
ts = """
You are an intelligent search master and analyst who can search the internet using the serpapi tool and analyse any product to find the brand of the product, name of the product,
product description, price and rating between 1-5 based on your own analysis.
Take the input below delimited by triple backticks and use it to search and analyse using the serpapi tool
input: ```{input}```
{format_instruction}
"""
prompt = ChatPromptTemplate.from_template(ts)
fs = prompt.format_messages(input="best android phone in India", format_instruction=format_instruction)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
response = agent.run(fs)
output = output_parser.parse(response)
print(output["brand_name"], output["product_name"])
@@ -0,0 +1,35 @@
from langchain_openai import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate, PromptTemplate
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.chains import LLMChain

llm = ChatOpenAI(model="gpt-4", temperature=0.0)

tagline = ResponseSchema(
    name="tagline", description="generated tagline for the input company description"
)
rating = ResponseSchema(
    name="rating", description="this is a whole number, generated rating between 1-100 for the input company description"
)
rs = [tagline, rating]
output_parser = StructuredOutputParser.from_response_schemas(rs)
format_instruction = output_parser.get_format_instructions()
ts = """
you are a master at suggesting a unique tagline for a company based on an input description.
take the company description below delimited by triple backticks and use it to create the unique tagline.
input description: ```{input}```
then based on the input you should create a tagline and a popularity score for the generated tagline between 1-100 based on your knowledge and analysis.
{format_instruction}
"""
prompt = ChatPromptTemplate(
    messages=[
        HumanMessagePromptTemplate.from_template(ts)
    ],
    input_variables=["input"],
    partial_variables={
        "format_instruction": format_instruction},
    output_parser=output_parser
)
chain = LLMChain(llm=llm, prompt=prompt)
response = chain.predict_and_parse(input="this company makes cool and trendy watches for indian youth")
print(response["tagline"])
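`predict_and_parse` is deprecated in newer LangChain releases; an equivalent two-step sketch with the same chain and parser, predicting first and then parsing the raw text explicitly:

```python
# same result without predict_and_parse: predict raw text, then parse it with the StructuredOutputParser
raw = chain.predict(input="this company makes cool and trendy watches for indian youth")
parsed = output_parser.parse(raw)
print(parsed["tagline"], parsed["rating"])
```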
@@ -0,0 +1,108 @@
import arxiv, os, glob
import streamlit as st
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
from langchain.prompts import ChatPromptTemplate

path = "C:\\Users\\welcome\\OneDrive\\Documents\\GitHub\\LLMtutorial\\tutorial41\\output\\"

if "last_selected_option" not in st.session_state:
    st.session_state["last_selected_option"] = None
if "docs_processed" not in st.session_state:
    st.session_state["docs_processed"] = False
if "retriever" not in st.session_state:
    st.session_state["retriever"] = None

llm = ChatOpenAI(model="gpt-4", temperature=0.0, max_tokens=1024)
template = """
Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

def download_parse(selected_option):
    # download the 15 most recent arXiv papers for the chosen domain into the output folder
    client = arxiv.Client()
    search = arxiv.Search(
        query=selected_option,
        max_results=15,
        sort_by=arxiv.SortCriterion.SubmittedDate
    )
    results = client.results(search)
    for result in results:
        try:
            result.download_pdf(dirpath=path)
        except Exception as e:
            print(result)

def setoutput(input_text, retriever):
    # retrieval-augmented chain: fetch context, fill the prompt, call the LLM, return plain text
    chain = (RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
             | prompt
             | llm
             | StrOutputParser()
             )
    result = chain.invoke(input_text)
    return result

st.title("Multi Specialty Research Assistant")
col1, col2 = st.columns(2)
with col1:
    st.header("SELECT YOUR DOMAIN FOR RESEARCH")
    options = ["healthcare", "mathematics", "physics", "chemistry", "AI", "computer science", "space research", "quantum computing"]
    selected_option = st.selectbox("choose your domain for research", options, index=0, key="select_option")
    if selected_option:
        if selected_option != st.session_state["last_selected_option"]:
            st.session_state["docs_processed"] = False
            st.session_state["last_selected_option"] = selected_option
    if selected_option and not st.session_state["docs_processed"]:
        # clear previously downloaded PDFs before fetching papers for the new domain
        files = glob.glob(path + "*.*")
        for file in files:
            os.remove(file)
        download_parse(selected_option)
        loader = DirectoryLoader(path=path, glob="./*.pdf", loader_cls=PyPDFLoader)
        docs = []
        try:
            docs = loader.load()
        except Exception as e:
            print(f"error loading docs: {e}")
        full_text = ""
        for doc in docs:
            full_text += doc.page_content
        # drop empty lines before chunking
        lines = full_text.splitlines()
        non_empty_lines = []
        for line in lines:
            if line:
                non_empty_lines.append(line)
        full_text = "\n".join(non_empty_lines)
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=50)
        doc_chunks = text_splitter.create_documents([full_text])
        vs = FAISS.from_documents(documents=doc_chunks, embedding=OpenAIEmbeddings())
        retriever = vs.as_retriever()
        vs.save_local("research_index")
        st.session_state["docs_processed"] = True
        st.session_state["retriever"] = retriever
        st.success("Documents are processed and stored into vector db")
    input_text = st.text_area("User Question Section", f"ask question related to topic {selected_option}", key="input_text")
    if st.button("Submit", type="primary"):
        if st.session_state["retriever"] is not None:
            result = setoutput(input_text, st.session_state["retriever"])
            with col2:
                st.header("OUTPUT SECTION")
                st.write(result)
    else:
        with col2:
            st.header("OUTPUT SECTION")
            st.write("your output will be generated by AI once you hit the submit button")
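Because the FAISS index is persisted with `save_local("research_index")`, a later session could reload it instead of re-downloading and re-embedding the papers; a sketch, assuming the same embedding model and a LangChain version whose `load_local` accepts the `allow_dangerous_deserialization` flag:

```python
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

# reload the persisted index; newer langchain releases require the flag
# because the index metadata is stored as a pickle
vs = FAISS.load_local("research_index", OpenAIEmbeddings(),
                      allow_dangerous_deserialization=True)
retriever = vs.as_retriever()
```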
@@ -0,0 +1,17 @@
import requests

API_KEY = "cm9uaWRhczA0MTk4N0BnbWFpbC5jb20:vU6fHQuEgIZK6zzLY8QK2"

def download_video(id):
    # fetch the finished D-ID talk and return the URL of the rendered video
    url = "https://api.d-id.com/talks/" + id

    headers = {
        "accept": "application/json",
        "authorization": f"Basic {API_KEY}"
    }

    response = requests.get(url, headers=headers)
    print(response.json())
    url = response.json()["result_url"]

    return url
@@ -0,0 +1,33 @@
import requests

API_KEY = "cm9uaWRhczA0MTk4N0BnbWFpbC5jb20:vU6fHQuEgIZK6zzLY8QK2"

url = "https://api.d-id.com/talks"

def genvideo(img_url, summary, v_id):
    # submit a talk job to D-ID: the avatar image, the text to speak and the Microsoft voice id
    payload = {
        "source_url": img_url,
        "script": {
            "type": "text",
            "input": summary,
            "provider": {
                "type": "microsoft",
                "voice_id": v_id,
                "voice_config": {
                    "style": "Default"
                }
            }
        }
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": f"Basic {API_KEY}"
    }

    response = requests.post(url, json=payload, headers=headers)

    # return the talk id so the caller can download the video once rendering finishes
    id = response.json()["id"]
    print(id)
    return id
@@ -0,0 +1,33 @@
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_community.tools import DuckDuckGoSearchResults
from langchain.docstore.document import Document
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
from genvideo import genvideo
from downloadvideo import download_video
import time
llm = ChatOpenAI(model="gpt-4", temperature=0.0)
ts = """
you are a news anchor for a global news channel, with this context generate a concise summary of the following
{text}
"""
pt = PromptTemplate(template=ts, input_variables=["text"])
st.set_page_config(page_title="24/7 NEWS CHANNEL POWERED BY AI DRIVEN NEWS ANCHOR")
st.header("what you want to hear and watch")
qsn = st.text_area("enter your query")
search = DuckDuckGoSearchResults(backend="news")
if st.button("Submit", type="primary"):
    if qsn is not None:
        result = search.run(qsn)
        data = result.replace("[snippet: ", "")
        data = data[:-1]
        # wrap the cleaned search results in a single Document for the summarize chain
        docs = [Document(page_content=data)]
        chain = load_summarize_chain(llm, chain_type="stuff", prompt=pt)
        summary = chain.run(docs)
        id = genvideo("https://clips-presenters.d-id.com/lana/uXbrIxQFjr/kzlKYBZ2wc/image.png", summary, "en-US-JaneNeural")
        time.sleep(100)
        url = download_video(id)
        st.video(url)
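The fixed `time.sleep(100)` either waits longer than needed or returns before D-ID has finished rendering; a polling sketch (hypothetical helper; it only assumes that the `GET /talks/{id}` response eventually carries `result_url`, which `downloadvideo.py` already relies on):

```python
import time
import requests

def wait_for_video(talk_id: str, api_key: str, timeout_s: int = 300) -> str:
    # poll the talk until D-ID reports a result_url, instead of sleeping a fixed 100 seconds
    headers = {"accept": "application/json", "authorization": f"Basic {api_key}"}
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        body = requests.get(f"https://api.d-id.com/talks/{talk_id}", headers=headers).json()
        if body.get("result_url"):
            return body["result_url"]
        time.sleep(5)
    raise TimeoutError(f"talk {talk_id} did not finish within {timeout_s} seconds")
```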