Commit

tutorial126
ronidas39 committed Dec 15, 2024
1 parent ade2c42 commit e6e7bb2
Showing 19 changed files with 18,250 additions and 2 deletions.
Binary file modified .DS_Store
20 changes: 20 additions & 0 deletions tutorial125/main.py
@@ -0,0 +1,20 @@
from langchain_openai import ChatOpenAI
import asyncio
from browser_use import Agent
from browser_use.controller.service import Controller
import streamlit as st

llm = ChatOpenAI(model="gpt-4o")
st.title("CHAT WITH ANY WEBSITE")
user_input = st.text_input("enter your question")

async def searchWeb(task):
    # run the browser-use agent on the user's task and return its result object
    agent = Agent(task=task, llm=llm, controller=Controller(keep_open=False, headless=False))
    result = await agent.run()
    return result

if user_input:
    btn = st.button("submit")
    if btn:
        result = asyncio.run(searchWeb(user_input))
        st.write(result.final_result())
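For reference, a minimal command-line sketch of the same browser_use flow used in main.py above (the Agent, Controller, run(), and final_result() calls mirror the Streamlit app; the script name cli_demo.py and the example invocation are only illustrative):

import asyncio
import sys

from browser_use import Agent
from browser_use.controller.service import Controller
from langchain_openai import ChatOpenAI

async def main(task):
    # same agent construction as main.py, just without the Streamlit UI
    llm = ChatOpenAI(model="gpt-4o")
    agent = Agent(task=task, llm=llm, controller=Controller(keep_open=False, headless=False))
    result = await agent.run()
    print(result.final_result())

if __name__ == "__main__":
    # usage (illustrative): python cli_demo.py "find top 10 mobiles under 25000 from amazon.in"
    asyncio.run(main(sys.argv[1]))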
2 changes: 2 additions & 0 deletions tutorial125/qsn.txt
@@ -0,0 +1,2 @@
find the best reviewed whey protein from healthkart.com and return the results with details
find the top 10 mobiles under 25000 from amazon.in based on reviews
Binary file added tutorial125/tutorial125.pptx
31 changes: 31 additions & 0 deletions tutorial126/app.py
@@ -0,0 +1,31 @@
import chromadb
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
from openai import OpenAI
import streamlit as st

client = OpenAI()
st.title("AYURVEDA WITH AI")
query = st.text_input("ask your question")

# reconnect to the Chroma collection persisted by genIndex.py
db = chromadb.PersistentClient(path="./db")
cc = db.get_or_create_collection("tutorial126")
vs = ChromaVectorStore(chroma_collection=cc)
index = VectorStoreIndex.from_vector_store(vector_store=vs)

if query:
    btn = st.button("submit")
    if btn:
        # retrieve the 5 most similar chunks and concatenate them into one context string
        retriever = index.as_retriever(similarity_top_k=5)
        nodes = retriever.retrieve(query)
        docs = ""
        for node in nodes:
            docs = docs + node.text
        # st.write(docs)
        system_prompt = f"""You are an intelligent AI assistant with expertise in Ayurveda who writes well-crafted articles. Use only {docs} as context to answer the user's question; do not use anything other than this context. Keep the writing professional and detailed, with headers and bullets."""
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": query}
            ]
        )
        st.write(response.choices[0].message.content)
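A small retrieval sanity check can help confirm that app.py gets useful context out of the Chroma collection. This is only a sketch reusing the same from_vector_store and as_retriever calls as app.py; the sample question is made up:

import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore

# reopen the persisted collection exactly as app.py does
db = chromadb.PersistentClient(path="./db")
cc = db.get_or_create_collection("tutorial126")
index = VectorStoreIndex.from_vector_store(vector_store=ChromaVectorStore(chroma_collection=cc))

retriever = index.as_retriever(similarity_top_k=5)
for node in retriever.retrieve("benefits of ashwagandha"):
    # print the similarity score and a preview of the chunk that would be fed into the prompt
    print(node.score, node.text[:120])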
Binary file added tutorial126/db/chroma.sqlite3
32 changes: 32 additions & 0 deletions tutorial126/genData.py
@@ -0,0 +1,32 @@
from langchain_community.document_loaders import AsyncChromiumLoader
from langchain_community.document_transformers import BeautifulSoupTransformer
from langchain_openai import ChatOpenAI
from langchain_ollama import ChatOllama
from langchain.prompts import PromptTemplate
import io, sys, time

# read the name,url pairs produced by genUrl.py
with io.open("urls.csv", "r", encoding="utf-8") as f1:
    data = f1.read()

# llm = ChatOllama(model="llama3.2:latest")
llm = ChatOpenAI(model="gpt-4o")
template = """For the given {text}, extract every piece of information related to {name}. Do not miss anything related to {name}, and do not shorten the content; keep it as close to the original as possible."""
prompt = PromptTemplate.from_template(template)

lines = data.split("\n")
with io.open("ragdoc.txt", "w", encoding="utf-8") as f1:
    for line in lines:
        url = line.split(",")[1]
        name = line.split(",")[0]
        # render the page with a headless Chromium session, then reduce it to <div> text
        loader = AsyncChromiumLoader([url], user_agent="MyAppUserAgent")
        htmldocs = loader.load()
        print(htmldocs)
        bs_transformer = BeautifulSoupTransformer()
        docs_transformed = bs_transformer.transform_documents(htmldocs, tags_to_extract=["div"])
        # ask the LLM to pull out everything related to this product name
        chain = prompt | llm
        response = chain.invoke({"name": name, "text": docs_transformed})
        print(name)
        # print(response.content)
        f1.write(str(response.content) + "\n\n")
        time.sleep(20)
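One fragile spot in genData.py is the unguarded line.split(",")[1]: a trailing blank line or malformed row in urls.csv would raise an IndexError. A hedged helper sketch that parses the same name,url rows defensively (the function name load_url_pairs is illustrative, not part of the commit):

import io

def load_url_pairs(path="urls.csv"):
    # parse the name,url rows the way genData.py expects, skipping blank or malformed lines
    pairs = []
    with io.open(path, "r", encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split(",", 1)  # split on the first comma only so URLs stay intact
            if len(parts) == 2 and parts[1]:
                pairs.append((parts[0], parts[1]))
    return pairs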
13 changes: 13 additions & 0 deletions tutorial126/genIndex.py
@@ -0,0 +1,13 @@
import chromadb
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore

embed_model = OpenAIEmbedding()
# load the scraped text produced by genData.py
loader = SimpleDirectoryReader(input_files=["/Users/roni/Documents/GitHub/LLMtutorial/tutorial126/ragdoc.txt"])
docs = loader.load_data()
# persist the embeddings into a local Chroma collection so app.py can reuse them
db = chromadb.PersistentClient(path="./db")
cc = db.get_or_create_collection("tutorial126")
vs = ChromaVectorStore(chroma_collection=cc)
sc = StorageContext.from_defaults(vector_store=vs)
index = VectorStoreIndex.from_documents(docs, storage_context=sc, embed_model=embed_model)
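After running genIndex.py, a quick way to verify that embeddings were actually persisted is to count the entries in the collection. This is only a sketch using the standard chromadb client calls already used above:

import chromadb

db = chromadb.PersistentClient(path="./db")
cc = db.get_or_create_collection("tutorial126")
# should print a non-zero count once ragdoc.txt has been indexed
print(cc.count(), "embedded chunks stored in tutorial126")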
11 changes: 11 additions & 0 deletions tutorial126/genUrl.py
@@ -0,0 +1,11 @@
import requests, json, io
import string

alphabets = list(string.ascii_lowercase)
with io.open("urls.csv", "w", encoding="utf-8") as f1:
    for alphabet in alphabets:
        # 1mg exposes a paginated listing of ayurvedic products per starting letter
        url = f"https://www.1mg.com/pharmacy_api_gateway/v4/ayurvedas/by_alphabet?alphabet={alphabet}&page=1&per_page=50"
        response = requests.get(url)
        items = response.json()["data"]["schema"]["itemListElement"]
        for item in items:
            f1.write(item["name"] + "," + item["url"] + "\n")
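genUrl.py assumes the 1mg response always contains data.schema.itemListElement. A slightly more defensive variant of the per-alphabet fetch is sketched below; the .get() fallbacks and the timeout are assumptions, while the endpoint and JSON path come from the script above:

import requests

def fetch_items(alphabet):
    # fetch one page of ayurvedic products for a starting letter, tolerating missing keys
    url = (
        "https://www.1mg.com/pharmacy_api_gateway/v4/ayurvedas/by_alphabet"
        f"?alphabet={alphabet}&page=1&per_page=50"
    )
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    data = response.json().get("data", {})
    return data.get("schema", {}).get("itemListElement", [])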
1 change: 1 addition & 0 deletions tutorial126/prompt.txt
@@ -0,0 +1 @@
f"""you are an intelligent ai assistant who has expertise in ayurvedic can write nice and well crafted articles ,use only {docs} as context to write answer the question asked by user, dont use anything else other than this{docs},make it professional writing,detailed use headers and bullets"""