updating UI examples
DARREN OBERST authored and DARREN OBERST committed May 5, 2024
1 parent a6b24fc commit 425a645
Showing 4 changed files with 261 additions and 97 deletions.
97 changes: 0 additions & 97 deletions examples/Notebooks/ui_without_a_database.py

This file was deleted.

94 changes: 94 additions & 0 deletions examples/UI/rag_ui_with_query_topic_with_streamlit.py
@@ -0,0 +1,94 @@

""" This example shows how to build a simple UI RAG application for longer documents in which a retrieval query step
is required to build a context from selected text chunks in the document.
This example is build with a Streamlit UI. To run, it requires a separate `pip install streamlit`, and
to execute the script, you should run from the command line with:
`streamlit run using_with_streamlit_ui.py`
For more information about Streamlit, check out their docs: https://docs.streamlit.io/develop/tutorials
To build out the application, you would replace the very simple 'text search' mechanism used below with
techniques outlined in examples in Embeddings and Retrieval.
"""


import os
import streamlit as st

from llmware.prompts import Prompt
from llmware.setup import Setup

# st.set_page_config(layout="wide")


def simple_analyzer_with_topic_query():

st.title("Simple RAG Analyzer with Focusing Query")

prompter = Prompt()

sample_files_path = Setup().load_sample_files(over_write=False)
doc_path = os.path.join(sample_files_path, "Agreements")

files = os.listdir(doc_path)
file_name = st.selectbox("Choose an Agreement", files)

    # ** topic_query ** - this is a proxy for a more complex focusing retrieval strategy that targets only a
    # specific part of the document, rather than the whole document.
    # In this case, a 'text match' search is run against the topic query to reduce the
    # text chunks reviewed in trying to answer the question.

topic_query = st.text_area("Filtering Topic (hint: 'vacation')")

# ** prompt_text ** - this is the question that will be passed to the LLM
prompt_text = st.text_area("Question (hint: 'how many vacation days will the executive receive'")

model_name = st.selectbox("Choose a model for answering questions", ["bling-phi-3-gguf",
"bling-tiny-llama-1b",
"bling-stablelm-3b-tool",
"llama-3-instruct-bartowski-gguf",
"dragon-llama-answer-tool"])

if st.button("Run Analysis"):

if file_name and prompt_text and model_name:

prompter.load_model(model_name, temperature=0.0, sample=False)

# parse the PDF in memory and attach to the prompt
if not topic_query:
sources = prompter.add_source_document(doc_path,file_name)
else:
# this is where we use the topic_query to filter the parsed document
sources = prompter.add_source_document(doc_path,file_name, query=topic_query)

# run the inference with the source
response = prompter.prompt_with_source(prompt_text)

# fact checks
fc = prompter.evidence_check_numbers(response)
cs = prompter.evidence_check_sources(response)

if len(response) > 0:
if "llm_response" in response[0]:
response = response[0]["llm_response"]

st.write(f"Answer: {response}")

if len(fc) > 0:
if "fact_check" in fc[0]:
fc_out = fc[0]["fact_check"]
st.write(f"Numbers Check: {fc_out}")

if len(cs) > 0:
if "source_review" in cs[0]:
sr_out = cs[0]["source_review"]
st.write(f"Source review: {sr_out}")


if __name__ == "__main__":

simple_analyzer_with_topic_query()
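
The docstring above suggests swapping the simple 'text match' filter for the techniques in the Embeddings and Retrieval examples. The sketch below is one possible shape for that substitution - it is not part of this commit, and it assumes that an llmware Library (named "agreements_lib" here purely for illustration) has already been created, parsed, and embedded, so that Query.semantic_query and Prompt.add_source_query_results can be used as in the llmware retrieval examples.

# hypothetical sketch - assumes "agreements_lib" has already been created and embedded, e.g., with
# Library().create_new_library(...), library.add_files(...), and library.install_new_embedding(...)

from llmware.library import Library
from llmware.retrieval import Query
from llmware.prompts import Prompt


def semantic_filter_analyzer(prompt_text, topic_query, model_name="bling-phi-3-gguf"):

    # load the previously created and embedded library
    library = Library().load_library("agreements_lib")

    # run a semantic query against the topic to pull only the most relevant text chunks
    query_results = Query(library).semantic_query(topic_query, result_count=10)

    prompter = Prompt().load_model(model_name, temperature=0.0, sample=False)

    # attach the retrieved chunks as the source, in place of add_source_document(..., query=topic_query)
    prompter.add_source_query_results(query_results)

    response = prompter.prompt_with_source(prompt_text)

    if len(response) > 0 and "llm_response" in response[0]:
        return response[0]["llm_response"]

    return ""
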
86 changes: 86 additions & 0 deletions examples/UI/simple_rag_ui_with_streamlit.py
@@ -0,0 +1,86 @@

""" This example shows how to build a simple RAG application with UI with Streamlit and LLMWare.
Note: it requires a separate `pip install streamlit`, and to run the script, you should run from the
command line with:
`streamlit run using_with_streamlit_ui.py`
For this example, we will be prompting against a set of Invoice documents, provided in the LLMWare
sample files.
If you would like to substitute longer documents then please look at the UI example:
-- rag_ui_with_query_topic_with_streamlit.py
as a framework to get started integrating a retrieval step before the prompt of the source
For more information about Streamlit, check out their docs: https://docs.streamlit.io/develop/tutorials
"""


import os
import streamlit as st

from llmware.prompts import Prompt
from llmware.setup import Setup

# st.set_page_config(layout="wide")


def simple_analyzer():

st.title("Simple RAG Analyzer")

prompter = Prompt()

sample_files_path = Setup().load_sample_files(over_write=False)
doc_path = os.path.join(sample_files_path, "Invoices")

files = os.listdir(doc_path)
file_name = st.selectbox("Choose an Invoice", files)

prompt_text = st.text_area("Question (hint: 'what is the total amount of the invoice?'")

model_name = st.selectbox("Choose a model for answering questions", ["bling-phi-3-gguf",
"bling-tiny-llama-1b",
"bling-stablelm-3b-tool",
"llama-3-instruct-bartowski-gguf",
"dragon-llama-answer-tool"])

if st.button("Run Analysis"):

if file_name and prompt_text and model_name:

prompter.load_model(model_name, temperature=0.0, sample=False)

# parse the PDF in memory and attach to the prompt
sources = prompter.add_source_document(doc_path,file_name)

# run the inference with the source
response = prompter.prompt_with_source(prompt_text)

# fact checks
fc = prompter.evidence_check_numbers(response)
cs = prompter.evidence_check_sources(response)

if len(response) > 0:
if "llm_response" in response[0]:
response = response[0]["llm_response"]

st.write(f"Answer: {response}")

if len(fc) > 0:
if "fact_check" in fc[0]:
fc_out = fc[0]["fact_check"]
st.write(f"Numbers Check: {fc_out}")

if len(cs) > 0:
if "source_review" in cs[0]:
sr_out = cs[0]["source_review"]
st.write(f"Source review: {sr_out}")


if __name__ == "__main__":

simple_analyzer()
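
As mentioned in the docstring above, the same analysis can also be run without Streamlit. The following is a rough sketch - not part of this commit - of a batch, command-line variant that runs one question across all of the sample Invoice documents, using the same llmware calls shown in this file plus Prompt.clear_source_materials (assumed here, as in other llmware examples, to reset the attached source between documents).

# hypothetical batch variant - not part of this commit - runs the same question across all sample invoices
# from the command line without Streamlit

import os

from llmware.prompts import Prompt
from llmware.setup import Setup


def batch_invoice_analysis(prompt_text="What is the total amount of the invoice?",
                           model_name="bling-phi-3-gguf"):

    prompter = Prompt().load_model(model_name, temperature=0.0, sample=False)

    sample_files_path = Setup().load_sample_files(over_write=False)
    doc_path = os.path.join(sample_files_path, "Invoices")

    for file_name in os.listdir(doc_path):

        prompter.add_source_document(doc_path, file_name)
        response = prompter.prompt_with_source(prompt_text)

        if len(response) > 0 and "llm_response" in response[0]:
            print(f"{file_name}: {response[0]['llm_response']}")

        # reset the attached source before moving to the next invoice
        prompter.clear_source_materials()
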
81 changes: 81 additions & 0 deletions examples/UI/using_streamlit_chat_ui.py
@@ -0,0 +1,81 @@

""" This example provides a basic framework to build a Chatbot UI interface in conjunction with LLMWare
using Streamlit Chat UI.
To run this example requires an install of Streamlit, e.g., `pip3 install streamlit`
To execute the script, run from the command line with: `streamlit run using_with_streamlit_ui.py`
Also, please note that the first time you run with a new model, the model will be downloaded and cached locally,
so expect a delay on the 'first run' which will be much faster on every successive run.
All components of the chatbot will be running locally, so the speed will be determined greatly by the
CPU/GPU capacities of your machine.
We have set the max_output at 250 tokens - for faster, set lower ...
For more information on the Streamlit Chat UI,
see https://docs.streamlit.io/develop/tutorials/llms/build-conversational-apps
"""


import streamlit as st
from llmware.models import ModelCatalog


def simple_chat_ui_app(model_name):

st.title(f"Simple Chat with {model_name}")

model = ModelCatalog().load_model(model_name, temperature=0.3, sample=True, max_output=250)

# initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []

# display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])

# accept user input
prompt = st.chat_input("Say something")
if prompt:

with st.chat_message("user"):
st.markdown(prompt)

with st.chat_message("assistant"):

model_response = model.inference(prompt)

# insert additional error checking / post-processing of output here
bot_response = model_response["llm_response"]

st.markdown(bot_response)

st.session_state.messages.append({"role": "user", "content": prompt})
st.session_state.messages.append({"role": "assistant", "content": bot_response})

return 0


if __name__ == "__main__":

# a few representative good chat models that can run locally
# note: will take a minute for the first time it is downloaded and cached locally

chat_models = ["phi-3-gguf",
"llama-2-7b-chat-gguf",
"llama-3-instruct-bartowski-gguf",
"openhermes-mistral-7b-gguf",
"zephyr-7b-gguf"]

model_name = chat_models[0]

simple_chat_ui_app(model_name)
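
One limitation of the chat loop above is that only the latest user message is passed to the model, so the conversation has no memory. The helper below is a rough sketch - not part of this commit - of folding prior turns from st.session_state.messages into the prompt string before calling model.inference; it assumes the selected model's context window is large enough to hold the history plus the new prompt.

# hypothetical helper - not part of this commit - folds prior turns into the prompt string so the
# model sees the recent conversation history

def build_prompt_with_history(messages, new_prompt, max_turns=5):

    """ Concatenates up to the last `max_turns` user/assistant exchanges into a single prompt string.
    Assumes the model's context window can hold the history plus the new prompt. """

    history = ""
    for message in messages[-2 * max_turns:]:
        history += f"{message['role']}: {message['content']}\n"

    return f"{history}user: {new_prompt}\nassistant:"


# usage inside the chat loop (in place of model.inference(prompt)):
#   full_prompt = build_prompt_with_history(st.session_state.messages, prompt)
#   model_response = model.inference(full_prompt)
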


