-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathstreamlit-app.py
183 lines (159 loc) · 6.52 KB
/
streamlit-app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
import requests
import streamlit as st
import time
import random
import os
from dotenv import load_dotenv
import pandas as pd
import sqlalchemy as sa
import base64
load_dotenv()
# --- Configuration -----------------------------------------------------------
# Base URL of the summarization backend, e.g. "http://host:port".
RDS_ENDPOINT = os.getenv("RDS_ENDPOINT")
if RDS_ENDPOINT is None:
    # Fail fast with a clear message instead of a TypeError on concatenation below.
    raise RuntimeError("RDS_ENDPOINT environment variable is not set")
RDS_ENDPOINT_SUMMARIZE = RDS_ENDPOINT + "/summarize"

# set page meta data
st.set_page_config(
    page_title="LLM Summarizer",
    page_icon="📖",
    layout="wide",
    initial_sidebar_state="auto",
)

# --- Sidebar -----------------------------------------------------------------
st.sidebar.title("LLM Summarizer")
st.sidebar.info("This is a demo of the LLM Summarizer. You can ask me to summarize anything!")
with st.sidebar.expander("About"):
    st.write("Fine-tuned LLM model for video conference/webinar transcripts summarization.")
    st.write("Built with 🤗 Transformers, Streamlit, and AWS")
    st.write("######")
    # NOTE: closed the previously unbalanced <strong> tag ("<strong>About Me<strong>").
    st.markdown(
        """<p><strong>About Me</strong><br/>Hi, I'm <a href="https://github.com/jolenechong/">Jolene Chong</a>! I design and build interfaces. I'm passionate about building products that make a positive impact on people's lives with AI.</p>""",
        unsafe_allow_html=True)

# divider
st.sidebar.markdown("---")
st.sidebar.write("Model Details")
# dropdown to select model used; only bart-large is implemented downstream.
selected_model = st.sidebar.selectbox(
    "",
    ("bart-large", "gpt-3.5", "palm-2"),
    label_visibility="collapsed"
)
# Hosted models require a user-supplied API key (collected but not used yet).
if selected_model == "gpt-3.5":
    api_key = st.sidebar.text_input("Enter OpenAI API key", type="password")
elif selected_model == "palm-2":
    api_key = st.sidebar.text_input("Enter GCP API key", type="password")
def get_table_download_link(messages):
    """Return an HTML anchor that downloads the chat history as a CSV file.

    The messages (a list of {"role", "content"} dicts) are serialized to CSV
    and embedded in the link as a base64 data URI, so no server round-trip
    is needed for the download.
    """
    csv_text = pd.DataFrame(messages).to_csv(index=False)
    encoded = base64.b64encode(csv_text.encode()).decode()
    return f'<a href="data:file/csv;base64,{encoded}" download="chat_history.csv">Download here</a>'
def summarizeBart(text):
    """Summarize *text* via the bart-large backend and stream the result into the chat.

    A quick health-check GET is issued first so an unreachable server fails
    fast; then the text is POSTed to the summarize endpoint and the returned
    'message' field is rendered as bot messages.

    Returns True on success, False when the server cannot be reached.
    """
    try:
        start_time = time.time()
        # Cheap health check so we fail fast when the server is down.
        requests.get(RDS_ENDPOINT, timeout=2)
        addBotPrompt("Sure! Summarizing...")
        with st.spinner('This might take awhile...'):
            response = requests.post(
                RDS_ENDPOINT_SUMMARIZE,
                json={"text": text})
        elapsed_time = round(time.time() - start_time, 2)
        addBotPrompt(f"Done! Generated in {elapsed_time} seconds")
        addBotPrompt(response.json()['message'])
        return True
    except requests.exceptions.RequestException:
        # Broader than the original ConnectionError: also catches the
        # Timeout the 2s health check can raise, instead of crashing the app.
        print("Connection refused")
        return False
# download chat history
# Renders a base64 data-URI link (built by get_table_download_link) so the
# transcript can be saved as CSV without a server round-trip.
if st.sidebar.button("Download chat history"):
    st.sidebar.markdown(get_table_download_link(st.session_state.messages), unsafe_allow_html=True)
# Usage help text; shown by addBotPrompt(help=True) and embedded in stored
# messages between <<< >>> markers for replay on rerun.
info = """
Here are some things you can ask me to do:\n
Tell me to summarize something:
\tsummarize: <enter text here>
\tsummarize: The quick brown fox jumped over the lazy dog.\n
You can also select a different model and upload a transcript as a file to summarize.
"""
def addUserPrompt(prompt):
    """Echo the user's message into the chat and record it in the session history."""
    entry = {"role": "user", "content": prompt}
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append(entry)
def addBotPrompt(response, help=False):
    """Render *response* as an AI chat message with a word-by-word typing animation.

    The message is appended to st.session_state.messages. When ``help`` is
    true, the module-level ``info`` text is also shown as an info box and
    embedded in the stored message between <<< and >>> markers so the
    history-replay loop can re-render it on rerun.

    NOTE: the parameter name ``help`` shadows the builtin; it is kept
    unchanged for caller compatibility.
    """
    with st.chat_message("ai"):
        message_placeholder = st.empty()
        full_response = ""
        # Simulate a token stream: reveal one word every 50 ms with a cursor.
        for chunk in response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "▌")
        if help:  # idiomatic truth test instead of `== True`
            st.info(info)
            st.session_state.messages.append({"role": "ai", "content": response + "<<<" + info + ">>>"})
        else:
            st.session_state.messages.append({"role": "ai", "content": response})
        # Drop the cursor once the full message has been revealed.
        message_placeholder.markdown(full_response)
st.subheader("💬 Summarize")
st.write("Ask me to summarize anything!")

# View Current DB button: dumps the backend `text` table for debugging.
with st.expander("View Current DB"):
    view_current_db = st.button("Get Current DB")
    if view_current_db:
        database_uri = os.getenv("SQLALCHEMY_DATABASE_URI")
        engine = sa.create_engine(database_uri)
        query = sa.text("SELECT * FROM text")
        # Context manager returns the connection to the pool — the original
        # `engine.connect().execute(...)` leaked an unclosed connection.
        with engine.connect() as conn:
            result = conn.execute(query)
            df = pd.DataFrame(result.fetchall())
        st.dataframe(df)
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        content = message["content"]
        # Help replies embed the info box between <<< and >>> markers.
        if "<<<" in content:
            # Local names chosen so we do NOT clobber the module-level
            # `info` (read by addBotPrompt) and `text` (assigned by the
            # upload handler) — the original shadowed both globals here.
            body_text, _, rest = content.partition("<<<")
            info_text = rest.partition(">>>")[0]
            st.markdown(body_text)
            st.info(info_text)
        else:
            st.markdown(content)
st.sidebar.write("Summarize a file")
# Optional transcript upload; summarization of the file runs the next time
# the user sends any chat message (see the chat-input handler below).
uploaded = st.sidebar.file_uploader("", type=["txt"], label_visibility="collapsed")
if uploaded is not None:
    st.sidebar.write("Say hi to get summary!")
    # prompt = "File Uploaded"
# React to user input
if prompt := st.chat_input("Send a message..."):
    if prompt.lower().startswith(("hi", "hello")):
        # Greeting: reply with a canned greeting plus the usage help box.
        addUserPrompt(prompt)
        assistant_response = random.choice(
            [
                "Hello there! How can I assist you today?",
                "Hi, human! Is there anything I can help you with?",
                "Do you need help?",
            ]
        )
        addBotPrompt(assistant_response, help=True)
    elif "summarize:" in prompt.lower():
        # Everything after "summarize:" is the text to condense; the
        # backend health check happens inside summarizeBart.
        addUserPrompt(prompt)
        if selected_model == "bart-large":
            if not summarizeBart(prompt.lower().split("summarize:")[1].strip()):
                addBotPrompt("Sorry, I'm not available right now. Please try again later.")
        elif selected_model == "gpt-3.5":
            addBotPrompt("NOT IMPLEMENTED YET")
        elif selected_model == "palm-2":
            addBotPrompt("NOT IMPLEMENTED YET")
    elif "use" in prompt.lower():
        # Model-switch request, e.g. "use gpt-3.5" (acknowledged only).
        modelToUse = prompt.lower().split("use")[1].strip()
        addUserPrompt(prompt)
        addBotPrompt("Sure! I will start using " + modelToUse + "...")
    else:
        addUserPrompt(prompt)
        assistant_response = "Sorry, I don't understand. Could you rephrase?"
        addBotPrompt(assistant_response)

    # Any message also triggers summarization of an uploaded transcript
    # (the sidebar tells the user to "Say hi to get summary!").
    if uploaded is not None:
        text = uploaded.read().decode("utf-8")
        if selected_model == "bart-large":
            if not summarizeBart(text):
                addBotPrompt("Sorry, I'm not available right now. Please try again later.")