support pre-commit (#30)
* fix imgs and typos

* add gitignore

* support pre-commit
lwaekfjlk authored Apr 9, 2024
1 parent 47acb4d commit bd94f5c
Showing 8 changed files with 110 additions and 60 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yaml
@@ -16,4 +16,4 @@ jobs:
- name: Push to hub
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
run: git push https://wdplx:[email protected]/spaces/wdplx/Sotopia-demo main
run: git push https://wdplx:[email protected]/spaces/wdplx/Sotopia-demo main
2 changes: 1 addition & 1 deletion .gitignore
@@ -1 +1 @@
__pycache__/
__pycache__/
27 changes: 27 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,27 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.0.1 # Use the sha / tag you want to point at
hooks:
- id: prettier
types_or: [html]
- repo: https://github.com/psf/black
rev: 22.12.0
hooks:
- id: black
args: [--line-length=79]
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
args: ["--profile", "black", --line-length=72]
- repo: https://github.com/kynan/nbstripout
rev: 0.6.0
hooks:
- id: nbstripout
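
Once this configuration lands, the hooks run on every git commit after a one-time `pre-commit install` (and `pre-commit run --all-files` applies them to the whole tree). The black and isort entries account for most of the app.py and utils.py churn below: isort orders imports using the "black" profile, and black wraps any statement past 79 columns. A minimal before/after sketch using an import and call from this repository; the wrapping shown matches what these settings produce in the app.py hunks below:

# Before the hooks run: imports unsorted, call wider than 79 columns.
# from transformers import AutoTokenizer, AutoModelForCausalLM
# tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")

# After black (--line-length=79) and isort (--profile black):
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.1"
)
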
107 changes: 62 additions & 45 deletions app.py
@@ -1,15 +1,20 @@
import gradio as gr
from dataclasses import dataclass
import os
from dataclasses import dataclass
from uuid import uuid4

import gradio as gr
import torch
import transformers
from uuid import uuid4
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftConfig, PeftModel
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
)

from utils import Agent, get_starter_prompt, format_sotopia_prompt
from utils import Agent, format_sotopia_prompt, get_starter_prompt

DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"
DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"


def prepare_sotopia_info():
@@ -18,52 +23,60 @@ def prepare_sotopia_info():
background="Ethan Johnson is a 34-year-old male chef. He/him pronouns. Ethan Johnson is famous for cooking Italian food.",
goal="Uknown",
secrets="Uknown",
personality="Ethan Johnson, a creative yet somewhat reserved individual, values power and fairness. He likes to analyse situations before deciding.",)
personality="Ethan Johnson, a creative yet somewhat reserved individual, values power and fairness. He likes to analyse situations before deciding.",
)

machine_agent = Agent(
name="Benjamin Jackson",
background="Benjamin Jackson is a 24-year-old male environmental activist. He/him pronouns. Benjamin Jackson is well-known for his impassioned speeches.",
goal="Figure out why they estranged you recently, and maintain the existing friendship (Extra information: you notice that your friend has been intentionally avoiding you, you would like to figure out why. You value your friendship with the friend and don't want to lose it.)",
secrets="Descendant of a wealthy oil tycoon, rejects family fortune",
personality="Benjamin Jackson, expressive and imaginative, leans towards self-direction and liberty. His decisions aim for societal betterment.",)
personality="Benjamin Jackson, expressive and imaginative, leans towards self-direction and liberty. His decisions aim for societal betterment.",
)

scenario = "Conversation between two friends, where one is upset and crying"
scenario = (
"Conversation between two friends, where one is upset and crying"
)
instructions = get_starter_prompt(machine_agent, human_agent, scenario)
return human_agent, machine_agent, scenario, instructions




def prepare():
model_name = "cmu-lti/sotopia-pi-mistral-7b-BC_SR"
compute_type = torch.float16
config_dict = PeftConfig.from_json_file("peft_config.json")
config = PeftConfig.from_peft_type(**config_dict)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1").to("cuda")
model = PeftModel.from_pretrained(model, model_name, config=config).to("cuda")
tokenizer = AutoTokenizer.from_pretrained(
"mistralai/Mistral-7B-Instruct-v0.1"
)
model = AutoModelForCausalLM.from_pretrained(
"mistralai/Mistral-7B-Instruct-v0.1"
).to("cuda")
model = PeftModel.from_pretrained(model, model_name, config=config).to(
"cuda"
)
return model, tokenizer



def introduction():
with gr.Column(scale=2):
gr.Image("images/sotopia.jpg", elem_id="banner-image", show_label=False)
gr.Image(
"images/sotopia.jpg", elem_id="banner-image", show_label=False
)
with gr.Column(scale=5):
gr.Markdown(
"""# Sotopia-Pi Demo
**Chat with [Sotopia-Pi](https://github.com/sotopia-lab/sotopia-pi), brainstorm ideas, discuss your holiday plans, and more!**
➡️️ **Intended Use**: this demo is intended to showcase an early finetuning of [sotopia-pi-mistral-7b-BC_SR](https://huggingface.co/cmu-lti/sotopia-pi-mistral-7b-BC_SR).
⚠️ **Limitations**: the model can and will produce factually incorrect information, hallucinating facts and actions. As it has not undergone any advanced tuning/alignment, it can produce problematic outputs, especially if prompted to do so. Finally, this demo is limited to a session length of about 1,000 words.
🗄️ **Disclaimer**: User prompts and generated replies from the model may be collected by TII solely for the purpose of enhancing and refining our models. TII will not store any personally identifiable information associated with your inputs. By using this demo, users implicitly agree to these terms.
"""
)



def param_accordion(according_visible=True):
with gr.Accordion("Parameters", open=False, visible=according_visible):
temperature = gr.Slider(
@@ -91,8 +104,12 @@ def param_accordion(according_visible=True):
return temperature, session_id, max_tokens


def sotopia_info_accordion(human_agent, machine_agent, scenario, according_visible=True):
with gr.Accordion("Sotopia Information", open=False, visible=according_visible):
def sotopia_info_accordion(
human_agent, machine_agent, scenario, according_visible=True
):
with gr.Accordion(
"Sotopia Information", open=False, visible=according_visible
):
with gr.Row():
with gr.Column():
user_name = gr.Textbox(
@@ -150,33 +167,32 @@ def run_chat(
bot_name: str,
temperature: float,
top_p: float,
max_tokens: int
max_tokens: int,
):
prompt = format_sotopia_prompt(
message,
history,
instructions,
user_name,
bot_name
message, history, instructions, user_name, bot_name
)
input_tokens = tokenizer(prompt, return_tensors="pt", padding="do_not_pad").input_ids.to("cuda")
input_tokens = tokenizer(
prompt, return_tensors="pt", padding="do_not_pad"
).input_ids.to("cuda")
input_length = input_tokens.shape[-1]
output_tokens = model.generate(
input_tokens,
temperature=temperature,
top_p=top_p,
max_length=max_tokens,
pad_token_id=tokenizer.eos_token_id,
num_return_sequences=1
num_return_sequences=1,
)
output_tokens = output_tokens[:, input_length:]
text_output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
return text_output


def chat_tab():
model, tokenizer = prepare()
human_agent, machine_agent, scenario, instructions = prepare_sotopia_info()

# history are input output pairs
def run_chat(
message: str,
@@ -189,33 +205,34 @@ def run_chat(
max_tokens: int,
):
prompt = format_sotopia_prompt(
message,
history,
instructions,
user_name,
bot_name
message, history, instructions, user_name, bot_name
)
input_tokens = tokenizer(prompt, return_tensors="pt", padding="do_not_pad").input_ids.to("cuda")
input_tokens = tokenizer(
prompt, return_tensors="pt", padding="do_not_pad"
).input_ids.to("cuda")
input_length = input_tokens.shape[-1]
output_tokens = model.generate(
input_tokens,
temperature=temperature,
top_p=top_p,
max_length=max_tokens,
pad_token_id=tokenizer.eos_token_id,
num_return_sequences=1
num_return_sequences=1,
)
output_tokens = output_tokens[:, input_length:]
text_output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
text_output = tokenizer.decode(
output_tokens[0], skip_special_tokens=True
)
return text_output


with gr.Column():
with gr.Row():
temperature, session_id, max_tokens = param_accordion()
user_name, bot_name, scenario = sotopia_info_accordion(human_agent, machine_agent, scenario)
user_name, bot_name, scenario = sotopia_info_accordion(
human_agent, machine_agent, scenario
)
instructions = instructions_accordion(instructions)

with gr.Column():
with gr.Blocks():
gr.ChatInterface(
@@ -226,8 +243,8 @@ def run_chat(
show_label=False,
rtl=False,
avatar_images=(
"images/profile1.jpg",
"images/profile2.jpg"
"images/profile1.jpg",
"images/profile2.jpg",
),
),
textbox=gr.Textbox(
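
One detail of both run_chat variants worth noting: for a causal LM, model.generate returns the prompt ids followed by the continuation, so the code slices output_tokens[:, input_length:] before decoding to keep only the newly generated text. A minimal sketch of that slicing with made-up token ids:

import torch

# Suppose the prompt tokenized to 5 ids and generate() returned 8.
input_length = 5
output_tokens = torch.tensor([[11, 12, 13, 14, 15, 21, 22, 23]])

# Drop the echoed prompt; keep only the continuation.
continuation = output_tokens[:, input_length:]
assert continuation.tolist() == [[21, 22, 23]]
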
2 changes: 1 addition & 1 deletion example.json
@@ -2,4 +2,4 @@
"model": "gpt-4",
"prompt": "Prompt after formatting:\nImagine you are Benjamin Jackson, your task is to act/speak as Benjamin Jackson would, keeping in mind Benjamin Jackson's social goal.\nYou can find Benjamin Jackson's background and goal in the 'Here is the context of the interaction' field.\nNote that Benjamin Jackson's secret and goal is only visible to you.\nYou should try your best to achieve Benjamin Jackson's goal in a way that align with their character traits.\nAdditionally, maintaining the conversation's naturalness and realism is essential (e.g., do not repeat what other people has already said before).\n\nHere is the context of this interaction:\nScenario: Conversation between two friends, where one is upset and crying\nParticipants: Ethan Johnson and Benjamin Jackson\nEthan Johnson's background: Ethan Johnson is a 34-year-old male chef. He/him pronouns. Ethan Johnson is famous for cooking Italian food. Personality and values description: Ethan Johnson, a creative yet somewhat reserved individual, values power and fairness. He likes to analyse situations before deciding. \nBenjamin Jackson's background: Benjamin Jackson is a 24-year-old male environmental activist. He/him pronouns. Benjamin Jackson is well-known for his impassioned speeches. Personality and values description: Benjamin Jackson, expressive and imaginative, leans towards self-direction and liberty. His decisions aim for societal betterment. Benjamin's secrets: Descendant of a wealthy oil tycoon, rejects family fortune\nEthan Johnson's goal: Unknown\nBenjamin Jackson's goal: Figure out why they estranged you recently, and maintain the existing friendship (Extra information: you notice that your friend has been intentionally avoiding you, you would like to figure out why. You value your friendship with the friend and don't want to lose it.)\nConversation Starts:\n\nTurn #0: Ethan Johnson said: \"Hey Benjamin, I feel so sorry to see you like this. Do you want to talk about what's bothering you?\"\n.\nYou are at Turn #1.",
"result": "{'action_type': 'speak', 'argument': \"Hey Ethan, appreciate your concern, man. Actually, I've noticed we haven't been hanging out as much recently. Is everything okay? You've seemed distant...\"}"
}
}
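
Note that the "result" field above holds a Python-style dict literal (single quotes), not nested JSON, so json.loads would fail on it. A minimal sketch of reading the file as laid out above; ast.literal_eval from the standard library parses that literal safely:

import ast
import json

with open("example.json") as f:
    record = json.load(f)

# The outer file is JSON; "result" is a Python dict literal string.
action = ast.literal_eval(record["result"])
print(action["action_type"])  # -> speak
print(action["argument"])     # -> the generated utterance
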
2 changes: 1 addition & 1 deletion peft_config.json
@@ -18,4 +18,4 @@
"v_proj"
],
"task_type": "CAUSAL_LM"
}
}
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,4 +1,4 @@
gradio
transformers
torch
peft
peft
26 changes: 16 additions & 10 deletions utils.py
@@ -1,4 +1,5 @@
from typing import Tuple, List
from typing import List, Tuple


class Agent:
def __init__(self, name, background, goal, secrets, personality):
@@ -8,15 +9,18 @@ def __init__(self, name, background, goal, secrets, personality):
self.secrets = secrets
self.personality = personality


def get_starter_prompt(machine_agent, human_agent, scenario):
return f"Prompt after formatting:\nImagine you are {machine_agent.name}, your task is to act/speak as {machine_agent.name} would, keeping in mind {machine_agent.name}'s social goal.\nYou can find {machine_agent.name}'s background and goal in the 'Here is the context of the interaction' field.\nNote that {machine_agent.name}'s secret and goal is only visible to you.\nYou should try your best to achieve {machine_agent.name}'s goal in a way that align with their character traits.\nAdditionally, maintaining the conversation's naturalness and realism is essential (e.g., do not repeat what other people has already said before).\n\nHere is the context of this interaction:\n Scenario: {scenario}\nParticipants: {human_agent.name} and {machine_agent.name}\n{human_agent.name}'s background: {human_agent.background} Personality and values description: {human_agent.personality} \n{machine_agent.name}'s background: {machine_agent.background} Personality and values description: {machine_agent.personality} {machine_agent.name}'s secrets: {machine_agent.secrets}\n{human_agent.name}'s goal: Unknown\n{machine_agent.name}'s goal: {machine_agent.goal}\nConversation Starts:"

# we define history as

# we define history as
# [(user_message, bot_message), (user_message, bot_message)]

# we define dialogue history as
# we define dialogue history as
# user_name: user_message\nbot_name: bot_message\nuser_name: user_message\nbot_name: bot_message\n


def dialogue_history_length_check(string, max_token, tokenizer):
prompt_tokens = len(tokenizer(string)["input_ids"])
return max(prompt_tokens - max_token, 0)
Expand Down Expand Up @@ -46,9 +50,13 @@ def dialogue_history_creation(history, user_name, bot_name):


def dialogue_history_truncation(dialogue_history, max_token_num, tokenizer):
surpass_num = dialogue_history_length_check(dialogue_history, max_token_num, tokenizer)
surpass_num = dialogue_history_length_check(
dialogue_history, max_token_num, tokenizer
)
if surpass_num > 0:
dialogue_history = truncate_dialogue_history_to_length(dialogue_history, surpass_num, tokenizer)
dialogue_history = truncate_dialogue_history_to_length(
dialogue_history, surpass_num, tokenizer
)
return dialogue_history


@@ -59,14 +67,12 @@ def format_sotopia_prompt(
user_name: str,
bot_name: str,
include_all_chat_history: bool = True,
index : int = 1
index: int = 1,
) -> str:
prompt = instructions.strip()
dialogue_history, last_turn_idx = dialogue_history_creation(
history,
user_name,
bot_name
history, user_name, bot_name
)
prompt = f"{prompt}\n{dialogue_history}"
prompt = f"{prompt}\n\nTurn #{last_turn_idx+1}: {user_name}: {message}\n.\nYou are at Turn #{last_turn_idx+2}."
return prompt
return prompt
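
The comment blocks added to utils.py pin down two data shapes: Gradio's history is a list of (user_message, bot_message) pairs, and the prompt needs it flattened into name-prefixed lines. A minimal sketch of that flattening, with an illustrative function name (the repo's dialogue_history_creation does this plus turn bookkeeping):

from typing import List, Tuple


def flatten_history(
    history: List[Tuple[str, str]], user_name: str, bot_name: str
) -> str:
    # [(user_msg, bot_msg), ...] -> "user_name: ...\nbot_name: ...\n..."
    lines = []
    for user_message, bot_message in history:
        lines.append(f"{user_name}: {user_message}")
        lines.append(f"{bot_name}: {bot_message}")
    return "\n".join(lines)
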
