main.py

from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate, LLMChain
import json
import os

# Example JSON object expected at /prompt_template.json:
# {
# "template": "You are a friendly chatbot assistant that responds conversationally to users' questions. \n Keep the answers short, unless specifically asked by the user to elaborate on something. \n \n Question: {question} \n \n Answer:",
# "parameters": {"question": "What is a chatbot?"}
# }
os.environ["HF_DATASETS_OFFLINE"]="1"
os.environ["TRANSFORMERS_OFFLINE"]="1"

# Load the FastChat-T5 model as a local text2text-generation pipeline on GPU 0.
model_id = "lmsys/fastchat-t5-3b-v1.0"
llm = HuggingFacePipeline.from_model_id(
    model_id=model_id,
    task="text2text-generation",
    model_kwargs={"temperature": 0, "max_length": 1000},
    device=0,
)

# Fallback prompt, used when the JSON file does not provide a template.
default_template = """
You are a friendly chatbot assistant that responds conversationally to users' questions.
Keep the answers short, unless specifically asked by the user to elaborate on something.

Question: {question}

Answer:"""

# Read the prompt template and its parameters from the mounted JSON file,
# falling back to the default template if none is supplied.
with open("/prompt_template.json") as f:
    json_content = json.loads(f.read())
print(json_content)

prompt = PromptTemplate.from_template(json_content.get("template", default_template))

# Give every variable the template expects an (empty) value so the chain
# does not fail on missing inputs.
for key in prompt.input_variables:
    if key not in json_content["parameters"]:
        json_content["parameters"][key] = ""

# Run the chain on the supplied parameters and persist the result.
llm_chain = LLMChain(prompt=prompt, llm=llm)
result = llm_chain(json_content["parameters"])
print(result)

with open("/output/result.json", "w") as f:
    json.dump(result, f)
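
# Illustrative usage (assumptions: the host or container provides
# /prompt_template.json and an /output directory; neither is created by this script):
#
#   echo '{"template": "Question: {question}\nAnswer:", "parameters": {"question": "What is a chatbot?"}}' > /prompt_template.json
#   python main.py
#
# LLMChain's __call__ returns the input variables plus a "text" key, so
# /output/result.json should look roughly like:
#   {"question": "What is a chatbot?", "text": "<model answer>"}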