Skip to content

Commit

Permalink
refactor: ChatOpenAI 인스턴스의 openai_api_key는 환경변수에서 불러옴
Browse files Browse the repository at this point in the history
  • Loading branch information
kooqooo committed Aug 8, 2024
1 parent 007d67c commit 570ed9b
Show file tree
Hide file tree
Showing 7 changed files with 18 additions and 30 deletions.
1 change: 0 additions & 1 deletion pages/1_home.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@
print("user_id : ", st.session_state["user_id"])

if "openai_api_key" not in st.session_state:
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
st.session_state.openai_api_key = OPENAI_API_KEY

if "FAV_IMAGE_PATH" not in st.session_state:
Expand Down
12 changes: 4 additions & 8 deletions pages/3_gene_question.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
from src.rule_based import list_extend_questions_based_on_keywords
from src.util import local_css, read_prompt_from_txt
from src.semantic_search import faiss_inference, reranker
from config import OPENAI_API_KEY, DATA_DIR, IMG_PATH, CSS_PATH, PORT, MODEL_NAME
from config import DATA_DIR, IMG_PATH, CSS_PATH, PORT, MODEL_NAME

st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png")
st.set_page_config(
Expand Down Expand Up @@ -144,9 +144,7 @@
st.session_state.logger.info("create prompt JD object")

### 모델 세팅 그대로
llm = ChatOpenAI(temperature=st.session_state.temperature,
model_name=MODEL_NAME,
openai_api_key=OPENAI_API_KEY)
llm = ChatOpenAI(temperature=st.session_state.temperature, model_name=MODEL_NAME)

st.session_state.logger.info("create llm object")

Expand Down Expand Up @@ -176,9 +174,7 @@
st.session_state.logger.info("user_resume chunk OpenAIEmbeddings ")

### STEP 2 를 위한 새 모델 호출
llm2 = ChatOpenAI(temperature=0.0,
model_name=MODEL_NAME,
openai_api_key=OPENAI_API_KEY)
llm2 = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME)

st.session_state.chain_type_kwargs = {"prompt": st.session_state.prompt_resume}

Expand All @@ -201,7 +197,7 @@
st.session_state.logger.info("create prompt question template")
st.session_state.prompt_question = create_prompt_with_question(prompt_template_question)

llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY)
llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME)
st.session_state.chain = LLMChain(llm=llm3, prompt=st.session_state.prompt_question)
st.session_state.main_question = st.session_state.chain.invoke({"jd": st.session_state.job_description, "resume": st.session_state.resume})['text']
#################
Expand Down
8 changes: 3 additions & 5 deletions pages/3_gene_question_no_resume.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
from src.rule_based import list_extend_questions_based_on_keywords
from src.util import local_css, read_prompt_from_txt
from src.semantic_search import faiss_inference, reranker
from config import OPENAI_API_KEY, DATA_DIR, IMG_PATH, CSS_PATH, MODEL_NAME
from config import DATA_DIR, IMG_PATH, CSS_PATH, MODEL_NAME

st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png")
st.set_page_config(
Expand Down Expand Up @@ -135,9 +135,7 @@
st.session_state.logger.info("create prompt JD object")

### 모델 세팅 그대로
llm = ChatOpenAI(temperature=st.session_state.temperature,
model_name=MODEL_NAME,
openai_api_key=OPENAI_API_KEY)
llm = ChatOpenAI(temperature=st.session_state.temperature, model_name=MODEL_NAME,)

st.session_state.logger.info("create llm object")

Expand All @@ -155,7 +153,7 @@
st.session_state.logger.info("create no resume prompt question template")
st.session_state.prompt_question = create_prompt_with_no_resume(prompt_noResume_question_template)

llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY)
llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME)
st.session_state.chain = LLMChain(llm=llm3, prompt=st.session_state.prompt_question)
st.session_state.main_question = st.session_state.chain.run({"jd": st.session_state.job_description})
#################
Expand Down
6 changes: 3 additions & 3 deletions pages/4_show_questions_hint.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from src.generate_question import (create_prompt_feedback, # 추가
create_prompt_hint)
from src.util import read_prompt_from_txt
from config import DATA_DIR, IMG_PATH, OPENAI_API_KEY, MODEL_NAME
from config import DATA_DIR, IMG_PATH, MODEL_NAME

st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png")
st.set_page_config(
Expand Down Expand Up @@ -80,7 +80,7 @@
st.session_state.logger.info("create prompt_Feedback object")

### 모델 세팅 그대로
llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY)
llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME)

st.session_state.logger.info("create llm object")

Expand Down Expand Up @@ -115,7 +115,7 @@
st.session_state.logger.info("create prompt_Hint object")

### 모델 세팅
llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY)
llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME)

st.session_state.logger.info("create llm object")

Expand Down
16 changes: 7 additions & 9 deletions src/gene_question_2chain_ver.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@
from streamlit_extras.switch_page_button import switch_page
from util import local_css, read_prompt_from_txt

from config import OPENAI_API_KEY, MODEL_NAME
from config import MODEL_NAME, PATH

DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
st.session_state["FAV_IMAGE_PATH"] = os.path.join(DATA_DIR, "images/favicon.png")
st.session_state["FAV_IMAGE_PATH"] = os.path.join(DATA_DIR, "images", "favicon.png")
st.set_page_config(
page_title="Hello Jobits", # 브라우저탭에 뜰 제목
page_icon=Image.open(
Expand All @@ -36,14 +36,12 @@
st.session_state.logger.info("start")
NEXT_PAGE = "show_questions_hint"

MY_PATH = os.path.dirname(os.path.dirname(__file__))

#### style css ####
MAIN_IMG = st.session_state.MAIN_IMG
LOGO_IMG = st.session_state.LOGO_IMG

local_css(MY_PATH + "/css/background.css")
local_css(MY_PATH + "/css/2_generate_question.css")
local_css(PATH + "/css/background.css")
local_css(PATH + "/css/2_generate_question.css")
st.markdown(f"""
<style>
/* 로딩이미지 */
Expand Down Expand Up @@ -181,7 +179,7 @@
### JD 사용하여 JD 추출용 프롬프트 만들기
st.session_state.logger.info("prompt JD start")

prompt_template = read_prompt_from_txt(MY_PATH + "/data/test/prompt_JD_template.txt")
prompt_template = read_prompt_from_txt(PATH + "data", "test", "prompt_JD_template.txt")

prompt_JD = create_prompt_with_jd(prompt_template)
# prompt_JD 생성완료
Expand Down Expand Up @@ -211,7 +209,7 @@

st.session_state.logger.info("prompt QA start")

prompt_template = read_prompt_from_txt(MY_PATH + "/data/test/prompt_qa_template")
prompt_template = read_prompt_from_txt(PATH + "/data/test/prompt_qa_template")

st.session_state.logger.info("create prompt QA template")

Expand All @@ -225,7 +223,7 @@

### STEP 2 를 위한 새 모델 호출

llm2 = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY)
llm2 = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME)

chain_type_kwargs = {"prompt": prompt_qa}

Expand Down
2 changes: 1 addition & 1 deletion src/semantic_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ def faiss_inference(query):
output으로는 (4 * 키워드 줄 개수 - 중복된 문장)개의 질문이 반환됩니다
'''
embeddings = HuggingFaceEmbeddings(
model_name = "BM-K/KoSimCSE-roberta-multitask",
model_name = "jhgan/ko-sroberta-multitask",
model_kwargs = {'device': 'cuda'}
)
store_name="./FAISS_INDEX_TAG"
Expand Down
3 changes: 0 additions & 3 deletions src/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,9 +201,6 @@ def save_uploaded_jd_as_filepath(uploaded_jd, save_directory, filename="uploaded
return file_path


os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY


@st.cache_resource
def load_chain(question):
"""
Expand Down

0 comments on commit 570ed9b

Please sign in to comment.