diff --git a/pages/1_home.py b/pages/1_home.py index 0eb7e25..18193b1 100644 --- a/pages/1_home.py +++ b/pages/1_home.py @@ -66,7 +66,6 @@ print("user_id : ", st.session_state["user_id"]) if "openai_api_key" not in st.session_state: - os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY st.session_state.openai_api_key = OPENAI_API_KEY if "FAV_IMAGE_PATH" not in st.session_state: diff --git a/pages/3_gene_question.py b/pages/3_gene_question.py index 5c56754..64b735c 100644 --- a/pages/3_gene_question.py +++ b/pages/3_gene_question.py @@ -26,7 +26,7 @@ from src.rule_based import list_extend_questions_based_on_keywords from src.util import local_css, read_prompt_from_txt from src.semantic_search import faiss_inference, reranker -from config import OPENAI_API_KEY, DATA_DIR, IMG_PATH, CSS_PATH, PORT, MODEL_NAME +from config import DATA_DIR, IMG_PATH, CSS_PATH, PORT, MODEL_NAME st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png") st.set_page_config( @@ -144,9 +144,7 @@ st.session_state.logger.info("create prompt JD object") ### 모델 세팅 그대로 - llm = ChatOpenAI(temperature=st.session_state.temperature, - model_name=MODEL_NAME, - openai_api_key=OPENAI_API_KEY) + llm = ChatOpenAI(temperature=st.session_state.temperature, model_name=MODEL_NAME) st.session_state.logger.info("create llm object") @@ -176,9 +174,7 @@ st.session_state.logger.info("user_resume chunk OpenAIEmbeddings ") ### STEP 2 를 위한 새 모델 호출 - llm2 = ChatOpenAI(temperature=0.0, - model_name=MODEL_NAME, - openai_api_key=OPENAI_API_KEY) + llm2 = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME) st.session_state.chain_type_kwargs = {"prompt": st.session_state.prompt_resume} @@ -201,7 +197,7 @@ st.session_state.logger.info("create prompt question template") st.session_state.prompt_question = create_prompt_with_question(prompt_template_question) - llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY) + llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME) 
st.session_state.chain = LLMChain(llm=llm3, prompt=st.session_state.prompt_question) st.session_state.main_question = st.session_state.chain.invoke({"jd": st.session_state.job_description, "resume": st.session_state.resume})['text'] ################# diff --git a/pages/3_gene_question_no_resume.py b/pages/3_gene_question_no_resume.py index d50194f..92f10a4 100644 --- a/pages/3_gene_question_no_resume.py +++ b/pages/3_gene_question_no_resume.py @@ -27,7 +27,7 @@ from src.rule_based import list_extend_questions_based_on_keywords from src.util import local_css, read_prompt_from_txt from src.semantic_search import faiss_inference, reranker -from config import OPENAI_API_KEY, DATA_DIR, IMG_PATH, CSS_PATH, MODEL_NAME +from config import DATA_DIR, IMG_PATH, CSS_PATH, MODEL_NAME st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png") st.set_page_config( @@ -135,9 +135,7 @@ st.session_state.logger.info("create prompt JD object") ### 모델 세팅 그대로 - llm = ChatOpenAI(temperature=st.session_state.temperature, - model_name=MODEL_NAME, - openai_api_key=OPENAI_API_KEY) + llm = ChatOpenAI(temperature=st.session_state.temperature, model_name=MODEL_NAME) st.session_state.logger.info("create llm object") @@ -155,7 +153,7 @@ st.session_state.logger.info("create no resume prompt question template") st.session_state.prompt_question = create_prompt_with_no_resume(prompt_noResume_question_template) - llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY) + llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME) st.session_state.chain = LLMChain(llm=llm3, prompt=st.session_state.prompt_question) st.session_state.main_question = st.session_state.chain.run({"jd": st.session_state.job_description}) ################# diff --git a/pages/4_show_questions_hint.py b/pages/4_show_questions_hint.py index 3be227e..2c524a4 100644 --- a/pages/4_show_questions_hint.py +++ b/pages/4_show_questions_hint.py @@ -12,7 +12,7 @@ from src.generate_question import 
(create_prompt_feedback, # 추가 create_prompt_hint) from src.util import read_prompt_from_txt -from config import DATA_DIR, IMG_PATH, OPENAI_API_KEY, MODEL_NAME +from config import DATA_DIR, IMG_PATH, MODEL_NAME st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png") st.set_page_config( @@ -80,7 +80,7 @@ st.session_state.logger.info("create prompt_Feedback object") ### 모델 세팅 그대로 - llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY) + llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME) st.session_state.logger.info("create llm object") @@ -115,7 +115,7 @@ st.session_state.logger.info("create prompt_Hint object") ### 모델 세팅 - llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY) + llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME) st.session_state.logger.info("create llm object") diff --git a/src/gene_question_2chain_ver.py b/src/gene_question_2chain_ver.py index b591674..a8a16f1 100644 --- a/src/gene_question_2chain_ver.py +++ b/src/gene_question_2chain_ver.py @@ -20,10 +20,10 @@ from streamlit_extras.switch_page_button import switch_page from util import local_css, read_prompt_from_txt -from config import OPENAI_API_KEY, MODEL_NAME +from config import MODEL_NAME, PATH DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data") -st.session_state["FAV_IMAGE_PATH"] = os.path.join(DATA_DIR, "images/favicon.png") +st.session_state["FAV_IMAGE_PATH"] = os.path.join(DATA_DIR, "images", "favicon.png") st.set_page_config( page_title="Hello Jobits", # 브라우저탭에 뜰 제목 page_icon=Image.open( @@ -36,14 +36,12 @@ st.session_state.logger.info("start") NEXT_PAGE = "show_questions_hint" -MY_PATH = os.path.dirname(os.path.dirname(__file__)) - #### style css #### MAIN_IMG = st.session_state.MAIN_IMG LOGO_IMG = st.session_state.LOGO_IMG -local_css(MY_PATH + "/css/background.css") -local_css(MY_PATH + "/css/2_generate_question.css") +local_css(PATH + "/css/background.css") 
+local_css(PATH + "/css/2_generate_question.css") st.markdown(f"""