taldemir committed on
Commit
3273f38
·
verified ·
1 Parent(s): 26d6fcd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -33
app.py CHANGED
@@ -4,8 +4,9 @@ from dotenv import load_dotenv
4
  from openai import OpenAI
5
  from prompts.initial_prompt import INITIAL_PROMPT
6
  from prompts.main_prompt import MAIN_PROMPT
 
7
 
8
# Pull OPENAI_API_KEY from a local .env file when one is present.
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

client = OpenAI(api_key=OPENAI_API_KEY)
 
 
 
16
 
 
 
 
 
17
 
 
 
 
 
 
18
def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=512,
             temperature=0.7,
             top_p=0.95):
    """Generate an assistant reply via the OpenAI chat-completions API.

    history: list of (user_text, assistant_text) tuples from earlier turns.
    user_message: the message the user just submitted.
    Returns the assistant's reply text.
    """
    # System prompt first, then every prior turn, then the new user message.
    messages = [{"role": "system", "content": MAIN_PROMPT}]
    for prev_user, prev_assistant in history:
        if prev_user:
            messages.append({"role": "user", "content": prev_user})
        if prev_assistant:
            messages.append({"role": "assistant", "content": prev_assistant})
    messages.append({"role": "user", "content": user_message})

    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return completion.choices[0].message.content
51
 
 
52
def respond(user_message, history):
    """Gradio submit handler.

    user_message: the text the user just entered.
    history: list of (user_text, assistant_text) tuples (Gradio state).
    Returns ("", updated_history) so the input box clears and the chat
    display refreshes.
    """
    # Ignore empty AND whitespace-only submissions; previously "   " slipped
    # past the falsy check and triggered a pointless API call.
    if not user_message or not user_message.strip():
        return "", history

    assistant_reply = gpt_call(history, user_message)
    history.append((user_message, assistant_reply))
    return "", history
70
 
71
- ##############################
72
- # Gradio Blocks UI
73
- ##############################
74
with gr.Blocks() as demo:
    gr.Markdown("## Simple Chat Interface")

    # Seed the chat so the assistant appears to open with INITIAL_PROMPT.
    chat_window = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],
        height=500
    )

    # Parallel (user, assistant) history, seeded identically to the chatbot.
    chat_state = gr.State([("", INITIAL_PROMPT)])

    message_box = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # On submit: respond() clears the textbox and refreshes the chat display,
    # then the refreshed chat is copied back into the state.
    message_box.submit(
        respond,
        inputs=[message_box, chat_state],
        outputs=[message_box, chat_window]
    ).then(
        fn=lambda _, h: h,
        inputs=[message_box, chat_window],
        outputs=[chat_state]
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
 
 
 
4
  from openai import OpenAI
5
  from prompts.initial_prompt import INITIAL_PROMPT
6
  from prompts.main_prompt import MAIN_PROMPT
7
+ from fastapi import FastAPI
8
 
9
# Load OpenAI API key from a local .env file when present.
if os.path.exists(".env"):
    load_dotenv(".env")

# Restored: this assignment is required by the OpenAI client below but was
# missing from the pasted excerpt (it appears only as hunk-header context).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

client = OpenAI(api_key=OPENAI_API_KEY)

# FastAPI app served alongside the Gradio UI (exposes the prompt endpoints).
app = FastAPI()
19
 
20
# HTTP endpoints that expose the prompt texts to other clients.
@app.get("/initial_prompt")
def get_initial_prompt():
    """Return the chatbot's opening prompt as JSON."""
    return dict(prompt=INITIAL_PROMPT)
24
 
25
@app.get("/main_prompt")
def get_main_prompt():
    """Return the system prompt as JSON."""
    return dict(prompt=MAIN_PROMPT)
28
+
29
# Chatbot helper: assemble the conversation and query OpenAI.
def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=512,
             temperature=0.7,
             top_p=0.95):
    """Call the OpenAI chat-completions API and return the reply text.

    history holds (user_text, assistant_text) tuples; empty sides of a
    turn are skipped when replaying the conversation.
    """
    messages = [{"role": "system", "content": MAIN_PROMPT}]
    for turn in history:
        u, a = turn
        if u:
            messages.append({"role": "user", "content": u})
        if a:
            messages.append({"role": "assistant", "content": a})
    messages.append({"role": "user", "content": user_message})

    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return completion.choices[0].message.content
56
 
57
# Gradio submit handler.
def respond(user_message, history):
    """Handle a chat submission from the Gradio UI.

    user_message: the text the user just entered.
    history: list of (user_text, assistant_text) tuples (Gradio state).
    Returns ("", updated_history) so the input box clears and the chat
    display refreshes.
    """
    # Ignore empty AND whitespace-only submissions; previously "   " slipped
    # past the falsy check and triggered a pointless API call.
    if not user_message or not user_message.strip():
        return "", history

    assistant_reply = gpt_call(history, user_message)
    history.append((user_message, assistant_reply))
    return "", history
66
 
 
 
 
67
with gr.Blocks() as demo:
    gr.Markdown("## Simple Chat Interface")

    # Open the conversation with INITIAL_PROMPT shown as an assistant turn.
    chat_window = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],
        height=500
    )

    # (user, assistant) history, seeded identically to the chatbot widget.
    chat_state = gr.State([("", INITIAL_PROMPT)])

    message_box = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # Submit -> respond() clears the textbox and updates the chat display;
    # the follow-up step mirrors the updated chat back into the state.
    message_box.submit(
        respond,
        inputs=[message_box, chat_state],
        outputs=[message_box, chat_window]
    ).then(
        fn=lambda _, h: h,
        inputs=[message_box, chat_window],
        outputs=[chat_state]
    )
91
 
92
# Launch the Gradio UI on a background thread so the FastAPI `app` above can
# be served by the host process (e.g. uvicorn) at the same time.
import threading

def run_gradio():
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)

# NOTE(review): the thread starts unconditionally at import time and its
# handle is discarded — confirm this is intended for the deployment target.
threading.Thread(target=run_gradio).start()