alibicer committed on
Commit
a3b78b2
·
verified ·
1 Parent(s): 88624a9

Update app.py

Files changed (1)
  1. app.py +39 -49
app.py CHANGED
@@ -3,10 +3,9 @@ import gradio as gr
  from dotenv import load_dotenv
  from openai import OpenAI
  from prompts.initial_prompt import INITIAL_PROMPT
- from prompts.main_prompt import TASK_PROMPT
+ from prompts.main_prompt import MAIN_PROMPT

-
- # Load the OpenAI API key from .env file
+ # Load OPENAI_API_KEY from the .env file
  if os.path.exists(".env"):
      load_dotenv(".env")

@@ -15,105 +14,96 @@ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
  client = OpenAI(api_key=OPENAI_API_KEY)


+
  def gpt_call(history, user_message,
               model="gpt-4o-mini",
               max_tokens=512,
               temperature=0.7,
               top_p=0.95):
      """
-     Calls OpenAI's ChatCompletion API to generate responses.
+     Generates a response via the OpenAI ChatCompletion API.
      - history: [(user_text, assistant_text), ...]
-     - user_message: User's latest input
+     - user_message: the message the user just entered
      """
-     # System message (TASK_PROMPT) at the beginning
-     messages = [{"role": "system", "content": TASK_PROMPT}]
+     # 1) Put the system message (MAIN_PROMPT) at the very front
+     messages = [{"role": "system", "content": MAIN_PROMPT}]

-     # Convert history into OpenAI format
+     # 2) Convert the existing conversation history into OpenAI format
+     #    user_text -> 'user' / assistant_text -> 'assistant'
      for user_text, assistant_text in history:
          if user_text:
              messages.append({"role": "user", "content": user_text})
          if assistant_text:
              messages.append({"role": "assistant", "content": assistant_text})

-     # Add the latest user input
+     # 3) Append the user's latest input at the end
      messages.append({"role": "user", "content": user_message})

-     # AI-controlled gradual guidance
-     if "bar model" in user_message.lower():
-         return "Great! You've started using a bar model. Can you explain how you divided it? What does each section represent?"
-
-     elif "double number line" in user_message.lower():
-         return "Nice! How does your number line show the relationship between time and distance? Did you mark the correct intervals?"
-
-     elif "ratio table" in user_message.lower():
-         return "Good choice! Before I check, how did you determine the ratio for 1 hour?"
-
-     elif "graph" in user_message.lower():
-         return "Graphs are powerful! What key points did you plot, and why?"
-
-     else:
-         # OpenAI API call (fallback response)
-         completion = client.chat.completions.create(
-             model=model,
-             messages=messages,
-             max_tokens=max_tokens,
-             temperature=temperature,
-             top_p=top_p
-         )
-         return completion.choices[0].message.content
-
+     # 4) Call the OpenAI API
+     completion = client.chat.completions.create(
+         model=model,
+         messages=messages,
+         max_tokens=max_tokens,
+         temperature=temperature,
+         top_p=top_p
+     )
+     return completion.choices[0].message.content

  def respond(user_message, history):
      """
-     Handles user input and chatbot response in Gradio.
-     - user_message: The latest input from the user.
-     - history: A list of (user, assistant) message pairs.
+     Called when the user submits input in Gradio.
+     - user_message: the message the user just typed
+     - history: the existing list of (user, assistant) tuples
      """
+     # If the user sent an empty string, do nothing
      if not user_message:
          return "", history

-     # Generate AI response
+     # Get a response from the GPT model
      assistant_reply = gpt_call(history, user_message)

-     # Append to history
+     # Append the (user, assistant) pair to history
      history.append((user_message, assistant_reply))

-     # Return the updated history and clear the input box
+     # For Gradio, return (the cleared input box, the updated history)
      return "", history

-
  ##############################
- # Gradio Chatbot UI
+ # Gradio Blocks UI
  ##############################
  with gr.Blocks() as demo:
-     gr.Markdown("## AI-Guided Teacher PD Chatbot")
+     gr.Markdown("## Simple Chat Interface")

-     # Initial chatbot message (starts with the task)
+     # Set the chatbot's initial state:
+     # the first message is stored as (user="", assistant=INITIAL_PROMPT) so that
+     # on screen the assistant appears to have said INITIAL_PROMPT
      chatbot = gr.Chatbot(
-         value=[("", INITIAL_PROMPT)],
+         value=[("", INITIAL_PROMPT)],  # (user, assistant)
          height=500
      )

-     # Chat history state
+     # History state holding the (user, assistant) pairs,
+     # initialized with the same starting value
      state_history = gr.State([("", INITIAL_PROMPT)])

-     # User input box
+     # User input
      user_input = gr.Textbox(
-         placeholder="Type your response here...",
+         placeholder="Type your message here...",
          label="Your Input"
      )

-     # When user submits input → respond() updates chatbot
+     # When the input is submitted, respond() is called → outputs are (cleared input box, updated chatbot)
      user_input.submit(
          respond,
          inputs=[user_input, state_history],
          outputs=[user_input, chatbot]
      ).then(
+         # After respond() finishes, propagate the latest history into state_history
          fn=lambda _, h: h,
          inputs=[user_input, chatbot],
          outputs=[state_history]
      )

-     # Launch the chatbot
+     # Main entry point
  if __name__ == "__main__":
      demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
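
For reference, a minimal sketch of how the updated gpt_call() could be exercised outside the Gradio UI. This is not part of the commit; it assumes app.py and the prompts/ package are importable from the working directory and that OPENAI_API_KEY is available via .env or the environment, and the sample messages are purely illustrative.

# Hypothetical smoke test (not part of the commit); sample texts are illustrative.
# Assumes prompts/initial_prompt.py and prompts/main_prompt.py exist and
# OPENAI_API_KEY is set in .env or the environment.
from app import gpt_call

# history uses the same (user_text, assistant_text) tuple format as respond()
history = [("", "Hello! Let's work through the task together.")]
reply = gpt_call(history, "Can you restate the problem in your own words?")
print(reply)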