jeon committed
Commit cd4e5ff · 1 Parent(s): 93c3753
Files changed (1):
  1. app.py  +14 −6
app.py CHANGED

@@ -13,7 +13,17 @@ def get_completion(prompt, model="gpt-3.5-turbo"):
     )
     return response.choices[0].message["content"]
 
-def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0.8):
+
+
+def get_completion_from_messages(input, model="gpt-3.5-turbo", temperature=0.8):
+    messages = [
+        {'role': 'system', 'content': '너는 자기소개서에 기반하여 질문을 하는 면접관이야.\
+만약 전문용어가 있다면 꼬리질문해줘'},
+        {
+            "role": "user",
+            "content": input
+        }
+    ]
     response = openai.ChatCompletion.create(
         model=model,
         messages=messages,
@@ -22,9 +32,7 @@ def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0.8):
     # print(str(response.choices[0].message))
     return response.choices[0].message["content"]
 
-messages = [
-{'role':'interviewer', 'content':'너는 자기소개서에 기반하여 질문을 하는 면접관이야.\
-만약 전문용어가 있다면 꼬리질문해줘'}]
+
 
 
 
@@ -41,7 +49,7 @@ class ChatBot:
 
     def predict(self, user_input):
 
-        response_text =get_completion_from_messages(messages, temperature=0.8)
+        response_text =get_completion_from_messages(user_input, temperature=0.8)
 
         return response_text  # Return the generated response
 
@@ -56,7 +64,7 @@ title = "자소서기반 면접 시뮬레이션 chat bot (this template based on
 iface = gr.Interface(
     fn=bot.predict,
     title=title,
-    inputs=["text", "text"],  # Take user input and system prompt separately
+    inputs=["text"],  # Take user input and system prompt separately
     outputs="text",
     theme="ParityError/Anime"
 )
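
For orientation, here is a minimal sketch of the affected parts of app.py as they read after this commit. It assumes the pre-1.0 openai SDK (the openai.ChatCompletion API used in the diff) and a plain gr.Interface launch; the imports, the API-key line, the abbreviated title string, and the English comments translating the Korean prompt are illustrative additions, not part of the commit.

# Sketch of app.py after commit cd4e5ff (assumes openai<1.0 and gradio are installed).
import os
import openai
import gradio as gr

openai.api_key = os.getenv("OPENAI_API_KEY")  # assumed; key handling is not shown in the diff

def get_completion_from_messages(input, model="gpt-3.5-turbo", temperature=0.8):
    # The system prompt is now built inside the function and the caller's text
    # becomes the user message. The Korean prompt roughly says: "You are an
    # interviewer who asks questions based on the applicant's self-introduction
    # (cover letter). If it contains technical terms, ask follow-up questions."
    messages = [
        {'role': 'system', 'content': '너는 자기소개서에 기반하여 질문을 하는 면접관이야.\
만약 전문용어가 있다면 꼬리질문해줘'},
        {"role": "user", "content": input},
    ]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
    )
    return response.choices[0].message["content"]

class ChatBot:
    def predict(self, user_input):
        # The user's text is forwarded directly; the hard-coded module-level
        # `messages` list from the previous revision is gone.
        response_text = get_completion_from_messages(user_input, temperature=0.8)
        return response_text  # Return the generated response

bot = ChatBot()
title = "자소서기반 면접 시뮬레이션 chat bot"  # abbreviated; "cover-letter-based interview simulation chat bot"

iface = gr.Interface(
    fn=bot.predict,
    title=title,
    inputs=["text"],   # a single text box; the separate system-prompt box was removed
    outputs="text",
    theme="ParityError/Anime",
)

iface.launch()

Folding the system prompt into get_completion_from_messages makes each call stateless: every Gradio submission sends exactly two messages (the fixed interviewer instruction plus the latest user text), so earlier turns are not carried into the next request.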