import os

import gradio as gr
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")


def get_completion(prompt, model="gpt-3.5-turbo"):
    """Single-turn helper: send one user prompt and return the model's reply."""
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,  # deterministic output for the single-turn helper
    )
    return response.choices[0].message["content"]


def get_completion_from_messages(input, model="gpt-3.5-turbo", temperature=0.8):
    """Ask the model to act as an interviewer for the user's self-introduction."""
    messages = [
        {"role": "system",
         "content": "You are an interviewer who asks questions based on the "
                    "applicant's self-introduction. If a technical term comes up, "
                    "ask a follow-up question about it."},
        {"role": "user", "content": input},
    ]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
    )
    return response.choices[0].message["content"]


class Interviewer:
    def __init__(self):
        # Placeholder for conversation history; each predict() call is currently stateless.
        self.history = []

    def predict(self, user_input):
        response = get_completion_from_messages(user_input, temperature=0.8)
        return response


inter = Interviewer()

title = "Self-introduction-based interview simulation chat bot (this template based on Tonic's MistralMed Chat)"

chatbot = gr.Interface(
    fn=inter.predict,
    title=title,
    inputs="text",
    outputs="text",
)

chatbot.launch()