Commit · 7311ece
1 Parent(s): aa80f08

Added app

Files changed:
- .gitignore +2 -0
- app.py +44 -0
- utils.py +6 -0
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.vscode
+__pycache__
app.py
ADDED
@@ -0,0 +1,44 @@
+import gradio as gr
+from neon_llm_chatgpt.chatgpt import ChatGPT
+from utils import convert_history
+import os
+
+
+
+model_choices = [
+    "gpt-3.5-turbo",
+    "gpt-4"
+]
+
+
+key = os.environ['OPENAI_API_KEY']
+config = {
+    "key": key,
+    "model": model_choices[0],
+    "role": "You are trying to give a short answer in less than 40 words.",
+    "context_depth": 3,
+    "max_tokens": 256,
+}
+
+
+
+chatgpt = ChatGPT(config)
+
+
+
+def ask(message, history, persona):
+    chat_history = convert_history(history)
+    response = chatgpt.ask(message, chat_history, persona={"description": persona})
+
+    return response
+
+
+demo = gr.ChatInterface(ask,
+                        additional_inputs=[
+                            gr.Textbox(chatgpt.role, label="Persona"),
+                            # gr.Dropdown(choices=model_choices, value=model_choices[0], label="Model")
+                        ]
+                        )
+
+if __name__ == "__main__":
+    demo.queue().launch()
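For context on how the pieces fit: gr.ChatInterface calls the handler once per turn, passing the new message, the accumulated history, and one value per entry in additional_inputs, so the Persona textbox arrives as the third argument. A minimal sketch of that call shape (hedged; it assumes app.py's dependencies, including the neon_llm_chatgpt package and a valid OPENAI_API_KEY, are available, and the sample messages are made up):

# Hedged sketch of the call shape gr.ChatInterface uses for ask();
# this illustrates the argument order, not Gradio's actual internals.
from app import ask  # assumes app.py and its dependencies import cleanly

history = [["Hi", "Hello! How can I help?"]]   # prior [user, bot] pairs
reply = ask("What is Gradio?", history,
            "You answer in one short sentence.")  # Persona textbox value
print(reply)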
utils.py
ADDED
@@ -0,0 +1,6 @@
+def convert_history(history):
+    chat_history = []
+    for block in history:
+        chat_history.append(["user", block[0]])
+        chat_history.append(["llm", block[1]])
+    return chat_history
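convert_history adapts Gradio's history format to what ChatGPT.ask expects: Gradio passes a list of [user_message, bot_message] pairs, and the helper flattens each pair into role-tagged turns. A quick illustrative run (the sample messages are made up):

from utils import convert_history

# Each inner list is one Gradio chat turn: [user_message, bot_message].
history = [["Hi", "Hello!"], ["How are you?", "Fine, thanks."]]
print(convert_history(history))
# [['user', 'Hi'], ['llm', 'Hello!'],
#  ['user', 'How are you?'], ['llm', 'Fine, thanks.']]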