testing if the app can read questions from URL parameters
app.py
CHANGED
@@ -1,6 +1,7 @@
 from openai import OpenAI
 import gradio as gr
 import os, json
+
 # Attempt to load configuration from config.json
 try:
     with open('config.json') as config_file:
@@ -25,25 +26,94 @@ system_prompt = {
     "content": SYSTEM_PROMPT
 }
 
-
 MODEL = "gpt-3.5-turbo"
 
-def predict(message, history):
+def predict(message, history, url_params):
+    # Now includes URL params in function but does not use it directly for OpenAI call
     history_openai_format = [system_prompt]
     for human, assistant in history:
-        history_openai_format.append({"role": "user", "content": human })
-        history_openai_format.append({"role": "assistant", "content":assistant})
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
     history_openai_format.append({"role": "user", "content": message})
 
     response = client.chat.completions.create(model=MODEL,
-                                              messages= history_openai_format,
-                                              temperature=1.0,
-                                              stream=True)
+                                              messages=history_openai_format,
+                                              temperature=1.0,
+                                              stream=True)
 
     partial_message = ""
     for chunk in response:
        if chunk.choices[0].delta.content:
-            partial_message = partial_message + chunk.choices[0].delta.content
+            partial_message += chunk.choices[0].delta.content
            yield partial_message
 
-gr.ChatInterface(predict).launch(share=True)
+# JavaScript function to read URL parameters
+get_window_url_params = """
+function() {
+    const params = new URLSearchParams(window.location.search);
+    return { question: params.get("question") || "" };
+}
+"""
+
+with gr.Blocks() as app:
+    url_params = gr.Variable()
+    history = gr.Variable()  # Assuming history management is handled elsewhere or is not needed for first query
+    message = gr.Textbox(label="Your Question", value="")
+    message.change(fn=predict, inputs=[message, history, url_params], outputs=message, _js=get_window_url_params)
+
+    app.load(_js=get_window_url_params)  # Load the question from URL on startup
+
+app.launch(share=True, debug=True)
+
+
+
+# V0
+# from openai import OpenAI
+# import gradio as gr
+# import os, json
+# # Attempt to load configuration from config.json
+# try:
+#     with open('config.json') as config_file:
+#         config = json.load(config_file)
+#         OPENAI_API_KEY = config.get("OPENAI_API_KEY")
+#         SYSTEM_PROMPT = config.get("SYSTEM_PROMPT")
+# except FileNotFoundError:
+#     # If config.json is not found, fall back to environment variables
+#     OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+#     SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")
+
+# # Fallback to default values if necessary
+# if not OPENAI_API_KEY:
+#     raise ValueError("OPENAI_API_KEY is not set in config.json or as an environment variable.")
+# if not SYSTEM_PROMPT:
+#     SYSTEM_PROMPT = "This is a default system prompt."
+
+# client = OpenAI(api_key=OPENAI_API_KEY)
+
+# system_prompt = {
+#     "role": "system",
+#     "content": SYSTEM_PROMPT
+# }
+
+# MODEL = "gpt-3.5-turbo"
+
+# def predict(message, history):
+#     history_openai_format = [system_prompt]
+#     for human, assistant in history:
+#         history_openai_format.append({"role": "user", "content": human })
+#         history_openai_format.append({"role": "assistant", "content":assistant})
+#     history_openai_format.append({"role": "user", "content": message})
+
+#     response = client.chat.completions.create(model=MODEL,
+#                                               messages= history_openai_format,
+#                                               temperature=1.0,
+#                                               stream=True)
+
+#     partial_message = ""
+#     for chunk in response:
+#         if chunk.choices[0].delta.content:
+#             partial_message = partial_message + chunk.choices[0].delta.content
+#             yield partial_message
+
+# gr.ChatInterface(predict).launch(share=True)
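Note on the wiring above: the JavaScript helper returns { question: ... }, but app.load is given no fn, inputs, or outputs, so the value it reads is dropped, and predict never uses its url_params argument. Below is a minimal sketch, not part of the commit, of one way the URL question could be fed into the textbox on page load. It assumes the same Gradio 3.x-era Blocks API the diff already relies on (gr.Variable and the _js keyword); the names read_question_from_url and seed_question are illustrative only.

import gradio as gr

# Runs in the browser before the Python fn; its return value replaces
# the values of the fn's declared inputs (one element per input).
read_question_from_url = """
function(question) {
    const params = new URLSearchParams(window.location.search);
    return [params.get("question") || ""];
}
"""

def seed_question(question):
    # Receives the value returned by the JS and writes it into the textbox.
    return question

with gr.Blocks() as demo:
    question_state = gr.Variable()  # hidden carrier matching seed_question's input
    message = gr.Textbox(label="Your Question")

    # On page load: run the JS, pass its result through seed_question,
    # and place the result in the visible textbox.
    demo.load(fn=seed_question,
              inputs=[question_state],
              outputs=[message],
              _js=read_question_from_url)

demo.launch()

With wiring along these lines, opening the Space at <space-url>/?question=hello should pre-fill the textbox with "hello"; the existing predict generator can then stream the model's reply as before.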