Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -10,37 +10,7 @@ client = AsyncOpenAI(
|
|
10 |
)
|
11 |
|
12 |
assistantID = os.getenv("OPENAI_ASSISTANT_ID")
|
13 |
-
|
14 |
-
password = os.getenv("YOUR_PASSWORD")
|
15 |
-
|
16 |
-
mytitle = "<h1 align=center>RTL AI News Reader : What happened in the country 🇱🇺 and in the world 🌎 ?</h1>"
|
17 |
-
|
18 |
-
mydescription="""
|
19 |
-
<h3 align='center'>Which topic interests you : 🐶 🏃🏻♂️ 🌗 🍇 🌈 🍽️ 🏆 🚘 ✈️ 🩺 </h3>
|
20 |
-
<table width=100%>
|
21 |
-
<tr>
|
22 |
-
<th width=50% bgcolor="Moccasin">Ask your questions in english or another language :</th>
|
23 |
-
<th bgcolor="Khaki">Response from the OpenAI File-Search Assistant :</th>
|
24 |
-
</tr>
|
25 |
-
</table>
|
26 |
-
"""
|
27 |
-
|
28 |
-
myarticle ="""
|
29 |
-
<h3>Background :</h3>
|
30 |
-
<p>This HuggingFace Space demo was created by <a href="https://github.com/mbarnig">Marco Barnig</a>. As an artificial intelligence,
|
31 |
-
the <a href="https://platform.openai.com/docs/models">OpenAI model</a> gpt-4o-mini-2024-07-18 is used via API,
|
32 |
-
which can utilize up to 128,000 tokens as context, provide an answer to a question with a maximum of 16,384 tokens,
|
33 |
-
and process up to 200,000 tokens per minute (TPM). All english content from RTL.lu from the beginning up to September 2024 has been split into 16 JSON files
|
34 |
-
and uploaded to a Vector Store by the OpenAI File-Search Assistant "RTL English News Reader."
|
35 |
-
Each file contains fewer than 5 million tokens, which is an upper limit for the AI model. It is possible to upload up to 10,000 files to an OpenAI Assistant.
|
36 |
-
The responses of the examples are cached and therefore displayed without delay.</p>
|
37 |
-
"""
|
38 |
-
|
39 |
-
myinput = gr.Textbox(lines=3, label=" What would you like to know ?")
|
40 |
-
|
41 |
-
myexamples = [
|
42 |
-
"What happened in 2014 ?"
|
43 |
-
]
|
44 |
|
45 |
class EventHandler(AsyncAssistantEventHandler):
|
46 |
def __init__(self) -> None:
|
@@ -86,14 +56,17 @@ async def initialize_thread():
|
|
86 |
session_data["thread_id"] = thread.id
|
87 |
|
88 |
async def generate_response(user_input):
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
|
|
|
|
|
|
97 |
)
|
98 |
|
99 |
# Create and Stream a Run
|
@@ -111,34 +84,59 @@ async def generate_response(user_input):
|
|
111 |
yield event_handler.response_text
|
112 |
|
113 |
# Gradio interface function (generator)
|
114 |
-
async def gradio_chat_interface(user_input):
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
)
|
11 |
|
12 |
assistantID = os.getenv("OPENAI_ASSISTANT_ID")
|
13 |
+
mypassword = os.getenv("RTL_PASSWORD")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
class EventHandler(AsyncAssistantEventHandler):
|
16 |
def __init__(self) -> None:
|
|
|
56 |
session_data["thread_id"] = thread.id
|
57 |
|
58 |
async def generate_response(user_input):
|
59 |
+
if user_input == "":
|
60 |
+
yield "Schreif eng Fro als Input"
|
61 |
+
else:
|
62 |
+
assistant_id = session_data["assistant_id"]
|
63 |
+
thread_id = session_data["thread_id"]
|
64 |
+
|
65 |
+
# Add a Message to the Thread
|
66 |
+
oai_message = await client.beta.threads.messages.create(
|
67 |
+
thread_id=thread_id,
|
68 |
+
role="user",
|
69 |
+
content=user_input
|
70 |
)
|
71 |
|
72 |
# Create and Stream a Run
|
|
|
84 |
yield event_handler.response_text
|
85 |
|
86 |
# Gradio interface function (generator)
|
87 |
+
async def gradio_chat_interface(mode, password, user_input, example):
|
88 |
+
if mode == "Beispiller":
|
89 |
+
filename = example[-6:-2] + ".md"
|
90 |
+
file = open("examples/" + filename, "r")
|
91 |
+
output = file.read()
|
92 |
+
yield output
|
93 |
+
else:
|
94 |
+
# check the password
|
95 |
+
if password == "":
|
96 |
+
yield "To search you need to enter an RTL password !"
|
97 |
+
elif password != mypassword:
|
98 |
+
yield "Please enter the correct RTL password !"
|
99 |
+
else:
|
100 |
+
|
101 |
+
# Create a new event loop if none exists (or if we are in a new thread)
|
102 |
+
try:
|
103 |
+
loop = asyncio.get_running_loop()
|
104 |
+
except RuntimeError:
|
105 |
+
loop = asyncio.new_event_loop()
|
106 |
+
asyncio.set_event_loop(loop)
|
107 |
+
|
108 |
+
# Initialize the thread if not already done
|
109 |
+
if session_data["thread_id"] is None:
|
110 |
+
await initialize_thread()
|
111 |
+
|
112 |
+
# Generate and yield responses
|
113 |
+
async for response in generate_response(user_input):
|
114 |
+
yield response
|
115 |
+
|
116 |
+
with gr.Blocks() as demo:
|
117 |
+
with gr.Row():
|
118 |
+
myTitle = gr.HTML("<h2 align=center>RTL AI News Reader : What happened in the country 🇱🇺 or in the world 🌎 ?</h2>")
|
119 |
+
with gr.Row():
|
120 |
+
myDescription = gr.HTML("""
|
121 |
+
<h3 align='center'>Wat fir een Thema interesséiert Iech ?</h3>
|
122 |
+
<p align='center'>🐶 🏃🏻♂️ 🌗 🍇 🌈 🍽️ 🏆 🚘 ✈️ 🩺 </p>
|
123 |
+
<p align='center' bgcolor="Moccasin">Submit your question in english or in another language !</p>
|
124 |
+
"""
|
125 |
+
)
|
126 |
+
with gr.Row():
|
127 |
+
mode = gr.Radio(choices=["Search", "Examples"], label = "You can run the examples without password.", value = "Examples")
|
128 |
+
pw = gr.Textbox(lines=1, label="Enter the correct RTL password !")
|
129 |
+
with gr.Row():
|
130 |
+
question = gr.Textbox(lines=3, label="Please submit your question ?")
|
131 |
+
with gr.Row():
|
132 |
+
examples = gr.Radio(["Wat war lass am Juni 2023 ?", "Wat ass gewosst iwwert de SREL ?", "Wat fir eng Katastroph war 2022 zu Lëtzebuerg ?", "Koumen an de leschte Jore gréisser Kriminalfäll viru Geriicht ?"], value="Wat ass gewosst iwwert de SREL ?" , label="Beispiller")
|
133 |
+
with gr.Row():
|
134 |
+
clear = gr.Button("Clear")
|
135 |
+
submit = gr.Button("Submit")
|
136 |
+
with gr.Row():
|
137 |
+
mySubtitle = gr.HTML("<p align='center' bgcolor='Khaki'>English RTL News :</p>")
|
138 |
+
with gr.Row():
|
139 |
+
myOutput = gr.Markdown(label="Answer from the OpenAI File-Search Assistant :")
|
140 |
+
|
141 |
+
submit.click(fn = gradio_chat_interface, inputs=[mode, pw, question, examples], outputs = myOutput)
|
142 |
+
demo.launch()
|