mbarnig committed
Commit 90c1f8c · verified · 1 Parent(s): 21636c9

Update app.py

Files changed (1)
  1. app.py +68 -70
app.py CHANGED
@@ -10,37 +10,7 @@ client = AsyncOpenAI(
  )
 
  assistantID = os.getenv("OPENAI_ASSISTANT_ID")
- username = os.getenv("YOUR_ID")
- password = os.getenv("YOUR_PASSWORD")
-
- mytitle = "<h1 align=center>RTL AI News Reader : What happened in the country 🇱🇺 and in the world 🌎 ?</h1>"
-
- mydescription="""
- <h3 align='center'>Which topic interests you : 🐶 🏃🏻‍♂️ 🌗 🍇 🌈 🍽️ 🏆 🚘 ✈️ 🩺 </h3>
- <table width=100%>
- <tr>
- <th width=50% bgcolor="Moccasin">Ask your questions in english or another language :</th>
- <th bgcolor="Khaki">Response from the OpenAI File-Search Assistant :</th>
- </tr>
- </table>
- """
-
- myarticle ="""
- <h3>Background :</h3>
- <p>This HuggingFace Space demo was created by <a href="https://github.com/mbarnig">Marco Barnig</a>. As an artificial intelligence,
- the <a href="https://platform.openai.com/docs/models">OpenAI model</a> gpt-4o-mini-2024-07-18 is used via API,
- which can utilize up to 128,000 tokens as context, provide an answer to a question with a maximum of 16,384 tokens,
- and process up to 200,000 tokens per minute (TPM). All english content from RTL.lu from the beginning up to September 2024 has been split into 16 JSON files
- and uploaded to a Vector Store by the OpenAI File-Search Assistant "RTL English News Reader."
- Each file contains fewer than 5 million tokens, which is an upper limit for the AI model. It is possible to upload up to 10,000 files to an OpenAI Assistant.
- The responses of the examples are cached and therefore displayed without delay.</p>
- """
-
- myinput = gr.Textbox(lines=3, label=" What would you like to know ?")
-
- myexamples = [
- "What happened in 2014 ?"
- ]
+ mypassword = os.getenv("RTL_PASSWORD")
 
  class EventHandler(AsyncAssistantEventHandler):
  def __init__(self) -> None:
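The hunk above removes the YOUR_ID / YOUR_PASSWORD pair that backed HTTP basic auth and keeps a single RTL_PASSWORD secret that is now checked inside the app. A minimal sketch of how such secrets reach the code in a Space, assuming they are configured in the Space settings (the early failure check is added here for illustration and is not part of the commit):

    import os

    # Space secrets are exposed to the running app as environment variables.
    assistantID = os.getenv("OPENAI_ASSISTANT_ID")
    mypassword = os.getenv("RTL_PASSWORD")

    if not (assistantID and mypassword):
        # Without these, every search request would fail later, so stop early.
        raise RuntimeError("OPENAI_ASSISTANT_ID and RTL_PASSWORD must be set as Space secrets")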
@@ -86,14 +56,17 @@ async def initialize_thread():
  session_data["thread_id"] = thread.id
 
  async def generate_response(user_input):
- assistant_id = session_data["assistant_id"]
- thread_id = session_data["thread_id"]
-
- # Add a Message to the Thread
- oai_message = await client.beta.threads.messages.create(
- thread_id=thread_id,
- role="user",
- content=user_input
+ if user_input == "":
+ yield "Schreif eng Fro als Input"
+ else:
+ assistant_id = session_data["assistant_id"]
+ thread_id = session_data["thread_id"]
+
+ # Add a Message to the Thread
+ oai_message = await client.beta.threads.messages.create(
+ thread_id=thread_id,
+ role="user",
+ content=user_input
  )
 
  # Create and Stream a Run
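This second hunk puts an empty-input guard ("Schreif eng Fro als Input" is Luxembourgish for "Write a question as input") in front of the existing message-creation call; the streamed run that follows is outside the visible diff. As a hedged, self-contained sketch of the overall Assistants pattern with the openai Python SDK, not a copy of the committed code, the flow looks roughly like this:

    import asyncio
    import os
    from openai import AsyncOpenAI, AsyncAssistantEventHandler

    client = AsyncOpenAI()                            # reads OPENAI_API_KEY from the environment
    ASSISTANT_ID = os.getenv("OPENAI_ASSISTANT_ID")   # same secret name as in app.py

    class EventHandler(AsyncAssistantEventHandler):
        # Same shape as the EventHandler defined earlier in app.py: collect streamed text.
        def __init__(self) -> None:
            super().__init__()
            self.response_text = ""

        async def on_text_delta(self, delta, snapshot) -> None:
            self.response_text += delta.value or ""

    async def ask(question: str) -> str:
        thread = await client.beta.threads.create()
        await client.beta.threads.messages.create(
            thread_id=thread.id, role="user", content=question
        )
        handler = EventHandler()
        async with client.beta.threads.runs.stream(
            thread_id=thread.id,
            assistant_id=ASSISTANT_ID,
            event_handler=handler,
        ) as stream:
            await stream.until_done()                 # deltas accumulate in handler.response_text
        return handler.response_text

    # Usage: print(asyncio.run(ask("What happened in 2014 ?")))

The Space yields event_handler.response_text repeatedly while the run streams, so the answer appears incrementally; the sketch simply waits for completion to keep it short.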
@@ -111,34 +84,59 @@ async def generate_response(user_input):
  yield event_handler.response_text
 
  # Gradio interface function (generator)
- async def gradio_chat_interface(user_input):
- # Create a new event loop if none exists (or if we are in a new thread)
- try:
- loop = asyncio.get_running_loop()
- except RuntimeError:
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
-
- # Initialize the thread if not already done
- if session_data["thread_id"] is None:
- await initialize_thread()
-
- # Generate and yield responses
- async for response in generate_response(user_input):
- yield response
-
- # Set up Gradio interface with streaming
- interface = gr.Interface(
- fn=gradio_chat_interface,
- inputs=myinput,
- outputs="markdown",
- title=mytitle,
- description=mydescription,
- article=myarticle,
- live=False,
- allow_flagging="never",
- examples=myexamples
- )
-
- # Launch the Gradio app
- interface.launch(auth=(username, password), auth_message="<h1>RTL AI News Reader</h1><p>This HuggingFace Space is a prototype and is not yet accessible to everyone. The project is based on a file search assistant using OpenAI's API and employs the GPT-4o-mini model. You need to use a Chrome browser. AI specialists interested in accessing it can request a username and password by contacting [email protected].</p>")
+ async def gradio_chat_interface(mode, password, user_input, example):
+ if mode == "Beispiller":
+ filename = example[-6:-2] + ".md"
+ file = open("examples/" + filename, "r")
+ output = file.read()
+ yield output
+ else:
+ # check the password
+ if password == "":
+ yield "To serach you need to enter an RTL password !"
+ elif password != mypassword:
+ yield "Please enter the correct RTL password !"
+ else:
+
+ # Create a new event loop if none exists (or if we are in a new thread)
+ try:
+ loop = asyncio.get_running_loop()
+ except RuntimeError:
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+
+ # Initialize the thread if not already done
+ if session_data["thread_id"] is None:
+ await initialize_thread()
+
+ # Generate and yield responses
+ async for response in generate_response(user_input):
+ yield response
+
+ with gr.Blocks() as demo:
+ with gr.Row():
+ myTitle = gr.HTML("<h2 align=center>RTL AI News Reader : What happened in the country 🇱🇺 or in the world 🌎 ?</h2>")
+ with gr.Row():
+ myDescription = gr.HTML("""
+ <h3 align='center'>Wat fir een Thema interesséiert Iech ?</h3>
+ <p align='center'>🐶 🏃🏻‍♂️ 🌗 🍇 🌈 🍽️ 🏆 🚘 ✈️ 🩺 </p>
+ <p align='center' bgcolor="Moccasin">Submit your question in english or in another language !</p>
+ """
+ )
+ with gr.Row():
+ mode = gr.Radio(choices=["Search", "Examples"], label = "You can run the examples without password.", value = "Examples")
+ pw = gr.Textbox(lines=1, label="Enter the correct RTL password !")
+ with gr.Row():
+ question = gr.Textbox(lines=3, label="Please submit your question ?")
+ with gr.Row():
+ examples = gr.Radio(["Wat war lass am Juni 2023 ?", "Wat ass gewosst iwwert de SREL ?", "Wat fir eng Katastroph war 2022 zu Lëtzebuerg ?", "Koumen an de leschte Jore gréisser Kriminalfäll viru Geriicht ?"], value="Wat ass gewosst iwwert de SREL ?" , label="Beispiller")
+ with gr.Row():
+ clear = gr.Button("Clear")
+ submit = gr.Button("Submit")
+ with gr.Row():
+ mySubtitle = gr.HTML("<p align='center' bgcolor='Khaki'>English RTL News :</p>")
+ with gr.Row():
+ myOutput = gr.Markdown(label="Answer from the OpenAI File-Search Assistent :")
+
+ submit.click(fn = gradio_chat_interface, inputs=[mode, pw, question, examples], outputs = myOutput)
+ demo.launch()
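The third hunk replaces the single gr.Interface, which put the whole Space behind launch(auth=...), with a gr.Blocks layout in which only the "Search" mode asks for the RTL password while the "Beispiller" (examples) mode serves cached answers. As a stripped-down sketch of the wiring pattern only, with invented names and none of the committed layout, a Blocks app can stream an async generator into a Markdown output like this, assuming a recent Gradio release where (async) generator callbacks are supported:

    import gradio as gr

    async def answer(question):
        # Each yield updates the output component, giving the streaming effect.
        partial = ""
        for word in question.split():
            partial += word + " "
            yield partial

    with gr.Blocks() as demo:
        question = gr.Textbox(lines=3, label="Your question")
        submit = gr.Button("Submit")
        output = gr.Markdown()
        submit.click(fn=answer, inputs=question, outputs=output)

    demo.launch()

Moving the password check into the callback (instead of launch(auth=...)) is what lets the examples stay usable without credentials.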
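One detail of the new gradio_chat_interface worth spelling out is the "Beispiller" branch: the cached answer file is chosen by slicing the characters just before the trailing " ?" of the selected example question. A small illustration of that lookup, assuming the pre-written Markdown answers exist under examples/ (they are not part of this commit):

    example = "Wat war lass am Juni 2023 ?"
    filename = example[-6:-2] + ".md"    # last six chars are "2023 ?", so this yields "2023.md"

    # "Wat ass gewosst iwwert de SREL ?" would map to "SREL.md" in the same way.
    with open("examples/" + filename, "r") as f:
        cached_answer = f.read()
    print(cached_answer)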