mbarnig committed on
Commit
96d4cba
·
verified ·
1 Parent(s): 9d47737

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -7
app.py CHANGED
@@ -10,6 +10,32 @@ client = AsyncOpenAI(
10
 
11
  assistantID = "asst_pMk1lyBSaVZPulq44RvIJUNe"
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  class EventHandler(AsyncAssistantEventHandler):
14
  def __init__(self) -> None:
15
  super().__init__()
@@ -70,7 +96,8 @@ async def generate_response(user_input):
70
  async with client.beta.threads.runs.stream(
71
  thread_id=thread_id,
72
  assistant_id=assistant_id,
73
- instructions="""You are a Code Interpreter to analyze JSON files with RTL comments. Here is the format of the files :
 
74
  [
75
  {
76
  "context_id": "",
@@ -92,7 +119,9 @@ async def generate_response(user_input):
92
  },
93
  }
94
  ]
95
- You will search dates ("date_created" of a comment and "date" of the related thumbs), calculate the total number of up scores and down scores. You will answer questions about "context_id", "text" and "referers".""",
 
 
96
  event_handler=event_handler,
97
  ) as stream:
98
  # Yield incremental updates
@@ -120,12 +149,14 @@ async def gradio_chat_interface(user_input):
120
  # Set up Gradio interface with streaming
121
  interface = gr.Interface(
122
  fn=gradio_chat_interface,
123
- inputs="text",
124
  outputs="markdown",
125
- title="OpenAI Interpreter with Gradio",
126
- description="Ask anything and get an AI-generated response in real-time.",
127
- live=False, # Important to allow streaming-like behavior
128
- allow_flagging="never"
 
 
129
  )
130
 
131
  # Launch the Gradio app
 
10
 
11
  assistantID = "asst_pMk1lyBSaVZPulq44RvIJUNe"
12
 
13
+ mytitle = "<h1 align=center>Wat hunn d'Lëtzebuerger an de leschte Jore kommentéiert ?</h1>"
14
+
15
+ mydescription="""
16
+ <h3 align='center'>Wat fir een Thema interesséiert Dech : 👍 👎 🤛 ☝️ </h3>
17
+ <table width=100%>
18
+ <tr>
19
+ <th width=50% bgcolor="Moccasin">Stell deng Fro op englesch, fir einfach Froe versteet d'AI och Lëtzebuergesch !</th>
20
+ <th bgcolor="Khaki">Äntwert vum OpenAI Code-Interpreter Assistent :</th>
21
+ </tr>
22
+ </table>
23
+ """
24
+
25
+ myarticle ="""
26
+ <h3>Hannergrënn :</h3>
27
+ <p>Dës HuggingFace Space Demo gouf vum <a href="https://github.com/mbarnig">Marco Barnig</a> realiséiert. Als kënstlech Intelligenz gëtt, mëttels API, den <a href="https://platform.openai.com/docs/models">OpenAI Modell</a> gpt-4o-mini-2024-07-18 benotzt, deen als Kontext bis 128.000 Tokens ka benotzen, eng Äntwert op eng Fro vu maximal 16.384 Tokens ka ginn a bis zu 200.000 Tokens pro Minutt (TPM) ka beaarbechten. Fir dës Demo goufen nëmmen eng News-JSON-Datei mat enger Gréisst vun 30 MB benotzt. Et ass méiglech bis zu 20 Dateien op en OpenAI Code-Interpreter Assistent opzelueden. D'Äntwerte vun de Beispiller sinn am Cache gespäichert a ginn duerfir ouni Delai ugewise.</p>
28
+ """
29
+
30
+ myinput = gr.Textbox(lines=3, label="Wat interesséiert Dech ?")
31
+
32
+ myexamples = [
33
+ "Wat fir ee Kommentar krut déi meescht 👍 ?",
34
+ "Wat fir ee Kommentar krut déi meescht 👎 ?",
35
+ "Show me a random comment !",
36
+ "Please show a comment with 2 👍 and 2 👎 !"
37
+ ]
38
+
39
  class EventHandler(AsyncAssistantEventHandler):
40
  def __init__(self) -> None:
41
  super().__init__()
 
96
  async with client.beta.threads.runs.stream(
97
  thread_id=thread_id,
98
  assistant_id=assistant_id,
99
+ instructions="""
100
+ You are a Code Interpreter to analyze JSON files with RTL comments. Here is the format of the JSON files :
101
  [
102
  {
103
  "context_id": "",
 
119
  },
120
  }
121
  ]
122
+ You will search dates ("date_created" of a comment and "date" of the related thumbs), calculate the total number of "thumbs":[{"score": "up"},{"score": "down"}] and answer questions about "context_id", "text" and "referers". A 👍 means "thumbs":[{"score": "up"}], a 👎 means "thumbs":[{"score": "down"}]. Please indicate in all responses the number of thumbs. Please provide your answers in luxembourgish language.
123
+ """
124
+ ,
125
  event_handler=event_handler,
126
  ) as stream:
127
  # Yield incremental updates
 
149
  # Set up Gradio interface with streaming
150
  interface = gr.Interface(
151
  fn=gradio_chat_interface,
152
+ inputs=myinput,
153
  outputs="markdown",
154
+ title=mytitle,
155
+ description=mydescription,
156
+ article=myarticle,
157
+ live=False,
158
+ allow_flagging="never",
159
+ examples=myexamples
160
  )
161
 
162
  # Launch the Gradio app