update app with chatgpt
app.py CHANGED

@@ -72,7 +72,6 @@ system_template = {
 
 # if file key.key exist read the key if note read the env variable OPENAI_TOKEN
 if os.path.isfile("key.key"):
-
     # read key.key file and set openai api key
     with open("key.key", "r") as f:
         key = f.read()
@@ -198,15 +197,17 @@ def chat(
     Yields:
         tuple: chat gradio format, chat openai format, sources used.
     """
-    reformulated_query = openai.
-        model="
-
-
-
+    reformulated_query = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": get_reformulation_prompt(query)},
+        ],
         stop=["\n---\n", "<|im_end|>"],
     )
+
 
-    reformulated_query = reformulated_query["choices"][0]["
+    reformulated_query = reformulated_query["choices"][0]["message"]["content"]
     language = "francais"
 
     sources = retrieve_with_summaries(
@@ -235,14 +236,24 @@ def chat(
         )
         messages.append(
             {
-                "role": "
-                "content": f"{sources_prompt}\n\n{docs_string}\n\
+                "role": "user",
+                "content": f"{sources_prompt}\n\n{docs_string}\n\Réponds en {language}:",
             }
         )
 
-
-
-
+        # quick message preprocessing
+        def cleaning_message(message):
+            if isinstance(message["content"], tuple):
+                message["content"] = "\n".join(message["content"])
+
+            return message
+
+        messages = [cleaning_message(message) for message in messages]
+
+
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo-16k",
+            messages=messages,
             temperature=0,  # deterministic
             stream=True,
             max_tokens=1024,
@@ -256,13 +267,14 @@ def chat(
         file = user_id[0] + timestamp + ".json"
 
         for chunk in response:
-            if
-
-
-
-
-
-
+            if "content" in chunk["choices"][0]["delta"]:
+                if (
+                    chunk_message := chunk["choices"][0]["delta"]["content"]
+                ) and chunk_message != "<|im_end|>":
+                    complete_response += chunk_message
+                    messages[-1]["content"] = complete_response
+                    gradio_format = make_pairs([a["content"] for a in messages[1:]])
+                    yield gradio_format, messages, docs_html
 
     else:
         docs_string = "Pas d'élements juridique trouvé dans les codes de loi"
@@ -330,8 +342,6 @@ with gr.Blocks(title="LoiLibre Q&A", css="style.css", theme=theme) as demo:
         with gr.Column(scale=1, variant="panel"):
            gr.Markdown("### Sources")
            sources_textbox = gr.Markdown(show_label=False)
-
-
 
     ask.submit(
         fn=chat,
@@ -354,11 +364,13 @@ with gr.Blocks(title="LoiLibre Q&A", css="style.css", theme=theme) as demo:
     Version 0.1-beta - This tool is under active development
 
     </div>
-        """
+        """
+    )
     gr.Markdown(
         """
 
-        """
+    )
 
 demo.queue(concurrency_count=16)
 
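For reference, a minimal sketch of the streaming ChatCompletion pattern this commit adopts, assuming the legacy openai-python (<1.0) API that the diff uses; the model, prompt, and key below are placeholders, not values taken from app.py:

import openai

openai.api_key = "sk-..."  # placeholder; app.py reads key.key or OPENAI_TOKEN instead

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Bonjour"}],
    temperature=0,  # deterministic, as in the diff
    stream=True,  # yields incremental "delta" chunks instead of one full message
)

complete_response = ""
for chunk in response:
    delta = chunk["choices"][0]["delta"]
    # The first chunk only carries the role; token chunks carry a "content" key,
    # which is why the diff checks `if "content" in ...` before appending.
    if "content" in delta:
        complete_response += delta["content"]

print(complete_response)

In app.py itself the same loop streams gpt-3.5-turbo-16k over the retrieved legal sources and yields each partial answer to Gradio through make_pairs.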