Update app.py
app.py
CHANGED
@@ -28,8 +28,9 @@ llama_cloud_api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
 ACCOUNT_ID = os.environ.get("CLOUDFARE_ACCOUNT_ID")
 API_TOKEN = os.environ.get("CLOUDFLARE_AUTH_TOKEN")
 API_BASE_URL = "https://api.cloudflare.com/client/v4/accounts/a17f03e0f049ccae0c15cdcf3b9737ce/ai/run/"
-
-
+
+# Add this to your existing imports and configurations
+whisper_client = InferenceClient("openai/whisper-large-v3", token=huggingface_token)
 
 print(f"ACCOUNT_ID: {ACCOUNT_ID}")
 print(f"CLOUDFLARE_AUTH_TOKEN: {API_TOKEN[:5]}..." if API_TOKEN else "Not set")
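For context, the three settings in this hunk feed the Cloudflare Workers AI calls made elsewhere in app.py. A minimal sketch of such a call is below; the model slug and prompt are illustrative, and the endpoint/response shape assumes Cloudflare's Workers AI REST API rather than anything taken from this commit.

import os
import requests

# Illustrative sketch, not part of the commit: a Workers AI text-generation request
# built from the same settings. Env var names mirror app.py's spelling.
ACCOUNT_ID = os.environ.get("CLOUDFARE_ACCOUNT_ID")
API_TOKEN = os.environ.get("CLOUDFLARE_AUTH_TOKEN")
API_BASE_URL = f"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/"

response = requests.post(
    API_BASE_URL + "@cf/meta/llama-3-8b-instruct",  # illustrative model slug
    headers={"Authorization": f"Bearer {API_TOKEN}"},
    json={"messages": [{"role": "user", "content": "Say hello"}]},
)
response.raise_for_status()
print(response.json()["result"]["response"])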
@@ -279,14 +280,17 @@ def generate_chunked_response(prompt, model, max_tokens=10000, num_calls=3, temp
     print(f"Final clean response: {final_response[:100]}...")
     return final_response
 
-def chatbot_interface(message, history, model, temperature, num_calls):
+def chatbot_interface(message, audio, history, model, temperature, num_calls, use_web_search, selected_docs):
+    if audio is not None:
+        message = transcribe_audio(audio)
+
     if not message.strip():
         return "", history
 
     history = history + [(message, "")]
 
     try:
-        for response in respond(message, history, model, temperature, num_calls):
+        for response in respond(message, history, model, temperature, num_calls, use_web_search, selected_docs):
             history[-1] = (message, response)
             yield history
     except gr.CancelledError:
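chatbot_interface streams by yielding a progressively updated history list. A self-contained sketch of the same pattern with a plain gr.Chatbot follows; the callback name and the dummy reply are illustrative, not taken from app.py.

import gradio as gr

# Sketch of the streaming pattern used above: a generator callback yields the
# growing history so gr.Chatbot re-renders after every chunk.
def stream_reply(message, history):
    history = history + [(message, "")]
    reply = ""
    for word in ("echoing", "your", "question:", message):
        reply += word + " "
        history[-1] = (message, reply)
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    box = gr.Textbox(placeholder="Type a message and press Enter")
    box.submit(stream_reply, [box, chatbot], chatbot)

if __name__ == "__main__":
    demo.launch()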
@@ -398,27 +402,8 @@ def summarize_web_results(query: str, search_results: List[Dict[str, str]], conv
     except Exception as e:
         return f"An error occurred during summarization: {str(e)}"
 
-def transcribe_audio(audio):
-    if audio is None:
-        return "No audio file provided."
-
-    try:
-        sr, y = audio
-        y = y.astype(np.float32)
-        y /= np.max(np.abs(y))
-
-        response = requests.post(API_URL, headers=headers, json={"inputs": {"sampling_rate": sr, "raw": y.tolist()}})
-        response.raise_for_status()
-        return response.json()["text"]
-    except Exception as e:
-        return f"An error occurred during transcription: {str(e)}"
-
 # Modify the existing respond function to handle both PDF and web search
-def respond(message, history, model, temperature, num_calls, use_web_search, selected_docs
-    if audio_input is not None:
-        transcription = transcribe_audio(audio_input)
-        message = f"Transcription: {transcription}\n\nUser query: {message}"
-
+def respond(message, history, model, temperature, num_calls, use_web_search, selected_docs):
     logging.info(f"User Query: {message}")
     logging.info(f"Model Used: {model}")
     logging.info(f"Selected Documents: {selected_docs}")
@@ -627,6 +612,17 @@ Write a detailed and complete response that answers the following user question:
 
     logging.info("Finished generating response")
 
+def transcribe_audio(audio_file):
+    if audio_file is None:
+        return ""
+
+    with open(audio_file, "rb") as f:
+        data = f.read()
+
+    response = whisper_client.audio_to_text(data)
+    return response["text"]
+
+
 def vote(data: gr.LikeData):
     if data.liked:
         print(f"You upvoted this response: {data.value}")
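The new transcribe_audio reads the recording into bytes and calls whisper_client.audio_to_text. For comparison, here is a hedged sketch of the same helper written against huggingface_hub's documented automatic_speech_recognition method, which accepts a file path directly; the token env var name is an assumption for the sketch.

import os
from huggingface_hub import InferenceClient

# Sketch only, not the commit's code: the same helper via
# InferenceClient.automatic_speech_recognition.
whisper_client = InferenceClient("openai/whisper-large-v3",
                                 token=os.environ.get("HUGGINGFACE_TOKEN"))

def transcribe_audio(audio_file):
    if audio_file is None:
        return ""
    # Accepts a local file path or raw bytes; newer huggingface_hub releases return
    # an object with a .text attribute, older ones return the transcription as a string.
    result = whisper_client.automatic_speech_recognition(audio_file)
    return getattr(result, "text", result)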
@@ -679,45 +675,28 @@ custom_placeholder = "Ask a question (Note: You can toggle between Web Search an
 # Update the demo interface
 # Update the Gradio interface
 demo = gr.ChatInterface(
-
+    chatbot_interface,
     additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=True, render=False),
     additional_inputs=[
         gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
         gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
         gr.Checkbox(label="Use Web Search", value=True),
-        gr.CheckboxGroup(label="Select documents to query")
+        gr.CheckboxGroup(label="Select documents to query"),
+        gr.Audio(source="microphone", type="filepath", label="Speak your question")
     ],
     title="AI-powered PDF Chat and Web Search Assistant",
-    description="Chat with your PDFs
+    description="Chat with your PDFs, use web search, or speak your questions.",
     theme=gr.themes.Soft(
-
-        secondary_hue="amber",
-        neutral_hue="gray",
-        font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
-    ).set(
-        body_background_fill_dark="#0c0505",
-        block_background_fill_dark="#0c0505",
-        block_border_width="1px",
-        block_title_background_fill_dark="#1b0f0f",
-        input_background_fill_dark="#140b0b",
-        button_secondary_background_fill_dark="#140b0b",
-        border_color_accent_dark="#1b0f0f",
-        border_color_primary_dark="#1b0f0f",
-        background_fill_secondary_dark="#0c0505",
-        color_accent_soft_dark="transparent",
-        code_background_fill_dark="#140b0b"
+        # ... (keep the existing theme configuration)
     ),
     css=css,
     examples=[
-
-        ["What are the main topics discussed in the documents?"],
-        ["Can you summarize the key points from the PDFs?"],
-        ["What's the latest news about artificial intelligence?"]
+        # ... (keep the existing examples)
     ],
     cache_examples=False,
     analytics_enabled=False,
-    textbox=gr.Textbox(placeholder="
+    textbox=gr.Textbox(placeholder="Type or speak your question", container=False, scale=7),
     chatbot = gr.Chatbot(
         show_copy_button=True,
         likeable=True,
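gr.ChatInterface hands additional_inputs to its callback after (message, history), in the order they are listed. A self-contained sketch of that wiring with the same controls is below; it assumes Gradio 4.x, where the 3.x source="microphone" keyword used in this hunk became sources=["microphone"], and the model choices and reply text are illustrative.

import gradio as gr

# Self-contained sketch (illustrative values): additional_inputs arrive after
# (message, history), so the microphone filepath is the last callback argument here.
def chat_fn(message, history, model, temperature, num_calls, use_web_search, selected_docs, audio_path):
    if audio_path:  # filepath handed over by gr.Audio(type="filepath")
        message = f"(voice input at {audio_path}) {message}"
    yield f"[{model}, T={temperature}] {message}"

demo = gr.ChatInterface(
    chat_fn,
    additional_inputs=[
        gr.Dropdown(choices=["model-a", "model-b"], value="model-a", label="Select Model"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
        gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
        gr.Checkbox(label="Use Web Search", value=True),
        gr.CheckboxGroup(label="Select documents to query"),
        gr.Audio(sources=["microphone"], type="filepath", label="Speak your question"),
    ],
)

if __name__ == "__main__":
    demo.launch()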
@@ -727,7 +706,6 @@ demo = gr.ChatInterface(
 )
 )
 
-# Add file upload functionality
 # Add file upload functionality
 with demo:
     gr.Markdown("## Upload and Manage PDF Documents")