Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,53 +1,10 @@
|
|
1 |
import gradio as gr
|
2 |
-
from PyPDF2 import PdfReader
|
3 |
-
from docx import Document
|
4 |
-
import os
|
5 |
-
|
6 |
-
def process_pdf(pdf_file, token):
    """Extract text from an uploaded PDF, rewrite it with an LLM, and save a .docx.

    Args:
        pdf_file: Uploaded file object from gr.File; its ``.name`` attribute is
            the path on disk (assumes Gradio's legacy file wrapper — TODO confirm).
        token: Hugging Face API token used to authenticate the inference call.

    Returns:
        Tuple ``(rewritten_text, output_path)`` on success, or
        ``(error_message, None)`` on any failure.
    """
    try:
        # Extract text from every page. BUG FIX: extract_text() may return
        # None for image-only/empty pages, which would make join() raise
        # TypeError — substitute "" so the join is always safe.
        pdf_reader = PdfReader(pdf_file.name)
        text = "\n".join([page.extract_text() or "" for page in pdf_reader.pages])

        # Initialize the LLM client (imported lazily so the app can start
        # even if huggingface_hub is slow/absent until first use).
        from huggingface_hub import InferenceClient
        client = InferenceClient(token=token)

        # Ask the model to rewrite the extracted text.
        response = client.text_generation(
            prompt=f"Rewrite this text clearly and concisely while preserving all key information:\n\n{text}",
            model="meta-llama/Llama-3.3-70B-Instruct",
            max_new_tokens=2000
        )

        # Write the rewritten text into a Word document for download.
        doc = Document()
        doc.add_paragraph(response)
        output_path = "rewritten.docx"
        doc.save(output_path)

        return response, output_path

    except Exception as e:
        # Broad catch is deliberate: this is the Gradio callback boundary,
        # and any failure should surface as a message in the UI rather than
        # a server traceback.
        return f"Error: {str(e)}", None
|
33 |
|
34 |
# Build the Gradio UI: upload a PDF, rewrite it via the LLM, download a .docx.
with gr.Blocks(fill_height=True) as demo:
    with gr.Sidebar():
        # NOTE(review): the original markdown strings were truncated in the
        # diff this was recovered from — placeholder text, confirm wording.
        gr.Markdown("# PDF Rewriter")
        gr.Markdown("Sign in with your Hugging Face account to use the inference API.")
        # BUG FIX: the click handler below referenced `login_btn`, which was
        # never defined anywhere in the file (NameError at startup). Define
        # the login button so its token can be passed to process_pdf.
        login_btn = gr.LoginButton("Sign in")

    # NOTE(review): type="file" is the legacy Gradio value (removed in
    # Gradio 4, where "filepath"/"binary" are valid) — kept as-is because
    # process_pdf reads `pdf_file.name`; confirm the installed version.
    file_input = gr.File(label="Upload PDF", type="file")
    process_btn = gr.Button("Process PDF")
    text_output = gr.Textbox(label="Rewritten Text", interactive=False)
    # BUG FIX: gr.Download does not exist in Gradio; a gr.File output
    # component serves the generated document for download.
    file_output = gr.File(label="Download Word File")

    process_btn.click(
        fn=process_pdf,
        inputs=[file_input, login_btn],
        outputs=[text_output, file_output]
    )

if __name__ == "__main__":
    demo.launch()
|
|
|
1 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
# Minimal "Inference Provider" Space: a sign-in button plus the hosted
# chat UI for meta-llama/Llama-3.3-70B-Instruct served through Cerebras.
with gr.Blocks(fill_height=True) as demo:
    with gr.Sidebar():
        gr.Markdown("# Inference Provider")
        gr.Markdown("This Space showcases the meta-llama/Llama-3.3-70B-Instruct model, served by the cerebras API. Sign in with your Hugging Face account to use this API.")
        button = gr.LoginButton("Sign in")
    # Load the hosted model UI into the main area; accept_token=button wires
    # the user's OAuth token into the inference requests.
    # NOTE(review): indentation was lost in the recovered text — placed per
    # the standard HF template (inside Blocks, after the Sidebar); confirm.
    gr.load("models/meta-llama/Llama-3.3-70B-Instruct", accept_token=button, provider="cerebras")

# FIX: guard the entry point so importing this module does not start the
# server (matches the convention used by the previous version of this file).
if __name__ == "__main__":
    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|