Commit · 8ce4bd9
1 Parent(s): de32dbb
Create app.py
app.py ADDED
@@ -0,0 +1,127 @@
# Dependency setup: these were written as notebook shell commands (`!pip ...`),
# which are not valid Python inside app.py, so they are kept here as comments.
# !pip uninstall -y tensorflow
# !pip install tensorflow==2.14
# !pip install --upgrade pip
# !pip install --upgrade transformers scipy
# !pip install transformers
# !pip install pymupdf

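# On a Hugging Face Space, the dependencies above would normally live in a
# requirements.txt next to app.py. An illustrative sketch based on the imports
# used below (an assumption, not part of this commit):
#
#   gradio
#   transformers
#   torch
#   scipy
#   numpy
#   pymupdf
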
## Summarization

import gradio as gr
import fitz  # PyMuPDF
from transformers import BartTokenizer, BartForConditionalGeneration, pipeline
import scipy.io.wavfile
import numpy as np

# Load the BART summarization model and its tokenizer
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')

def extract_abstract(pdf_path):
    # Slice out the text between the "Abstract" and "Introduction" headings on the first page
    doc = fitz.open(pdf_path)
    first_page = doc[0].get_text()
    start_idx = first_page.lower().find("abstract")
    end_idx = first_page.lower().find("introduction")
    if start_idx != -1 and end_idx != -1:
        return first_page[start_idx:end_idx].strip()
    else:
        return "Abstract or Introduction heading not found on the first page."

# Specify the path to your PDF file
pdf_path = "/content/article11.pdf"  # Update the path

# Extract the abstract
abstract_text = extract_abstract(pdf_path)

# Print the extracted abstract
print("Extracted Abstract:")
print(abstract_text)

# display/HTML render the summary card; Audio provides the playback widget used below
from IPython.display import display, HTML, Audio

# Function to display the summary and reduction percentage aesthetically
def display_results(final_summary, original_text):
    reduction_percentage = 100 * (1 - len(final_summary) / len(original_text))
    html_content = f"""
    <div style='padding: 20px; background-color: #f3f3f3; border-radius: 10px;'>
        <h2 style='color: #2c3e50; text-align: center;'>Summary</h2>
        <p style='color: #34495e; font-size: 16px; text-align: justify;'>{final_summary}</p>
        <p style='color: #2c3e50;'><b>Reduction in Text:</b> {reduction_percentage:.2f}%</p>
    </div>
    """
    display(HTML(html_content))

# Summary generation and post-processing
inputs = tokenizer([abstract_text], max_length=1024, return_tensors='pt', truncation=True)
max_length_for_summary = 40
length_penalty_value = 2.0

summary_ids = model.generate(inputs['input_ids'],
                             num_beams=4,
                             max_length=max_length_for_summary,
                             min_length=10,
                             length_penalty=length_penalty_value,
                             early_stopping=True,
                             no_repeat_ngram_size=2)

summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
summary = ' '.join(summary.split())  # Remove extra spaces

# Handle truncated words and adjust periods
words = summary.split()
cleaned_summary = []
for i, word in enumerate(words):
    if '-' in word and i < len(words) - 1:
        # Rejoin a hyphen-split word with the token that follows it
        word = word.replace('-', '') + words[i + 1]
        words[i + 1] = ""

    if '.' in word and i != len(words) - 1:
        # Replace a mid-summary period with a connecting "and"
        word = word.replace('.', '')
        cleaned_summary.append(word + ' and')
    else:
        cleaned_summary.append(word)

# Capitalize the first word and lowercase the following words (leaving "and" untouched)
final_summary = ' '.join(cleaned_summary)
final_summary = final_summary[0].upper() + final_summary[1:]
final_summary = ' '.join(
    w if i == 0 or w.lower() == 'and' else w[0].lower() + w[1:]
    for i, w in enumerate(final_summary.split())
)

# Displaying the results
display_results(final_summary, abstract_text)

## Text-to-Speech

# Initialize the Bark TTS pipeline
synthesiser = pipeline("text-to-speech", "suno/bark")

# Convert the summarized text to speech
speech = synthesiser(final_summary, forward_params={"do_sample": True})

# Normalize the audio data to the 16-bit PCM range
audio_data = speech["audio"].squeeze()
normalized_audio_data = np.int16(audio_data / np.max(np.abs(audio_data)) * 32767)

# Save the normalized audio data as a WAV file
output_file = "/content/bark_output.wav"
scipy.io.wavfile.write(output_file, rate=speech["sampling_rate"], data=normalized_audio_data)
print(f"Audio file saved as {output_file}")

# Display an audio player widget to play the generated speech
Audio(output_file)

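# `process_text` is referenced by the Gradio interface below but is not defined
# anywhere in this commit. A minimal sketch of what it might look like, reusing
# the BART and Bark objects created above (an assumption, not the author's code):
def process_text(text):
    # Summarize the input text with BART, mirroring the settings used above
    enc = tokenizer([text], max_length=1024, return_tensors='pt', truncation=True)
    ids = model.generate(enc['input_ids'],
                         num_beams=4,
                         max_length=max_length_for_summary,
                         min_length=10,
                         length_penalty=length_penalty_value,
                         early_stopping=True,
                         no_repeat_ngram_size=2)
    summary_text = tokenizer.decode(ids[0], skip_special_tokens=True)

    # Convert the summary to speech with Bark; Gradio audio outputs accept a
    # (sample_rate, int16 numpy array) tuple
    tts = synthesiser(summary_text, forward_params={"do_sample": True})
    audio = tts["audio"].squeeze()
    audio_int16 = np.int16(audio / np.max(np.abs(audio)) * 32767)
    return summary_text, (tts["sampling_rate"], audio_int16)
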
# Gradio Interface
iface = gr.Interface(
    fn=process_text,
    inputs="text",
    outputs=["text", "audio"],
    title="Summarization and Text-to-Speech",
    description="Enter text to summarize and convert to speech."
)

if __name__ == "__main__":
    iface.launch()