VishnuPottabatthini committed
Commit fc16cba · verified · 1 Parent(s): a02d0cf

Update app.py

Files changed (1)
  1. app.py +2 -57
app.py CHANGED
@@ -1,58 +1,3 @@
-# import gradio as gr
-# from transformers import BartTokenizer, BartForConditionalGeneration
-# import torch
-
-# # Load the fine-tuned BART model and tokenizer from the local directory
-# MODEL_DIR = './BART model small/model'
-# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# tokenizer = BartTokenizer.from_pretrained(MODEL_DIR)
-# model = BartForConditionalGeneration.from_pretrained(MODEL_DIR).to(device)
-
-# # Define the summarization function
-# def predict(text):
-#     try:
-#         # Tokenize the input article
-#         inputs = tokenizer(
-#             text,
-#             return_tensors="pt",
-#             max_length=1024,
-#             truncation=True
-#         ).to(device)
-
-#         # Generate the summary
-#         summary_ids = model.generate(
-#             inputs['input_ids'],
-#             attention_mask=inputs['attention_mask'],
-#             max_length=150,  # Set maximum length for the summary
-#             min_length=30,  # Set minimum length for the summary
-#             num_beams=4,  # Use beam search to generate the summary
-#             early_stopping=True
-#         )
-
-#         # Decode the summary
-#         summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-
-#         return summary
-
-#     except Exception as e:
-#         return str(e)
-
-# # Create Gradio interface
-# # Textbox input for the article and output for the summary
-# interface = gr.Interface(
-#     fn=predict,  # The function to summarize the article
-#     inputs="text",  # Input is a text box where users can input the article text
-#     outputs="text",  # Output is a text box displaying the summary
-#     title="BART Summarization",  # The title of the app
-#     description="Enter an article to generate a summary using a fine-tuned BART model."
-# )
-
-# # Launch the Gradio app
-# interface.launch()
-
-
-
 
 import gradio as gr
 from transformers import BartTokenizer, BartForConditionalGeneration
@@ -64,7 +9,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 tokenizer = BartTokenizer.from_pretrained(MODEL_DIR)
 model = BartForConditionalGeneration.from_pretrained(MODEL_DIR).to(device)
 
-def predict(text):
+def summarize(text):
     try:
         inputs = tokenizer(
             text,
@@ -88,7 +33,7 @@ def predict(text):
         return str(e)
 
 interface = gr.Interface(
-    fn=predict,
+    fn=summarize,
     inputs="text",
     outputs="text",
     title="BART Summarization",