VishnuPottabatthini committed on
Commit
a02d0cf
·
verified ·
1 Parent(s): 73387eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -17
app.py CHANGED
@@ -1,18 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from transformers import BartTokenizer, BartForConditionalGeneration
3
  import torch
4
 
5
- # Load the fine-tuned BART model and tokenizer from the local directory
6
  MODEL_DIR = './BART model small/model'
7
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
8
 
9
  tokenizer = BartTokenizer.from_pretrained(MODEL_DIR)
10
  model = BartForConditionalGeneration.from_pretrained(MODEL_DIR).to(device)
11
 
12
- # Define the summarization function
13
  def predict(text):
14
  try:
15
- # Tokenize the input article
16
  inputs = tokenizer(
17
  text,
18
  return_tensors="pt",
@@ -20,33 +73,26 @@ def predict(text):
20
  truncation=True
21
  ).to(device)
22
 
23
- # Generate the summary
24
  summary_ids = model.generate(
25
  inputs['input_ids'],
26
  attention_mask=inputs['attention_mask'],
27
- max_length=150, # Set maximum length for the summary
28
- min_length=30, # Set minimum length for the summary
29
- num_beams=4, # Use beam search to generate the summary
30
  early_stopping=True
31
  )
32
 
33
- # Decode the summary
34
  summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
35
-
36
  return summary
37
-
38
  except Exception as e:
39
  return str(e)
40
 
41
- # Create Gradio interface
42
- # Textbox input for the article and output for the summary
43
  interface = gr.Interface(
44
- fn=predict, # The function to summarize the article
45
- inputs="text", # Input is a text box where users can input the article text
46
- outputs="text", # Output is a text box displaying the summary
47
- title="BART Summarization", # The title of the app
48
  description="Enter an article to generate a summary using a fine-tuned BART model."
49
  )
50
 
51
- # Launch the Gradio app
52
  interface.launch()
 
1
# Previous implementation removed: it was a fully commented-out duplicate of
# the live code below and is preserved in version control (parent commit 73387eb).
57
import gradio as gr
from transformers import BartTokenizer, BartForConditionalGeneration
import torch

# Directory holding the fine-tuned BART checkpoint (tokenizer files + weights).
MODEL_DIR = './BART model small/model'

# Prefer the GPU when one is present; otherwise fall back to the CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Load the tokenizer and model once at startup and place the model on `device`.
tokenizer = BartTokenizer.from_pretrained(MODEL_DIR)
model = BartForConditionalGeneration.from_pretrained(MODEL_DIR).to(device)
66
 
 
67
def predict(text):
    """Summarize *text* with the fine-tuned BART model.

    Parameters
    ----------
    text : str
        The article to summarize.

    Returns
    -------
    str
        The generated summary, or — if anything raises — the exception
        message as a string (so the Gradio output box shows the error
        instead of the app crashing).
    """
    try:
        # Tokenize the article; truncation caps over-long inputs at the
        # model's maximum context length (1024 tokens for BART).
        inputs = tokenizer(
            text,
            return_tensors="pt",
            max_length=1024,
            truncation=True
        ).to(device)

        # Inference only: no_grad() skips autograd bookkeeping, reducing
        # memory use and speeding up generation. Output is unchanged.
        with torch.no_grad():
            summary_ids = model.generate(
                inputs['input_ids'],
                attention_mask=inputs['attention_mask'],
                max_length=150,   # upper bound on generated summary length
                min_length=30,    # lower bound on generated summary length
                num_beams=4,      # beam search for higher-quality summaries
                early_stopping=True
            )

        # Decode the best beam back to plain text.
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        return summary
    except Exception as e:
        # NOTE(review): returning the error string keeps the UI responsive
        # but masks failures as output — consider logging the traceback too.
        return str(e)
89
 
 
 
90
  interface = gr.Interface(
91
+ fn=predict,
92
+ inputs="text",
93
+ outputs="text",
94
+ title="BART Summarization",
95
  description="Enter an article to generate a summary using a fine-tuned BART model."
96
  )
97
 
 
98
  interface.launch()