owaiskha9654 committed
Commit 9ebc9d5 • 1 Parent(s): c63981c

Update app.py

Files changed (1)
  1. app.py +6 -8
app.py CHANGED
@@ -1,21 +1,19 @@
-import numpy as np
 import torch
-from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
-from transformers import BertForSequenceClassification,BertTokenizer
-
+import numpy as np
 import gradio as gr
 from typing import Dict
-
+from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
+from transformers import BertForSequenceClassification,BertTokenizer
 
 num_labels=14
+#Loading Model and Tokenizer from Hugging Face Spaces
 model = BertForSequenceClassification.from_pretrained("owaiskha9654/Multi-Label-Classification-of-PubMed-Articles", num_labels=num_labels)
 tokenizer = BertTokenizer.from_pretrained('owaiskha9654/Multi-Label-Classification-of-PubMed-Articles', do_lower_case=True)
 
 
-def Multi_Label_Classification_of_Pubmed_Articles(model_input: str) -> Dict[str, float]:
-
+def Multi_Label_Classification_of_Pubmed_Articles(model_input: str) -> Dict[str, float]: #This wrapper function will pass the article into the model
     dict_custom={}
-    Preprocess_part1=model_input[:len(model_input)]
+    Preprocess_part1=model_input[:len(model_input)] #splitting input text into 2 parts
     Preprocess_part2=model_input[len(model_input):]
     dict1=tokenizer.encode_plus(Preprocess_part1,max_length=1024,padding=True,truncation=True)
     dict2=tokenizer.encode_plus(Preprocess_part2,max_length=1024,padding=True,truncation=True)
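
For readers who want to run something end-to-end, here is a minimal sketch of how the updated pieces presumably fit together at inference time. Only the two from_pretrained calls, num_labels=14, and the wrapper's signature are taken from the diff; the hunk is truncated after the encode_plus calls, so the forward pass, the sigmoid scoring, the placeholder label names, and the Gradio wiring below are assumptions. The sketch also tokenizes the input in a single pass, since the committed slice model_input[:len(model_input)] keeps the entire string (leaving Preprocess_part2 empty), and it caps max_length at 512, the positional limit of a standard BERT checkpoint, rather than the commit's 1024.

import torch
from typing import Dict
from transformers import BertForSequenceClassification, BertTokenizer
import gradio as gr

num_labels = 14

# Loading code as committed above
model = BertForSequenceClassification.from_pretrained("owaiskha9654/Multi-Label-Classification-of-PubMed-Articles", num_labels=num_labels)
tokenizer = BertTokenizer.from_pretrained('owaiskha9654/Multi-Label-Classification-of-PubMed-Articles', do_lower_case=True)
model.eval()

def Multi_Label_Classification_of_Pubmed_Articles(model_input: str) -> Dict[str, float]:
    # Assumption: tokenize once instead of the commit's two-part split,
    # which is a no-op because [:len(model_input)] is the whole string.
    encoding = tokenizer(model_input, max_length=512, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**encoding).logits  # shape (1, num_labels)
    # Multi-label classification scores each label with an independent
    # sigmoid rather than a softmax over all 14 classes.
    probs = torch.sigmoid(logits).squeeze(0)
    # "label_0" ... "label_13" are hypothetical placeholders; the real MeSH
    # class names are not visible in this hunk.
    return {f"label_{i}": float(p) for i, p in enumerate(probs)}

# Hypothetical Gradio wiring, consistent with the gradio import in the diff;
# gr.Label sorts a Dict[str, float] of confidences, matching the return type.
demo = gr.Interface(fn=Multi_Label_Classification_of_Pubmed_Articles, inputs="textbox", outputs="label")
demo.launch()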