santoshtyss committed
Commit 42ef63e
1 parent: 2b5eece

Update app.py

Files changed (1): app.py (+16, -15)
app.py CHANGED
@@ -5,6 +5,21 @@ from indicnlp.tokenize import sentence_tokenize
 from docx import Document
 
 import os
+import torch
+import time
+import json
+from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
+
+from transformers import (
+    AutoConfig,
+    AutoModelForQuestionAnswering,
+    AutoTokenizer,
+    squad_convert_examples_to_features
+)
+
+from transformers.data.processors.squad import SquadResult, SquadV2Processor, SquadExample
+from transformers.data.metrics.squad_metrics import compute_predictions_logits
+
 
 os.system('git clone https://github.com/TheAtticusProject/cuad.git')
 os.system('mv cuad cuad-training')
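
The imports moved to the top of app.py in this hunk are the standard transformers SQuAD utilities used by the CUAD question-answering code. A minimal sketch of how they are typically wired together, not taken from this commit (the tokenizer checkpoint, question/context strings, and sequence-length settings below are placeholder assumptions):

# Illustrative sketch only: build a SQuAD-style example and convert it to
# model-ready features with the imports introduced above.
from transformers import AutoTokenizer, squad_convert_examples_to_features
from transformers.data.processors.squad import SquadExample

tokenizer = AutoTokenizer.from_pretrained("roberta-base", use_fast=False)  # placeholder checkpoint

example = SquadExample(
    qas_id="0",
    question_text="What is the governing law of this agreement?",          # placeholder question
    context_text="This Agreement shall be governed by the laws of England.",  # placeholder clause
    answer_text=None,
    start_position_character=None,
    title="contract",
)

features, dataset = squad_convert_examples_to_features(
    examples=[example],
    tokenizer=tokenizer,
    max_seq_length=384,   # assumed values; the app's real settings may differ
    doc_stride=128,
    max_query_length=64,
    is_training=False,
    return_dataset="pt",  # also returns a torch TensorDataset for inference
)
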
@@ -224,20 +239,6 @@ def translate_txt(document_name, output_file, src, trg):
 
     return output_file
 
-import torch
-import time
-import json
-from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
-
-from transformers import (
-    AutoConfig,
-    AutoModelForQuestionAnswering,
-    AutoTokenizer,
-    squad_convert_examples_to_features
-)
-
-from transformers.data.processors.squad import SquadResult, SquadV2Processor, SquadExample
-from transformers.data.metrics.squad_metrics import compute_predictions_logits
 
 info_model_path = 'cuad-models/roberta-base/'
 info_config_class, info_model_class, info_tokenizer_class = (
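
The context lines above point to a local CUAD checkpoint (cuad-models/roberta-base/) and the start of a (config, model, tokenizer) class triple; the actual continuation is outside this diff. The following is only a plausible sketch of that loading pattern using the Auto* classes imported in the first hunk (the use_fast flag and batch size are assumptions):

# Plausible sketch, not the real continuation of app.py: load the CUAD
# checkpoint and prepare a DataLoader for extractive-QA inference.
from torch.utils.data import DataLoader, SequentialSampler
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer

info_model_path = 'cuad-models/roberta-base/'

info_config_class, info_model_class, info_tokenizer_class = (
    AutoConfig,
    AutoModelForQuestionAnswering,
    AutoTokenizer,
)

config = info_config_class.from_pretrained(info_model_path)
tokenizer = info_tokenizer_class.from_pretrained(info_model_path, use_fast=False)
model = info_model_class.from_pretrained(info_model_path, config=config)
model.eval()

def make_eval_dataloader(dataset, batch_size=8):
    # `dataset` is the TensorDataset returned by squad_convert_examples_to_features
    # (see the sketch after the first hunk); SequentialSampler preserves order.
    return DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=batch_size)
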
@@ -390,7 +391,7 @@ def run_key_clause(document_name, output_name,source_language):
     return final_info
 
 
-from transformers import AutoModelWithLMHead, AutoTokenizer
+from transformers import AutoModelWithLMHead, AutoTokenizer
 from docx import Document
 
 qg_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
 
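The final hunk touches the import for the question-generation model loaded just below it. For reference, a minimal hedged example of how the mrm8488/t5-base-finetuned-question-generation-ap checkpoint is typically prompted (the "answer: ... context: ..." input format comes from the model card; the helper name and sample strings here are invented):

# Illustrative sketch, not from this commit: generate a question from an
# answer/context pair with the T5 checkpoint referenced above.
from transformers import AutoModelWithLMHead, AutoTokenizer

qg_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
qg_model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")

def generate_question(answer: str, context: str, max_length: int = 64) -> str:
    # The checkpoint expects inputs of the form "answer: <answer>  context: <context>".
    prompt = f"answer: {answer}  context: {context}"
    inputs = qg_tokenizer([prompt], return_tensors="pt")
    output_ids = qg_model.generate(**inputs, max_length=max_length)
    return qg_tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Example call with made-up strings:
print(generate_question("the laws of England",
                        "This Agreement shall be governed by the laws of England."))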