SurajSingh committed
Commit c1faeac · verified · 1 Parent(s): 1b9bfc9

Create qa_gen.py

Files changed (1)
  1. app/pyfiles/qa_gen.py +31 -0
app/pyfiles/qa_gen.py ADDED
@@ -0,0 +1,31 @@
+ import os
+ from pathlib import Path
+ import numpy as np
+ import torch
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+
+ # Load the fine-tuned GPT-2 tokenizer and QA model from directories next to this file.
+ current_path = os.path.dirname(os.path.abspath(__file__))
+ tokenizer_path = os.path.join(current_path, "gpt_tokenizer")
+ model_path = os.path.join(current_path, "gpt2_qa_model")
+ tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_path)  # also try gpt2-medium
+ model = GPT2LMHeadModel.from_pretrained(model_path)
+
+
+ def generate_text(sequence, max_new_tokens):
+     """Continue `sequence` by sampling at most `max_new_tokens` new tokens."""
+     ids = tokenizer.encode(sequence, return_tensors='pt')
+     input_length = ids.size(1)
+     max_length = input_length + max_new_tokens
+     final_outputs = model.generate(
+         ids,
+         do_sample=True,
+         max_length=max_length,
+         pad_token_id=model.config.eos_token_id
+     )
+     return tokenizer.decode(final_outputs[0], skip_special_tokens=True)
+
+
+ def question_awnser(prompt: str):
+     """Answer a question by generating text after an 'Answer: ' marker."""
+     result = generate_text("Question: " + prompt + " Answer: ", 35).split('Answer: ')[1]
+     try:
+         # Keep only the first sentence of the generation; fall back to the raw text on error.
+         result = result.split('.')[0] + '.'
+     except Exception as e:
+         print(e)
+     return result
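
For reference, a minimal usage sketch of the new module, not part of the commit itself: it assumes the gpt_tokenizer and gpt2_qa_model directories ship alongside qa_gen.py and that app.pyfiles is importable from the caller's working directory.

# Hypothetical quick check of the new helper (assumptions noted above).
from app.pyfiles.qa_gen import question_awnser

if __name__ == "__main__":
    # Prints a single-sentence answer sampled from the fine-tuned GPT-2 model.
    print(question_awnser("What is the capital of France?"))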