Spaces:
Runtime error
Runtime error
Commit
·
3747804
1
Parent(s):
93aa534
max length set to 200
Browse files
app.py
CHANGED
@@ -7,12 +7,12 @@ from transformers import pipeline, GPT2Tokenizer, OPTForCausalLM
|
|
7 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
8 |
|
9 |
model=OPTForCausalLM.from_pretrained('pushkarraj/opt_paraphraser')
|
10 |
-
tokenizer=GPT2Tokenizer.from_pretrained('facebook/opt-1.3b',truncation=True)
|
11 |
|
12 |
generator=pipeline("text-generation",model=model,tokenizer=tokenizer,device=device)
|
13 |
|
14 |
def cleaned_para(input_sentence):
|
15 |
-
p=generator('<s>'+input_sentence+ '</s>>>>><p>',do_sample=True,max_length=len(input_sentence.split(" "))+
|
16 |
return p[0]['generated_text'].split('</s>>>>><p>')[1].split('</p>')[0]
|
17 |
|
18 |
from spacy.lang.en import English # updated
|
|
|
7 |
# Model/pipeline setup for the OPT-based paraphraser.
# Runs on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = OPTForCausalLM.from_pretrained('pushkarraj/opt_paraphraser')

# NOTE(review): `truncation` / `padding` are per-call tokenization arguments,
# not from_pretrained() configuration — passing them here has no effect on how
# the tokenizer later truncates or pads. If truncation is actually needed it
# must be passed where the tokenizer is invoked (the pipeline call). Removed
# here as no-ops; behavior is unchanged.
tokenizer = GPT2Tokenizer.from_pretrained('facebook/opt-1.3b')

# NOTE(review): older transformers versions expect `device` as an int index
# (-1 for CPU); newer ones accept a torch.device — confirm against the pinned
# transformers version.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
|
13 |
|
14 |
def cleaned_para(input_sentence):
    """Paraphrase *input_sentence* and return the cleaned paraphrase text.

    Wraps the input in the model's prompt markers, samples a generation,
    and extracts the text between the '</s>>>>><p>' marker and the
    closing '</p>' tag.

    Parameters:
        input_sentence (str): sentence to paraphrase.

    Returns:
        str: the paraphrased sentence (may be empty if the model emits
        no text after the prompt marker).
    """
    # Length budget: input word count plus 200 tokens of headroom.
    # split() (no argument) is used instead of split(" ") so repeated or
    # leading spaces don't produce empty tokens that inflate the count.
    max_len = len(input_sentence.split()) + 200

    prompt = '<s>' + input_sentence + '</s>>>>><p>'
    p = generator(
        prompt,
        do_sample=True,
        max_length=max_len,
        temperature=0.9,
        repetition_penalty=1.2,
    )

    generated = p[0]['generated_text']
    # partition() instead of split(...)[1]: if the marker were ever absent
    # from the output, split(...)[1] raises IndexError, while partition
    # degrades gracefully to an empty string.
    after_marker = generated.partition('</s>>>>><p>')[2]
    return after_marker.partition('</p>')[0]
|
17 |
|
18 |
from spacy.lang.en import English # updated
|