Commit eef9168 · Parent(s): 9c97d52
app.py changes
app.py
CHANGED
@@ -40,11 +40,14 @@ def clean_text(text):
 
 def generateAnswer(question):
 
+    question = question.strip()
+
     question = "<question>" + clean_text(question) + "<answer>"
 
     prompt = []
     prompt.append(question)
 
+    tokenizer.padding_side='left'
     prompts_batch_ids = tokenizer(prompt,
         padding=True, truncation=True, return_tensors='pt').to(model.device)
     output_ids = model.generate(
@@ -53,6 +56,8 @@ def generateAnswer(question):
     outputs_batch = [seq.split('<answer>')[1] for seq in
         tokenizer.batch_decode(output_ids, skip_special_tokens=True)]
     print(outputs_batch)
+    tokenizer.padding_side='right'
+
     return outputs_batch[0]
 
 
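
Note on the change: besides stripping whitespace from the incoming question, the commit switches the tokenizer to left padding before tokenizing the prompt and restores right padding after generation. For decoder-only models, left padding keeps each prompt flush against the position where new tokens are produced, so pad tokens never sit between the prompt and its answer. The sketch below illustrates that pattern in isolation; the Space's real model and tokenizer are loaded elsewhere in app.py, so "gpt2", the prompt strings, and max_new_tokens here are placeholders, not the app's actual configuration.

# Minimal sketch of the left-padding pattern this commit adopts (placeholder model).
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # gpt2 ships without a pad token

prompts = ["<question>short one<answer>",
           "<question>a much longer question than the first<answer>"]

# Left padding: shorter prompts are padded on the left, so every prompt ends
# right where generation begins and no pad tokens interrupt the continuation.
tokenizer.padding_side = "left"
batch = tokenizer(prompts, padding=True, truncation=True, return_tensors="pt")
output_ids = model.generate(**batch, max_new_tokens=20,
                            pad_token_id=tokenizer.pad_token_id)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))

# Restore the default right padding afterwards, mirroring the commit, so any
# other code path that expects the default behaviour is unaffected.
tokenizer.padding_side = "right"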