Commit
·
1cefe7d
1
Parent(s):
298fce5
Update app.py
Browse files
app.py
CHANGED
@@ -3,4 +3,31 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
3 |
|
4 |
tokenizer = AutoTokenizer.from_pretrained("docto/Docto-Bot")
|
5 |
model = AutoModelForCausalLM.from_pretrained("docto/Docto-Bot")
|
6 |
-
special_token = '<|endoftext|>'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
# Load the fine-tuned Docto-Bot checkpoint and its matching tokenizer
# from the Hugging Face Hub (downloaded and cached on first run).
tokenizer = AutoTokenizer.from_pretrained("docto/Docto-Bot")
model = AutoModelForCausalLM.from_pretrained("docto/Docto-Bot")
# GPT-2 style end-of-text marker; get_reply() uses it below to find where
# a generated answer ends.
special_token = '<|endoftext|>'
|
7 |
+
|
8 |
+
def get_reply(user_input):
    """Generate an answer to *user_input* with the Docto-Bot causal LM.

    Builds a "Question: ...\nAnswer:" prompt, samples four candidate
    continuations from the model, picks one at random, and extracts the
    text between "Answer: " and the end-of-text token.

    Args:
        user_input: The user's question as a plain string.

    Returns:
        The extracted answer text, or the fallback apology string when
        the sampled sequence contains no well-formed
        "Answer: ... <|endoftext|>" span.
    """
    prompt_text = f'Question: {user_input}\nAnswer:'
    encoded_prompt = tokenizer.encode(
        prompt_text,
        add_special_tokens=False,
        return_tensors='pt',
    )

    # Sample several diverse candidates; one is chosen at random below.
    output_sequences = model.generate(
        input_ids=encoded_prompt,
        max_length=500,
        temperature=0.9,
        top_k=20,
        top_p=0.9,
        repetition_penalty=1.0,  # HF documents a float; 1.0 == no penalty
        do_sample=True,
        num_return_sequences=4,
    )

    answer_marker = "Answer: "
    try:
        result = tokenizer.decode(random.choice(output_sequences))
        # Keep only the span between the answer marker and the
        # end-of-text token; str.index raises ValueError if either
        # marker is absent from the decoded sample.
        result = result[result.index(answer_marker):result.index(special_token)]
        return result[len(answer_marker):]
    except ValueError:
        # Was a bare `except:` — narrowed so real bugs (NameError,
        # KeyboardInterrupt, ...) are no longer silently swallowed.
        return "Sorry! I don\'t Know"
|
33 |
+
|