Spaces:
Paused
Paused
Commit
·
f18e25c
1
Parent(s):
8fc450b
Update main.py
Browse files
main.py
CHANGED
@@ -23,7 +23,7 @@ tokenizer = AutoTokenizer.from_pretrained("Open-Orca/OpenOrca-Platypus2-13B", tr
|
|
23 |
def ask_bot(question):
|
24 |
input_ids = tokenizer.encode(question, return_tensors="pt").to(device)
|
25 |
with torch.no_grad():
|
26 |
-
output = model.generate(input_ids, max_length=
|
27 |
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
|
28 |
response = generated_text.split("->:")[-1]
|
29 |
return response
|
@@ -155,7 +155,8 @@ def chatbot(patient_id, user_data: dict=None):
|
|
155 |
else:
|
156 |
human_input = prompt + user_input + " ->:"
|
157 |
human_text = user_input.replace("'", "")
|
158 |
-
response = llm._call(human_input)
|
|
|
159 |
# response = response.replace("'", "")
|
160 |
# memory.save_context({"input": user_input}, {"output": response})
|
161 |
# summary = memory.load_memory_variables({})
|
|
|
23 |
def ask_bot(question):
    """Generate a chatbot reply for *question*.

    Encodes the prompt, samples a continuation from the module-level
    ``model`` (do_sample=True with top_k=50, so output is stochastic),
    and returns only the text after the last "->:" marker, which is the
    prompt/answer delimiter used elsewhere in this file.

    Relies on module-level ``tokenizer``, ``model``, and ``device``.
    """
    encoded = tokenizer.encode(question, return_tensors="pt").to(device)
    # Inference only: no gradients needed while generating.
    with torch.no_grad():
        generated = model.generate(
            encoded,
            max_length=200,
            num_return_sequences=1,
            do_sample=True,
            top_k=50,
        )
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    # Everything before the final "->:" is the echoed prompt.
    return decoded.split("->:")[-1]
|
|
155 |
else:
|
156 |
human_input = prompt + user_input + " ->:"
|
157 |
human_text = user_input.replace("'", "")
|
158 |
+
# response = llm._call(human_input)
|
159 |
+
response = ask_bot(human_input)
|
160 |
# response = response.replace("'", "")
|
161 |
# memory.save_context({"input": user_input}, {"output": response})
|
162 |
# summary = memory.load_memory_variables({})
|