Update app.py
app.py
CHANGED
@@ -36,8 +36,7 @@ def respond(
     max_tokens,
     temperature,
     top_p,
-    top_k
-    model,
+    top_k
 ):
     chat_template = get_messages_formatter_type(model)

@@ -57,8 +56,6 @@ def respond(
     settings.top_p = top_p
     settings.max_tokens = max_tokens
     settings.stream = True
-    settings.num_beams = 10
-    settings.num_return_sequences=10

     messages = BasicChatHistory()

@@ -82,22 +79,10 @@ def respond(
         print_output=False
     )

-    outputs = set()
-
-
-
-    for index, output in enumerate(stream, start=1):
-        if output not in outputs:
-            outputs.add(output)
-
-            # Post-process the output
-            output1 = output[prompt_length:]
-            first_inst_index = output1.find("[/INST]")
-            second_inst_index = output1.find("[/IN", first_inst_index + len("[/INST]") + 1)
-            predicted_selfies = output1[first_inst_index + len("[/INST]") : second_inst_index].strip()
-            predicted_smiles = sf.decoder(predicted_selfies)
-            unique_responses.append(f"Predict {index}: {predicted_smiles}")
-            yield "\n".join(unique_responses)
+    outputs = ""
+    for output in stream:
+        outputs += output
+        yield outputs


 PLACEHOLDER = """
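The new respond() simply appends each streamed chunk to a running string and yields the growing text, replacing the earlier set-based deduplication and [/INST] / SELFIES post-processing. A minimal sketch of that cumulative-yield pattern, using a stand-in generator in place of the real model stream (fake_stream and respond_sketch are illustrative names, not part of this commit):

from typing import Iterator

def fake_stream() -> Iterator[str]:
    # Stand-in for the model's streaming generator (here: hard-coded text fragments).
    for chunk in ["C", "C(", "=O", ")O"]:
        yield chunk

def respond_sketch() -> Iterator[str]:
    outputs = ""
    for output in fake_stream():
        outputs += output   # append the newly streamed chunk
        yield outputs       # yield the full partial response each time

if __name__ == "__main__":
    for partial in respond_sketch():
        print(partial)
    # prints: C, CC(, CC(=O, CC(=O)O

Yielding the cumulative string rather than each individual chunk is the pattern a Gradio ChatInterface-style streaming handler expects: each yielded value replaces the bot message currently shown, assuming that is how respond() is wired up in this Space.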