Spaces: Running on Zero

frankaging committed · Commit 7f543e6 · Parent(s): 3cb38a4

final

app.py CHANGED
@@ -90,17 +90,14 @@ def generate(
     max_new_tokens: int=DEFAULT_MAX_NEW_TOKENS,
 ) -> Iterator[str]:
 
-    print(chat_history)
-
     # limit to last 3 turns
     start_idx = max(0, len(chat_history) - 3)
     recent_history = chat_history[start_idx:]
 
     # build list of messages
     messages = []
-    for user_msg, model_msg in recent_history:
-        messages.append({"role": "user", "content": user_msg})
-        messages.append({"role": "model", "content": model_msg})
+    for rh in recent_history:
+        messages.append({"role": rh["role"], "content": rh["content"]})
     messages.append({"role": "user", "content": message})
 
     input_ids = torch.tensor([tokenizer.apply_chat_template(
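
The change assumes chat_history now arrives as a "messages"-style list of {"role": ..., "content": ...} dicts rather than the old (user_msg, model_msg) tuples, so prior turns can be copied through unchanged. A minimal sketch of just the history-trimming and message-building step is shown below; build_messages and the sample history are illustrative names and data, not taken from the Space itself.

# Minimal sketch of the updated message-building step, in isolation.
# Assumes chat_history is a list of {"role": ..., "content": ...} dicts;
# build_messages and the sample history are hypothetical, for illustration only.

def build_messages(message, chat_history, max_turns=3):
    # keep only the most recent turns, mirroring the start_idx logic in app.py
    start_idx = max(0, len(chat_history) - max_turns)
    recent_history = chat_history[start_idx:]

    # copy prior turns as-is, then append the new user message
    messages = [{"role": rh["role"], "content": rh["content"]} for rh in recent_history]
    messages.append({"role": "user", "content": message})
    return messages

if __name__ == "__main__":
    history = [
        {"role": "user", "content": "Hi"},
        {"role": "model", "content": "Hello! How can I help?"},
    ]
    print(build_messages("Summarize our chat.", history))

In the Space, the resulting messages list is then passed to tokenizer.apply_chat_template to produce input_ids for generation, as the unchanged lines around the hunk show.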