Update app.py

app.py CHANGED
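This commit makes three changes to generate() in app.py: the ZeroGPU decorator gains an explicit time budget (@spaces.GPU(duration=100), i.e. up to 100 seconds of GPU time per call), the system prompt is set to あなたはフレンドリーなチャットボットです。 ("You are a friendly chatbot."), and generation is capped at max_new_tokens=864. The previous system prompt and token limit are truncated in this view of the diff.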
@@ -12,10 +12,10 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-@spaces.GPU
+@spaces.GPU(duration=100)
 def generate(prompt, history):
     messages = [
-        {"role": "system", "content": "
+        {"role": "system", "content": "あなたはフレンドリーなチャットボットです。"},
         {"role": "user", "content": prompt}
     ]
     text = tokenizer.apply_chat_template(
@@ -27,7 +27,7 @@ def generate(prompt, history):
 
     generated_ids = model.generate(
         **model_inputs,
-        max_new_tokens=
+        max_new_tokens=864
     )
     generated_ids = [
         output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
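For context, here is a minimal sketch of what the full app.py plausibly looks like after this commit. Only the generate() body, the @spaces.GPU(duration=100) decorator, the system prompt, and max_new_tokens=864 come from the diff; the model name, dtype, and the Gradio ChatInterface wiring are assumptions filled in from the common ZeroGPU chat-app pattern.

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2-1.5B-Instruct"  # assumption: the real name sits above the diff hunk

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # assumption: a common dtype choice on ZeroGPU Spaces
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

@spaces.GPU(duration=100)  # ZeroGPU: request up to 100 s of GPU time per call
def generate(prompt, history):
    messages = [
        # System prompt from the diff: "You are a friendly chatbot."
        {"role": "system", "content": "あなたはフレンドリーなチャットボットです。"},
        {"role": "user", "content": prompt},
    ]
    # Render the chat template into a single prompt string, then tokenize it.
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=864,
    )
    # Keep only the newly generated tokens, dropping the echoed prompt.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

demo = gr.ChatInterface(generate)  # assumption: standard Gradio chat wiring
demo.launch()

The output_ids[len(input_ids):] slice drops the prompt tokens from each returned sequence, so only the newly generated reply is decoded and shown in the chat.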