Sakalti committed on
Commit
650c835
·
verified ·
1 Parent(s): 699d2be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -12,10 +12,10 @@ model = AutoModelForCausalLM.from_pretrained(
12
  )
13
  tokenizer = AutoTokenizer.from_pretrained(model_name)
14
 
15
- @spaces.GPU
16
  def generate(prompt, history):
17
  messages = [
18
- {"role": "system", "content": "Je bent een vriendelijke, behulpzame assistent."},
19
  {"role": "user", "content": prompt}
20
  ]
21
  text = tokenizer.apply_chat_template(
@@ -27,7 +27,7 @@ def generate(prompt, history):
27
 
28
  generated_ids = model.generate(
29
  **model_inputs,
30
- max_new_tokens=512
31
  )
32
  generated_ids = [
33
  output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
 
12
  )
13
  tokenizer = AutoTokenizer.from_pretrained(model_name)
14
 
15
+ @spaces.GPU(duration=100)
16
  def generate(prompt, history):
17
  messages = [
18
+ {"role": "system", "content": "あγͺγŸγ―γƒ•γƒ¬γƒ³γƒ‰γƒͺγƒΌγͺγƒγƒ£γƒƒγƒˆγƒœγƒƒγƒˆγ§γ™γ€‚"},
19
  {"role": "user", "content": prompt}
20
  ]
21
  text = tokenizer.apply_chat_template(
 
27
 
28
  generated_ids = model.generate(
29
  **model_inputs,
30
+ max_new_tokens=864
31
  )
32
  generated_ids = [
33
  output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)