theostos committed on
Commit
35f1f39
·
1 Parent(s): ae41fb2
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -27,19 +27,19 @@ def respond(
27
  max_tokens,
28
  temperature,
29
  ):
30
- prompt = """<|start_header_id|>system<|end_header_id|>
31
 
32
- You are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>
33
- {message}
34
- <|eot_id|><|start_header_id|>assistant<|end_header_id|>
35
- """
36
- model_inputs = generate_custom_mask(tokenizer, [prompt], device)
37
 
38
- outputs = model.generate(temperature=0.7, max_tokens=64, **model_inputs)
39
- outputs = outputs[:, model_inputs['input_ids'].shape[1]:]
40
- result = tokenizer.batch_decode(outputs, skip_special_tokens=True)
41
 
42
- return result, []
43
 
44
  """
45
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 
27
  max_tokens,
28
  temperature,
29
  ):
30
+ # prompt = """<|start_header_id|>system<|end_header_id|>
31
 
32
+ # You are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>
33
+ # {message}
34
+ # <|eot_id|><|start_header_id|>assistant<|end_header_id|>
35
+ # """
36
+ # model_inputs = generate_custom_mask(tokenizer, [prompt], device)
37
 
38
+ # outputs = model.generate(temperature=0.7, max_tokens=64, **model_inputs)
39
+ # outputs = outputs[:, model_inputs['input_ids'].shape[1]:]
40
+ # result = tokenizer.batch_decode(outputs, skip_special_tokens=True)
41
 
42
+ return "test", []
43
 
44
  """
45
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface