aixsatoshi committed on
Commit
beb5c13
1 Parent(s): 691f3d7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -17,10 +17,10 @@ model = AutoModelForCausalLM.from_pretrained(
17
  offload_state_dict=True # 必要に応じてstate_dictをオフロード
18
  )
19
 
20
- TITLE = "<h1><center>Meta-Llama-3.1-70B-Instruct-AWQ-INT4 Chat webui</center></h1>"
21
 
22
  DESCRIPTION = """
23
- <h3>MODEL: <a href="https://hf.co/hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4">Meta-Llama-3.1-70B-Instruct-AWQ-INT4</a></h3>
24
  <center>
25
  <p>This model is designed for conversational interactions.</p>
26
  </center>
@@ -70,6 +70,10 @@ def stream_chat(message: str, history: list, temperature: float, max_new_tokens:
70
  temperature=temperature,
71
  eos_token_id=[128001, 128009],
72
  )
 
 
 
 
73
 
74
  thread = Thread(target=model.generate, kwargs=generate_kwargs)
75
  thread.start()
 
17
  offload_state_dict=True # 必要に応じてstate_dictをオフロード
18
  )
19
 
20
+ TITLE = "<h1><center>Tanuki-8x8B-dpo-v1.0-AWQ Chat webui</center></h1>"
21
 
22
  DESCRIPTION = """
23
+ <h3>MODEL: <a href="https://huggingface.co/weblab-GENIAC/Tanuki-8x8B-dpo-v1.0">Tanuki-8x8B-dpo-v1.0</a></h3>
24
  <center>
25
  <p>This model is designed for conversational interactions.</p>
26
  </center>
 
70
  temperature=temperature,
71
  eos_token_id=[128001, 128009],
72
  )
73
+
74
+ # `inputs` を直接渡すのではなく、不要な key を取り除いた後に渡す
75
+ input_data = {key: value for key, value in inputs.items() if key != 'token_type_ids'}
76
+ generate_kwargs.update(input_data)
77
 
78
  thread = Thread(target=model.generate, kwargs=generate_kwargs)
79
  thread.start()