jonaschua commited on
Commit
7966586
·
verified ·
1 Parent(s): ba20b0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -26
app.py CHANGED
@@ -78,40 +78,40 @@ def choose_model(model_name):
78
 
79
 
80
  @spaces.GPU(duration=duration)
81
- def respond(message, history: list[tuple[str, str]], model, system_message, max_tokens, temperature, top_p):
82
 
83
- if mode=="text-to-text":
84
- print(model)
85
- model_name = choose_model(model)
86
-
87
- client = InferenceClient(model_name, provider=provider, token=os.getenv('deepseekv2'))
88
-
89
- messages = [{"role": "system", "content": system_message}]
90
-
91
- for val in history:
92
- if val[0]:
93
- messages.append({"role": "user", "content": val[0]})
94
- if val[1]:
95
- messages.append({"role": "assistant", "content": val[1]})
96
-
97
- messages.append({"role": "user", "content": message})
98
-
99
- response = ""
100
-
101
- for message in client.chat_completion(messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p):
102
- token = message.choices[0].delta.content
103
 
104
- response += token
105
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
- else:
108
- url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"
109
- image = Image.open(requests.get(url, stream=True).raw)
110
 
111
 
112
  demo = gr.ChatInterface(
113
 
114
  respond,
 
115
  multimodal=True,
116
  stop_btn = "Stop generation",
117
  # multimodal = True,
 
78
 
79
 
80
@spaces.GPU(duration=duration)
def respond(
    message,
    history: list[tuple[str, str]],
    image=None,
    model=None,
    system_message="",
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
):
    """Stream a chat completion for the Gradio ChatInterface.

    Args:
        message: the new user message (str, or multimodal dict from Gradio).
        history: prior (user, assistant) turn pairs.
        image: optional PIL image from the UI; currently unused by the
            text-to-text path — TODO confirm intended use.
        model: UI model selection, resolved via ``choose_model``.
        system_message: system prompt prepended to the conversation.
        max_tokens / temperature / top_p: sampling controls forwarded to
            the inference endpoint.

    Yields:
        The accumulated assistant response after each streamed token, so
        Gradio renders it incrementally.

    Note: the original signature placed non-default parameters after
    ``image=None`` (a SyntaxError); trailing parameters now carry defaults
    so positional calls from Gradio are unchanged.
    """
    print(model)  # debug trace of the raw UI selection
    model_name = choose_model(model)

    # Token is read from the env var named 'deepseekv2' (historical name).
    client = InferenceClient(model_name, provider=provider, token=os.getenv('deepseekv2'))

    # Rebuild the full conversation: system prompt, prior turns, new message.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: use a distinct loop variable — the original shadowed `message`.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Final/keep-alive chunks may carry no content; skip them instead of
        # crashing on `response += None`.
        if token:
            response += token
            yield response
109
 
110
 
111
  demo = gr.ChatInterface(
112
 
113
  respond,
114
+ input_components=[gr.Textbox(label="Message"), gr.Image(label="image", type="pil")],
115
  multimodal=True,
116
  stop_btn = "Stop generation",
117
  # multimodal = True,