mobinln committed on
Commit
9d6f55d
·
1 Parent(s): 549b996

change to phi3 vision

Browse files
Files changed (1) hide show
  1. app.py +1 -13
app.py CHANGED
@@ -4,14 +4,7 @@ from huggingface_hub import InferenceClient
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
- choices = [
8
- "meta-llama/Meta-Llama-3-8B-Instruct",
9
- "microsoft/Phi-3-vision-128k-instruct",
10
- "Qwen/Qwen2-0.5B-Instruct",
11
- ]
12
- client1 = InferenceClient(model=choices[0])
13
- client2 = InferenceClient(model=choices[1])
14
- client3 = InferenceClient(model=choices[2])
15
 
16
 
17
  def respond(
@@ -21,7 +14,6 @@ def respond(
21
  max_tokens,
22
  temperature,
23
  top_p,
24
- model,
25
  ):
26
  messages = [{"role": "system", "content": system_message}]
27
 
@@ -35,9 +27,6 @@ def respond(
35
 
36
  response = ""
37
 
38
- client = (
39
- client1 if model == choices[0] else client2 if model == choices[1] else client3
40
- )
41
  for message in client.chat_completion(
42
  messages,
43
  max_tokens=max_tokens,
@@ -67,7 +56,6 @@ demo = gr.ChatInterface(
67
  step=0.05,
68
  label="Top-p (nucleus sampling)",
69
  ),
70
- gr.Dropdown(choices=choices, value=choices[0], label="Model"),
71
  ],
72
  multimodal=True,
73
  )
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
+ client = InferenceClient(model="microsoft/Phi-3-vision-128k-instruct")
 
 
 
 
 
 
 
8
 
9
 
10
  def respond(
 
14
  max_tokens,
15
  temperature,
16
  top_p,
 
17
  ):
18
  messages = [{"role": "system", "content": system_message}]
19
 
 
27
 
28
  response = ""
29
 
 
 
 
30
  for message in client.chat_completion(
31
  messages,
32
  max_tokens=max_tokens,
 
56
  step=0.05,
57
  label="Top-p (nucleus sampling)",
58
  ),
 
59
  ],
60
  multimodal=True,
61
  )