jonaschua committed on
Commit
6459a05
·
verified ·
1 Parent(s): 4fe8412

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -17
app.py CHANGED
@@ -10,6 +10,7 @@ model = ""
10
  duration = None
11
  token = os.getenv('deepseekv2')
12
  provider = None #'fal-ai' #None #replicate # sambanova
 
13
 
14
  print(f"Is CUDA available: {torch.cuda.is_available()}")
15
  print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
@@ -47,6 +48,10 @@ def choose_model(model_name):
47
  elif model_name == "Llama2-13b-chat":
48
  model = "meta-llama/Llama-2-13b-chat-hf"
49
 
 
 
 
 
50
  elif model_name == "Gemma-2-2b":
51
  model = "google/gemma-2-2b-it"
52
 
@@ -94,23 +99,41 @@ def respond(message, history: list[tuple[str, str]], model, system_message, max_
94
  response += token
95
  yield response
96
 
97
-
98
- demo = gr.ChatInterface(
99
- respond,
100
- stop_btn = "Stop generation",
101
- multimodal = True,
102
- title="Ask me anything",
103
- description="Hi there! I am your friendly AI chatbot. Choose from different language models under the Additional Inputs tab below.",
104
- examples=[["Explain quantum computing"], ["Explain forex trading"], ["What is the capital of China?"], ["Make a poem about nature"]],
105
- additional_inputs=[
106
- gr.Dropdown(["DeepSeek-R1-Distill-Qwen-1.5B", "DeepSeek-R1-Distill-Qwen-32B", "Gemma-2-2b", "Gemma-7b", "Llama2-13b-chat", "Llama3-8b-Instruct", "Llama3.1-8b-Instruct", "Microsoft-phi-2", "Mixtral-8x7B-Instruct", "Qwen2.5-Coder-32B-Instruct", "Zephyr-7b-beta"], label="Select Model"),
107
- gr.Textbox(value="You are a friendly and helpful Chatbot, be concise and straight to the point, avoid excessive reasoning.", label="System message"),
108
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
109
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
110
- gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
111
-
112
- ]
113
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
 
115
 
116
  if __name__ == "__main__":
 
10
  duration = None
11
  token = os.getenv('deepseekv2')
12
  provider = None #'fal-ai' #None #replicate # sambanova
13
+ mode = "text-to-text"
14
 
15
  print(f"Is CUDA available: {torch.cuda.is_available()}")
16
  print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
 
48
  elif model_name == "Llama2-13b-chat":
49
  model = "meta-llama/Llama-2-13b-chat-hf"
50
 
51
+ elif model_name == "Llama-3.2-11B-Vision-Instruct":
52
+ model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
53
+ mode = "image-to-text"
54
+
55
  elif model_name == "Gemma-2-2b":
56
  model = "google/gemma-2-2b-it"
57
 
 
99
  response += token
100
  yield response
101
 
102
+ if mode == "text-to-text":
103
+ demo = gr.ChatInterface(
104
+ respond,
105
+ stop_btn = "Stop generation",
106
+ multimodal = True,
107
+ title="Ask me anything",
108
+ description="Hi there! I am your friendly AI chatbot. Choose from different language models under the Additional Inputs tab below.",
109
+ examples=[["Explain quantum computing"], ["Explain forex trading"], ["What is the capital of China?"], ["Make a poem about nature"]],
110
+ additional_inputs=[
111
+ gr.Dropdown(["DeepSeek-R1-Distill-Qwen-1.5B", "DeepSeek-R1-Distill-Qwen-32B", "Gemma-2-2b", "Gemma-7b", "Llama2-13b-chat", "Llama3-8b-Instruct", "Llama3.1-8b-Instruct", "Microsoft-phi-2", "Mixtral-8x7B-Instruct", "Qwen2.5-Coder-32B-Instruct", "Zephyr-7b-beta"], label="Select Model"),
112
+ gr.Textbox(value="You are a friendly and helpful Chatbot, be concise and straight to the point, avoid excessive reasoning.", label="System message"),
113
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
114
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
115
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
116
+
117
+ ]
118
+ )
119
+
120
+ elif mode == "image-to-text":
121
+ demo = gr.ChatInterface(
122
+ respond,
123
+ stop_btn = "Stop generation",
124
+ multimodal = True,
125
+ title="Ask me anything",
126
+ description="Hi there! I am your friendly AI chatbot. Choose from different language models under the Additional Inputs tab below.",
127
+ examples=[["Explain quantum computing"], ["Explain forex trading"], ["What is the capital of China?"], ["Make a poem about nature"]],
128
+ additional_inputs=[
129
+ gr.Dropdown(["DeepSeek-R1-Distill-Qwen-1.5B", "DeepSeek-R1-Distill-Qwen-32B", "Gemma-2-2b", "Gemma-7b", "Llama2-13b-chat", "Llama3-8b-Instruct", "Llama3.1-8b-Instruct", "Llama-3.2-11B-Vision-Instruct", "Microsoft-phi-2", "Mixtral-8x7B-Instruct", "Qwen2.5-Coder-32B-Instruct", "Zephyr-7b-beta"], label="Select Model"),
130
+ gr.Textbox(value="You are a friendly and helpful Chatbot, be concise and straight to the point, avoid excessive reasoning.", label="System message"),
131
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
132
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
133
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
134
+
135
+ ]
136
+ )
137
 
138
 
139
  if __name__ == "__main__":