jonaschua committed on
Commit ff96e27 · verified · 1 Parent(s): a6b26de

Upload 3 files

Files changed (3)
  1. app.py +115 -0
  2. providers +3 -0
  3. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,115 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ import spaces  # 0.32.0
+ import torch
+ import os
+ import platform
+ import requests
+
+ model = ""
+ duration = None
+ token = os.getenv('deepseekv2')
+ provider = None  # e.g. 'fal-ai', 'replicate', 'sambanova'; None defaults to hf-inference
+
+ print(f"Is CUDA available: {torch.cuda.is_available()}")
+ if torch.cuda.is_available():  # guard: these calls raise on CPU-only machines
+     print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
+     print(f"CUDA version: {torch.version.cuda}")
+ print(f"Python version: {platform.python_version()}")
+ print(f"Pytorch version: {torch.__version__}")
+ print(f"Gradio version: {gr.__version__}")
+ # print(f"HFhub version: {huggingface_hub.__version__}")
+
+
+ """
+ Packages ::::::::::
+ Is CUDA available: True
+ CUDA device: NVIDIA A100-SXM4-80GB MIG 3g.40gb
+ CUDA version: 12.1
+ Python version: 3.10.13
+ Pytorch version: 2.4.0+cu121
+ Gradio version: 5.0.1
+ """
+
+
+ def choose_model(model_name):
+     # Map the friendly dropdown name to its Hugging Face model id
+     if model_name == "DeepSeek-R1-Distill-Qwen-1.5B":
+         model = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
+     elif model_name == "DeepSeek-R1-Distill-Qwen-32B":
+         model = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
+     elif model_name == "Llama3-8b-Instruct":
+         model = "meta-llama/Meta-Llama-3-8B-Instruct"
+     elif model_name == "Llama3.1-8b-Instruct":
+         model = "meta-llama/Llama-3.1-8B-Instruct"
+     elif model_name == "Llama2-13b-chat":
+         model = "meta-llama/Llama-2-13b-chat-hf"
+     elif model_name == "Gemma-2-2b":
+         model = "google/gemma-2-2b-it"
+     elif model_name == "Gemma-7b":
+         model = "google/gemma-7b"
+     elif model_name == "Mixtral-8x7B-Instruct":
+         model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+     elif model_name == "Microsoft-phi-2":
+         model = "microsoft/phi-2"
+     elif model_name == "Qwen2.5-Coder-32B-Instruct":
+         model = "Qwen/Qwen2.5-Coder-32B-Instruct"
+     else:  # default to zephyr if no model chosen
+         model = "HuggingFaceH4/zephyr-7b-beta"
+
+     return model
+
+
+ @spaces.GPU(duration=duration)
+ def respond(message, history: list[tuple[str, str]], model, system_message, max_tokens, temperature, top_p):
+
+     print(model)
+     model_name = choose_model(model)
+
+     client = InferenceClient(model_name, provider=provider, token=token)
+
+     # Rebuild the conversation in the OpenAI-style message format
+     messages = [{"role": "system", "content": system_message}]
+
+     for user_msg, assistant_msg in history:
+         if user_msg:
+             messages.append({"role": "user", "content": user_msg})
+         if assistant_msg:
+             messages.append({"role": "assistant", "content": assistant_msg})
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+
+     # Stream the reply chunk by chunk; `chunk` avoids shadowing `message`
+     for chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p):
+         delta = chunk.choices[0].delta.content
+         if delta:  # delta can be None on some stream chunks
+             response += delta
+             yield response
+
+
+ demo = gr.ChatInterface(
+     respond,
+     title="Ask me anything",
+     description="Hi there! I am your friendly AI chatbot. Choose from different language models under the Additional Inputs tab below.",
+     examples=[["Explain quantum computing"], ["Explain forex trading"], ["What is the capital of China?"], ["Make a poem about nature"]],
+     additional_inputs=[
+         gr.Dropdown(["DeepSeek-R1-Distill-Qwen-1.5B", "DeepSeek-R1-Distill-Qwen-32B", "Gemma-2-2b", "Gemma-7b", "Llama2-13b-chat", "Llama3-8b-Instruct", "Llama3.1-8b-Instruct", "Microsoft-phi-2", "Mixtral-8x7B-Instruct", "Qwen2.5-Coder-32B-Instruct", "Zephyr-7b-beta"], label="Select Model"),
+         gr.Textbox(value="You are a friendly and helpful Chatbot, be concise and straight to the point, avoid excessive reasoning.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
+     ]
+ )
+
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
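
A note on the @spaces.GPU decorator used in app.py above: with duration=None the ZeroGPU runtime falls back to its default allocation window, and an explicit per-call budget can be requested instead. A minimal sketch, assuming the spaces package pinned in requirements.txt; the function name is illustrative:

    import spaces

    @spaces.GPU(duration=120)  # request up to ~120 seconds of GPU time per call
    def generate(prompt):
        ...  # GPU-dependent work runs inside the decorated function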
providers ADDED
@@ -0,0 +1,3 @@
+ provider (str, optional) — Name of the provider to use for inference. Can be "black-forest-labs", "fal-ai", "fireworks-ai", "hf-inference",
+ "hyperbolic", "nebius", "novita", "replicate", "sambanova" or "together". Defaults to "hf-inference" (Hugging Face Serverless Inference API).
+ If model is a URL or `base_url` is passed, then `provider` is not used.
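
For context, a minimal sketch of passing the provider argument to InferenceClient as described above; the model id and the HF_TOKEN variable are illustrative assumptions, not part of the committed files:

    import os
    from huggingface_hub import InferenceClient

    client = InferenceClient(
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",  # assumed model for illustration
        provider="sambanova",                        # provider=None would fall back to "hf-inference"
        token=os.getenv("HF_TOKEN"),                 # assumed: a token authorized for this provider
    )
    out = client.chat_completion([{"role": "user", "content": "Hello!"}], max_tokens=64)
    print(out.choices[0].message.content)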
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ huggingface_hub==0.28.1
+ --extra-index-url https://download.pytorch.org/whl/cu124
+ torch==2.4.0
+ spaces
+ gradio==5.12.0