umarbalak committed
Commit 749dfac · 1 Parent(s): 520348b

initial commit

Files changed (4):
  1. .gitignore +9 -0
  2. README.md +1 -0
  3. app.py +72 -48
  4. requirements.txt +3 -1
.gitignore ADDED
@@ -0,0 +1,9 @@
+.env
+.venv
+test.py
+model.py
+*pycache*
+hf*
+other*
+test*
+frontend
README.md CHANGED
@@ -8,6 +8,7 @@ sdk_version: 5.0.1
 app_file: app.py
 pinned: false
 license: mit
+short_description: AI Chatbot
 ---
 
 An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
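The README line above names the stack the Space is built on. As a rough, minimal sketch of the underlying Inference API call (the model ID and the `HUGGING_FACE_API_TOKEN` variable are taken from this commit's app.py; the prompt and token limit are placeholders):

```python
import os

from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Read the API token the same way the new app.py does.
load_dotenv()
client = InferenceClient(api_key=os.getenv("HUGGING_FACE_API_TOKEN"))

# Single, non-streaming chat completion against the Inference API.
response = client.chat.completions.create(
    model="mistralai/Mistral-7B-Instruct-v0.3",  # model selected in app.py
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(response.choices[0].message.content)
```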
app.py CHANGED
@@ -1,64 +1,88 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
 """
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
 
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
 
-    messages.append({"role": "user", "content": message})
 
-    response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
 
-        response += token
-        yield response
 
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
 
 if __name__ == "__main__":
-    demo.launch()
 import gradio as gr
 from huggingface_hub import InferenceClient
+import os
+from dotenv import load_dotenv
 
+# Load environment variables
+load_dotenv()
+api_key = os.getenv("HUGGING_FACE_API_TOKEN")
+
+# Initialize Hugging Face client
+client = InferenceClient(api_key=api_key)
+# model = "meta-llama/Llama-3.2-3B-Instruct"
+model = "mistralai/Mistral-7B-Instruct-v0.3"
+# model = "mistralai/Mistral-Nemo-Instruct-2407"
+
+# Define chat function
+def chat_with_model(query, history):
+    """
+    Takes user input and returns a chatbot response.
+    Maintains a conversation history in the correct Gradio format.
+    """
+
+    # Ensure history is initialized correctly as a list of tuples
+    if history is None:
+        history = []
+
+    # Construct messages for the model
+    messages = [{
+        "role": "system",
+        "content":
 """
+Your name is CollabAI, and you are a knowledgeable and efficient AI assistant. Respond concisely and helpfully to user queries without unnecessary introductions.
+If a user asks for the current date or time, respond with:
+'I don’t have real-time access to the current date and time. However, you can check your device’s clock or a reliable online source for the exact information.'
+For other real-time queries like news, stock prices, weather updates, or live events, inform the user that you do not have real-time data access but can provide general insights or historical context if needed.
+If a request is unclear, ask for clarification. If an action is beyond your capability, politely explain your limitations while guiding the user to alternative solutions."
 """
+    }
+    ]
+
+    # Append previous chat history correctly
+    for user_msg, bot_msg in history:
+        messages.append({"role": "user", "content": user_msg})
+        messages.append({"role": "assistant", "content": bot_msg})
 
+    # Add new user input
+    messages.append({"role": "user", "content": query})
 
+    try:
+        response = client.chat.completions.create(
+            model=model,
+            messages=messages,
+            temperature=0.5,
+            max_tokens=2048,
+            top_p=0.7,
+            stream=False
+        )
 
+        bot_response = response.choices[0].message.content
 
+        # Append the new conversation to history as a tuple
+        history.append((query, bot_response))
 
+        return "", history  # Clear input box & update chat history
 
+    except Exception as e:
+        error_msg = f"⚠️ Error: {str(e)}"
+        history.append((query, error_msg))  # Add error message to history
+        return "", history
 
 
+# Gradio UI
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown("### 🤖 CollabAI - Chatbot")
+    chatbot = gr.Chatbot(label="Chat")
+    msg = gr.Textbox(label="Query", placeholder="Type here...", lines=2, interactive=True)
+    send_btn = gr.Button("Ask")
+    clear_btn = gr.Button("Clear Chat")
 
+    # Bind Send Button to Function
+    send_btn.click(chat_with_model, inputs=[msg, chatbot], outputs=[msg, chatbot])
+
+    # Bind Clear Button
+    clear_btn.click(lambda: ("", []), outputs=[msg, chatbot])  # Clears input & chat history
 
+# Launch Gradio app
 if __name__ == "__main__":
+    demo.launch()
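The new app.py reads HUGGING_FACE_API_TOKEN through python-dotenv, and the new .gitignore keeps the .env file out of the repo. A quick local smoke test of the chat function, outside the Gradio UI, might look like the sketch below (the file name is hypothetical and not part of this commit; it assumes a .env defining HUGGING_FACE_API_TOKEN sits next to app.py):

```python
# smoke_test.py — hypothetical helper, not included in this commit.
# Assumes app.py from this commit plus a .env file with HUGGING_FACE_API_TOKEN.
from app import chat_with_model

# chat_with_model returns ("", updated_history); the last history entry is the
# (query, bot_response) tuple the function appends.
_, history = chat_with_model("What is your name?", [])
print(history[-1][1])
```

Importing app builds the Gradio Blocks UI but does not launch it, since demo.launch() sits behind the __main__ guard.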
requirements.txt CHANGED
@@ -1 +1,3 @@
-huggingface_hub==0.25.2
+huggingface_hub
+gradio
+python-dotenv