Bhaskar2611 committed on
Commit
29e18ff
·
verified ·
1 Parent(s): b2873e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -19
app.py CHANGED
@@ -95,36 +95,88 @@
95
  # # Load your model after launching the interface
96
  # gr.load("models/Bhaskar2611/Capstone").launch()
97
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  import os
99
  import gradio as gr
100
  from huggingface_hub import InferenceClient
101
  from dotenv import load_dotenv
102
 
103
- # Load API token from .env or environment
104
  load_dotenv()
105
- HF_TOKEN = os.getenv("HF_TOKEN") # or directly use your token here
106
 
107
- # Initialize the Hugging Face inference client
108
  client = InferenceClient(
109
  model="mistralai/Mistral-7B-Instruct-v0.3",
110
  token=HF_TOKEN
111
  )
112
 
113
- # Skin assistant prompt
114
- HAIR_ASSISTANT_PROMPT = (
115
  "You are an AI Dermatologist chatbot designed to assist users with Hair by only providing text "
116
- "and if user information is not provided related to Hair then ask what they want to know related to Hair."
117
  )
118
 
 
119
  def respond(message, history):
120
- messages = [{"role": "system", "content": HAIR_ASSISTANT_PROMPT}]
121
- for user_msg, bot_msg in history:
122
- if user_msg:
123
- messages.append({"role": "user", "content": user_msg})
124
- if bot_msg:
125
- messages.append({"role": "assistant", "content": bot_msg})
126
  messages.append({"role": "user", "content": message})
127
 
 
128
  response = ""
129
  for chunk in client.chat.completions.create(
130
  model="mistralai/Mistral-7B-Instruct-v0.3",
@@ -134,16 +186,15 @@ def respond(message, history):
134
  top_p=0.95,
135
  stream=True,
136
  ):
137
- token = chunk.choices[0].delta.get("content", "")
138
  response += token
139
  yield response
140
 
141
- # Launch Gradio interface
142
- demo = gr.ChatInterface(
143
- fn=respond,
144
- title="Hair-Bot",
145
- theme="default"
146
- )
147
 
 
148
  if __name__ == "__main__":
149
  demo.launch()
 
95
  # # Load your model after launching the interface
96
  # gr.load("models/Bhaskar2611/Capstone").launch()
97
 
98
+ # import os
99
+ # import gradio as gr
100
+ # from huggingface_hub import InferenceClient
101
+ # from dotenv import load_dotenv
102
+
103
+ # # Load API token from .env or environment
104
+ # load_dotenv()
105
+ # HF_TOKEN = os.getenv("HF_TOKEN") # or directly use your token here
106
+
107
+ # # Initialize the Hugging Face inference client
108
+ # client = InferenceClient(
109
+ # model="mistralai/Mistral-7B-Instruct-v0.3",
110
+ # token=HF_TOKEN
111
+ # )
112
+
113
+ # # Skin assistant prompt
114
+ # HAIR_ASSISTANT_PROMPT = (
115
+ # "You are an AI Dermatologist chatbot designed to assist users with Hair by only providing text "
116
+ # "and if user information is not provided related to Hair then ask what they want to know related to Hair."
117
+ # )
118
+
119
+ # def respond(message, history):
120
+ # messages = [{"role": "system", "content": HAIR_ASSISTANT_PROMPT}]
121
+ # for user_msg, bot_msg in history:
122
+ # if user_msg:
123
+ # messages.append({"role": "user", "content": user_msg})
124
+ # if bot_msg:
125
+ # messages.append({"role": "assistant", "content": bot_msg})
126
+ # messages.append({"role": "user", "content": message})
127
+
128
+ # response = ""
129
+ # for chunk in client.chat.completions.create(
130
+ # model="mistralai/Mistral-7B-Instruct-v0.3",
131
+ # messages=messages,
132
+ # max_tokens=1024,
133
+ # temperature=0.7,
134
+ # top_p=0.95,
135
+ # stream=True,
136
+ # ):
137
+ # token = chunk.choices[0].delta.get("content", "")
138
+ # response += token
139
+ # yield response
140
+
141
+ # # Launch Gradio interface
142
+ # demo = gr.ChatInterface(
143
+ # fn=respond,
144
+ # title="Hair-Bot",
145
+ # theme="default"
146
+ # )
147
+
148
+ # if __name__ == "__main__":
149
+ # demo.launch()
150
import os
import gradio as gr
from huggingface_hub import InferenceClient
from dotenv import load_dotenv

# Load the Hugging Face API token from a .env file or the environment.
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize the Hugging Face Inference API client for the Mistral chat model.
client = InferenceClient(
    model="mistralai/Mistral-7B-Instruct-v0.3",
    token=HF_TOKEN,
)

# System prompt: restricts the assistant to hair-related dermatology topics.
# (Previous comment said "Indian monuments" — a copy-paste leftover that did
# not match the prompt text below.)
system_message = (
    "You are an AI Dermatologist chatbot designed to assist users with Hair by only providing text "
    "and if user information is not provided related to Hair then ask what they want to know related to Hair."
)
170
 
171
# Streaming chatbot logic
def respond(message, history):
    """Stream an assistant reply for *message*, given prior chat *history*.

    *history* is expected in Gradio "messages" format — a list of
    {"role": ..., "content": ...} dicts (set via type="messages" on the
    ChatInterface). Yields the partial response text as it grows, which
    Gradio renders as a live stream.
    """
    # Prepend the system prompt, then replay the conversation so far.
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    # Stream tokens from the model and accumulate them into one string.
    response = ""
    for chunk in client.chat.completions.create(
        model="mistralai/Mistral-7B-Instruct-v0.3",
        messages=messages,
        max_tokens=1024,
        temperature=0.7,
        top_p=0.95,
        stream=True,
    ):
        # Recent huggingface_hub versions return the delta as a dataclass-like
        # object, not a plain dict, so dict-style .get() can raise
        # AttributeError. Attribute access with an `or ""` guard handles both
        # a present-but-None content and keeps the accumulator a str.
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response
192
 
193
+ # Create Gradio interface
194
+ with gr.Blocks() as demo:
195
+ chatbot = gr.Chatbot(type='messages') # Use modern message format
196
+ gr.ChatInterface(fn=respond, chatbot=chatbot, type="messages") # Match format
 
 
197
 
198
+ # Launch app
199
  if __name__ == "__main__":
200
  demo.launch()