from flask import Flask, request, jsonify
from huggingface_hub import InferenceClient

# Initialize the InferenceClient with your API key (assembled from two parts)
x = "hf_uYlR"
y = "EMsDNbxQPJCSAHgwthrylHZZKKmGyg"
u = x + y
client = InferenceClient(api_key=u)
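
# A minimal alternative sketch for supplying the key, assuming it is exported as an
# environment variable (the variable name HF_TOKEN is an assumption, not part of the
# original code):
#
#   import os
#   client = InferenceClient(api_key=os.environ["HF_TOKEN"])
#
# Reading the token from the environment keeps the secret out of the source file.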

# Create a Flask app
app = Flask(__name__)
# Expose the chat endpoint (route path '/chat' and POST method are assumed here)
@app.route("/chat", methods=["POST"])
def chat():
    # Get the user message from the request body
    user_message = request.json.get("message")
    # Check that a message was provided
    if not user_message:
        return jsonify({"error": "No message provided"}), 400
    # Build the conversation: a system prompt followed by the user's message
    messages = [
        {"role": "system", "content": "You are an AI model from Curvo AI. Your task is to chat with users, be fun, use emojis, and add fillers like 'uhh' and 'umm' to sound more human."},
        {"role": "user", "content": user_message},
    ]
    # Create the chat completion request for the current conversation
    response = client.chat.completions.create(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=messages,
        max_tokens=1024,
        stream=False,  # set stream to False to get the full response at once
    )
    # Get the assistant's response
    assistant_message = response.choices[0].message.content
    # Replace newline characters with <br> for HTML rendering
    assistant_message = assistant_message.replace("\n", "<br>")
    # Return the assistant's response as JSON
    return jsonify({"response": assistant_message})

if __name__ == '__main__':
    app.run(host="0.0.0.0", port=7860)
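
# Example client call (a sketch; assumes the app is running locally on port 7860 and
# that the chat endpoint is registered at /chat as above):
#
#   import requests
#   r = requests.post("http://localhost:7860/chat", json={"message": "Hello!"})
#   print(r.json()["response"])
#
# The endpoint responds with JSON of the form {"response": "<assistant reply>"}.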