Spaces:
Sleeping
Sleeping
File size: 1,526 Bytes
03e7882 0ede83c 7daf25d 0ede83c f0944cc 0ede83c 7daf25d 0ede83c 7daf25d 0ede83c 79e247b 0ede83c 8736060 0ede83c f0944cc 0ede83c 0db0b4e f0944cc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
import os

from flask import Flask, request, jsonify
from huggingface_hub import InferenceClient

# SECURITY(review): the original hard-coded a live Hugging Face API token in
# source, split across two string literals (likely to dodge secret scanners).
# That token must be considered leaked and should be revoked. We now prefer
# the HF_API_KEY environment variable and keep the original literal only as a
# backward-compatible fallback for existing deployments.
x = "hf_uYlR"
y = "EMsDNbxQPJCSAHgwthrylHZZKKmGyg"
u = os.environ.get("HF_API_KEY", x + y)

# Client for the Hugging Face Inference API (used for chat completions).
client = InferenceClient(api_key=u)

# Flask application exposing the /chat endpoint.
app = Flask(__name__)
@app.route('/chat', methods=['POST'])
def chat():
    """Handle POST /chat: forward the user's message to the model and return its reply.

    Expects a JSON body like ``{"message": "..."}``.

    Returns:
        200 with ``{"response": "<html-ish text>"}`` on success,
        400 with ``{"error": ...}`` when no message is provided,
        502 with ``{"error": ...}`` when the upstream inference call fails.
    """
    # get_json(silent=True) returns None instead of raising a 415/400 when the
    # body is missing or not valid JSON, so we can emit our own JSON error.
    payload = request.get_json(silent=True) or {}
    user_message = payload.get('message')

    if not user_message:
        return jsonify({"error": "No message provided"}), 400

    # Fix: the original concatenated the system instructions and the user text
    # into ONE user message with no separator ("...more human{user_message}").
    # Use a proper system-role message and keep the user's text untouched.
    messages = [
        {
            "role": "system",
            "content": (
                "you are AI model from Curvo AI your task is chat with users "
                "and be fun use Emojeis etc and make sure to use uhh umm "
                "Emotions to be more human"
            ),
        },
        {"role": "user", "content": user_message},
    ]

    try:
        # Full (non-streamed) completion so we can return one JSON payload.
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=messages,
            max_tokens=1024,
            stream=False,
        )
    except Exception as exc:
        # Upstream/API failure should surface as a 502, not an unhandled 500.
        return jsonify({"error": f"Inference request failed: {exc}"}), 502

    assistant_message = response.choices[0].message.content
    # Replace newlines with <br> for direct HTML rendering in the client.
    assistant_message = assistant_message.replace("\n", "<br>")

    return jsonify({"response": assistant_message})
if __name__ == '__main__':
    # Removed the stray trailing "|" artifact that made this line a syntax
    # error. Bind all interfaces on port 7860 (Hugging Face Spaces default).
    app.run(host="0.0.0.0", port=7860)