from flask import Flask, request, jsonify
from huggingface_hub import InferenceClient

# Initialize the InferenceClient with your API key (hardcoded here, split across two strings)
x = "hf_uYlR"
y = "EMsDNbxQPJCSAHgwthrylHZZKKmGyg"
u = x + y
client = InferenceClient(api_key=u)

# Create a Flask app
app = Flask(__name__)


@app.route('/chat', methods=['POST'])
def chat():
    # Get the user message from the request
    user_message = request.json.get('message')

    # Check if the user message is provided
    if not user_message:
        return jsonify({"error": "No message provided"}), 400

    # Create a single-message list for the request
    messages = [{
        "role": "user",
        "content": (
            "System : you are AI model from corvo nero AI (you are not Qwen or anything from it ) "
            "and your name is corvo your task is chat with users and your mode is Normaly you can "
            "use Emojeis and uhh umm Emotions Here the chat sir :"
            f"{user_message}"
        ),
    }]

    # Create the chat completion request with the current message
    response = client.chat.completions.create(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=messages,
        max_tokens=1024,
        stream=False  # Set stream to False to get the full response at once
    )

    # Get the assistant's response
    assistant_message = response.choices[0].message.content

    # Replace newline characters with <br> for HTML rendering
    assistant_message = assistant_message.replace("\n", "<br>")

    # Return the assistant's response as JSON
    return jsonify({"response": assistant_message})


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=7860)