from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

app = Flask(__name__)

# Load YugoGPT and its tokenizer once at startup so each request doesn't reload them.
model_name = "gordicaleksa/YugoGPT"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

@app.route("/chat", methods=["POST"])
def chat():
    user_input = request.json.get("inputs")
    if not user_input:
        # Guard against a missing or empty "inputs" field instead of crashing.
        return jsonify({"error": "Missing 'inputs' field"}), 400

    # Tokenize with an attention mask (returned by calling the tokenizer
    # directly) so generate() doesn't warn about missing masks.
    inputs = tokenizer(user_input, return_tensors="pt")
    with torch.no_grad():  # inference only, no gradients needed
        outputs = model.generate(
            **inputs,
            max_length=100,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return jsonify({"response": response})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)
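
A quick way to exercise the endpoint is a small client script. This is a minimal sketch assuming the server above is running locally on port 8080; the URL, prompt text, and timeout are illustrative, while the endpoint path and JSON keys ("inputs", "response") match the Flask app.

import requests

# Hypothetical local address; adjust host/port to match your deployment.
resp = requests.post(
    "http://localhost:8080/chat",
    json={"inputs": "Zdravo, kako si?"},  # example prompt, not from the source
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])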