# AI-API / app.py
from flask import Flask, request, Response, jsonify
from llama_cpp import Llama

app = Flask(__name__)

# Load the model once at startup
print("🔄 Loading model...")
llm = Llama.from_pretrained(
    repo_id="bartowski/google_gemma-3-1b-it-GGUF",
    filename="google_gemma-3-1b-it-IQ4_XS.gguf",
    n_ctx=2048,  # context window size in tokens
)
print("✅ Model loaded!")


def generate_response(user_input):
    """Generator function to stream model output"""
    try:
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": user_input}],
            stream=True  # Enable streaming
        )
        for chunk in response:
            if "choices" in chunk and len(chunk["choices"]) > 0:
                token = chunk["choices"][0]["delta"].get("content", "")
                if token:
                    print(f"📝 Token: {token}", flush=True)  # Debugging
                    yield token
    except Exception as e:
        print(f"❌ Error generating response: {e}")
        yield "[Error occurred]"
@app.route("/chat", methods=["POST"])
def chat():
user_input = request.json.get("message", "")
if not user_input:
return jsonify({"error": "Empty input"}), 400
return Response(generate_response(user_input), content_type="text/plain")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=7860, debug=True)
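

# --- Example client (a minimal sketch, not part of the app) ---
# Assuming the server runs on the host/port configured above, the stream
# can be consumed with curl (-N disables output buffering):
#
#   curl -N -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello!"}'
#
# or with the `requests` library, printing tokens as they arrive:
#
#   import requests
#
#   with requests.post(
#       "http://localhost:7860/chat",
#       json={"message": "Hello!"},
#       stream=True,
#   ) as r:
#       r.encoding = "utf-8"  # the server streams UTF-8 text
#       for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
#           print(chunk, end="", flush=True)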