from flask import Flask, render_template, request, Response, stream_with_context
from llama_cpp import Llama
import time

app = Flask(__name__)

# Download (if not cached) and load the quantized Gemma GGUF model from the Hugging Face Hub.
print("Loading model...")
llm = Llama.from_pretrained(
    repo_id="bartowski/google_gemma-3-1b-it-GGUF",
    filename="google_gemma-3-1b-it-IQ4_XS.gguf",
)
print("Model loaded!")


@app.route("/")
def home():
    print("Serving index.html")
    return render_template("index.html")


@app.route("/chat", methods=["POST"])
def chat():
    user_input = request.json.get("message", "")
    print(f"Received message: {user_input}")

    def generate_response():
        print("Generating response...")
        # Ask the model for a chat completion and stream it token by token.
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": user_input}],
            stream=True,
        )

        # Each streamed chunk carries a delta; forward only newly generated text.
        for chunk in response:
            token = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
            if token:
                print(f"Token: {token}", end="", flush=True)
                yield token
                time.sleep(0.05)  # small pacing delay so the stream is visible client-side

    # stream_with_context keeps the request context alive while the generator runs.
    return Response(stream_with_context(generate_response()), content_type="text/plain")


if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=7860)
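
# A minimal sketch of how a client could consume the /chat stream from another
# terminal (not part of the app; assumes the server above is running on port 7860
# and that the `requests` package is installed):
#
#   import requests
#
#   with requests.post(
#       "http://localhost:7860/chat",
#       json={"message": "Hello!"},
#       stream=True,
#   ) as resp:
#       for piece in resp.iter_content(chunk_size=None, decode_unicode=True):
#           print(piece, end="", flush=True)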