from flask import Flask, request, jsonify, Response, stream_with_context
from flask_cors import CORS
import os
import time
import logging
import threading
import queue

# Redirect the Hugging Face caches to /tmp *before* importing transformers;
# the library resolves its cache paths at import time, so setting these
# variables afterwards may have no effect.
os.environ["TRANSFORMERS_CACHE"] = "/tmp"
os.environ["HF_HOME"] = "/tmp"
os.environ["XDG_CACHE_HOME"] = "/tmp"

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)
CORS(app)

device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {device}")

# Loaded by initialize_models() before the server starts accepting requests.
tokenizer = None
model = None


def initialize_models():
    global tokenizer, model
    try:
        logger.info("Loading language model...")
        model_name = "Qwen/Qwen2.5-1.5B-Instruct"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        # float16 is only reliable on the GPU; several CPU ops lack
        # half-precision kernels, so fall back to float32 on CPU.
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            device_map=device,
            low_cpu_mem_usage=True
        )

        # Make sure a pad token is defined so generate() does not warn
        # about missing padding.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
            model.config.pad_token_id = model.config.eos_token_id

        logger.info("Models initialized successfully")
    except Exception as e:
        logger.error(f"Error initializing models: {str(e)}")
        raise


def thinking_process(message, result_queue):
    """Generate a reply in a background thread, streaming partial text into result_queue."""
    try:
        logger.info(f"Thinking about: '{message}'")

        # Build a ChatML-style prompt in the format Qwen instruct models expect.
        prompt = f"""<|im_start|>system
You are a helpful, friendly, and thoughtful AI assistant. Think carefully and provide informative, detailed responses.
<|im_end|>
<|im_start|>user
{message}<|im_end|>
<|im_start|>assistant
"""

        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
        inputs = {k: v.to(model.device) for k, v in inputs.items()}

        # Custom streamer that pushes partial decoded text into the queue.
        streamer = TextStreamer(tokenizer, result_queue)

        model.generate(
            **inputs,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            streamer=streamer,
            num_beams=1,
            no_repeat_ngram_size=3
        )

        # A None sentinel tells the consumer that generation has finished.
        result_queue.put(None)

    except Exception as e:
        logger.error(f"Error in thinking process: {str(e)}")
        result_queue.put(f"I apologize, but I encountered an error while processing your request: {str(e)}")
        result_queue.put(None)


class TextStreamer:
    """Minimal streamer for model.generate(streamer=...): put() receives the
    prompt ids first, then each newly generated token; end() runs at the end."""

    def __init__(self, tokenizer, queue):
        self.tokenizer = tokenizer
        self.queue = queue
        self.current_tokens = []
        self.prompt_skipped = False

    def put(self, token_ids):
        # The first call carries the (batched, 2D) prompt; skip it so only the
        # assistant's reply is streamed back to the client.
        if not self.prompt_skipped:
            self.prompt_skipped = True
            return
        self.current_tokens.extend(token_ids.reshape(-1).tolist())
        text = self.tokenizer.decode(self.current_tokens, skip_special_tokens=True)
        self.queue.put(text)

    def end(self):
        pass


@app.route('/')
def home():
    return jsonify({"message": "AI Chat API is running!"})


@app.route('/chat', methods=['POST'])
def chat():
    data = request.get_json(silent=True) or {}
    message = data.get("message", "")

    if not message:
        return jsonify({"error": "Message is required"}), 400

    try:
        def generate():
            result_queue = queue.Queue()

            # Run generation in a worker thread so tokens can be streamed
            # back to the client as they are produced.
            thread = threading.Thread(target=thinking_process, args=(message, result_queue))
            thread.start()

            # The streamer puts progressively longer decoded strings on the
            # queue; emit only the newly added suffix as a server-sent event.
            previous_text = ""
            while True:
                try:
                    result = result_queue.get(block=True, timeout=30)
                    if result is None:
                        break

                    if isinstance(result, str):
                        new_part = result[len(previous_text):]
                        previous_text = result
                        if new_part:
                            yield f"data: {new_part}\n\n"

                except queue.Empty:
                    yield "data: [Generation timeout. The model is taking too long to respond.]\n\n"
                    break

            yield "data: [DONE]\n\n"

        return Response(stream_with_context(generate()), mimetype='text/event-stream')

    except Exception as e:
        logger.error(f"Error processing chat request: {str(e)}")
        return jsonify({"error": f"An error occurred: {str(e)}"}), 500


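# A quick way to exercise the streaming /chat endpoint (a sketch, assuming the
# app is started as in the __main__ block below and reachable on
# localhost:7860; adjust host and port to your deployment). curl's -N flag
# disables buffering so the SSE chunks print as they arrive, and the stream
# ends with a "data: [DONE]" event:
#
#   curl -N -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Tell me something interesting about octopuses."}'

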
@app.route('/chat-simple', methods=['POST'])
def chat_simple():
    data = request.get_json(silent=True) or {}
    message = data.get("message", "")

    if not message:
        return jsonify({"error": "Message is required"}), 400

    try:
        prompt = f"""<|im_start|>system
You are a helpful, friendly, and thoughtful AI assistant. Think carefully and provide informative, detailed responses.
<|im_end|>
<|im_start|>user
{message}<|im_end|>
<|im_start|>assistant
"""

        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
        inputs = {k: v.to(model.device) for k, v in inputs.items()}

        output = model.generate(
            **inputs,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            num_beams=1,
            no_repeat_ngram_size=3
        )

        # generate() returns the prompt followed by the completion; decode only
        # the newly generated tokens. (Splitting on "<|im_end|>" would fail here
        # because skip_special_tokens=True strips the ChatML markers.)
        new_tokens = output[0][inputs["input_ids"].shape[-1]:]
        answer = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

        return jsonify({"response": answer})

    except Exception as e:
        logger.error(f"Error processing chat request: {str(e)}")
        return jsonify({"error": f"An error occurred: {str(e)}"}), 500


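# The non-streaming /chat-simple variant returns the whole reply as a single
# JSON object (same assumptions about host and port as in the example above):
#
#   curl -X POST http://localhost:7860/chat-simple \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Summarize the theory of relativity in one sentence."}'

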
if __name__ == "__main__":
    try:
        # Load the model before accepting traffic so the first request
        # does not pay the initialization cost.
        initialize_models()
        logger.info("Starting Flask application")
        app.run(host="0.0.0.0", port=7860)
    except Exception as e:
        logger.critical(f"Failed to start application: {str(e)}")