|
# Earlier FastAPI version of this app, left commented out for reference:
'''
from fastapi import FastAPI

app = FastAPI()


@app.get("/")
def greet_json():
    return {"Hello": "World!"}
'''
|
|
|
# Debug aid: print the interpreter's module search path at startup.
import sys
print(sys.path)
|
|
|
import os
import subprocess

# Install the llama3 package from GitHub at startup. Invoking pip through
# `sys.executable -m pip` is more robust than os.system, since it installs
# into the same interpreter that is running this app and fails loudly if
# the install does not succeed.
subprocess.check_call([
    sys.executable, '-m', 'pip', 'install',
    'git+https://github.com/meta-llama/llama3.git#egg=llama3',
])
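
# Note: if this runs on a managed platform (e.g. a Hugging Face Space), pinning
# this dependency in requirements.txt is usually preferable to a runtime install.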
|
|
|
|
|
|
|
from flask import Flask, request, jsonify, render_template, send_from_directory
from flask_cors import CORS
from llama3 import LlaMa3

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the chat frontend

# Instantiate the model once at startup so it is shared across requests.
llama3_model = LlaMa3()
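
# Assumption (interface not verified against the installed llama3 package):
# LlaMa3.chat() accepts a list of {"role": ..., "content": ...} message dicts
# and returns the reply text as a string, as used in the /chat route below.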
|
|
|
@app.route('/')
def index():
    return render_template('index.html')
|
|
|
@app.route('/chat', methods=['POST'])
def chat():
    # get_json(silent=True) returns None instead of raising on a missing or
    # malformed JSON body, so bad requests get a clean 400 rather than a 500.
    data = request.get_json(silent=True) or {}
    user_message = data.get('message', '')

    if not user_message.strip():
        return jsonify({"response": "Please enter a valid message!"}), 400

    try:
        # Wrap the raw text in the chat-message format the model expects.
        messages = [{"role": "user", "content": user_message}]
        ai_response = llama3_model.chat(messages)
        return jsonify({"response": ai_response})
    except Exception as e:
        print(f"Error during llama3 call: {e}")
        return jsonify({"response": "An error occurred, please try again later!"}), 500
|
|
|
@app.route('/favicon.ico')
def favicon():
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
|
|
|
if __name__ == '__main__':
    # debug=True enables the reloader and interactive debugger; use it for
    # local development only.
    app.run(debug=True, host='127.0.0.1', port=5000)
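
# Example client call (a sketch; assumes the server is running locally on port
# 5000 and that the `requests` package is installed):
#
#   import requests
#   r = requests.post("http://127.0.0.1:5000/chat", json={"message": "Hello!"})
#   print(r.json()["response"])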