# NOTE(review): removed non-code file-viewer residue (file size, commit
# hashes, line-number gutter) that had been pasted into the source.
"""Flask web service exposing a chat endpoint backed by a LlaMa3 model.

Installs the llama3 package at startup, then wires up a small Flask app
with CORS enabled. (A commented-out FastAPI variant that used to live
here was dead code and has been removed.)
"""
import subprocess
import sys

print(sys.path)  # debug: show the module search path at startup

# Install the llama3 library at startup (Hugging-Face-Spaces-style hack).
# subprocess.run with an argument list and sys.executable targets the
# current interpreter and avoids shell string parsing; check=False matches
# the previous os.system call, which also did not raise on failure.
subprocess.run(
    [sys.executable, '-m', 'pip', 'install',
     'git+https://github.com/meta-llama/llama3.git#egg=llama3'],
    check=False,
)

import os

# These imports deliberately come AFTER the install step above so that
# llama3 is importable by the time we reach them.
from flask import Flask, request, jsonify, render_template, send_from_directory
from flask_cors import CORS

from llama3 import LlaMa3  # LlaMa3 wrapper class (installed above)

app = Flask(__name__)
CORS(app)

# Instantiate the LlaMa3 model once at import time so every request reuses it.
llama3_model = LlaMa3()
@app.route('/')
def index():
    """Serve the chat front-end page (templates/index.html)."""
    page = render_template('index.html')
    return page
@app.route('/chat', methods=['POST'])
def chat():
    """Handle a chat POST: forward the user's message to the LlaMa3 model.

    Expects a JSON body like {"message": "..."}.
    Returns JSON {"response": "..."} with status 200 on success,
    400 for a missing/blank/non-string message, 500 on model failure.
    """
    # request.json is None (or raises) when the body is not valid JSON,
    # which previously crashed with AttributeError instead of the intended
    # 400. get_json(silent=True) lets us fall back to an empty payload.
    payload = request.get_json(silent=True) or {}
    user_message = payload.get('message', '')
    # Guard against non-string payload values as well as blank input,
    # so .strip() cannot raise.
    if not isinstance(user_message, str) or not user_message.strip():
        return jsonify({"response": "请输入有效内容!"}), 400
    try:
        # Build the single-turn chat context expected by LlaMa3.chat().
        messages = [{"role": "user", "content": user_message}]
        ai_response = llama3_model.chat(messages)
        return jsonify({"response": ai_response})
    except Exception as e:
        # Top-level boundary handler: log the error and return a generic
        # 500 so model internals are not leaked to the client.
        print(f"Error during llama3 call: {e}")
        return jsonify({"response": "发生错误,请稍后重试!"}), 500
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the application's static directory."""
    static_dir = os.path.join(app.root_path, 'static')
    return send_from_directory(
        static_dir,
        'favicon.ico',
        mimetype='image/vnd.microsoft.icon',
    )
if __name__ == '__main__':
    # Removed a stray trailing "|" (file-viewer artifact) that made this
    # line a syntax error.
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which allows arbitrary code execution — never use in production.
    app.run(debug=True, host='127.0.0.1', port=5000)