paleDriver7 committed
Commit efd11a3 · verified · 1 Parent(s): fdda49c

Upload 3 files

backend/__pycache__/llama3.cpython-312.pyc ADDED
Binary file (1.87 kB).
 
backend/app.py ADDED
@@ -0,0 +1,44 @@
+ from flask import Flask, request, jsonify, render_template, send_from_directory
+ from flask_cors import CORS
+ import os
+ from llama3 import LlaMa3  # import your LlaMa3 class
+
+ app = Flask(__name__)
+ CORS(app)
+
+ # Instantiate the LlaMa3 model
+ llama3_model = LlaMa3()
+
+ @app.route('/')
+ def index():
+     # Return the HTML page
+     return render_template('index.html')
+
+ @app.route('/chat', methods=['POST'])
+ def chat():
+     # Get the user message sent by the frontend
+     user_message = request.json.get('message', '')
+
+     if not user_message.strip():
+         return jsonify({"response": "Please enter a valid message!"}), 400
+
+     try:
+         # Build the chat context
+         messages = [{"role": "user", "content": user_message}]
+
+         # Call LlaMa3's chat method to generate a reply
+         ai_response = llama3_model.chat(messages)
+
+         # Return the AI's reply
+         return jsonify({"response": ai_response})
+     except Exception as e:
+         print(f"Error during llama3 call: {e}")
+         return jsonify({"response": "An error occurred, please try again later!"}), 500
+
+ @app.route('/favicon.ico')
+ def favicon():
+     return send_from_directory(os.path.join(app.root_path, 'static'),
+                                'favicon.ico', mimetype='image/vnd.microsoft.icon')
+
+ if __name__ == '__main__':
+     app.run(debug=True, host='127.0.0.1', port=5000)
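
For context, here is a minimal client-side sketch of exercising the /chat route added above. It is not part of the commit; the requests dependency and the 127.0.0.1:5000 address are assumptions taken from the app.run() defaults.

import requests

# Send one user message to the Flask /chat endpoint and print the reply.
resp = requests.post(
    "http://127.0.0.1:5000/chat",
    json={"message": "Should I refactor this module now or later?"},
)
resp.raise_for_status()
print(resp.json()["response"])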
backend/llama3.py ADDED
@@ -0,0 +1,38 @@
+ from openai import OpenAI
+ import os
+ class LlaMa3:
+     def __init__(self) -> None:
+         self.client = OpenAI(
+             base_url="https://integrate.api.nvidia.com/v1",
+             api_key=os.getenv("NVIDIA_API_KEY")  # keep the key out of source control; the key committed here was live and should be rotated
+         )
+         self.name = "Llama3"
+
+         # Initial greeting and request for decision topic
+         self.initial_prompt = """
+         Hello! I can assist you in making a decision. What decision would you like to make today?
+         Please describe the decision and provide any relevant details to help me understand.
+         """
+
+     def chat(self, messages):
+         # If this is the first message, we use the initial prompt to greet and ask for the decision topic
+         if len(messages) == 0:  # Initial conversation step
+             messages.append({"role": "system", "content": self.initial_prompt})
+
+         # Call the API to get the model's response
+         completion = self.client.chat.completions.create(
+             model="nvidia/llama-3.1-nemotron-70b-instruct",
+             messages=messages,
+             temperature=0.5,
+             top_p=1,
+             max_tokens=1024,
+             stream=True
+         )
+
+         # Accumulate the streamed chunks into a single reply string
+         response = ""
+         for chunk in completion:
+             if chunk.choices[0].delta.content is not None:
+                 response += chunk.choices[0].delta.content
+
+         return response
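
For context, a minimal sketch of using the LlaMa3 class on its own, outside the Flask app. It is not part of the commit and assumes the NVIDIA_API_KEY environment variable is set for the constructor above.

from llama3 import LlaMa3

# One-turn conversation; chat() joins the streamed chunks into a single string.
model = LlaMa3()
messages = [{"role": "user", "content": "Help me decide between SQLite and Postgres."}]
print(model.chat(messages))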