play7284 committed on
Commit
07f58cf
1 Parent(s): 8196095

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +151 -25
app.py CHANGED
@@ -1,26 +1,152 @@
1
- import torch
2
- from transformers import pipeline
3
-
4
- from huggingface_hub import login
5
-
6
- t = '12345hf_********************************'  # [REDACTED in review: hardcoded API token — revoke this credential immediately]
7
-
8
- print(t[5:])
9
- login(t[5:])
10
-
11
- model_id = "meta-llama/Llama-3.2-1B-Instruct"
12
- pipe = pipeline(
13
- "text-generation",
14
- model=model_id,
15
- torch_dtype=torch.bfloat16,
16
- device_map="auto",
17
- )
18
- messages = [
19
- {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
20
- {"role": "user", "content": "Who are you?"},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  ]
22
- outputs = pipe(
23
- messages,
24
- max_new_tokens=256,
25
- )
26
- print(outputs[0]["generated_text"][-1])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from huggingface_hub import InferenceClient
3
+ from dotenv import load_dotenv
4
+ import os
5
+ import json
6
+ import time
7
+ import requests
8
+
9
# Load environment variables from a .env file (if present)
load_dotenv()

# Read the Hugging Face API key from the environment / .env file
api_key = os.getenv("HUGGINGFACE_API_KEY")

# Inference client used for all chat-completion calls below
client = InferenceClient(api_key=api_key)

# Path of the JSON file that persists user-defined system prompts
PROMPTS_FILE = "custom_prompts.json"
19
+
20
# Load preset system prompts
def load_prompts(path=None):
    """Return the saved custom system prompts, or built-in defaults.

    Args:
        path: Optional JSON file path; defaults to ``PROMPTS_FILE``.
            (Backward compatible: existing ``load_prompts()`` calls are
            unchanged.)

    Returns:
        dict mapping prompt name -> prompt text. Falls back to the
        built-in defaults when the file is missing, unreadable, or
        contains invalid JSON — a corrupt prompts file must not crash
        the app at startup.
    """
    if path is None:
        path = PROMPTS_FILE
    if os.path.exists(path):
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (json.JSONDecodeError, OSError):
            # Corrupt or unreadable file: fall through to the defaults.
            pass
    return {
        "翻译助手": "翻译助手,给你中文你翻译成英文,给你英文你翻译成中文。直接翻译,不要解释。",
        "代码解释器": "你是一个代码解释器。请解释用户提供的代码,并在需要时提供改进建议。",
        "创意写作": "你是一个创意写作助手。根据用户提供的主题或开头,继续创作故事或文章。",
        "数学导师": "你是一个数学导师。帮助用户解决数学问题,并解释解题步骤。",
        "历史学家": "你是一个历史学家。回答用户关于历史事件、人物和时期的问题,提供详细和准确的信息。"
    }
32
+
33
# Save preset system prompts
def save_prompts(prompts, path=None):
    """Persist *prompts* to disk as pretty-printed UTF-8 JSON.

    Args:
        prompts: Mapping of prompt name -> prompt text.
        path: Optional JSON file path; defaults to ``PROMPTS_FILE``.
            (Backward compatible: existing ``save_prompts(p)`` calls are
            unchanged.)

    ``ensure_ascii=False`` keeps the Chinese prompt text human-readable
    in the saved file.
    """
    if path is None:
        path = PROMPTS_FILE
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(prompts, f, ensure_ascii=False, indent=2)
37
+
38
# Initialize per-session state on first run
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'custom_prompts' not in st.session_state:
    st.session_state.custom_prompts = load_prompts()

# Page title
st.title("AI 助手")

# Sidebar: generation-parameter controls
st.sidebar.header("参数设置")

# Model selection (single entry for now; extend this list to add models)
models = [
    "Qwen/Qwen2.5-72B-Instruct",
]
selected_model = st.sidebar.selectbox("选择模型", models)

temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7, 0.1)
# NOTE(review): default 8196 looks like a typo of 8192 — confirm intent
max_tokens = st.sidebar.slider("Max Tokens", 100, 32768, 8196, 100)
top_p = st.sidebar.slider("Top P", 0.1, 1.0, 0.9, 0.1)
59
+
60
# System-prompt selection: a saved prompt, or "自定义" for free-form entry
selected_prompt = st.sidebar.selectbox("选择系统提示", list(st.session_state.custom_prompts.keys()) + ["自定义"])
if selected_prompt == "自定义":
    system_prompt = st.sidebar.text_area("输入自定义系统提示", "")
else:
    system_prompt = st.session_state.custom_prompts[selected_prompt]

# When the system prompt changes, restart the conversation seeded with it
# (message index 0 is always the system message)
if 'current_prompt' not in st.session_state or st.session_state.current_prompt != system_prompt:
    st.session_state.messages = [{"role": "system", "content": system_prompt}]
    st.session_state.current_prompt = system_prompt
71
+
72
# Render the conversation history (skip index 0 — the system message)
for message in st.session_state.messages[1:]:
    with st.chat_message(message["role"]):
        if message["role"] == "assistant":
            # Assistant replies are shown as a copyable markdown code block
            st.code(message["content"], language="markdown")
        else:
            st.write(message["content"])

# Chat input box at the bottom of the page
user_input = st.chat_input("请输入你的问题:")
82
+
83
if user_input:
    # Append the user's message to the history and echo it in the UI
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.write(user_input)

    # Stream the assistant's reply into a live placeholder
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        max_retries = 3
        retry_delay = 2  # seconds between retries

        for attempt in range(max_retries):
            # BUG FIX: discard any partial text from a failed earlier
            # attempt, otherwise a retry appends on top of it and the
            # reply contains duplicated content.
            full_response = ""
            try:
                # Stream chunks from the chat-completion endpoint
                for chunk in client.chat.completions.create(
                    model=selected_model,
                    messages=st.session_state.messages,
                    stream=True,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    top_p=top_p
                ):
                    content = chunk.choices[0].delta.content
                    if content:
                        full_response += content
                        # Trailing "▌" acts as a typing cursor
                        message_placeholder.markdown(full_response + "▌")

                break  # success — stop retrying
            except (requests.exceptions.RequestException, ConnectionError) as e:
                if attempt < max_retries - 1:
                    st.warning(f"连接错误,正在重试... ({attempt + 1}/{max_retries})")
                    time.sleep(retry_delay)
                else:
                    st.error(f"连接失败,请稍后再试。错误信息:{str(e)}")
                    full_response = "抱歉,我现在无法回答。请稍后再试。"

        # Final render without the cursor
        message_placeholder.markdown(full_response)

    # Record the assistant's reply in the history
    st.session_state.messages.append({"role": "assistant", "content": full_response})

    # Show the reply once more as a copyable code block below the chat
    st.code(full_response, language="markdown")
129
+
130
# "Clear conversation" button: keep only the current system message
if st.button("清除对话"):
    st.session_state.messages = [{"role": "system", "content": system_prompt}]
    st.rerun()  # st.rerun() replaces the removed st.experimental_rerun()
134
+
135
# System-prompt management (bottom of the sidebar)
st.sidebar.markdown("---")
st.sidebar.subheader("系统提示管理")
new_prompt_name = st.sidebar.text_input("新系统提示名称")
new_prompt_content = st.sidebar.text_area("新系统提示内容")
if st.sidebar.button("添加新系统提示"):
    if new_prompt_name and new_prompt_content:
        # Store in session state and persist to disk
        st.session_state.custom_prompts[new_prompt_name] = new_prompt_content
        save_prompts(st.session_state.custom_prompts)
        st.sidebar.success(f"已添加新系统提示:{new_prompt_name}")
    else:
        # Previously a click with empty fields was silently ignored;
        # give the user explicit feedback instead.
        st.sidebar.warning("请输入系统提示名称和内容")
145
+
146
# Delete a saved system prompt (persists the change to disk).
# NOTE(review): deleting the currently selected prompt only takes effect
# on the next rerun, when the selectbox options are rebuilt — confirm
# this is the intended UX.
prompt_to_delete = st.sidebar.selectbox("选择要删除的系统提示", list(st.session_state.custom_prompts.keys()))
if st.sidebar.button("删除选中的系统提示"):
    if prompt_to_delete in st.session_state.custom_prompts:
        del st.session_state.custom_prompts[prompt_to_delete]
        save_prompts(st.session_state.custom_prompts)
        st.sidebar.success(f"已删除系统提示:{prompt_to_delete}")