doctorsafe committed
Commit
677c941
1 Parent(s): 44ede5e

Upload 8 files

Files changed (8)
  1. check_proxy.py +22 -0
  2. config.py +29 -0
  3. functional.py +55 -0
  4. functional_crazy.py +68 -0
  5. predict.py +191 -0
  6. show_math.py +80 -0
  7. theme.py +82 -0
  8. toolbox.py +187 -0
check_proxy.py ADDED
@@ -0,0 +1,22 @@
+
+ def check_proxy(proxies):
+     import requests
+     proxies_https = proxies['https'] if proxies is not None else '无'
+     try:
+         response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
+         data = response.json()
+         print(f'查询代理的地理位置,返回的结果是{data}')
+         country = data['country_name']
+         result = f"代理配置 {proxies_https}, 代理所在地:{country}"
+         print(result)
+         return result
+     except:
+         result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
+         print(result)
+         return result
+
+
+ if __name__ == '__main__':
+     try: from config_private import proxies    # config_private.py (if os.path.exists('config_private.py')) holds personal secrets such as the API key and proxy address
+     except: from config import proxies
+     check_proxy(proxies)
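
For reference, a minimal usage sketch (not part of this commit): `check_proxy` takes any requests-style proxy dict, or `None` to test the direct connection. The host and port below are illustrative assumptions, and `socks5h://` URLs additionally require `pip install requests[socks]`.

```python
from check_proxy import check_proxy

# assumption: a local HTTP proxy listening on 127.0.0.1:8888 (placeholder values)
hypothetical_proxies = {
    "http":  "http://127.0.0.1:8888",
    "https": "http://127.0.0.1:8888",
}
print(check_proxy(hypothetical_proxies))  # reports the proxy's geolocation, or a timeout warning
print(check_proxy(None))                  # with no proxy, geolocates the direct connection instead
```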
config.py ADDED
@@ -0,0 +1,29 @@
+ # API_KEY = "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"    (this key is invalid)
+ API_KEY = "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"
+ API_URL = "https://api.openai.com/v1/chat/completions"
+
+ # Set to True to route requests through a proxy
+ USE_PROXY = True
+ if USE_PROXY:
+     # Proxy address: check your circumvention software for its protocol (socks5/http), host (localhost) and port (11284)
+     proxies = { "http": "socks5h://127.0.0.1:7890", "https": "socks5h://127.0.0.1:7890", }
+     print('网络代理状态:运行。')
+ else:
+     proxies = None
+     print('网络代理状态:未配置。无代理状态下很可能无法访问。')
+
+ # How long to wait after sending a request to OpenAI before judging it timed out
+ TIMEOUT_SECONDS = 20
+
+ # Port for the web UI; -1 means a random port
+ WEB_PORT = -1
+
+ # Retry limit when OpenAI does not respond (slow network, proxy failure, invalid key)
+ MAX_RETRY = 2
+
+ # Which OpenAI model to use (gpt-4 is currently only open to approved applicants)
+ LLM_MODEL = "gpt-3.5-turbo"
+
+ # Check that the config was not left unmodified
+ if API_KEY == "sk-":
+     assert False, "请在config文件中修改API密钥, 添加海外代理之后再运行"
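
Since every module first tries `from config_private import ...`, a sketch of what such a `config_private.py` might look like may be useful (this file is intentionally not part of the commit; all values below are placeholders). Note that the importers pull all the names they need in a single try/except, so the private file must define every name the importer asks for:

```python
# config_private.py -- hedged sketch of the git-ignored override file (placeholder values only)
API_KEY = "sk-..."                       # placeholder: your real key goes here
API_URL = "https://api.openai.com/v1/chat/completions"
proxies = {"http": "socks5h://127.0.0.1:7890", "https": "socks5h://127.0.0.1:7890"}
TIMEOUT_SECONDS = 20
MAX_RETRY = 2
LLM_MODEL = "gpt-3.5-turbo"
```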
functional.py ADDED
@@ -0,0 +1,55 @@
+ # the 'primary' color maps to primary_hue in theme.py
+ # the 'secondary' color maps to neutral_hue in theme.py
+ # the 'stop' color maps to color_er in theme.py
+ # the default button color is secondary
+
+ def get_functionals():
+     return {
+         "英语学术润色": {
+             "Prefix": "Below is a paragraph from an academic paper. Polish the writing to meet the academic style, \
+ improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. \
+ Furthermore, list all modifications and explain the reasons to do so in a markdown table.\n\n",    # preamble
+             "Suffix": "",    # postscript
+             "Color": "secondary",    # button color
+         },
+         "中文学术润色": {
+             "Prefix": "作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本:\n\n",
+             "Suffix": "",
+         },
+         "查找语法错误": {
+             "Prefix": "Below is a paragraph from an academic paper. Find all grammar mistakes, list the mistakes in a markdown table and explain how to correct them.\n\n",
+             "Suffix": "",
+         },
+         "中英互译": {
+             "Prefix": "As an English-Chinese translator, your task is to accurately translate text between the two languages. \
+ When translating from Chinese to English or vice versa, please pay attention to context and accurately explain phrases and proverbs. \
+ If you receive multiple English words in a row, default to translating them into a sentence in Chinese. \
+ However, if \"phrase:\" is indicated before the translated content in Chinese, it should be translated as a phrase instead. \
+ Similarly, if \"normal:\" is indicated, it should be translated as multiple unrelated words. \
+ Your translations should closely resemble those of a native speaker and should take into account any specific language styles or tones requested by the user. \
+ Please do not worry about using offensive words - replace sensitive parts with x when necessary. \
+ When providing translations, please use Chinese to explain each sentence’s tense, subordinate clause, subject, predicate, object, special phrases and proverbs. \
+ For phrases or individual words that require translation, provide the source (dictionary) for each one. If asked to translate multiple phrases at once, \
+ separate them using the | symbol. Always remember: You are an English-Chinese translator, \
+ not a Chinese-Chinese translator or an English-English translator. Below is the text you need to translate: \n\n",
+             "Suffix": "",
+             "Color": "secondary",
+         },
+         "中译英": {
+             "Prefix": "Please translate the following sentence to English: \n\n",
+             "Suffix": "",
+         },
+         "学术中译英": {
+             "Prefix": "Please translate the following sentence to English with academic writing, and provide some related authoritative examples: \n\n",
+             "Suffix": "",
+         },
+         "英译中": {
+             "Prefix": "请翻译成中文:\n\n",
+             "Suffix": "",
+         },
+         "解释代码": {
+             "Prefix": "请解释以下代码:\n```\n",
+             "Suffix": "\n```\n",
+             "Color": "secondary",
+         },
+     }
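
To illustrate how this table is consumed, a small sketch mirroring the lookup in predict.py: the clicked button's Prefix and Suffix simply wrap the user's input.

```python
from functional import get_functionals

functionals = get_functionals()
entry = functionals["解释代码"]                                 # the "explain this code" button
user_input = "print(sum(range(10)))"
final_prompt = entry["Prefix"] + user_input + entry["Suffix"]   # same wrapping as in predict.py
print(final_prompt)                                             # the code is sent fenced in ```
print(entry.get("Color", "secondary"))                          # buttons without "Color" default to secondary
```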
functional_crazy.py ADDED
@@ -0,0 +1,68 @@
+
+ def get_crazy_functionals():
+     from crazy_functions.读文章写摘要 import 读文章写摘要
+     from crazy_functions.生成函数注释 import 批量生成函数注释
+     from crazy_functions.解析项目源代码 import 解析项目本身
+     from crazy_functions.解析项目源代码 import 解析一个Python项目
+     from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
+     from crazy_functions.解析项目源代码 import 解析一个C项目
+     from crazy_functions.高级功能函数模板 import 高阶功能模板函数
+
+     return {
+         "[实验] 请解析并解构此项目本身": {
+             "Function": 解析项目本身
+         },
+         "[实验] 解析整个py项目(配合input输入框)": {
+             "Color": "stop",    # button color
+             "Function": 解析一个Python项目
+         },
+         "[实验] 解析整个C++项目头文件(配合input输入框)": {
+             "Color": "stop",    # button color
+             "Function": 解析一个C项目的头文件
+         },
+         "[实验] 解析整个C++项目(配合input输入框)": {
+             "Color": "stop",    # button color
+             "Function": 解析一个C项目
+         },
+         "[实验] 读tex论文写摘要(配合input输入框)": {
+             "Color": "stop",    # button color
+             "Function": 读文章写摘要
+         },
+         "[实验] 批量生成函数注释(配合input输入框)": {
+             "Color": "stop",    # button color
+             "Function": 批量生成函数注释
+         },
+         "[实验] 实验功能函数模板": {
+             "Color": "stop",    # button color
+             "Function": 高阶功能模板函数
+         },
+     }
+
+ def on_file_uploaded(files, chatbot, txt):
+     if len(files) == 0: return chatbot, txt
+     import shutil, os, time, glob
+     from toolbox import extract_archive
+     try: shutil.rmtree('./private_upload/')
+     except: pass
+     time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+     os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
+     for file in files:
+         file_origin_name = os.path.basename(file.orig_name)
+         shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
+         extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
+                         dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
+     moved_files = [fp for fp in glob.glob('private_upload/**/*', recursive=True)]
+     txt = f'private_upload/{time_tag}'
+     moved_files_str = '\t\n\n'.join(moved_files)
+     chatbot.append(['我上传了文件,请查收',
+                     f'[Local Message] 收到以下文件: \n\n{moved_files_str}\n\n调用路径参数已自动修正到: \n\n{txt}\n\n现在您可以直接选择任意实验性功能'])
+     return chatbot, txt
+
+ def on_report_generated(files, chatbot):
+     from toolbox import find_recent_files
+     report_files = find_recent_files('gpt_log')
+     if len(report_files) == 0: return report_files, chatbot
+     # files.extend(report_files)
+     chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧文件上传区,请查收。'])
+     return report_files, chatbot
+
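
The crazy_functions modules themselves are not in this upload, so the exact plugin contract can only be inferred; from CatchException in toolbox.py and the generator protocol in predict.py, a compatible entry would be a generator with the signature below. A hedged sketch (示例功能 is a hypothetical name):

```python
from toolbox import CatchException

@CatchException
def 示例功能(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
    # assumption: plugins are generators yielding (chatbot, history, status) triples
    chatbot.append((txt, f"[Local Message] received: {txt}"))
    yield chatbot, history, '正常'

# and would be registered like the built-ins above:
# "[实验] 示例功能": {"Color": "stop", "Function": 示例功能}
```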
predict.py ADDED
@@ -0,0 +1,191 @@
+ # Borrows from the ChuanhuChatGPT project: https://github.com/GaiZhenbiao/ChuanhuChatGPT
+
+ import json
+ import gradio as gr
+ import logging
+ import traceback
+ import requests
+ import importlib
+
+ # config_private.py holds personal secrets such as the API key and proxy address
+ # On load, look for the private config_private file first (kept out of git); if present, it overrides config.py
+ try: from config_private import proxies, API_URL, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, LLM_MODEL
+ except: from config import proxies, API_URL, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, LLM_MODEL
+
+ timeout_bot_msg = '[local] Request timeout, network error. Please check proxy settings in config.py.'
+
+ def get_full_error(chunk, stream_response):
+     """
+     Collect the complete error message returned by OpenAI.
+     """
+     while True:
+         try:
+             chunk += next(stream_response)
+         except:
+             break
+     return chunk
+
+ def predict_no_ui(inputs, top_p, temperature, history=[]):
+     """
+     Send a request to ChatGPT and wait for the full reply in one go, without showing intermediate progress.
+     A simplified version of the predict function.
+     Used when the payload is large, or to build multi-threaded, nested, more complex features.
+
+     inputs is the text of the current query
+     top_p, temperature are ChatGPT's internal tuning parameters
+     history is the list of previous exchanges
+     (Note: if either inputs or history is too long, a token-overflow error is triggered, and ConnectionAbortedError is raised)
+     """
+     headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt="", stream=False)
+
+     retry = 0
+     while True:
+         try:
+             # make a POST request to the API endpoint, stream=False
+             response = requests.post(API_URL, headers=headers, proxies=proxies,
+                                      json=payload, stream=False, timeout=TIMEOUT_SECONDS*2); break
+         except requests.exceptions.ReadTimeout as e:
+             retry += 1
+             traceback.print_exc()
+             if MAX_RETRY != 0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
+             if retry > MAX_RETRY: raise TimeoutError
+
+     try:
+         result = json.loads(response.text)["choices"][0]["message"]["content"]
+         return result
+     except Exception as e:
+         if "choices" not in response.text: print(response.text)
+         raise ConnectionAbortedError("Json解析不合常规,可能是文本过长" + response.text)
+
+
+ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='',
+             stream=True, additional_fn=None):
+     """
+     Send a request to ChatGPT and stream back the output.
+     Used for the basic conversation feature.
+     inputs is the text of the current query
+     top_p, temperature are ChatGPT's internal tuning parameters
+     history is the list of previous exchanges (note: if either inputs or history is too long, a token-overflow error is triggered)
+     chatbot is the conversation list shown in the web UI; modify it, then yield it out, to update the interface directly
+     additional_fn indicates which button was clicked; the buttons are defined in functional.py
+     """
+     if additional_fn is not None:
+         import functional
+         importlib.reload(functional)
+         functional = functional.get_functionals()
+         inputs = functional[additional_fn]["Prefix"] + inputs + functional[additional_fn]["Suffix"]
+
+     if stream:
+         raw_input = inputs
+         logging.info(f'[raw_input] {raw_input}')
+         chatbot.append((inputs, ""))
+         yield chatbot, history, "等待响应"
+
+     headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt, stream)
+     history.append(inputs); history.append(" ")
+
+     retry = 0
+     while True:
+         try:
+             # make a POST request to the API endpoint, stream=True
+             response = requests.post(API_URL, headers=headers, proxies=proxies,
+                                      json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
+         except:
+             retry += 1
+             chatbot[-1] = (chatbot[-1][0], timeout_bot_msg)
+             retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
+             yield chatbot, history, "请求超时" + retry_msg
+             if retry > MAX_RETRY: raise TimeoutError
+
+     gpt_replying_buffer = ""
+
+     is_head_of_the_stream = True
+     if stream:
+         stream_response = response.iter_lines()
+         while True:
+             chunk = next(stream_response)
+             # print(chunk.decode()[6:])
+             if is_head_of_the_stream:
+                 # the first frame of the stream carries no content
+                 is_head_of_the_stream = False; continue
+
+             if chunk:
+                 try:
+                     if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
+                         # an empty delta marks the end of the stream; gpt_replying_buffer is complete
+                         logging.info(f'[response] {gpt_replying_buffer}')
+                         break
+                     # handle the body of the stream
+                     chunkjson = json.loads(chunk.decode()[6:])
+                     status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
+                     # if an exception is raised here, the text is usually too long; see the output of get_full_error for details
+                     gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
+                     history[-1] = gpt_replying_buffer
+                     chatbot[-1] = (history[-2], history[-1])
+                     yield chatbot, history, status_text
+
+                 except Exception as e:
+                     traceback.print_exc()
+                     yield chatbot, history, "Json解析不合常规"
+                     chunk = get_full_error(chunk, stream_response)
+                     error_msg = chunk.decode()
+                     if "reduce the length" in error_msg:
+                         chatbot[-1] = (chatbot[-1][0], "[Local Message] Input (or history) is too long, please reduce input or clear history by refreshing this page.")
+                         history = []
+                     elif "Incorrect API key" in error_msg:
+                         chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key provided.")
+                     else:
+                         from toolbox import regular_txt_to_markdown
+                         tb_str = regular_txt_to_markdown(traceback.format_exc())
+                         chatbot[-1] = (chatbot[-1][0], f"[Local Message] Json Error \n\n {tb_str} \n\n {regular_txt_to_markdown(chunk.decode()[4:])}")
+                     yield chatbot, history, "Json解析不合常规" + error_msg
+                     return
+
+ def generate_payload(inputs, top_p, temperature, history, system_prompt, stream):
+     """
+     Integrate all the information, select the LLM model, and build the HTTP request, ready to send.
+     """
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {API_KEY}"
+     }
+
+     conversation_cnt = len(history) // 2
+
+     messages = [{"role": "system", "content": system_prompt}]
+     if conversation_cnt:
+         for index in range(0, 2*conversation_cnt, 2):
+             what_i_have_asked = {}
+             what_i_have_asked["role"] = "user"
+             what_i_have_asked["content"] = history[index]
+             what_gpt_answer = {}
+             what_gpt_answer["role"] = "assistant"
+             what_gpt_answer["content"] = history[index+1]
+             if what_i_have_asked["content"] != "":
+                 if what_gpt_answer["content"] == "": continue
+                 if what_gpt_answer["content"] == timeout_bot_msg: continue
+                 messages.append(what_i_have_asked)
+                 messages.append(what_gpt_answer)
+             else:
+                 messages[-1]['content'] = what_gpt_answer['content']
+
+     what_i_ask_now = {}
+     what_i_ask_now["role"] = "user"
+     what_i_ask_now["content"] = inputs
+     messages.append(what_i_ask_now)
+
+     payload = {
+         "model": LLM_MODEL,
+         "messages": messages,
+         "temperature": temperature,    # 1.0,
+         "top_p": top_p,    # 1.0,
+         "n": 1,
+         "stream": stream,
+         "presence_penalty": 0,
+         "frequency_penalty": 0,
+     }
+
+     print(f" {LLM_MODEL} : {conversation_cnt} : {inputs}")
+     return headers, payload
+
+
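
A minimal usage sketch (not part of this commit; it requires a valid API key and a working proxy): predict_no_ui blocks until the whole answer arrives, while predict is a generator that must be iterated to stream partial output, which is exactly how the gradio UI drives it.

```python
from predict import predict_no_ui, predict

# one-shot, no intermediate output
answer = predict_no_ui("Explain in one sentence what a proxy is.",
                       top_p=1.0, temperature=1.0, history=[])
print(answer)

# streaming: each iteration yields a progressively longer reply
chatbot, history = [], []
for chatbot, history, status in predict("Hello", top_p=1.0, temperature=1.0,
                                        chatbot=chatbot, history=history):
    print(status, chatbot[-1][1])
```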
show_math.py ADDED
@@ -0,0 +1,80 @@
+ # This program is adapted from: https://github.com/polarwinkel/mdtex2html
+
+ from latex2mathml.converter import convert as tex2mathml
+ import re
+
+ incomplete = '<font style="color:orange;" class="tooltip">&#9888;<span class="tooltiptext">formula incomplete</span></font>'
+ convError = '<font style="color:red" class="tooltip">&#9888;<span class="tooltiptext">LaTeX-convert-error</span></font>'
+
+ def convert(mdtex, extensions=[], splitParagraphs=True):
+     ''' converts recursively the Markdown-LaTeX mixture to HTML with MathML '''
+     found = False
+     # handle all paragraphs separately (prevents aftereffects)
+     if splitParagraphs:
+         parts = re.split("\n\n", mdtex)
+         result = ''
+         for part in parts:
+             result += convert(part, extensions, splitParagraphs=False)
+         return result
+     # find the first $$-formula:
+     parts = re.split(r'\${2}', mdtex, 2)
+     if len(parts) > 1:
+         found = True
+         result = convert(parts[0], extensions, splitParagraphs=False) + '\n'
+         try:
+             result += '<div class="blockformula">' + tex2mathml(parts[1]) + '</div>\n'
+         except:
+             result += '<div class="blockformula">' + convError + '</div>'
+         if len(parts) == 3:
+             result += convert(parts[2], extensions, splitParagraphs=False)
+         else:
+             result += '<div class="blockformula">' + incomplete + '</div>'
+     # else find the first $-formula:
+     else:
+         parts = re.split(r'\${1}', mdtex, 2)
+         if len(parts) > 1 and not found:
+             found = True
+             try:
+                 mathml = tex2mathml(parts[1])
+             except:
+                 mathml = convError
+             if parts[0].endswith('\n\n') or parts[0] == '':  # make sure the text block starts before the formula!
+                 parts[0] = parts[0] + '&#x200b;'
+             if len(parts) == 3:
+                 result = convert(parts[0] + mathml + parts[2], extensions, splitParagraphs=False)
+             else:
+                 result = convert(parts[0] + mathml + incomplete, extensions, splitParagraphs=False)
+         # else find the first \[..\]-equation:
+         else:
+             parts = re.split(r'\\\[', mdtex, 1)
+             if len(parts) > 1 and not found:
+                 found = True
+                 result = convert(parts[0], extensions, splitParagraphs=False) + '\n'
+                 parts = re.split(r'\\\]', parts[1], 1)
+                 try:
+                     result += '<div class="blockformula">' + tex2mathml(parts[0]) + '</div>\n'
+                 except:
+                     result += '<div class="blockformula">' + convError + '</div>'
+                 if len(parts) == 2:
+                     result += convert(parts[1], extensions, splitParagraphs=False)
+                 else:
+                     result += '<div class="blockformula">' + incomplete + '</div>'
+             # else find the first \(..\)-equation:
+             else:
+                 parts = re.split(r'\\\(', mdtex, 1)
+                 if len(parts) > 1 and not found:
+                     found = True
+                     subp = re.split(r'\\\)', parts[1], 1)
+                     try:
+                         mathml = tex2mathml(subp[0])
+                     except:
+                         mathml = convError
+                     if parts[0].endswith('\n\n') or parts[0] == '':  # make sure the text block starts before the formula!
+                         parts[0] = parts[0] + '&#x200b;'
+                     if len(subp) == 2:
+                         result = convert(parts[0] + mathml + subp[1], extensions, splitParagraphs=False)
+                     else:
+                         result = convert(parts[0] + mathml + incomplete, extensions, splitParagraphs=False)
+     if not found:
+         result = mdtex
+     return result
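
A usage sketch: convert() walks $$..$$, $..$, \[..\] and \(..\) spans in turn, replaces each with MathML, and leaves the surrounding Markdown untouched; an unterminated delimiter is flagged with the orange "formula incomplete" marker instead.

```python
from show_math import convert

html = convert(r"Euler's identity $$e^{i\pi}+1=0$$ follows from $e^{ix}=\cos x+i\sin x$.")
print(html)   # both formulas come back as <math>...</math> embedded in the surrounding text
```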
theme.py ADDED
@@ -0,0 +1,82 @@
+ import gradio as gr
+
+ # list of colors available in gradio:
+ # gr.themes.utils.colors.slate
+ # gr.themes.utils.colors.gray
+ # gr.themes.utils.colors.zinc
+ # gr.themes.utils.colors.neutral
+ # gr.themes.utils.colors.stone
+ # gr.themes.utils.colors.red
+ # gr.themes.utils.colors.orange
+ # gr.themes.utils.colors.amber
+ # gr.themes.utils.colors.yellow
+ # gr.themes.utils.colors.lime
+ # gr.themes.utils.colors.green
+ # gr.themes.utils.colors.emerald
+ # gr.themes.utils.colors.teal
+ # gr.themes.utils.colors.cyan
+ # gr.themes.utils.colors.sky
+ # gr.themes.utils.colors.blue
+ # gr.themes.utils.colors.indigo
+ # gr.themes.utils.colors.violet
+ # gr.themes.utils.colors.purple
+ # gr.themes.utils.colors.fuchsia
+ # gr.themes.utils.colors.pink
+ # gr.themes.utils.colors.rose
+
+ def adjust_theme():
+     try:
+         color_er = gr.themes.utils.colors.pink
+         set_theme = gr.themes.Default(
+             primary_hue=gr.themes.utils.colors.orange,
+             neutral_hue=gr.themes.utils.colors.gray,
+             font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui", "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
+             font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
+         set_theme.set(
+             # Colors
+             input_background_fill_dark="*neutral_800",
+             # Transition
+             button_transition="none",
+             # Shadows
+             button_shadow="*shadow_drop",
+             button_shadow_hover="*shadow_drop_lg",
+             button_shadow_active="*shadow_inset",
+             input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
+             input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
+             input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
+             checkbox_label_shadow="*shadow_drop",
+             block_shadow="*shadow_drop",
+             form_gap_width="1px",
+             # Button borders
+             input_border_width="1px",
+             input_background_fill="white",
+             # Gradients
+             stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
+             stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
+             error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
+             error_background_fill_dark="*background_fill_primary",
+             checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
+             checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
+             checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
+             checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
+             button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
+             button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
+             button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
+             button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
+             button_primary_border_color_dark="*primary_500",
+             button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
+             button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
+             button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
+             button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
+             button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
+             button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
+             button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
+             button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
+             button_cancel_border_color=color_er.c200,
+             button_cancel_border_color_dark=color_er.c600,
+             button_cancel_text_color=color_er.c600,
+             button_cancel_text_color_dark="white",
+         )
+     except:
+         set_theme = None; print('gradio版本较旧, 不能自定义字体和颜色')
+     return set_theme
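
A usage sketch of how the theme is meant to be applied (the Blocks layout here is a placeholder, not this project's actual UI). Because adjust_theme() returns None on older gradio versions, callers can pass the result straight through; theme=None simply falls back to the default theme:

```python
import gradio as gr
from theme import adjust_theme

set_theme = adjust_theme()                 # None on old gradio versions
with gr.Blocks(theme=set_theme) as demo:   # assumption: a minimal placeholder layout
    gr.Chatbot()
demo.launch()
```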
toolbox.py ADDED
@@ -0,0 +1,187 @@
+ import markdown, mdtex2html, threading
+ from show_math import convert as convert_math
+ from functools import wraps
+
+ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]):
+     """
+     Call the simple predict_no_ui interface while keeping a minimal UI heartbeat; when the conversation gets too long, it is automatically truncated by bisection.
+     """
+     import time
+     try: from config_private import TIMEOUT_SECONDS, MAX_RETRY
+     except: from config import TIMEOUT_SECONDS, MAX_RETRY
+     from predict import predict_no_ui
+     # when multi-threading, a mutable structure is needed to pass information between threads;
+     # a list is the simplest mutable structure: slot 0 holds the GPT output, slot 1 carries error messages
+     mutable = [None, '']
+     # multi-threading worker
+     def mt(i_say, history):
+         while True:
+             try:
+                 mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
+                 break
+             except ConnectionAbortedError as e:
+                 if len(history) > 0:
+                     history = [his[len(his)//2:] for his in history if his is not None]
+                     mutable[1] = 'Warning! History conversation is too long, cut into half. '
+                 else:
+                     i_say = i_say[:len(i_say)//2]
+                     mutable[1] = 'Warning! Input file is too long, cut into half. '
+             except TimeoutError as e:
+                 mutable[0] = '[Local Message] Failed with timeout'; break    # give up once predict_no_ui has exhausted its own retries
+     # start a new thread to send the HTTP request
+     thread_name = threading.Thread(target=mt, args=(i_say, history)); thread_name.start()
+     # the original thread keeps updating the UI with a timeout countdown while waiting for the worker to finish
+     cnt = 0
+     while thread_name.is_alive():
+         cnt += 1
+         chatbot[-1] = (i_say_show_user, f"[Local Message] {mutable[1]}waiting gpt response {cnt}/{TIMEOUT_SECONDS*2*(MAX_RETRY+1)}" + ''.join(['.']*(cnt % 4)))
+         yield chatbot, history, '正常'
+         time.sleep(1)
+     # fetch the GPT output from the mutable list
+     gpt_say = mutable[0]
+     return gpt_say
+
+ def write_results_to_file(history, file_name=None):
+     """
+     Write the conversation record `history` to a file in Markdown format. If no file name is given, one is generated from the current time.
+     """
+     import os, time
+     if file_name is None:
+         # file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
+         file_name = 'chatGPT分析报告' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
+     os.makedirs('./gpt_log/', exist_ok=True)
+     with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
+         f.write('# chatGPT 分析报告\n')
+         for i, content in enumerate(history):
+             if i % 2 == 0: f.write('## ')
+             f.write(content)
+             f.write('\n\n')
+     res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
+     print(res)
+     return res
+
+ def regular_txt_to_markdown(text):
+     """
+     Convert plain text into Markdown-formatted text.
+     """
+     text = text.replace('\n', '\n\n')
+     text = text.replace('\n\n\n', '\n\n')
+     text = text.replace('\n\n\n', '\n\n')
+     return text
+
+ def CatchException(f):
+     """
+     Decorator: catch any exception raised in function f, wrap it into a generator, and display it in the chat.
+     """
+     @wraps(f)
+     def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+         try:
+             yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
+         except Exception as e:
+             import traceback
+             from check_proxy import check_proxy
+             try: from config_private import proxies
+             except: from config import proxies
+             tb_str = regular_txt_to_markdown(traceback.format_exc())
+             chatbot[-1] = (chatbot[-1][0], f"[Local Message] 实验性函数调用出错: \n\n {tb_str} \n\n 当前代理可用性: \n\n {check_proxy(proxies)}")
+             yield chatbot, history, f'异常 {e}'
+     return decorated
+
+ def report_execption(chatbot, history, a, b):
+     """
+     Append an error report to the chatbot.
+     """
+     chatbot.append((a, b))
+     history.append(a); history.append(b)
+
+ def text_divide_paragraph(text):
+     """
+     Split the text on paragraph separators and generate HTML with paragraph tags.
+     """
+     if '```' in text:
+         # careful input
+         return text
+     else:
+         # wtf input
+         lines = text.split("\n")
+         for i, line in enumerate(lines):
+             if i != 0: lines[i] = "<p>" + lines[i].replace(" ", "&nbsp;") + "</p>"
+         text = "".join(lines)
+         return text
+
+ def markdown_convertion(txt):
+     """
+     Convert Markdown-formatted text to HTML. If it contains math formulas, convert the formulas to HTML first.
+     """
+     if ('$' in txt) and ('```' not in txt):
+         return markdown.markdown(txt, extensions=['fenced_code', 'tables']) + '<br><br>' + \
+                markdown.markdown(convert_math(txt, splitParagraphs=False), extensions=['fenced_code', 'tables'])
+     else:
+         return markdown.markdown(txt, extensions=['fenced_code', 'tables'])
+
+
+ def format_io(self, y):
+     """
+     Parse input and output into HTML: paragraph-ize the input part of the last item in y, and convert the Markdown and math formulas in the output part to HTML.
+     """
+     if y is None: return []
+     i_ask, gpt_reply = y[-1]
+     i_ask = text_divide_paragraph(i_ask)  # the input side is free-form, so preprocess it a little
+     y[-1] = (
+         None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
+         None if gpt_reply is None else markdown_convertion(gpt_reply)
+     )
+     return y
+
+
+ def find_free_port():
+     """
+     Return an unused port currently available on this system.
+     """
+     import socket
+     from contextlib import closing
+     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+         s.bind(('', 0))
+         s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+         return s.getsockname()[1]
+
+
+ def extract_archive(file_path, dest_dir):
+     import zipfile
+     import tarfile
+     import os
+     # Get the file extension of the input file
+     file_extension = os.path.splitext(file_path)[1]
+
+     # Extract the archive based on its extension
+     if file_extension == '.zip':
+         with zipfile.ZipFile(file_path, 'r') as zipobj:
+             zipobj.extractall(path=dest_dir)
+             print("Successfully extracted zip archive to {}".format(dest_dir))
+
+     elif file_extension in ['.tar', '.gz', '.bz2']:
+         with tarfile.open(file_path, 'r:*') as tarobj:
+             tarobj.extractall(path=dest_dir)
+             print("Successfully extracted tar archive to {}".format(dest_dir))
+     else:
+         return
+
+ def find_recent_files(directory):
+     """
+     me: find files that were created within one minute under a directory with python, write a function
+     gpt: here it is!
+     """
+     import os
+     import time
+     current_time = time.time()
+     one_minute_ago = current_time - 60
+     recent_files = []
+
+     for filename in os.listdir(directory):
+         file_path = os.path.join(directory, filename)
+         if file_path.endswith('.log'): continue
+         created_time = os.path.getctime(file_path)
+         if created_time >= one_minute_ago:
+             recent_files.append(file_path)
+
+     return recent_files
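
To round off, a sketch of the report round-trip these helpers implement together: write_results_to_file drops a timestamped Markdown report into ./gpt_log/, and find_recent_files is what on_report_generated uses a moment later to pick it up (anything created in the last minute, .log files excluded).

```python
from toolbox import write_results_to_file, find_recent_files

history = ["What does this repo do?",                       # sample conversation (placeholder)
           "It wraps the OpenAI chat API in a gradio UI."]
print(write_results_to_file(history))   # e.g. .../gpt_log/chatGPT分析报告2023-....md
print(find_recent_files('gpt_log'))     # the freshly written report shows up here
```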