Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,75 +1,43 @@
-import gradio as gr
-from huggingface_hub import hf_hub_download
 import os
 import subprocess
 
-
-
-
-
-
-
-
-
-
-
-
-if not os.path.exists("llama.cpp"):
-    os.system("git clone https://github.com/ggerganov/llama.cpp.git")
-    os.system("cd llama.cpp && mkdir build && cd build && cmake .. && make")
-
-model_path = download_model(model_url)
+def run_command(command):
+    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    stdout, stderr = process.communicate()
+    if process.returncode != 0:
+        raise Exception(f"Command failed with error: {stderr.decode('utf-8')}")
+    return stdout.decode('utf-8')
+
+def main():
+    # Clone the repository
+    print("Cloning the repository...")
+    run_command("git clone https://github.com/ggerganov/llama.cpp.git")
 
-
-os.
-with open(os.path.join(prompts_dir, "TcmChat.txt"), "w") as f:
-    f.write("You are a helpful TCM medical assistant named 仲景中医大语言模型.\n")
+    # Change directory to the cloned repository
+    os.chdir("/content/llama.cpp")
 
-
-
-
-def chat_with_model(user_input, history):
-    prompt = f"User: {user_input}\nAssistant:"
-    with open(os.path.join("llama.cpp/prompts", "TcmChat.txt"), "a") as f:
-        f.write(prompt + "\n")
-
-    # Run the command and capture the output
-    command = f"./llama.cpp/build/bin/main -m models/ZhongJing1_5-1_8b-q4_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r \"User:\" -f llama.cpp/prompts/chat-with-bob.txt"
-    result = subprocess.run(command, shell=True, capture_output=True, text=True)
+    # Download the file
+    print("Downloading the GGUF file...")
+    run_command("wget -O ZhongJing1_5-1_8b-q4_0.gguf https://huggingface.co/CMLL/ZhongJing-2-1_8b-GGUF/resolve/main/ZhongJing1_5-1_8b-q4_0.gguf?download=true")
 
-    #
-
-
-    capture = False
-    for line in output_lines:
-        if "User:" in line:
-            capture = True
-        if capture:
-            response += line + "\n"
+    # Compile the project
+    print("Compiling the project...")
+    run_command("make")
 
-
-
-
-
-
-
-
-
-
-
-    with gr.Column():
-        setup_btn = gr.Button("Setup Environment")
-        setup_output = gr.Textbox(label="Setup Output")
-
-    with gr.Column():
-        user_input = gr.Textbox(show_label=False, placeholder="Enter your message...")
-        submit_btn = gr.Button("Submit")
+    # Modify the prompts/chat-with-bob.txt file
+    print("Modifying the chat-with-bob.txt file...")
+    dialog_content = """
+    Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is a helpful TCM medical assistant, also named 仲景中医大语言模型, and never fails to answer the User's requests immediately and with precision.
+
+    User: Hello, Bob.
+    Bob: Hello. How may I help you today?
+    """
+    with open("prompts/chat-with-bob.txt", "w") as f:
+        f.write(dialog_content)
 
-
-
+    # Run the llama-cli command
+    print("Running the llama-cli command...")
+    run_command("./llama-cli -m /content/ZhongJing1_5-1_8b-q4_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r 'User:' -f prompts/chat-with-bob.txt")
 
 if __name__ == "__main__":
-
-
-
-
+    main()
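A note on the download step: the old version imported hf_hub_download from huggingface_hub but never called it, and the new version shells out to wget instead. If huggingface_hub is available in the Space, the same file can be fetched without a subprocess. A minimal sketch, with repo_id and filename read off the wget URL above; the cache path it returns would stand in for the hard-coded /content path:

from huggingface_hub import hf_hub_download

# Download the GGUF weights into the local Hugging Face cache and return the path.
model_path = hf_hub_download(
    repo_id="CMLL/ZhongJing-2-1_8b-GGUF",
    filename="ZhongJing1_5-1_8b-q4_0.gguf",
)
print(model_path)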
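The run_command helper also hand-rolls what subprocess.run already provides; the removed code even called run(..., capture_output=True, text=True). An equivalent sketch, assuming Python 3.7+ for capture_output:

import subprocess

def run_command(command):
    # check=True raises subprocess.CalledProcessError on a non-zero exit,
    # replacing the manual returncode test; text=True decodes stdout/stderr,
    # so no explicit .decode('utf-8') is needed.
    result = subprocess.run(command, shell=True, capture_output=True,
                            text=True, check=True)
    return result.stdout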
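The final step is a plausible cause of the Space's runtime error: -i starts llama-cli in interactive mode, but run_command blocks in communicate() waiting for the process to exit, and an interactive session with no terminal attached never does. A non-interactive sketch that passes the prompt with -p and lets the call return once the -n 256 tokens are generated; the binary name and model path are copied from the command above and may need adjusting to the actual build layout:

import subprocess

# One bounded generation instead of an interactive session, so the call returns.
cmd = [
    "./llama-cli",
    "-m", "/content/ZhongJing1_5-1_8b-q4_0.gguf",
    "-n", "256",
    "--repeat_penalty", "1.0",
    "-p", "User: Hello, Bob.\nBob:",
]
result = subprocess.run(cmd, capture_output=True, text=True)
print(result.stdout)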