Update app.py
app.py
CHANGED
@@ -1,13 +1,11 @@
 import gradio as gr
-import subprocess
-
-# Path to the llama.cpp executable
-LLAMA_CPP_EXECUTABLE = "/llama.cpp/main"
+from llama_cpp import Llama
 
 # Path to the downloaded GGUF model
-MODEL_PATH = "
+MODEL_PATH = "./shieldgemma-9b.Q4_K_M.gguf"
 
-
+# Initialize the model with llama-cpp-python
+llm = Llama(model_path=MODEL_PATH, n_ctx=2048, n_threads=2, chat_format="chatml")
 
 def generar_prompt(user_prompt, policy):
     prompt = f"""You are a policy expert trying to help determine whether a user prompt is in violation of the following safety policy:
@@ -19,20 +17,6 @@ Does the human question violate the above policy? Your answer must start with 'Y
 """
     return prompt
 
-def llama_cpp_generate(prompt, max_tokens=256):
-    command = [
-        LLAMA_CPP_EXECUTABLE,
-        "--model", MODEL_PATH,
-        "--prompt", prompt,
-        "--temp", "0.7",
-        "--top_p", "0.95",
-        "--repeat_penalty", "1.1",
-        "--n_predict", str(max_tokens)
-    ]
-
-    result = subprocess.run(command, capture_output=True, text=True)
-    return result.stdout.strip()
-
 def verificar_politica(message):
     policies = [
         "No Dangerous Content: The prompt shall not contain or seek generation of content that harms oneself and/or others (e.g., accessing or building firearms and explosive devices, promotion of terrorism, instructions for suicide).",
@@ -40,46 +24,53 @@ def verificar_politica(message):
         "No Hate Speech: The prompt shall not contain or seek generation of content that targets identity and/or protected attributes (e.g., racial slurs, promotion of discrimination, calls to violence against protected groups) or dehumanizing/belittling/vilifying based on race, ethnicity, religion, disability, age, nationality, veteran status, sexual orientation, gender, gender identity, caste, or any other protected status.",
         "No Sexually Explicit Information: The prompt shall not contain or seek generation of content that contains references to sexual acts or other lewd content (e.g., sexually graphic descriptions, content aimed at causing arousal). Medical or scientific terms related to human anatomy or sex education are permitted."
     ]
 
     for policy in policies:
         prompt = generar_prompt(message, policy)
-        response = llama_cpp_generate(prompt)
-
-        if response.startswith("Yes"):
+        response = llm.create_chat_completion(
+            messages=[{"role": "system", "content": prompt}],
+            stream=False
+        )
+        answer = response['choices'][0]['message']['content']
+        if answer.startswith("Yes"):
             return f"Your question violates the following policy: {policy.split(':')[0]}"
     return None
 
-def respond(message):
+def respond(message, history, system_prompt):
     policy_violation = verificar_politica(message)
     if policy_violation:
         return policy_violation
 
-[…ten removed lines were not recoverable from the rendered diff…]
-def submit_message(user_message, chat_history):
-    chat_history = chat_history + [[user_message, None]]
-    assistant_reply = respond(user_message)
-    chat_history[-1][1] = assistant_reply
-    return "", chat_history
-
-submit_button.click(
-    submit_message,
-    inputs=[message, chatbot],
-    outputs=[message, chatbot],
-)
-message.submit(
-    submit_message,
-    inputs=[message, chatbot],
-    outputs=[message, chatbot],
-)
+    messages_prompts = [{"role": "system", "content": system_prompt}]
+    for human, assistant in history:
+        messages_prompts.append({"role": "user", "content": human})
+        messages_prompts.append({"role": "assistant", "content": assistant})
+    messages_prompts.append({"role": "user", "content": message})
+
+    response = llm.create_chat_completion(
+        messages=messages_prompts,
+        stream=False
+    )
+    return response['choices'][0]['message']['content']
 
+def chat_stream_completion(message, history, system_prompt):
+    messages_prompts = [{"role": "system", "content": system_prompt}]
+    for human, assistant in history:
+        messages_prompts.append({"role": "user", "content": human})
+        messages_prompts.append({"role": "assistant", "content": assistant})
+    messages_prompts.append({"role": "user", "content": message})
+
+    response = llm.create_chat_completion(
+        messages=messages_prompts,
+        stream=True
+    )
+    message_repl = ""
+    for chunk in response:
+        if len(chunk['choices'][0]["delta"]) != 0 and "content" in chunk['choices'][0]["delta"]:
+            message_repl += chunk['choices'][0]["delta"]["content"]
+            yield message_repl
 
+gr.ChatInterface(
+    chat_stream_completion,
+    additional_inputs=[gr.Textbox("You are a helpful AI.", label="System Prompt")]
+).queue().launch(server_name="0.0.0.0")
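For anyone reproducing this change outside the Space, here is a minimal standalone sketch of the llama-cpp-python call path the updated app.py relies on. It is illustrative only, not part of the commit, and assumes llama-cpp-python is installed and the shieldgemma-9b.Q4_K_M.gguf file has been downloaded next to the script:

# standalone_check.py — hypothetical smoke test, not part of this commit.
# Assumes: pip install llama-cpp-python, and ./shieldgemma-9b.Q4_K_M.gguf exists.
from llama_cpp import Llama

llm = Llama(
    model_path="./shieldgemma-9b.Q4_K_M.gguf",
    n_ctx=2048,
    n_threads=2,
    chat_format="chatml",
)

# Non-streaming call: the shape used by verificar_politica() and respond().
result = llm.create_chat_completion(
    messages=[{"role": "system", "content": "You are a helpful AI."},
              {"role": "user", "content": "Say hello in one sentence."}],
    stream=False,
)
print(result["choices"][0]["message"]["content"])

# Streaming call: the shape used by chat_stream_completion(); each chunk
# carries a delta dict that may or may not contain a "content" piece.
for chunk in llm.create_chat_completion(
    messages=[{"role": "user", "content": "Count to three."}],
    stream=True,
):
    delta = chunk["choices"][0]["delta"]
    if "content" in delta:
        print(delta["content"], end="", flush=True)
print()

Note that the Space's requirements.txt will also need llama-cpp-python for the new import to resolve; that file is not shown in this diff, so treat it as an assumption.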