Update app.py
app.py
CHANGED
@@ -1,16 +1,24 @@
 import gradio as gr
-from
-
+from datasets import load_dataset
+
 import os
+import spaces
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
+import torch
+from threading import Thread
+from sentence_transformers import SentenceTransformer
+import time
 
-
-
-print("CUDA version:", torch.version.cuda)
+token = os.environ["HF_TOKEN"]
+ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
 
-
-token = os.environ.get("HF_TOKEN")
+dataset = load_dataset("not-lain/wikipedia", revision="embedded")
 
-
+data = dataset["train"]
+data = data.add_faiss_index("embeddings")  # column name that has the embeddings of the dataset
+
+
+model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
 
 # use quantization to lower GPU usage
 bnb_config = BitsAndBytesConfig(
@@ -25,109 +33,96 @@ model = AutoModelForCausalLM.from_pretrained(
     quantization_config=bnb_config,
     token=token
 )
+terminators = [
+    tokenizer.eos_token_id,
+    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+]
 
-
-
-
-        politicas = {
-            "Contenido Peligroso": """
-* "No al contenido peligroso": El mensaje no debe contener ni buscar la generación de contenido que cause daño a uno mismo o a otros (por ejemplo, acceder o fabricar armas de fuego o explosivos, promoción del terrorismo, instrucciones para el suicidio).
-            """,
-            "Acoso": """
-* "No al acoso": El mensaje no debe contener ni buscar la generación de contenido que sea malicioso, intimidante, que fomente el bullying o abusivo dirigido a otra persona (por ejemplo, amenazas físicas, negación de eventos trágicos, menosprecio a víctimas de violencia).
-            """,
-            "Discurso de Odio": """
-* "No al discurso de odio": El mensaje no debe contener ni buscar la generación de contenido que ataque la identidad o atributos protegidos (por ejemplo, insultos raciales, promoción de la discriminación, llamados a la violencia contra grupos protegidos) o que deshumanice, menosprecie o vilipendie por motivos de raza, etnia, religión, discapacidad, edad, nacionalidad, estatus de veterano, orientación sexual, género, identidad de género, casta, u otro estatus protegido.
-            """,
-            "Contenido Sexual": """
-* "No al contenido sexual explícito": El mensaje no debe contener ni buscar la generación de contenido que haga referencia a actos sexuales u otro contenido lascivo (por ejemplo, descripciones gráficas sexuales, contenido destinado a causar excitación). Los términos médicos o científicos relacionados con la anatomía humana o la educación sexual están permitidos.
-            """
-        }
-    else: # English
-        politicas = {
-            "Dangerous Content": """
-* "No Dangerous Content": The prompt shall not contain or seek generation of content that harms oneself and/or others (e.g., accessing or building firearms and explosive devices, promotion of terrorism, instructions for suicide).
-            """,
-            "Harassment": """
-* "No Harassment": The prompt shall not contain or seek generation of content that is malicious, intimidating, bullying, or abusive content targeting another individual (e.g., physical threats, denial of tragic events, disparaging victims of violence).
-            """,
-            "Hate Speech": """
-* "No Hate Speech": The prompt shall not contain or seek generation of content that targets identity and/or protected attributes (e.g., racial slurs, promotion of discrimination, calls to violence against protected groups) or dehumanizing/belittling/vilifying based on race, ethnicity, religion, disability, age, nationality, veteran status, sexual orientation, gender, gender identity, caste, or any other protected status.
-            """,
-            "Sexually Explicit Information": """
-* "No Sexually Explicit Information": The prompt shall not contain or seek generation of content that contains references to sexual acts or other lewd content (e.g., sexually graphic descriptions, content aimed at causing arousal). Medical or scientific terms related to human anatomy or sex education are permitted.
-            """
-        }
-
-    # Format the prompt according to the selected classification type
-    politica_seguridad = politicas.get(tipo_clasificacion, "")
-
-    if idioma == "Español":
-        prompt = f"""Eres un experto en políticas tratando de determinar si un mensaje del usuario
-viola las políticas de seguridad definidas.
-<inicio_turno>
-Pregunta del usuario: {message.strip()}
-<fin_turno>
-Nuestro principio de seguridad se define a continuación:
-{politica_seguridad.strip()}
-¿La pregunta del usuario viola el principio anterior? Tu respuesta debe comenzar con
-'Sí' o 'No'. Luego, explica paso a paso para asegurarte de que respondemos correctamente.
-"""
-    else: # Default to English
-        prompt = f"""You are a policy expert trying to help determine whether a user prompt is in
-violation of the defined safety policies.
-<start_of_turn>
-Human Question: {message.strip()}
-<end_of_turn>
-Our safety principle is defined below:
-{politica_seguridad.strip()}
-Does the human question violate the above principle? Your answer must start with
-'Yes' or 'No'. Then walk through step by step to be sure we answer correctly.
-"""
-
-    return prompt
-
-@spaces.GPU(duration=150)
-# Function to process the response and classify it against the safety policies
-def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p, language, harm_type):
-    prompt = generar_prompt(message, harm_type, language)
+SYS_PROMPT = """You are an assistant for answering questions.
+You are given the extracted parts of a long document and a question. Provide a conversational answer.
+If you don't know the answer, just say "I do not know." Don't make up an answer."""
 
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)["input_ids"]
 
-    with torch.no_grad():
-        logits = model(**inputs).logits
 
-
-
-
-
-
-
+def search(query: str, k: int = 3):
+    """a function that embeds a new query and returns the most probable results"""
+    embedded_query = ST.encode(query)  # embed new query
+    scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
+        "embeddings", embedded_query,  # compare our new embedded query with the dataset embeddings
+        k=k  # get only top k results
+    )
+    return scores, retrieved_examples
 
-
-
+def format_prompt(prompt, retrieved_documents, k):
+    """using the retrieved documents we will prompt the model to generate our responses"""
+    PROMPT = f"Question:{prompt}\nContext:"
+    for idx in range(k):
+        PROMPT += f"{retrieved_documents['text'][idx]}\n"
+    return PROMPT
 
-    # Return the probability of 'Sí'/'Yes' and the generated response
-    score_yes = probabilities[0].item()
 
-
-
-
+@spaces.GPU(duration=150)
+def talk(prompt, history):
+    k = 1  # number of retrieved documents
+    scores, retrieved_documents = search(prompt, k)
+    formatted_prompt = format_prompt(prompt, retrieved_documents, k)
+    formatted_prompt = formatted_prompt[:2000]  # truncate to avoid GPU OOM
+    messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
+    # tell the model to generate
+    input_ids = tokenizer.apply_chat_template(
+        messages,
+        add_generation_prompt=True,
+        return_tensors="pt"
+    ).to(model.device)
+    streamer = TextIteratorStreamer(
+        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+    )
+    generate_kwargs = dict(
+        input_ids=input_ids,
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=True,
+        top_p=0.95,
+        temperature=0.75,
+        eos_token_id=terminators,
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        print(outputs)
+        yield "".join(outputs)
+
+
+TITLE = "# RAG"
+
+DESCRIPTION = """
+A RAG pipeline with a chatbot feature.
+Resources used to build this project:
+* embedding model: https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1
+* dataset: https://huggingface.co/datasets/not-lain/wikipedia
+* faiss docs: https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.Dataset.add_faiss_index
+* chatbot: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
+* Full documentation: https://huggingface.co/blog/not-lain/rag-chatbot-using-llama3
+"""
 
-    return response
 
-# Create the Gradio interface with language and content-type selection
 demo = gr.ChatInterface(
+    fn=talk,
+    chatbot=gr.Chatbot(
+        show_label=True,
+        show_share_button=True,
+        show_copy_button=True,
+        likeable=True,
+        layout="bubble",
+        bubble_full_width=False,
+    ),
+    theme="Soft",
+    examples=[["what's anarchy?"]],
+    title=TITLE,
+    description=DESCRIPTION,
+
 )
-
 demo.launch(debug=True)
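For reference, the retrieval path this commit introduces can be exercised on its own, on CPU and without an HF_TOKEN. A minimal sketch, assuming the "embedded" revision of not-lain/wikipedia ships vectors compatible with mixedbread-ai/mxbai-embed-large-v1 in its "embeddings" column and article bodies in a "text" column, as format_prompt expects:

# Standalone sketch of the retrieval step only.
from datasets import load_dataset
from sentence_transformers import SentenceTransformer

ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
data = load_dataset("not-lain/wikipedia", revision="embedded")["train"]
data.add_faiss_index("embeddings")  # build an in-memory FAISS index over that column

query_vector = ST.encode("what's anarchy?")  # embed the query with the same model
scores, examples = data.get_nearest_examples("embeddings", query_vector, k=3)
for score, text in zip(scores, examples["text"]):
    print(round(float(score), 3), text[:80])  # results come back ranked nearest-first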
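The streaming pattern inside talk() also works in isolation: model.generate() blocks until it finishes, so it runs on a worker thread while the caller drains a TextIteratorStreamer. A minimal sketch, using gpt2 as a stand-in for Llama 3 purely so it runs anywhere; the shape is otherwise the same:

# Minimal sketch of the generate-in-a-thread streaming pattern.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # illustrative stand-in model
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Retrieval-augmented generation is", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until finished, so it runs in a worker thread;
# the main thread consumes decoded chunks as soon as they are ready.
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=30))
thread.start()

partial = ""
for chunk in streamer:  # iteration stops when generation ends
    partial += chunk
    print(partial)  # same accumulate-then-yield shape as talk()
thread.join()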
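One design note on the Gradio side: gr.ChatInterface streams whenever its fn is a generator, replacing the in-progress bot message with each yielded string, which is why talk() yields "".join(outputs) on every chunk rather than returning once at the end. A toy sketch of that contract, with fake_talk as a made-up stand-in for talk():

import time
import gradio as gr

def fake_talk(prompt, history):  # hypothetical stand-in for talk()
    partial = ""
    for word in ["streamed", "one", "chunk", "at", "a", "time"]:
        time.sleep(0.2)
        partial += word + " "
        yield partial  # each yield overwrites the displayed reply

gr.ChatInterface(fn=fake_talk).launch()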