# This is a project of Chakra Lab LLC. All rights reserved.
import os

import gradio as gr
import torch
import torch.nn.functional as F
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

device = 'cuda' if torch.cuda.is_available() else 'cpu'
#base_model_name = "google/gemma-7b"
base_model_name = "google/gemma-2-9b"
#adapter_model_name = "samidh/cope-g7bq-2c-hs.s1.5fpc.9-sx.s1.5.9o-VL.s1.5.9-HR.s5-SH.s5-l5e5-e3-d25-r8"
#adapter_model_name = "cope-project/cope-g2.9b-2c-hs.s1.5fpc.9-sx.s1.5.9o-vl.s1.5.9-hr.s5-sh.s5.l5e5-e3-d0-r8"
#adapter_model_name = "samidh/cope-g2.9b-2c-hs.at-sx.at-vl.s5-sh.s5-hr.s5-e2"
adapter_model_name = "samidh/cope.a.r09.2"
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    # bnb_4bit_quant_type="nf4",
    # bnb_4bit_compute_dtype=torch.bfloat16,
    # bnb_4bit_use_double_quant=True
)
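# Load the quantized base model, attach the fine-tuned CoPE adapter, and
# merge the adapter weights into the base model for faster inference.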
model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    token=os.environ['HF_TOKEN'],
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter_model_name, token=os.environ['HF_TOKEN'])
# merge_and_unload() returns the merged model, so keep the return value.
# device_map="auto" has already placed the weights; no explicit .to(device) is needed.
model = model.merge_and_unload()
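# The tokenizer ships with the base model; the LoRA adapter leaves the
# vocabulary unchanged (assumption: training added no new tokens).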
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
PROMPT = """
INSTRUCTIONS
============
Examine the given POLICY and determine if the given CONTENT meets the criteria for ANY of the LABELS. Answer "1" if yes, and "0" if no.
POLICY
======
{policy}
CONTENT
=======
{content}
ANSWER
======
"""
DEFAULT_POLICY = """# Policy
## Overview
This policy is designed to determine whether or not content is hate speech.
## Definition of Terms
- Protected Classes: Individuals or groups distinguished by race (e.g. Black, White), ethnicity (e.g. Hispanic), nationality (e.g. Canadian, Argentinian), caste (e.g. Brahmins, Kshatriyas, Vaishyas), immigration status (e.g. Immigrants), religion (e.g. Catholics, Jews), disability (e.g. blind people, deaf people), disease status (e.g. people with AIDS), sexual orientation (e.g. gays, bisexuals), transgender status (e.g. trans people, cis people), gender identity (e.g. women, men), gender expression (e.g. masculine, feminine), or sex (e.g. male, female).
- Derogatory Slur: Any term or phrase used pejoratively or contemptuously towards a member or members of a protected class, including those historically reclaimed but used in a derogatory context.
- Non-Protected Classes: Categories defined by political party membership (e.g. Democrats, Tories), political beliefs (e.g. fascists, communists), hobbies (e.g. gamers, knitters), criminal history (e.g. criminals, convicts), or profession (e.g. doctors, farmers).
## Interpretation of Language
- Terminology or phrasing that can be construed in various ways IS hate speech UNLESS context clearly indicates it unambiguously IS NOT hate speech. So, for example, attacks on a religion as an institution (e.g. "Hinduism is a garbage religion") SHOULD be understood as attacks on the followers of that religion.
## Definition of Labels
### (HS): Hate Speech
#### Includes
- Direct and Indirect Violence Advocacy: Any form of expression that celebrates, justifies, incites, or threatens physical or psychological harm against individuals or groups identified by their membership in a protected class.
- Dehumanization and Vilification: Content suggesting that individuals or groups, explicitly identified by their membership in a specific protected class, are subhuman, inherently lesser beings, or using animalistic descriptions in a way that promotes disdain or hate.
- Derogatory and Dehumanizing Language: Use of slurs, epithets, or any derogatory language aimed at belittling, humiliating, or inciting hatred against individuals or groups explicitly identified by their membership in a specific protected class.
- Explicit and Implicit Discrimination Advocacy: Promoting exclusion, segregation, or denial of rights against individuals or groups explicitly identified by their membership in a specific protected class.
- Collective Attribution of Negative Actions: Assigning collective blame or advocating collective punishment based on the actions or perceived characteristics of individuals or groups identified by their membership in a protected class.
- Inferiority and Superiority Claims: Statements that categorically assign inferiority or superiority, moral or intellectual, to individuals or groups identified by their membership in a protected class.
- Denial or Distortion of Historical Atrocities: Denying, grossly trivializing, or distorting documented atrocities against groups identified by their membership in a protected class, undermining their significance or the suffering of their members.
- Conspiracy Theories: Propagating unfounded allegations that individuals or groups, identified by their membership in a protected class, are responsible for serious harms or controlling significant institutions to the detriment of society.
#### Excludes
- Attacks on Non-Protected Classes: Content that attacks or criticizes individuals or groups identified by their membership in a Non-Protected Class, EVEN if that attack is violent, threatening, or otherwise hateful (e.g. "Criminals should all be rounded up and shot!").
- Criticism of Beliefs and Institutions: Constructive critique or discussion of political ideologies, religious doctrines, or institutions without resorting to hate speech or targeting individuals or groups identified by their membership in a protected class.
- Neutrally Reporting Historical Events: Neutral, descriptive reporting on or discussion of factual events in the past that could be construed as negative about individuals or groups identified by their membership in a protected class.
- Pushing Back on Hateful Language: Content where the writer pushes back on, condemns, questions, criticizes, or mocks a different person's hateful language or ideas.
- Artistic and Educational Content: Content with legitimate artistic, educational, or documentary value that discusses or portrays issues related to hate speech in a context clearly aimed at enlightening or informing without promoting hate.
"""
DEFAULT_CONTENT = "Put your content sample here."
# Function to make predictions
def predict(content, policy):
    input_text = PROMPT.format(policy=policy, content=content)
    # Move inputs to wherever device_map="auto" placed the model.
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(model.device)

    with torch.inference_mode():
        outputs = model(input_ids)

    # Logits for the next token after the prompt (i.e. the answer token)
    logits = outputs.logits[:, -1, :]
    # Softmax over the vocabulary to get token probabilities
    probabilities = F.softmax(logits, dim=-1)
    # Greedy prediction: the single most likely next token
    predicted_token_id = torch.argmax(logits, dim=-1).item()
    decoded_output = tokenizer.decode([predicted_token_id])

    # Probability the model assigns to a specific answer token
    def get_token_probability(token):
        token_id = tokenizer.encode(token, add_special_tokens=False)[0]
        return probabilities[0, token_id].item()

    predicted_prob_0 = get_token_probability('0')
    predicted_prob_1 = get_token_probability('1')

    if decoded_output == '1':
        return f'VIOLATING\n(P: {predicted_prob_1:.2f})'
    else:
        return f'NON-VIOLATING\n(P: {predicted_prob_0:.2f})'
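# A minimal smoke test (assumption: meant to be run by hand, so it is left
# commented out rather than executed on import):
#
#   print(predict("Put your content sample here.", DEFAULT_POLICY))
#   # -> e.g. "NON-VIOLATING\n(P: 0.97)" (probability shown is illustrative)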
with gr.Blocks() as iface:
    gr.Markdown("# CoPE Alpha Preview")
    gr.Markdown("See if the given content violates your given policy.")

    with gr.Row():
        content_input = gr.Textbox(label="Content", lines=2, value=DEFAULT_CONTENT)
        policy_input = gr.Textbox(label="Policy", lines=10, value=DEFAULT_POLICY)

    submit_btn = gr.Button("Submit")
    output = gr.Label(label="Label")

    gr.Markdown("""
## About CoPE

CoPE (the COntent Policy Evaluation engine) is a small language model capable of accurate content policy labeling. This is a **preview** of our alpha release and is strictly for **research** purposes. This should **NOT** be used for any production use cases.

## How to Use

1. Enter your content in the "Content" box.
2. Specify your policy in the "Policy" box.
3. Click "Submit" to see the results.

**Note**: Inference times are **slow** (1-2 seconds) since this is built on dev infra and not yet optimized for live systems. Please be patient!

## More Info

- [Give us feedback](https://forms.gle/BHpt6BpH2utaf4ez9) to help us improve
- [Read our FAQ](https://docs.google.com/document/d/1Cp3GJ5k2I-xWZ4GK9WI7Xv8TpKdHmjJ3E9RbzP5Cc_Y/edit) to learn more about CoPE
- [Join our mailing list](https://forms.gle/PCABrZdhTuXE9w9ZA) to keep in touch
""")

    submit_btn.click(predict, inputs=[content_input, policy_input], outputs=output)
# Launch the app
iface.launch()