Update app.py

app.py CHANGED
@@ -19,7 +19,7 @@ tokenizer = AutoTokenizer.from_pretrained(base_model_name)
 
 #gr.load("models/samidh/cope-gemma2b-hs-2c-skr-s1.5.9.d25", hf_token=os.environ['HF_TOKEN']).launch()
 
-
+PROMPT = """
 INSTRUCTIONS
 ============
 
@@ -43,20 +43,22 @@ ANSWER
 """
 
 # Function to make predictions
-def predict(
-    input_text =
+def predict(content, policy):
+    input_text = PROMPT.format(policy=policy, content=content)
+    print(input_text)
     inputs = tokenizer.encode(input_text, return_tensors="pt")
     outputs = model.generate(inputs, max_new_tokens=1)
     decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
+    print(decoded_output)
+    return decoded_output
 
 # Create Gradio interface
 iface = gr.Interface(
     fn=predict,
-    inputs=[gr.Textbox(label="
+    inputs=[gr.Textbox(gr.Textbox(label="Content", lines=2), label="Policy", lines=10)],
     outputs="label",
     title="CoPE Demo",
-    description="
+    description="See if the given content violates your given policy."
 )
 
 # Launch the app
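
For reference, below is a minimal runnable sketch of app.py as it stands after this commit. It is an assumption-laden reconstruction, not the authoritative file: the model id is borrowed from the commented-out gr.load() line, the prompt body between the two hunks is elided in the diff and stubbed here with placeholder sections, and the committed inputs= line, which nests one gr.Textbox inside another and would fail at launch, is rewritten as two separate textboxes matching predict(content, policy).

# Sketch of app.py after this commit. Assumptions are flagged inline.
import os

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: model id reused from the commented-out gr.load() call; the
# actual base_model_name is defined above the first hunk and not shown here.
base_model_name = "samidh/cope-gemma2b-hs-2c-skr-s1.5.9.d25"

tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=os.environ.get("HF_TOKEN"))
model = AutoModelForCausalLM.from_pretrained(base_model_name, token=os.environ.get("HF_TOKEN"))

# Assumption: the diff elides the prompt body (lines 25-42). Whatever it
# contains, it must include {policy} and {content} placeholders for the
# .format() call in predict() to work.
PROMPT = """
INSTRUCTIONS
============

{policy}

CONTENT
=======

{content}

ANSWER
======
"""

# Function to make predictions
def predict(content, policy):
    input_text = PROMPT.format(policy=policy, content=content)
    inputs = tokenizer.encode(input_text, return_tensors="pt")
    # max_new_tokens=1: the model answers with a single token (the label).
    outputs = model.generate(inputs, max_new_tokens=1)
    # Note: outputs[0] still contains the prompt, so this returns the whole
    # prompt plus the one generated token, exactly as the commit does.
    decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return decoded_output

# Create Gradio interface. The committed line nests one gr.Textbox inside
# another; two separate textboxes, in the same order as predict()'s
# parameters, are used here instead.
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Content", lines=2),
        gr.Textbox(label="Policy", lines=10),
    ],
    outputs="label",
    title="CoPE Demo",
    description="See if the given content violates your given policy.",
)

# Launch the app
iface.launch()

If only the verdict should appear in the label widget, decoding just the newly generated token, e.g. tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True), would avoid echoing the entire prompt back to the user.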