Update app.py
app.py CHANGED

@@ -1,12 +1,12 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 from transformers import SynthIDTextWatermarkingConfig
-import json
 
 class SynthIDApp:
     def __init__(self):
         self.client = None
         self.watermarking_config = None
+        self.WATERMARK_KEYS = [654, 400, 836, 123, 340, 443, 597, 160, 57, 789]
 
     def login(self, hf_token):
         """Initialize the inference client with authentication."""
@@ -17,27 +17,33 @@ class SynthIDApp:
                 token=hf_token
             )
 
-            #
-
-            self.watermarking_config = SynthIDTextWatermarkingConfig(
-                keys=WATERMARK_KEYS,
-                ngram_len=5
-            )
-
-            # Test the connection
-            _ = self.client.token_count("Test")
+            # Test the connection with a simple generation
+            _ = self.client.text_generation("Test", max_new_tokens=1)
             return "Inference client initialized successfully!"
         except Exception as e:
             self.client = None
-            self.watermarking_config = None
             return f"Error initializing client: {str(e)}"
 
-    def apply_watermark(self, text):
+    def update_watermark_config(self, ngram_len):
+        """Update the watermarking configuration with new ngram_len."""
+        try:
+            self.watermarking_config = SynthIDTextWatermarkingConfig(
+                keys=self.WATERMARK_KEYS,
+                ngram_len=ngram_len
+            )
+            return f"Watermark config updated: ngram_len = {ngram_len}"
+        except Exception as e:
+            return f"Error updating config: {str(e)}"
+
+    def apply_watermark(self, text, ngram_len):
         """Apply SynthID watermark to input text using the inference endpoint."""
         if not self.client:
             return text, "Error: Client not initialized. Please login first."
 
         try:
+            # Update watermark config with current ngram_len
+            self.update_watermark_config(ngram_len)
+
             # Convert watermarking config to dict for the API call
             watermark_dict = {
                 "keys": self.watermarking_config.keys,
@@ -56,7 +62,7 @@ class SynthIDApp:
             )
 
             watermarked_text = response
-            return watermarked_text, "Watermark applied successfully!"
+            return watermarked_text, f"Watermark applied successfully! (ngram_len: {ngram_len})"
         except Exception as e:
             return text, f"Error applying watermark: {str(e)}"
 
@@ -65,19 +71,12 @@ class SynthIDApp:
         try:
             total_words = len(text.split())
             avg_word_length = sum(len(word) for word in text.split()) / total_words if total_words > 0 else 0
-
-            # Get token count if client is available
-            token_info = ""
-            if self.client:
-                try:
-                    token_count = self.client.token_count(text)
-                    token_info = f"\n- Token count: {token_count}"
-                except:
-                    pass
+            char_count = len(text)
 
             analysis = f"""Text Analysis:
+            - Total characters: {char_count}
             - Total words: {total_words}
-            - Average word length: {avg_word_length:.2f}
+            - Average word length: {avg_word_length:.2f}
 
             Note: This is a basic analysis. The official SynthID detector is not yet available in the public transformers package."""
 
@@ -105,15 +104,36 @@ with gr.Blocks(title="SynthID Text Watermarking Tool") as app:
 
     with gr.Tab("Apply Watermark"):
         with gr.Row():
-
-
-
-
-
-
-
+            with gr.Column(scale=3):
+                input_text = gr.Textbox(
+                    label="Input Text",
+                    lines=5,
+                    placeholder="Enter text to watermark..."
+                )
+                output_text = gr.Textbox(label="Watermarked Text", lines=5)
+            with gr.Column(scale=1):
+                ngram_len = gr.Slider(
+                    label="N-gram Length",
+                    minimum=2,
+                    maximum=5,
+                    step=1,
+                    value=5,
+                    info="Controls watermark detectability (2-5)"
+                )
+                status = gr.Textbox(label="Status")
+
+        gr.Markdown("""
+        ### N-gram Length Parameter:
+        - Higher values (4-5): More detectable watermark, but more brittle to changes
+        - Lower values (2-3): More robust to changes, but harder to detect
+        - Default (5): Maximum detectability""")
+
         apply_btn = gr.Button("Apply Watermark")
-        apply_btn.click(
+        apply_btn.click(
+            app_instance.apply_watermark,
+            inputs=[input_text, ngram_len],
+            outputs=[output_text, status]
+        )
 
     with gr.Tab("Analyze Text"):
         with gr.Row():
@@ -130,6 +150,7 @@ with gr.Blocks(title="SynthID Text Watermarking Tool") as app:
     ### Instructions:
     1. Enter your Hugging Face token and click Login
     2. Once connected, you can use the tabs to apply watermarks or analyze text
+    3. Adjust the N-gram Length slider to control watermark characteristics
 
     ### Notes:
     - This version uses Hugging Face's Inference Endpoints for faster processing
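For context on what the new update_watermark_config method produces: SynthIDTextWatermarkingConfig is normally consumed by transformers' generate() when running a model locally, whereas this commit serializes the config and forwards it to an Inference Endpoint. Below is a minimal local-generation sketch following the transformers SynthID documentation, not this app's code path; the model name is only an example and is not taken from this Space.

```python
# Minimal local-generation sketch (assumes transformers with SynthID support;
# "google/gemma-2-2b" is an example model, not necessarily the one this Space uses).
from transformers import AutoModelForCausalLM, AutoTokenizer, SynthIDTextWatermarkingConfig

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b", padding_side="left")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b")

watermarking_config = SynthIDTextWatermarkingConfig(
    keys=[654, 400, 836, 123, 340, 443, 597, 160, 57, 789],  # same values as WATERMARK_KEYS above
    ngram_len=5,  # matches the slider default in the new UI
)

inputs = tokenizer(["Write a short note about watermarking."], return_tensors="pt", padding=True)
outputs = model.generate(
    **inputs,
    watermarking_config=watermarking_config,  # watermark is applied during sampling
    do_sample=True,
    max_new_tokens=50,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```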