Update src/streamlit_app.py
src/streamlit_app.py  CHANGED  +85 -165
@@ -1,166 +1,86 @@
[Old lines 1-56 of the removed file did not survive in this rendering of the diff; only two `import` statements and an `else:` are visible. The recoverable tail of process_text() follows.]
-    for chunk in chunk_texts:
-        input_ids, token_offsets = tokenize_with_indices(chunk)
-        input_ids_tensor = torch.tensor(input_ids).unsqueeze(0).to(device)
-        with torch.no_grad():
-            outputs = model(input_ids_tensor)
-            logits = outputs.logits
-            predictions = torch.argmax(logits, dim=-1).squeeze().tolist()
-            softmax_scores = F.softmax(logits, dim=-1).squeeze().tolist()
-
-        word_info = {}
-
-        for idx, (start, end) in enumerate(token_offsets):
-            if idx == 0 or idx == len(token_offsets) - 1:
-                continue
-
-            word_start = start
-            while word_start > 0 and chunk[word_start-1] != ' ':
-                word_start -= 1
-
-            if word_start not in word_info:
-                word_info[word_start] = {'prediction': 0, 'confidence': 0.0, 'subtokens': []}
-
-            confidence_percentage = softmax_scores[idx][predictions[idx]] * 100
-
-            if predictions[idx] == 1 and confidence_percentage >= confidence_threshold:
-                word_info[word_start]['prediction'] = 1
-
-            word_info[word_start]['confidence'] = max(word_info[word_start]['confidence'], confidence_percentage)
-            word_info[word_start]['subtokens'].append((start, end, chunk[start:end]))
-
-        last_end = 0
-        for word_start in sorted(word_info.keys()):
-            word_data = word_info[word_start]
-            for subtoken_start, subtoken_end, subtoken_text in word_data['subtokens']:
-                escaped_subtoken_text = subtoken_text.replace('$', '\\$')
-                if last_end < subtoken_start:
-                    reconstructed_text += chunk[last_end:subtoken_start]
-                if word_data['prediction'] == 1:
-                    reconstructed_text += f"<span style='background-color: rgba(0, 255, 0); display: inline;'>{escaped_subtoken_text}</span>"
-                else:
-                    reconstructed_text += escaped_subtoken_text
-                last_end = subtoken_end
-
-                df_data['Word'].append(escaped_subtoken_text)
-                df_data['Prediction'].append(word_data['prediction'])
-                df_data['Confidence'].append(word_info[word_start]['confidence'])
-                df_data['Start'].append(subtoken_start + original_position_offset)
-                df_data['End'].append(subtoken_end + original_position_offset)
-
-        original_position_offset += len(chunk) + 1
-
-        reconstructed_text += chunk[last_end:].replace('$', '\\$')
-
-    df_tokens = pd.DataFrame(df_data)
-    return reconstructed_text, df_tokens
-
-# Streamlit Interface
-
-st.title('LinkBERT')
-st.markdown("""
-LinkBERT is a model developed by [Dejan Marketing](https://dejanmarketing.com/) designed to predict natural link placement within web content. You can either enter plain text or the URL for automated plain text extraction. To reduce the number of link predictions increase the threshold slider value.
-""")
-
-confidence_threshold = st.slider('Confidence Threshold', 50, 100, 50)
-
-tab1, tab2 = st.tabs(["Text Input", "URL Input"])
-
-with tab1:
-    user_input = st.text_area("Enter text to process:")
-    if st.button('Process Text'):
-        highlighted_text, df_tokens = process_text(user_input, confidence_threshold)
-        st.markdown(highlighted_text, unsafe_allow_html=True)
-        st.dataframe(df_tokens)
-
-with tab2:
-    url_input = st.text_input("Enter URL to process:")
-    if st.button('Fetch and Process'):
-        content = fetch_and_extract_content(url_input)
-        if content:
-            highlighted_text, df_tokens = process_text(content, confidence_threshold)
-            st.markdown(highlighted_text, unsafe_allow_html=True)
-            st.dataframe(df_tokens)
-        else:
-            st.error("Could not fetch content from the URL. Please check the URL and try again.")
-
-# Additional information at the end
-st.divider()
-st.markdown("""
-
-## Applications of LinkBERT
-
-LinkBERT's applications are vast and diverse, tailored to enhance both the efficiency and quality of web content creation and analysis:
-
-- **Anchor Text Suggestion:** Acts as a mechanism during internal link optimization, suggesting potential anchor texts to web authors.
-- **Evaluation of Existing Links:** Assesses the naturalness of link placements within existing content, aiding in the refinement of web pages.
-- **Link Placement Guide:** Offers guidance to link builders by suggesting optimal placement for links within content.
-- **Anchor Text Idea Generator:** Provides creative anchor text suggestions to enrich content and improve SEO strategies.
-- **Spam and Inorganic SEO Detection:** Helps identify unnatural link patterns, contributing to the detection of spam and inorganic SEO tactics.
-
-## Training and Performance
-
-LinkBERT was fine-tuned on a dataset of organic web content and editorial links.
-
-[Watch the video](https://www.youtube.com/watch?v=A0ZulyVqjZo)
-
-# Engage Our Team
-Interested in using this in an automated pipeline for bulk link prediction?
-
-Please [book an appointment](https://dejanmarketing.com/conference/) to discuss your needs.
-""")
+"""
+Drop-in replacement for: from lxml.html.clean import Cleaner
+Implements Cleaner.clean_html(html) via bleach.
+"""
+
+from __future__ import annotations
+from typing import Dict, Iterable, Optional, Set, Union
+
+import bleach
+from bleach.css_sanitizer import CSSSanitizer
+
+_DEFAULT_TAGS: Set[str] = set(bleach.sanitizer.ALLOWED_TAGS) | {
+    "p", "div", "span", "br", "hr", "pre", "code",
+    "img", "figure", "figcaption",
+    "h1", "h2", "h3", "h4", "h5", "h6",
+    "table", "thead", "tbody", "tfoot", "tr", "th", "td",
+}
+
+_DEFAULT_ATTRS: Dict[str, Union[Iterable[str], dict]] = {
+    **bleach.sanitizer.ALLOWED_ATTRIBUTES,
+    "a": {"href", "title", "name", "target", "rel"},
+    "img": {"src", "alt", "title", "width", "height"},
+    "*": {"class", "id", "data-*", "dir", "lang", "title", "aria-*"},
+}
+
+_DEFAULT_PROTOCOLS: Set[str] = set(bleach.sanitizer.ALLOWED_PROTOCOLS) | {
+    "data",  # allow data: for small inline images if you wish
+}
+
+
+class Cleaner:
+    """
+    Minimal API-compatible shim:
+    - __init__(...) accepts common lxml Cleaner flags (ignored or mapped where sensible)
+    - clean_html(html: str) -> str
+    """
+
+    def __init__(
+        self,
+        allow_tags: Optional[Iterable[str]] = None,
+        safe_attrs: Optional[Dict[str, Iterable[str]]] = None,
+        strip: bool = True,
+        strip_comments: bool = True,
+        scripts: bool = True,        # kept for signature parity (ignored; bleach strips <script>)
+        javascript: bool = True,     # kept for signature parity
+        style: bool = True,          # if True, drop <style> blocks
+        inline_style: bool = False,  # if True, allow style="" with the CSS sanitizer
+        links: bool = True,          # kept for parity
+        allow_protocols: Optional[Iterable[str]] = None,
+    ):
+        self.tags = set(allow_tags) if allow_tags else _DEFAULT_TAGS.copy()
+        # Always forbid script/style elements via tags unless explicitly allowed
+        if style:
+            self.tags.discard("style")
+        self.tags.discard("script")
+
+        self.attrs = dict(_DEFAULT_ATTRS)
+        if safe_attrs:
+            # merge/override
+            for k, v in safe_attrs.items():
+                self.attrs[k] = set(v)
+
+        self.protocols = set(allow_protocols) if allow_protocols else _DEFAULT_PROTOCOLS.copy()
+
+        self.strip = bool(strip)
+        self.strip_comments = bool(strip_comments)
+
+        self.css_sanitizer = None
+        if inline_style:
+            # allow inline style attributes, sanitized with the default safe CSS property list
+            self.css_sanitizer = CSSSanitizer()
+        else:
+            # disallow style="" attributes
+            if "*" in self.attrs:
+                self.attrs["*"] = {a for a in self.attrs["*"] if a != "style"}
+
+    def clean_html(self, html: str) -> str:
+        return bleach.clean(
+            html,
+            tags=list(self.tags),
+            attributes=self.attrs,
+            protocols=list(self.protocols),
+            strip=self.strip,
+            strip_comments=self.strip_comments,
+            css_sanitizer=self.css_sanitizer,
+        )
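For context, a minimal usage sketch of the added shim follows. It is not part of the commit; the import path, the flag value, and the sample HTML are illustrative assumptions.

    # Illustrative usage only; not part of this commit.
    # Assumes the module is importable as `streamlit_app` (hypothetical path).
    from streamlit_app import Cleaner

    cleaner = Cleaner(inline_style=False)  # style="" attributes will be dropped
    dirty = (
        '<p onclick="steal()">Hello <script>alert(1)</script>'
        '<a href="javascript:run()">link</a></p>'
    )
    print(cleaner.clean_html(dirty))
    # Per the allow-lists above: the onclick attribute is dropped, the javascript:
    # href is removed, and disallowed tags such as <script> are stripped (bleach's
    # strip mode removes the tags but keeps their inner text).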