dejanseo committed
Commit 5ecbaa9 · verified · 1 Parent(s): 118946b

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +164 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,166 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st
 
- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ import torch
+ import torch.nn.functional as F
+ from transformers import AutoTokenizer, AutoModelForTokenClassification
+ import pandas as pd
+ import trafilatura
+
+ # Set Streamlit configuration
+ st.set_page_config(layout="wide", page_title="LinkBERT")
+
+ # Load the LinkBERT-XL model and tokenizer (XLM-RoBERTa Large backbone)
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ tokenizer = AutoTokenizer.from_pretrained("dejanseo/LinkBERT-XL")
+ model = AutoModelForTokenClassification.from_pretrained("dejanseo/LinkBERT-XL").to(device)
+ model.eval()
+
+ # Functions
+
+ def tokenize_with_indices(text: str):
+     # Tokenize while keeping each token's (start, end) character offsets into the text
+     encoded = tokenizer.encode_plus(text, return_offsets_mapping=True, add_special_tokens=True)
+     return encoded['input_ids'], encoded['offset_mapping']
+
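+ # Illustration (hypothetical input, not part of the app): with a fast tokenizer,
+ # each token id is paired with its (start, end) character span, and the special
+ # tokens map to (0, 0). For example, tokenize_with_indices("link here") might
+ # return offsets like [(0, 0), (0, 4), (5, 9), (0, 0)].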
+ def fetch_and_extract_content(url: str):
+     # Download the page and extract its main text, skipping comments and tables
+     downloaded = trafilatura.fetch_url(url)
+     if downloaded:
+         content = trafilatura.extract(downloaded, include_comments=False, include_tables=False)
+         return content
+     return None
+
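+ # Note: trafilatura.extract() may itself return None when it finds no extractable
+ # main content, so callers should treat any falsy return value as a failed fetch.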
+ def process_text(inputs: str, confidence_threshold: float):
+     # Pack whitespace-delimited words into chunks that stay within the model's
+     # sequence limit (512 minus the 2 special tokens)
+     max_chunk_length = 512 - 2
+     words = inputs.split()
+     chunk_texts = []
+     current_chunk = []
+     current_length = 0
+     for word in words:
+         if len(tokenizer.tokenize(word)) + current_length > max_chunk_length:
+             chunk_texts.append(" ".join(current_chunk))
+             current_chunk = [word]
+             current_length = len(tokenizer.tokenize(word))
+         else:
+             current_chunk.append(word)
+             current_length += len(tokenizer.tokenize(word))
+     chunk_texts.append(" ".join(current_chunk))
+
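+     # Note (assumption about tokenizer behaviour): lengths are estimated per word,
+     # but each chunk is re-tokenized as a whole below; SentencePiece may merge or
+     # split differently in context, which the 510-token budget absorbs.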
+     # Accumulators for the per-subtoken results table and the highlighted HTML
+     df_data = {
+         'Word': [],
+         'Prediction': [],
+         'Confidence': [],
+         'Start': [],
+         'End': []
+     }
+     reconstructed_text = ""
+     original_position_offset = 0
+
+     for chunk in chunk_texts:
+         input_ids, token_offsets = tokenize_with_indices(chunk)
+         input_ids_tensor = torch.tensor(input_ids).unsqueeze(0).to(device)
+         with torch.no_grad():
+             outputs = model(input_ids_tensor)
+             logits = outputs.logits
+             predictions = torch.argmax(logits, dim=-1).squeeze().tolist()
+             softmax_scores = F.softmax(logits, dim=-1).squeeze().tolist()
+
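+         # Assuming the model's two-label head (0 = plain text, 1 = link), logits
+         # has shape [1, seq_len, 2]; squeeze() drops the batch dimension, leaving
+         # one prediction and one score distribution per token position.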
+         word_info = {}
+
+         for idx, (start, end) in enumerate(token_offsets):
+             # Skip the special tokens at either end of the sequence
+             if idx == 0 or idx == len(token_offsets) - 1:
+                 continue
+
+             # Walk back to the preceding space so subtokens group under their word
+             word_start = start
+             while word_start > 0 and chunk[word_start - 1] != ' ':
+                 word_start -= 1
+
+             if word_start not in word_info:
+                 word_info[word_start] = {'prediction': 0, 'confidence': 0.0, 'subtokens': []}
+
+             confidence_percentage = softmax_scores[idx][predictions[idx]] * 100
+
+             if predictions[idx] == 1 and confidence_percentage >= confidence_threshold:
+                 word_info[word_start]['prediction'] = 1
+
+             word_info[word_start]['confidence'] = max(word_info[word_start]['confidence'], confidence_percentage)
+             word_info[word_start]['subtokens'].append((start, end, chunk[start:end]))
+
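+         # Aggregation rule: a word is highlighted as a link if any of its subtokens
+         # is predicted as class 1 at or above the threshold; its confidence is the
+         # maximum confidence across its subtokens.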
+         last_end = 0
+         for word_start in sorted(word_info.keys()):
+             word_data = word_info[word_start]
+             for subtoken_start, subtoken_end, subtoken_text in word_data['subtokens']:
+                 # Escape '$' so Streamlit markdown does not treat it as LaTeX
+                 escaped_subtoken_text = subtoken_text.replace('$', '\\$')
+                 if last_end < subtoken_start:
+                     reconstructed_text += chunk[last_end:subtoken_start]
+                 if word_data['prediction'] == 1:
+                     reconstructed_text += f"<span style='background-color: rgba(0, 255, 0); display: inline;'>{escaped_subtoken_text}</span>"
+                 else:
+                     reconstructed_text += escaped_subtoken_text
+                 last_end = subtoken_end
+
+                 df_data['Word'].append(escaped_subtoken_text)
+                 df_data['Prediction'].append(word_data['prediction'])
+                 df_data['Confidence'].append(word_info[word_start]['confidence'])
+                 df_data['Start'].append(subtoken_start + original_position_offset)
+                 df_data['End'].append(subtoken_end + original_position_offset)
+
+         original_position_offset += len(chunk) + 1
+
+         reconstructed_text += chunk[last_end:].replace('$', '\\$')
+
+     df_tokens = pd.DataFrame(df_data)
+     return reconstructed_text, df_tokens
+
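+ # Minimal usage sketch (hypothetical values, not executed by the app):
+ #     highlighted, df = process_text("Plain text to analyse.", 75)
+ #     'highlighted' is an HTML string with predicted anchors wrapped in <span> tags;
+ #     'df' holds one row per subtoken with prediction, confidence and character offsets.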
+ # Streamlit Interface
+
+ st.title('LinkBERT')
+ st.markdown("""
+ LinkBERT is a model developed by [Dejan Marketing](https://dejanmarketing.com/) that predicts natural link placement within web content. Enter plain text, or enter a URL to have its plain text extracted automatically. To reduce the number of link predictions, increase the confidence threshold.
+ """)
+
+ confidence_threshold = st.slider('Confidence Threshold', 50, 100, 50)
+
+ tab1, tab2 = st.tabs(["Text Input", "URL Input"])
+
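+ # The slider value is a percentage compared against confidence_percentage in
+ # process_text: at 50, any token whose argmax class is 1 passes (the winning
+ # class of a binary softmax always scores at least 50%); at 100, almost none do.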
+ with tab1:
+     user_input = st.text_area("Enter text to process:")
+     if st.button('Process Text'):
+         highlighted_text, df_tokens = process_text(user_input, confidence_threshold)
+         st.markdown(highlighted_text, unsafe_allow_html=True)
+         st.dataframe(df_tokens)
+
+ with tab2:
+     url_input = st.text_input("Enter URL to process:")
+     if st.button('Fetch and Process'):
+         content = fetch_and_extract_content(url_input)
+         if content:
+             highlighted_text, df_tokens = process_text(content, confidence_threshold)
+             st.markdown(highlighted_text, unsafe_allow_html=True)
+             st.dataframe(df_tokens)
+         else:
+             st.error("Could not fetch content from the URL. Please check the URL and try again.")
+
+ # Additional information
+ st.divider()
+ st.markdown("""
+
+ ## Applications of LinkBERT
+
+ LinkBERT supports a range of tasks aimed at improving both the efficiency and quality of web content creation and analysis:
+
+ - **Anchor Text Suggestion:** Suggests potential anchor texts to web authors during internal link optimization.
+ - **Evaluation of Existing Links:** Assesses how natural existing link placements are, aiding in the refinement of web pages.
+ - **Link Placement Guide:** Guides link builders by suggesting optimal placements for links within content.
+ - **Anchor Text Idea Generator:** Provides creative anchor text suggestions to enrich content and improve SEO strategies.
+ - **Spam and Inorganic SEO Detection:** Helps identify unnatural link patterns, contributing to the detection of spam and inorganic SEO tactics.
+
+ ## Training and Performance
+
+ LinkBERT was fine-tuned on a dataset of organic web content and editorial links.
+
+ [Watch the video](https://www.youtube.com/watch?v=A0ZulyVqjZo)
+
+ ## Engage Our Team
+ Interested in using this in an automated pipeline for bulk link prediction?
+
+ Please [book an appointment](https://dejanmarketing.com/conference/) to discuss your needs.
+ """)