|
import numpy as np |
|
import pandas as pd |
|
import random |
|
from gensim.models import Word2Vec |
|
import gradio as gr |
|
from sklearn.decomposition import PCA |
|
import plotly.graph_objects as go |
|
import nltk |
|
from nltk.tokenize import word_tokenize |
|
from nltk.corpus import stopwords |
|
from nltk.stem import PorterStemmer, WordNetLemmatizer |
|
from nltk.tag import pos_tag |
|
|
|
from docs import NOVEL_TEXT |
|
|
|
def download_nltk_library():
    """Download the NLTK corpora/models this app depends on.

    Returns:
        bool: True if every resource downloaded successfully, False on any
        failure (e.g. no network access).
    """
    resources = (
        'punkt',
        'stopwords',
        'wordnet',
        'averaged_perceptron_tagger',
        'punkt_tab',  # required by word_tokenize on newer NLTK releases
    )
    try:
        for resource in resources:
            nltk.download(resource)
        return True
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # catching Exception keeps the best-effort contract without that.
        return False
|
|
|
|
|
def process_text(text):
    """Tokenize, lowercase, filter, and lemmatize *text*.

    Keeps only alphanumeric tokens that are not English stopwords and
    returns their WordNet lemmas as a list of strings.
    """
    lemmatize = WordNetLemmatizer().lemmatize
    english_stopwords = set(stopwords.words('english'))

    cleaned = []
    for token in word_tokenize(text.lower()):
        # Skip punctuation-only tokens and common function words.
        if token.isalnum() and token not in english_stopwords:
            cleaned.append(lemmatize(token))

    return cleaned
|
|
|
|
|
def train_word2vec(sentences):
    """Train and return a CBOW Word2Vec model over *sentences*."""
    hyperparams = dict(
        vector_size=100,  # embedding dimensionality
        window=3,         # context words on each side
        min_count=2,      # ignore words occurring fewer than 2 times
        workers=4,
        sg=0,             # 0 = CBOW architecture (not skip-gram)
        epochs=100,
    )
    return Word2Vec(sentences, **hyperparams)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def apply_pca(word_vectors):
    """Project *word_vectors* down to 3 dimensions with PCA for plotting."""
    reducer = PCA(n_components=3)
    reduced_vectors = reducer.fit_transform(word_vectors)
    return reduced_vectors
|
|
|
|
|
|
|
def get_unique(model):
    """Return the model's vocabulary as a list of words.

    gensim's ``wv.index_to_key`` already holds one entry per vocabulary word
    (most frequent first), so a plain ``list(...)`` copy replaces the
    original element-by-element append loop.
    """
    return list(model.wv.index_to_key)
|
|
|
def train_model(sentence):
    """Fit Word2Vec on the tokenized sentences and collect its vocabulary.

    Returns:
        tuple: (trained Word2Vec model, list of vocabulary words).
    """
    w2v_model = train_word2vec(sentence)
    vocabulary = get_unique(w2v_model)
    return w2v_model, vocabulary
|
|
|
def process_model(target_word):
    """Render a 3D visualization of the saved embeddings around *target_word*.

    Loads ``word2vec.model`` from disk, projects every word vector to 3D with
    PCA, and builds a Plotly scatter in which the target word (pink), its 10
    nearest neighbours (orange), and its 10 most distant words (purple) are
    highlighted against a dimmed vocabulary.

    Returns:
        tuple: (plotly Figure, similar-words text, dissimilar-words text).
        Both text fields are empty strings when *target_word* is not in the
        model's vocabulary.
    """
    model = Word2Vec.load("word2vec.model")
    unique_words = get_unique(model)

    word_vectors = np.array([model.wv[word] for word in unique_words])
    word_vectors_3d = apply_pca(word_vectors)

    # Default styling: dim every word, spotlight the target in pink.
    colors = ['rgba(255, 255, 255, 0.15)' if word != target_word else 'rgba(255, 20, 147, 0.9)' for word in unique_words]

    similar_words_text = ""
    dissimilar_words_text = ""

    # Single membership guard replaces the four separate `in model.wv` checks
    # in the original; a word->index dict avoids repeated O(n) list.index().
    if target_word in model.wv:
        word_to_index = {word: i for i, word in enumerate(unique_words)}

        # 10 nearest neighbours -> orange.
        similar_words = model.wv.most_similar(target_word, topn=10)
        for word, _ in similar_words:
            colors[word_to_index[word]] = 'rgba(255, 165, 0, 1)'

        # 10 least-similar words (lowest cosine similarity) -> purple.
        dissimilar_words = sorted(
            [(word, model.wv.similarity(target_word, word)) for word in model.wv.index_to_key if word != target_word],
            key=lambda x: x[1]
        )[:10]
        for word, _ in dissimilar_words:
            colors[word_to_index[word]] = 'rgba(138, 43, 226, 0.8)'

        similar_words_text = "๊ฐ์ฅ ๊ฐ๊น์ด ๋จ์ด 10๊ฐ:\n" + "\n".join([f"{word}: {score:.4f}" for word, score in similar_words])
        dissimilar_words_text = "๊ฐ์ฅ ๋จผ ๋จ์ด 10๊ฐ:\n" + "\n".join([f"{word}: {score:.4f}" for word, score in dissimilar_words])

    fig = go.Figure(data=[go.Scatter3d(
        x=word_vectors_3d[:, 0],
        y=word_vectors_3d[:, 1],
        z=word_vectors_3d[:, 2],
        mode='markers+text',
        text=unique_words,
        textposition="top center",
        marker=dict(
            size=4,
            color=colors,
        )
    )])

    fig.update_layout(
        title="Word Embeddings 3D Visualization",
        scene=dict(
            xaxis_title="X",
            yaxis_title="Y",
            zaxis_title="Z"
        ),
        width=800,
        height=800
    )

    return fig, similar_words_text, dissimilar_words_text
|
|
|
def change_button_state_true():
    """Return a Gradio update that enables the component this callback targets."""

    return gr.update(interactive=True)
|
|
|
def change_button_state_false():
    """Return a Gradio update that disables the component this callback targets."""

    return gr.update(interactive=False)
|
|
|
|
|
|
|
# --- Gradio UI layout and event wiring -------------------------------------
with gr.Blocks(css=".plot-box {width: 70%; height: 500px;}") as iface:
    # Page header and usage instructions (Korean).
    gr.Markdown("# Word Embedding 3D ์๊ฐํ")
    gr.Markdown("๋จ์ด๋ฅผ ์
๋ ฅํ์ธ์. Word2Vec๊ณผ PCA๋ฅผ ์ฌ์ฉํ์ฌ ๋จ์ด ์๋ฒ ๋ฉ์ 3D๋ก ์๊ฐํํฉ๋๋ค. ์
๋ ฅํ ๋จ์ด๋ ๋นจ๊ฐ์์ผ๋ก, ๊ฐ์ฅ ์ ์ฌํ 10๊ฐ ๋จ์ด๋ ์ด๋ก์, ๊ฐ์ฅ ๋จผ ๋จ์ด๋ ๋ณด๋ผ์์ผ๋ก ๊ฐ์กฐ๋ฉ๋๋ค. ์ ์ฌํ ๋จ์ด ๋ชฉ๋ก์ ๊ทธ๋ํ ์๋์ ํ์๋ฉ๋๋ค.")

    # Fetch NLTK corpora at app construction time, before any processing runs.
    download_nltk_library()

    with gr.Row():
        # Target-word textbox; stays disabled until the model has been loaded.
        word_input = gr.Textbox(label="**๊ฐ์กฐํ ๋จ์ด ์
๋ ฅ**", elem_id="input-box", placeholder="๋จ์ด๋ฅผ ์
๋ ฅํ์ธ์", lines=1, interactive=False)

        with gr.Column(scale=1):
            load_btn = gr.Button("๋ชจ๋ธ ๋ก๋ง", elem_id="submit-btn")
            # Disabled until the "load" click handlers enable it below.
            submit_btn = gr.Button("๋จ์ด ์
๋ ฅ", elem_id="submit-btn", interactive=False)

    with gr.Row():
        plot_output = gr.Plot(label="Word Embedding 3D ์๊ฐํ", elem_id="plot-box")

        # NOTE(review): fractional `scale` values are deprecated in newer
        # Gradio releases (integers expected) — confirm the pinned version.
        with gr.Column(scale=0.3):
            similar_words_output = gr.Textbox(label="์ ์ฌํ ๋จ์ด", interactive=False, lines=5)
            dissimilar_words_output = gr.Textbox(label="์ ์ฌํ์ง ์์ ๋จ์ด", interactive=False, lines=5)

    # "Load model": renders the plot from the saved word2vec.model, then
    # enables both the word textbox and the submit button (three handlers
    # fire on the same click).
    load_btn.click(
        fn=process_model,
        inputs=[word_input],
        outputs=[plot_output, similar_words_output, dissimilar_words_output]
    )
    load_btn.click(
        fn=change_button_state_true,
        outputs=submit_btn
    )
    load_btn.click(
        fn=change_button_state_true,
        outputs=word_input
    )
    # Submitting a word re-renders the plot and disables the load button.
    submit_btn.click(
        fn=process_model,
        inputs=[word_input],
        outputs=[plot_output, similar_words_output, dissimilar_words_output]
    )
    submit_btn.click(
        fn=change_button_state_false,
        outputs=load_btn
    )


if __name__ == "__main__":
    iface.launch()
|
|