|
import numpy as np |
|
import pandas as pd |
|
from gensim.models import Word2Vec |
|
import gradio as gr |
|
from sklearn.decomposition import PCA |
|
import plotly.graph_objects as go |
|
import nltk |
|
from nltk.tokenize import word_tokenize |
|
import nltk |
|
from nltk.corpus import stopwords |
|
from nltk.stem import PorterStemmer, WordNetLemmatizer |
|
from nltk.tag import pos_tag |
|
|
|
# Fetch the NLTK data packages this script needs at runtime; downloads are
# cached locally after the first run.
nltk.download('punkt')                           # sentence / word tokenizers

nltk.download('stopwords')                       # English stop-word list

nltk.download('wordnet')                         # dictionary backing the lemmatizer

nltk.download('averaged_perceptron_tagger')      # POS tagger (for the pos_tag import)


# Project-local module supplying the raw novel text used as the training corpus.
from docs import NOVEL_TEXT


# Shared NLP resources used by the text-cleaning helper below.
lemmatizer = WordNetLemmatizer()

stop_words = set(stopwords.words('english'))
|
|
|
|
|
def process_text(text):
    """Normalize raw text into a list of cleaned tokens.

    The text is lower-cased and word-tokenized; tokens that are not purely
    alphanumeric, or that appear in the English stop-word list, are dropped,
    and the survivors are lemmatized.

    NOTE(review): a second, unrelated ``process_text`` defined further down in
    this module shadows this one after import time — consider renaming.
    """
    cleaned = []
    for token in word_tokenize(text.lower()):
        if not token.isalnum():
            continue
        if token in stop_words:
            continue
        cleaned.append(lemmatizer.lemmatize(token))
    return cleaned
|
|
|
|
|
# Split the novel into sentences, then normalize each sentence into a list of
# cleaned, lemmatized tokens suitable as a Word2Vec training corpus.
sentences = nltk.sent_tokenize(NOVEL_TEXT)

processed_corpus = [process_text(sentence) for sentence in sentences]
|
|
|
import random


# Hand-built topic vocabularies used to synthesize extra training "sentences".
emotion_words = ['emotion', 'joy', 'fear', 'anger', 'sadness', 'disgust', 'anxiety', 'team', 'console', 'headquarters', 'feelings']
hockey_words = ['hockey', 'game', 'team', 'skates', 'stick', 'rink', 'practice', 'championship', 'score', 'goal', 'penalty']
memory_words = ['memory', 'sphere', 'shelves', 'life', 'experience', 'recall', 'remember', 'color', 'happy', 'sad', 'joyful']
friend_words = ['friend', 'riley', 'grace', 'bree', 'team', 'support', 'help', 'together', 'loyal', 'fun', 'friendship']
school_words = ['school', 'class', 'teacher', 'student', 'homework', 'study', 'exam', 'lesson', 'classmates', 'learn']


# For each of 40 rounds, draw one pseudo-sentence of 4-6 sampled words from
# every topic list in turn, yielding 200 synthetic sentences in total.
train_data = []
for _ in range(40):
    for topic in (emotion_words, hockey_words, memory_words, friend_words, school_words):
        train_data.append(random.sample(topic, k=random.randint(4, 6)))

# Interleave the topics so the training order carries no signal.
random.shuffle(train_data)
|
|
|
|
|
|
|
def train_word2vec(sentences):
    """Fit a CBOW Word2Vec model on *sentences* (a list of token lists).

    Uses 100-dimensional vectors, a context window of 3, 100 epochs, and
    drops words seen fewer than 2 times.
    """
    return Word2Vec(
        sentences,
        vector_size=100,
        window=3,
        min_count=2,
        workers=4,
        sg=0,
        epochs=100,
    )
|
|
|
def apply_pca(word_vectors):
    """Project the word-vector matrix down to 3 dimensions for plotting."""
    return PCA(n_components=3).fit_transform(word_vectors)
|
|
|
|
|
def get_unique(model):
    """Return the model's vocabulary as a new list of words.

    Words come back in gensim's frequency-sorted order
    (``model.wv.index_to_key``). The original implementation copied the
    vocabulary element by element in a loop; ``list(...)`` performs the
    same copy in one step.
    """
    return list(model.wv.index_to_key)
|
|
|
def process_text(target_word):
    """Train Word2Vec, project its vocabulary to 3-D with PCA, and build a
    Plotly scatter plot highlighting *target_word*.

    The target word is drawn in red, its 10 nearest neighbours in green,
    the 10 most dissimilar words in purple, and everything else in faded
    grey.

    Parameters
    ----------
    target_word : str
        The word typed into the UI text box.

    Returns
    -------
    (plotly.graph_objects.Figure, str)
        The 3-D figure and a text listing of the 10 nearest neighbours
        (empty string when *target_word* is not in the vocabulary).

    NOTE(review): this function shadows the tokenizing ``process_text``
    defined earlier in this module; consider renaming one of them.
    """
    # BUG FIX: the original read ``sentences = X`` but ``X`` is defined
    # nowhere in this module, so every click raised NameError. Train on the
    # two corpora the module actually prepares: the processed novel
    # sentences plus the synthetic topic sentences.
    sentences = processed_corpus + train_data

    model = train_word2vec(sentences)
    unique_words = get_unique(model)

    # One embedding row per vocabulary word, projected to 3 PCA components.
    word_vectors = np.array([model.wv[word] for word in unique_words])
    word_vectors_3d = apply_pca(word_vectors)

    # Default styling: every word faded grey except the target word (red).
    colors = ['rgba(128, 128, 128, 0.15)' if word != target_word else 'rgba(255, 0, 0, 1)' for word in unique_words]

    # The two highlight passes below were duplicate ``if target_word in
    # model.wv`` blocks in the original; they are merged into one.
    similar_words = []
    if target_word in model.wv:
        # 10 nearest neighbours -> green.
        similar_words = model.wv.most_similar(target_word, topn=10)
        for word, _ in similar_words:
            colors[unique_words.index(word)] = 'rgba(0, 255, 0, 1)'

        # 10 least similar words (lowest cosine similarity) -> purple.
        all_words = model.wv.index_to_key
        dissimilar_words = sorted(
            [(word, model.wv.similarity(target_word, word))
             for word in all_words if word != target_word],
            key=lambda x: x[1])[:10]
        for word, _ in dissimilar_words:
            colors[unique_words.index(word)] = 'rgba(128, 0, 128, 1)'

    fig = go.Figure(data=[go.Scatter3d(
        x=word_vectors_3d[:, 0],
        y=word_vectors_3d[:, 1],
        z=word_vectors_3d[:, 2],
        mode='markers+text',
        text=unique_words,
        textposition="top center",
        marker=dict(
            size=6,
            color=colors,
        )
    )])

    fig.update_layout(
        title="Word Embeddings 3D Visualization",
        scene=dict(
            xaxis_title="PCA 1",
            yaxis_title="PCA 2",
            zaxis_title="PCA 3"
        ),
        width=1000,
        height=1000
    )

    # Human-readable neighbour list; empty when the word is out of vocabulary.
    similar_words_text = ""
    if similar_words:
        similar_words_text = "๊ฐ์ฅ ๊ฐ๊น์ด ๋จ์ด 10๊ฐ:\n" + "\n".join(f"{word}: {score:.4f}" for word, score in similar_words)

    return fig, similar_words_text
|
|
|
|
|
|
|
# Gradio front end: a text box for the word to highlight, a submit button,
# the 3-D embedding plot, and a read-out of the nearest neighbours.
# BUG FIX: the two Korean string literals below were split across physical
# lines in the original source (a syntax error); they are rejoined onto
# single lines with their characters otherwise unchanged.
with gr.Blocks() as iface:
    gr.Markdown("# Word Embedding 3D ์๊ฐํ")
    gr.Markdown("๋จ์ด๋ฅผ ์๋ ฅํ์ธ์. Word2Vec๊ณผ PCA๋ฅผ ์ฌ์ฉํ์ฌ ๋จ์ด ์๋ฒ ๋ฉ์ 3D๋ก ์๊ฐํํฉ๋๋ค. ์๋ ฅํ ๋จ์ด๋ ๋นจ๊ฐ์์ผ๋ก, ๊ฐ์ฅ ์ ์ฌํ 10๊ฐ ๋จ์ด๋ ์ด๋ก์, ๊ฐ์ฅ ๋จผ ๋จ์ด๋ ๋ณด๋ผ์์ผ๋ก ๊ฐ์กฐ๋ฉ๋๋ค. ์ ์ฌํ ๋จ์ด ๋ชฉ๋ก์ ๊ทธ๋ํ ์๋์ ํ์๋ฉ๋๋ค.")

    with gr.Row():
        word_input = gr.Textbox(label="๊ฐ์กฐํ ๋จ์ด ์๋ ฅ")
        submit_btn = gr.Button("์ ์ถ")

    plot_output = gr.Plot(label="Word Embedding 3D ์๊ฐํ")
    similar_words_output = gr.Textbox(label="์ ์ฌํ ๋จ์ด")

    # Clicking the button retrains the model and redraws the plot.
    submit_btn.click(
        fn=process_text,
        inputs=[word_input],
        outputs=[plot_output, similar_words_output]
    )

if __name__ == "__main__":
    iface.launch()