# NOTE(review): removed non-Python scrape residue (a "File size" line,
# git-blame hashes, and a line-number dump) that would break parsing.
import numpy as np
import pandas as pd
import random
from gensim.models import Word2Vec
import gradio as gr
from sklearn.decomposition import PCA
import plotly.graph_objects as go
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.tag import pos_tag
from docs import NOVEL_TEXT
def download_nltk_library():
    """Fetch the NLTK resources this app depends on.

    Returns:
        bool: True if every download call completed, False on any failure
        (best-effort: the app can still run if the data is already cached).
    """
    resources = (
        'punkt',
        'stopwords',
        'wordnet',
        'averaged_perceptron_tagger',
        'punkt_tab',
    )
    try:
        for resource in resources:
            nltk.download(resource)
        return True
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # Exception keeps the original best-effort semantics without that.
        return False
# Normalize one raw text into model-ready tokens.
def process_text(text):
    """Lowercase, tokenize, drop stop words, and lemmatize *text*.

    Keeps only alphanumeric tokens that are not English stop words,
    returning them lemmatized, in original order.
    """
    lemma = WordNetLemmatizer()
    stops = set(stopwords.words('english'))

    cleaned = []
    for tok in word_tokenize(text.lower()):
        if not tok.isalnum():
            continue
        if tok in stops:
            continue
        cleaned.append(lemma.lemmatize(tok))
    return cleaned
# Train a Word2Vec model on pre-tokenized sentences.
def train_word2vec(sentences):
    """Fit a CBOW Word2Vec model (100-dim vectors, window 3, min_count 2,
    4 workers, 100 epochs) on *sentences* and return it."""
    hyperparams = dict(
        vector_size=100,
        window=3,
        min_count=2,
        workers=4,
        sg=0,        # 0 = CBOW
        epochs=100,
    )
    return Word2Vec(sentences, **hyperparams)
# def preprocess_text(file_path):
#     with open(file_path, 'r', encoding='utf-8') as file:
#         text = file.read()
#     # Tokenize and POS-tag
#     tokens = word_tokenize(text)
#     tagged = pos_tag(tokens)
#     # Keep nouns only (NN, NNS, NNP, NNPS)
#     nouns = [word.lower() for word, pos in tagged if pos.startswith('NN')]
#     # De-duplicate and sort
#     unique_nouns = sorted(set(nouns))
#     # Build trivial one-word sentences (each noun treated as its own sentence)
#     sentences = [[noun] for noun in unique_nouns]
#     return sentences, unique_nouns
def apply_pca(word_vectors):
    """Project *word_vectors* onto their first 3 principal components."""
    reducer = PCA(n_components=3)
    reduced = reducer.fit_transform(word_vectors)
    return reduced
def get_unique(model):
    """Return the model's vocabulary as a plain list of words.

    `model.wv.index_to_key` is already an ordered list; a shallow copy is
    returned so callers can mutate the result without touching the model.
    (Replaces a redundant element-by-element copy loop.)
    """
    return list(model.wv.index_to_key)
def train_model(sentence):
    """Train Word2Vec on pre-tokenized sentences.

    Returns:
        tuple: (trained Word2Vec model, list of vocabulary words).
    """
    w2v = train_word2vec(sentence)
    vocab = get_unique(w2v)
    return w2v, vocab
def process_model(target_word):
    """Visualize the saved embedding space around *target_word*.

    Loads "word2vec.model" from the working directory, projects every word
    vector to 3-D with PCA, and builds a Plotly scatter in which
    *target_word* is red, its 10 nearest neighbours are green, the 10 most
    dissimilar words are purple, and everything else is translucent grey.

    Returns:
        tuple: (plotly Figure,
                text listing the 10 most similar words,
                text listing the 10 least similar words)

    NOTE(review): originally returned only (fig, similar_words_text) even
    though the UI wires three outputs; the dissimilar-words text is now
    computed and returned as well.
    """
    model = Word2Vec.load("word2vec.model")
    unique_words = get_unique(model)

    # Embedding matrix in vocabulary order, reduced to 3-D for plotting.
    word_vectors = np.array([model.wv[word] for word in unique_words])
    word_vectors_3d = apply_pca(word_vectors)

    # O(1) word -> row lookup instead of repeated list.index() scans.
    index_of = {word: i for i, word in enumerate(unique_words)}

    # Default colour: translucent grey; the queried word itself is red.
    colors = [
        'rgba(255, 0, 0, 1)' if word == target_word
        else 'rgba(128, 128, 128, 0.15)'
        for word in unique_words
    ]

    similar_words_text = ""
    dissimilar_words_text = ""
    if target_word in model.wv:
        # 10 nearest neighbours -> green.
        similar_words = model.wv.most_similar(target_word, topn=10)
        for word, _ in similar_words:
            colors[index_of[word]] = 'rgba(0, 255, 0, 1)'
        similar_words_text = "10 most similar words:\n" + "\n".join(
            f"{word}: {score:.4f}" for word, score in similar_words
        )

        # 10 least similar words (lowest cosine similarity) -> purple.
        dissimilar_words = sorted(
            ((word, model.wv.similarity(target_word, word))
             for word in model.wv.index_to_key if word != target_word),
            key=lambda pair: pair[1],
        )[:10]
        for word, _ in dissimilar_words:
            colors[index_of[word]] = 'rgba(128, 0, 128, 1)'
        dissimilar_words_text = "10 least similar words:\n" + "\n".join(
            f"{word}: {score:.4f}" for word, score in dissimilar_words
        )

    # 3-D scatter of the PCA-reduced embeddings.
    fig = go.Figure(data=[go.Scatter3d(
        x=word_vectors_3d[:, 0],
        y=word_vectors_3d[:, 1],
        z=word_vectors_3d[:, 2],
        mode='markers+text',
        text=unique_words,
        textposition="top center",
        marker=dict(
            size=6,
            color=colors,
        )
    )])
    fig.update_layout(
        title="Word Embeddings 3D Visualization",
        scene=dict(
            xaxis_title="PCA 1",
            yaxis_title="PCA 2",
            zaxis_title="PCA 3"
        ),
        width=1000,
        height=1000
    )

    return fig, similar_words_text, dissimilar_words_text


# Gradio interface.
# NOTE(review): the original UI strings were mojibake-garbled Korean;
# they are restored here in English.
with gr.Blocks(css=".plot-box {width: 70%; height: 500px;}") as iface:
    gr.Markdown("# Word Embedding 3D Visualization")
    gr.Markdown(
        "Enter a word. Word2Vec and PCA render the embedding space in 3-D: "
        "your word in red, its 10 nearest neighbours in green, and the 10 "
        "most distant words in purple. The word lists appear next to the plot."
    )
    download_nltk_library()

    with gr.Row():
        word_input = gr.Textbox(
            label="Word to highlight",
            elem_id="input-box",
            placeholder="Type a word",
            lines=1,
        )
        submit_btn = gr.Button("Submit", elem_id="submit-btn")

    with gr.Row():
        plot_output = gr.Plot(
            label="Word Embedding 3D Visualization", elem_id="plot-box"
        )
        # Narrow side column for the similarity read-outs.
        # (scale must be an int in current Gradio; was 0.3.)
        with gr.Column(scale=1):
            similar_words_output = gr.Textbox(
                label="Most similar words", interactive=False, lines=5
            )
            dissimilar_words_output = gr.Textbox(
                label="Least similar words", interactive=False, lines=5
            )

    # BUG FIX: the button previously called process_text (the tokenizer),
    # which takes raw text and returns a token list — not the three values
    # these outputs expect. Route the click to process_model instead.
    submit_btn.click(
        fn=process_model,
        inputs=[word_input],
        outputs=[plot_output, similar_words_output, dissimilar_words_output],
    )

if __name__ == "__main__":
    iface.launch()