|
import fasttext |
|
import jieba |
|
# Raw training corpus: one un-segmented Chinese document per line.
train_data_path = 'sel.txt'

# jieba-segmented corpus (space-separated tokens); written by train().
parse_data_path = 'parse.txt'



# Trained fastText word-vector model file (saved by train(), loaded below).
model_path = 'word_vectors.bin'
|
|
|
def train():
    """Segment the raw corpus with jieba and train skip-gram fastText vectors.

    Reads ``train_data_path`` line by line, writes each line's space-joined
    jieba tokens to ``parse_data_path``, then trains an unsupervised
    skip-gram model (dim=300, epoch=10, lr=0.1) on the segmented file and
    saves it to ``model_path``.
    """
    with open(train_data_path, 'r', encoding='utf-8') as src, \
         open(parse_data_path, 'w', encoding='utf-8') as dst:
        # Stream the file instead of readlines() so a large corpus does not
        # have to fit in memory at once.
        for i, line in enumerate(src):
            print(i)  # progress indicator
            dst.write(" ".join(jieba.cut(line)) + "\n")
    model = fasttext.train_unsupervised(parse_data_path, model='skipgram',
                                        dim=300, epoch=10, lr=0.1)
    model.save_model(model_path)
|
|
|
|
|
|
|
# Load the previously trained fastText word vectors from disk.
model = fasttext.load_model(model_path)


# Eyeball embedding quality: for each probe token, show its 20 nearest
# neighbours in the vector space on a single line.
targets = ['精液', '爱液']

for target in targets:
    neighbours = [word for _, word in model.get_nearest_neighbors(target, k=20)]
    # end=' \n' keeps the trailing space the original per-word prints emitted.
    print(' '.join([target] + neighbours), end=' \n')
|
|
|
|
|
|
|
# Quick sanity check: cluster a few probe phrases by their sentence vectors.
texts_to_cluster = ["飞机", "肉棒", "汽车", "阴茎", "猫", "B", "老婆"]

# One 300-d sentence vector per phrase, from the loaded fastText model.
vectors = [model.get_sentence_vector(text) for text in texts_to_cluster]

# Imported here (as in the original) so the demo above runs without sklearn.
from sklearn.cluster import KMeans

# n_init=10 pins the pre-1.4 default (silences the sklearn FutureWarning)
# and random_state makes the clustering reproducible across runs.
kmeans = KMeans(n_clusters=3, n_init=10, random_state=0)
clusters = kmeans.fit_predict(vectors)

# Report the assignment — previously the result was computed and discarded.
for text, label in zip(texts_to_cluster, clusters):
    print(text, label)
|
|
|
|
|
|