Training word2vec with gensim
Any NLP task that takes words as its basic input unit needs some way of representing those words. There are many choices; this post focuses on word vectors. word2vec is currently the most widely used tool for training word vectors, and the Gensim module makes that training straightforward. For word2vec, both the Skip-Gram model and the CBOW model take single words as their basic input and output units; the two differ only in what those inputs and outputs are (the sketch after the list below shows the pairs each model generates):
- Skip-Gram model: the input is a single word and the targets are its multiple context words;
- CBOW model: the inputs are multiple context words and the target is a single word.
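To make the difference concrete, here is a minimal illustrative sketch (not part of gensim; the sentence and window size are made up) that enumerates the training pairs each model would derive from one tokenized sentence:

```python
# Illustration only: training pairs derived from one tokenized sentence.
sentence = ["the", "quick", "brown", "fox", "jumps"]
window = 2

for i, center in enumerate(sentence):
    left = max(0, i - window)
    context = sentence[left:i] + sentence[i + 1:i + 1 + window]
    # Skip-Gram: one (input, target) pair per context word
    skip_gram_pairs = [(center, c) for c in context]
    # CBOW: all context words together predict the center word
    cbow_pair = (context, center)
    print(skip_gram_pairs, cbow_pair)
```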
Since both models operate on words, the raw corpus must be segmented into words before it can serve as training data for the word vectors.
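As a hedged example of that preprocessing step: ordinary Chinese text would need a segmenter such as jieba, whereas the competition corpus used below is already anonymized and space-separated, so a plain `split()` is enough. The sample strings here are illustrative only:

```python
# Illustrative preprocessing sketch; the sample strings are made up.
import jieba  # only needed for raw, unsegmented Chinese text

raw_chinese = "自然语言处理很有趣"
tokens = jieba.lcut(raw_chinese)  # list of word tokens

# The dataset used below stores each document as space-separated token ids,
# so whitespace splitting already yields the word sequence.
anonymized_doc = "3750 648 900 3370"
tokens = anonymized_doc.split()
```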
```python
import logging
import random

import numpy as np
import torch

logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(levelname)s: %(message)s')

# set seed
seed = 666
random.seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)

# split data to 10 fold
fold_num = 10
data_file = '../data/train_set.csv'
```
```python
import pandas as pd


def all_data2fold(fold_num, num=10000):
    fold_data = []

    f = pd.read_csv(data_file, sep='\t', encoding='UTF-8')
    texts = f['text'].tolist()[:num]
    labels = f['label'].tolist()[:num]

    total = len(labels)

    # shuffle the data
    index = list(range(total))
    np.random.shuffle(index)

    all_texts = []
    all_labels = []
    for i in index:
        all_texts.append(texts[i])
        all_labels.append(labels[i])

    # group sample indices by label
    label2id = {}
    for i in range(total):
        label = str(all_labels[i])
        if label not in label2id:
            label2id[label] = [i]
        else:
            label2id[label].append(i)

    # distribute each label's samples across the folds (stratified split)
    all_index = [[] for _ in range(fold_num)]
    for label, data in label2id.items():
        # print(label, len(data))
        batch_size = int(len(data) / fold_num)
        other = len(data) - batch_size * fold_num
        for i in range(fold_num):
            cur_batch_size = batch_size + 1 if i < other else batch_size
            # print(cur_batch_size)
            batch_data = [data[i * batch_size + b] for b in range(cur_batch_size)]
            all_index[i].extend(batch_data)

    # balance the folds so that each one ends up with total / fold_num samples
    batch_size = int(total / fold_num)
    other_texts = []
    other_labels = []
    other_num = 0
    start = 0
    for fold in range(fold_num):
        num = len(all_index[fold])
        texts = [all_texts[i] for i in all_index[fold]]
        labels = [all_labels[i] for i in all_index[fold]]

        if num > batch_size:
            fold_texts = texts[:batch_size]
            other_texts.extend(texts[batch_size:])
            fold_labels = labels[:batch_size]
            other_labels.extend(labels[batch_size:])
            other_num += num - batch_size
        elif num < batch_size:
            end = start + batch_size - num
            fold_texts = texts + other_texts[start: end]
            fold_labels = labels + other_labels[start: end]
            start = end
        else:
            fold_texts = texts
            fold_labels = labels

        assert batch_size == len(fold_labels)

        # shuffle
        index = list(range(batch_size))
        np.random.shuffle(index)

        shuffle_fold_texts = []
        shuffle_fold_labels = []
        for i in index:
            shuffle_fold_texts.append(fold_texts[i])
            shuffle_fold_labels.append(fold_labels[i])

        data = {'label': shuffle_fold_labels, 'text': shuffle_fold_texts}
        fold_data.append(data)

    logging.info("Fold lens %s", str([len(data['label']) for data in fold_data]))

    return fold_data


fold_data = all_data2fold(10)
```
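For a quick sanity check of the stratified split, something like the following (an optional sketch, not part of the original pipeline) prints the size and label distribution of one fold:

```python
# Optional sanity check: inspect the first fold.
from collections import Counter

print(len(fold_data[0]['text']))       # number of documents in fold 0
print(Counter(fold_data[0]['label']))  # label counts in fold 0
```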
```python
# build train data for word2vec
fold_id = 9

# folds 0 .. fold_id-1 serve as the word2vec training corpus
train_texts = []
for i in range(0, fold_id):
    data = fold_data[i]
    train_texts.extend(data['text'])

logging.info('Total %d docs.' % len(train_texts))
```
```python
logging.info('Start training...')
from gensim.models.word2vec import Word2Vec

num_features = 100    # Word vector dimensionality
num_workers = 8       # Number of threads to run in parallel

# each document becomes a list of tokens
train_texts = list(map(lambda x: list(x.split()), train_texts))

# note: this is the gensim 3.x API; in gensim >= 4.0 the argument is `vector_size`
model = Word2Vec(train_texts, workers=num_workers, size=num_features)
# precompute L2-normalized vectors in place to save memory (deprecated in gensim >= 4.0)
model.init_sims(replace=True)

# save model
model.save("./word2vec.bin")
```
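Once training finishes, the saved model can be loaded back and queried. A minimal sketch follows; the token '3750' is just an example id from the anonymized vocabulary and may not exist in your trained model:

```python
# Load the trained model and query word vectors (sketch; the token id is illustrative).
from gensim.models.word2vec import Word2Vec

model = Word2Vec.load("./word2vec.bin")
vector = model.wv['3750']                # 100-dimensional vector for one token
similar = model.wv.most_similar('3750')  # nearest neighbours in the embedding space
print(vector.shape, similar[:3])
```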