kaggle-word2vec

Author: ForgetThatNight | Published 2018-07-02 21:42
    # bs4 nltk gensim
    import os
    import re
    import numpy as np
    import pandas as pd
    
    from bs4 import BeautifulSoup
    
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import confusion_matrix
    from sklearn.linear_model import LogisticRegression
    import nltk
    from nltk.corpus import stopwords
    
    # Download the corpora used below: the stopword list and the punkt sentence tokenizer
    nltk.download('stopwords')
    nltk.download('punkt')
    

    Load the training data with pandas

    df = pd.read_csv('../data/labeledTrainData.tsv', sep='\t', escapechar='\\')
    print('Number of reviews: {}'.format(len(df)))
    df.head()
    
    df['review'][1000]
    

    Output

    "I watched this movie really late last night and usually if it's late then I'm pretty forgiving of movies. Although I tried, I just could not stand this movie at all, it kept getting worse and worse as the movie went on. Although I know it's suppose to be a comedy but I didn't find it very funny. It was also an especially unrealistic, and jaded portrayal of rural life. In case this is what any of you think country life is like, it's definitely not. I do have to agree that some of the guy cast members were cute, but the french guy was really fake. I do have to agree that it tried to have a good lesson in the story, but overall my recommendation is that no one over 8 watch it, it's just too annoying."
    

    Strip the HTML tags

    example = BeautifulSoup(df['review'][1000], 'html.parser').get_text()
    example
    

    Output

    "I watched this movie really late last night and usually if it's late then I'm pretty forgiving of movies. Although I tried, I just could not stand this movie at all, it kept getting worse and worse as the movie went on. Although I know it's suppose to be a comedy but I didn't find it very funny. It was also an especially unrealistic, and jaded portrayal of rural life. In case this is what any of you think country life is like, it's definitely not. I do have to agree that some of the guy cast members were cute, but the french guy was really fake. I do have to agree that it tried to have a good lesson in the story, but overall my recommendation is that no one over 8 watch it, it's just too annoying."
    

    Remove punctuation

    example_letters = re.sub(r'[^a-zA-Z]', ' ', example)
    example_letters
    

    Output

    'I watched this movie really late last night and usually if it s late then I m pretty forgiving of movies  Although I tried  I just could not stand this movie at all  it kept getting worse and worse as the movie went on  Although I know it s suppose to be a comedy but I didn t find it very funny  It was also an especially unrealistic  and jaded portrayal of rural life  In case this is what any of you think country life is like  it s definitely not  I do have to agree that some of the guy cast members were cute  but the french guy was really fake  I do have to agree that it tried to have a good lesson in the story  but overall my recommendation is that no one over   watch it  it s just too annoying '
    
    words = example_letters.lower().split()
    words
    

    Output

    ['i',
     'watched',
     'this',
     'movie',
     'really',
     'late',
     'last',
     'night',
    ...
     'watch',
     'it',
     'it',
     's',
     'just',
     'too',
     'annoying']
    

    # The stopword list and other corpora used below were downloaded
    # via nltk.download() above.
    

    Remove stopwords

    # Load a custom stopword list (note: this shadows the nltk.corpus.stopwords import above)
    with open('../stopwords.txt') as f:
        stopwords = {line.rstrip() for line in f}
    words_nostop = [w for w in words if w not in stopwords]
    words_nostop
    

    Output

    ['watched',
     'movie',
     'late',
     'night',
     'late',
     'pretty',
     'forgiving',
    ...
     'story',
     'recommendation',
     'watch',
     'annoying']
    
    eng_stopwords = set(stopwords)
    
    def clean_text(text):
        text = BeautifulSoup(text, 'html.parser').get_text()
        text = re.sub(r'[^a-zA-Z]', ' ', text)
        words = text.lower().split()
        words = [w for w in words if w not in eng_stopwords]
        return ' '.join(words)
    
    df['review'][1000]
    

    Output

    "I watched this movie really late last night and usually if it's late then I'm pretty forgiving of movies. Although I tried, I just could not stand this movie at all, it kept getting worse and worse as the movie went on. Although I know it's suppose to be a comedy but I didn't find it very funny. It was also an especially unrealistic, and jaded portrayal of rural life. In case this is what any of you think country life is like, it's definitely not. I do have to agree that some of the guy cast members were cute, but the french guy was really fake. I do have to agree that it tried to have a good lesson in the story, but overall my recommendation is that no one over 8 watch it, it's just too annoying."
    
    clean_text(df['review'][1000])
    

    Output

    'watched movie late night late pretty forgiving movies stand movie worse worse movie suppose comedy didn funny unrealistic jaded portrayal rural life country life agree guy cast cute french guy fake agree lesson story recommendation watch annoying'
    

    Add the cleaned reviews to the DataFrame

    df['clean_review'] = df.review.apply(clean_text)
    df.head()
    

    Extract bag-of-words features (with sklearn's CountVectorizer)

    vectorizer = CountVectorizer(max_features = 5000) 
    train_data_features = vectorizer.fit_transform(df.clean_review).toarray()
    train_data_features.shape
    

    Output

    (25000, 5000)
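
    To sanity-check the learned vocabulary, it helps to peek at a few of the 5000 feature names (a quick sketch; scikit-learn >= 1.0 renames get_feature_names to get_feature_names_out):

    # Show a small sample of the vocabulary the vectorizer learned
    print(vectorizer.get_feature_names()[:10])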
    
    # sklearn.cross_validation was removed; use model_selection in modern scikit-learn
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(train_data_features,df.sentiment,test_size = 0.2, random_state = 0)
    
    import matplotlib.pyplot as plt
    import itertools
    def plot_confusion_matrix(cm, classes,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues):
        """
        This function prints and plots the confusion matrix.
        """
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=0)
        plt.yticks(tick_marks, classes)
    
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j, i, cm[i, j],
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
    
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
    

    Train a classifier

    LR_model = LogisticRegression()
    LR_model = LR_model.fit(X_train, y_train)
    y_pred = LR_model.predict(X_test)
    cnf_matrix = confusion_matrix(y_test,y_pred)
    
    print("Recall metric in the testing dataset: ", cnf_matrix[1,1]/(cnf_matrix[1,0]+cnf_matrix[1,1]))
    
    print("accuracy metric in the testing dataset: ", (cnf_matrix[1,1]+cnf_matrix[0,0])/(cnf_matrix[0,0]+cnf_matrix[1,1]+cnf_matrix[1,0]+cnf_matrix[0,1]))
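
    The same recall and accuracy (plus precision and F1) can also be read off sklearn's classification_report (a quick sketch):

    from sklearn.metrics import classification_report
    print(classification_report(y_test, y_pred, target_names=['negative', 'positive']))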
    
    # Plot non-normalized confusion matrix
    class_names = [0,1]
    plt.figure()
    plot_confusion_matrix(cnf_matrix
                          , classes=class_names
                          , title='Confusion matrix')
    plt.show()
    
    Train word2vec on the unlabeled reviews (word2vec needs no labels)

    df = pd.read_csv('../data/unlabeledTrainData.tsv', sep='\t', escapechar='\\')
    print('Number of reviews: {}'.format(len(df)))
    df.head()
    
    df['clean_review'] = df.review.apply(clean_text)
    df.head()
    
    review_part = df['clean_review']
    review_part.shape
    

    Output

    (50000,)
    
    import warnings
    warnings.filterwarnings("ignore")
    
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    
    
    def split_sentences(review):
        raw_sentences = tokenizer.tokenize(review.strip())
        sentences = [clean_text(s) for s in raw_sentences if s]
        return sentences
    sentences = sum(review_part.apply(split_sentences), [])
    print('{} reviews -> {} sentences'.format(len(review_part), len(sentences)))
    

    Output

    50000 reviews -> 50000 sentences

    (Each review yields exactly one "sentence" here: clean_review was already stripped of the punctuation the punkt tokenizer splits on.)
    
    sentences[0]
    

    Output

    'watching time chasers obvious bunch friends sitting day film school hey pool money bad movie bad movie dull story bad script lame acting poor cinematography bottom barrel stock music corners cut prevented film release life'
    
    # Tokenize each cleaned sentence into a word list for Word2Vec
    sentences_list = [nltk.word_tokenize(line) for line in sentences]
    

    sentences: can be a list (of tokenized sentences)
    sg: training algorithm; 0 (the default) selects CBOW, sg=1 selects skip-gram.
    size: dimensionality of the word vectors, default 100. Larger sizes need more training data but give better results; a few tens to a few hundred is a reasonable range.
    window: maximum distance between the current word and the predicted word within a sentence
    alpha: the learning rate
    seed: seed for the random number generator, used when initializing the word vectors.
    min_count: prunes the vocabulary; words with frequency below min_count are dropped, default 5
    max_vocab_size: RAM limit while building the vocabulary. If there are more unique words than this, the least frequent are pruned. Roughly 1 GB of RAM per ten million word types; None means no limit.
    workers: number of parallel worker threads used for training.
    hs: if 1, hierarchical softmax is used; if 0 (the default), negative sampling is used.
    negative: if > 0, negative sampling is used, and this sets the number of noise words drawn
    iter: number of training iterations (epochs), default 5
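
    As an illustration of these options, a skip-gram model with negative sampling could be configured like this (a sketch using the gensim 3.x parameter names above; gensim >= 4.0 renames size to vector_size and iter to epochs):

    from gensim.models.word2vec import Word2Vec

    # Hypothetical skip-gram configuration:
    # sg=1 selects skip-gram; hs=0 with negative=5 selects negative sampling with 5 noise words
    skipgram_model = Word2Vec(sentences_list, sg=1, hs=0, negative=5,
                              size=300, window=10, min_count=40,
                              workers=4, iter=5)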

    # Set the word2vec training parameters
    num_features = 300    # Word vector dimensionality
    min_word_count = 40   # Minimum word count
    num_workers = 4       # Number of threads to run in parallel
    context = 10          # Context window size
    model_name = '{}features_{}minwords_{}context.model'.format(num_features, min_word_count, context)
    
    from gensim.models.word2vec import Word2Vec
    model = Word2Vec(sentences_list, workers=num_workers, \
                size=num_features, min_count = min_word_count, \
                window = context)
    
    # If you don't plan to train the model any further, calling 
    # init_sims will make the model much more memory-efficient.
    model.init_sims(replace=True)
    
    # It can be helpful to create a meaningful model name and 
    # save the model for later use. You can load it later using Word2Vec.load()
    model.save(os.path.join('..', 'models', model_name))
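
    To reuse the vectors later, the saved model can be loaded back from disk (a minimal sketch, assuming the same relative models path and the gensim 3.x API used above):

    # Reload the previously saved model
    loaded_model = Word2Vec.load(os.path.join('..', 'models', model_name))
    print(loaded_model.most_similar('movie', topn=3))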
    
    print(model.doesnt_match(['man','woman','child','kitchen']))
    # print(model.doesnt_match('france england germany berlin'.split()))
    

    Output

    kitchen
    
    model.most_similar("boy")
    

    Output

    [('girl', 0.7018299698829651),
     ('astro', 0.6647905707359314),
     ('teenage', 0.6317306160926819),
     ('frat', 0.60948246717453),
     ('dad', 0.6011481285095215),
     ('yr', 0.6010577082633972),
     ('teenager', 0.5974895358085632),
     ('brat', 0.5941195487976074),
     ('joshua', 0.5832049250602722),
     ('father', 0.5825375914573669)]
    
    model.most_similar("bad")
    

    Output

    [('worse', 0.7071679830551147),
     ('horrible', 0.7065873742103577),
     ('terrible', 0.6872220635414124),
     ('sucks', 0.6666240692138672),
     ('crappy', 0.6634873747825623),
     ('lousy', 0.6494461297988892),
     ('horrendous', 0.6371070742607117),
     ('atrocious', 0.62550288438797),
     ('suck', 0.6224384307861328),
     ('awful', 0.619296669960022)]
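
    most_similar also accepts positive and negative word lists for analogy queries (a sketch; on a corpus of movie reviews the classic king/queen analogy may not resolve cleanly):

    # Analogy query: king - man + woman ~= ?
    print(model.most_similar(positive=['woman', 'king'], negative=['man'], topn=3))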
    
    With word vectors trained, reload the labeled data and build review-level features

    df = pd.read_csv('../data/labeledTrainData.tsv', sep='\t', escapechar='\\')
    df.head()
    
    from nltk.corpus import stopwords
    eng_stopwords = set(stopwords.words('english'))
    
    def clean_text(text, remove_stopwords=False):
        text = BeautifulSoup(text, 'html.parser').get_text()
        text = re.sub(r'[^a-zA-Z]', ' ', text)
        words = text.lower().split()
        if remove_stopwords:
            words = [w for w in words if w not in eng_stopwords]
        return words
    
    def to_review_vector(review):
        # Represent a review as the sum of the word2vec vectors of its words.
        # (The mean over axis 0 of a (1, 300) array is just that row, i.e. the accumulated sum.)
        words = clean_text(review, remove_stopwords=True)
        word_vec = np.zeros((1, 300))
        for word in words:
            # gensim 3.x membership/lookup; gensim >= 4.0 uses model.wv instead
            if word in model:
                word_vec += np.array([model[word]])
        return pd.Series(word_vec.mean(axis=0))
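
    A variant worth trying is a true average rather than a sum, so that long and short reviews land on the same scale (a sketch under the same gensim 3.x assumptions):

    def to_review_vector_mean(review):
        # Average the word vectors of the review's in-vocabulary words
        words = clean_text(review, remove_stopwords=True)
        vecs = [model[w] for w in words if w in model]
        if not vecs:
            return pd.Series(np.zeros(300))
        return pd.Series(np.mean(vecs, axis=0))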
    
    train_data_features = df.review.apply(to_review_vector)
    train_data_features.head()
    
    from sklearn.model_selection import train_test_split
    
    X_train, X_test, y_train, y_test = train_test_split(train_data_features,df.sentiment,test_size = 0.2, random_state = 0)
    
    LR_model = LogisticRegression()
    LR_model = LR_model.fit(X_train, y_train)
    y_pred = LR_model.predict(X_test)
    cnf_matrix = confusion_matrix(y_test,y_pred)
    
    print("Recall metric in the testing dataset: ", cnf_matrix[1,1]/(cnf_matrix[1,0]+cnf_matrix[1,1]))
    
    print("accuracy metric in the testing dataset: ", (cnf_matrix[1,1]+cnf_matrix[0,0])/(cnf_matrix[0,0]+cnf_matrix[1,1]+cnf_matrix[1,0]+cnf_matrix[0,1]))
    
    # Plot non-normalized confusion matrix
    class_names = [0,1]
    plt.figure()
    plot_confusion_matrix(cnf_matrix
                          , classes=class_names
                          , title='Confusion matrix')
    plt.show()
    