Preface
- Attitude determines altitude! Make excellence a habit!
- There is nothing in this world that one round of overtime can't solve; if there is, work two! (--- 茂强)
TF-IDF
- Let's look at the formula first
This formula by itself is not a very good one; in practice a smoothed version is generally used to avoid a zero denominator.
This article uses sklearn's default formula.
tf-idf(d, t) = tf(t) * idf(d, t)
idf(d, t) = log [ n / (df(d, t) + 1) ]
# t is the term, n is the total number of documents, and df(d, t) is the number of documents containing t
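As a quick illustration, the idf part of the formula can be evaluated by hand on a tiny made-up corpus (the three sentences below are invented for this example; note that TfidfVectorizer additionally applies its own smoothing and L2 normalization, so its exact numbers will differ):
import numpy as np
docs = ["cat sat on the mat", "dog sat on the log", "cat chased the dog"]  # made-up corpus
n = len(docs)
def df(term):
    # number of documents that contain the term
    return sum(1 for d in docs if term in d.split(" "))
def idf(term):
    # idf(d, t) = log[ n / (df(d, t) + 1) ] from the formula above
    return np.log(n / (df(term) + 1))
for term in ["cat", "the"]:
    print(term, "df =", df(term), "idf =", round(idf(term), 3))
# "the" occurs in every document, so its idf is log(3/4) < 0: very common terms get down-weighted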
- Data preparation
Read the data from a file. After loading, the data consists of the raw texts and a binary label (target) for each text.
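A minimal loading sketch, assuming a tab-separated file data.txt with one "label<TAB>text" pair per line and "spam"/"ham" style labels (the file name and label values are assumptions, not from the original code):
texts = []
target = []
with open("data.txt", "r", encoding="utf-8") as f:  # hypothetical file name
    for line in f:
        label, text = line.strip().split("\t", 1)
        target.append(1 if label == "spam" else 0)  # assumed binary label encoding
        texts.append(text.lower())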
- Declare the dependencies and static parameters
import tensorflow as tf
import matplotlib.pyplot as plt
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
sess = tf.Session()
batch_size = 200
max_features = 10000
- Vectorize the text with TfidfVectorizer from sklearn.feature_extraction.text
def tokenizer(text):
    words = text.split(" ")
    return words
stop_words = set()
tfidf = TfidfVectorizer(tokenizer=tokenizer, stop_words=stop_words, max_features=max_features)
sparse_tfidf_texts = tfidf.fit_transform(texts)
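A quick optional way to sanity-check the vectorization (not part of the original steps):
print(sparse_tfidf_texts.shape)  # (number of documents, max_features)
print(len(tfidf.vocabulary_))    # size of the learned vocabulary
print(sparse_tfidf_texts[0].nnz) # non-zero tf-idf entries in the first document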
- Split the data into a training set and a test set
train_indices = np.random.choice(sparse_tfidf_texts.shape[0], round(0.8*sparse_tfidf_texts.shape[0]), replace=False)
test_indices = np.array(list(set(range(sparse_tfidf_texts.shape[0])) - set(train_indices)))
texts_train = sparse_tfidf_texts[train_indices]
texts_test = sparse_tfidf_texts[test_indices]
target_train = np.array([x for ix, x in enumerate(target) if ix in train_indices])
target_test = np.array([x for ix, x in enumerate(target) if ix in test_indices])
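A small optional check that the two index sets are disjoint and the split is roughly 80/20:
assert set(train_indices).isdisjoint(set(test_indices))
print(texts_train.shape[0], texts_test.shape[0])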
- Define the variables and placeholders for the logistic regression model
A = tf.Variable(tf.random_normal(shape=[max_features, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[None, max_features], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
- Define the model and the loss function
model_output = tf.add(tf.matmul(x_data, A), b)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
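For intuition, tf.nn.sigmoid_cross_entropy_with_logits computes, per example, the numerically stable form max(x, 0) - x*z + log(1 + exp(-|x|)) for logits x and labels z; a minimal NumPy sketch with made-up values:
x = np.array([2.0, -1.0, 0.5])  # example logits (made up)
z = np.array([1.0, 0.0, 1.0])   # example 0/1 labels (made up)
loss_np = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
print(loss_np.mean())           # corresponds to the reduce_mean above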
- Define the prediction op and the accuracy calculation
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
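tf.sigmoid turns the logits into probabilities and tf.round thresholds them at 0.5; the same computation in plain NumPy (values made up):
probs = 1.0 / (1.0 + np.exp(-np.array([2.0, -1.0, 0.5])))  # sigmoid
preds = np.round(probs)                                     # [1., 0., 1.], hard 0/1 predictions
print(preds)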
- Define the optimizer and initialize the variables
my_opt = tf.train.GradientDescentOptimizer(0.05)
train_step = my_opt.minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
- Start training the model
train_loss = []
test_loss = []
train_acc = []
test_acc = []
i_data = []
for i in range(10000):
    rand_index = np.random.choice(texts_train.shape[0], size=batch_size)
    rand_x = texts_train[rand_index].todense()
    rand_y = np.transpose([target_train[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    # Only record loss and accuracy every 100 generations
    if (i+1) % 100 == 0:
        i_data.append(i+1)
        train_loss_temp = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        train_loss.append(train_loss_temp)
        test_loss_temp = sess.run(loss, feed_dict={x_data: texts_test.todense(), y_target: np.transpose([target_test])})
        test_loss.append(test_loss_temp)
        train_acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, y_target: rand_y})
        train_acc.append(train_acc_temp)
        test_acc_temp = sess.run(accuracy, feed_dict={x_data: texts_test.todense(), y_target: np.transpose([target_test])})
        test_acc.append(test_acc_temp)
    if (i+1) % 500 == 0:
        acc_and_loss = [i+1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp]
        acc_and_loss = [np.round(x, 2) for x in acc_and_loss]
        print('Generation # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))
The data fed to the model in each batch is the dense tf-idf matrix rand_x.
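Its shape and a few entries can be printed to see what the model receives (a small illustrative check):
print(rand_x.shape)                # (batch_size, max_features), i.e. (200, 10000)
print(np.asarray(rand_x)[0, :10])  # first 10 tf-idf values of the first example in the batch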
- Finally, plot the loss and accuracy curves recorded during training.
No parameter tuning was done here; readers are encouraged to tune the parameters themselves to get better results.
plt.figure(1)  # Create figure 1: loss curves
x = [i for i in range(0, len(train_loss))]
plt.plot(x, train_loss, "b-*")
plt.plot(x, test_loss, "r-+")
plt.figure(2)  # Create figure 2: accuracy curves
x = [i for i in range(0, len(train_acc))]
plt.plot(x, train_acc, "b-*")
plt.plot(x, test_acc, "r-+")
plt.show()
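After training, the fitted vectorizer and the same session can be reused to score new text; a minimal sketch (the example sentence is made up):
new_x = tfidf.transform(["win a free prize now"]).todense()  # made-up example sentence
print(sess.run(prediction, feed_dict={x_data: new_x}))       # prints 1.0 or 0.0, the predicted class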