Linear Regression, Logistic Regression, Neural Network, and Convolutional Neural Network Models


Author: Radiance_sty | Published: 2019-04-01 19:42
    • A simple linear regression model

        import os
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        
        import tensorflow as tf
        import numpy as np
        import matplotlib.pyplot as plt
        
        # Randomly generate 1000 points distributed around y = 0.1x + 0.3
        num_point = 1000
        vectors_set = []
        for i in range(num_point):
            x1 = np.random.normal(0.0, 0.55)        # x drawn from a normal distribution (std 0.55)
            y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
            vectors_set.append([x1, y1])
        
        # Split the points into x and y samples
        x_data = [v[0] for v in vectors_set]
        y_data = [v[1] for v in vectors_set]
        
        plt.scatter(x_data, y_data, c='r')          # scatter plot of the raw points
        
        # Create a 1-D variable W, initialized uniformly in [-1, 1]
        W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')
      
        # Create a 1-D variable b, initialized to 0
        b = tf.Variable(tf.zeros([1]), name='b')
      
        # Compute the predicted value y
        y = W * x_data + b
        
        # Use the mean squared error between the prediction y and the actual y_data as the loss
        loss = tf.reduce_mean(tf.square(y - y_data), name='loss')
      
        # Use gradient descent to optimize the parameters
        optimizer = tf.train.GradientDescentOptimizer(0.5)
      
        # Training minimizes this loss
        train = optimizer.minimize(loss, name='train')
        
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        
        # Print the initial W and b
        print('W = ', sess.run(W), 'b = ', sess.run(b), 'loss = ', sess.run(loss))
        
        # Run 20 training steps
        for step in range(20):
            sess.run(train)
      
            # Print W, b, and the loss after each step
            print('W = ', sess.run(W), 'b = ', sess.run(b), 'loss = ', sess.run(loss))
        
        # Plot the fitted line over the scatter plot
        plt.scatter(x_data, y_data, c='r')
        plt.plot(x_data, sess.run(W) * x_data + sess.run(b))
        plt.show()
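
    As a sanity check on the gradient-descent result, the same fit can be computed in closed form with ordinary least squares. This short sketch is not part of the original example; it only assumes the x_data and y_data lists and the NumPy import defined above.

        # Closed-form least-squares fit of y = W*x + b (sanity check)
        A = np.stack([np.array(x_data), np.ones(len(x_data))], axis=1)     # design matrix [x, 1]
        coef, _, _, _ = np.linalg.lstsq(A, np.array(y_data), rcond=None)
        W_ls, b_ls = coef
        print('least-squares W =', W_ls, 'b =', b_ls)                      # should be close to 0.1 and 0.3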
      
    • A simple logistic regression model

        import tensorflow as tf
        import numpy as np
        from tensorflow.examples.tutorials.mnist import input_data
        
        tf.logging.set_verbosity(tf.logging.ERROR)
      
        # Load the MNIST data set
        mnist = input_data.read_data_sets('MNIST_data/',one_hot=True)
        trainimg = mnist.train.images
        trainlabel = mnist.train.labels
        testimg = mnist.test.images
        testlabel = mnist.test.labels
        print('mnist loaded...')
        
        print(trainimg.shape)
        print(trainlabel.shape)
        print(testimg.shape)
        print(testlabel.shape)
        print(trainimg)
        print(trainlabel[0])
        
        # Placeholders; None means the batch dimension can be of any size
        x = tf.placeholder('float', [None, 784])
        y = tf.placeholder('float', [None, 10])
        W = tf.Variable(tf.zeros([784, 10]))
        b = tf.Variable(tf.zeros([10]))
        
        # Logistic regression model: softmax over the linear scores
        actv = tf.nn.softmax(tf.matmul(x, W) + b)
        
        # Cost function: cross-entropy
        cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(actv), reduction_indices=1))
        
        # Optimize with gradient descent
        learning_rate = 0.01
        optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
        
        # Prediction: compare the index of the largest value in each row
        pred = tf.equal(tf.argmax(actv, 1), tf.argmax(y, 1))
        
        # Accuracy
        accr = tf.reduce_mean(tf.cast(pred, 'float'))
        
        # Initialize variables
        init = tf.global_variables_initializer()
        
        training_epochs = 50            # run 50 epochs
        batch_size = 100                # 100 samples per batch
        display_step = 5
        
        sess = tf.Session()
        sess.run(init)
        
        # Mini-batch training
        for epoch in range(training_epochs):
            avg_cost = 0.
            num_batch = int(mnist.train.num_examples/batch_size)
            for i in range(num_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})            # optimization step
                feeds = {x: batch_xs, y: batch_ys}
                avg_cost += sess.run(cost, feed_dict=feeds)/num_batch           # accumulate the average loss
        
            if epoch % display_step == 0:
                feeds_train = {x: batch_xs, y: batch_ys}
                feeds_test = {x: mnist.test.images, y: mnist.test.labels}
                train_acc = sess.run(accr, feed_dict=feeds_train)
                test_acc = sess.run(accr, feed_dict=feeds_test)
                print('Epoch: %03d/%03d cost: %.9f train_acc: %.3f test_acc: %.3f'
                      % (epoch, training_epochs, avg_cost, train_acc, test_acc))
        
        print('DONE')
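
    After training, the same graph can be reused for inference. The following is a minimal sketch (not part of the original post) that assumes the session, the placeholders, and the actv node defined above are still available:

        # Classify the first test image with the trained W and b
        sample = mnist.test.images[:1]                              # shape (1, 784)
        probs = sess.run(actv, feed_dict={x: sample})               # softmax probabilities, shape (1, 10)
        print('predicted digit:', np.argmax(probs, axis=1)[0],
              '| true digit:', np.argmax(mnist.test.labels[:1], axis=1)[0])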
      
    • A simple convolutional neural network

        import tensorflow as tf      
        from tensorflow.examples.tutorials.mnist import input_data
        
        tf.logging.set_verbosity(tf.logging.ERROR)
        # Show errors only
        
        mnist = input_data.read_data_sets('MNIST_data/',one_hot=True)
      
        trainimg   = mnist.train.images
        trainlabel = mnist.train.labels
        testimg    = mnist.test.images
        testlabel  = mnist.test.labels
        
        print('MNIST Ready...')
        
        n_input = 784       # number of input pixels (28*28)
        n_output = 10       # number of output classes
        
        # Weight parameters
        weights = {
            # First conv layer: 3*3 filters, input depth 1, producing 64 feature maps
            'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),
        
            # Second conv layer: 3*3 filters, input depth 64 (the feature maps from the previous layer), output depth 128
            'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.1)),
        
            # Fully connected layer 1: after two poolings, 28*28*1 -> 14*14*64 -> 7*7*128; flatten and map to a 1024-dim vector
            'wd1': tf.Variable(tf.random_normal([7*7*128, 1024], stddev=0.1)),
        
            # Fully connected layer 2: map the 1024-dim vector to n_output = 10 classes
            'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
        }
        
        # Bias parameters
        biases = {
            'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),
            'bc2': tf.Variable(tf.random_normal([128], stddev=0.1)),
            'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
            'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1)),
        }
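
        # Shape trace for the layers defined below (a sketch, assuming the 'SAME'
        # padding and 2x2 max pooling used in conv_basic):
        # 28*28*1 -> conv1/pool1 -> 14*14*64 -> conv2/pool2 -> 7*7*128 -> flatten (6272) -> fc1 1024 -> fc2 10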
        
        # Convolution and pooling
        def conv_basic(_input, _w, _b, _keepratio):
            # Reshape the flat input into the NHWC format expected by TensorFlow
            _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])
        
            # First conv layer; with strides, usually only the middle two dimensions (height and width) are changed
            _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
            # _mean, _var = tf.nn.moments(_conv1, [0, 1, 2])
            # _conv1 = tf.nn.batch_normalization(_conv1, _mean, _var, 0, 1, 0.0001)
        
            _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))
            _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            # Dropout: randomly drop some activations, keeping only a fraction
            _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)
        
            # Second conv layer
            _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
            # _mean, _var = tf.nn.moments(_conv2, [0, 1, 2])
            # _conv2 = tf.nn.batch_normalization(_conv2, _mean, _var, 0, 1, 0.0001)
        
            _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))
            _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)
        
            # Flatten: reshape the 4-D tensor into a 2-D matrix for the fully connected layers
            _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])
        
            # First fully connected layer
            _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
            _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
        
            # Second fully connected layer (output logits)
            _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
            out = {
                'input_r': _input_r,
                'conv1': _conv1,
                'pool1': _pool1,
                'pool_dr1': _pool_dr1,
                'conv2': _conv2,
                'pool2': _pool2,
                'pool_dr2': _pool_dr2,
                'dense1': _dense1,
                'fc1': _fc1,
                'fc_dr1':_fc_dr1,
                'out': _out
            }
            return out
        print('CNN Ready...')
        
        # Debug helper: tf.Print logs the filter values whenever `a` is evaluated
        a = tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1))
        a = tf.Print(a, [a], 'a: ')
        
        
        # Placeholders for x and y
        x = tf.placeholder(tf.float32, [None, n_input])
        y = tf.placeholder(tf.float32, [None, n_output])
        keepratio = tf.placeholder(tf.float32)
        
        _pred = conv_basic(x, weights, biases, keepratio)['out']
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred,labels=y))
        optm = tf.train.AdadeltaOptimizer(learning_rate=0.001).minimize(cost)
        _corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1))
        accr = tf.reduce_mean(tf.cast(_corr, tf.float32))
        
        # Initialize variables
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        
        print('Graph Ready...')
        
        training_epochs = 15           # run 15 epochs
        batch_size      = 16           # 16 samples per batch
        display_step    = 1
        
        # Training loop
        for epoch in range(training_epochs):
            avg_cost = 0.
            # total_batch = int(mnist.train.num_examples / batch_size)
            total_batch = 10            # use only 10 batches per epoch to keep the demo fast
        
            # Loop over the batches
            for i in range(total_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        
                # Train on this batch
                sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio: 0.7})
                # Accumulate the average loss
                avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.}) / total_batch
        
            # Print logs every display_step epochs
            if epoch % display_step == 0:
                print('Epoch: %03d/%03d cost : %.9f' % (epoch, training_epochs, avg_cost))
        
                train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.})
                print('Training Accuracy: %.3f' % (train_acc))
        
        
                # Evaluating on the full test set at once can exhaust memory; see the batched sketch below.
                # test_acc = sess.run(accr, feed_dict={x: mnist.test.images, y: mnist.test.labels, keepratio:1.})
                # print('Test Accuracy: %.3f' % (test_acc))
        
        print('Optimization Finished...')
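
    The test-accuracy lines above are commented out because feeding the entire test set through the convolutional graph at once can exhaust memory. Below is a sketch (not in the original post) that evaluates the test set in small batches; it assumes the trained session and the accr, x, y, and keepratio tensors defined above.

        # Evaluate test accuracy in small batches to limit memory use
        eval_batch = 100
        n_test = mnist.test.num_examples
        acc_sum = 0.
        for start in range(0, n_test, eval_batch):
            xs = mnist.test.images[start:start + eval_batch]
            ys = mnist.test.labels[start:start + eval_batch]
            acc_sum += sess.run(accr, feed_dict={x: xs, y: ys, keepratio: 1.}) * len(xs)
        print('Test Accuracy: %.3f' % (acc_sum / n_test))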
      
    • A simple fully connected neural network

        import tensorflow as tf
        import numpy as np
        from tensorflow.examples.tutorials.mnist import input_data
      
        tf.logging.set_verbosity(tf.logging.ERROR)
        # Show errors only
      
        mnist = input_data.read_data_sets('MNIST_data/',one_hot=True)
      
        # Network topology
        n_hidden_1 = 256        # neurons in the first hidden layer
        n_hidden_2 = 128        # neurons in the second hidden layer
        n_input = 784           # number of input pixels
        n_classes = 10          # number of output classes
        
        # Inputs and outputs
        x = tf.placeholder('float', [None, n_input])
        y = tf.placeholder('float', [None, n_classes])
        
        # Parameter initialization
        stddev = 0.1
        
        # Weight parameters
        weights = {
            'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
            'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
            'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev))
        }
        
        # Bias parameters
        biases = {
            'b1': tf.Variable(tf.random_normal([n_hidden_1])),
            'b2': tf.Variable(tf.random_normal([n_hidden_2])),
            'out': tf.Variable(tf.random_normal([n_classes]))
        }
        print('Network Ready...')
        
        def multilayer_perceptron(_X, _weights, _biases):
            layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))
            layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
            return (tf.matmul(layer_2, _weights['out']) + _biases['out'])
        
        # Prediction (logits)
        pred = multilayer_perceptron(x, weights, biases)
        
        # Loss and optimization
        # Cost function (two equivalent ways)
        # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
        
        # Gradient descent
        optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
        corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))              # correct predictions
        accr = tf.reduce_mean(tf.cast(corr, 'float'))                     # accuracy
        
        # Initialize variables
        init = tf.global_variables_initializer()
        print('Function Ready...')
        
        training_epochs = 20            # run 20 epochs
        batch_size = 100                # 100 samples per batch
        display_step = 4
        
        sess = tf.Session()
        sess.run(init)
        
        # Training loop
        for epoch in range(training_epochs):
            avg_cost = 0
            total_batch = int(mnist.train.num_examples/batch_size)
        
            for i in range(total_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                feeds = {x: batch_xs, y: batch_ys}
                sess.run(optm, feed_dict=feeds)
                avg_cost += sess.run(cost, feed_dict=feeds)
            avg_cost = avg_cost/total_batch
        
            if (epoch+1) % display_step == 0:
                print('Epoch: %03d/%03d cost : %.9f'%(epoch, training_epochs, avg_cost))
        
                feeds = {x: batch_xs, y: batch_ys}
                train_acc = sess.run(accr, feed_dict=feeds)
                print('Train Accuracy: %.3f'%(train_acc))
        
                feeds = {x:mnist.test.images, y: mnist.test.labels}
                test_acc = sess.run(accr, feed_dict=feeds)
                print('Test Accuracy: %.3f'%(test_acc))
      
        print('Optimization Finished...')
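
    To reuse the trained network later, the variables can be written to a checkpoint with tf.train.Saver. This is a minimal sketch, not part of the original post; the path './mlp_model.ckpt' is only an illustrative choice.

        # Save the trained weights to a checkpoint file (path is illustrative)
        saver = tf.train.Saver()
        save_path = saver.save(sess, './mlp_model.ckpt')
        print('Model saved to', save_path)

        # To restore later, rebuild the same graph and then call:
        # saver.restore(sess, './mlp_model.ckpt')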
      
