Linear Regression

Author: 无事扯淡 | Published 2017-03-06 17:02

    Linear regression: http://blog.csdn.net/xiazdong/article/details/7950084

    Convolutional neural networks: http://www.hackcv.com/index.php/archives/104/

    https://github.com/aymericdamien/TensorFlow-Examples

    http://bcomposes.com/2015/11/26/simple-end-to-end-tensorflow-examples/

    # -*- coding: utf-8 -*-
    """
    @author: monitor1379 
    @contact: yy4f5da2@hotmail.com
    @site: www.monitor1379.com
    
    @version: 1.0
    @license: Apache Licence
    @file: mnist_decoder.py
    @time: 2016/8/16 20:03
    
    Converts the MNIST handwritten-digit data files to bmp image format.
    The dataset can be downloaded from http://yann.lecun.com/exdb/mnist.
    See the official site and the code comments for the conversion details.
    
    ========================
    Parsing rules for the IDX file format:
    ========================
    THE IDX FILE FORMAT
    
    The IDX file format is a simple format for vectors and multidimensional matrices of various numerical types.
    The basic format is:
    
    magic number
    size in dimension 0
    size in dimension 1
    size in dimension 2
    .....
    size in dimension N
    data
    
    The magic number is an integer (MSB first). The first 2 bytes are always 0.
    
    The third byte codes the type of the data:
    0x08: unsigned byte
    0x09: signed byte
    0x0B: short (2 bytes)
    0x0C: int (4 bytes)
    0x0D: float (4 bytes)
    0x0E: double (8 bytes)
    
    The 4-th byte codes the number of dimensions of the vector/matrix: 1 for vectors, 2 for matrices....
    
    The sizes in each dimension are 4-byte integers (MSB first, high endian, like in most non-Intel processors).
    
    The data is stored like in a C array, i.e. the index in the last dimension changes the fastest.
    """
    
    import numpy as np
    import struct
    import matplotlib.pyplot as plt
    
    import tensorflow as tf
    
    # training set images file
    train_images_idx3_ubyte_file = './train-images-idx3-ubyte/train-images.idx3-ubyte'
    # training set labels file
    train_labels_idx1_ubyte_file = './train-labels-idx1-ubyte/train-labels.idx1-ubyte'

    # test set images file
    test_images_idx3_ubyte_file = './t10k-images-idx3-ubyte/t10k-images.idx3-ubyte'
    # test set labels file
    test_labels_idx1_ubyte_file = './t10k-labels-idx1-ubyte/t10k-labels.idx1-ubyte'
    
    
    def decode_idx3_ubyte(idx3_ubyte_file):
        """
        解析idx3文件的通用函数
        :param idx3_ubyte_file: idx3文件路径
        :return: 数据集
        """
        # 读取二进制数据
        bin_data = open(idx3_ubyte_file, 'rb').read()
    
        # 解析文件头信息,依次为魔数、图片数量、每张图片高、每张图片宽
        offset = 0
        fmt_header = '>iiii'
        magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
        print('魔数:%d, 图片数量: %d张, 图片大小: %d*%d' % (magic_number, num_images, num_rows, num_cols))
    
        # parse the image data
        image_size = num_rows * num_cols
        offset += struct.calcsize(fmt_header)
        fmt_image = '>' + str(image_size) + 'B'
        # an earlier variant kept each image as a (rows, cols) matrix:
        # images = np.empty((num_images, num_rows, num_cols), dtype=np.uint8)
        images = np.empty((num_images, image_size), dtype=np.uint8)
        for i in range(num_images):
            if (i + 1) % 10000 == 0:
                print('parsed %d images' % (i + 1))
            images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset))
            offset += struct.calcsize(fmt_image)
        return images
    
    
    def decode_idx1_ubyte(idx1_ubyte_file):
        """
        解析idx1文件的通用函数
        :param idx1_ubyte_file: idx1文件路径
        :return: 数据集
        """
        # 读取二进制数据
        bin_data = open(idx1_ubyte_file, 'rb').read()
    
        # 解析文件头信息,依次为魔数和标签数
        offset = 0
        fmt_header = '>ii'
        magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
        print('魔数:%d, 图片数量: %d张' % (magic_number, num_images))
    
        # 解析数据集
        offset += struct.calcsize(fmt_header)
        fmt_image = '>B'
        # labels = np.empty(num_images)
        labels = np.empty(num_images,dtype=np.uint8)
        for i in range(num_images):
            if (i + 1) % 10000 == 0:
                print('已解析 %d' % (i + 1) + '张')
            labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]
            offset += struct.calcsize(fmt_image)
        return labels
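
    # A quick sanity-check sketch (assumes the MNIST files above are present):
    # images = decode_idx3_ubyte(train_images_idx3_ubyte_file)
    # labels = decode_idx1_ubyte(train_labels_idx1_ubyte_file)
    # print(images.shape)   # expected: (60000, 784)
    # print(labels[:10])    # the first ten digit labels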
    
    
    def load_train_images(idx_ubyte_file=train_images_idx3_ubyte_file):
        """
        TRAINING SET IMAGE FILE (train-images-idx3-ubyte):
        [offset] [type]          [value]          [description]
        0000     32 bit integer  0x00000803(2051) magic number
        0004     32 bit integer  60000            number of images
        0008     32 bit integer  28               number of rows
        0012     32 bit integer  28               number of columns
        0016     unsigned byte   ??               pixel
        0017     unsigned byte   ??               pixel
        ........
        xxxx     unsigned byte   ??               pixel
        Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).
    
        :param idx_ubyte_file: path to the idx file
        :return: np.array of shape (n, row*col), where n is the number of images
        """
        return decode_idx3_ubyte(idx_ubyte_file)
    
    
    def load_train_labels(idx_ubyte_file=train_labels_idx1_ubyte_file):
        """
        TRAINING SET LABEL FILE (train-labels-idx1-ubyte):
        [offset] [type]          [value]          [description]
        0000     32 bit integer  0x00000801(2049) magic number (MSB first)
        0004     32 bit integer  60000            number of items
        0008     unsigned byte   ??               label
        0009     unsigned byte   ??               label
        ........
        xxxx     unsigned byte   ??               label
        The labels values are 0 to 9.
    
        :param idx_ubyte_file: path to the idx file
        :return: np.array of shape (n,), where n is the number of labels
        """
        return decode_idx1_ubyte(idx_ubyte_file)
    
    
    def load_test_images(idx_ubyte_file=test_images_idx3_ubyte_file):
        """
        TEST SET IMAGE FILE (t10k-images-idx3-ubyte):
        [offset] [type]          [value]          [description]
        0000     32 bit integer  0x00000803(2051) magic number
        0004     32 bit integer  10000            number of images
        0008     32 bit integer  28               number of rows
        0012     32 bit integer  28               number of columns
        0016     unsigned byte   ??               pixel
        0017     unsigned byte   ??               pixel
        ........
        xxxx     unsigned byte   ??               pixel
        Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).
    
        :param idx_ubyte_file: path to the idx file
        :return: np.array of shape (n, row*col), where n is the number of images
        """
        return decode_idx3_ubyte(idx_ubyte_file)
    
    
    def load_test_labels(idx_ubyte_file=test_labels_idx1_ubyte_file):
        """
        TEST SET LABEL FILE (t10k-labels-idx1-ubyte):
        [offset] [type]          [value]          [description]
        0000     32 bit integer  0x00000801(2049) magic number (MSB first)
        0004     32 bit integer  10000            number of items
        0008     unsigned byte   ??               label
        0009     unsigned byte   ??               label
        ........
        xxxx     unsigned byte   ??               label
        The labels values are 0 to 9.
    
        :param idx_ubyte_file: path to the idx file
        :return: np.array of shape (n,), where n is the number of labels
        """
        return decode_idx1_ubyte(idx_ubyte_file)
    
    
    
    '''
    def run():
        train_images = load_train_images()
        train_labels = load_train_labels()
        # test_images = load_test_images()
        # test_labels = load_test_labels()
    
        # view the first ten samples and their labels to check that parsing is correct
        for i in range(10):
            print(train_labels[i])
            plt.imshow(train_images[i].reshape(28, 28), cmap='gray')
            plt.show()
        print('done')
    '''
    
    def loaddata():
        train_images = load_train_images()
        train_labels = load_train_labels()
        test_images = load_test_images()
        test_labels = load_test_labels()
        return (train_images,train_labels,test_images,test_labels)
    
    '''
    After loading, normalize the samples: scale pixels to [0, 1] and
    one-hot encode the labels.
    '''
    def normalize(train_images,train_labels,test_images,test_labels):
        train_images = train_images/255
        new_labels = np.zeros((60000,10))
        for i in range(60000):
            l = train_labels[i]
            new_labels[i][int(l)] = 1
        train_labels = new_labels

        test_images = test_images/255
        new_labels = np.zeros((10000,10))
        for i in range(10000):
            l = test_labels[i]
            new_labels[i][int(l)] = 1
        test_labels = new_labels
        return (train_images,train_labels,test_images,test_labels)
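
    # A vectorized sketch for the one-hot loops above (an alternative, not the
    # original author's code); assumes labels is a 1-D array of digits 0-9:
    # one_hot = np.eye(10, dtype=np.float32)[labels.astype(int)]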
    
    def nextbatch(num):
        # unused placeholder for a sequential batch reader
        pass
    
    '''
    Fetch num random samples from the training set.
    Initial idea: shuffle the samples with np.random.permutation. The images
    and labels must be shuffled together, which would mean combining them
    first (np.hstack) and splitting them apart again (np.hsplit).
    The function below takes a simpler route and slices a random contiguous
    window of num samples instead; a permutation-based sketch follows it.
    '''
    def gettrains(train_images,train_labels,num):
        start = np.random.randint(60000)
        limit = 60000-num
        if start>limit:
            start = limit
    
        t_x = train_images[start:start+num]
        t_y = train_labels[start:start+num]
        return (t_x,t_y)
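
    # A sketch of the permutation idea described above (not the original
    # author's code): sample num indices without replacement and index the
    # images and labels with the same index array -- no np.hstack/np.hsplit
    # needed to keep the pairs aligned.
    def gettrains_shuffled(train_images, train_labels, num):
        idx = np.random.permutation(train_images.shape[0])[:num]
        return train_images[idx], train_labels[idx]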
    
    '''
    Convolution helpers
    '''
    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')

    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
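
    # Shape bookkeeping for the network below (derived from the SAME padding
    # and 2x2 pooling defined above):
    #   input          28 x 28 x  1
    #   conv1 + pool1  14 x 14 x 32
    #   conv2 + pool2   7 x  7 x 64  -> flattened to 7*7*64 for the fc layer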
    
    def cnn_train(train_images,train_labels,test_images,test_labels):
        x = tf.placeholder("float", [None, 784])
        y_ = tf.placeholder("float", [None,10])

        x_image = tf.reshape(x, [-1,28,28,1])
    
        '''
        First convolutional layer
        '''
        with tf.name_scope('first_conv'):
            W_conv1 = weight_variable([5, 5, 1, 32])
            b_conv1 = bias_variable([32])
    
            h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
            h_pool1 = max_pool_2x2(h_conv1)
    
        '''
        Second convolutional layer
        '''
        with tf.name_scope('second_conv'):
            W_conv2 = weight_variable([5, 5, 32, 64])
            b_conv2 = bias_variable([64])
    
            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
            h_pool2 = max_pool_2x2(h_conv2)
    
        '''
        Fully connected layer
        '''
        with tf.name_scope('full_connect1'):
            W_fc1 = weight_variable([7 * 7 * 64, 1024])
            b_fc1 = bias_variable([1024])
    
            h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    
    
    
        '''
        Dropout: randomly drop some of the units
        '''
        with tf.name_scope('dropout'):
            keep_prob = tf.placeholder(tf.float32)
            h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    
        '''
        Output layer
        '''
        with tf.name_scope('out'):
            W_fc2 = weight_variable([1024, 10])
            b_fc2 = bias_variable([10])
    
            y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    
        tf.summary.scalar('cross_entropy', cross_entropy)
    
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    
        tf.summary.scalar('accuracy', accuracy)
        merged = tf.summary.merge_all()
        sess = tf.Session()
        train_writer = tf.summary.FileWriter('./traindata',sess.graph)
    
        sess.run(tf.global_variables_initializer())
        for i in range(1000):
            batch_xs, batch_ys = gettrains(train_images,train_labels,100)
            if i%100 == 0:
                summary,_ = sess.run([merged,train_step],feed_dict={x:batch_xs, y_: batch_ys, keep_prob: 1.0})
                train_accuracy = accuracy.eval(session=sess,feed_dict={x:batch_xs, y_: batch_ys, keep_prob: 1.0})
                train_writer.add_summary(summary,i)
                print("step %d, training accuracy %g"%(i, train_accuracy))
            else:
                train_step.run(session=sess,feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
    
        print("test accuracy %g"%accuracy.eval(session=sess,feed_dict={x: test_images, y_: test_labels, keep_prob: 1.0}))
    
    
    def runtraining(train_images,train_labels,test_images,test_labels):
        x = tf.placeholder("float", [None, 784])
        W = tf.Variable(tf.zeros([784,10]))
        b = tf.Variable(tf.zeros([10]))
        y = tf.nn.softmax(tf.matmul(x,W) + b)
    
        y_ = tf.placeholder("float", [None,10])
        # note: tf.log(y) yields -inf when the softmax output reaches 0;
        # a clipped variant is sketched after this function
        cross_entropy = -tf.reduce_sum(y_*tf.log(y))
        train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        for i in range(1000):
            batch_xs, batch_ys = gettrains(train_images,train_labels,100) # draw a random batch of 100 samples
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    
        correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(sess.run(accuracy, feed_dict={x: test_images, y_: test_labels}))
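
    # A numerically safer sketch for the cross_entropy in runtraining above
    # (an alternative, not the original author's code): clipping keeps tf.log
    # away from zero, avoiding NaNs when the softmax saturates.
    # cross_entropy = -tf.reduce_sum(
    #     y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))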
    
    def start():
        train_images,train_labels,test_images,test_labels = loaddata()
        t,l,ti,tl = normalize(train_images,train_labels,test_images,test_labels)
        runtraining(t,l,ti,tl)
    
    def start_cnn():
        train_images,train_labels,test_images,test_labels = loaddata()
        t,l,ti,tl = normalize(train_images,train_labels,test_images,test_labels)
        cnn_train(t,l,ti,tl)
    
    if __name__ == '__main__':
        #run()
        '''
        train_images = load_train_images()
        train_labels = load_train_labels()
        train_images = train_images/255
        new_lables = np.zeros((60000,10))
        for i in range(60000):
            l = train_labels[i]
            new_lables[i][int(l)] = 1
    
        plt.imshow(train_images[0].reshape(28,28), cmap='gray')
        plt.show()  
        print(new_lables[0])
        '''
        start_cnn()
        '''
        x = tf.placeholder("float", [None, 784])
        W = tf.Variable(tf.zeros([784,10]))
        b = tf.Variable(tf.zeros([10]))
        y = tf.nn.softmax(tf.matmul(x,W) + b)
    
        y_ = tf.placeholder("float", [None,10])
        cross_entropy = -tf.reduce_sum(y_*tf.log(y))
        train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
        init = tf.initialize_all_variables()
        sess = tf.Session()
        sess.run(init)
        for i in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100) # draw a random batch of 100 samples
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        '''
    
