TensorFlow Learning Notes


Author: SIENTIST | Published 2018-08-31 11:07

    Use a "graph" to represent computation tasks;
    Execute the graph in a "context" called a "session";
    Use "tensors" to represent data; a tensor can be thought of as an n-dimensional array or list;
    Maintain state through "variables";
    Use feed and fetch to assign values to, or retrieve data from, arbitrary operations (a minimal feed sketch follows this list).
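
    The examples below only demonstrate fetch, so here is a minimal feed sketch; the placeholder names and the values 7.0 and 2.0 are made up for illustration:

    import tensorflow as tf

    # Placeholders are filled in at run time through feed_dict (feed);
    # sess.run() retrieves the computed values (fetch).
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    output = tf.multiply(x, y)

    with tf.Session() as sess:
        print(sess.run(output, feed_dict={x: 7.0, y: 2.0}))  # -> 14.0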



    Nodes in a graph are called ops (operations); each op takes input tensors, performs a computation, and outputs result tensors;
    A graph must be launched in a session (a minimal sketch follows this list);
    A graph can be launched in multiple sessions, but each session runs a single graph.
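
    A minimal sketch of building a graph explicitly and launching it in a session; the constants here are made up for illustration:

    import tensorflow as tf

    g = tf.Graph()  # build a graph explicitly instead of relying on the default graph
    with g.as_default():
        a = tf.constant(2)
        b = tf.constant(3)
        total = tf.add(a, b)

    # Launch the graph in a session; the same graph could also be launched in another session.
    with tf.Session(graph=g) as sess:
        print(sess.run(total))  # -> 5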

    Fetch: running multiple ops in a single call. Example:

    import tensorflow as tf

    a, b, c = tf.constant(1.0), tf.constant(2.0), tf.constant(3.0)
    add = tf.add(a, b)
    mul = tf.multiply(b, c)
    with tf.Session() as sess:
        result = sess.run([mul, add])  # compute both ops at once
    

    Learning from an example program:

    import tensorflow as tf
    m1 = tf.constant([[3, 3]])    # create a constant op (a 1x2 matrix)
    m2 = tf.constant([[2], [3]])  # create a constant op (a 2x1 matrix)
    product = tf.matmul(m1, m2)   # create a matrix multiplication op with m1 and m2 as inputs

    sess = tf.Session()           # create a session; it launches the default graph
    result = sess.run(product)    # use the session's run() to execute the op
    sess.close()                  # close the session
    
    The last three statements above can be replaced with:
    with tf.Session() as sess:
        result = sess.run(product)
    
    Initialize all variables:
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
    
    state = tf.Variable(0, name='counter')      # define a variable named "counter"
    new_value = tf.add(state, 1)                # create an op computing state + 1; the result goes into the tensor new_value
    update = tf.assign(state, new_value)        # create an op: assign writes new_value back into state
    init = tf.global_variables_initializer()    # initialize all variables

    with tf.Session() as sess:
        sess.run(init)              # run the initialization
        for _ in range(5):          # loop 5 times
            sess.run(update)        # run the update op
            print(sess.run(state))  # printing the variable state also requires run()
    

    ——————————————————————————————
    A small example with a complete training process

    import numpy as np
    import tensorflow as tf
    
    x_data = np.random.rand(100).astype(np.float32)  # np.random.rand generates random numbers; astype converts the data type
    y_data = x_data * 0.1 + 0.3  # the target parameters are 0.1 and 0.3
    
    Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))  # tf.Variable creates a node in the default graph; this node is a variable
    biases = tf.Variable(tf.zeros([1]))
    
    y = Weights * x_data + biases  # Weights should converge to 0.1 and biases to 0.3
    
    loss = tf.reduce_mean(tf.square(y - y_data))  # reduce_mean computes the mean
    optimizer = tf.train.GradientDescentOptimizer(0.5)  # 0.5 is the learning rate; GradientDescentOptimizer implements gradient descent
    train = optimizer.minimize(loss)
    
    init = tf.global_variables_initializer()  # initialize all variables (tf.initialize_all_variables is deprecated)
    
    sess = tf.Session()
    sess.run(init)  # very important
    
    for step in range(401):  # iterate 401 times
        sess.run(train)
        if step % 40 == 0:  # print the values every 40 steps
            print(step, sess.run(Weights), sess.run(biases))
    
    

    ——————————————————————————————
    Following an example program, I tried to write image style transfer based on a convolutional neural network, but it didn't work...

    import os
    import sys
    import cv2
    import numpy as np
    import scipy.io
    import tensorflow as tf
    from PIL import Image
    
    im = cv2.imread('./0.png')  # read a test image with OpenCV (not actually used below)
    # Define command line args
    tf.app.flags.DEFINE_string('style_image', 'starry_night.jpg', 'style image')
    tf.app.flags.DEFINE_string('content_image', 'flower.jpg', 'content image')
    tf.app.flags.DEFINE_integer('epochs', 500, 'training epochs')
    tf.app.flags.DEFINE_float('learning_rate', 0.5, 'learning rate')
    FLAGS = tf.app.flags.FLAGS
    
    # Define hyper-parameters
    STYLE_WEIGHT = 10.
    CONTENT_WEIGHT = 1.
    STYLE_LAYERS = ['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1']
    CONTENT_LAYERS = ['relu4_2']
    _vgg_params = None
    
    
    def vgg_params():
        # Load pre-trained VGG19 params
        global _vgg_params
        if _vgg_params is None:
            file = 'imagenet-vgg-verydeep-19.mat'
            if os.path.isfile(file):
                _vgg_params = scipy.io.loadmat(file)
            else:
                sys.stderr.write('Please download imagenet-vgg-verydeep-19.mat from'
                                 ' http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat\n')
                sys.exit(1)
        return _vgg_params
    
    
    def vgg19(input_image):
        # VGG19 network
        layers = (
            'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
            'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
            'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
            'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
            'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
            'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
            'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
            'relu5_3', 'conv5_4', 'relu5_4', 'pool5'
        )
        weights = vgg_params()['layers'][0]
        net = input_image
        network = {}
        for i, name in enumerate(layers):
            layer_type = name[:4]
            if layer_type == 'conv':
                kernels, bias = weights[i][0][0][0][0]
                # matconvnet weights: [width, height, in_channels, out_channels]
                # tensorflow weights: [height, width, in_channels, out_channels]
                kernels = np.transpose(kernels, (1, 0, 2, 3))
                conv = tf.nn.conv2d(net, tf.constant(kernels),
                                    strides=(1, 1, 1, 1), padding='SAME',
                                    name=name)
                net = tf.nn.bias_add(conv, bias.reshape(-1))
                net = tf.nn.relu(net)
            elif layer_type == 'pool':
                net = tf.nn.max_pool(net, ksize=(1, 2, 2, 1),
                                     strides=(1, 2, 2, 1),
                                     padding='SAME')
            network[name] = net
        return network
    
    
    def content_loss(target_features, content_features):
        _, height, width, channel = content_features.get_shape().as_list()
        content_size = height * width * channel
        return tf.nn.l2_loss(target_features - content_features) / content_size
    
    
    def style_loss(target_features, style_features):
        _, height, width, channel = target_features.get_shape().as_list()
        size = height * width * channel
        target_features = tf.reshape(target_features, (-1, channel))
        target_gram = tf.matmul(tf.transpose(target_features),
                                target_features) / size
        style_features = tf.reshape(style_features, (-1, channel))
        style_gram = tf.matmul(tf.transpose(style_features),
                               style_features) / size
        gram_size = channel * channel
        return tf.nn.l2_loss(target_gram - style_gram) / gram_size
    
    
    def total_loss(content_image, style_image, target_image):
        style_feats = vgg19([style_image])
        content_feats = vgg19([content_image])
        target_feats = vgg19([target_image])
        loss = 0.0
        for layer in CONTENT_LAYERS:
            layer_loss = content_loss(target_feats[layer], content_feats[layer])
            loss += CONTENT_WEIGHT * layer_loss
        for layer in STYLE_LAYERS:
            layer_loss = style_loss(target_feats[layer], style_feats[layer])
            loss += STYLE_WEIGHT * layer_loss
        return loss
    
    
    def stylize(style_image, content_image, learning_rate=0.1, epochs=500):
        # target is initialized with content image
        target = tf.Variable(content_image, dtype=tf.float32)
        style_input = tf.constant(style_image, dtype=tf.float32)
        content_input = tf.constant(content_image, dtype=tf.float32)
        cost = total_loss(content_input, style_input, target)
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            for i in range(epochs):
                _, loss, target_image = sess.run([train_op, cost, target])
                print("iter:%d, loss:%.9f" % (i, loss))
                if (i + 1) % 100 == 0:
                    # save target image every 100 iterations
                    image = np.clip(target_image + 128.0, 0, 255).astype(np.uint8)
                    Image.fromarray(image).save("out/neural_%d.jpg" % (i + 1))
    
    
    if __name__ == '__main__':
        # images are preprocessed to be zero-center
        style = Image.open(FLAGS.style_image)
        style = np.array(style).astype(np.float32) - 128.0
        content = Image.open(FLAGS.content_image)
        content = np.array(content).astype(np.float32) - 128.0
        stylize(style, content, FLAGS.learning_rate, FLAGS.epochs)
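
    One thing worth checking when the script fails: Image.open() can return an RGBA or grayscale image, while conv1_1 expects exactly 3 channels, and the out/ directory must exist before results are saved. A minimal sketch of a safer invocation; the file names and the 256x256 size are assumptions, not from the original script:

    import os
    import numpy as np
    from PIL import Image

    os.makedirs('out', exist_ok=True)  # neural_*.jpg files are written into out/

    # Force 3-channel RGB and a fixed size, then zero-center as the script does.
    style = Image.open('starry_night.jpg').convert('RGB').resize((256, 256))
    content = Image.open('flower.jpg').convert('RGB').resize((256, 256))
    style = np.array(style, dtype=np.float32) - 128.0
    content = np.array(content, dtype=np.float32) - 128.0

    stylize(style, content, learning_rate=0.5, epochs=500)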
    
    
