C3D in TensorFlow

Author: 黑恶歌王 | Published 2018-08-03 21:33

The screenshot below is from a test run. Honestly, because there is so little data, the model really can only recognize this one class; tomorrow I'll try adding more classes. For now I have never seen any printed number other than 0 and 1, which is expected: with NUM_CLASSES = 1 the softmax over a single logit is always 1.0, so the cross-entropy loss is 0 and the accuracy is 1 by construction.

[Screenshot: test-run output]

Below is the training run. Again, honestly, because the training set is so small, and the samples are all the same, it can of course only recognize this one class. Apologies for running only 1 epoch.

[Screenshot: training-run output]

Below is the code with the parameters slightly changed; it is really quite simple. Thanks to the author of the Zhihu post "Tensorflow小练习(三):C3D完成视频动作识别" for the help.

train.py


import tensorflow as tf
import numpy as np
import C3D_model
import time
import data_processing
import os

TRAIN_LOG_DIR = os.path.join('Log/train/', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
TRAIN_CHECK_POINT = 'check_point/'
TRAIN_LIST_PATH = 'train.list'
TEST_LIST_PATH = 'test.list'
BATCH_SIZE = 1
NUM_CLASSES = 1  # originally 101 (UCF101)
CROP_SIZE = 112
CHANNEL_NUM = 3
CLIP_LENGTH = 16
EPOCH_NUM = 1  # originally 50 epochs
INITIAL_LEARNING_RATE = 1e-4
LR_DECAY_FACTOR = 0.5
EPOCHS_PER_LR_DECAY = 2
MOVING_AV_DECAY = 0.9999

# Get shuffled train/validation index lists
train_video_indices, validation_video_indices = data_processing.get_video_indices(TRAIN_LIST_PATH)

with tf.Graph().as_default():
    batch_clips = tf.placeholder(tf.float32, [BATCH_SIZE, CLIP_LENGTH, CROP_SIZE, CROP_SIZE, CHANNEL_NUM], name='X')
    batch_labels = tf.placeholder(tf.int32, [BATCH_SIZE, NUM_CLASSES], name='Y')
    keep_prob = tf.placeholder(tf.float32)

    logits = C3D_model.C3D(batch_clips, NUM_CLASSES, keep_prob)

    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=batch_labels))
        tf.summary.scalar('entropy_loss', loss)

    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(batch_labels, 1)), np.float32))
        tf.summary.scalar('accuracy', accuracy)

    #global_step = tf.Variable(0, name='global_step', trainable=False)
    #decay_step = EPOCHS_PER_LR_DECAY * len(train_video_indices) // BATCH_SIZE
    learning_rate = 1e-4  # tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_step, LR_DECAY_FACTOR, staircase=True)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)  # , global_step=global_step

    saver = tf.train.Saver()
    summary_op = tf.summary.merge_all()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        train_summary_writer = tf.summary.FileWriter(TRAIN_LOG_DIR, sess.graph)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        step = 0
        for epoch in range(EPOCH_NUM):
            accuracy_epoch = 0
            loss_epoch = 0
            batch_index = 0
            # Training pass (dropout active, weights updated)
            for i in range(len(train_video_indices) // BATCH_SIZE):
                step += 1
                batch_data, batch_index = data_processing.get_batches(TRAIN_LIST_PATH, NUM_CLASSES, batch_index,
                                                                      train_video_indices, BATCH_SIZE)
                _, loss_out, accuracy_out, summary = sess.run([optimizer, loss, accuracy, summary_op],
                                                              feed_dict={batch_clips: batch_data['clips'],
                                                                         batch_labels: batch_data['labels'],
                                                                         keep_prob: 0.5})
                loss_epoch += loss_out
                accuracy_epoch += accuracy_out
                if i % 10 == 0:
                    print('Epoch %d, Batch %d: Loss is %.5f; Accuracy is %.5f' % (epoch + 1, i, loss_out, accuracy_out))
                    train_summary_writer.add_summary(summary, step)

            print('Epoch %d: Average loss is: %.5f; Average accuracy is: %.5f' % (epoch + 1,
                                                                                  loss_epoch / (len(train_video_indices) // BATCH_SIZE),
                                                                                  accuracy_epoch / (len(train_video_indices) // BATCH_SIZE)))

            # Validation pass (no dropout, no weight updates)
            accuracy_epoch = 0
            loss_epoch = 0
            batch_index = 0
            for i in range(len(validation_video_indices) // BATCH_SIZE):
                batch_data, batch_index = data_processing.get_batches(TRAIN_LIST_PATH, NUM_CLASSES, batch_index,
                                                                      validation_video_indices, BATCH_SIZE)
                loss_out, accuracy_out = sess.run([loss, accuracy],
                                                  feed_dict={batch_clips: batch_data['clips'],
                                                             batch_labels: batch_data['labels'],
                                                             keep_prob: 1.0})
                loss_epoch += loss_out
                accuracy_epoch += accuracy_out

            print('Validation loss is %.5f; Accuracy is %.5f' % (loss_epoch / (len(validation_video_indices) // BATCH_SIZE),
                                                                 accuracy_epoch / (len(validation_video_indices) // BATCH_SIZE)))
            saver.save(sess, TRAIN_CHECK_POINT + 'train.ckpt', global_step=epoch)
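
Neither C3D_model.py nor data_processing.py is shown in this post. For reference, below is a minimal sketch of what C3D_model.C3D might look like, assuming the standard C3D architecture from "Learning Spatiotemporal Features with 3D Convolutional Networks" (3x3x3 convolutions, a 1x2x2 first pooling layer, two 4096-unit fully connected layers); the actual file used with the Zhihu post may differ.

C3D_model.py (sketch)

import tensorflow as tf

def C3D(inputs, num_classes, keep_prob):
    """Sketch of the standard C3D network. inputs: [batch, 16, 112, 112, 3]."""
    def conv(x, filters, name):
        # All C3D convolutions are 3x3x3 with stride 1 and 'same' padding.
        return tf.layers.conv3d(x, filters, (3, 3, 3), padding='same',
                                activation=tf.nn.relu, name=name)

    net = conv(inputs, 64, 'conv1a')
    net = tf.layers.max_pooling3d(net, (1, 2, 2), (1, 2, 2), name='pool1')  # -> 16x56x56
    net = conv(net, 128, 'conv2a')
    net = tf.layers.max_pooling3d(net, (2, 2, 2), (2, 2, 2), name='pool2')  # -> 8x28x28
    net = conv(net, 256, 'conv3a')
    net = conv(net, 256, 'conv3b')
    net = tf.layers.max_pooling3d(net, (2, 2, 2), (2, 2, 2), name='pool3')  # -> 4x14x14
    net = conv(net, 512, 'conv4a')
    net = conv(net, 512, 'conv4b')
    net = tf.layers.max_pooling3d(net, (2, 2, 2), (2, 2, 2), name='pool4')  # -> 2x7x7
    net = conv(net, 512, 'conv5a')
    net = conv(net, 512, 'conv5b')
    net = tf.layers.max_pooling3d(net, (2, 2, 2), (2, 2, 2), padding='same',
                                  name='pool5')                             # -> 1x4x4
    net = tf.layers.flatten(net)                                            # 512*1*4*4 = 8192
    net = tf.layers.dense(net, 4096, activation=tf.nn.relu, name='fc6')
    net = tf.nn.dropout(net, keep_prob)
    net = tf.layers.dense(net, 4096, activation=tf.nn.relu, name='fc7')
    net = tf.nn.dropout(net, keep_prob)
    # Raw logits; train.py applies the softmax inside softmax_cross_entropy_with_logits.
    return tf.layers.dense(net, num_classes, name='fc8')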

test.py

import tensorflow as tf
import numpy as np
import C3D_model
import data_processing

TRAIN_LOG_DIR = 'Log/train/'
TRAIN_CHECK_POINT = 'check_point/train.ckpt-0'
TEST_LIST_PATH = 'test.list'
BATCH_SIZE = 1
NUM_CLASSES = 1  # originally 101
CROP_SIZE = 112
CHANNEL_NUM = 3
CLIP_LENGTH = 16
EPOCH_NUM = 1  # originally 50 (unused during testing)

test_num = data_processing.get_test_num(TEST_LIST_PATH)
test_video_indices = range(test_num)

with tf.Graph().as_default():
    batch_clips = tf.placeholder(tf.float32, [BATCH_SIZE, CLIP_LENGTH, CROP_SIZE, CROP_SIZE, CHANNEL_NUM], name='X')
    batch_labels = tf.placeholder(tf.int32, [BATCH_SIZE, NUM_CLASSES], name='Y')
    keep_prob = tf.placeholder(tf.float32)
    logits = C3D_model.C3D(batch_clips, NUM_CLASSES, keep_prob)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(batch_labels, 1)), np.float32))

    restorer = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Restore the weights saved by train.py
        restorer.restore(sess, TRAIN_CHECK_POINT)
        accuracy_epoch = 0
        batch_index = 0
        for i in range(test_num // BATCH_SIZE):
            if i % 10 == 0:
                print('Testing %d of %d' % (i + 1, test_num // BATCH_SIZE))
            batch_data, batch_index = data_processing.get_batches(TEST_LIST_PATH, NUM_CLASSES, batch_index,
                                                                  test_video_indices, BATCH_SIZE)
            accuracy_out = sess.run(accuracy, feed_dict={batch_clips: batch_data['clips'],
                                                         batch_labels: batch_data['labels'],
                                                         keep_prob: 1.0})
            accuracy_epoch += accuracy_out

    print('Test accuracy is %.5f' % (accuracy_epoch / (test_num // BATCH_SIZE)))
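
For completeness, here is a hypothetical sketch of the data_processing helpers both scripts rely on. Everything in it is an assumption inferred from the call sites: it assumes each line of train.list / test.list has the UCF101-style form "path/to/video.avi label" and that clips are decoded with OpenCV; the original module from the Zhihu post may instead load pre-extracted frames.

data_processing.py (sketch)

import cv2          # assumption: clips are decoded straight from video files with OpenCV
import numpy as np

CLIP_LENGTH = 16
CROP_SIZE = 112

def get_video_indices(list_path, validation_ratio=0.2):
    # Shuffle the line indices of the list file and split them into train/validation.
    with open(list_path) as f:
        num = len(f.readlines())
    indices = np.random.permutation(num)
    split = int(num * (1 - validation_ratio))
    return list(indices[:split]), list(indices[split:])

def get_test_num(list_path):
    # One clip per line in the list file.
    with open(list_path) as f:
        return len(f.readlines())

def get_batches(list_path, num_classes, batch_index, video_indices, batch_size):
    # Build a batch dict {'clips': [batch, 16, 112, 112, 3], 'labels': one-hot}
    # and return it together with the advanced batch_index.
    with open(list_path) as f:
        lines = f.readlines()
    clips, labels = [], []
    for idx in video_indices[batch_index:batch_index + batch_size]:
        path, label = lines[idx].strip().split()   # assumed format: "path/to/video.avi label"
        cap = cv2.VideoCapture(path)
        frames = []
        while len(frames) < CLIP_LENGTH:
            ret, frame = cap.read()
            if not ret:
                break
            frames.append(cv2.resize(frame, (CROP_SIZE, CROP_SIZE)))
        cap.release()
        if not frames:
            raise IOError('Could not read any frames from %s' % path)
        while len(frames) < CLIP_LENGTH:           # pad short videos by repeating the last frame
            frames.append(frames[-1])
        clips.append(np.array(frames, dtype=np.float32) / 255.0)
        one_hot = np.zeros(num_classes, dtype=np.int32)
        one_hot[int(label)] = 1
        labels.append(one_hot)
    return {'clips': np.array(clips), 'labels': np.array(labels)}, batch_index + batch_size

get_batches returns the updated batch_index so the loops in train.py and test.py can walk through the shuffled index list sequentially, one batch at a time.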

