tf.transpose():
转置, 可在NHWC与NCHW数据格式之间互相转换. 下例将图像转为NCHW格式:
if data_format == 'NCHW':
image = tf.transpose(image, perm=(2, 0, 1))  # 3-D单张图像: HWC -> CHW
或image = tf.transpose(image, [0, 3, 1, 2])  # 4-D批量数据: NHWC -> NCHW
tf.expand_dims(): 扩张一维, axis=0代表Batch
b_image = tf.expand_dims(processed_image, axis = 0)
tf.reshape(): 一般用于flatten操作, 改变tensor形状
values = tf.reshape(values, shape = [shape[0], -1, shape[-1]])
若参数为shape = [-1]则转变为1-D tensor
# tensor 't' is [[[1, 1, 1],
# [2, 2, 2]],
# [[3, 3, 3],
# [4, 4, 4]],
# [[5, 5, 5],
# [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
[-1]还可用于infer shape
# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [2, -1, 3]) ==> [[[1, 1, 1],
[2, 2, 2],
[3, 3, 3]],
[[4, 4, 4],
[5, 5, 5],
[6, 6, 6]]]
当输入为只含单个元素的tensor时, shape=[]代表reshape成标量
# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
tf.reduce_mean():
参考 https://blog.csdn.net/dcrmg/article/details/79797826
用于求某一维度的平均值, 进行降维
reduce_mean(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None)
axis为指定维度, keep_dims表示是否保持维度不变, 即是否进行降维(新版API中已改名为keepdims), reduction_indices为旧API已弃用.
类似函数还有:
- tf.reduce_sum :计算tensor指定轴方向上的所有元素的累加和;
- tf.reduce_max : 计算tensor指定轴方向上的各个元素的最大值;
- tf.reduce_all : 计算tensor指定轴方向上的各个元素的逻辑和(and运算);
- tf.reduce_any: 计算tensor指定轴方向上的各个元素的逻辑或(or运算);
打印tensor:
第一种:利用session.run()进行处理
sess = tf.Session()
print(sess.run(tensor))
第二种方法:
with tf.Session():
print(inp.eval())
tf.assign():
tf.assign(ref, value, validate_shape=None, use_locking=None, name=None)
函数完成了将value赋值给ref的作用. 其中:ref必须是tf.Variable创建的tensor,如果ref=tf.constant()会报错! value可以是变量或常量数组.
tf.assign()操作必须在sess.run()中运行, 否则不生效. 若在定义变量阶段定义需指定操作对象, 并将其放入sess.run()中.
同时,默认参数validate_shape = True时, shape(value)必须等于shape(ref). 当validate_shape = False时, 则直接不考虑形状完全复制.
类似的用法还有tf.assign_add(), tf.assign_sub()
也可直接调用tensor自带方法如w1.assign([[1, 2, 3], [4, 5, 6]])
tf.clip_by_value():
tf.clip_by_value(v, 2.5, 4.5)
限制范围, 低于2.5的替换成2.5, 高于4.5的替换成4.5, 一般用来保证概率属于[0, 1]
tf.matmul():
*是直接点乘, tf.matmul()是矩阵乘法
v1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
v2 = tf.constant([[5.0, 6.0], [7.0, 8.0]])
print((v1 * v2).eval()) #输出[[5. 12.] [21. 32.]]
print(tf.matmul(v1, v2).eval()) #输出[[19. 22.] [43. 50.]]
tf.split()
h, w = tf.split(tf.cast(shape, tf.float32), 2, axis=0)
将tensor分割成2部分, 在对应的维度上
tf.gather()
根据索引从tensor中提取出对应项
import tensorflow as tf
temp = (tf.range(0,10) + tf.constant(1,shape=[10])) * 10
temp2 = tf.gather(temp,[1,5,9])
with tf.Session() as sess:
print(sess.run(temp))
print(sess.run(temp2))
输出
[ 10 20 30 40 50 60 70 80 90 100]
[ 20 60 100]
tf.where()
tf.where()只有condition一个参数时返回condition中True元素的位置坐标, 形状为(num_of_True, rank(condition)); 对1-D的condition即(num_of_True, 1).
如condition=[True, False, True, False], tf.where(condition)返回[[0], [2]], 加上[:, 0]后 tf.where(condition)[:, 0]则只有位置坐标[0, 2]
tf.pad()
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0):
"""The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`"""
paddings的形状必须是[rank(tensor), 2], 即行数等于tensor的rank, 每行两个值分别代表在该维度上前、后padding的数量
For example:
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
# REFLECT: 以边缘元素为对称轴镜像, 不重复边缘元素本身(要求padding数 < 该维度大小)
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
# SYMMETRIC: 以边缘为对称轴镜像, 边缘元素本身也被重复(要求padding数 <= 该维度大小)
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
网友评论