Some advanced tensor operations

1. Merging and splitting
Merging
The two main ops here are concat and stack.
In [1]: import tensorflow as tf
In [2]: import numpy as np
In [3]: a = tf.fill([4,20,6],6)
In [4]: b = tf.fill([2,20,6],6)
In [5]: tf.concat([a,b],axis=0).shape
Out[5]: TensorShape([6, 20, 6])
In [6]: tf.concat([a,b],axis=1).shape
InvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [4,20,6] vs. shape[1] = [2,20,6] [Op:ConcatV2] name: concat
# concat only joins along the chosen axis; every other dimension must already match. a and b differ in axis 0 (4 vs. 2), so concatenating along axis=1 fails.
In [7]: a = tf.fill([4,20,6],6)
In [8]: b = tf.fill([4,20,6],6)
In [9]: tf.stack([a,b],axis=0).shape
Out[9]: TensorShape([2, 4, 20, 6])
In [10]: tf.stack([a,b],axis=1).shape
Out[10]: TensorShape([4, 2, 20, 6])
In [11]: tf.stack([a,b],axis=-1).shape
Out[11]: TensorShape([4, 20, 6, 2])
In [12]: tf.stack([a,b],axis=-2).shape
Out[12]: TensorShape([4, 20, 2, 6])
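To see how the two ops relate: concat joins along an existing axis, while stack always creates a new one. Stacking is therefore the same as giving each input the new axis via expand_dims and then concatenating along it. A minimal sketch:

import tensorflow as tf

a = tf.fill([4, 20, 6], 6)
b = tf.fill([4, 20, 6], 6)

# stack creates a brand-new axis 0 of length 2...
stacked = tf.stack([a, b], axis=0)                    # shape [2, 4, 20, 6]

# ...which matches expand_dims + concat along that new axis
expanded = tf.concat([tf.expand_dims(a, axis=0),
                      tf.expand_dims(b, axis=0)], axis=0)

print(bool(tf.reduce_all(stacked == expanded)))       # True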
Splitting:
unstack unpacks a tensor along an axis into a Python list; each piece loses that axis, which is why it looks like a dimension was dropped in the output below.
split keeps the rank unchanged, and num_or_size_splits controls how many elements each piece keeps along the split axis (it also accepts a list of sizes, as sketched after the transcript below).
In [13]: tf.unstack(a,axis=0).shape
AttributeError: 'list' object has no attribute 'shape'
# unstack returns a Python list of tensors, and a list has no .shape attribute
In [15]: res = tf.unstack(a,axis=0)
In [16]: res.__len__()
Out[16]: 4 # the resulting list holds four tensors
In [17]: res[0].shape,res[1].shape,res[2].shape,res[3].shape
Out[17]:
(TensorShape([20, 6]),
 TensorShape([20, 6]),
 TensorShape([20, 6]),
 TensorShape([20, 6]))
In [18]: res = tf.split(a,axis=2,num_or_size_splits=2)
In [19]: res.__len__()
Out[19]: 2
In [20]: res[0].shape,res[1].shape
Out[20]: (TensorShape([4, 20, 3]), TensorShape([4, 20, 3]))
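num_or_size_splits does not have to be a single integer: a list of sizes also works, as long as the sizes sum to the length of the split axis. A small sketch, reusing the same a:

import tensorflow as tf

a = tf.fill([4, 20, 6], 6)

# split axis 1 (length 20) into unequal pieces of 5, 5, and 10 elements
res = tf.split(a, num_or_size_splits=[5, 5, 10], axis=1)
print([r.shape for r in res])
# [TensorShape([4, 5, 6]), TensorShape([4, 5, 6]), TensorShape([4, 10, 6])]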
2. Statistics
Norms
In [21]: a = tf.reshape(tf.convert_to_tensor([0.,1.,2.,3.]),[2,2])
In [22]: a
Out[22]:
<tf.Tensor: id=36, shape=(2, 2), dtype=float32, numpy=
array([[0., 1.],
       [2., 3.]], dtype=float32)>
In [23]: tf.norm(a)
Out[23]: <tf.Tensor: id=42, shape=(), dtype=float32, numpy=3.7416575>
In [24]: tf.norm(a,axis=0)
Out[24]: <tf.Tensor: id=48, shape=(2,), dtype=float32, numpy=array([2. , 3.1622777], dtype=float32)>
In [25]: tf.norm(a,axis=1)
Out[25]: <tf.Tensor: id=54, shape=(2,), dtype=float32, numpy=array([1. , 3.6055512], dtype=float32)>
In [26]: tf.norm(a,ord=1)
Out[26]: <tf.Tensor: id=59, shape=(), dtype=float32, numpy=6.0>
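With no axis argument, tf.norm flattens the tensor and returns a single number: by default the Euclidean (L2) norm, and with ord=1 the sum of absolute values. A sketch checking both against the raw formulas:

import tensorflow as tf

a = tf.reshape(tf.convert_to_tensor([0., 1., 2., 3.]), [2, 2])

# L2 norm: sqrt(0^2 + 1^2 + 2^2 + 3^2) = sqrt(14) ≈ 3.7416575
print(tf.norm(a).numpy())
print(tf.sqrt(tf.reduce_sum(tf.square(a))).numpy())

# L1 norm: |0| + |1| + |2| + |3| = 6
print(tf.norm(a, ord=1).numpy())
print(tf.reduce_sum(tf.abs(a)).numpy())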
reduce_min/max/mean: the reduce prefix signals that a dimension is collapsed, i.e. the reduced axis disappears from the result.
In [27]: a = tf.random.normal([3,6])
In [28]: a
Out[28]:
<tf.Tensor: id=66, shape=(3, 6), dtype=float32, numpy=
array([[-0.20108105, -1.0516099 ,  0.54092735,  1.1491438 , -0.23376776,
        -1.1189908 ],
       [ 0.25646028,  0.70162463,  0.9700568 , -0.20591606, -0.15748529,
        -1.1059471 ],
       [ 0.5782163 , -0.29484197, -0.03458792, -2.7165434 ,  1.9942758 ,
        -1.8699492 ]], dtype=float32)>
In [29]: tf.reduce_max(a), tf.reduce_min(a), tf.reduce_mean(a)
Out[29]:
(<tf.Tensor: id=69, shape=(), dtype=float32, numpy=1.9942758>,
 <tf.Tensor: id=71, shape=(), dtype=float32, numpy=-2.7165434>,
 <tf.Tensor: id=73, shape=(), dtype=float32, numpy=-0.15555641>)
In [31]: tf.reduce_max(a,axis=1), tf.reduce_min(a,axis=1), tf.reduce_mean(a,axis=1)
Out[31]:
(<tf.Tensor: id=78, shape=(3,), dtype=float32, numpy=array([1.1491438, 0.9700568, 1.9942758], dtype=float32)>,
 <tf.Tensor: id=80, shape=(3,), dtype=float32, numpy=array([-1.1189908, -1.1059471, -2.7165434], dtype=float32)>,
 <tf.Tensor: id=82, shape=(3,), dtype=float32, numpy=array([-0.15256305,  0.07646555, -0.39057174], dtype=float32)>)
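If you need the result to keep its rank (a length-1 axis instead of a dropped one), all of these ops take keepdims=True, which makes broadcasting the statistic back against the input straightforward. A minimal sketch:

import tensorflow as tf

a = tf.random.normal([3, 6])

print(tf.reduce_mean(a, axis=1).shape)                 # (3,)
print(tf.reduce_mean(a, axis=1, keepdims=True).shape)  # (3, 1)

# the keepdims form broadcasts cleanly, e.g. to zero-center each row
centered = a - tf.reduce_mean(a, axis=1, keepdims=True)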
argmax/argmin
In [32]: a.shape
Out[32]: TensorShape([3, 6])
In [33]: tf.argmax(a)
Out[33]: <tf.Tensor: id=87, shape=(6,), dtype=int64, numpy=array([2, 1, 1, 0, 2, 1])>
In [34]: tf.argmax(a).shape
Out[34]: TensorShape([6])
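Note that tf.argmax (and tf.argmin) defaults to axis=0, which is why the [3, 6] input above yields 6 column-wise indices. Pass axis=1 for per-row indices; a quick sketch:

import tensorflow as tf

a = tf.random.normal([3, 6])

print(tf.argmax(a).shape)          # (6,)  index of the max in each column
print(tf.argmax(a, axis=1).shape)  # (3,)  index of the max in each row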
tf.equal
In [35]: a = tf.reshape(tf.range(4),[2,2])
In [36]: b = tf.fill([2,2],1)
In [37]: tf.equal(a,b)
Out[37]:
<tf.Tensor: id=100, shape=(2, 2), dtype=bool, numpy=
array([[False,  True],
       [False, False]])>
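The typical use of tf.equal is counting matches, e.g. classification accuracy: cast the boolean mask to numbers and average. A sketch with made-up predictions and labels:

import tensorflow as tf

pred = tf.constant([0, 1, 2, 2, 1])   # hypothetical predicted classes
label = tf.constant([0, 1, 2, 1, 1])  # hypothetical ground-truth classes

matches = tf.cast(tf.equal(pred, label), tf.float32)
accuracy = tf.reduce_mean(matches)
print(accuracy.numpy())               # 0.8 -- 4 of the 5 predictions match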
unique
In [41]: a = tf.constant([2,3,2,3,5])
In [42]: tf.unique(a)
Out[42]: Unique(y=<tf.Tensor: id=104, shape=(3,), dtype=int32, numpy=array([2, 3, 5], dtype=int32)>, idx=<tf.Tensor: id=105, shape=(5,), dtype=int32, numpy=array([0, 1, 0, 1, 2], dtype=int32)>)
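The idx tensor is what makes unique reversible: it records, for every original element, where its value landed in y, so tf.gather reconstructs the input exactly:

import tensorflow as tf

a = tf.constant([2, 3, 2, 3, 5])
y, idx = tf.unique(a)

print(tf.gather(y, idx).numpy())  # [2 3 2 3 5], identical to a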
3. Sorting
sort and argsort
In [43]: a = tf.random.shuffle(tf.range(5))
In [44]: a
Out[44]: <tf.Tensor: id=112, shape=(5,), dtype=int32, numpy=array([4, 3, 1, 0, 2], dtype=int32)>
In [45]: tf.sort(a)
Out[45]: <tf.Tensor: id=125, shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>
In [46]: tf.argsort(a)
Out[46]: <tf.Tensor: id=137, shape=(5,), dtype=int32, numpy=array([3, 2, 4, 1, 0], dtype=int32)>
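argsort returns the permutation that would sort the tensor, so gathering with it reproduces tf.sort; both ops also take a direction argument. A sketch:

import tensorflow as tf

a = tf.random.shuffle(tf.range(5))

idx = tf.argsort(a)                # the index order that would sort a
print(tf.gather(a, idx).numpy())   # [0 1 2 3 4], same as tf.sort(a)

print(tf.sort(a, direction='DESCENDING').numpy())  # [4 3 2 1 0]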
top_k
In [47]: a = tf.reshape(tf.convert_to_tensor([2,9,5,3,5,7,8,5,1]),[3,3])
In [48]: res = tf.math.top_k(a,3)
In [49]: res.indices
Out[49]:
<tf.Tensor: id=61, shape=(3, 3), dtype=int32, numpy=
array([[1, 2, 0],
       [2, 1, 0],
       [0, 1, 2]], dtype=int32)>
In [50]: res.values
Out[50]:
<tf.Tensor: id=60, shape=(3, 3), dtype=int32, numpy=
array([[9, 5, 2],
       [7, 5, 3],
       [8, 5, 1]], dtype=int32)>
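top_k is the building block of top-k accuracy: a prediction counts as correct if the true class appears anywhere among the k highest scores. A minimal sketch with made-up logits and labels:

import tensorflow as tf

logits = tf.constant([[0.1, 0.7, 0.2],  # hypothetical per-class scores
                      [0.5, 0.3, 0.2]])
labels = tf.constant([2, 0])            # hypothetical true classes

k = 2
topk = tf.math.top_k(logits, k).indices                # shape [batch, k]
hits = tf.equal(topk, tf.expand_dims(labels, axis=1))  # broadcast compare
acc = tf.reduce_mean(tf.cast(tf.reduce_any(hits, axis=1), tf.float32))
print(acc.numpy())  # 1.0 -- both true classes fall inside the top 2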
4. Padding and copying
tf.pad
In [51]: a = tf.fill([3,3],2)
In [52]: tf.pad(a,[[1,1],[2,2]])
Out[52]:
<tf.Tensor: id=125, shape=(5, 7), dtype=int32, numpy=
array([[0, 0, 0, 0, 0, 0, 0],
       [0, 0, 2, 2, 2, 0, 0],
       [0, 0, 2, 2, 2, 0, 0],
       [0, 0, 2, 2, 2, 0, 0],
       [0, 0, 0, 0, 0, 0, 0]], dtype=int32)>
In [53]: tf.pad(a,[[1,1],[0,0]])
Out[53]:
<tf.Tensor: id=128, shape=(5, 3), dtype=int32, numpy=
array([[0, 0, 0],
       [2, 2, 2],
       [2, 2, 2],
       [2, 2, 2],
       [0, 0, 0]], dtype=int32)>
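The padding argument is one [before, after] pair per dimension. A common use is adding a border to a [batch, height, width, channels] image tensor while leaving the batch and channel axes untouched; a sketch with a made-up batch:

import tensorflow as tf

images = tf.random.normal([8, 28, 28, 3])  # hypothetical image batch

# pad height and width by 2 on each side only
padded = tf.pad(images, [[0, 0], [2, 2], [2, 2], [0, 0]])
print(padded.shape)  # (8, 32, 32, 3)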
tf.tile
In [54]: a = tf.reshape(tf.range(4),[2,2])
In [55]: tf.tile(a,[1,1])
Out[55]:
<tf.Tensor: id=137, shape=(2, 2), dtype=int32, numpy=
array([[0, 1],
       [2, 3]], dtype=int32)>
In [56]: tf.tile(a,[1,2])
Out[56]:
<tf.Tensor: id=140, shape=(2, 4), dtype=int32, numpy=
array([[0, 1, 0, 1],
       [2, 3, 2, 3]], dtype=int32)>
In [57]: tf.tile(a,[2,1])
Out[57]:
<tf.Tensor: id=143, shape=(4, 2), dtype=int32, numpy=
array([[0, 1],
       [2, 3],
       [0, 1],
       [2, 3]], dtype=int32)>
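When the repeated data is only read, tf.broadcast_to can stand in for tile: it yields the same logical result without eagerly materializing the copies, though it can only expand length-1 axes, whereas tile repeats axes of any length. A sketch:

import tensorflow as tf

row = tf.constant([[0, 1]])           # shape [1, 2]

tiled = tf.tile(row, [3, 1])          # physically copies the data -> [3, 2]
broad = tf.broadcast_to(row, [3, 2])  # same values, no explicit copy

print(bool(tf.reduce_all(tiled == broad)))  # True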
5. Advanced ops