【碎语】今天需要观看的八节网课只剩下张量排序这两节课了,整理完,我就看《楚汉传奇》。然后明天重新启程,早上早点起来阅读文献。为了心中的季桃姑娘,需得有韩信般的稳重和忍耐。
1. 完全顺序排列
//排序,默认升序排列
tf.sort(
values,
axis=-1,
direction='ASCENDING',
name=None
)
Args:
-
values
: 1-D or higher numeric Tensor. -
axis
: The axis along which to sort. The default is -1, which sorts the last axis. -
direction
: The direction in which to sort the values ('ASCENDING'
or 'DESCENDING'
).
//获取排序后的元素在原数据中对应的索引号,默认升序排列
tf.argsort(
values,
axis=-1,
direction='ASCENDING',
stable=False,
name=None
)
Args:
-
values
: 1-D or higher numeric Tensor. -
axis
: The axis along which to sort. The default is -1, which sorts the last axis. -
direction
: The direction in which to sort the values ('ASCENDING'
or 'DESCENDING'
). -
stable
: If True, equal elements in the original tensor will not be re-ordered in the returned order. Unstable sort is not yet implemented, but will eventually be the default for performance reasons. If you require a stable order, pass stable=True
for forwards compatibility.
//练习示例
In [118]: a = tf.random.shuffle(tf.range(5))
In [119]: a
Out[119]: <tf.Tensor: id=253, shape=(5,), dtype=int32, numpy=array([2, 1, 0, 3, 4])>
//降序排列
In [120]: tf.sort(a, direction='DESCENDING')
Out[120]: <tf.Tensor: id=261, shape=(5,), dtype=int32, numpy=array([4, 3, 2, 1, 0])>
//获取降序排列元素的序列号
In [121]: tf.argsort(a, direction='DESCENDING')
Out[121]: <tf.Tensor: id=271, shape=(5,), dtype=int32, numpy=array([4, 3, 0, 1, 2])>
In [122]: idx = tf.argsort(a, direction='DESCENDING')
//通过降序排列的索引号和原数据做 gather,可以得到降序排列的结果
In [123]: tf.gather(a, idx)
Out[123]: <tf.Tensor: id=282, shape=(5,), dtype=int32, numpy=array([4, 3, 2, 1, 0])>
//对于高维数据进行排序,还可以通过指定维度来指定排序的维度
In [124]: a = tf.random.uniform([3, 3], maxval=10, dtype=tf.int32)
In [125]: a
Out[125]: <tf.Tensor: id=286, shape=(3, 3), dtype=int32, numpy=
array([[4, 5, 9],
[8, 9, 7],
[3, 2, 0]])>
//默认升序排列
In [126]: tf.sort(a)
Out[126]: <tf.Tensor: id=297, shape=(3, 3), dtype=int32, numpy=
array([[4, 5, 9],
[7, 8, 9],
[0, 2, 3]])>
//降序排列
In [127]: tf.sort(a, direction='DESCENDING')
Out[127]: <tf.Tensor: id=305, shape=(3, 3), dtype=int32, numpy=
array([[9, 5, 4],
[9, 8, 7],
[3, 2, 0]])>
In [128]: idx = tf.argsort(a)
//获取对应维度的排列序号
In [129]: idx
Out[129]: <tf.Tensor: id=316, shape=(3, 3), dtype=int32, numpy=
array([[0, 1, 2],
[2, 0, 1],
[2, 1, 0]])>
2. 【tf.math.top_k()】:只返回最值的前几个元素值和索引号
tf.math.top_k(
input,
k=1,
sorted=True, //默认降序
name=None
)
//运行实例
In [130]: a
Out[130]: <tf.Tensor: id=286, shape=(3, 3), dtype=int32, numpy=
array([[4, 5, 9],
[8, 9, 7],
[3, 2, 0]])>
In [132]: res = tf.math.top_k(a, 2)
In [133]: res.indices
Out[133]: <tf.Tensor: id=319, shape=(3, 2), dtype=int32, numpy=
array([[2, 1],
[1, 0],
[0, 1]])>
In [134]: res.values
Out[134]: <tf.Tensor: id=318, shape=(3, 2), dtype=int32, numpy=
array([[9, 5],
[9, 8],
[3, 2]])>
===================================================
//相关操作演示
In [136]: prob = tf.constant([[0.1, 0.2, 0.7], [0.2, 0.7, 0.1]])
In [138]: target = tf.constant([2, 0])
In [139]: k_b = tf.math.top_k(prob, 3).indices
In [140]: k_b
Out[140]: <tf.Tensor: id=324, shape=(2, 3), dtype=int32, numpy=
array([[2, 1, 0],
[1, 0, 2]])>
In [141]: k_b = tf.transpose(k_b, [1, 0])
In [142]: k_b
Out[142]: <tf.Tensor: id=326, shape=(3, 2), dtype=int32, numpy=
array([[2, 1],
[1, 0],
[0, 2]])>
In [143]: target = tf.broadcast_to(target, [3, 2])
In [144]: target
Out[144]: <tf.Tensor: id=328, shape=(3, 2), dtype=int32, numpy=
array([[2, 0],
[2, 0],
[2, 0]])>
- Top_k Accuracy计算流程
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.random.set_seed(2467)
# output为神经网络输出值
# target为实际值
# topk为想输出的top_k的值
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for each k in `topk`.

    Args:
        output: Tensor of shape (batch, num_classes) holding class
            scores or probabilities for each sample.
        target: Tensor of shape (batch,) holding integer ground-truth
            labels.
        topk: Tuple of k values to evaluate, e.g. (1, 5).

    Returns:
        A list of Python floats, one accuracy percentage per k in
        `topk`, in the same order.
    """
    max_k = max(topk)
    batch_size = target.shape[0]
    # (batch, max_k) indices of the best predictions, strongest first.
    ranked = tf.math.top_k(output, max_k).indices
    # Transpose to (max_k, batch) so row i is every sample's i-th guess.
    ranked = tf.transpose(ranked, perm=[1, 0])
    # Broadcast the labels to the same shape and compare element-wise.
    hits = tf.equal(ranked, tf.broadcast_to(target, ranked.shape))
    results = []
    for k in topk:
        # The first k rows together cover each sample's k best guesses.
        hit_count = tf.reduce_sum(tf.cast(hits[:k], dtype=tf.float32))
        results.append(float(hit_count * (100.0 / batch_size)))
    return results
output = tf.random.normal([10, 6]) # random scores: 10 samples over 6 classes
output = tf.math.softmax(output, axis=1) # softmax: each row's 6 class probabilities sum to 1
target = tf.random.uniform([10], maxval=6, dtype=tf.int32) # 10 integer labels in [0, 5]
print('prob:', output.numpy())
pred = tf.argmax(output, axis=1)
print('pred:', pred.numpy())
print('label:', target.numpy())
acc = accuracy(output, target, topk=(1, 2, 3, 4, 5, 6)) # evaluate top-1 through top-6 accuracy
print('top-1-6 acc:', acc)
网友评论