import tensorflow as tf
import tensorflow_datasets as tfds
import os
print(tf.__version__)
"""
Output: 2.5.0-dev20201226
"""
Download the data
import pathlib
DIRECTORY_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/'
FILE_NAMES = ['cowper.txt', 'derby.txt', 'butler.txt']
for name in FILE_NAMES:
  text_dir = tf.keras.utils.get_file(name, origin=DIRECTORY_URL + name)
  print(text_dir)

parent_dir = os.path.dirname(text_dir)
"""
Output:
C:\Users\Administrator\.keras\datasets\cowper.txt
C:\Users\Administrator\.keras\datasets\derby.txt
C:\Users\Administrator\.keras\datasets\butler.txt
"""
Load the text into datasets

Iterate through the files, loading each one into its own dataset.
Each example needs to be labeled individually, so use tf.data.Dataset.map to apply a labeler function to each one. This iterates over every example in the dataset and returns (example, label) pairs.
def labeler(example, index):
  return example, tf.cast(index, tf.int64)

labeled_data_sets = []
for i, file_name in enumerate(FILE_NAMES):
  lines_dataset = tf.data.TextLineDataset(os.path.join(parent_dir, file_name))
  labeled_dataset = lines_dataset.map(lambda ex: labeler(ex, i))
  labeled_data_sets.append(labeled_dataset)
Combine these labeled datasets into a single dataset, then shuffle it. Setting reshuffle_each_iteration=False fixes the shuffled order across iterations, which matters later if you split the data with take and skip.
BUFFER_SIZE = 50000
# Combine all the datasets into one
all_labeled_data = labeled_data_sets[0]
for labeled_dataset in labeled_data_sets[1:]:
  all_labeled_data = all_labeled_data.concatenate(labeled_dataset)
# Shuffle the data
all_labeled_data = all_labeled_data.shuffle(
    BUFFER_SIZE, reshuffle_each_iteration=False)
# Print the first 5 records
for ex in all_labeled_data.take(5):
  print(ex)
"""
(<tf.Tensor: shape=(), dtype=string, numpy=b'Instructed duly, and himself, his steps'>, <tf.Tensor: shape=(), dtype=int64, numpy=0>)
(<tf.Tensor: shape=(), dtype=string, numpy=b'not forget the threat that he had made Achilles, and called his trusty'>, <tf.Tensor: shape=(), dtype=int64, numpy=2>)
(<tf.Tensor: shape=(), dtype=string, numpy=b"Standing encompass'd by his dauntless troops,">, <tf.Tensor: shape=(), dtype=int64, numpy=0>)
(<tf.Tensor: shape=(), dtype=string, numpy=b'held Oechalia, the city of Oechalian Eurytus, these were commanded by'>, <tf.Tensor: shape=(), dtype=int64, numpy=2>)
(<tf.Tensor: shape=(), dtype=string, numpy=b'him."'>, <tf.Tensor: shape=(), dtype=int64, numpy=2>)
"""
Encode text lines as numbers

Build the vocabulary
# tfds.features.text has been deprecated upstream and removed in recent releases
# tokenizer = tfds.features.text.Tokenizer()
# For now, call the same class through the deprecated namespace
tokenizer = tfds.deprecated.text.Tokenizer()
vocabulary_set = set()
for text_tensor, _ in all_labeled_data:
  some_tokens = tokenizer.tokenize(text_tensor.numpy())
  vocabulary_set.update(some_tokens)
vocab_size = len(vocabulary_set)
vocab_size
"""
输出:17178
"""
Build an encoder by passing vocabulary_set to tfds.deprecated.text.TokenTextEncoder. The encoder's encode method takes a line of text and returns a list of integers.
encoder = tfds.deprecated.text.TokenTextEncoder(vocabulary_set)
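Before mapping the encoder over the whole dataset, you can try it on a single line (a quick sketch; the exact integer IDs vary from run to run because the vocabulary is built from an unordered set):
example_text = next(iter(all_labeled_data))[0].numpy()
print(example_text)
print(encoder.encode(example_text))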
def encode(text_tensor, label):
  encoded_text = encoder.encode(text_tensor.numpy())
  return encoded_text, label

def encode_map_fn(text, label):
  # tf.py_function cannot infer the shapes of the tensors it returns,
  # so set them explicitly after the call
  encoded_text, label = tf.py_function(encode,
                                       inp=[text, label],
                                       Tout=(tf.int64, tf.int64))
  encoded_text.set_shape([None])
  label.set_shape([])
  return encoded_text, label
all_encoded_data = all_labeled_data.map(encode_map_fn)
# Print the first 5 records
for ex in all_encoded_data.take(5):
  print(ex)
"""
Output:
(<tf.Tensor: shape=(6,), dtype=int64, numpy=array([ 2724, 4813, 14154, 7272, 12376, 16442], dtype=int64)>, <tf.Tensor: shape=(), dtype=int64, numpy=0>)
(<tf.Tensor: shape=(13,), dtype=int64, numpy=
array([12719, 5246, 4778, 6683, 411, 11103, 4013, 14029, 13412,
14154, 14991, 12376, 4255], dtype=int64)>, <tf.Tensor: shape=(), dtype=int64, numpy=2>)
(<tf.Tensor: shape=(7,), dtype=int64, numpy=array([14472, 14592, 8885, 4068, 12376, 16337, 11432], dtype=int64)>, <tf.Tensor: shape=(), dtype=int64, numpy=0>)
(<tf.Tensor: shape=(11,), dtype=int64, numpy=
array([10492, 5873, 4778, 14421, 15779, 9325, 4625, 15330, 9176,
2358, 4068], dtype=int64)>, <tf.Tensor: shape=(), dtype=int64, numpy=2>)
(<tf.Tensor: shape=(1,), dtype=int64, numpy=array([4992], dtype=int64)>, <tf.Tensor: shape=(), dtype=int64, numpy=2>)
"""
Note: this article follows the official TensorFlow tutorial, abridged and with some comments and modifications added.