TensorFlow 2: Neural-Network-Based Fuel Consumption (MPG) Prediction
This example uses the Auto MPG dataset, which contains 8 fields: MPG, Cylinders, Displacement, Horsepower, Weight, Acceleration, Model Year, and Origin. The first 5 rows of the data are printed by the code below:
# 1. Dataset
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, losses

dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                'Acceleration', 'Model Year', 'Origin']  # 8 columns: the MPG label plus 7 attributes
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                          na_values="?", comment='\t', sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.head()
dataset.isna().sum()  # count missing values
dataset = dataset.dropna()  # drop rows with missing values
dataset.isna().sum()  # count missing values again
# The Origin column encodes the 3 production regions as 1, 2, 3
# Pop (remove and return) the Origin column and one-hot encode it
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1) * 1.0
dataset['Europe'] = (origin == 2) * 1.0
dataset['Japan'] = (origin == 3) * 1.0
dataset.tail()
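# Equivalent one-hot encoding sketch (not part of the original post): pandas
# get_dummies builds the same three indicator columns in a single call.
# 'origin_onehot' is a hypothetical name and is not used further below.
origin_onehot = pd.get_dummies(origin.map({1: 'USA', 2: 'Europe', 3: 'Japan'}), dtype=float)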
# Split the data 8:2 into training and test sets
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# Pop the MPG (fuel efficiency) column as the ground-truth label y
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
# Inspect the statistics of the training features X
train_stats = train_dataset.describe()  # MPG was already popped above, so only the 9 feature columns remain
train_stats = train_stats.transpose()
# Standardize the data using the training-set statistics
def norm(x):
    return (x - train_stats['mean']) / train_stats['std']

normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
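# Optional sanity check (not in the original post): after standardization the
# training features should have roughly zero mean and unit standard deviation.
print(normed_train_data.mean().round(3))
print(normed_train_data.std().round(3))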
# Print the sizes of the training and test sets
print(normed_train_data.shape, train_labels.shape)
print(normed_test_data.shape, test_labels.shape)
# Build a Dataset object from the training split
train_db = tf.data.Dataset.from_tensor_slices((normed_train_data.values, train_labels.values))  # build the Dataset object
train_db = train_db.shuffle(100).batch(32)  # shuffle and batch
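# Optional sanity check (not in the original post): pull one batch from train_db
# to confirm the shapes fed to the network, expected (32, 9) for x and (32,) for y.
for x_batch, y_batch in train_db.take(1):
    print(x_batch.shape, y_batch.shape)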
# 2. Build the network
class Network(keras.Model):
    # Regression network
    def __init__(self):
        super(Network, self).__init__()
        # Create 3 fully connected layers
        self.fc1 = layers.Dense(64, activation="relu")
        self.fc2 = layers.Dense(64, activation="relu")
        self.fc3 = layers.Dense(1)

    def call(self, inputs, training=None, mask=None):
        # Pass through the 3 fully connected layers in turn
        x = self.fc1(inputs)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
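# For comparison only (a sketch, not the original author's code): the same 3-layer
# regression network expressed with the Keras Sequential API. 'sequential_model' is
# a hypothetical name and is not used in the training loop below.
sequential_model = keras.Sequential([
    layers.Dense(64, activation="relu"),
    layers.Dense(64, activation="relu"),
    layers.Dense(1)
])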
# 3. Training and testing: instantiate the network and create the optimizer
model = Network()
model.build(input_shape=(4, 9))  # 9 input features; 4 is just a placeholder batch size for build()
model.summary()
optimizer = tf.keras.optimizers.RMSprop(0.001)
for epoch in range(200):
    for step, (x, y) in enumerate(train_db):
        with tf.GradientTape() as tape:  # gradient tape
            out = tf.squeeze(model(x), axis=1)  # forward pass; squeeze (b, 1) -> (b,) to match y
            loss = tf.reduce_mean(losses.MSE(y, out))  # compute MSE
            mae_loss = tf.reduce_mean(losses.MAE(y, out))  # compute MAE
        if step % 10 == 0:
            print(epoch, step, float(loss))
        # Compute and apply the gradients
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
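# Evaluation sketch (an assumption, not shown in the original post): measure the
# MAE of the trained model on the held-out test set.
test_out = tf.squeeze(model(tf.constant(normed_test_data.values, dtype=tf.float32)), axis=1)
test_mae = tf.reduce_mean(losses.MAE(test_labels.values, test_out))
print('Test MAE:', float(test_mae))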