The goal of this exercise is to build a neural network with a single hidden layer.
Before starting, you need the test-data file and the utility module:
Link: https://pan.baidu.com/s/1nHzEKyJHVzrKlbBFGmM9lQ Password: dc4b

testCases contains the test data, and planar_utils contains the helper functions.
Then install the h5py, matplotlib, and sklearn libraries; if you don't already have numpy, install it as well.
The full code follows.
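These can typically be installed with pip (note that the sklearn package is published on PyPI under the name scikit-learn):

pip install numpy h5py matplotlib scikit-learn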
import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
# Set a fixed random seed for reproducibility
np.random.seed(1)
# Define the network structure
def layer_sizes(X, Y):
    n_x = X.shape[0]  # size of the input layer (number of features)
    n_h = 4           # size of the hidden layer, fixed to 4 here
    n_y = Y.shape[0]  # size of the output layer
    return (n_x, n_h, n_y)
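As a quick sanity check of the shape convention (features in rows, examples in columns), here is an illustrative call; the array sizes below are my own choices, not the real dataset's:

# Illustrative shapes only: 2 features, 400 examples, 1 output per example
X_demo = np.random.randn(2, 400)
Y_demo = np.random.randn(1, 400)
print(layer_sizes(X_demo, Y_demo))  # (2, 4, 1)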
# Build the initialization function
def initialize_parameters(n_x, n_h, n_y):
    np.random.seed(2)  # fixed seed so the results are reproducible
    W1 = np.random.randn(n_h, n_x) * 0.01  # small random values break symmetry
    b1 = np.zeros(shape=(n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros(shape=(n_y, 1))
    assert W1.shape == (n_h, n_x)
    assert b1.shape == (n_h, 1)
    assert W2.shape == (n_y, n_h)
    assert b2.shape == (n_y, 1)
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}
    return parameters
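A quick illustrative call (the layer sizes match the demo above and are assumptions, not the real dataset's):

params = initialize_parameters(2, 4, 1)
print(params["W1"].shape, params["b1"].shape)  # (4, 2) (4, 1)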
# Build the forward propagation function
def forward_propagation(X, parameters):
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)          # tanh activation for the hidden layer
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)          # sigmoid activation for the output layer
    assert A2.shape == (1, X.shape[1])
    cache = {"Z1": Z1,
             "A1": A1,
             "Z2": Z2,
             "A2": A2}
    return A2, cache
# Build the cost function, computing the loss J
def compute_cost(A2, Y, parameters):  # parameters is unused; kept for a consistent interface
    m = Y.shape[1]
    logprobs = Y * np.log(A2) + (1 - Y) * np.log(1 - A2)
    cost = -1 / m * np.sum(logprobs)
    cost = np.squeeze(cost)  # turn a 0-d array into a plain scalar
    assert isinstance(cost, float)
    return cost
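For reference, compute_cost implements the binary cross-entropy cost averaged over all m training examples:

J = -\frac{1}{m}\sum_{i=1}^{m}\left[y^{(i)}\log a_2^{(i)} + \left(1 - y^{(i)}\right)\log\left(1 - a_2^{(i)}\right)\right]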
# Build the backward propagation function
def backward_propagation(parameters, cache, X, Y):
    m = X.shape[1]
    W2 = parameters["W2"]
    A1 = cache["A1"]
    A2 = cache["A2"]
    dZ2 = A2 - Y  # gradient of the cross-entropy cost w.r.t. Z2
    dW2 = 1 / m * np.dot(dZ2, A1.T)
    db2 = 1 / m * np.sum(dZ2, axis=1, keepdims=True)
    dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))  # tanh'(Z1) = 1 - A1**2
    dW1 = 1 / m * np.dot(dZ1, X.T)
    db1 = 1 / m * np.sum(dZ1, axis=1, keepdims=True)
    grads = {"dW1": dW1,
             "db1": db1,
             "dW2": dW2,
             "db2": db2}
    return grads
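Before relying on these formulas, it can be worth comparing one analytic gradient against a centered finite-difference estimate. The sketch below is my own addition, not part of the assignment; the helper name numerical_grad_check and the value of eps are arbitrary choices:

# Compare analytic dW2 against a numerical gradient (sanity check, not from the assignment)
def numerical_grad_check(X, Y, parameters, eps=1e-6):
    _, cache = forward_propagation(X, parameters)
    grads = backward_propagation(parameters, cache, X, Y)
    W2 = parameters["W2"]
    num_dW2 = np.zeros_like(W2)
    for i in range(W2.shape[0]):
        for j in range(W2.shape[1]):
            W2[i, j] += eps
            cost_plus = compute_cost(forward_propagation(X, parameters)[0], Y, parameters)
            W2[i, j] -= 2 * eps
            cost_minus = compute_cost(forward_propagation(X, parameters)[0], Y, parameters)
            W2[i, j] += eps  # restore the original value
            num_dW2[i, j] = (cost_plus - cost_minus) / (2 * eps)
    return np.max(np.abs(num_dW2 - grads["dW2"]))

A maximum difference on the order of 1e-7 or smaller suggests the analytic dW2 is correct; the same check can be repeated for the other gradients.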
def update_parameters(parameters, grads, learning_rate=1.2):
    # One gradient descent step on each parameter
    W1 = parameters["W1"] - learning_rate * grads["dW1"]
    b1 = parameters["b1"] - learning_rate * grads["db1"]
    W2 = parameters["W2"] - learning_rate * grads["dW2"]
    b2 = parameters["b2"] - learning_rate * grads["db2"]
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}
    return parameters
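In equation form, every parameter θ ∈ {W1, b1, W2, b2} follows the standard gradient descent update with learning rate α (1.2 in this code):

\theta := \theta - \alpha \frac{\partial J}{\partial \theta}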
# Assemble the neural network model in nn_model
def nn_model(X, Y, n_h, num_iterations=10000, print_cost=False):
    np.random.seed(3)
    n_x = layer_sizes(X, Y)[0]
    n_y = layer_sizes(X, Y)[2]
    parameters = initialize_parameters(n_x, n_h, n_y)
    for i in range(num_iterations):
        A2, cache = forward_propagation(X, parameters)
        cost = compute_cost(A2, Y, parameters)
        grads = backward_propagation(parameters, cache, X, Y)
        parameters = update_parameters(parameters, grads, learning_rate=1.2)
        if print_cost and i % 1000 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
    return parameters
# Build the prediction function: use forward propagation to predict the results
def predict(parameters, X):
    A2, cache = forward_propagation(X, parameters)
    predictions = np.round(A2)  # probability > 0.5 -> class 1, otherwise class 0
    return predictions
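In equation form, the prediction for each example is a 0.5 threshold on the output activation:

y^{(i)}_{\text{prediction}} = \begin{cases} 1 & \text{if } a_2^{(i)} > 0.5 \\ 0 & \text{otherwise} \end{cases}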
# Run the model
X, Y = load_planar_dataset()
parameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=True)
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
# Compared with logistic regression, accuracy improves: the neural network can learn
# a non-linear decision boundary (a baseline sketch is shown below).
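For comparison, here is a minimal sketch of the logistic regression baseline the comment above refers to, using scikit-learn's LogisticRegressionCV (the printout format is my own):

# Logistic regression baseline (sketch); sklearn expects samples as rows
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X.T, Y.T.ravel())
LR_predictions = clf.predict(X.T)
print("Accuracy of logistic regression: %.1f %%" % (np.mean(LR_predictions == Y.ravel()) * 100))
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")

Since the planar dataset is not linearly separable, this linear baseline performs noticeably worse than the one-hidden-layer network.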
# Now try several different hidden layer sizes
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 10, 20]
for i, n_h in enumerate(hidden_layer_sizes):
    plt.subplot(5, 2, i + 1)
    plt.title("Hidden Layer of size %d" % n_h)
    parameters = nn_model(X, Y, n_h, num_iterations=5000)
    plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
    predictions = predict(parameters, X)
    accuracy = float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100)
    print("Accuracy for {} hidden units: {} %".format(n_h, accuracy))
The code was written with reference to the articles by the following two bloggers; many thanks for their generous sharing:
https://blog.csdn.net/u013733326/article/details/79702148
https://www.kesci.com/home/project/5dd3946900b0b900365f3a48
Compared with the L1W2 programming assignment, the steps for building a neural network are quite similar:
1. Define the network structure: determine the input layer, output layer, and hidden layer.
2. Build an initialization function to obtain the initial parameters W and b.
3. Build a forward propagation function, choosing suitable activation functions such as sigmoid or tanh.
4. Build a cost function to compute the loss J.
5. Build a backward propagation function to obtain the gradients of the parameters.
6. Build an optimization function that updates the parameters with a chosen learning rate.
7. Assemble the neural network model function, whose output is the learned parameters.
8. Build a prediction function that uses the parameters learned by the model to make predictions and report accuracy.