softmax.py
import numpy as np


def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using explicit loops.     #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    num_sample = X.shape[0]
    num_class = W.shape[1]
    for i in range(num_sample):
        # Shift the scores so the largest entry is 0; this avoids overflow in
        # np.exp without changing the softmax probabilities.
        score_row = X[i].dot(W)
        score_row -= np.max(score_row)
        # Cross-entropy loss for this example: -log(probability of the true class).
        loss -= np.log(np.exp(score_row[y[i]]) / np.sum(np.exp(score_row)))
        for j in range(num_class):
            # Softmax probability of class j for example i.
            P = np.exp(score_row[j]) / np.sum(np.exp(score_row))
            # Gradient w.r.t. column j of W is (P - 1{j == y[i]}) * X[i].
            if j == y[i]:
                dW[:, j] += X[i, :] * (P - 1)
            else:
                dW[:, j] += X[i, :] * P
    # Average over the minibatch, then add the L2 regularization term.
    loss /= num_sample
    dW /= num_sample
    loss += reg * np.sum(W * W)
    dW += 2 * reg * W
    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return loss, dW


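# A minimal sanity-check sketch (not part of the assignment interface): with
# W = 0 every class gets probability 1/C, so the expected data loss is log(C).
# The shapes, seed, and helper name below are illustrative assumptions only.
def _sanity_check_naive():
    np.random.seed(0)
    W = np.zeros((10, 3))              # D = 10 features, C = 3 classes
    X = np.random.randn(5, 10)         # N = 5 examples
    y = np.random.randint(3, size=5)
    loss, dW = softmax_loss_naive(W, X, y, reg=0.0)
    # With zero weights the softmax is uniform, so the loss should be log(3).
    assert abs(loss - np.log(3)) < 1e-8
    assert dW.shape == W.shape

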
def softmax_loss_vectorized(W, X, y, reg):
"""
Softmax loss function, vectorized version.
Inputs and outputs are the same as softmax_loss_naive.
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    num_sample = X.shape[0]
    # Compute all class scores at once and shift each row by its maximum
    # for numerical stability.
    score = X.dot(W)
    score -= np.max(score, axis=1, keepdims=True)
    # Row-wise softmax probabilities, shape (N, C).
    prob = np.exp(score) / np.sum(np.exp(score), axis=1, keepdims=True)
    # Data loss: sum of -log(probability of the correct class) over the batch.
    loss = -np.sum(np.log(prob[range(num_sample), y]))
    # Gradient of the loss w.r.t. the scores is prob - one_hot(y);
    # backpropagate through the matrix product to get dW.
    prob[range(num_sample), y] -= 1
    dW = X.T.dot(prob)
    loss /= num_sample
    dW /= num_sample
    loss += reg * np.sum(W * W)
    dW += 2 * reg * W
    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return loss, dW
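

# A minimal usage sketch, assuming the file is run directly. It only checks that
# the two implementations agree on random data; the sizes, seed, and reg value
# are arbitrary illustrative choices, not part of the assignment.
if __name__ == "__main__":
    _sanity_check_naive()

    np.random.seed(1)
    W = np.random.randn(3073, 10) * 0.0001    # e.g. CIFAR-10-sized weights
    X = np.random.randn(200, 3073)
    y = np.random.randint(10, size=200)

    loss_naive, grad_naive = softmax_loss_naive(W, X, y, reg=5e-6)
    loss_vec, grad_vec = softmax_loss_vectorized(W, X, y, reg=5e-6)

    print("naive loss:      %f" % loss_naive)
    print("vectorized loss: %f" % loss_vec)
    print("loss difference: %e" % abs(loss_naive - loss_vec))
    print("grad difference: %e" % np.linalg.norm(grad_naive - grad_vec))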