PyTorch Cross-Entropy Loss Tutorial (2): torch.nn.NLLLoss

Author: 纵春水东流 | Published 2022-06-15 12:40

    1.2 torch.nn.NLLLoss
    (1) Formula
    Compute the loss for each of the N samples:
    \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_{y_n} x_{n,y_n}, \quad w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\}
    The N per-sample losses are then summed or averaged (the default is to average):
    \ell(x, y) = \begin{cases} \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & \text{if reduction} = \text{`mean';}\\ \sum_{n=1}^N l_n, & \text{if reduction} = \text{`sum'.} \end{cases}
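
    To make the reduction concrete, here is a minimal sketch (with made-up log-probabilities and class weights, chosen arbitrarily for illustration) that computes l_n = -w_{y_n} x_{n,y_n} by hand and checks it against nn.NLLLoss:

    import torch
    import torch.nn as nn

    # made-up log-probabilities for 2 samples over 3 classes
    x = torch.tensor([[-0.1, -2.0, -3.0],
                      [-1.5, -0.3, -2.2]])
    y = torch.tensor([0, 1])           # target class indices
    w = torch.tensor([1.0, 2.0, 1.0])  # per-class weights

    # per-sample loss: l_n = -w_{y_n} * x_{n, y_n}
    l = -w[y] * x[torch.arange(2), y]
    # 'mean' reduction divides by the summed weights of the targets
    manual = l.sum() / w[y].sum()

    assert torch.isclose(nn.NLLLoss(weight=w)(x, y), manual)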

    (2) Inputs
    1) The model's predicted log-probabilities, i.e. the output of LogSoftmax (NLLLoss expects log-probabilities, not Softmax outputs), shape (n_samples, n_class), dtype torch.float
    2) The ground-truth labels, shape (n_samples,), dtype torch.long
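
    Since this series is about cross-entropy, note the standard identity: nn.CrossEntropyLoss applied to raw logits equals LogSoftmax followed by nn.NLLLoss. A quick check (random logits, purely illustrative):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    logits = torch.randn(4, 3)          # raw scores: 4 samples, 3 classes
    target = torch.randint(0, 3, (4,))  # class indices, dtype torch.long

    ce  = nn.CrossEntropyLoss()(logits, target)
    nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), target)
    assert torch.isclose(ce, nll)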

    (3) Example 1

        >>> m = nn.LogSoftmax(dim=1)  # dim 0 is n_samples, dim 1 is n_class
        >>> loss = nn.NLLLoss()
        >>> # input is of size N x C = 3 x 5: three samples, five classes
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> # each element in target has to have 0 <= value < C
        >>> target = torch.tensor([1, 0, 4])  # ground-truth labels of the three samples
        >>> output = loss(m(input), target)
        >>> output.backward()
    
    In [38]: input
    Out[38]: 
    tensor([[ 0.4306,  0.6048, -0.1052, -1.2991,  0.9104],
            [ 0.2345,  0.5741,  1.2577,  2.4803,  2.7261],
            [-0.1931,  0.7119,  0.2320,  0.6528, -1.4365]], requires_grad=True)
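
    With the default reduction='mean' and no class weights, output is simply the negated average of the log-probabilities picked out by target. A quick verification against the snippet above:

    logp = m(input)
    manual = -logp[torch.arange(3), target].mean()
    assert torch.isclose(output, manual)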
    

    (4) Example 2
    load data

    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler
    
    iris = load_iris()
    X = iris['data']
    y = iris['target']
    names = iris['target_names']
    feature_names = iris['feature_names']
    
    # Scale data to have mean 0 and variance 1 
    # which is important for convergence of the neural network
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    
    # Split the data set into training and testing
    X_train, X_test, y_train, y_test = train_test_split(
        X_scaled, y, test_size=0.2, random_state=2)
    

    build model

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self, input_dim):
            super(Model, self).__init__()
            self.layer1 = nn.Linear(input_dim, 50)
            self.layer2 = nn.Linear(50, 50)
            self.layer3 = nn.Linear(50, 3)

        def forward(self, x):
            x = F.relu(self.layer1(x))
            x = F.relu(self.layer2(x))
            # NLLLoss expects log-probabilities, so end with log_softmax,
            # not softmax (softmax here would silently give the wrong loss)
            x = F.log_softmax(self.layer3(x), dim=1)
            return x

    model     = Model(X_train.shape[1])
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    loss_fn   = nn.NLLLoss()
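
    A quick sanity check of the forward pass (a sketch, assuming the block above has run): the network maps (batch, 4) features to (batch, 3) log-probabilities, so each exponentiated row should sum to 1:

    with torch.no_grad():
        demo = model(torch.randn(2, X_train.shape[1]))
    print(demo.shape)         # torch.Size([2, 3])
    print(demo.exp().sum(1))  # each row sums to (approximately) 1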
    

    train model

    import tqdm
    import numpy as np

    EPOCHS  = 100
    # plain tensors suffice; the deprecated Variable wrapper is not needed
    X_train = torch.from_numpy(X_train).float()
    y_train = torch.from_numpy(y_train).long()
    X_test  = torch.from_numpy(X_test).float()
    y_test  = torch.from_numpy(y_test).long()
    
    loss_list     = np.zeros((EPOCHS,))
    accuracy_list = np.zeros((EPOCHS,))
    
    for epoch in tqdm.trange(EPOCHS):
        y_pred = model(X_train)
        loss = loss_fn(y_pred, y_train)
        loss_list[epoch] = loss.item()
        
        # Zero gradients
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        with torch.no_grad():
            y_pred = model(X_test)
            correct = (torch.argmax(y_pred, dim=1) == y_test).float()
            accuracy_list[epoch] = correct.mean().item()
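
    Once trained, the model can be used for inference directly; a small sketch mapping predicted class indices back to the iris species names loaded earlier:

    # argmax over log-probabilities gives the predicted class index
    with torch.no_grad():
        pred_idx = torch.argmax(model(X_test), dim=1)
    print(names[pred_idx.numpy()][:5])  # first five predicted species names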
    

    plot validation accuracy and training loss

    import matplotlib.pyplot as plt
    fig, (ax1, ax2) = plt.subplots(2, figsize=(12, 6), sharex=True)
    
    ax1.plot(accuracy_list)
    ax1.set_ylabel("validation accuracy")
    ax2.plot(loss_list)
    ax2.set_ylabel("training loss")
    ax2.set_xlabel("epochs");
    plt.show()
    

    plot roc curve

    from sklearn.metrics import roc_curve, auc
    from sklearn.preprocessing import OneHotEncoder
    
    plt.figure(figsize=(10, 10))
    plt.plot([0, 1], [0, 1], 'k--')
    
    # One-hot encode the integer labels for the micro-averaged ROC curve
    enc = OneHotEncoder()
    Y_onehot = enc.fit_transform(y_test.numpy()[:, np.newaxis]).toarray()

    with torch.no_grad():
        # the model outputs log-probabilities; exp() maps them back to
        # probabilities (monotonic, so the ROC curve is unchanged)
        y_pred = torch.exp(model(X_test)).numpy()
        fpr, tpr, threshold = roc_curve(Y_onehot.ravel(), y_pred.ravel())
        
    plt.plot(fpr, tpr, label='AUC = {:.3f}'.format(auc(fpr, tpr)))
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve')
    plt.legend();
    plt.show()
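
    The curve above micro-averages all three classes. Per-class curves come from the same one-hot matrix, one column at a time (a sketch reusing the names defined in the blocks above):

    # per-class ROC curves on the same axes
    plt.figure(figsize=(10, 10))
    plt.plot([0, 1], [0, 1], 'k--')
    for i in range(3):
        fpr_i, tpr_i, _ = roc_curve(Y_onehot[:, i], y_pred[:, i])
        plt.plot(fpr_i, tpr_i,
                 label='{} AUC = {:.3f}'.format(names[i], auc(fpr_i, tpr_i)))
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.legend();
    plt.show()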
    
