- The error raised when computing the loss with PyTorch's nn.CrossEntropyLoss()
File "test.py", line 49, in predictFromDataset
loss = loss_fn(outputs,targets)
File "D:\anaconda3\envs\py3\lib\site-packages\torch\nn\modules\module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "D:\anaconda3\envs\py3\lib\site-packages\torch\nn\modules\loss.py", line 948, in forward
ignore_index=self.ignore_index, reduction=self.reduction)
File "D:\anaconda3\envs\py3\lib\site-packages\torch\nn\functional.py", line 2422, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "D:\anaconda3\envs\py3\lib\site-packages\torch\nn\functional.py", line 2218, in nll_loss
ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target' in call to _thnn_nll_loss_forward
- Code
import torch

def predictFromDataset(model, dataLoader, transform=None, device=torch.device("cpu")):
    loss_fn = torch.nn.CrossEntropyLoss()
    total_loss = 0
    model.eval()
    with torch.no_grad():
        for inputs, targets in dataLoader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs = model(inputs)
            print("outputs:", outputs)
            print("targets:", targets)
            loss = loss_fn(outputs, targets)
            total_loss += loss.data.item() * inputs.size(0)
            # print(loss)
    print("averageLoss:{}".format(total_loss / len(dataLoader.dataset)))
The printed outputs and targets:
outputs: tensor([[-4.1964, -0.5367, -2.6573, 17.1077, -5.7188,  5.4721, -2.1010, -1.5360,
                  -1.3869, -4.9383],
                 [ 2.6306, 14.9913, -7.4001, -8.3108, -5.6915, -4.8937, -4.5119, -7.8608,
                  22.5722, -0.1781]], device='cuda:0')
targets: tensor([[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
                 [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]], device='cuda:0')
Cause of the error
CrossEntropyLoss does not expect a one-hot encoded vector as the target, but class indices
In other words, CrossEntropyLoss expects the class index, not a one-hot encoding: instead of a vector like [0,0,0,1,0,0] the target should simply be 3. Adjusting the target in the custom Dataset class fixes this.
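For reference, a minimal sketch (with made-up logits, independent of the code above) of the shapes and dtypes CrossEntropyLoss expects: logits of shape (batch, num_classes) as float, targets of shape (batch,) as int64 class indices.

    import torch

    loss_fn = torch.nn.CrossEntropyLoss()

    # logits: (batch, num_classes) float tensor, e.g. 2 samples and 10 classes
    outputs = torch.randn(2, 10)

    # correct targets: class indices of shape (batch,), dtype int64 (long)
    targets = torch.tensor([3, 8])
    loss = loss_fn(outputs, targets)
    print(loss.item())

    # Passing a (batch, num_classes) one-hot float tensor as the target instead
    # reproduces the RuntimeError above on the PyTorch version used here
    # (newer releases, >= 1.10, would treat such a float tensor as class
    # probabilities rather than raising).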
- Fix: modify the target in the dataset. The original target was one-hot encoded; change it to a class index, i.e. the label = np.argmax(label) line below.
import numpy as np
import torch
from PIL import Image

class myTestDataset(torch.utils.data.Dataset):
    def __init__(self, transform=None):
        images = np.load('data_src.npy')
        labels = np.load('label_src.npy')
        self.images = [Image.fromarray(x) for x in images]
        self.labels = labels / labels.sum(axis=1, keepdims=True)  # normalize
        self.labels = self.labels.astype(np.float32)
        self.transform = transform

    def __getitem__(self, index):
        image = self.images[index]
        label = self.labels[index]
        label = np.argmax(label)  # this is the line added compared to the original
        if self.transform:
            image = self.transform(image)
        return image, label

    def __len__(self):
        return len(self.images)
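A hedged usage sketch of the fixed dataset (assuming data_src.npy and label_src.npy exist, and that model is a trained network producing (batch, num_classes) logits, defined elsewhere). The default DataLoader collation turns the np.argmax result into a LongTensor, which is exactly what CrossEntropyLoss needs:

    import torch
    from torchvision import transforms

    transform = transforms.Compose([transforms.ToTensor()])
    dataset = myTestDataset(transform=transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    predictFromDataset(model, loader, device=device)  # model assumed to be defined elsewhere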
Note: the targets used in the loss computation must be of type int64 (long); you can also cast at call time with loss = loss_fn(outputs, targets.long()).
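If you would rather keep the one-hot labels in the dataset, an alternative sketch is to convert at loss time instead of in __getitem__:

    # targets: one-hot float tensor of shape (batch, num_classes)
    loss = loss_fn(outputs, targets.argmax(dim=1))  # derive class indices on the fly
    # or, if targets already hold class indices but stored as floats:
    # loss = loss_fn(outputs, targets.long())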