PyTorch Tips Collection

Author: 深思海数_willschang | Published 2021-08-28 10:39

Check the PyTorch version and the operating system type

import os
import torch

print(torch.__version__)
# posix, nt, java correspond to Linux, Windows, and the Java virtual machine
print(os.name)

Check whether torch supports CUDA (GPU), the number of GPUs, and the GPU name

import torch

# whether a GPU is available
print(torch.cuda.is_available())
# CUDA version
print(torch.version.cuda)
# number of GPUs
print(torch.cuda.device_count())
# GPU name; index 0 is the first GPU
print(torch.cuda.get_device_name(0))
# index of the current device
print(torch.cuda.current_device())
"""
True
10.2
1
GeForce GTX 1660 Ti
0
"""

Global definitions for device and workers

import os
import torch

# CPU or GPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# num_workers used for the DataLoader's worker processes (0 on Windows, i.e. os.name == 'nt')
workers = 0 if os.name == 'nt' else 4
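
As a minimal sketch (the random dataset and one-layer model below are illustrative, not from the original post), these two globals are typically consumed via num_workers=workers in the DataLoader and .to(device) on the model and each batch:

from torch.utils.data import DataLoader, TensorDataset

# dummy data: 100 RGB images of size 96x96 with integer class labels
dataset = TensorDataset(torch.randn(100, 3, 96, 96), torch.randint(0, 10, (100,)))
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=workers)

model = torch.nn.Conv2d(3, 8, kernel_size=3).to(device)
for xb, yb in loader:
    out = model(xb.to(device))  # move each batch to the same device as the model
    break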

Common PyTorch imports

%matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
import numpy as np
import pandas as pd
import os
import copy
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, random_split
import torchvision.transforms as transforms
from torchvision import utils
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchsummary import summary

# CPU or GPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# num_workers used for the DataLoader's worker processes
workers = 0 if os.name == 'nt' else 4
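
As a minimal sketch (the small CNN below is hypothetical, not part of the original post), these imports can be exercised by moving a model to device and printing a layer-by-layer summary with torchsummary; depending on the installed torchsummary version, the device argument accepts the 'cuda'/'cpu' string defined above:

model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(8 * 47 * 47, 10),
).to(device)

# layer-by-layer parameter and output-shape summary for a 3x96x96 input
summary(model, (3, 96, 96), device=device)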

Check which device data or a model is on

import torch
import torch.nn as nn
 
# ----------- Check whether a model is on the CPU or the GPU -----------

model = nn.LSTM(input_size=10, hidden_size=4, num_layers=1, batch_first=True)
print(next(model.parameters()).device)  # output: cpu

model = model.cuda()
print(next(model.parameters()).device)  # output: cuda:0

model = model.cpu()
print(next(model.parameters()).device)  # output: cpu

# ----------- Check whether data is on the CPU or the GPU -----------

data = torch.ones([2, 3])
print(data.device)  # output: cpu

data = data.cuda()
print(data.device)  # output: cuda:0

data = data.cpu()
print(data.device)  # output: cpu

You can also use .is_cuda to check whether a model or its data is on the GPU, e.g. data.is_cuda.
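
A minimal sketch of the .is_cuda check, reusing the data tensor and model from the snippet above:

# .is_cuda returns a bool: True only when the tensor (or the model's parameters) lives on a GPU
print(data.is_cuda)                      # False after data = data.cpu()
print(next(model.parameters()).is_cuda)  # False after model = model.cpu()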

Compute the output size of a convolutional layer

# compute the output size of a conv layer, optionally followed by pooling
import numpy as np
import torch.nn as nn

def get_conv2d_out_shape(H_in, W_in, conv, pool=2):
    # get conv arguments
    kernel_size = conv.kernel_size
    stride = conv.stride
    padding = conv.padding
    dilation = conv.dilation

    # Ref: https://pytorch.org/docs/stable/nn.html
    H_out = np.floor((H_in + 2*padding[0] - dilation[0]*(kernel_size[0]-1) - 1) / stride[0] + 1)
    W_out = np.floor((W_in + 2*padding[1] - dilation[1]*(kernel_size[1]-1) - 1) / stride[1] + 1)

    if pool:
        H_out /= pool
        W_out /= pool
    return int(H_out), int(W_out)


# example
conv1 = nn.Conv2d(3, 8, kernel_size=3)
h, w = get_conv2d_out_shape(96, 96, conv1)
print(h, w)
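
For this example (a 3x3 kernel with the Conv2d defaults of stride 1, padding 0, and dilation 1, plus the default pool=2), each side works out to floor((96 + 0 - 1*(3 - 1) - 1)/1 + 1) = 94, halved by the pooling factor to 47, so the script prints 47 47.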
