
[Code Reading] Vision Transformer

Author: Joyner2018 | Published 2021-10-25 00:55

    The model file, vit.py:

    #encoding=utf-8
    import torch
    from torch import nn
    
    from einops import rearrange, repeat
    from einops.layers.torch import Rearrange
    
    # helpers
    '''
        pair() normalizes an input into a 2-tuple. For example:
            a convolution kernel size given as a single value x becomes (x, x)
            an image resize given a single value x becomes (x, x), i.e. resize to (x, x)
            a tuple input (x, y) is returned unchanged as (x, y)
    '''
    def pair(t):
        return t if isinstance(t, tuple) else (t, t)
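    # Examples (given the definition above):
    #   pair(256)        -> (256, 256)
    #   pair((224, 196)) -> (224, 196)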
    
    # classes
    
    '''
        PreNorm:
            normalize first (LayerNorm),
            then apply fn
    '''
    class PreNorm(nn.Module):
        def __init__(self, dim, fn):
            super().__init__()
            self.norm = nn.LayerNorm(dim)
            self.fn = fn
        def forward(self, x, **kwargs):
            return self.fn(self.norm(x), **kwargs)
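    # Example: PreNorm(dim, Attention(dim)) computes attention(layernorm(x)),
    # so every sub-layer sees a normalized input (pre-norm Transformer ordering).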
    
    '''
        Feed-forward (MLP) block:
            linear layer (dim -> hidden_dim)
                GELU activation
                dropout
            linear layer (hidden_dim -> dim)
                dropout
    '''
    class FeedForward(nn.Module):
        def __init__(self, dim, hidden_dim, dropout = 0.):
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(dim, hidden_dim),
                nn.GELU(),
                nn.Dropout(dropout),
                nn.Linear(hidden_dim, dim),
                nn.Dropout(dropout)
            )
        def forward(self, x):
            return self.net(x)
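    # Example: FeedForward(1024, 2048) maps (b, n, 1024) -> (b, n, 2048) -> (b, n, 1024)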
    
    '''
        Attention layer. The input x is
            passed through a linear layer (to_qkv),
            chunked into q, k, v,
            and q, k, v are each split into `heads` branches:
                rearrange(t, 'b n (h d) -> b h n d', h = self.heads)
    '''
    class Attention(nn.Module):
        def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
            super().__init__()
            inner_dim = dim_head *  heads
            project_out = not (heads == 1 and dim_head == dim)
    
            self.heads = heads
            self.scale = dim_head ** -0.5
    
            self.attend = nn.Softmax(dim = -1)
            self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
    
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, dim),
                nn.Dropout(dropout)
            ) if project_out else nn.Identity()
    
        def forward(self, x):
            qkv = self.to_qkv(x).chunk(3, dim = -1)
            q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
    
            dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
    
            attn = self.attend(dots)
    
            out = torch.matmul(attn, v)
            out = rearrange(out, 'b h n d -> b n (h d)')
            return self.to_out(out)
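    # Shape walk-through for the forward pass above (b = batch, n = tokens):
    #   x: (b, n, dim) -> to_qkv: (b, n, 3 * heads * dim_head) -> q, k, v: (b, heads, n, dim_head)
    #   dots, attn: (b, heads, n, n); out: (b, heads, n, dim_head) -> (b, n, heads * dim_head) -> (b, n, dim)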
    
    class Transformer(nn.Module):
        def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
            super().__init__()
            self.layers = nn.ModuleList([])
            for _ in range(depth):
                self.layers.append(nn.ModuleList([
                    PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
                    PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
                ]))
        def forward(self, x):
            for attn, ff in self.layers:
                x = attn(x) + x
                x = ff(x) + x
            return x
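    # Each block applies pre-norm attention and a pre-norm feed-forward,
    # both wrapped in residual connections: x = sublayer(norm(x)) + x.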
    
    class ViT(nn.Module):
        def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
            super().__init__()
            image_height, image_width = pair(image_size)
            patch_height, patch_width = pair(patch_size)
    
            assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
    
            num_patches = (image_height // patch_height) * (image_width // patch_width)
            patch_dim = channels * patch_height * patch_width
            assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
    
            self.to_patch_embedding = nn.Sequential(
                Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
                nn.Linear(patch_dim, dim),
            )
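            # e.g. with image_size = 256, patch_size = 32, channels = 3:
            # (b, 3, 256, 256) -> (b, 8*8, 32*32*3) = (b, 64, 3072) -> (b, 64, 1024)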
    
            self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
            self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
            self.dropout = nn.Dropout(emb_dropout)
    
            self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
    
            self.pool = pool
            self.to_latent = nn.Identity()
    
            self.mlp_head = nn.Sequential(
                nn.LayerNorm(dim),
                nn.Linear(dim, num_classes) 
            )
    
        def forward(self, img):
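            # debug prints; their output appears in the Output section below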
            print(img.shape)
            x = self.to_patch_embedding(img)
            print(x.shape)
            print(x.type())
            b, n, _ = x.shape
    
            cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
            x = torch.cat((cls_tokens, x), dim=1)
            print("self.pos_embedding.size()",self.pos_embedding.size())
            print("n+1",n+1)
            x += self.pos_embedding[:, :(n + 1)]
            x = self.dropout(x)
    
            x = self.transformer(x)
    
            x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
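            # 'mean' averages over all tokens; 'cls' takes the class token at index 0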
    
            x = self.to_latent(x)
            return self.mlp_head(x)
    
    if __name__ == "__main__":
        from torchsummary import summary
        vit = ViT(
            image_size = 256,
            patch_size = 32,
            num_classes = 1000,
            dim = 1024,
            depth = 6,
            heads = 16,
            mlp_dim = 2048,
            dropout = 0.1,
            emb_dropout = 0.1
        )
        
        img = torch.randn(1, 3, 256, 256)
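        # the .cuda() calls below require a GPU; to run on CPU, remove them
        # and call summary(vit, (3, 256, 256), device="cpu")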
        vit = vit.cuda()
        img = img.cuda()
        preds = vit(img)
        print(preds.size())
        summary(vit,(3,256,256))
    

    Key points

    • The code requires PyTorch 1.7 or later, because nn.GELU() is not supported in older versions.
    • pair(t) is a simple, effective helper for expressing paired values.
    • Functions can be passed as arguments (here, fn in PreNorm).
    • einops is an excellent library: it gives you readable, reliable code through flexible and powerful tensor operations, and it supports numpy, pytorch, tensorflow, and more. einops is slowly but steadily seeping into every nook and cranny of my code; if you find yourself struggling with piles of high-dimensional tensors, it may change your life.
    • For an introduction to einops, see https://zhuanlan.zhihu.com/p/342675997
    • How einops is used in this ViT (a runnable demo follows these lines):

    rearrange(t, 'b n (h d) -> b h n d', h = self.heads)
    rearrange(out, 'b h n d -> b n (h d)')
    Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width)  # the layer form supports backprop
    repeat(self.cls_token, '() n d -> b n d', b = b)
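
    A minimal, self-contained sketch of those four patterns, with shapes matching the model above (heads = 16, dim_head = 64, 32×32 patches); the variable names here are illustrative:

    import torch
    from einops import rearrange, repeat
    from einops.layers.torch import Rearrange

    b, n, h, d = 2, 65, 16, 64
    t = torch.randn(b, n, h * d)

    # split the channel dim into heads: (2, 65, 1024) -> (2, 16, 65, 64)
    heads_first = rearrange(t, 'b n (h d) -> b h n d', h=h)
    print(heads_first.shape)

    # merge the heads back: (2, 16, 65, 64) -> (2, 65, 1024)
    print(rearrange(heads_first, 'b h n d -> b n (h d)').shape)

    # patchify an image: (2, 3, 256, 256) -> (2, 8*8, 32*32*3) = (2, 64, 3072)
    to_patches = Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=32, p2=32)
    print(to_patches(torch.randn(2, 3, 256, 256)).shape)

    # broadcast a cls token over the batch: (1, 1, 1024) -> (2, 1, 1024)
    cls_token = torch.randn(1, 1, h * d)
    print(repeat(cls_token, '() n d -> b n d', b=b).shape)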

    Output

    torch.Size([1, 3, 256, 256])
    torch.Size([1, 64, 1024])
    torch.cuda.FloatTensor
    self.pos_embedding.size() torch.Size([1, 65, 1024])
    n+1 65
    torch.Size([1, 1000])
    torch.Size([2, 3, 256, 256])
    torch.Size([2, 64, 1024])
    torch.cuda.FloatTensor
    self.pos_embedding.size() torch.Size([1, 65, 1024])
    n+1 65
    ----------------------------------------------------------------
            Layer (type)               Output Shape         Param #
    ================================================================
             Rearrange-1             [-1, 64, 3072]               0
                Linear-2             [-1, 64, 1024]       3,146,752
               Dropout-3             [-1, 65, 1024]               0
             LayerNorm-4             [-1, 65, 1024]           2,048
                Linear-5             [-1, 65, 3072]       3,145,728
               Softmax-6           [-1, 16, 65, 65]               0
                Linear-7             [-1, 65, 1024]       1,049,600
               Dropout-8             [-1, 65, 1024]               0
             Attention-9             [-1, 65, 1024]               0
              PreNorm-10             [-1, 65, 1024]               0
            LayerNorm-11             [-1, 65, 1024]           2,048
               Linear-12             [-1, 65, 2048]       2,099,200
                 GELU-13             [-1, 65, 2048]               0
              Dropout-14             [-1, 65, 2048]               0
               Linear-15             [-1, 65, 1024]       2,098,176
              Dropout-16             [-1, 65, 1024]               0
          FeedForward-17             [-1, 65, 1024]               0
              PreNorm-18             [-1, 65, 1024]               0
            LayerNorm-19             [-1, 65, 1024]           2,048
               Linear-20             [-1, 65, 3072]       3,145,728
              Softmax-21           [-1, 16, 65, 65]               0
               Linear-22             [-1, 65, 1024]       1,049,600
              Dropout-23             [-1, 65, 1024]               0
            Attention-24             [-1, 65, 1024]               0
              PreNorm-25             [-1, 65, 1024]               0
            LayerNorm-26             [-1, 65, 1024]           2,048
               Linear-27             [-1, 65, 2048]       2,099,200
                 GELU-28             [-1, 65, 2048]               0
              Dropout-29             [-1, 65, 2048]               0
               Linear-30             [-1, 65, 1024]       2,098,176
              Dropout-31             [-1, 65, 1024]               0
          FeedForward-32             [-1, 65, 1024]               0
              PreNorm-33             [-1, 65, 1024]               0
            LayerNorm-34             [-1, 65, 1024]           2,048
               Linear-35             [-1, 65, 3072]       3,145,728
              Softmax-36           [-1, 16, 65, 65]               0
               Linear-37             [-1, 65, 1024]       1,049,600
              Dropout-38             [-1, 65, 1024]               0
            Attention-39             [-1, 65, 1024]               0
              PreNorm-40             [-1, 65, 1024]               0
            LayerNorm-41             [-1, 65, 1024]           2,048
               Linear-42             [-1, 65, 2048]       2,099,200
                 GELU-43             [-1, 65, 2048]               0
              Dropout-44             [-1, 65, 2048]               0
               Linear-45             [-1, 65, 1024]       2,098,176
              Dropout-46             [-1, 65, 1024]               0
          FeedForward-47             [-1, 65, 1024]               0
              PreNorm-48             [-1, 65, 1024]               0
            LayerNorm-49             [-1, 65, 1024]           2,048
               Linear-50             [-1, 65, 3072]       3,145,728
              Softmax-51           [-1, 16, 65, 65]               0
               Linear-52             [-1, 65, 1024]       1,049,600
              Dropout-53             [-1, 65, 1024]               0
            Attention-54             [-1, 65, 1024]               0
              PreNorm-55             [-1, 65, 1024]               0
            LayerNorm-56             [-1, 65, 1024]           2,048
               Linear-57             [-1, 65, 2048]       2,099,200
                 GELU-58             [-1, 65, 2048]               0
              Dropout-59             [-1, 65, 2048]               0
               Linear-60             [-1, 65, 1024]       2,098,176
              Dropout-61             [-1, 65, 1024]               0
          FeedForward-62             [-1, 65, 1024]               0
              PreNorm-63             [-1, 65, 1024]               0
            LayerNorm-64             [-1, 65, 1024]           2,048
               Linear-65             [-1, 65, 3072]       3,145,728
              Softmax-66           [-1, 16, 65, 65]               0
               Linear-67             [-1, 65, 1024]       1,049,600
              Dropout-68             [-1, 65, 1024]               0
            Attention-69             [-1, 65, 1024]               0
              PreNorm-70             [-1, 65, 1024]               0
            LayerNorm-71             [-1, 65, 1024]           2,048
               Linear-72             [-1, 65, 2048]       2,099,200
                 GELU-73             [-1, 65, 2048]               0
              Dropout-74             [-1, 65, 2048]               0
               Linear-75             [-1, 65, 1024]       2,098,176
              Dropout-76             [-1, 65, 1024]               0
          FeedForward-77             [-1, 65, 1024]               0
              PreNorm-78             [-1, 65, 1024]               0
            LayerNorm-79             [-1, 65, 1024]           2,048
               Linear-80             [-1, 65, 3072]       3,145,728
              Softmax-81           [-1, 16, 65, 65]               0
               Linear-82             [-1, 65, 1024]       1,049,600
              Dropout-83             [-1, 65, 1024]               0
            Attention-84             [-1, 65, 1024]               0
              PreNorm-85             [-1, 65, 1024]               0
            LayerNorm-86             [-1, 65, 1024]           2,048
               Linear-87             [-1, 65, 2048]       2,099,200
                 GELU-88             [-1, 65, 2048]               0
              Dropout-89             [-1, 65, 2048]               0
               Linear-90             [-1, 65, 1024]       2,098,176
              Dropout-91             [-1, 65, 1024]               0
          FeedForward-92             [-1, 65, 1024]               0
              PreNorm-93             [-1, 65, 1024]               0
          Transformer-94             [-1, 65, 1024]               0
             Identity-95                 [-1, 1024]               0
            LayerNorm-96                 [-1, 1024]           2,048
               Linear-97                 [-1, 1000]       1,025,000
    ================================================================
    Total params: 54,554,600
    Trainable params: 54,554,600
    Non-trainable params: 0
    ----------------------------------------------------------------
    Input size (MB): 0.75
    Forward/backward pass size (MB): 64.02
    Params size (MB): 208.11
    Estimated Total Size (MB): 272.88
    ----------------------------------------------------------------
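
    These per-layer counts can be verified by hand: the patch projection Linear-2 has 3072×1024 + 1024 = 3,146,752 parameters (patch_dim = 3·32·32 = 3072); the qkv projection Linear-5 has 1024×3072 = 3,145,728 (bias = False); and the classification head Linear-97 has 1024×1000 + 1000 = 1,025,000. One caveat: torchsummary counts only the parameters of the submodules it hooks, so the bare nn.Parameter tensors pos_embedding (1×65×1024 = 66,560) and cls_token (1,024) are missing from the 54,554,600 total; the full model has 54,622,184 trainable parameters.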
    
