Source code for decode.neuralfitter.models.unet_parts

# sub-parts of the U-Net model
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F


class double_conv(nn.Module):
    '''(conv => BN => ReLU) * 2'''

    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x = self.conv(x)
        return x
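Illustrative only (not part of the original module): both 3 x 3 convolutions use padding=1, so double_conv preserves the spatial size and only changes the channel count.

# illustrative usage, not part of the original module
block = double_conv(1, 48)
x = torch.rand(2, 1, 64, 64)           # N x C x H x W
y = block(x)
assert y.shape == (2, 48, 64, 64)      # spatial size preserved, channels -> 48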
class inconv(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(inconv, self).__init__()
        self.conv = double_conv(in_ch, out_ch)

    def forward(self, x):
        x = self.conv(x)
        return x
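The 3-D parts below (inconv_3d, down_3d) call double_conv_3d, which is not included in this excerpt. The following is a minimal sketch, assuming it mirrors double_conv with Conv3d/BatchNorm3d and the kernel/padding signature implied by the call sites; consult the full module for the actual definition.

# sketch only -- assumed 3-D counterpart of double_conv, inferred from the call sites
class double_conv_3d(nn.Module):
    '''(conv => BN => ReLU) * 2, 3-D variant (sketch, see note above)'''

    def __init__(self, in_ch, out_ch, kernel=3, padding=1):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(in_ch, out_ch, kernel, padding=padding),
            nn.BatchNorm3d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv3d(out_ch, out_ch, kernel, padding=padding),
            nn.BatchNorm3d(out_ch),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.conv(x)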
class inconv_3d(inconv):
    def __init__(self, in_ch, out_ch):
        super().__init__(in_ch, out_ch)
        # replace the 2-D double_conv built by the parent with the 3-D variant
        self.conv = double_conv_3d(in_ch, out_ch, kernel=(1, 3, 3), padding=(0, 1, 1))
class down(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        self.mpconv = nn.Sequential(
            nn.MaxPool2d(2),
            double_conv(in_ch, out_ch)
        )

    def forward(self, x):
        x = self.mpconv(x)
        return x
class down_3d(down):
    def __init__(self, in_ch, out_ch):
        # super(down, self) deliberately skips down.__init__, so the throwaway
        # 2-D mpconv is never built; only nn.Module.__init__ runs.
        super(down, self).__init__()
        self.mpconv = nn.Sequential(
            nn.MaxPool3d(2),
            double_conv_3d(in_ch, out_ch, (1, 3, 3), (0, 1, 1))
        )
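Illustrative shape check, building on the double_conv_3d sketch above: MaxPool3d(2) halves depth, height and width, while the (1, 3, 3) kernels with (0, 1, 1) padding leave the depth axis untouched in the convolutions.

# illustrative usage, not part of the original module
pool_block = down_3d(48, 96)
x = torch.rand(2, 48, 4, 64, 64)       # N x C x D x H x W
y = pool_block(x)
assert y.shape == (2, 96, 2, 32, 32)   # pooling halved D, H and W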
class Upsample(nn.Module):
    """
    Dummy wrapper for upsampling, since for whatever reason nn.Upsample is deprecated ...
    """

    def __init__(self, scale_factor, mode, align_corners):
        super().__init__()
        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode,
                        align_corners=self.align_corners)
        return x
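Illustrative only: the wrapper is behaviourally identical to calling F.interpolate directly.

# illustrative usage, not part of the original module
up2x = Upsample(scale_factor=2, mode='bilinear', align_corners=True)
x = torch.rand(1, 3, 8, 8)
assert torch.equal(up2x(x),
                   F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True))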
class up(nn.Module):
    def __init__(self, in_ch, out_ch, bilinear=True):
        super(up, self).__init__()

        # it would be a nice idea if the upsampling could be learned too,
        # but my machine does not have enough memory to handle all those weights
        if bilinear:
            # self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.up = Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)

        self.conv = double_conv(in_ch, out_ch)

    def forward(self, x1, x2):
        x1 = self.up(x1)

        # x is N x C x H x W; pad x1 so its H and W match x2
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]

        x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2))

        # for padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd

        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x
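Illustrative only: forward upsamples the decoder features x1 by a factor of 2, pads them to the size of the skip connection x2, and concatenates along the channel axis, so in_ch must equal the sum of both inputs' channel counts.

# illustrative usage, not part of the original module
up_block = up(96 + 48, 48)
x1 = torch.rand(2, 96, 16, 16)         # decoder features, upsampled to 32 x 32
x2 = torch.rand(2, 48, 33, 33)         # skip connection, odd size to trigger padding
y = up_block(x1, x2)
assert y.shape == (2, 48, 33, 33)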
class outconv(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        x = self.conv(x)
        return x
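For orientation, the parts compose as sketched below; this toy assembly is illustrative only and is not decode's actual network definition.

# illustrative toy assembly, not decode's actual network definition
class TinyUNet(nn.Module):
    def __init__(self, n_ch, n_classes):
        super().__init__()
        self.inc = inconv(n_ch, 16)
        self.down1 = down(16, 32)
        self.up1 = up(32 + 16, 16)       # channels of decoder path + skip connection
        self.outc = outconv(16, n_classes)

    def forward(self, x):
        x1 = self.inc(x)                 # full resolution
        x2 = self.down1(x1)              # half resolution
        x = self.up1(x2, x1)             # back to full resolution via skip connection
        return self.outc(x)

net = TinyUNet(1, 3)
y = net(torch.rand(2, 1, 64, 64))
assert y.shape == (2, 3, 64, 64)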