Definition of the classes and modules we use to build our 3D UNet
%load_ext autoreload
%autoreload 2
from nbdev.showdoc import *

init_func[source]

init_func(m, func=kaiming_normal_)

Initialize the weights of PyTorch model `m` with `func`
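
A quick usage sketch (an assumption about behaviour: `init_func` walks the module tree of `m` and applies `func` to the weights it can initialize):

import torch.nn as nn
from torch.nn.init import kaiming_normal_, xavier_uniform_

net = nn.Sequential(nn.Conv3d(1, 16, 3, padding=1), nn.ReLU())
init_func(net)                        # default: kaiming_normal_
init_func(net, func=xavier_uniform_)  # any torch.nn.init function with a compatible signature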

layer_types[source]

layer_types(m)

Returns a list of the layer types in PyTorch model `m`

extract_layer[source]

extract_layer(m, name=Conv3d)
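
No docstrings are shown for these two helpers; a plausible usage sketch, assuming `layer_types` collects the type of every submodule of `m` and `extract_layer` returns the submodules whose type matches `name`:

import torch.nn as nn
from torch.nn import Conv3d

net = nn.Sequential(nn.Conv3d(1, 16, 3), nn.ReLU(), nn.Conv3d(16, 2, 1))
layer_types(net)                 # e.g. [Sequential, Conv3d, ReLU, Conv3d]
extract_layer(net, name=Conv3d)  # e.g. the two Conv3d submodules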

number_of_features_per_level[source]

number_of_features_per_level(init_channel_number, num_levels)
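
No docstring is shown; assuming the standard pytorch-3dunet doubling scheme, each level doubles the channel count of the previous one:

# Expected behaviour under the doubling assumption:
# [init_channel_number * 2 ** k for k in range(num_levels)]
number_of_features_per_level(64, num_levels=4)  # [64, 128, 256, 512]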

class SingleConv[source]

SingleConv(in_channels, out_channels, kernel_size=3, order='gcr', num_groups=8, padding=1) :: Sequential

Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
    in_channels (int): number of input channels
    out_channels (int): number of output channels
    kernel_size (int or tuple): size of the convolving kernel
    order (string): determines the order of layers, e.g.
        'cr' -> conv + ReLU
        'crg' -> conv + ReLU + groupnorm
        'cl' -> conv + LeakyReLU
        'ce' -> conv + ELU
    num_groups (int): number of groups for the GroupNorm
    padding (int or tuple): zero-padding added to all three sides of the input
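
A minimal usage sketch; with the defaults kernel_size=3 and padding=1 the spatial dimensions are preserved:

import torch

conv = SingleConv(8, 16, order='gcr', num_groups=8)
x = torch.randn(2, 8, 16, 32, 32)  # (batch, channels, D, H, W)
conv(x).shape                      # torch.Size([2, 16, 16, 32, 32])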

class DoubleConv[source]

DoubleConv(in_channels, out_channels, encoder, kernel_size=3, order='gcr', num_groups=8, padding=1) :: Sequential

A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
We use (GroupNorm3d+Conv3d+ReLU) by default, i.e. order='gcr'.
This can however be changed by providing the `order` argument, e.g. to switch
to Conv3d+BatchNorm3d+ELU use order='cbe'.
Use padded convolutions to make sure that the output (H_out, W_out) is the same
as (H_in, W_in), so that you don't have to crop in the decoder path.
Args:
    in_channels (int): number of input channels
    out_channels (int): number of output channels
    encoder (bool): if True we're in the encoder path, otherwise we're in the decoder
    kernel_size (int or tuple): size of the convolving kernel
    order (string): determines the order of layers, e.g.
        'cr' -> conv + ReLU
        'crg' -> conv + ReLU + groupnorm
        'cl' -> conv + LeakyReLU
        'ce' -> conv + ELU
    num_groups (int): number of groups for the GroupNorm
    padding (int or tuple): zero-padding added to all three sides of the input
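
Usage sketch. In the upstream pytorch-3dunet implementation the encoder variant widens in two steps (in_channels -> out_channels//2 -> out_channels) while the decoder variant uses out_channels in both convs; this is an implementation detail, and only the final channel count matters to the caller:

dc = DoubleConv(16, 32, encoder=True)
dc(torch.randn(2, 16, 16, 32, 32)).shape  # torch.Size([2, 32, 16, 32, 32])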

class Upsampling[source]

Upsampling(transposed_conv, in_channels=None, out_channels=None, kernel_size=3, scale_factor=(2, 2, 2), mode='nearest') :: Module

Upsamples given multi-channel 3D data using either interpolation or a learned transposed convolution.
Args:
    transposed_conv (bool): if True uses ConvTranspose3d for upsampling, otherwise uses interpolation
    in_channels (int): number of input channels for transposed conv
        used only if transposed_conv is True
    out_channels (int): number of output channels for transpose conv
        used only if transposed_conv is True
    kernel_size (int or tuple): size of the convolving kernel
        used only if transposed_conv is True
    scale_factor (int or tuple): stride of the convolution
        used only if transposed_conv is True
    mode (str): algorithm used for upsampling:
        'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'. Default: 'nearest'
        used only if transposed_conv is False
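
A sketch of the interpolation mode, assuming the pytorch-3dunet convention that forward receives the matching encoder feature map (used only for its target spatial size) together with the tensor to upsample:

up = Upsampling(transposed_conv=False, mode='nearest')
enc_feat = torch.randn(2, 64, 16, 32, 32)  # provides the target (D, H, W)
x = torch.randn(2, 64, 8, 16, 16)
up(enc_feat, x).shape                      # torch.Size([2, 64, 16, 32, 32])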

class Encoder[source]

Encoder(in_channels, out_channels, conv_kernel_size=3, apply_pooling=True, pool_kernel_size=2, pool_type='max', basic_module=DoubleConv, conv_layer_order='gcr', num_groups=8, padding=1) :: Module

A single module from the encoder path, consisting of an optional max
pooling layer (one may specify a MaxPool kernel_size different from the
standard (2,2,2), e.g. if the volumetric data is anisotropic; make sure to
use a complementary scale_factor in the decoder path) followed by a
DoubleConv module.
Args:
    in_channels (int): number of input channels
    out_channels (int): number of output channels
    conv_kernel_size (int or tuple): size of the convolving kernel
    apply_pooling (bool): if True use MaxPool3d before DoubleConv
    pool_kernel_size (int or tuple): the size of the window
    pool_type (str): pooling layer: 'max' or 'avg'
    basic_module(nn.Module): either ResNetBlock or DoubleConv
    conv_layer_order (string): determines the order of layers
        in `DoubleConv` module. See `DoubleConv` for more info.
    num_groups (int): number of groups for the GroupNorm
    padding (int or tuple): zero-padding added to all three sides of the input
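
Example of a single encoder level: with apply_pooling=True, MaxPool3d halves the spatial dimensions before the DoubleConv widens the channels:

enc = Encoder(16, 32, apply_pooling=True)
enc(torch.randn(2, 16, 16, 32, 32)).shape  # torch.Size([2, 32, 8, 16, 16])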

class Decoder[source]

Decoder(in_channels, out_channels, conv_kernel_size=3, scale_factor=(2, 2, 2), basic_module=DoubleConv, conv_layer_order='gcr', num_groups=8, mode='nearest', padding=1) :: Module

A single module from the decoder path, consisting of an upsampling layer
(either a learned ConvTranspose3d or nearest-neighbor interpolation) followed by a basic module (DoubleConv or ExtResNetBlock).
Args:
    in_channels (int): number of input channels
    out_channels (int): number of output channels
    conv_kernel_size (int or tuple): size of the convolving kernel
    scale_factor (tuple): used as the multiplier for the image H/W/D in
        the case of nn.Upsample, or as the stride in the case of ConvTranspose3d;
        it must reverse the MaxPool3d operation from the corresponding encoder
    basic_module(nn.Module): either ResNetBlock or DoubleConv
    conv_layer_order (string): determines the order of layers
        in `DoubleConv` module. See `DoubleConv` for more info.
    num_groups (int): number of groups for the GroupNorm
    padding (int or tuple): zero-padding added to all three sides of the input
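
Usage sketch, assuming the pytorch-3dunet convention for the DoubleConv case: forward(encoder_features, x) upsamples x to the skip connection's spatial size, concatenates the two (hence in_channels = skip channels + channels from below), and applies the basic module:

dec = Decoder(32 + 64, 32)                 # skip channels + channels from below
skip = torch.randn(2, 32, 16, 32, 32)
x = torch.randn(2, 64, 8, 16, 16)
dec(skip, x).shape                         # torch.Size([2, 32, 16, 32, 32])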

conv3d[source]

conv3d(in_channels, out_channels, kernel_size, bias, padding)

create_conv[source]

create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding)

Create a list of modules which together constitute a single conv layer with a
non-linearity and optional batchnorm/groupnorm.
Args:
    in_channels (int): number of input channels
    out_channels (int): number of output channels
    kernel_size(int or tuple): size of the convolving kernel
    order (string): determines the order of layers, e.g.
        'cr' -> conv + ReLU
        'gcr' -> groupnorm + conv + ReLU
        'cl' -> conv + LeakyReLU
        'ce' -> conv + ELU
        'bcr' -> batchnorm + conv + ReLU
    num_groups (int): number of groups for the GroupNorm
    padding (int or tuple): zero-padding added to all three sides of the input
Return:
    list of tuple (name, module)
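
For example, order='gcr' should yield three (name, module) pairs that can be passed straight to nn.Sequential (the exact module names are an assumption based on the upstream implementation):

from collections import OrderedDict
import torch.nn as nn

mods = create_conv(16, 32, kernel_size=3, order='gcr', num_groups=8, padding=1)
[name for name, _ in mods]          # e.g. ['groupnorm', 'conv', 'ReLU']
layer = nn.Sequential(OrderedDict(mods))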

class Abstract3DUNet[source]

Abstract3DUNet(in_channels, out_channels, final_sigmoid, basic_module, f_maps=64, layer_order='gcr', num_groups=8, num_levels=4, is_segmentation=True, testing=False, conv_kernel_size=3, pool_kernel_size=2, conv_padding=1, inp_scale=1, inp_offset=0, **kwargs) :: Module

Base class for standard and residual UNet.
Args:
    in_channels (int): number of input channels
    out_channels (int): number of output segmentation masks;
        Note that out_channels might correspond either to different
        semantic classes or to different binary segmentation masks.
        It's up to the user of the class to interpret the out_channels and
        use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
        or BCEWithLogitsLoss (two-class) respectively)
    f_maps (int, tuple): if int: number of feature maps in the first conv layer of the encoder (default: 64);
        the number of feature maps at level k is then f_maps * 2**k, k=0,...,num_levels-1;
        if tuple: number of feature maps at each level
    final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the
        final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used
        to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model.
    basic_module: basic model for the encoder/decoder (DoubleConv, ExtResNetBlock, ....)
    layer_order (string): determines the order of layers
        in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d.
        See `SingleConv` for more info
    num_groups (int): number of groups for the GroupNorm
    num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
    is_segmentation (bool): if True (semantic segmentation problem) Sigmoid/Softmax normalization is applied
        after the final convolution; if False (regression problem) the normalization layer is skipped at the end
    testing (bool): if True (testing mode) the `final_activation` (if present, i.e. `is_segmentation=True`)
        will be applied as the last operation during the forward pass; if False the model is in training mode
        and the `final_activation` (even if present) won't be applied; default: False
    conv_kernel_size (int or tuple): size of the convolving kernel in the basic_module
    pool_kernel_size (int or tuple): the size of the window
    conv_padding (int or tuple): zero-padding added to all three sides of the input

class UNet3D[source]

UNet3D(in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr', num_groups=8, num_levels=4, is_segmentation=True, conv_padding=1, inp_scale=1, inp_offset=0, **kwargs) :: Abstract3DUNet

3DUnet model from
`"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation"
    <https://arxiv.org/pdf/1606.06650.pdf>`.
Uses `DoubleConv` as the basic_module and nearest-neighbor upsampling in the decoder.
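
A minimal usage sketch; with the default conv_padding=1 the output matches the input spatially, so only the channel dimension changes (spatial sizes should be divisible by 2**(num_levels-1) so pooling and upsampling round-trip cleanly):

net = UNet3D(in_channels=1, out_channels=2, f_maps=32, num_levels=3)
net(torch.randn(1, 1, 16, 64, 64)).shape  # torch.Size([1, 2, 16, 64, 64])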

class IntensityDist[source]

IntensityDist(int_conc, int_rate, int_loc) :: Module

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing to nest them in
a tree structure. You can assign the submodules as regular attributes::

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their
parameters converted too when you call :meth:`to`, etc.

:ivar training: Boolean represents whether this module is in training or
                evaluation mode.
:vartype training: bool

class UnetDecodeNoBn[source]

UnetDecodeNoBn(ch_in:int=1, ch_out:int=10, final_sigmoid:bool=False, depth:int=3, inp_scale:float=1.0, inp_offset:float=0.0, order='bcr', f_maps=64, p_offset=-5.0, int_conc=5, int_rate=1, int_loc=1) :: Module

Inherits the generic `nn.Module` docstring (reproduced under `IntensityDist` above).
# class InferenceNetwork(nn.Module):
#     def __init__(self, ch_in: int =1, ch_out: int=10, final_sigmoid : bool =False, depth: int =3, inp_scale: float=1., inp_offset: float=0.,  order='bcr', f_maps=64, p_offset=-5.,
#                 int_conc=5, int_rate=1, int_loc=1):
#         super().__init__()
#         self.unet = UNet3D(ch_in, ch_out, final_sigmoid=final_sigmoid, num_levels=depth, 
#                            layer_order = order, inp_scale=inp_scale, inp_offset=inp_offset, f_maps=f_maps)
#         self.p_offset = p_offset
#         self.int_dist = IntensityDist(int_conc, int_rate, int_loc)
        
#         self.p_out1 = nn.Conv3d(f_maps, f_maps, kernel_size=3, padding=1)
#         self.p_out2 = nn.Conv3d(f_maps, 1, kernel_size=1, padding=0)
#         nn.init.constant_(self.p_out2.bias,p_offset)
        
#         self.xyzi_out1 = nn.Conv3d(f_maps, f_maps, kernel_size=3, padding=1)
#         self.xyzi_out2 = nn.Conv3d(f_maps, 4, kernel_size=1, padding=0)
        
#         self.xyzis_out1 = nn.Conv3d(f_maps, f_maps, kernel_size=3, padding=1)
#         self.xyzis_out2 = nn.Conv3d(f_maps, 4, kernel_size=1, padding=0)
        
#         self.bg_out1 = nn.Conv3d(f_maps, f_maps, kernel_size=3, padding=1)
#         self.bg_out2 = nn.Conv3d(f_maps, 1, kernel_size=1, padding=0)
        
#         nn.init.kaiming_normal_(self.p_out1.weight, mode='fan_in', nonlinearity='relu')
#         nn.init.kaiming_normal_(self.p_out2.weight, mode='fan_in', nonlinearity='linear')
#         nn.init.kaiming_normal_(self.xyzi_out1.weight, mode='fan_in', nonlinearity='relu')
#         nn.init.kaiming_normal_(self.xyzi_out2.weight, mode='fan_in', nonlinearity='linear')
#         nn.init.kaiming_normal_(self.xyzis_out1.weight, mode='fan_in', nonlinearity='relu')
#         nn.init.kaiming_normal_(self.xyzis_out2.weight, mode='fan_in', nonlinearity='linear')
#         nn.init.kaiming_normal_(self.bg_out1.weight, mode='fan_in', nonlinearity='relu')
#         nn.init.kaiming_normal_(self.bg_out2.weight, mode='fan_in', nonlinearity='linear')
            
#     def forward(self, x):
#         out =  self.unet(x)
        
#         logit    = F.elu(self.p_out1(out))
#         logit    = self.p_out2(logit)
#         logit    = torch.clamp(logit, -15., 15)
        
#         xyzi = F.elu(self.xyzi_out1(out))
#         xyzi = self.xyzi_out2(xyzi)
        
#         xyz_mu   = torch.tanh(xyzi[:, :3])
#         i_mu     = F.softplus(xyzi[:, 3:]) + self.int_dist.int_loc.detach() + 0.01
#         xyzi_mu = torch.cat((xyz_mu, i_mu), dim=1)
        
#         xyzis = F.elu(self.xyzis_out1(out))
#         xyzis = self.xyzis_out2(xyzis)
#         xyzi_sig = F.softplus(xyzis) + 0.01
        
#         background = F.elu(self.bg_out1(out))
#         background = self.bg_out2(background)
#         background = self.unet.inp_scale * F.softplus(background)
        
#         return torch.cat([logit,xyzi_mu,xyzi_sig,background],1)
    
#     def tensor_to_dict(self, x):
    
#         return {'logits': x[:,0:1], 
#                 'xyzi_mu': x[:,1:5], 
#                 'xyzi_sigma': x[:,5:9], 
#                 'background': x[:,9:10]}
# output = model.tensor_to_dict(model(torch.randn([10,1,20,20,20])))
# for k in output.keys():
#     print(k, output[k].shape)
model = UnetDecodeNoBn(order='ce', f_maps=32)
output = model.tensor_to_dict(model(torch.randn([2,1,37,48,48])))
for k in output.keys():
    print(k, output[k].shape)
logits torch.Size([2, 1, 37, 48, 48])
xyzi_mu torch.Size([2, 4, 37, 48, 48])
xyzi_sigma torch.Size([2, 4, 37, 48, 48])
background torch.Size([2, 1, 37, 48, 48])
sum(p.numel() for p in model.parameters())
1093517
from omegaconf import OmegaConf
import hydra

cfg = OmegaConf.load(default_conf)  # `default_conf` points to the project's default config, defined elsewhere
model = hydra.utils.instantiate(cfg.model, int_loc=1, inp_scale=1, inp_offset=0)
model(torch.randn([10,1,20,20,20])).keys()
dict_keys(['logits', 'xyzi_mu', 'xyzi_sigma', 'background'])
pytorch_total_params = sum(p.numel() for p in model.parameters())
pytorch_total_params
319053
!nbdev_build_lib
Converted 00_models.ipynb.
Converted 01_psf.ipynb.
Converted 02_microscope.ipynb.
Converted 03_noise.ipynb.
Converted 04_pointsource.ipynb.
Converted 05_gmm_loss.ipynb.
Converted 06_plotting.ipynb.
Converted 07_file_io.ipynb.
Converted 08_dataset.ipynb.
Converted 09_output_trafo.ipynb.
Converted 10_evaluation.ipynb.
Converted 11_emitter_io.ipynb.
Converted 12_utils.ipynb.
Converted 13_train.ipynb.
Converted 15_fit_psf.ipynb.
Converted 16_visualization.ipynb.
Converted 17_eval_routines.ipynb.
Converted index.ipynb.