%load_ext autoreload
%autoreload 2

sample_to_df[source]

sample_to_df(locs, x_os, y_os, z_os, ints, px_size_zyx=[100, 100, 100])

df_to_micro[source]

df_to_micro(df, px_size_zyx=[100, 100, 100])

from decode_fish.engine.point_process import PointProcessUniform
locs_3d, x_os_3d, y_os_3d, z_os_3d, ints_3d, output_shape = PointProcessUniform(torch.ones([1,1,40,40,40])*0.001).sample()
sample_to_df(locs_3d, x_os_3d, y_os_3d, z_os_3d, ints_3d)
loc_idx frame_idx x y z int
0 0 0 1157.859375 3948.876465 506.832397 1.0
1 1 0 1700.929443 794.202209 603.152954 1.0
2 2 0 1075.507202 1976.463135 663.151733 1.0
3 3 0 1700.954956 1853.443848 731.253052 1.0
4 4 0 3551.407715 3283.605469 790.297607 1.0
... ... ... ... ... ... ...
69 69 0 3018.743896 36.139202 2593.828857 1.0
70 70 0 742.813599 2101.103516 2612.943359 1.0
71 71 0 499.883270 3249.107422 2780.383545 1.0
72 72 0 3084.075439 327.644379 3022.731689 1.0
73 73 0 2734.532715 1378.381226 3681.352295 1.0

74 rows × 6 columns
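For orientation, the following is a minimal, self-contained sketch of the coordinate conversion that sample_to_df is assumed to perform: integer voxel indices plus the continuous sub-voxel offsets, scaled by px_size_zyx (100 nm per voxel in the call above), give positions in nanometres. The exact offset and axis conventions are assumptions; see the [source] link for the authoritative implementation.

import torch
import pandas as pd

# Toy emitter data: integer voxel indices plus continuous sub-voxel offsets (assumed convention).
z_idx, z_os = torch.tensor([5, 12]),  torch.tensor([0.07, -0.30])
y_idx, y_os = torch.tensor([39, 7]),  torch.tensor([0.49, 0.94])
x_idx, x_os = torch.tensor([11, 17]), torch.tensor([0.58, 0.01])
ints = torch.tensor([1.0, 1.0])
px_size_zyx = [100, 100, 100]  # nm per voxel, matching the default above

df = pd.DataFrame({
    'loc_idx': range(len(x_idx)),
    'frame_idx': 0,
    'x': ((x_idx + x_os) * px_size_zyx[2]).numpy(),
    'y': ((y_idx + y_os) * px_size_zyx[1]).numpy(),
    'z': ((z_idx + z_os) * px_size_zyx[0]).numpy(),
    'int': ints.numpy(),
})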

class SIPostProcess[source]

SIPostProcess(m1_threshold:float=0.03, m2_threshold:float=0.3, samp_threshold=0.1, px_size_zyx=[100, 100, 100], diag=0) :: Module

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing them to be nested in
a tree structure. You can assign the submodules as regular attributes::

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their
parameters converted too when you call :meth:`to`, etc.

:ivar training: Boolean represents whether this module is in training or
                evaluation mode.
:vartype training: bool

class ISIPostProcess[source]

ISIPostProcess(m1_threshold:float=0.1, samp_threshold=0.1, px_size_zyx=[100, 100, 100], diag=False) :: SIPostProcess

class ISIPostProcess(SIPostProcess):
    
    def __init__(self, m1_threshold:float = 0.1, samp_threshold=0.1, px_size_zyx=[100,100,100], diag=False):
        
        super().__init__(m1_threshold = m1_threshold, samp_threshold=samp_threshold, px_size_zyx=px_size_zyx, diag=diag)
        self.m2_threshold = None
        
    def forward(self, logits):

        device = logits.device
        p = torch.sigmoid(logits)
        
        with torch.no_grad():
            
            p_SI = 0
            tot_mask = torch.ones_like(p)
            max_mask = torch.ones_like(p)
            
            while max_mask.sum():
                
                # voxels with probability values > threshold,
                # and which were not previously counted as locations, are candidates
                p_cand = torch.where(p>self.m1_threshold, p, torch.zeros_like(p)) * tot_mask

                # localize maximum (nonzero) values within a 3x3x3 volume
                p_cand = F.max_pool3d(p_cand,3,1,padding=1)
                max_mask = torch.eq(p, p_cand).float()
                max_mask[p==0] = 0
                
                # Add up probability values from the adjacent pixels
                conv = F.conv3d(p, self.filt.to(device), padding=1)
                p_sum = max_mask * conv
                
                # Add the integrated probabilities to the return tensor. 
                p_SI += torch.clamp_max(p_sum, 1) 
                # Voxels that were added cannot be added again
                tot_mask *= (torch.ones_like(max_mask) - max_mask)
                
                # The probability mass that contributed to p_sum is removed.
                p_fac = 1/p_sum
                p_fac[torch.isinf(p_fac)] = 0
                p_fac = torch.clamp_max(p_fac, 1) 
                p_proc = F.conv3d(p_fac, self.filt.to(device),padding=1)*p

                p = p - p_proc
                torch.clamp_min_(p, 0)
            
            return p_SI
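A minimal usage sketch for the class defined above, assuming that SIPostProcess.__init__ registers the 3x3x3 summation kernel used here as self.filt. The module is called on the raw logit volume (pre-sigmoid) and returns the spatially integrated probability map with the same [batch, channel, z, y, x] shape.

post_proc = ISIPostProcess(m1_threshold=0.1, samp_threshold=0.1)
logits = torch.randn(1, 1, 16, 16, 16)  # raw network output (pre-sigmoid)
p_si = post_proc(logits)                # integrated probabilities, same shape as logits
print(p_si.shape, float(p_si.sum()))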
from decode_fish.funcs.utils import *
# model_out = torch.load('../data/model_output.pt')
# probs_inp = torch.sigmoid(model_out['logits'])[:,:,:,250:300,200:250]

# model_out = torch.load('../data/model_batch_output.pt')
# probs_inp = torch.sigmoid(model_out['logits'])

model_out = torch.load('../data/model_output_t.pt')
probs_inp = torch.sigmoid(model_out['logits'])
# gt_df = torch.load('../data/gt_1.pt')
# len(gt_df)
from decode_fish.funcs.evaluation import *

post_proc1 = SIPostProcess(m1_threshold=0.03, m2_threshold=0.25, samp_threshold=0.6, px_size_zyx=[100,100,100], diag=True)
post_proc2 = ISIPostProcess(m1_threshold=0.03, samp_threshold=0.5, px_size_zyx=[100,100,100], diag=True)


# matching(px_to_nm(gt_df),  post_proc1.forward(model_out, ret='df'), tolerance=500, print_res=True)
# _=matching(px_to_nm(gt_df),  post_proc2.forward(model_out, ret='df'), tolerance=500, print_res=True)
plt.figure(figsize=(20,10))
plt.subplot(231)
probs = cpu(probs_inp[0,0])
probsf = probs + 0
probsf[probsf<0.01] = 0
im = plt.imshow(probs.sum(0))
# plt.scatter(gt_df['x'],gt_df['y'], color='red', s=5.)
plt.title(f'Net output {probs.sum().item():.1f} and {probsf.sum().item():.1f}')
add_colorbar(im)

recs = post_proc1.get_si_resdict(model_out)
plt.subplot(232)
im = plt.imshow(cpu(recs['Probs_si'][0,0]).max(0))
add_colorbar(im)
N = cpu(recs['Probs_si'][0,0]).sum().item()
plt.title(f'SI Probs SI {N:.1f}')

plt.subplot(235)
im = plt.imshow(cpu(recs['Samples_si'][0,0]).sum(0))
add_colorbar(im)
plt.title(cpu(recs['Samples_si'][0,0]).sum().item())

recs = post_proc2.get_si_resdict(model_out)
plt.subplot(233)
im = plt.imshow(cpu(recs['Probs_si'][0,0]).max(0))
add_colorbar(im)
N = cpu(recs['Probs_si'][0,0]).sum().item()
plt.title(f'ISI Probs SI {N:.1f}')

plt.subplot(236)
im = plt.imshow(cpu(recs['Samples_si'][0,0]).sum(0))
add_colorbar(im)
plt.title(cpu(recs['Samples_si'][0,0]).sum().item())
Text(0.5, 1.0, '15.0')
sl = np.s_[:,:10,35:45,20:30]
# gt_sub = crop_df(gt_df, sl)
p_sub = crop_df(nm_to_px(post_proc2.forward(model_out, ret='df')), sl)
axes=plot_3d_projections(probs[sl[1:]], 'max', size=15)
# print(probs[sl[1:]].sum(), len(gt_sub), len(p_sub))
# axes[0].scatter(gt_sub['x'],gt_sub['y'], color='red', s=5.)
# axes[1].scatter(gt_sub['x'],gt_sub['z'], color='red', s=5.)
# axes[2].scatter(gt_sub['y'],gt_sub['z'], color='red', s=5.)

axes[0].scatter(p_sub['x'],p_sub['y'], color='red', s=15.)
axes[1].scatter(p_sub['x'],p_sub['z'], color='red', s=15.)
axes[2].scatter(p_sub['y'],p_sub['z'], color='red', s=15.)
plt.hist(probs.reshape(-1).numpy())
(array([1.10259e+05, 2.06000e+02, 5.20000e+01, 3.10000e+01, 1.30000e+01,
        1.10000e+01, 7.00000e+00, 8.00000e+00, 0.00000e+00, 5.00000e+00]),
 array([2.4743142e-05, 7.3928811e-02, 1.4783287e-01, 2.2173694e-01,
        2.9564101e-01, 3.6954507e-01, 4.4344914e-01, 5.1735324e-01,
        5.9125727e-01, 6.6516137e-01, 7.3906541e-01], dtype=float32),
 <BarContainer object of 10 artists>)
probs_si = post_proc1.spatial_integration(probs_inp)

plt.figure(figsize=(10,5))
plt.subplot(121)
probs = probs_inp[0,0].detach().cpu()
# probs[probs<0.01] = 0
im = plt.imshow(probs.max(dim=0).values)
plt.title(probs.sum().item())
add_colorbar(im)
plt.subplot(122)
im = plt.imshow(probs_si[0,0].cpu().max(dim=0).values, vmax=1)
add_colorbar(im)
plt.title(probs_si[0].sum().item())
Text(0.5, 1.0, '2.4885129928588867')
model_out = torch.load('../data/model_output_1.pt')
out_df = post_proc2(model_out)
out_df
loc_idx frame_idx x y z prob int int_sig x_sig y_sig z_sig
0 0 0 811.790955 1518.304199 100.611565 0.171860 0.746066 0.181192 9.735320 9.856243 17.009920
1 1 0 757.667969 513.304199 95.219490 0.739548 0.795610 0.101314 4.362606 4.324372 6.956621
2 2 0 810.451111 1522.400513 142.510391 0.340551 0.841991 0.113622 6.207741 5.521046 9.254387
3 3 0 819.209473 1523.024902 138.445892 0.364614 0.811295 0.116416 4.794523 4.941160 8.449712
4 4 0 816.354736 1524.338989 146.961670 1.279463 0.847450 0.126523 4.698750 4.884948 7.432181
... ... ... ... ... ... ... ... ... ... ... ...
374 374 0 3418.935791 4603.155273 4707.065918 0.381243 0.798968 0.091368 4.679974 4.225513 7.495026
375 375 0 3411.747070 4607.343262 4706.444336 1.281189 0.856702 0.105119 4.368041 4.125608 6.490842
376 376 0 3455.059814 4608.461914 4705.541016 0.163221 0.674841 0.081988 8.215507 7.615615 11.739887
377 377 0 3420.222412 4607.221680 4704.833984 0.542263 0.840019 0.148112 5.328904 6.125201 8.839601
378 378 0 3418.314453 4601.889648 4713.702637 0.247846 0.797368 0.155506 8.564455 7.486840 11.944842

379 rows × 11 columns
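The output DataFrame carries a detection probability (prob) and per-coordinate uncertainties alongside each position, so low-confidence detections can be pruned afterwards by thresholding the prob column. The 0.5 cut below is an arbitrary illustration, not a project default.

out_df_filt = out_df[out_df['prob'] > 0.5].sort_values('prob', ascending=False)
len(out_df), len(out_df_filt)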

plt.figure(figsize=(20,20))
plt.subplot(121)
im = plt.imshow(probs_inp[0,0].cpu().max(dim=0).values)
add_colorbar(im)
plt.title(len(out_df))
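# out_df coordinates are in nm; divide by the 100 nm pixel size (px_size_zyx) to overlay on the pixel grid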
plt.scatter(out_df['x']/100,out_df['y']/100, color='red', s=5.)
<matplotlib.collections.PathCollection at 0x7f5878183950>
model_out = torch.load('../data/model_batch_output.pt')
from decode_fish.engine.psf import LinearInterpolatedPSF
from decode_fish.engine.noise import sCMOS
from decode_fish.engine.point_process import PointProcessUniform
from decode_fish.funcs.plotting import plot_3d_projections
from decode_fish.engine.microscope import Microscope

psf_state = torch.load('/groups/turaga/home/speisera/Mackebox/Artur/WorkDB/deepstorm/fishcod/simfish_psf.pkl')
_,xs,ys,zs = psf_state['psf_volume'].shape
psf = LinearInterpolatedPSF(fs_x=xs, fs_y=ys, fs_z=zs, upsample_factor= 1)
psf.load_state_dict(psf_state)

noise = sCMOS()

micro = Microscope(parametric_psf=[psf], noise=noise, multipl=10000).cuda()

point_process = PointProcessUniform(local_rate = torch.ones([1,1,48,48,48]).cuda()*.0001, min_int = 0.5)
locs_3d, x_os_3d, y_os_3d, z_os_3d, ints_3d, output_shape = point_process.sample()
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-41-6483ac933d38> in <module>
      7 psf_state = torch.load('/groups/turaga/home/speisera/Mackebox/Artur/WorkDB/deepstorm/fishcod/simfish_psf.pkl')
      8 _,xs,ys,zs = psf_state['psf_volume'].shape
----> 9 psf = LinearInterpolatedPSF(fs_x=xs, fs_y=ys, fs_z=zs, upsample_factor= 1)
     10 psf.load_state_dict(psf_state)
     11 

TypeError: __init__() got an unexpected keyword argument 'fs_x'
xsim = micro(locs_3d, x_os_3d, y_os_3d, z_os_3d, ints_3d, output_shape)
xrec = micro(locs_mod, x_os_mod, y_os_mod, z_os_mod, ints_mod, output_shape_mod)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-185-26ccbda0152d> in <module>
----> 1 xsim = micro(locs_3d, x_os_3d, y_os_3d, z_os_3d, ints_3d, output_shape)
      2 xrec = micro(locs_mod, x_os_mod, y_os_mod, z_os_mod, ints_mod, output_shape_mod)

NameError: name 'micro' is not defined
plot_3d_projections(xsim[0,0])
plot_3d_projections(xrec[0,0])
!nbdev_build_lib
Converted 00_models.ipynb.
Converted 01_psf.ipynb.
Converted 02_microscope.ipynb.
Converted 03_noise.ipynb.
Converted 04_pointsource.ipynb.
Converted 05_gmm_loss.ipynb.
Converted 06_plotting.ipynb.
Converted 07_file_io.ipynb.
Converted 08_dataset.ipynb.
Converted 09_output_trafo.ipynb.
Converted 10_evaluation.ipynb.
Converted 11_emitter_io.ipynb.
Converted 12_utils.ipynb.
Converted 13_train.ipynb.
Converted 15_fit_psf.ipynb.
Converted 16_visualization.ipynb.
Converted 17_eval_routines.ipynb.
Converted index.ipynb.