Commit
Showing 55 changed files with 2,427 additions and 0 deletions.
2 binary files not shown.
@@ -0,0 +1,12 @@
function sigma_i = NoiseGen(sigma, neta, band)
% Generate per-band noise standard deviations as a bell curve over the bands
den = 0;
num = [];

for i = 1:band
    den = den + exp(-(i - band/2)^2/(2*neta^2));
end
for k = 1:band
    num = [num, exp(-(k - band/2)^2/(2*neta^2))];
end
sigma_i = sqrt(sigma^2*(num/den));   % scale so the squared stds sum to sigma^2
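For cross-checking, a minimal NumPy port of NoiseGen (the name noise_gen and the vectorized form are illustrative, not part of the commit):

import numpy as np

def noise_gen(sigma, neta, band):
    # Bell-curve noise profile over `band` channels; indices run 1..band
    # to match MATLAB's 1-based loops.
    i = np.arange(1, band + 1)
    w = np.exp(-(i - band / 2) ** 2 / (2 * neta ** 2))  # unnormalized Gaussian weights
    return np.sqrt(sigma ** 2 * w / w.sum())            # per-band noise std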
Binary file not shown.
@@ -0,0 +1,13 @@
I1 = double(dc);
y = 51;
x = 601;
I1 = I1(x:x+199, y:y+199, :);                          % crop a 200x200 window, all bands
I1 = I1 - repmat(min(min(I1,[],1),[],2), [200 200]);   % per-band minimum -> 0
I1 = I1 ./ repmat(max(max(I1,[],1),[],2), [200 200]);  % per-band maximum -> 1
image = I1;

image_noisy = image + randn(size(I1))*(100/255);       % additive Gaussian noise, sigma = 100/255
image_noisy(image_noisy < 0) = 0;                      % clip to [0, 1]
image_noisy(image_noisy > 1) = 1;
save('denoising.mat', 'image', 'image_noisy');
imshow(cat(3, image_noisy(:,:,57), image_noisy(:,:,27), image_noisy(:,:,17)))  % pseudo-RGB preview
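The same preparation step, sketched in NumPy for readers following along in Python (make_noisy_cube and its defaults are illustrative; the commit itself does this in MATLAB):

import numpy as np

def make_noisy_cube(dc, x=601, y=51, size=200, sigma=100/255, seed=0):
    # Crop a size x size window from hyperspectral cube `dc` (H, W, bands),
    # min-max normalize each band, then add clipped Gaussian noise.
    img = dc[x - 1:x - 1 + size, y - 1:y - 1 + size, :].astype(np.float64)  # 0-based crop
    img -= img.min(axis=(0, 1), keepdims=True)   # per-band minimum -> 0
    img /= img.max(axis=(0, 1), keepdims=True)   # per-band maximum -> 1
    rng = np.random.default_rng(seed)
    noisy = np.clip(img + rng.normal(scale=sigma, size=img.shape), 0.0, 1.0)
    return img, noisy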
@@ -0,0 +1,32 @@
from .skip import skip
from .texture_nets import get_texture_nets
from .resnet import ResNet
from .unet import UNet

import torch.nn as nn

def get_net(input_depth, NET_TYPE, pad, upsample_mode, n_channels=3, act_fun='LeakyReLU', skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=5, downsample_mode='stride'):
    # Dispatch on NET_TYPE and build the corresponding architecture
    if NET_TYPE == 'ResNet':
        # TODO
        net = ResNet(input_depth, 3, 10, 16, 1, nn.BatchNorm2d, False)
    elif NET_TYPE == 'skip':
        # int channel arguments are broadcast to one value per scale
        net = skip(input_depth, n_channels,
                   num_channels_down=[skip_n33d]*num_scales if isinstance(skip_n33d, int) else skip_n33d,
                   num_channels_up=[skip_n33u]*num_scales if isinstance(skip_n33u, int) else skip_n33u,
                   num_channels_skip=[skip_n11]*num_scales if isinstance(skip_n11, int) else skip_n11,
                   upsample_mode=upsample_mode, downsample_mode=downsample_mode,
                   need_sigmoid=True, need_bias=True, pad=pad, act_fun=act_fun)

    elif NET_TYPE == 'texture_nets':
        net = get_texture_nets(inp=input_depth, ratios=[32, 16, 8, 4, 2, 1], fill_noise=False, pad=pad)

    elif NET_TYPE == 'UNet':
        net = UNet(num_input_channels=input_depth, num_output_channels=3,
                   feature_scale=4, more_layers=0, concat_x=False,
                   upsample_mode=upsample_mode, pad=pad, norm_layer=nn.BatchNorm2d, need_sigmoid=True, need_bias=True)
    elif NET_TYPE == 'identity':
        assert input_depth == 3
        net = nn.Sequential()
    else:
        assert False, 'Unknown NET_TYPE: %s' % NET_TYPE

    return net
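A typical call, for reference (the argument values are an assumed example that exercises the 'skip' branch with its default channel settings; they are not fixed by this file):

net = get_net(input_depth=32, NET_TYPE='skip', pad='reflection', upsample_mode='bilinear')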
21 binary files not shown.
@@ -0,0 +1,121 @@
import torch
import torch.nn as nn
import numpy as np
from .downsampler import Downsampler

def add_module(self, module):
    self.add_module(str(len(self) + 1), module)

torch.nn.Module.add = add_module

class Concat(nn.Module):
    def __init__(self, dim, *args):
        super(Concat, self).__init__()
        self.dim = dim

        for idx, module in enumerate(args):
            self.add_module(str(idx), module)

    def forward(self, input):
        inputs = []
        for module in self._modules.values():
            inputs.append(module(input))

        inputs_shapes2 = [x.shape[2] for x in inputs]
        inputs_shapes3 = [x.shape[3] for x in inputs]

        if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all(np.array(inputs_shapes3) == min(inputs_shapes3)):
            inputs_ = inputs
        else:
            # Branch outputs differ spatially: center-crop all to the smallest size
            target_shape2 = min(inputs_shapes2)
            target_shape3 = min(inputs_shapes3)

            inputs_ = []
            for inp in inputs:
                diff2 = (inp.size(2) - target_shape2) // 2
                diff3 = (inp.size(3) - target_shape3) // 2
                inputs_.append(inp[:, :, diff2: diff2 + target_shape2, diff3: diff3 + target_shape3])

        return torch.cat(inputs_, dim=self.dim)

    def __len__(self):
        return len(self._modules)


class GenNoise(nn.Module):
    def __init__(self, dim2):
        super(GenNoise, self).__init__()
        self.dim2 = dim2

    def forward(self, input):
        a = list(input.size())
        a[1] = self.dim2  # replace the channel dimension

        b = torch.zeros(a).type_as(input.data)
        b.normal_()

        x = torch.autograd.Variable(b)

        return x


class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""

    def __init__(self):
        super(Swish, self).__init__()
        self.s = nn.Sigmoid()

    def forward(self, x):
        return x * self.s(x)


def act(act_fun='LeakyReLU'):
    '''
    Either string defining an activation function or module (e.g. nn.ReLU)
    '''
    if isinstance(act_fun, str):
        if act_fun == 'LeakyReLU':
            return nn.LeakyReLU(0.2, inplace=True)
        elif act_fun == 'Swish':
            return Swish()
        elif act_fun == 'ELU':
            return nn.ELU()
        elif act_fun == 'none':
            return nn.Sequential()
        else:
            assert False
    else:
        return act_fun()


def bn(num_features):
    return nn.BatchNorm2d(num_features)


def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
    downsampler = None
    if stride != 1 and downsample_mode != 'stride':

        if downsample_mode == 'avg':
            downsampler = nn.AvgPool2d(stride, stride)
        elif downsample_mode == 'max':
            downsampler = nn.MaxPool2d(stride, stride)
        elif downsample_mode in ['lanczos2', 'lanczos3']:
            downsampler = Downsampler(n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True)
        else:
            assert False

        # downsampling is handled by the pooling layer, so convolve at stride 1
        stride = 1

    padder = None
    to_pad = int((kernel_size - 1) / 2)
    if pad == 'reflection':
        padder = nn.ReflectionPad2d(to_pad)
        to_pad = 0

    convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)

    layers = filter(lambda x: x is not None, [padder, convolver, downsampler])
    return nn.Sequential(*layers)
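A quick sanity check of how these helpers compose, as a sketch (assumes conv and Concat from this file are in scope; shapes worked out by hand):

import torch

# conv() packs [padder, convolver, downsampler] into one nn.Sequential;
# with stride=2 and downsample_mode='avg', the conv runs at stride 1 and
# an AvgPool2d(2, 2) does the downsampling.
block = conv(3, 16, kernel_size=3, stride=2, pad='reflection', downsample_mode='avg')
x = torch.zeros(1, 3, 64, 64)
print(block(x).shape)     # torch.Size([1, 16, 32, 32])

# Concat runs each branch on the same input and concatenates along self.dim,
# center-cropping branches to the smallest spatial size when they disagree.
branches = Concat(1, conv(3, 8, 3, pad='reflection'), conv(3, 8, 5))
print(branches(x).shape)  # torch.Size([1, 16, 64, 64])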
@@ -0,0 +1,121 @@
import torch
import torch.nn as nn
import numpy as np
from .downsampler import Downsampler

def add_module(self, module):
    self.add_module(str(len(self) + 1), module)

torch.nn.Module.add = add_module

class Concat(nn.Module):
    def __init__(self, dim, *args):
        super(Concat, self).__init__()
        self.dim = dim

        for idx, module in enumerate(args):
            self.add_module(str(idx), module)

    def forward(self, input):
        inputs = []
        for module in self._modules.values():
            inputs.append(module(input))

        # 5D tensors (N, C, D, H, W): spatial extents live in dims 3 and 4
        inputs_shapes2 = [x.shape[3] for x in inputs]
        inputs_shapes3 = [x.shape[4] for x in inputs]

        if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all(np.array(inputs_shapes3) == min(inputs_shapes3)):
            inputs_ = inputs
        else:
            # Branch outputs differ spatially: center-crop all to the smallest H and W
            target_shape2 = min(inputs_shapes2)
            target_shape3 = min(inputs_shapes3)

            inputs_ = []
            for inp in inputs:
                diff2 = (inp.size(3) - target_shape2) // 2
                diff3 = (inp.size(4) - target_shape3) // 2
                inputs_.append(inp[:, :, :, diff2: diff2 + target_shape2, diff3: diff3 + target_shape3])

        return torch.cat(inputs_, dim=self.dim)

    def __len__(self):
        return len(self._modules)


class GenNoise(nn.Module):
    def __init__(self, dim2):
        super(GenNoise, self).__init__()
        self.dim2 = dim2

    def forward(self, input):
        a = list(input.size())
        a[1] = self.dim2  # replace the channel dimension

        b = torch.zeros(a).type_as(input.data)
        b.normal_()

        x = torch.autograd.Variable(b)

        return x


class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""

    def __init__(self):
        super(Swish, self).__init__()
        self.s = nn.Sigmoid()

    def forward(self, x):
        return x * self.s(x)


def act(act_fun='LeakyReLU'):
    '''
    Either string defining an activation function or module (e.g. nn.ReLU)
    '''
    if isinstance(act_fun, str):
        if act_fun == 'LeakyReLU':
            return nn.LeakyReLU(0.2, inplace=True)
        elif act_fun == 'Swish':
            return Swish()
        elif act_fun == 'ELU':
            return nn.ELU()
        elif act_fun == 'none':
            return nn.Sequential()
        else:
            assert False
    else:
        return act_fun()


def bn(num_features):
    return nn.BatchNorm3d(num_features)


def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
    downsampler = None
    if stride != 1 and downsample_mode != 'stride':

        if downsample_mode == 'avg':
            downsampler = nn.AvgPool3d(stride, stride)
        elif downsample_mode == 'max':
            downsampler = nn.MaxPool3d(stride, stride)
        elif downsample_mode in ['lanczos2', 'lanczos3']:
            downsampler = Downsampler(n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True)
        else:
            assert False

        # downsampling is handled by the pooling layer, so convolve at stride 1
        stride = 1

    padder = None
    to_pad = int((kernel_size - 1) / 2)
    if pad == 'reflection':
        # replication padding stands in for reflection in the volumetric case
        padder = nn.ReplicationPad3d(to_pad)
        to_pad = 0

    convolver = nn.Conv3d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)

    layers = filter(lambda x: x is not None, [padder, convolver, downsampler])
    return nn.Sequential(*layers)
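And the volumetric counterpart, as a sketch (assumes the 3D conv from this file is in scope and an (N, C, bands, H, W) input):

import torch

block = conv(1, 8, kernel_size=3, stride=2, pad='reflection', downsample_mode='avg')
v = torch.zeros(1, 1, 16, 64, 64)   # (N, C, bands, H, W) volume
print(block(v).shape)               # torch.Size([1, 8, 8, 32, 32])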