@@ -11,6 +11,7 @@ from common import layers
 class HDBNet(nn.Module):
     def __init__(self, hidden_dim = 64, layers_count = 4, scale = 4):
         super(HDBNet, self).__init__()
+        assert scale == 4
         self.scale = scale
         self.stage1_3H = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
         self.stage1_3D = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
@@ -79,203 +80,20 @@ class HDBNet(nn.Module):
         return x
 
     def get_lut_model(self, quantization_interval=16, batch_size=2**10):
-        stage_lut = lut.transfer_2x2_input_SxS_output(self.stage1_S, quantization_interval=quantization_interval, batch_size=batch_size)
-        lut_model = srlut.SRLut.init_from_numpy(stage_lut)
-        return lut_model
+        stage1_3H = lut.transfer_2x2_input_SxS_output(self.stage1_3H, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage1_3D = lut.transfer_2x2_input_SxS_output(self.stage1_3D, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage1_3B = lut.transfer_2x2_input_SxS_output(self.stage1_3B, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage1_2H = lut.transfer_2x2_input_SxS_output(self.stage1_2H, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage1_2D = lut.transfer_2x2_input_SxS_output(self.stage1_2D, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage2_3H = lut.transfer_2x2_input_SxS_output(self.stage2_3H, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage2_3D = lut.transfer_2x2_input_SxS_output(self.stage2_3D, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage2_3B = lut.transfer_2x2_input_SxS_output(self.stage2_3B, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage2_2H = lut.transfer_2x2_input_SxS_output(self.stage2_2H, quantization_interval=quantization_interval, batch_size=batch_size)
+        stage2_2D = lut.transfer_2x2_input_SxS_output(self.stage2_2D, quantization_interval=quantization_interval, batch_size=batch_size)
+        lut_model = hdblut.HDBLut.init_from_numpy(
+            stage1_3H, stage1_3D, stage1_3B, stage1_2H, stage1_2D,
+            stage2_3H, stage2_3D, stage2_3B, stage2_2H, stage2_2D
+        )
+        return lut_model
-
-
-class HDBLNet(nn.Module):
-    def __init__(self, hidden_dim = 64, layers_count = 4, scale = 4):
-        super(HDBLNet, self).__init__()
-        self.scale = scale
-        self.stage1_3H = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        self.stage1_3D = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        self.stage1_3B = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        self.stage1_3L = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-
-        self.stage2_3H = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        self.stage2_3D = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        self.stage2_3B = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        self.stage2_3L = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-
-        self._extract_pattern_3H = layers.PercievePattern(receptive_field_idxes=[[0,0],[0,1],[0,2]], center=[0,0], window_size=3)
-        self._extract_pattern_3D = layers.PercievePattern(receptive_field_idxes=[[0,0],[1,1],[2,2]], center=[0,0], window_size=3)
-        self._extract_pattern_3B = layers.PercievePattern(receptive_field_idxes=[[0,0],[1,2],[2,1]], center=[0,0], window_size=3)
-        self._extract_pattern_3L = layers.PercievePattern(receptive_field_idxes=[[0,0],[0,1],[1,1]], center=[0,0], window_size=3)
-
-    def forward_stage(self, x, scale, percieve_pattern, stage):
-        b,c,h,w = x.shape
-        x = percieve_pattern(x)
-        x = stage(x)
-        x = round_func(x)
-        x = x.reshape(b, c, h, w, scale, scale)
-        x = x.permute(0,1,2,4,3,5)
-        x = x.reshape(b, c, h*scale, w*scale)
-        return x
-
-    def forward(self, x):
-        b,c,h,w = x.shape
-        x = x.reshape(b*c, 1, h, w)
-        lsb = x % 16
-        msb = x - lsb
-        output_msb = torch.zeros([b*c, 1, h*2, w*2], dtype=x.dtype, device=x.device)
-        output_lsb = torch.zeros([b*c, 1, h*2, w*2], dtype=x.dtype, device=x.device)
-        for rotations_count in range(4):
-            rotated_msb = torch.rot90(msb, k=rotations_count, dims=[2, 3])
-            rotated_lsb = torch.rot90(lsb, k=rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3H, self.stage1_3H), k=-rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3D, self.stage1_3D), k=-rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3B, self.stage1_3B), k=-rotations_count, dims=[2, 3])
-            output_lsb += torch.rot90(self.forward_stage(rotated_lsb, 2, self._extract_pattern_3L, self.stage1_3L), k=-rotations_count, dims=[2, 3])
-        output_msb /= 4*3
-        output_lsb /= 4
-        output_msb = output_msb + output_lsb
-        x = output_msb
-        lsb = x % 16
-        msb = x - lsb
-        output_msb = torch.zeros([b*c, 1, h*4, w*4], dtype=x.dtype, device=x.device)
-        output_lsb = torch.zeros([b*c, 1, h*4, w*4], dtype=x.dtype, device=x.device)
-        for rotations_count in range(4):
-            rotated_msb = torch.rot90(msb, k=rotations_count, dims=[2, 3])
-            rotated_lsb = torch.rot90(lsb, k=rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3H, self.stage2_3H), k=-rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3D, self.stage2_3D), k=-rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3B, self.stage2_3B), k=-rotations_count, dims=[2, 3])
-            output_lsb += torch.rot90(self.forward_stage(rotated_lsb, 2, self._extract_pattern_3L, self.stage2_3L), k=-rotations_count, dims=[2, 3])
-        output_msb /= 4*3
-        output_lsb /= 4
-        output_msb = output_msb + output_lsb
-        x = output_msb
-        x = x.reshape(b, c, h*self.scale, w*self.scale)
-        return x
-
-    def get_lut_model(self, quantization_interval=16, batch_size=2**10):
-        stage_lut = lut.transfer_2x2_input_SxS_output(self.stage1_S, quantization_interval=quantization_interval, batch_size=batch_size)
-        lut_model = srlut.SRLut.init_from_numpy(stage_lut)
-        return lut_model
-
-
-# class SRNetY(nn.Module):
-#     def __init__(self, hidden_dim = 64, layers_count = 4, scale = 4):
-#         super(SRNetY, self).__init__()
-#         self.scale = scale
-#         self.stage1_S = layers.UpscaleBlock(
-#             hidden_dim=hidden_dim,
-#             layers_count=layers_count,
-#             upscale_factor=self.scale
-#         )
-#         self._extract_pattern_S = layers.PercievePattern(receptive_field_idxes=[[0,0],[0,1],[1,0],[1,1]], center=[0,0], window_size=2)
-#         self.rgb_to_ycbcr = layers.RgbToYcbcr()
-#         self.ycbcr_to_rgb = layers.YcbcrToRgb()
-
-#     def forward_stage(self, x, scale, percieve_pattern, stage):
-#         b,c,h,w = x.shape
-#         x = percieve_pattern(x)
-#         x = stage(x)
-#         x = round_func(x)
-#         x = x.reshape(b, c, h, w, scale, scale)
-#         x = x.permute(0,1,2,4,3,5)
-#         x = x.reshape(b, c, h*scale, w*scale)
-#         return x
-
-#     def forward(self, x):
-#         b,c,h,w = x.shape
-#         x = self.rgb_to_ycbcr(x)
-#         y = x[:,0:1,:,:]
-#         cbcr = x[:,1:,:,:]
-#         cbcr_scaled = F.interpolate(cbcr, size=[h*self.scale, w*self.scale], mode='bilinear')
-
-#         x = y.view(b, 1, h, w)
-#         output = self.forward_stage(x, self.scale, self._extract_pattern_S, self.stage1_S)
-#         output = torch.cat([output, cbcr_scaled], dim=1)
-#         output = self.ycbcr_to_rgb(output).clamp(0, 255)
-#         return output
-
-#     def get_lut_model(self, quantization_interval=16, batch_size=2**10):
-#         stage_lut = lut.transfer_2x2_input_SxS_output(self.stage1_S, quantization_interval=quantization_interval, batch_size=batch_size)
-#         lut_model = srlut.SRLutY.init_from_numpy(stage_lut)
-#         return lut_model
-
-# class SRNetR90(nn.Module):
-#     def __init__(self, hidden_dim = 64, layers_count = 4, scale = 4):
-#         super(SRNetR90, self).__init__()
-#         self.scale = scale
-#         self.stage1_S = layers.UpscaleBlock(
-#             hidden_dim=hidden_dim,
-#             layers_count=layers_count,
-#             upscale_factor=self.scale
-#         )
-#         self._extract_pattern_S = layers.PercievePattern(receptive_field_idxes=[[0,0],[0,1],[1,0],[1,1]], center=[0,0], window_size=2)
-
-#     def forward_stage(self, x, scale, percieve_pattern, stage):
-#         b,c,h,w = x.shape
-#         x = percieve_pattern(x)
-#         x = stage(x)
-#         x = round_func(x)
-#         x = x.reshape(b, c, h, w, scale, scale)
-#         x = x.permute(0,1,2,4,3,5)
-#         x = x.reshape(b, c, h*scale, w*scale)
-#         return x
-
-#     def forward(self, x):
-#         b,c,h,w = x.shape
-#         x = x.reshape(b*c, 1, h, w)
-#         output = torch.zeros([b*c, 1, h*self.scale, w*self.scale], dtype=x.dtype, device=x.device)
-#         output += self.forward_stage(x, self.scale, self._extract_pattern_S, self.stage1_S)
-#         for rotations_count in range(1,4):
-#             rotated = torch.rot90(x, k=rotations_count, dims=[2, 3])
-#             output += torch.rot90(self.forward_stage(rotated, self.scale, self._extract_pattern_S, self.stage1_S), k=-rotations_count, dims=[2, 3])
-#         output /= 4
-#         output = output.reshape(b, c, h*self.scale, w*self.scale)
-#         return output
-
-#     def get_lut_model(self, quantization_interval=16, batch_size=2**10):
-#         stage_lut = lut.transfer_2x2_input_SxS_output(self.stage1_S, quantization_interval=quantization_interval, batch_size=batch_size)
-#         lut_model = srlut.SRLutR90.init_from_numpy(stage_lut)
-#         return lut_model
-
-# class SRNetR90Y(nn.Module):
-#     def __init__(self, hidden_dim = 64, layers_count = 4, scale = 4):
-#         super(SRNetR90Y, self).__init__()
-#         self.scale = scale
-#         s_pattern=[[0,0],[0,1],[1,0],[1,1]]
-#         self.stage1_S = layers.UpscaleBlock(
-#             hidden_dim=hidden_dim,
-#             layers_count=layers_count,
-#             upscale_factor=self.scale
-#         )
-#         self._extract_pattern_S = layers.PercievePattern(receptive_field_idxes=[[0,0],[0,1],[1,0],[1,1]], center=[0,0], window_size=2)
-#         self.rgb_to_ycbcr = layers.RgbToYcbcr()
-#         self.ycbcr_to_rgb = layers.YcbcrToRgb()
-
-#     def forward_stage(self, x, scale, percieve_pattern, stage):
-#         b,c,h,w = x.shape
-#         x = percieve_pattern(x)
-#         x = stage(x)
-#         x = round_func(x)
-#         x = x.reshape(b, c, h, w, scale, scale)
-#         x = x.permute(0,1,2,4,3,5)
-#         x = x.reshape(b, c, h*scale, w*scale)
-#         return x
-
-#     def forward(self, x):
-#         b,c,h,w = x.shape
-#         x = self.rgb_to_ycbcr(x)
-#         y = x[:,0:1,:,:]
-#         cbcr = x[:,1:,:,:]
-#         cbcr_scaled = F.interpolate(cbcr, size=[h*self.scale, w*self.scale], mode='bilinear')
-
-#         x = y.view(b, 1, h, w)
-#         output = torch.zeros([b, 1, h*self.scale, w*self.scale], dtype=x.dtype, device=x.device)
-#         output += self.forward_stage(x, self.scale, self._extract_pattern_S, self.stage1_S)
-#         for rotations_count in range(1,4):
-#             rotated = torch.rot90(x, k=rotations_count, dims=[2, 3])
-#             output += torch.rot90(self.forward_stage(rotated, self.scale, self._extract_pattern_S, self.stage1_S), k=-rotations_count, dims=[2, 3])
-#         output /= 4
-#         output = torch.cat([output, cbcr_scaled], dim=1)
-#         output = self.ycbcr_to_rgb(output).clamp(0, 255)
-#         return output
-
-#     def get_lut_model(self, quantization_interval=16, batch_size=2**10):
-#         stage_lut = lut.transfer_2x2_input_SxS_output(self.stage1_S, quantization_interval=quantization_interval, batch_size=batch_size)
-#         lut_model = srlut.SRLutR90Y.init_from_numpy(stage_lut)
-#         return lut_model
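
Usage sketch for the rewritten HDBNet.get_lut_model: it transfers each of the ten perception branches (stage1/stage2 x 3H/3D/3B/2H/2D) into a lookup table via lut.transfer_2x2_input_SxS_output and packs them into a single hdblut.HDBLut. The snippet below is a minimal, assumed conversion flow; HDBNet, get_lut_model, quantization_interval and batch_size come from the change above, while the import path, checkpoint file and input sizes are hypothetical.

    # Hedged sketch, assuming the repository's models package exposes HDBNet;
    # the module path and checkpoint path are placeholders, not part of this change.
    import torch
    from models import HDBNet  # assumed import path

    net = HDBNet(hidden_dim=64, layers_count=4, scale=4)
    net.load_state_dict(torch.load("checkpoints/hdbnet_x4.pth", map_location="cpu"))  # hypothetical checkpoint
    net.eval()

    # Builds the ten per-branch LUTs and wraps them in an hdblut.HDBLut module.
    lut_model = net.get_lut_model(quantization_interval=16, batch_size=2**10)

    with torch.no_grad():
        lr = torch.randint(0, 256, (1, 3, 32, 32)).float()  # 8-bit-valued input, as HDBNet.forward expects
        sr = lut_model(lr)  # expected spatial size 128x128 for scale == 4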