diff --git a/src/common/data.py b/src/common/data.py
index b34f5a8..01c6059 100644
--- a/src/common/data.py
+++ b/src/common/data.py
@@ -11,26 +11,26 @@ from pathlib import Path
 from common.utils import PIL_CONVERT_COLOR, pil2numpy
 
 image_extensions = ['.jpg', '.png']
-def load_images_cached(images_dir_path, color_model):
+def load_images_cached(images_dir_path, color_model, reset_cache):
     image_paths = sorted([f for f in Path(images_dir_path).glob("*") if f.suffix.lower() in image_extensions])
     cache_path = Path(images_dir_path).parent / f"{Path(images_dir_path).stem}_{color_model}_cache.npy"
     cache_path = cache_path.resolve()
-    if not Path(cache_path).exists():
+    if not Path(cache_path).exists() or reset_cache:
         print("Caching to:", cache_path)
-        value = {f:pil2numpy(PIL_CONVERT_COLOR[color_model](Image.open(f))) for f in image_paths}
-        np.save(cache_path, value, allow_pickle=True)
+        value = {f:pil2numpy(PIL_CONVERT_COLOR[color_model](Image.open(f))) for f in image_paths}
+        np.save(cache_path, value, allow_pickle=True)
     else:
         value = np.load(cache_path, allow_pickle=True).item()
         print("Loaded cache from:", cache_path)
     return list(value.keys()), list(value.values())
 
 class SRTrainDataset(Dataset):
-    def __init__(self, hr_dir_path, lr_dir_path, patch_size, color_model = "RGB", rigid_aug=True):
+    def __init__(self, hr_dir_path, lr_dir_path, patch_size, color_model = "RGB", rigid_aug=True, reset_cache = False):
         super(SRTrainDataset, self).__init__()
         self.sz = patch_size
         self.rigid_aug = rigid_aug
-        self.hr_image_names, self.hr_images = load_images_cached(hr_dir_path, color_model=color_model)
-        self.lr_image_names, self.lr_images = load_images_cached(lr_dir_path, color_model=color_model)
+        self.hr_image_names, self.hr_images = load_images_cached(hr_dir_path, color_model=color_model, reset_cache=reset_cache)
+        self.lr_image_names, self.lr_images = load_images_cached(lr_dir_path, color_model=color_model, reset_cache=reset_cache)
         assert len(self.hr_images) == len(self.lr_images)
 
     def __getitem__(self, idx):
@@ -87,10 +87,10 @@ class SRTrainDataset(Dataset):
         return len(self.hr_images)
 
 class SRTestDataset(Dataset):
-    def __init__(self, hr_dir_path, lr_dir_path, color_model):
+    def __init__(self, hr_dir_path, lr_dir_path, color_model, reset_cache):
         super(SRTestDataset, self).__init__()
-        self.hr_image_paths, self.hr_images = load_images_cached(hr_dir_path, color_model=color_model)
-        self.lr_image_paths, self.lr_images = load_images_cached(lr_dir_path, color_model=color_model)
+        self.hr_image_paths, self.hr_images = load_images_cached(hr_dir_path, color_model=color_model, reset_cache=reset_cache)
+        self.lr_image_paths, self.lr_images = load_images_cached(lr_dir_path, color_model=color_model, reset_cache=reset_cache)
         assert len(self.hr_images) == len(self.lr_images)
 
     def __getitem__(self, idx):
diff --git a/src/common/utils.py b/src/common/utils.py
index 03c37eb..1f8ef27 100644
--- a/src/common/utils.py
+++ b/src/common/utils.py
@@ -10,6 +10,7 @@ PIL_CONVERT_COLOR = {
     'RGB': lambda pil_image: pil_image.convert("RGB") if pil_image.mode != 'RGB' else pil_image,
     'YCbCr': lambda pil_image: pil_image.convert("YCbCr") if pil_image.mode != 'YCbCr' else pil_image,
     'Y': lambda pil_image: pil_image.convert("YCbCr").getchannel(0) if pil_image.mode != 'YCbCr' else pil_image.getchannel(0),
+    # 'Y': lambda pil_image: _rgb2ycbcr(np.array(pil_image))[:,:,0],  # if pil_image.mode != 'YCbCr' else pil_image.getchannel(0),
     'L': lambda pil_image: pil_image.convert("L") if pil_image.mode != 'L' else pil_image,
 }
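The data.py change threads a reset_cache flag through load_images_cached, which keeps a whole directory of decoded images in a single .npy file (a pickled {path: array} dict named after the directory and color model). Without the flag, a stale cache silently shadows any change to the dataset on disk or to the color conversion, such as the alternative 'Y' conversion left commented out in utils.py. A minimal sketch of the same dict-in-.npy caching pattern, assuming only numpy; load_cached and build_cache are illustrative stand-ins, not names from the patch:

    from pathlib import Path
    import numpy as np

    def load_cached(cache_path, build_cache, reset_cache=False):
        cache_path = Path(cache_path)
        if reset_cache or not cache_path.exists():
            value = build_cache()  # stand-in for the PIL decode/convert step; returns {path: np.ndarray}
            np.save(cache_path, value, allow_pickle=True)  # the dict is pickled inside the .npy
        else:
            value = np.load(cache_path, allow_pickle=True).item()  # .item() unwraps the 0-d object array
        return value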
diff --git a/src/common/validation.py b/src/common/validation.py
index c9fb820..8fc989e 100644
--- a/src/common/validation.py
+++ b/src/common/validation.py
@@ -74,7 +74,7 @@ def valid_steps(model, datasets, config, log_prefix="", print_progress = False):
             task = val_image_pair(model, hr_image, lr_image, color_model=config.color_model, output_image_path=output_image_path, device=config.device)
             tasks.append(task)
             if print_progress:
-                print(f"\r{datetime.now()-start_datetime} {idx}/{len(test_dataset)} {hr_image_path}", end=" "*25)
+                print(f"\r{datetime.now()-start_datetime} {idx+1}/{len(test_dataset)} {hr_image_path}", end=" "*25)
         if print_progress:
             print()
         total_time = time.time() - start_time
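The validation.py change fixes an off-by-one in the progress display only: the counter was zero-based, so the last of, say, 100 images printed as 99/100; printing idx+1 makes the counter end at 100/100 without touching the loop logic.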
diff --git a/src/models/hdbnet.py b/src/models/hdbnet.py
index 94b0334..4f5bacf 100644
--- a/src/models/hdbnet.py
+++ b/src/models/hdbnet.py
@@ -8,23 +8,24 @@ from pathlib import Path
 from . import hdblut
 from common import layers
 from itertools import cycle
+from models.base import SRNetBase
 
-class HDBNet(nn.Module):
+class HDBNet(SRNetBase):
     def __init__(self, hidden_dim = 64, layers_count = 4, scale = 4):
         super(HDBNet, self).__init__()
         assert scale == 4
         self.scale = scale
-        self.stage1_3H = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2*2)
-        self.stage1_3D = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2*2)
-        self.stage1_3B = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2*2)
-        self.stage1_2H = layers.UpscaleBlock(in_features=2, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2*2)
-        self.stage1_2D = layers.UpscaleBlock(in_features=2, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2*2)
-
-        # self.stage2_3H = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        # self.stage2_3D = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        # self.stage2_3B = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        # self.stage2_2H = layers.UpscaleBlock(in_features=2, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
-        # self.stage2_2D = layers.UpscaleBlock(in_features=2, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2)
+        self.stage1_3H = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=255, output_max_value=255)
+        self.stage1_3D = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=255, output_max_value=255)
+        self.stage1_3B = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=255, output_max_value=255)
+        self.stage1_2H = layers.UpscaleBlock(in_features=2, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=15, output_max_value=255)
+        self.stage1_2D = layers.UpscaleBlock(in_features=2, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=15, output_max_value=255)
+
+        self.stage2_3H = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=255, output_max_value=255)
+        self.stage2_3D = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=255, output_max_value=255)
+        self.stage2_3B = layers.UpscaleBlock(in_features=3, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=255, output_max_value=255)
+        self.stage2_2H = layers.UpscaleBlock(in_features=2, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=15, output_max_value=255)
+        self.stage2_2D = layers.UpscaleBlock(in_features=2, hidden_dim=hidden_dim, layers_count=layers_count, upscale_factor=2, input_max_value=15, output_max_value=255)
 
         self._extract_pattern_3H = layers.PercievePattern(receptive_field_idxes=[[0,0],[0,1],[0,2]], center=[0,0], window_size=3)
         self._extract_pattern_3D = layers.PercievePattern(receptive_field_idxes=[[0,0],[1,1],[2,2]], center=[0,0], window_size=3)
@@ -32,6 +33,8 @@ class HDBNet(nn.Module):
         self._extract_pattern_2H = layers.PercievePattern(receptive_field_idxes=[[0,0],[0,1]], center=[0,0], window_size=2)
         self._extract_pattern_2D = layers.PercievePattern(receptive_field_idxes=[[0,0],[1,1]], center=[0,0], window_size=2)
 
+        self.rotations = 4
+
     def forward_stage(self, x, scale, percieve_pattern, stage):
         b,c,h,w = x.shape
         x = percieve_pattern(x)
@@ -41,47 +44,48 @@ class HDBNet(nn.Module):
         x = x.permute(0,1,2,4,3,5)
         x = x.reshape(b, c, h*scale, w*scale)
         return x
-    
+
     def forward(self, x, config=None):
         b,c,h,w = x.shape
         x = x.reshape(b*c, 1, h, w)
         lsb = x % 16
         msb = x - lsb
-        output_msb = torch.zeros([b*c, 1, h*2*2, w*2*2], dtype=x.dtype, device=x.device)
-        output_lsb = torch.zeros([b*c, 1, h*2*2, w*2*2], dtype=x.dtype, device=x.device)
-        for rotations_count in range(4):
+        output = torch.zeros([b*c, 1, h*2, w*2], dtype=x.dtype, device=x.device)
+        for rotations_count in range(self.rotations):
             rotated_msb = torch.rot90(msb, k=rotations_count, dims=[2, 3])
             rotated_lsb = torch.rot90(lsb, k=rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2*2, self._extract_pattern_3H, self.stage1_3H), k=-rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2*2, self._extract_pattern_3D, self.stage1_3D), k=-rotations_count, dims=[2, 3])
-            output_msb += torch.rot90(self.forward_stage(rotated_msb, 2*2, self._extract_pattern_3B, self.stage1_3B), k=-rotations_count, dims=[2, 3])
-            output_lsb += torch.rot90(self.forward_stage(rotated_lsb, 2*2, self._extract_pattern_2H, self.stage1_2H), k=-rotations_count, dims=[2, 3])
-            output_lsb += torch.rot90(self.forward_stage(rotated_lsb, 2*2, self._extract_pattern_2D, self.stage1_2D), k=-rotations_count, dims=[2, 3])
-        output_msb /= 4*3
-        output_lsb /= 4*2
-        output_msb = round_func((output_msb / 255) * 16) * 15
-        output_lsb = (output_lsb / 255) * 15
-        # print(output_msb.min(), output_msb.max(), output_lsb.min(), output_lsb.max())
-        x = output_msb + output_lsb
-        # lsb = x % 16
-        # msb = x - lsb
-
-        # output_msb = torch.zeros([b*c, 1, h*4, w*4], dtype=x.dtype, device=x.device)
-        # output_lsb = torch.zeros([b*c, 1, h*4, w*4], dtype=x.dtype, device=x.device)
-        # for rotations_count in range(4):
-        #     rotated_msb = torch.rot90(msb, k=rotations_count, dims=[2, 3])
-        #     rotated_lsb = torch.rot90(lsb, k=rotations_count, dims=[2, 3])
-        #     output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3H, self.stage2_3H), k=-rotations_count, dims=[2, 3])
-        #     output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3D, self.stage2_3D), k=-rotations_count, dims=[2, 3])
-        #     output_msb += torch.rot90(self.forward_stage(rotated_msb, 2, self._extract_pattern_3B, self.stage2_3B), k=-rotations_count, dims=[2, 3])
-        #     output_lsb += torch.rot90(self.forward_stage(rotated_lsb, 2, self._extract_pattern_2H, self.stage2_2H), k=-rotations_count, dims=[2, 3])
-        #     output_lsb += torch.rot90(self.forward_stage(rotated_lsb, 2, self._extract_pattern_2D, self.stage2_2D), k=-rotations_count, dims=[2, 3])
-        # output_msb /= 4*3
-        # output_lsb /= 4*2
-        # output_msb = round_func((output_msb / 255) * 16) * 15
-        # output_lsb = (output_lsb / 255) * 15
-        # # print(output_msb.min(), output_msb.max(), output_lsb.min(), output_lsb.max())
-        # x = output_msb + output_lsb
+            output_msb = self.forward_stage(rotated_msb, 2, self._extract_pattern_3H, self.stage1_3H) + \
+                         self.forward_stage(rotated_msb, 2, self._extract_pattern_3D, self.stage1_3D) + \
+                         self.forward_stage(rotated_msb, 2, self._extract_pattern_3B, self.stage1_3B)
+            output_msb /= 3
+            output_lsb = self.forward_stage(rotated_lsb, 2, self._extract_pattern_2H, self.stage1_2H) + \
+                         self.forward_stage(rotated_lsb, 2, self._extract_pattern_2D, self.stage1_2D)
+            output_lsb /= 2
+            if config is not None and config.current_iter % config.display_step == 0:
+                config.writer.add_histogram('s1_output_lsb', output_lsb.detach().cpu().numpy(), config.current_iter)
+                config.writer.add_histogram('s1_output_msb', output_msb.detach().cpu().numpy(), config.current_iter)
+            output += torch.rot90(output_msb + output_lsb, k=-rotations_count, dims=[2, 3]).clamp(0, 255)
+        output /= self.rotations
+        x = output
+        lsb = x % 16
+        msb = x - lsb
+        output = torch.zeros([b*c, 1, h*4, w*4], dtype=x.dtype, device=x.device)
+        for rotations_count in range(self.rotations):
+            rotated_msb = torch.rot90(msb, k=rotations_count, dims=[2, 3])
+            rotated_lsb = torch.rot90(lsb, k=rotations_count, dims=[2, 3])
+            output_msb = self.forward_stage(rotated_msb, 2, self._extract_pattern_3H, self.stage2_3H) + \
+                         self.forward_stage(rotated_msb, 2, self._extract_pattern_3D, self.stage2_3D) + \
+                         self.forward_stage(rotated_msb, 2, self._extract_pattern_3B, self.stage2_3B)
+            output_msb /= 3
+            output_lsb = self.forward_stage(rotated_lsb, 2, self._extract_pattern_2H, self.stage2_2H) + \
+                         self.forward_stage(rotated_lsb, 2, self._extract_pattern_2D, self.stage2_2D)
+            output_lsb /= 2
+            if config is not None and config.current_iter % config.display_step == 0:
+                config.writer.add_histogram('s2_output_lsb', output_lsb.detach().cpu().numpy(), config.current_iter)
+                config.writer.add_histogram('s2_output_msb', output_msb.detach().cpu().numpy(), config.current_iter)
+            output += torch.rot90(output_msb + output_lsb, k=-rotations_count, dims=[2, 3]).clamp(0, 255)
+        output /= self.rotations
+        x = output
         x = x.reshape(b, c, h*self.scale, w*self.scale)
         return x
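The hdbnet.py change replaces the single x4 stage (and the commented-out draft of a second stage) with two live x2 stages. Each stage splits its 8-bit input into a 4-bit LSB plane (x % 16) and an MSB plane (x - lsb), feeds the planes through separate branches (input_max_value 255 for MSB, 15 for LSB), and averages the results over four 90-degree rotations, clamping each rotation's msb+lsb sum to [0, 255]. A simplified sketch of the bit-plane split and rotation ensemble; branch is a hypothetical stand-in for the UpscaleBlock branches, and the per-branch averaging is omitted:

    import torch

    def rotation_ensemble(x, branch, rotations=4):
        # Run `branch` on each of four rotations of x, undo each rotation,
        # and average; HDBNet.forward additionally clamps the combined
        # msb+lsb output of every rotation to [0, 255] before averaging.
        out = 0
        for k in range(rotations):
            rotated = torch.rot90(x, k=k, dims=[2, 3])
            out = out + torch.rot90(branch(rotated), k=-k, dims=[2, 3])
        return out / rotations

    x = torch.randint(0, 256, (1, 1, 8, 8)).float()
    lsb = x % 16   # low 4 bits, values in [0, 15]
    msb = x - lsb  # high 4 bits, multiples of 16 in [0, 240]
    branch = torch.nn.Upsample(scale_factor=2)  # stand-in; the real model uses lookup branches
    y = (rotation_ensemble(msb, branch) + rotation_ensemble(lsb, branch)).clamp(0, 255)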
diff --git a/src/test.py b/src/test.py
index 541babd..f0cd138 100644
--- a/src/test.py
+++ b/src/test.py
@@ -30,6 +30,7 @@ class ValOptions():
         self.parser.add_argument('--device', type=str, default='cuda', help='Device of the model')
         self.parser.add_argument('--color_model', type=str, default="RGB", help="Color model for train and test dataset.")
         self.parser.add_argument('--progress', type=bool, default=True, help='Show progress bar')
+        self.parser.add_argument('--reset_cache', action='store_true', default=False, help='Discard datasets cache')
 
     def parse_args(self):
         args = self.parser.parse_args()
@@ -84,7 +85,8 @@ if __name__ == "__main__":
         test_datasets[test_dataset_name] = SRTestDataset(
             hr_dir_path = Path(config.datasets_dir) / test_dataset_name / "HR",
             lr_dir_path = Path(config.datasets_dir) / test_dataset_name / "LR" / f"X{model.scale}",
-            color_model=config.color_model
+            color_model=config.color_model,
+            reset_cache=config.reset_cache,
         )
 
     results = valid_steps(model=model, datasets=test_datasets, config=config, log_prefix=f"Model {config.model_name}", print_progress=config.progress)
diff --git a/src/train.py b/src/train.py
index fd8461a..4103f31 100644
--- a/src/train.py
+++ b/src/train.py
@@ -48,7 +48,8 @@ class TrainOptions:
         parser.add_argument('--save_predictions', action='store_true', default=True, help='Save model predictions to exp_dir/val/dataset_name')
         parser.add_argument('--device', default='cuda', help='Device of the model')
         parser.add_argument('--quantization_bits', '-q', type=int, default=4, help="Used when model is LUT. Number of 4DLUT buckets defined as 2**bits. Value is in range [1, 8].")
-        parser.add_argument('--color_model', type=str, default="RGB", help="Color model for train and test dataset.")
+        parser.add_argument('--color_model', type=str, default="RGB", help="Color model for train and test dataset.")
+        parser.add_argument('--reset_cache', action='store_true', default=False, help='Discard datasets cache')
         self.parser = parser
 
@@ -137,7 +138,8 @@ if __name__ == "__main__":
             hr_dir_path = Path(config.datasets_dir) / train_dataset_name / "HR",
             lr_dir_path = Path(config.datasets_dir) / train_dataset_name / "LR" / f"X{config.scale}",
             patch_size = config.crop_size,
-            color_model = config.color_model
+            color_model = config.color_model,
+            reset_cache=config.reset_cache
         ))
     train_dataset = torch.utils.data.ConcatDataset(train_datasets)
     train_loader = DataLoader(
@@ -156,7 +158,8 @@ if __name__ == "__main__":
         test_datasets[test_dataset_name] = SRTestDataset(
             hr_dir_path = Path(config.datasets_dir) / test_dataset_name / "HR",
             lr_dir_path = Path(config.datasets_dir) / test_dataset_name / "LR" / f"X{config.scale}",
-            color_model = config.color_model
+            color_model = config.color_model,
+            reset_cache=config.reset_cache
         )
     l_accum = [0., 0., 0.]
     prepare_data_time = 0.
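With the plumbing above, both entry points expose the same switch: --reset_cache is a store_true flag (the explicit default=False is redundant but harmless) that flows from train.py and test.py through the datasets down to load_images_cached. After editing a dataset on disk or switching its color model, a run such as `python src/train.py --reset_cache ...` or `python src/test.py --reset_cache ...` (remaining arguments as usual) rebuilds the .npy caches instead of loading stale ones.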