From e5a4c9531a6b120b2a276d91bcd1da61938e6600 Mon Sep 17 00:00:00 2001
From: Vladimir Protsenko
Date: Tue, 7 May 2024 01:23:08 +0400
Subject: [PATCH] upd

---
 src/common/validation.py | 46 +++++++++++++++++++++++++++++++---------
 src/validate.py          |  2 +-
 2 files changed, 37 insertions(+), 11 deletions(-)

diff --git a/src/common/validation.py b/src/common/validation.py
index d585f56..47921a1 100644
--- a/src/common/validation.py
+++ b/src/common/validation.py
@@ -29,7 +29,8 @@ def val_image_pair(model, hr_image, lr_image, output_image_path=None, device='cu
     # metrics
     hr_image = modcrop(hr_image, model.scale)
     left, right = _rgb2ycbcr(pred_lr_image)[:, :, 0], _rgb2ycbcr(hr_image)[:, :, 0]
-    return PSNR(left, right, model.scale), cal_ssim(left, right), run_time_ns
+    lr_area = np.prod(lr_image.shape[-2:])
+    return PSNR(left, right, model.scale), cal_ssim(left, right), run_time_ns, lr_area
 
 def valid_steps(model, datasets, config, log_prefix=""):
     # ray.init(num_cpus=16, num_gpus=1, ignore_reinit_error=True, log_to_driver=False, runtime_env={"working_dir": "../"})
@@ -39,7 +40,10 @@ def valid_steps(model, datasets, config, log_prefix=""):
     for i in range(len(dataset_names)):
         dataset_name = dataset_names[i]
         psnrs, ssims = [], []
-        run_times_ns = []
+        run_times_ns = []
+        lr_areas = []
+        total_area = 0
+        start_time = time.time()
 
         predictions_path = config.valout_dir / dataset_name
         if not predictions_path.exists():
@@ -51,6 +55,8 @@
             output_image_path = predictions_path / f'{Path(hr_image_path).stem}_rcnet.png' if config.save_predictions else None
             task = val_image_pair(model, hr_image, lr_image, output_image_path, device=config.device)
             tasks.append(task)
+
+        total_time = time.time() - start_time
         # ready_refs, remaining_refs = ray.wait(tasks, num_returns=1, timeout=None)
         # while len(remaining_refs) > 0:
         #     print(f"\rReady {len(ready_refs)+1}/{len(test_dataset)}", end=" ")
@@ -58,22 +64,42 @@
         #     print("\r", end=" ")
 
         # tasks = [ray.get(task) for task in tasks]
-        for psnr, ssim, run_time_ns in tasks:
+        for psnr, ssim, run_time_ns, lr_area in tasks:
             psnrs.append(psnr)
             ssims.append(ssim)
             run_times_ns.append(run_time_ns)
+            lr_areas.append(lr_area)
+            total_area += lr_area
 
-        config.logger.info(
-            '\r{} | Dataset {} | AVG Val PSNR: {:02f}, AVG: SSIM: {:04f}'.format(log_prefix, dataset_name, np.mean(np.asarray(psnrs)), np.mean(np.asarray(ssims))))
-        config.writer.flush()
-
-        results.append([
+        row = [
             dataset_name,
             np.mean(psnrs),
             np.mean(ssims),
             np.mean(run_times_ns)*1e-9,
-            np.percentile(run_times_ns, q=95)*1e-9])
+            np.percentile(run_times_ns, q=95)*1e-9,
+            len(test_dataset),
+            np.mean(lr_areas),
+            total_area,
+            total_time
+        ]
+        results.append(row)
+        column_names = [
+            'Dataset',
+            'AVG PSNR',
+            'AVG SSIM',
+            f'AVG {config.device} Time, s',
+            f'P95 {config.device} Time, s',
+            'Image count',
+            'AVG image area',
+            'Total area',
+            'Total time, s'
+        ]
+        config.logger.info(pd.DataFrame(
+            [row],
+            columns=column_names
+        ).set_index('Dataset'))
+        config.writer.flush()
 
 
-    results = pd.DataFrame(results, columns=['Dataset', 'PSNR', 'SSIM', f'AVG {config.device} Time, s', f'P95 {config.device} Time, s']).set_index('Dataset')
+    results = pd.DataFrame(results, columns=column_names).set_index('Dataset')
     return results
\ No newline at end of file
diff --git a/src/validate.py b/src/validate.py
index 76640c0..9ebac2a 100644
--- a/src/validate.py
+++ b/src/validate.py
@@ -40,7 +40,7 @@ class ValOptions():
         if not args.valout_dir.exists():
             args.valout_dir.mkdir()
         args.current_iter = args.model_name.split('_')[-1]
-        args.results_path = os.path.join(args.valout_dir, f'results_{args.device}.csv')
+        args.results_path = os.path.join(args.valout_dir, f'results_{args.model_name}_{args.device}.csv')
         # Tensorboard for monitoring
         writer = SummaryWriter(log_dir=args.valout_dir)
         logger_name = f'val_{args.model_path.stem}'