NAFNet Image Deblurring and Converting the Model to ONNX

Thanks to https://github.com/hzk7287 for the help in solving the issues along the way!

Official repo: https://github.com/megvii-research/NAFNet/#results-and-pre-trained-models

Official introduction:
Although there have been significant advances in image restoration recently, the system complexity of state-of-the-art (SOTA) methods is also increasing, which can hinder convenient analysis and comparison of methods. In this paper, we propose a simple baseline that exceeds SOTA methods and is computationally efficient. To further simplify the baseline, we reveal that nonlinear activation functions, e.g., Sigmoid, ReLU, GELU, Softmax, etc., are not necessary: they can be replaced by multiplication or removed. From the baseline we thus derive a nonlinear-activation-free network, NAFNet. SOTA results are achieved on various challenging benchmarks, e.g., 33.69 dB PSNR on GoPro (image deblurring), exceeding the previous SOTA by 0.38 dB at only 8.4% of its computational cost; 40.30 dB PSNR on SIDD (image denoising), exceeding the previous SOTA by 0.28 dB at less than half the computational cost.

Following the official usage instructions is enough. Below is the official demo code.
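For reference, environment setup follows the official README; at the time of writing it was roughly the following (check the repo for the exact, current commands):

git clone https://github.com/megvii-research/NAFNet
cd NAFNet
pip install -r requirements.txt
python setup.py develop --no_cuda_ext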

1. Demo model verification

# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from BasicSR (https://github.com/xinntao/BasicSR)
# Copyright 2018-2020 BasicSR Authors
# ------------------------------------------------------------------------
import torch

from basicsr.data import create_dataloader, create_dataset
from basicsr.models import create_model
from basicsr.train import parse_options
from basicsr.utils import FileClient, imfrombytes, img2tensor, padding, tensor2img, imwrite

from basicsr.utils import (get_env_info, get_root_logger, get_time_str,
                           make_exp_dirs)
from basicsr.utils.options import dict2str

def main():
    # parse options, set distributed setting, set random seed
    opt = parse_options(is_train=False)
    opt['num_gpu'] = torch.cuda.device_count()

    img_path = opt['img_path'].get('input_img')
    output_path = opt['img_path'].get('output_img')

    ## 1. read image
    file_client = FileClient('disk')

    img_bytes = file_client.get(img_path, None)
    try:
        img = imfrombytes(img_bytes, float32=True)
    except Exception:
        raise Exception("path {} not working".format(img_path))

    img = img2tensor(img, bgr2rgb=True, float32=True)

    ## 2. run inference
    opt['dist'] = False
    model = create_model(opt)

    model.feed_data(data={'lq': img.unsqueeze(dim=0)})

    # optional tiled inference: split large inputs into grids if enabled in the yml
    if model.opt['val'].get('grids', False):
        model.grids()

    model.test()

    if model.opt['val'].get('grids', False):
        model.grids_inverse()

    visuals = model.get_current_visuals()
    sr_img = tensor2img([visuals['result']])
    imwrite(sr_img, output_path)

    print(f'inference {img_path} .. finished. saved to {output_path}')

if __name__ == '__main__':
    main()

Single-image inference:

python basicsr/demo.py -opt options/test/REDS/NAFNet-width64.yml --input_path ./demo/blurry.jpg --output_path ./demo/deblur_img.png

2. Exporting the model to ONNX (the key part, not covered in the official repo)

To convert to ONNX, a few changes to the network code are needed in NAFNet-main\basicsr\models\archs\NAFNet_arch.py. The main one replaces the repo's custom LayerNorm2d (built on a hand-written autograd Function, which the ONNX exporter typically cannot trace) with the standard torch.nn.LayerNorm applied over the channel dimension via permutes. The changed parts are wrapped in ########## lines for reference.

class NAFBlock(nn.Module):
    def __init__(self, c, DW_Expand=2, FFN_Expand=2, drop_out_rate=0.):
        super().__init__()
        dw_channel = c * DW_Expand
        self.conv1 = nn.Conv2d(in_channels=c, out_channels=dw_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
        self.conv2 = nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel, kernel_size=3, padding=1, stride=1, groups=dw_channel,
                               bias=True)
        self.conv3 = nn.Conv2d(in_channels=dw_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)

        # Simplified Channel Attention
        self.sca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels=dw_channel // 2, out_channels=dw_channel // 2, kernel_size=1, padding=0, stride=1,
                      groups=1, bias=True),
        )

        # SimpleGate
        self.sg = SimpleGate()

        ffn_channel = FFN_Expand * c
        self.conv4 = nn.Conv2d(in_channels=c, out_channels=ffn_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
        self.conv5 = nn.Conv2d(in_channels=ffn_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)

        # self.norm1 = LayerNorm2d(c)
        # self.norm2 = LayerNorm2d(c)

        ###########################
        self.norm1 = torch.nn.LayerNorm(c)
        self.norm2 = torch.nn.LayerNorm(c)
        ###########################

        self.dropout1 = nn.Dropout(drop_out_rate) if drop_out_rate > 0. else nn.Identity()
        self.dropout2 = nn.Dropout(drop_out_rate) if drop_out_rate > 0. else nn.Identity()

        self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
        self.gamma = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)

    def forward(self, inp):
        x = inp

        ##########################
        # NCHW -> NWHC so nn.LayerNorm(c) normalizes over the channel dimension
        x = torch.permute(x, (0, 3, 2, 1))
        ############################

        x = self.norm1(x)

        ##########################
        # permute back to NCHW
        x = torch.permute(x, (0, 3, 2, 1))
        #############################

        x = self.conv1(x)
        x = self.conv2(x)
        x = self.sg(x)
        x = x * self.sca(x)
        x = self.conv3(x)

        x = self.dropout1(x)

        y = inp + x * self.beta

        ################################
        # same permute trick around the second LayerNorm
        yy = torch.permute(y, (0, 3, 2, 1))
        yy = self.norm2(yy)
        x = torch.permute(yy, (0, 3, 2, 1))
        x = self.conv4(x)
        ####################################

        # x = self.conv4(self.norm2(y))
        x = self.sg(x)
        x = self.conv5(x)

        x = self.dropout2(x)

        return y + x * self.gamma
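For context, the SimpleGate used above is defined in the same file; it is the paper's "replace activations with multiplication" idea. It splits the features in half along the channel dimension and multiplies the halves, which is also why conv3 and the SCA branch take dw_channel // 2 input channels:

class SimpleGate(nn.Module):
    def forward(self, x):
        # split channels in half and multiply: a multiplication instead of an activation
        x1, x2 = x.chunk(2, dim=1)
        return x1 * x2

And a quick standalone check (my own sketch, not from the original repo) that the permute + nn.LayerNorm(c) replacement really normalizes over the channel dimension of an NCHW tensor:

import torch

c = 8
x = torch.randn(2, c, 16, 16)
ln = torch.nn.LayerNorm(c)
y = torch.permute(x, (0, 3, 2, 1))   # NCHW -> NWHC, channels last
y = ln(y)
y = torch.permute(y, (0, 3, 2, 1))   # back to NCHW
# per-position mean/variance over C should be ~0/~1
print(y.mean(dim=1).abs().max(), y.var(dim=1, unbiased=False).mean())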

Below is the ONNX conversion code. The network is instantiated directly in the script, so if you trained with different architecture parameters, adjust them accordingly. Verification of the result is included in the same script; see the code for the details.


import os
import torch
import onnxruntime
from onnxruntime.datasets import get_example
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from basicsr.models.archs.NAFNet_arch import NAFNet
from basicsr.utils import FileClient, imfrombytes, img2tensor, tensor2img, imwrite
from copy import deepcopy
from torch.nn.parallel import DataParallel, DistributedDataParallel

def model_to_device(net):
    """Model to device. It also warps models with DistributedDataParallel
    or DataParallel.

    Args:
        net (nn.Module)
"""
    # opt = parse_options(is_train=False)
    num_gpu = torch.cuda.device_count()
    device = torch.device('cuda' if num_gpu != 0 else 'cpu')

    net = net.to(device)
    return net

def print_different_keys_loading(crt_net, load_net, strict=True):
    """Print keys with differnet name or different size when loading models.

    1. Print keys with differnet names.

    2. If strict=False, print the same key but with different tensor size.

        It also ignore these keys with different sizes (not load).

    Args:
        crt_net (torch model): Current network.

        load_net (dict): Loaded network.

        strict (bool): Whether strictly loaded. Default: True.

"""
    if isinstance(crt_net, (DataParallel, DistributedDataParallel)):
        crt_net = crt_net.module
    crt_net = crt_net.state_dict()
    crt_net_keys = set(crt_net.keys())
    load_net_keys = set(load_net.keys())

    # check the size for the same keys
    if not strict:
        common_keys = crt_net_keys & load_net_keys
        for k in common_keys:
            if crt_net[k].size() != load_net[k].size():
                load_net[k + '.ignore'] = load_net.pop(k)

def main():

    width = 64
    img_channel = 3
    enc_blk_nums = [1, 1, 1, 28]
    middle_blk_num = 1
    dec_blk_nums = [1, 1, 1, 1]
    net_g = NAFNet(img_channel=img_channel, width=width, middle_blk_num=middle_blk_num,
                   enc_blk_nums=enc_blk_nums, dec_blk_nums=dec_blk_nums)
    # net_g = model_to_device(net_g)
    net_g = net_g.to("cuda")

    # load the trained weights
    load_path = r"E:\work\NAFNet-main\experiments\pretrained_models\NAFNET_SAISI-width64-95000.pth"
    param_key = 'params'
    load_net = torch.load(
        load_path, map_location=lambda storage, loc: storage)
    if param_key is not None:
        load_net = load_net[param_key]
    print(' load net keys', load_net.keys())
    # remove unnecessary 'module.'
    for k, v in deepcopy(load_net).items():
        if k.startswith('module.'):
            load_net[k[7:]] = v
            load_net.pop(k)
    print_different_keys_loading(net_g, load_net, strict=True)
    net_g.load_state_dict(load_net, strict=True)

    # dummy_input can simply be a random tensor for testing:
    # dummy_input = torch.randn(1, 3, 280, 280, device="cuda")

    # Here the original image is transformed into dummy_input instead; the random tensor above works too.
    # Note the image size: after export, the ONNX model takes a fixed input size, not a dynamic one.
    img_path = r"E:\work\NAFNet-main\datasets\caisi\test\Snipaste_2022-07-13_16-06-58.png"
    # path for the model's output image
    output_path = r"E:\work\NAFNet-main\datasets\caisi\test\pp.png"
    file_client = FileClient('disk')
    img_bytes = file_client.get(img_path, None)
    img = imfrombytes(img_bytes, float32=True)
    img = img2tensor(img, bgr2rgb=True, float32=True).unsqueeze(dim=0)
    dummy_input = img.cuda()

    # the original PyTorch prediction, to compare against the exported ONNX prediction
    pred = net_g(dummy_input)
    sr_img = tensor2img(pred)
    imwrite(sr_img, output_path)

    # export to ONNX; onnx_path is where the model is saved
    input_names = ["actual_input_1"] + ["learned_%d" % i for i in range(16)]
    output_names = ["output1"]
    onnx_path = "E:\\work\\NAFNet-main\\NAFNet.onnx"
    torch.onnx.export(net_g, dummy_input, onnx_path, verbose=True,
                      input_names=input_names, output_names=output_names, opset_version=11)

    # verify with ONNX Runtime; output_onnx_path is where the ONNX result is saved
    output_onnx_path = r"E:\work\NAFNet-main\datasets\caisi\test\onnx_pp.png"
    onnx_model_path = "E:\\work\\NAFNet-main\\NAFNet.onnx"
    example_model = get_example(onnx_model_path)
    sess = onnxruntime.InferenceSession(example_model, providers=['CUDAExecutionProvider'])
    imgs = img.cpu().numpy()
    onnx_out = sess.run(None, {input_names[0]: imgs})
    out_onnx_tensor = torch.from_numpy(onnx_out[0])
    sr_onnx_img = tensor2img(out_onnx_tensor)
    imwrite(sr_onnx_img, output_onnx_path)

if __name__ == '__main__':
    main()

Running this script is all that is needed, e.g. python demo.py, or run it directly from your IDE.
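As an extra sanity check (my addition, not part of the original workflow), the exported file can also be validated structurally with the onnx package:

import onnx

# load and structurally validate the exported model (path from the script above)
model = onnx.load("E:\\work\\NAFNet-main\\NAFNet.onnx")
onnx.checker.check_model(model)
print("inputs:", [i.name for i in model.graph.input])
print("outputs:", [o.name for o in model.graph.output])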

This updates an earlier version of the post, which had a few problems; they have since been fixed and verified. If you run into issues, feel free to comment below.

When validating the exported model, note that dummy_input = torch.randn(1, 3, 280, 280, device="cuda") fixes the input size (280, 280) at export time, so images fed in for prediction must match that size.
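If the fixed size is too restrictive, torch.onnx.export also accepts a dynamic_axes argument. A minimal sketch (my variant of the export call above, not verified end-to-end; whether it works depends on the ops in the traced graph, though AdaptiveAvgPool2d(1) exports as a global average pool and is usually fine):

# hypothetical variant of the export call with dynamic height/width
torch.onnx.export(
    net_g, dummy_input, "E:\\work\\NAFNet-main\\NAFNet_dynamic.onnx",
    input_names=["actual_input_1"], output_names=["output1"],
    opset_version=11,
    dynamic_axes={"actual_input_1": {2: "height", 3: "width"},
                  "output1": {2: "height", 3: "width"}},
)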

When converting a model to ONNX, pull the network out on its own, ideally separate from the data processing; that way fewer problems come up during the conversion.
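To make the PyTorch-vs-ONNX comparison quantitative instead of eyeballing the two saved images, a small sketch reusing pred and onnx_out from the conversion script:

import numpy as np

# compare the raw network outputs before they are turned into images
torch_out = pred.detach().cpu().numpy()
max_diff = np.abs(torch_out - onnx_out[0]).max()
print("max abs diff between PyTorch and ONNX outputs:", max_diff)
# a faithful export typically lands around 1e-5 or below in float32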


Original: https://blog.csdn.net/TF666666/article/details/125678629
Author: TF666666
Title: NAFNet Image Deblurring and Converting the Model to ONNX
