YOLO Series — YOLOv7 (Part 6): Deploying the YOLOv7 ONNX Model

Many people have asked me how to deploy a weight file trained with the YOLOv7 algorithm, so I am writing a dedicated blog post on deployment.
Normally, a weight file trained with the PyTorch deep learning framework comes in .pt format, which we can call directly from Python. In real industrial settings, however, the weight file is usually called from C++, so we need to convert the .pt file into a format that C++ can consume. I generally use one of the following approaches:

  • Use libtorch: convert the .pt file into a torchscript.pt file, then call it directly through the official libtorch library
  • First convert the .pt file to onnx format (ONNX is an open neural network exchange format), then call it through OpenCV's API
  • Likewise convert to onnx first, then call the weight file with ONNX Runtime (the method used in this post)
  • Convert the .pt weight file to TensorRT format, then call it with TensorRT

P.S. There are of course many other ways to call deep learning weight files from C++; I have only listed the ones I personally like to use.

1. Environment Setup

This post uses ONNX Runtime to call the onnx weight file, with the runtime environment configured through Visual Studio. We first set up the Visual Studio environment, where two external libraries need to be configured: OpenCV (for reading and writing images) and ONNX Runtime (for calling the weight file). There are plenty of good writeups online that walk through this configuration step by step.
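
Once both libraries are configured, a small sanity check helps before moving on. The following is a minimal sketch of my own (not part of the YOLOv7 project): if it compiles, links, and prints both version strings, OpenCV and ONNX Runtime are wired up correctly.

    // Build-environment smoke test: verifies that both libraries' headers and import libs are found
    #include <iostream>
    #include <opencv2/core.hpp>
    #include <onnxruntime_cxx_api.h>

    int main()
    {
        std::cout << "OpenCV version: " << CV_VERSION << std::endl;
        std::cout << "ONNX Runtime version: " << OrtGetApiBase()->GetVersionString() << std::endl;
        return 0;
    }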

2. Converting the Weight File

YOLOv7 project download link: YOLO V7
Note: make absolutely sure to download the latest version of the project. The first time I downloaded YOLOv7, the author had not yet fixed a bug in export.py, and the exported onnx model could not be called; only after I re-downloaded the latest code did everything run through.
A quick rundown of the export.py parameters that may need to be modified:

    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')
    parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
    parser.add_argument('--end2end', action='store_true', help='export end2end onnx')
    parser.add_argument('--max-wh', type=int, default=None, help='None for tensorrt nms, int value for onnx-runtime nms')
    parser.add_argument('--topk-all', type=int, default=100, help='topk objects for every images')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='iou threshold for NMS')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='conf threshold for NMS')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--simplify', action='store_true', default=True, help='simplify onnx model')
    parser.add_argument('--include-nms', action='store_true', help='export end2end onnx')
    opt = parser.parse_args()
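
The C++ code in section 3 decodes raw (cx, cy, w, h, scores) proposals and runs its own NMS, so the model should be exported with --grid but without --end2end (--end2end bakes NMS into the graph and changes the output layout). A typical invocation might look like the following; the weight filename is only an example:

    python export.py --weights yolov7.pt --grid --simplify --img-size 640 640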

Finally, export the onnx model. You will find that the resulting weight file is roughly half the size of the original .pt file.

3. Calling the ONNX Model with ONNX Runtime

The complete inference code follows. It targets a Windows + Visual Studio setup; note the std::wstring conversion of the model path, which the Windows build of ONNX Runtime expects in the Session constructor.

// Required headers: standard library, OpenCV (image I/O and drawing), and the ONNX Runtime C++ API
#include <fstream>
#include <sstream>
#include <iostream>
#include <string>
#include <vector>
#include <array>
#include <algorithm>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <onnxruntime_cxx_api.h>

using namespace std;
using namespace cv;
using namespace Ort;

struct Net_config
{
    float confThreshold; // Confidence threshold
    float nmsThreshold;  // Non-maximum suppression threshold
    string modelpath;
};

typedef struct BoxInfo
{
    float x1;
    float y1;
    float x2;
    float y2;
    float score;
    int label;
} BoxInfo;

class YOLOV7
{
public:
    YOLOV7(Net_config config);
    void detect(Mat& frame);
private:
    int inpWidth;
    int inpHeight;
    int nout;
    int num_proposal;
    vector<string> class_names;
    int num_class;

    float confThreshold;
    float nmsThreshold;
    vector<float> input_image_;
    void normalize_(Mat img);
    void nms(vector<BoxInfo>& input_boxes);

    Env env = Env(ORT_LOGGING_LEVEL_ERROR, "YOLOV7");
    Ort::Session* ort_session = nullptr;
    SessionOptions sessionOptions = SessionOptions();
    vector<char*> input_names;
    vector<char*> output_names;
    vector<vector<int64_t>> input_node_dims; // >=1 inputs
    vector<vector<int64_t>> output_node_dims; // >=1 outputs
};

YOLOV7::YOLOV7(Net_config config)
{
    this->confThreshold = config.confThreshold;
    this->nmsThreshold = config.nmsThreshold;

    // classesFile: plain-text file with one class name per line (fill in your own path; see the end of this post)
    string classesFile = "";
    string model_path = config.modelpath;
    std::wstring widestr = std::wstring(model_path.begin(), model_path.end());
    //OrtStatus* status = OrtSessionOptionsAppendExecutionProvider_CUDA(sessionOptions, 0);
    sessionOptions.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
    ort_session = new Session(env, widestr.c_str(), sessionOptions);
    size_t numInputNodes = ort_session->GetInputCount();
    size_t numOutputNodes = ort_session->GetOutputCount();
    AllocatorWithDefaultOptions allocator;
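    // Note: GetInputName/GetOutputName are deprecated in newer ONNX Runtime releases
    // (replaced by GetInputNameAllocated/GetOutputNameAllocated); this code targets the older API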
    for (int i = 0; i < numInputNodes; i++)
    {
        input_names.push_back(ort_session->GetInputName(i, allocator));
        Ort::TypeInfo input_type_info = ort_session->GetInputTypeInfo(i);
        auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
        auto input_dims = input_tensor_info.GetShape();
        input_node_dims.push_back(input_dims);
    }
    for (int i = 0; i < numOutputNodes; i++)
    {
        output_names.push_back(ort_session->GetOutputName(i, allocator));
        Ort::TypeInfo output_type_info = ort_session->GetOutputTypeInfo(i);
        auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
        auto output_dims = output_tensor_info.GetShape();
        output_node_dims.push_back(output_dims);
    }
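    // The input tensor is NCHW, so its dims are {batch, channels, height, width};
    // the output tensor is {batch, num_proposal, 5 + num_class}: cx, cy, w, h, obj_score, then per-class scores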
    this->inpHeight = input_node_dims[0][2];
    this->inpWidth = input_node_dims[0][3];
    this->nout = output_node_dims[0][2];
    this->num_proposal = output_node_dims[0][1];

    ifstream ifs(classesFile.c_str());
    string line;
    while (getline(ifs, line)) this->class_names.push_back(line);
    this->num_class = class_names.size();
}

void YOLOV7::normalize_(Mat img)
{
    // Convert the HWC BGR uchar image into CHW RGB floats in [0, 1]; the "2 - c" index swaps B and R
    int row = img.rows;
    int col = img.cols;
    this->input_image_.resize(row * col * img.channels());
    for (int c = 0; c < 3; c++)
    {
        for (int i = 0; i < row; i++)
        {
            for (int j = 0; j < col; j++)
            {
                float pix = img.ptr<uchar>(i)[j * 3 + 2 - c];
                this->input_image_[c * row * col + i * col + j] = pix / 255.0;
            }
        }
    }
}

void YOLOV7::nms(vector<BoxInfo>& input_boxes)
{
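    // Greedy NMS: sort boxes by descending score, then suppress any box whose IoU
    // with an already-kept box exceeds nmsThreshold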
    sort(input_boxes.begin(), input_boxes.end(), [](BoxInfo a, BoxInfo b) { return a.score > b.score; });
    vector<float> vArea(input_boxes.size());
    for (int i = 0; i < int(input_boxes.size()); ++i)
    {
        vArea[i] = (input_boxes.at(i).x2 - input_boxes.at(i).x1 + 1)
            * (input_boxes.at(i).y2 - input_boxes.at(i).y1 + 1);
    }

    vector<bool> isSuppressed(input_boxes.size(), false);
    for (int i = 0; i < int(input_boxes.size()); ++i)
    {
        if (isSuppressed[i]) { continue; }
        for (int j = i + 1; j < int(input_boxes.size()); ++j)
        {
            if (isSuppressed[j]) { continue; }
            float xx1 = (max)(input_boxes[i].x1, input_boxes[j].x1);
            float yy1 = (max)(input_boxes[i].y1, input_boxes[j].y1);
            float xx2 = (min)(input_boxes[i].x2, input_boxes[j].x2);
            float yy2 = (min)(input_boxes[i].y2, input_boxes[j].y2);

            float w = (max)(float(0), xx2 - xx1 + 1);
            float h = (max)(float(0), yy2 - yy1 + 1);
            float inter = w * h;
            float ovr = inter / (vArea[i] + vArea[j] - inter);

            if (ovr >= this->nmsThreshold)
            {
                isSuppressed[j] = true;
            }
        }
    }
    // return post_nms;
    int idx_t = 0;
    input_boxes.erase(remove_if(input_boxes.begin(), input_boxes.end(), [&idx_t, &isSuppressed](const BoxInfo& f) { return isSuppressed[idx_t++]; }), input_boxes.end());
}

void YOLOV7::detect(Mat& frame)
{
    Mat dstimg;
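    // Plain resize (no letterbox padding); the aspect-ratio change is undone later via ratiow / ratioh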
    resize(frame, dstimg, Size(this->inpWidth, this->inpHeight));
    this->normalize_(dstimg);
    array<int64_t, 4> input_shape_{ 1, 3, this->inpHeight, this->inpWidth };

    auto allocator_info = MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    Value input_tensor_ = Value::CreateTensor<float>(allocator_info, input_image_.data(), input_image_.size(), input_shape_.data(), input_shape_.size());

    // Run inference
    vector<Value> ort_outputs = ort_session->Run(RunOptions{ nullptr }, &input_names[0], &input_tensor_, 1, output_names.data(), output_names.size());
    // Generate proposals: decode each output row into a candidate box
    vector<BoxInfo> generate_boxes;
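    // Scale factors that map coordinates from the network input size back to the original frame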
    float ratioh = (float)frame.rows / this->inpHeight, ratiow = (float)frame.cols / this->inpWidth;
    int n = 0, k = 0; ///cx,cy,w,h,box_score, class_score
    const float* pdata = ort_outputs[0].GetTensorMutableData<float>();
    for (n = 0; n < this->num_proposal; n++)
    {
        float box_score = pdata[4];
        if (box_score > this->confThreshold)
        {
            int max_ind = 0;
            float max_class_score = 0;
            for (k = 0; k < num_class; k++)
            {
                if (pdata[k + 5] > max_class_score)
                {
                    max_class_score = pdata[k + 5];
                    max_ind = k;
                }
            }
            max_class_score *= box_score;
            if (max_class_score > this->confThreshold)
            {
                float cx = pdata[0] * ratiow;
                float cy = pdata[1] * ratioh;
                float w = pdata[2] * ratiow;
                float h = pdata[3] * ratioh;

                float xmin = cx - 0.5 * w;
                float ymin = cy - 0.5 * h;
                float xmax = cx + 0.5 * w;
                float ymax = cy + 0.5 * h;

                generate_boxes.push_back(BoxInfo{ xmin, ymin, xmax, ymax, max_class_score, max_ind });
            }
        }
        pdata += nout;
    }

    // Perform non maximum suppression to eliminate redundant overlapping boxes with
    // lower confidences
    nms(generate_boxes);
    for (size_t i = 0; i < generate_boxes.size(); ++i)
    {
        int xmin = int(generate_boxes[i].x1);
        int ymin = int(generate_boxes[i].y1);
        rectangle(frame, Point(xmin, ymin), Point(int(generate_boxes[i].x2), int(generate_boxes[i].y2)), Scalar(0, 0, 255), 2);
        string label = format("%.2f", generate_boxes[i].score);
        label = this->class_names[generate_boxes[i].label] + ":" + label;
        putText(frame, label, Point(xmin, ymin - 5), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 255, 0), 1);
    }
}

int main()
{
    // Net_config fields: confidence threshold, NMS threshold, model path
    Net_config YOLOV7_nets = { 0.3, 0.5, "E:/work/People_Detect/yolov7-main/models/yolov7_640x640.onnx" };
    // Other exported variants work the same way, e.g. models/yolov7-tiny_640x640.onnx,
    // models/yolov7_736x1280.onnx, models/yolov7_384x640.onnx or models/yolov7-tiny_256x320.onnx
    YOLOV7 net(YOLOV7_nets);
    string imgpath = "";   // fill in the path of the image to run detection on
    Mat srcimg = imread(imgpath);
    net.detect(srcimg);

    static const string kWinName = "Deep learning object detection in ONNXRuntime";
    namedWindow(kWinName, WINDOW_NORMAL);
    imshow(kWinName, srcimg);
    waitKey(0);
    destroyAllWindows();
}

There are three places in the above code that you need to fill in yourself:

  • classesFile (in the YOLOV7 constructor): the path of a plain-text file listing your class names, one per line, for example:

    person
    animal
    ......

  • modelpath (in Net_config): the path of your exported onnx weight file
  • imgpath (in main): the path of the image you want to detect

Original: https://blog.csdn.net/weixin_42206075/article/details/126098185
Author: 进我的收藏吃灰吧~~
Title: YOLO Series — YOLOv7 (Part 6): Deploying the YOLOv7 ONNX Model
