线结构光平面方程自动标定

根据激光三角测量法原理写的线结构光传感器的自动标定程序,之前借助matlab和自己写的几个程序完成的步骤比较复杂,由于cpp容易部署及应用到项目中以及我懒,所以花了一个下午加晚上整了这么一个程序。
标定板使用的是棋盘格标定板,标定线结构光平面方程之前需要先进行相机标定,拍摄的图像如下图所示。

线结构光平面方程自动标定
需要按下面要求拍摄30张图像并存入./image文件夹:
图1-9为在平面a上标定板贴合平面平移的图像;
图10-13为在平面a上标定板垫起一定高度的不同位姿图像;
图14为在平面a上激光条图像;
图15为在平面a上激光条照射到物体上的图像;
图16为在平面b上激光条图像;
图17为在平面b上激光条照射到物体上的图像;
图18-26为在平面b上标定板贴合平面平移的图像;
图27-30为在平面b上标定板垫起一定高度的不同位姿图像。
注意事项:
标定板大小选择为图像高度的1/3到1/2为宜;
平面a和b应尽量平整,并尽量和相机z轴垂直;
确保每幅标定板图像在相机视野范围内并且成像清晰;
标定板垫起时最好不要倾斜角度过大;
线结构光平面标定过程中或者完成后若相机、激光器的相对位置改变
或者传感器受到剧烈碰撞时应重新标定。
上述内容在程序内也有注释。
依赖库为OpenCV。
程序如下:
global_head.h 全局头文件
#pragma once

#include
#include
#include
#include
#include
#include

camera_calibration.h 相机标定

#include "global_head.h"

#define BOARD_SCALE 2
#define BOARD_HEIGHT 8
#define BOARD_WIDTH 8

// Camera intrinsic calibration from chessboard images (Zhang's method).
//
// files        in : paths of the candidate calibration images
// cameraMatrix out: 3x3 intrinsic matrix
// distCoeffs   out: distortion coefficients (k1 k2 p1 p2; k3 fixed to 0)
// tvecsMat     out: per-image translation vectors (board -> camera)
// rvecsMat     out: per-image rotation vectors (Rodrigues form)
//
// Side effects: writes per-image reprojection errors, the intrinsics and
// the per-image extrinsics to "calibration_result.txt".
void CamraCalibration(std::vector<std::string>& files, cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
    std::vector<cv::Mat>& tvecsMat, std::vector<cv::Mat>& rvecsMat)
{
    int image_count = 0;   // images in which the full corner grid was found
    cv::Size image_size;
    cv::Size board_size = cv::Size(BOARD_HEIGHT, BOARD_WIDTH);   // inner corners per column/row
    std::vector<cv::Point2f> image_points_buf;
    std::vector<std::vector<cv::Point2f>> image_points_seq;

    for (size_t k = 0; k < files.size(); k++)
    {
        cv::Mat imageInput = cv::imread(files[k]);

        if (0 == findChessboardCorners(imageInput, board_size, image_points_buf))
        {
            // Skip images where the chessboard could not be detected.
            std::cout << "can not find chessboard corners!\n";
            continue;
        }

        image_count++;
        if (image_count == 1)
        {
            // Record the frame size once; calibrateCamera needs it.
            image_size.width = imageInput.cols;
            image_size.height = imageInput.rows;
        }

        cv::Mat view_gray;
        // BUG FIX: cv::imread returns BGR data, so the correct conversion
        // code is COLOR_BGR2GRAY (COLOR_RGB2GRAY swaps the R/B weights).
        cvtColor(imageInput, view_gray, cv::COLOR_BGR2GRAY);

        // Refine the detected corners to sub-pixel accuracy.
        cornerSubPix(view_gray, image_points_buf, cv::Size(5, 5), cv::Size(-1, -1),
            cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 30, 0.1));

        image_points_seq.push_back(image_points_buf);
    }

    int total = image_points_seq.size();
    std::cout << "共使用了" << total << "幅图片" << std::endl;
    std::cout << "角点提取完成!\n";

    // Board-frame 3-D coordinates of every corner: a z = 0 grid with
    // BOARD_SCALE-sized squares. NOTE(review): x runs along board rows and
    // y along columns here; the swap is applied consistently to every view,
    // so the intrinsic calibration is unaffected.
    cv::Size square_size = cv::Size(BOARD_SCALE, BOARD_SCALE);
    std::vector<std::vector<cv::Point3f>> object_points;
    cameraMatrix = cv::Mat(3, 3, CV_32FC1, cv::Scalar::all(0));
    std::vector<int> point_counts;
    distCoeffs = cv::Mat(1, 4, CV_32FC1, cv::Scalar::all(0));

    for (int t = 0; t < image_count; t++)
    {
        std::vector<cv::Point3f> tempPointSet;
        for (int i = 0; i < board_size.height; i++)
        {
            for (int j = 0; j < board_size.width; j++)
            {
                cv::Point3f realPoint;
                realPoint.x = i * square_size.width;
                realPoint.y = j * square_size.height;
                realPoint.z = 0;
                tempPointSet.push_back(realPoint);
            }
        }
        object_points.push_back(tempPointSet);
    }

    for (int i = 0; i < image_count; i++)
    {
        point_counts.push_back(board_size.width * board_size.height);
    }

    // k3 is fixed to 0 because distCoeffs carries only four coefficients.
    cv::calibrateCamera(object_points, image_points_seq, image_size, cameraMatrix, distCoeffs, rvecsMat, tvecsMat, cv::CALIB_FIX_K3);
    std::cout << "标定完成!\n" << std::endl;

    // Evaluate the calibration by re-projecting the board corners and
    // measuring the residual against the detected corners.
    std::ofstream fout("calibration_result.txt");
    double total_err = 0.0;
    double err = 0.0;
    std::vector<cv::Point2f> image_points2;
    std::cout << "每幅图像的标定误差:\n";
    fout << "每幅图像的标定误差:\n";
    for (int i = 0; i < image_count; i++)
    {
        std::vector<cv::Point3f> tempPointSet = object_points[i];

        projectPoints(tempPointSet, rvecsMat[i], tvecsMat[i], cameraMatrix, distCoeffs, image_points2);

        std::vector<cv::Point2f> tempImagePoint = image_points_seq[i];
        cv::Mat tempImagePointMat = cv::Mat(1, tempImagePoint.size(), CV_32FC2);
        cv::Mat image_points2Mat = cv::Mat(1, image_points2.size(), CV_32FC2);
        for (size_t j = 0; j < tempImagePoint.size(); j++)
        {
            image_points2Mat.at<cv::Vec2f>(0, j) = cv::Vec2f(image_points2[j].x, image_points2[j].y);
            tempImagePointMat.at<cv::Vec2f>(0, j) = cv::Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
        }
        // L2 norm of the residual, divided by the corner count per image.
        err = norm(image_points2Mat, tempImagePointMat, cv::NORM_L2);
        total_err += err /= point_counts[i];
        std::cout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << std::endl;
        fout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << std::endl;
    }

    std::cout << "总体平均误差:" << total_err / image_count << "像素\n" << std::endl;
    fout << "总体平均误差:" << total_err / image_count << "像素" << std::endl << std::endl;

    cv::Mat rotation_matrix = cv::Mat(3, 3, CV_32FC1, cv::Scalar::all(0));
    std::cout << "相机内参数矩阵:" << std::endl;
    std::cout << cameraMatrix << std::endl << std::endl;
    std::cout << "畸变系数:" << std::endl;
    std::cout << distCoeffs << std::endl << std::endl;
    fout << "相机内参数矩阵:" << std::endl;
    fout << cameraMatrix << std::endl << std::endl;
    fout << "畸变系数:" << std::endl;
    fout << distCoeffs << std::endl << std::endl;
    for (int i = 0; i < image_count; i++)
    {
        fout << "第" << i + 1 << "幅图像的平移向量:" << std::endl;
        fout << tvecsMat[i].t() << std::endl;

        // Expand the Rodrigues rotation vector to a 3x3 matrix for the log.
        Rodrigues(rvecsMat[i], rotation_matrix);
        fout << "第" << i + 1 << "幅图像的旋转矩阵:" << std::endl;
        fout << rotation_matrix << std::endl << std::endl;
    }
    fout << std::endl;
}

cenline_extraction.h 线结构光中心线提取

#include "global_head.h"

// Load the image at img_src from disk as a single-channel grayscale
// cv::Mat (IMREAD_GRAYSCALE is the named form of the original flag 0).
void GetImage(std::string img_src, cv::Mat& src_image)
{
    src_image = cv::imread(img_src, cv::IMREAD_GRAYSCALE);
}

// Undo lens distortion on src_image with the calibrated intrinsics and
// distortion coefficients, writing the corrected image into dst_image.
void Correction(cv::Mat& src_image, cv::Mat& dst_image, cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    cv::undistort(src_image, dst_image, cameraMatrix, distCoeffs);
}

// Remove connected components whose contour area is below `pixel` from a
// binary image: OutputImage is a copy of InputImage with small blobs
// filled black. NOTE(review): findContours may modify InputImage in older
// OpenCV versions (< 3.2) — confirm the installed version if InputImage
// must stay intact.
void RemoveSmallRegion(cv::Mat& InputImage, cv::Mat& OutputImage, int pixel)
{
    InputImage.copyTo(OutputImage);
    std::vector<cv::Vec4i> hierarchy;
    std::vector<std::vector<cv::Point>> contours;
    // RETR_EXTERNAL: only outermost contours, so every entry is top-level.
    cv::findContours(InputImage, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);

    // BUG FIX: the original walked `hierarchy[idx][0]` starting at 0 even
    // when no contour was found, indexing an empty vector (UB). With
    // RETR_EXTERNAL that walk visits exactly the contour list, so a plain
    // index loop is equivalent and safe for empty input. (Also removed a
    // dead loop that computed an unused `temp` area.)
    for (int idx = 0; idx < static_cast<int>(contours.size()); ++idx)
    {
        if (fabs(cv::contourArea(contours[idx])) < pixel)
        {
            cv::drawContours(OutputImage, contours, idx, cv::Scalar(0), cv::FILLED, 8, hierarchy);
        }
    }
}

// Gray-centroid extraction of the laser-stripe center inside one contour's
// bounding box: for every image column, the intensity-weighted mean row of
// the non-zero pixels is taken as the stripe center, lightly smoothed, and
// appended to Pt if the stripe is bright enough.
//
// InputImage   : 8-bit single-channel image, zero outside the stripe
// Pt           : output list; accepted centers are appended (not cleared)
// bounding_rect: column/row range to scan (assumed inside the image)
// threshold    : minimum gray level for a center to be accepted
void GrayCenter(cv::Mat& InputImage, std::vector<cv::Point2d>& Pt, cv::Rect bounding_rect, int threshold)
{
    std::vector<cv::Point2d> P;
    for (int i = bounding_rect.x; i < bounding_rect.x + bounding_rect.width; ++i)
    {
        int sum = 0;     // sum of gray values in this column
        double y = 0;    // gray-weighted sum of row indices
        for (int j = bounding_rect.y; j < bounding_rect.y + bounding_rect.height; ++j)
        {
            int s = InputImage.at<uchar>(j, i);
            if (s)
            {
                sum += s;
                y += j*s;
            }
        }
        if (sum)
        {
            y /= sum;   // centroid row of the stripe in this column
            // Keep the center only if it falls on a lit pixel (the double
            // row index is implicitly truncated by at<uchar>(int, int)).
            if (InputImage.at<uchar>(y, i) > 0)
            {
                P.emplace_back(cv::Point2d(i, y));
            }
        }
    }

    // 3-tap moving average along the stripe to suppress row jitter.
    if (P.size() >= 3)
    {
        for (size_t i = 1; i < P.size() - 1; ++i)
        {
            P[i].y = (P[i - 1].y + P[i].y + P[i + 1].y) / 3;
        }
    }

    // Reject the whole segment when its average brightness is below the
    // threshold (presumably a faint reflection rather than the laser line).
    if (P.size() > 0)
    {
        int avg_scalar = 0;
        for (size_t i = 0; i < P.size(); ++i)
        {
            avg_scalar += InputImage.at<uchar>(round(P[i].y), round(P[i].x));
        }
        avg_scalar /= P.size();

        if (avg_scalar < threshold) P.clear();
    }

    // Keep only in-bounds centers that are individually brighter than the
    // threshold; these are appended to the caller's list.
    for (size_t i = 0; i < P.size(); ++i)
    {
        if (P[i].x >= 0 && P[i].x < InputImage.cols && P[i].y >= 0 && P[i].y < InputImage.rows)
        {
            if (InputImage.at<uchar>(round(P[i].y), round(P[i].x)) > threshold)
            {
                Pt.emplace_back(P[i]);
            }
        }
    }
}

// Strict weak ordering for 2-D points: ascending x, ties broken by
// ascending y (used with std::sort on the extracted stripe centers).
bool PointSortRule(const cv::Point2d pt1, const cv::Point2d pt2)
{
    return (pt1.x != pt2.x) ? (pt1.x < pt2.x) : (pt1.y < pt2.y);
}

// Extract the laser-stripe center line from an undistorted grayscale image.
// Pipeline: Gaussian blur -> per-column adaptive threshold -> small-blob
// removal -> per-contour gray-centroid extraction -> x-sorted point list.
// Side effects: writes the 2-D centers to "<n>_2d.txt" and a red-overlay
// visualization to "<n>.bmp".
void CenterLine(int n, cv::Mat& correct_image, std::vector<cv::Point2d>& P)
{
    cv::Mat dst_image = correct_image.clone();
    cv::cvtColor(dst_image, dst_image, cv::COLOR_GRAY2RGB);   // canvas for the overlay
    cv::Mat img1 = correct_image.clone(), img2;

    // Light blur to suppress sensor noise before thresholding.
    cv::GaussianBlur(img1, img1, cv::Size(0, 0), 1.1, 1.1);

    // Find the brightest value of every column (the stripe peak). The raw
    // pointer walk assumes row stride == cols, i.e. a continuous cv::Mat,
    // which holds for clone()d images.
    uchar *p = img1.data;
    std::vector<int> max_col_scalar(img1.cols);
    for (int i = 0; i < img1.cols; ++i)
    {
        for (int j = 0; j < img1.rows; ++j)
        {
            if (*(p + i + img1.cols * j) > max_col_scalar[i])
            {
                max_col_scalar[i] = *(p + i + img1.cols * j);
            }
        }
    }

    // Per-column threshold: keep pixels within 20 gray levels of the
    // column peak, but never accept anything below 100.
    p = img1.data;
    for (int i = 0; i < img1.cols; ++i)
    {
        int threshold = std::max(max_col_scalar[i] - 20, 100);
        for (int j = 0; j < img1.rows; ++j)
        {
            if (*(p + i + img1.cols * j) < threshold)
                *(p + i + img1.cols * j) = 0;
        }
    }

    // Drop specks smaller than 10 px that survived thresholding.
    RemoveSmallRegion(img1, img2, 10);

    std::vector<cv::Vec4i> hierarchy;
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(img2, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
    std::vector<cv::Rect> bounding_rect;
    for (size_t i = 0; i < contours.size(); ++i)
    {
        bounding_rect.emplace_back(cv::boundingRect(cv::Mat(contours[i])));
    }

    // Process the stripe one contour at a time: erase contour i from a
    // copy, subtract to isolate it, then extract its column centroids.
    P.clear();
    for (size_t i = 0; i < contours.size(); ++i)
    {
        cv::Mat img3 = img2.clone();
        cv::drawContours(img3, contours, i, cv::Scalar(0), cv::FILLED, 8, hierarchy);
        cv::Mat img4 = img2 - img3;   // img4 now holds only contour i
        GrayCenter(img4, P, bounding_rect[i], 50);
        img2 = img3;                  // contour i removed for the next pass
    }
    std::sort(P.begin(), P.end(), PointSortRule);

    // Persist the centers and draw them in red on the visualization image.
    std::ofstream fout(std::to_string(n) + "_2d.txt");
    for (int i = 0; i < P.size(); i++)
    {
        fout << P[i].x << " " << P[i].y << std::endl;
        cv::circle(dst_image, cv::Point(round(P[i].x), round(P[i].y)), 0.5, cv::Scalar(0, 0, 255), -1);
    }
    fout.close();
    cv::imwrite(std::to_string(n) + ".bmp", dst_image);
}

// Split a laser-stripe point list P (sorted by x) into points lying on the
// reference plane (P_plane) and points on the scanned object (P_object).
// The object region is detected by sudden jumps in image row larger than
// `gap` pixels: the stripe jumps up (y decreases) where the raised object
// begins and drops back down (y increases) where it ends.
void Point2dSperate(std::vector<cv::Point2d>& P, std::vector<cv::Point2d>& P_plane, std::vector<cv::Point2d>& P_object)
{
    P_plane.clear();
    P_object.clear();

    const int gap = 10;   // minimum |dy| (pixels) treated as an object edge
    const int n = static_cast<int>(P.size());

    // BUG FIX: all four indices were read uninitialized (UB) whenever the
    // corresponding jump did not occur. Defaulting them to n makes an image
    // without an object yield every point in P_plane and none in P_object.
    int object_begin1 = n, object_end1 = n;   // LAST upward / downward jump
    for (int i = 1; i < n; ++i)
    {
        if (P[i - 1].y - P[i].y > gap)
            object_begin1 = i;
        if (P[i].y - P[i - 1].y > gap)
            object_end1 = i;
    }

    int object_begin2 = n, object_end2 = n;   // FIRST upward jump / first downward jump after it
    for (int i = 1; i < n; ++i)
    {
        if (P[i - 1].y - P[i].y > gap)
        {
            object_begin2 = i;
            break;
        }
    }
    for (int i = object_begin1; i < n; ++i)
    {
        if (P[i].y - P[i - 1].y > gap)
        {
            object_end2 = i;
            break;
        }
    }

    // Plane points: before the first rise and from the last fall onward.
    for (int i = 0; i < object_begin2; ++i)
        P_plane.push_back(P[i]);
    for (int i = object_end1; i < n; ++i)
        P_plane.push_back(P[i]);
    // Object points: from the last rise to the first subsequent fall.
    for (int i = object_begin1; i < object_end2; ++i)
        P_object.push_back(P[i]);
}

plane_fitness.h 平面拟合

#include "global_head.h"

// Total-least-squares plane fit via PCA: the plane normal is the
// eigenvector of the points' covariance matrix belonging to the
// smallest-magnitude eigenvalue.
//
// pts: input 3-D points (expects at least 3 non-collinear points).
// Returns {a, b, c} of the plane a*x + b*y + c*z = 1 — the internal
// A*x + B*y + C*z + D = 0 form divided by -D, which assumes the plane
// does not pass through the camera origin (D != 0).
std::vector<double> findPlane(std::vector<cv::Point3d>& pts)
{
    double A, B, C, D;
    std::vector<double> parameters;
    double meanX = 0, meanY = 0, meanZ = 0;
    double meanXX = 0, meanYY = 0, meanZZ = 0;
    double meanXY = 0, meanXZ = 0, meanYZ = 0;

    // Accumulate first and second moments of the coordinates.
    for (size_t i = 0; i < pts.size(); ++i)
    {
        meanX += pts[i].x;
        meanY += pts[i].y;
        meanZ += pts[i].z;

        meanXX += pts[i].x * pts[i].x;
        meanYY += pts[i].y * pts[i].y;
        meanZZ += pts[i].z * pts[i].z;

        meanXY += pts[i].x * pts[i].y;
        meanXZ += pts[i].x * pts[i].z;
        meanYZ += pts[i].y * pts[i].z;
    }
    meanX /= pts.size();
    meanY /= pts.size();
    meanZ /= pts.size();
    meanXX /= pts.size();
    meanYY /= pts.size();
    meanZZ /= pts.size();
    meanXY /= pts.size();
    meanXZ /= pts.size();
    meanYZ /= pts.size();

    // Covariance matrix of the point cloud (E[uv] - E[u]E[v]).
    cv::Mat m = (cv::Mat_<double>(3, 3) << meanXX - meanX * meanX, meanXY - meanX * meanY, meanXZ - meanX * meanZ,
        meanXY - meanX * meanY, meanYY - meanY * meanY, meanYZ - meanY * meanZ,
        meanXZ - meanX * meanZ, meanYZ - meanY * meanZ, meanZZ - meanZ * meanZ);

    cv::Mat eigenvalue, eigenvector;
    cv::eigen(m, eigenvalue, eigenvector);   // rows of eigenvector are the eigenvectors
    double v0 = eigenvalue.at<double>(0, 0), v1 = eigenvalue.at<double>(1, 0), v2 = eigenvalue.at<double>(2, 0);

    // Pick the smallest-magnitude eigenvalue; its eigenvector is the normal.
    // BUG FIX: the '<' operators were lost in the published listing
    // ("(abs(v1)  abs(v0))"); restored the strict comparisons. cv::eigen
    // returns eigenvalues in descending order, so this normally selects 2.
    int minNumber = 0;
    if ((abs(v1) < abs(v0)) && (abs(v1) < abs(v2)))
        minNumber = 1;
    if ((abs(v2) < abs(v0)) && (abs(v2) < abs(v1)))
        minNumber = 2;

    A = eigenvector.at<double>(minNumber, 0);
    B = eigenvector.at<double>(minNumber, 1);
    C = eigenvector.at<double>(minNumber, 2);
    // The plane passes through the centroid of the points.
    D = -(A * meanX + B * meanY + C * meanZ);
    // Fix the normal's sign so that C >= 0 (consistent orientation).
    if (C < 0)
    {
        A *= -1.0;
        B *= -1.0;
        C *= -1.0;
        D *= -1.0;
    }
    // Convert A*x + B*y + C*z + D = 0 into a*x + b*y + c*z = 1.
    parameters.push_back(- A / D);
    parameters.push_back(- B / D);
    parameters.push_back(- C / D);
    return parameters;
}

point2dto3d.h 坐标转换

#include "global_head.h"

// Back-project undistorted pixel coordinates onto a known plane.
// plane holds {a, b, c} of a*X + b*Y + c*Z = 1 in camera coordinates; each
// pixel's viewing ray is intersected with that plane to recover a 3-D
// point. Points are appended to Pt3ds and also written to "<n>_3d.txt".
// (distCoeffs is accepted but unused: the pixels are already undistorted.)
void Point2dto3d(int n, std::vector<double> plane, cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
    std::vector<cv::Point2d>& Pt2ds, std::vector<cv::Point3d>& Pt3ds)
{
    const double a = plane[0], b = plane[1], c = plane[2];
    const double u0 = cameraMatrix.at<double>(0, 2);
    const double v0 = cameraMatrix.at<double>(1, 2);
    const double fx = cameraMatrix.at<double>(0, 0);
    const double fy = cameraMatrix.at<double>(1, 1);

    std::ofstream fout(std::to_string(n) + "_3d.txt");
    for (size_t i = 0; i < Pt2ds.size(); ++i)
    {
        // Normalized image coordinates of the pixel's viewing ray.
        const double x1 = (Pt2ds[i].x - u0) / fx;
        const double y1 = (Pt2ds[i].y - v0) / fy;

        cv::Point3d pt;
        pt.z = 1 / (a * x1 + b * y1 + c);   // ray/plane intersection depth
        pt.x = x1 * pt.z;
        pt.y = y1 * pt.z;

        Pt3ds.push_back(pt);
        fout << std::setprecision(16) << pt.x << " " << pt.y << " " << pt.z << std::endl;
    }
    fout.close();
}

// Report the mean and standard deviation of point-to-plane distances for
// the plane a*x + b*y + c*z = 1 (used to judge the calibration quality).
void PointtoPlaneEvaluation(const std::vector<cv::Point3d>& Pt3ds, std::vector<double> plane)
{
    // BUG FIX: guard against an empty point set, which previously divided
    // by zero and printed NaN statistics.
    if (Pt3ds.empty())
    {
        std::cout << "\nPointtoPlaneEvaluation: no points to evaluate" << std::endl;
        return;
    }

    const double a = plane[0], b = plane[1], c = plane[2];
    const double norm_abc = sqrt(a * a + b * b + c * c);   // hoisted: loop-invariant

    std::vector<double> distance;
    distance.reserve(Pt3ds.size());

    double distance_mean = 0;
    for (size_t i = 0; i < Pt3ds.size(); ++i)
    {
        // |a*x + b*y + c*z - 1| / ||(a,b,c)||: distance to a*x+b*y+c*z = 1.
        double dis = abs(a * Pt3ds[i].x + b * Pt3ds[i].y + c * Pt3ds[i].z - 1) / norm_abc;
        distance.push_back(dis);
        distance_mean += dis;
    }
    distance_mean /= Pt3ds.size();
    std::cout << "\n距离平均值:"<< distance_mean << std::endl;

    // Population standard deviation of the distances.
    double sigma = 0;
    for (size_t i = 0; i < Pt3ds.size(); ++i)
    {
        sigma += pow(distance[i] - distance_mean, 2);
    }
    sigma /= Pt3ds.size();
    std::cout << "距离标准差:"<< sqrt(sigma) << std::endl;
}

main.cpp 主程序


#include "camera_calibration.h"
#include "cenline_extraction.h"
#include "plane_fitness.h"
#include "point2dto3d.h"

int main(int argc, char* argv[])
{

    std::vector<std::string> files;
    std::vector<cv::Mat> tvecsMat, rvecsMat;
    cv::Mat cameraMatrix, distCoeffs;
    for (int i = 1; i  13; ++i)
        files.push_back("images//" + std::to_string(i) + ".bmp");
    for (int i = 18; i  30; ++i)
        files.push_back("images//" + std::to_string(i) + ".bmp");
    CamraCalibration(files, cameraMatrix, distCoeffs, tvecsMat, rvecsMat);

    double t = 2.8;
    std::vector<cv::Point3d> tranMat1, tranMat2;
    for (int i = 0; i < 9; ++i)
        tranMat1.push_back(cv::Point3d(tvecsMat[i].at<double>(0, 0), tvecsMat[i].at<double>(1, 0), tvecsMat[i].at<double>(2, 0) + t));
    for (int i = 13; i < 22; ++i)
        tranMat2.push_back(cv::Point3d(tvecsMat[i].at<double>(0, 0), tvecsMat[i].at<double>(1, 0), tvecsMat[i].at<double>(2, 0) + t));

    std::vector<double> plane1 = findPlane(tranMat1), plane2 = findPlane(tranMat2);

    int i = 14;
    std::string filename;
    cv::Mat image, image_corrected;
    std::vector<cv::Point2d> Pts2d;
    std::vector<cv::Point3d> Pts3d;
    filename = "images//" + std::to_string(i) + ".bmp";
    GetImage(filename, image);
    Correction(image, image_corrected, cameraMatrix, distCoeffs);
    CenterLine(i, image_corrected, Pts2d);
    Point2dto3d(i, plane1, cameraMatrix, distCoeffs, Pts2d, Pts3d);

    i = 16;
    filename = "images//" + std::to_string(i) + ".bmp";
    GetImage(filename, image);
    Correction(image, image_corrected, cameraMatrix, distCoeffs);
    CenterLine(i, image_corrected, Pts2d);
    Point2dto3d(i, plane2, cameraMatrix, distCoeffs, Pts2d, Pts3d);

    std::vector<double> light_plane = findPlane(Pts3d);
    std::cout << "光平面方程系数:\n" << light_plane[0] << " " << light_plane[1] << " " << light_plane[2] << std::endl;

#ifdef EVALUATION
    std::vector<cv::Point2d> Pts2d_plane, Pts2d_object;

    i = 15;
    filename = "images//" + std::to_string(i) + ".bmp";
    GetImage(filename, image);
    Correction(image, image_corrected, cameraMatrix, distCoeffs);
    CenterLine(i, image_corrected, Pts2d);
    Point2dSperate(Pts2d, Pts2d_plane, Pts2d_object);
    Pts3d.clear();
    Point2dto3d(i, light_plane, cameraMatrix, distCoeffs, Pts2d_plane, Pts3d);
    PointtoPlaneEvaluation(Pts3d, plane1);
    Pts3d.clear();
    Point2dto3d(i, light_plane, cameraMatrix, distCoeffs, Pts2d_object, Pts3d);
    PointtoPlaneEvaluation(Pts3d, plane1);
#endif

    system("pause");
    return EXIT_SUCCESS;
}

运行结果:

线结构光平面方程自动标定

更新后的工程代码和编译代码的CMakeLists文件,及图片资源放在下面链接了:
线结构光平面方程自动标定工程下载地址
参考文献:[1]王作山. 基于激光结构光视觉引导的焊缝跟踪技术研究[D].山东大学,2019.

另外,线结构光标定的基础知识可以参考我的另一篇文章:
激光三角测量法中像素坐标与相机坐标转换

Original: https://blog.csdn.net/taifyang/article/details/122639344
Author: 给算法爸爸上香
Title: 线结构光平面方程自动标定

原创文章受到原创版权保护。转载请注明出处:https://www.johngo689.com/632926/

转载文章受原作者版权保护。转载请注明原作者出处!

(0)

大家都在看

亲爱的 Coder【最近整理,可免费获取】👉 最新必读书单  | 👏 面试题下载  | 🌎 免费的AI知识星球