用于计算图像处理的opencv2,只不过这次用的不是python的版本,而是C++的版本。
参考书籍:《视觉SLAM十四讲-从理论到实践》——高翔
CMakeLists.txt
写法样例:
# Minimum version and project declaration come first by convention.
cmake_minimum_required(VERSION 2.8)
project( test )
set(CMAKE_CXX_FLAGS "-std=c++11")
# find_package/include_directories must run BEFORE add_executable:
# directory-level include paths only apply to targets created after the call,
# so the original order left cv2_test without the OpenCV include dirs.
find_package( OpenCV REQUIRED )
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable( cv2_test cv2_test.cpp )
target_link_libraries( cv2_test ${OpenCV_LIBS} )
读取数据的数据类型使用 unsigned char
:因为int类型在不同操作系统平台下长度不同,而uchar类型在所有平台上长度都是一样的。
3.1 图片基本操作
#include
#include
#include
using namespace std;
using namespace cv;
int main(int argc, char **argv) {
string img_path = "../test.jpeg";
cv::Mat image;
image = cv::imread(img_path);
if (image.data == nullptr) {
cerr << "图片" << img_path << "读取失败。" << endl;
return -1;
}
cv::imshow("窗口的标题", image);
cv::waitKey(0);
cout << "图片类型为:" << image.type() << endl;
if (image.type() == CV_8UC3) {
cout << "这是彩色8比特图片。" << endl;
}
else {
cout << "这不是彩色8比特图片。" << endl;
}
cout << "宽度:" << image.cols;
cout << ",高度:" << image.rows;
cout << ",频道数:" << image.channels() << endl;
for (size_t y = 0; y < image.rows; y++) {
unsigned char *row_ptr = image.ptr<unsigned char>(y);
for (size_t x = 0; x < image.cols; x++) {
unsigned char *data_ptr = &row_ptr[x*image.channels()];
for (int c = 0; c != image.channels(); c++) {
unsigned char data = data_ptr[c];
cout << (int)data << " ";
}
cout << endl;
}
}
cv::Vec3b pixel = image.at<cv::Vec3b>(0, 0);
cout << "第一个像素点是:" << pixel << endl;
cout << "第一个像素点的第一个通道是:" << (int)pixel[0] << endl;
cv::imwrite( "../test2.jpeg", image);
}
运行得到:
【图就不放了】
图片类型为:16
这是彩色8比特图片。
宽度:550,高度:827,频道数:3
254 254 254
206 206 218
207 207 219
208 208 220
208 208 220
......
第一个像素点是:[254, 254, 254]
第一个像素点的第一个通道是:254
3.2 遍历像素点
较慢的遍历图片中每个像素点(灰度)
// Slower traversal: visit every (grayscale) pixel through cv::Mat iterators.
for (auto p = img.begin<uchar>(); p != img.end<uchar>(); ++p)
{
// Dereference the iterator to read the 8-bit pixel value.
uchar value = uchar((*p));
}
更快的像素点指针遍历法(灰度):
// Faster traversal: index the raw data buffer directly.
// NOTE(review): this assumes the Mat is continuous in memory (no row padding)
// — confirm with img.isContinuous() before relying on it.
for (int i = 0; i <img.cols * img.rows; i++)
{
uchar value = img.data[i];
}
3.3 提取指定像素值
用at的方法按照行列提取像素值(慢)
// Read each pixel with at<uchar>(row, col) — convenient but slower.
for (int row = 0; row < grayImg.rows; row++)
{
for (int col = 0; col < grayImg.cols; col++)
{
// point_value is assumed to be declared in the enclosing scope.
point_value = int(grayImg.at<uchar>(row, col));
}
}
用指针按照行列提取像素值(快)
point_value = float(grayImg.data[row * grayImg.cols + col]);
3.4 初始化为0的矩阵
// Allocate an all-zero single-channel 8-bit matrix of the given size.
cv::Mat temp_img;
temp_img = cv::Mat::zeros(rows, cols, CV_8UC1);
3.5 计算某个灰度切片区域的像素和
float sum_bright = cv::sum(GrayImg(cv::Rect(col_min,row_min,w,h)))[0]
#include
#include
#include
using namespace std;
using namespace cv;
int main(int argc, char **argv) {
string img_path = "../test.jpeg";
cv::Mat image;
image = cv::imread(img_path);
if (image.data == nullptr) {
cerr << "图片" << img_path << "读取失败。" << endl;
return -1;
}
cv::Mat image_shallow_copy = image;
image_shallow_copy( cv::Rect(0, 0, 100, 100)).setTo(0);
cout << "浅拷贝图片的第一个像素:" << image_shallow_copy.at<cv::Vec3b>(0, 0) << endl;
cout << "原始图片的第一个像素:" << image.at<cv::Vec3b>(0, 0) << endl;
cv::Mat image_deep_copy = image.clone();
image_deep_copy( cv::Rect(0, 0, 100, 100)).setTo(255);
cout << "深拷贝图片的第一个像素:" << image_deep_copy.at<cv::Vec3b>(0, 0) << endl;
cout << "原始图片的第一个像素:" << image.at<cv::Vec3b>(0, 0) << endl;
}
运行得到:
浅拷贝图片的第一个像素:[0, 0, 0]
原始图片的第一个像素:[0, 0, 0]
深拷贝图片的第一个像素:[255, 255, 255]
原始图片的第一个像素:[0, 0, 0]
5.1 图片翻转
// flipCode > 0: flip around the y-axis (horizontal mirror).
cv::flip(image_input, image_output, 1);
// flipCode == 0: flip around the x-axis (vertical mirror).
cv::flip(image_input, image_output, 0);
// flipCode < 0: flip around both axes (180-degree rotation).
cv::flip(image_input, image_output, -1);
5.2 二值化
// Fixed-threshold binarization: pixels above Threshold become 255, others 0.
cv::threshold(img, binary, Threshold, 255, cv::THRESH_BINARY);
// With THRESH_OTSU the threshold argument (0 here) is ignored; Otsu's method
// chooses it automatically and cv::threshold returns the value it picked.
int Threshold = cv::threshold(img, binary, 0, 255, cv::THRESH_BINARY + cv::THRESH_OTSU);
5.3 轮廓检测
// Detect contours in a binarized image; each contour is a vector of points.
std::vector< std::vector< cv::Point> > contours;
// Fixed typo: "bianry" -> "binary". cv::noArray() skips the optional hierarchy
// output; RETR_EXTERNAL keeps only outermost contours and CHAIN_APPROX_SIMPLE
// compresses straight segments to their endpoints.
cv::findContours(binary, contours, cv::noArray(), cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
5.4 计算均值与标准差
cv::meanStdDev()
6.1 去畸变代码
来自参考书籍:《视觉SLAM十四讲-从理论到实践》——高翔
#include
#include
using namespace std;
string image_file = "../distorted.png";
int main(int argc, char **argv) {
double k1 = -0.28340811, k2 = 0.07395907, p1 = 0.00019359, p2 = 1.76187114e-05;
double fx = 458.654, fy = 457.296, cx = 367.215, cy = 248.375;
cv::Mat image = cv::imread(image_file, 0);
int rows = image.rows, cols = image.cols;
cv::Mat image_undistort = cv::Mat(rows, cols, CV_8UC1);
for (int v=0; v<rows; v++) {
for (int u=0; u<cols; u++) {
double x = (u-cx) / fx, y = (v-cy) / fy;
double r = sqrt(x * x + y * y);
double x_distorted = x * (1+ k1*r*r + k2*r*r*r*r) + 2*p1*x*y + p2*(r*r+2*x*x);
double y_distorted = y * (1+ k1*r*r + k2*r*r*r*r) + 2*p2*x*y + p1*(r*r+2*y*y);
double u_distorted = fx * x_distorted + cx;
double v_distorted = fy * y_distorted + cy;
if (u_distorted >= 0 && v_distorted >=0 && u_distorted < cols && v_distorted < rows) {
image_undistort.at<uchar>(v, u) = image.at<uchar>((int) v_distorted, (int) u_distorted);
}
else {
image_undistort.at<uchar>(v, u) = 0;
}
}
}
cv::imshow("distored", image);
cv::imshow("undistored", image_undistort);
cv::waitKey();
return 0;
}
6.2 ORB特征匹配代码
来自参考书籍:《视觉SLAM十四讲-从理论到实践》——高翔
#include
#include
#include
#include
#include
using namespace std;
using namespace cv;
int main(int argc, char **argv)
{
Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
assert(img_1.data != nullptr && img_2.data != nullptr);
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> descriptor = ORB::create();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
cout << "extract ORB cost = " << time_used.count() << " seconds. " << endl;
Mat outimg1;
drawKeypoints(img_1, keypoints_1, outimg1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
imshow("ORB features", outimg1);
vector<DMatch> matches;
t1 = chrono::steady_clock::now();
matcher->match(descriptors_1, descriptors_2, matches);
t2 = chrono::steady_clock::now();
time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
cout << "match ORB cost = " << time_used.count() << " seconds. " << endl;
auto min_max = minmax_element(matches.begin(), matches.end(), [](const DMatch &m1, const DMatch &m2) { return m1.distance < m2.distance; });
double min_dist = min_max.first->distance;
double max_dist = min_max.second->distance;
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
std::vector<DMatch> good_matches;
for (int i = 0; i < descriptors_1.rows; i++)
{
if (matches[i].distance max(2 * min_dist, 30.0))
{
good_matches.push_back(matches[i]);
}
}
Mat img_match;
Mat img_goodmatch;
drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_match);
drawMatches(img_1, keypoints_1, img_2, keypoints_2, good_matches, img_goodmatch);
imshow("all matches", img_match);
imshow("good matches", img_goodmatch);
waitKey(0);
return 0;
}
6.3 将特征数据写入磁盘/从磁盘读取特征数据并匹配输出
自己写的代码,记录一下
将图片与 std::vector&lt;cv::Point2f&gt; 类型的特征点坐标存入磁盘:
// Snapshot block: while need_save counts down, dump the current/forward frames
// and their feature-point coordinates to disk (one "x y " pair per line).
// NOTE(review): need_save, cur_img, forw_img, cur_pts and forw_pts are defined
// in the enclosing scope, which is not shown here.
if (need_save >= 0) {
if (need_save == 0) {
string save_path = "存储目录";
// Save both frames as JPEGs.
cv::imwrite( save_path + "cur_img.jpg", cur_img);
cv::imwrite( save_path + "forw_img.jpg", forw_img);
// Serialize cur_pts as "x y " lines into cur_pts.txt.
ostringstream out1;
for (auto p = cur_pts.begin(); p != cur_pts.end(); p++) {
out1 << to_string((*p).x) << " " << to_string((*p).y) << " " << endl;
}
ofstream fout1(save_path + "cur_pts.txt");
if (fout1) {
fout1 << out1.str() << endl;
fout1.close();
}
// Serialize forw_pts the same way into forw_pts.txt.
ostringstream out2;
for (auto p = forw_pts.begin(); p != forw_pts.end(); p++) {
out2 << to_string((*p).x) << " " << to_string((*p).y) << " " << endl;
}
ofstream fout2(save_path + "forw_pts.txt");
if (fout2) {
fout2 << out2.str() << endl;
fout2.close();
}
}
// Decrement so the dump happens exactly once when the counter reaches zero.
need_save -= 1;
}
从磁盘读取图片与float型的特征点坐标,用ORB进行匹配并输出匹配连接图像:
#include
#include
#include
#include
#include
using namespace std;
using namespace cv;
int main(int argc, char **argv) {
string save_path = "存储目录";
cv::Mat cur_img = cv::imread(save_path + "cur_img.jpg", 0);
cv::Mat forw_img = cv::imread(save_path + "forw_img.jpg", 0);
string temp;
int pos;
float x;
float y;
ifstream cur_pts_file(save_path + "cur_pts.txt");
vector<cv::KeyPoint > cur_pts;
if (!cur_pts_file.is_open())
{
cout << "未成功打开文件cur_pts.txt" << endl;
}
while(getline(cur_pts_file, temp))
{
if (temp.length() != 0) {
pos = temp.find(" ");
x = stof(temp.substr (0, pos));
y = stof(temp.substr (pos+1, temp.length()-pos-1));
KeyPoint temp_keypoint;
temp_keypoint.pt = Point2f(x, y);
cur_pts.push_back(temp_keypoint);
}
}
cur_pts_file.close();
ifstream forw_pts_file(save_path + "forw_pts.txt");
vector<cv::KeyPoint > forw_pts;
if (!forw_pts_file.is_open())
{
cout << "未成功打开文件forw_pts.txt" << endl;
}
while(getline(forw_pts_file, temp))
{
if (temp.length() != 0) {
pos = temp.find(" ");
x = stof(temp.substr (0, pos));
y = stof(temp.substr (pos+1, temp.length()-pos-1));
KeyPoint temp_keypoint;
temp_keypoint.pt = Point2f(x, y);
forw_pts.push_back(temp_keypoint);
}
}
forw_pts_file.close();
vector<DMatch> matches;
BFMatcher bfMatcher(NORM_L2);
Mat dst1, dst2;
Ptr<DescriptorExtractor> descriptor = ORB::create();
descriptor->compute(cur_img, cur_pts, dst1);
descriptor->compute(forw_img, forw_pts, dst2);
bfMatcher.match(dst1, dst2, matches);
cv::Mat out_image;
drawMatches(cur_img, cur_pts, forw_img, forw_pts, matches, out_image);
imshow("连线图像", out_image);
waitKey(0);
return 0;
}
需要导入额外库文件
#include
互相转换
// Convert a cv::Mat into an Eigen matrix.
cv::cv2eigen(mat_cv, matrix_eigen);
// Convert an Eigen matrix back into a cv::Mat.
cv::eigen2cv(matrix_eigen, mat_cv);
// flags=0 (cv::WINDOW_NORMAL) creates a user-resizable window.
cv::namedWindow("show", 0);
// Force the window to 1920x1080 before displaying the image.
cv::resizeWindow("show", cv::Size(1920, 1080));
cv::imshow("show", img);
pkg-config --modversion opencv
将cv::Mat转换为char数组
// Copy the Mat's pixel buffer into a plain char array.
// NOTE(review): a variable-length array is a compiler extension, not standard
// C++; and this assumes the Mat is continuous — confirm with image_cv.isContinuous().
char image_c[width * height * channel];
std::memcpy(image_c, image_cv.data, width * height * channel);
如果转换为vector类型,则使用 std::vector&lt;uint8_t&gt; 类型的 data() 指针进行内存复制。
Original: https://blog.csdn.net/starvapour/article/details/122071018
Author: starvapour
Title: C++学习笔记——opencv2模块(图像处理)
原创文章受到原创版权保护。转载请注明出处:https://www.johngo689.com/532298/
转载文章受原作者版权保护。转载请注明原作者出处!