【OpenCV】双目相机测距及其深度恢复原理及其算法流程

1. 数学模型

【OpenCV】双目相机测距及其深度恢复原理及其算法流程

2. 整体流程

获取标定与图像数据==>stereoRectify==>initUndistortRectifyMap==>remap==>bm/sgbm恢复出视差图==>利用数学模型求解深度图==>深度图相关的应用/点云为基础的应用。(2D==>3D的转换)

3. 接口解析

3.1 示例代码:双目实时测距(BM/SGBM 交互式调参)与批量视差/深度图生成

#include <opencv2/opencv.hpp>
#include <opencv2/calib3d.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
Ptr<StereoBM> bm = StereoBM::create(16, 9);
Mat xyz;

Mat Q;
Rect left_valid_roi, right_valid_roi;
Mat left_rectified_gray, right_rectified_gray;
Mat img;
Mat left_img, right_img;
Mat left_rectified_img, right_rectified_img;

Vec3f  point3;
float d;
Point origin;
Rect selection;
bool selectObject = false;
int mindisparity = 0;
int ndisparities = 64;
int SADWindowSize = 5;

void stereo_BM_match(int, void*)
{
    bm->setBlockSize(2 * blockSize + 5);
    bm->setROI1(left_valid_roi);
    bm->setROI2(right_valid_roi);
    bm->setPreFilterCap(31);
    bm->setMinDisparity(0);
    bm->setNumDisparities(numDisparities * 16 + 16);
    bm->setTextureThreshold(10);
    bm->setUniquenessRatio(uniquenessRatio);
    bm->setSpeckleWindowSize(100);
    bm->setSpeckleRange(32);
    bm->setDisp12MaxDiff(-1);
    Mat disp, disp8;
    bm->compute(left_rectified_gray, right_rectified_gray, disp);
    disp.convertTo(disp8, CV_8UC1, 255 / ((numDisparities * 16 + 16) * 16.));
    reprojectImageTo3D(disp, xyz, Q, true);
    xyz = xyz * 16;
    imshow("disparity_BM", disp8);
}

void stereo_SGBM_match(int, void*)
{
    Mat disp;

    cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(mindisparity, ndisparities, SADWindowSize);
    int P1 = 8 * left_rectified_img.channels() * SADWindowSize * SADWindowSize;
    int P2 = 10 * left_rectified_img.channels() * SADWindowSize * SADWindowSize;
    sgbm->setP1(P1);
    sgbm->setP2(P2);
    sgbm->setPreFilterCap(15);
    sgbm->setUniquenessRatio(10);
    sgbm->setSpeckleRange(2);
    sgbm->setSpeckleWindowSize(100);
    sgbm->setDisp12MaxDiff(1);

    sgbm->compute(left_rectified_img, right_rectified_img, disp);
    disp.convertTo(disp, CV_32F, 1.0 / 16);
    Mat disp8U = Mat(disp.rows, disp.cols, CV_8UC1);
    normalize(disp, disp8U, 0, 255, NORM_MINMAX, CV_8UC1);
    reprojectImageTo3D(disp8U, xyz, Q, true);
    xyz = xyz * 16;
    namedWindow("disparity_SGBM", WINDOW_AUTOSIZE);
    imshow("disparity_SGBM", disp8U);

}

static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
    }

    switch (event)
    {
    case EVENT_LBUTTONDOWN:
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;

        point3 = xyz.at<Vec3f>(origin);
        point3[0];

        cout << "世界坐标:" << endl;
        cout << "x: " << point3[0] << "  y: " << point3[1] << "  z: " << point3[2] << endl;
        d = point3[0] * point3[0] + point3[1] * point3[1] + point3[2] * point3[2];
        d = sqrt(d);

        d = d / 10.0;
        cout << "距离是:" << d << "cm" << endl;

        break;
    case EVENT_LBUTTONUP:
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            break;
    }
}

int main()
{
    VideoCapture cap(1);
    int FRAME_WIDTH = cap.get(CAP_PROP_FRAME_WIDTH);
    int FRAME_HEIGHT = cap.get(CAP_PROP_FRAME_HEIGHT);
    Mat aligned_rectified_img(FRAME_HEIGHT, FRAME_WIDTH, CV_8UC3);
    cout << "Resolution: << cap.get(CAP_PROP_FRAME_WIDTH) << "," << cap.get(CAP_PROP_FRAME_HEIGHT) << ">\n";
    namedWindow("camera", WINDOW_AUTOSIZE);

    namedWindow("aligned_rectified_img", WINDOW_AUTOSIZE);

    Mat left_cameraMatrix = Mat::eye(3, 3, CV_64F);
    left_cameraMatrix.at<double>(0, 0) = 2.762165790037839e+02;
    left_cameraMatrix.at<double>(0, 1) = 0;
    left_cameraMatrix.at<double>(0, 2) = 1.765880468329375e+02;
    left_cameraMatrix.at<double>(1, 1) = 2.762317738515432e+02;
    left_cameraMatrix.at<double>(1, 2) = 1.272320444598781e+02;

    Mat left_distCoeffs = Mat::zeros(5, 1, CV_64F);
    left_distCoeffs.at<double>(0, 0) = 0.065542106666972;
    left_distCoeffs.at<double>(1, 0) = -0.099179821896270;
    left_distCoeffs.at<double>(2, 0) = 0;
    left_distCoeffs.at<double>(3, 0) = 0;
    left_distCoeffs.at<double>(4, 0) = 0;

    Mat right_cameraMatrix = Mat::eye(3, 3, CV_64F);
    right_cameraMatrix.at<double>(0, 0) = 2.734695143541476e+02;
    right_cameraMatrix.at<double>(0, 1) = 0;
    right_cameraMatrix.at<double>(0, 2) = 1.724017536155269e+02;
    right_cameraMatrix.at<double>(1, 1) = 2.733548075965133e+02;
    right_cameraMatrix.at<double>(1, 2) = 1.255695004672240e+02;

    Mat right_distCoeffs = Mat::zeros(5, 1, CV_64F);
    right_distCoeffs.at<double>(0, 0) = 0.067619149443979;
    right_distCoeffs.at<double>(1, 0) = -0.104286472771764;
    right_distCoeffs.at<double>(2, 0) = 0;
    right_distCoeffs.at<double>(3, 0) = 0;
    right_distCoeffs.at<double>(4, 0) = 0;

    Mat rotation_matrix = Mat::zeros(3, 3, CV_64F);
    rotation_matrix.at<double>(0, 0) = 0.999997933684708;
    rotation_matrix.at<double>(0, 1) = 5.541735042905797e-04;
    rotation_matrix.at<double>(0, 2) = -0.001955893157243;
    rotation_matrix.at<double>(1, 0) = -5.560064711997943e-04;
    rotation_matrix.at<double>(1, 1) = 0.999999406695233;
    rotation_matrix.at<double>(1, 2) = -9.367315446314680e-04;
    rotation_matrix.at<double>(2, 0) = 0.001955372884999;
    rotation_matrix.at<double>(2, 1) = 9.378170983011573e-04;
    rotation_matrix.at<double>(2, 2) = 0.999997648505221;

    Mat rotation_vector;
    Rodrigues(rotation_matrix, rotation_vector);

    Mat translation_vector = Mat::zeros(3, 1, CV_64F);
    translation_vector.at<double>(0, 0) = -74.303646210221200;
    translation_vector.at<double>(1, 0) = -0.208289299602746;
    translation_vector.at<double>(2, 0) = -1.203122420388863;

    Mat R1, R2;
    Mat P1, P2;

    Mat rmap[2][2];
    Size imageSize;
    imageSize = Size(FRAME_WIDTH >> 1, FRAME_HEIGHT);

    stereoRectify(left_cameraMatrix, left_distCoeffs,
        right_cameraMatrix, right_distCoeffs,
        imageSize, rotation_vector, translation_vector,
        R1, R2, P1, P2, Q,
        CALIB_ZERO_DISPARITY, -1, imageSize, &left_valid_roi, &right_valid_roi);

    initUndistortRectifyMap(left_cameraMatrix, left_distCoeffs, R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(right_cameraMatrix, right_distCoeffs, R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    while (1)
    {

        cap >> img;

        left_img = img(Rect(0, 0, FRAME_WIDTH >> 1, FRAME_HEIGHT));
        right_img = img(Rect(FRAME_WIDTH >> 1, 0, FRAME_WIDTH >> 1, FRAME_HEIGHT));

        remap(left_img, left_rectified_img, rmap[0][0], rmap[0][1], INTER_LINEAR);
        remap(right_img, right_rectified_img, rmap[1][0], rmap[1][1], INTER_LINEAR);

        cvtColor(left_rectified_img, left_rectified_gray, COLOR_BGR2GRAY);
        cvtColor(right_rectified_img, right_rectified_gray, COLOR_BGR2GRAY);

        for (int i = 0; i < left_rectified_img.rows; i++)
        {
            for (int j = 0; j < left_rectified_img.cols; j++)
            {
                aligned_rectified_img.at<Vec3b>(i, j)[0] = left_rectified_img.at<Vec3b>(i, j)[0];
                aligned_rectified_img.at<Vec3b>(i, j)[1] = left_rectified_img.at<Vec3b>(i, j)[1];
                aligned_rectified_img.at<Vec3b>(i, j)[2] = left_rectified_img.at<Vec3b>(i, j)[2];
            }
        }

        for (int i = 0; i < right_rectified_img.rows; i++)
        {
            for (int j = 0; j < right_rectified_img.cols; j++)
            {
                aligned_rectified_img.at<Vec3b>(i, j + (FRAME_WIDTH >> 1))[0] = right_rectified_img.at<Vec3b>(i, j)[0];
                aligned_rectified_img.at<Vec3b>(i, j + (FRAME_WIDTH >> 1))[1] = right_rectified_img.at<Vec3b>(i, j)[1];
                aligned_rectified_img.at<Vec3b>(i, j + (FRAME_WIDTH >> 1))[2] = right_rectified_img.at<Vec3b>(i, j)[2];
            }
        }

        rectangle(img, left_valid_roi, Scalar(0, 0, 255), 2, 8);
        rectangle(img, Rect(right_valid_roi.x + (FRAME_WIDTH >> 1), right_valid_roi.y, right_valid_roi.width, right_valid_roi.height), Scalar(0, 0, 255), 2, 8);

        for (int i = 0; i < aligned_rectified_img.rows; i += 32)
        {
            line(aligned_rectified_img, Point(0, i), Point(aligned_rectified_img.cols, i), Scalar(0, 255, 0), 1, LINE_8);
        }

        imshow("camera", img);
        imshow("left_img",left_img);
        imshow("right_img",right_img);
        imshow("left_rectified_img",left_rectified_img);
        imshow("right_rectified_img",right_rectified_img);
        imshow("aligned_rectified_img", aligned_rectified_img);

        stereo_BM_match(0, 0);
        stereo_SGBM_match(0, 0);

        createTrackbar("BlockSize:\n", "disparity_BM", &blockSize, 8, stereo_BM_match);

        createTrackbar("UniquenessRatio:\n", "disparity_BM", &uniquenessRatio, 50, stereo_BM_match);

        createTrackbar("NumDisparities:\n", "disparity_BM", &numDisparities, 16, stereo_BM_match);

        createTrackbar("SADWindowSize:\n", "disparity_SGBM", &SADWindowSize, 30, stereo_SGBM_match);
        setMouseCallback("disparity_SGBM", onMouse, 0);
        setMouseCallback("disparity_BM", onMouse, 0);
        int key = waitKey(30);
        if (key == 27)

            return 0;
    }
}

#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <map>
#include <cstdlib>

#include <opencv2/opencv.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

using namespace std;

#define WHITE "\033[37m"
#define BOLDRED "\033[1m\033[31m"
#define BOLDGREEN "\033[1m\033[32m"

struct CAMERA_INTRINSIC_PARAMETERS
{
    double cx,cy,fx,fy,baseline,scale;
};

struct FILE_FORM
{
    cv::Mat left,right;
    string dispname,depthname,colorname;
};

class ParameterReader
{
public:
    ParameterReader(string filename = "./parameters.txt")
    {
        ifstream fin(filename.c_str());
        if(!fin)
        {
            cerr<<BOLDRED"can't find parameters file!"<<endl;
            return;
        }
        while(!fin.eof())
        {
            string str;
            getline(fin,str);
            if(str[0] == '#')
            {
                continue;
            }

            int pos = str.find("=");
            if (pos == -1)
            {
                continue;
            }
            string key = str.substr(0,pos);
            string value = str.substr(pos+1,str.length());
            data[key] = value;

            if (!fin.good())
            {
                break;
            }
        }
    }
    string getData(string key)
    {
        map<string,string>::iterator iter = data.find(key);
        if (iter == data.end())
        {
            cerr<<BOLDRED"can't find:"<<key<<" parameters!"<<endl;
            return string("NOT_FOUND");
        }
        return iter->second;
    }
public:
    map<string,string> data;
};

FILE_FORM readForm(int index,ParameterReader pd);
void stereoSGBM(cv::Mat lpng,cv::Mat rpng,string filename,cv::Mat &disp);
void stereoBM(cv::Mat lpng,cv::Mat rpng,string filename,cv::Mat &disp);
void disp2Depth(cv::Mat disp,cv::Mat &depth,CAMERA_INTRINSIC_PARAMETERS camera);

int main(int argc, char const *argv[])
{
    ParameterReader pd;
    CAMERA_INTRINSIC_PARAMETERS camera;
    camera.fx = atof(pd.getData("camera.fx").c_str());
    camera.fy = atof(pd.getData("camera.fy").c_str());
    camera.cx = atof(pd.getData("camera.cx").c_str());
    camera.cy = atof(pd.getData("camera.cy").c_str());
    camera.baseline = atof(pd.getData("camera.baseline").c_str());
    camera.scale = atof(pd.getData("camera.scale").c_str());

    int startIndex = atoi(pd.getData("start_index").c_str());
    int endIndex = atoi(pd.getData("end_index").c_str());

    bool is_color = pd.getData("is_color") == string("yes");
    cout<<BOLDRED"......START......"<<endl;
    for (int currIndex = startIndex;currIndex < endIndex;currIndex++)
    {
        cout<<BOLDGREEN"Reading file: "<<currIndex<<endl;
        FILE_FORM form = readForm(currIndex,pd);
        cv::Mat disp,depth,color;

        if (pd.getData("algorithm") == string("SGBM"))
        {
            stereoSGBM(form.left,form.right,form.dispname,disp);
        }
        else if (pd.getData("algorithm") == string("BM"))
        {
            stereoBM(form.left,form.right,form.dispname,disp);
        }
        else
        {
            cout<<BOLDRED"Algorithm is wrong!"<<endl;
            return 0;
        }

        disp2Depth(disp,depth,camera);
        cv::imwrite(form.depthname,depth);
        cout<<WHITE"Depth saved!"<<endl;

        if (is_color)
        {
            cv::applyColorMap(depth,color,cv::COLORMAP_JET);
            cv::imwrite(form.colorname,color);
            cout<<WHITE"Color saved!"<<endl;
        }
    }
    cout<<BOLDRED"......END......"<<endl;

    return 0;
}

FILE_FORM readForm(int index,ParameterReader pd)
{
    FILE_FORM f;
    string lpngDir = pd.getData("left_dir");
    string rpngDir = pd.getData("right_dir");
    string dispDir = pd.getData("disp_dir");
    string depthDir = pd.getData("depth_dir");
    string colorDir = pd.getData("color_dir");
    string rgbExt = pd.getData("rgb_extension");

    string numzero;
    if ( index >= 0 && index  9 )
    {
        numzero = "00000";
    }
    else if ( index >= 10 && index  99 )
    {
        numzero = "0000";
    }
    else if ( index >= 100 && index  999 )
    {
        numzero = "000";
    }
    else if ( index >= 1000 && index  9999 )
    {
        numzero = "00";
    }
    else if ( index >= 10000 && index  99999 )
    {
        numzero = "0";
    }

    stringstream ss;
    ss<<lpngDir<<numzero<<index<<rgbExt;
    string filename;
    ss>>filename;
    f.left = cv::imread(filename,0);

    ss.clear();
    filename.clear();
    ss<<rpngDir<<numzero<<index<<rgbExt;
    ss>>filename;
    f.right = cv::imread(filename,0);

    ss.clear();
    filename.clear();
    ss<<depthDir<<index<<rgbExt;
    ss>>filename;
    f.depthname = filename;

    ss.clear();
    filename.clear();
    ss<<dispDir<<index<<rgbExt;
    ss>>filename;
    f.dispname = filename;

    ss.clear();
    filename.clear();
    ss<<colorDir<<index<<rgbExt;
    ss>>filename;
    f.colorname = filename;

    return f;
}

void stereoSGBM(cv::Mat lpng,cv::Mat rpng,string filename,cv::Mat &disp)
{
    disp.create(lpng.rows,lpng.cols,CV_16S);
    cv::Mat disp1 = cv::Mat(lpng.rows,lpng.cols,CV_8UC1);
    cv::Size imgSize = lpng.size();
    cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create();

    int nmDisparities = ((imgSize.width / 8) + 15) & -16;
    int pngChannels = lpng.channels();
    int winSize = 5;

    sgbm->setPreFilterCap(31);
    sgbm->setBlockSize(winSize);
    sgbm->setP1(8*pngChannels*winSize*winSize);
    sgbm->setP2(32*pngChannels*winSize*winSize);
    sgbm->setMinDisparity(0);
    sgbm->setNumDisparities(nmDisparities);
    sgbm->setUniquenessRatio(5);
    sgbm->setSpeckleWindowSize(100);
    sgbm->setSpeckleRange(32);
    sgbm->setDisp12MaxDiff(1);
    sgbm->setMode(cv::StereoSGBM::MODE_SGBM);
    sgbm->compute(lpng,rpng,disp);

    disp.convertTo(disp1,CV_8U,255 / (nmDisparities*16.));

    cv::imwrite(filename,disp1);
    cout<<WHITE"Disp saved!"<<endl;
}

void stereoBM(cv::Mat lpng,cv::Mat rpng,string filename,cv::Mat &disp)
{
    disp.create(lpng.rows,lpng.cols,CV_16S);
    cv::Mat disp1 = cv::Mat(lpng.rows,lpng.cols,CV_8UC1);
    cv::Size imgSize = lpng.size();
    cv::Rect roi1,roi2;
    cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(16,9);

    int nmDisparities = ((imgSize.width / 8) + 15) & -16;

    bm->setPreFilterType(CV_STEREO_BM_NORMALIZED_RESPONSE);
    bm->setPreFilterSize(9);
    bm->setPreFilterCap(31);
    bm->setBlockSize(9);
    bm->setMinDisparity(0);
    bm->setNumDisparities(nmDisparities);
    bm->setTextureThreshold(10);
    bm->setUniquenessRatio(5);
    bm->setSpeckleWindowSize(100);
    bm->setSpeckleRange(32);
    bm->setROI1(roi1);
    bm->setROI2(roi2);
    bm->setDisp12MaxDiff(1);
    bm->compute(lpng,rpng,disp);

    disp.convertTo(disp1,CV_8U,255 / (nmDisparities*16.));

    cv::imwrite(filename,disp1);
    cout<<WHITE"Disp saved!"<<endl;
}

void disp2Depth(cv::Mat disp,cv::Mat &depth,CAMERA_INTRINSIC_PARAMETERS camera)
{
        depth.create(disp.rows,disp.cols,CV_8UC1);
        cv::Mat depth1 = cv::Mat(disp.rows,disp.cols,CV_16S);
        for (int i = 0;i < disp.rows;i++)
        {
            for (int j = 0;j < disp.cols;j++)
            {
                if (!disp.ptr<ushort>(i)[j])
                    continue;
                depth1.ptr<ushort>(i)[j] = camera.scale * camera.fx * camera.baseline / disp.ptr<ushort>(i)[j];
            }
        }
        depth1.convertTo(depth,CV_8U,1./256);
}

3.2 stereoRectify

void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
                                 InputArray cameraMatrix2, InputArray distCoeffs2,
                                 Size imageSize, InputArray R, InputArray T,
                                 OutputArray R1, OutputArray R2,
                                 OutputArray P1, OutputArray P2,
                                 OutputArray Q, int flags = CALIB_ZERO_DISPARITY,
                                 double alpha = -1, Size newImageSize = Size(),
                                 CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );
参数:

R, T: 是由相机1变换到相机2的变换矩阵(对应接口中的 R 和 T 参数),按照视觉这边的习惯,一般世界坐标系下的点变换到相机坐标系下的点
R1,R2:对两个相机进行旋转矫正的旋转变换矩阵,为世界系到两个相机的变换
P1,P2:两个矫正畸变后的相机投影矩阵,相对的坐标系还是原来的相机1所在坐标系
flags: 一般CALIB_ZERO_DISPARITY
alpha: -1或者不设置则使用自动剪切,0的时候没有黑边,1的时候保留所有原图像素,会有黑边
newImageSize: 默认与原图相同,所以会有剪切,该参数需要跟initUndistortRectifyMap相同,设置大一些会在大畸变的时候保留更多细节.

使用默认参数,即自动裁剪,保留原始图像大小,结果是图像自动缩放,无黑边。
使用alpha=1.0时,原图所有像素都被保留,即不裁剪,保留黑边,但是图像会缩放,结果中P矩阵中K明显进行了缩放。
alpha: 0.0 表示去除黑边后的最大四边形,1.0表示保留原图所有像素,即保留黑边

undistort()函数:

主要针对单张图像进行去畸变操作,使用默认参数的时候主要控制参数是利用newCameraMatrix来完成,而newCameraMatrix一般由getOptimalNewCameraMatrix()函数得到,getOptimalNewCameraMatrix()函数可以控制的参数有:

alpha: 0.0 表示去除黑边后的最大四边形,1.0表示保留原图所有像素,即保留黑边
newImageSize:不设置的话,默认与原图相等,设置了的话会在alpha作用之后对图像进行缩放
总的来说alpha和newImageSize是两个互不干扰的参数,alpha只管是否对图像进行裁剪,而newImageSize只负责把图像进行缩放,这二者都会对相机参数造成影响.

主要完成对双目图像进行调整,计算出用于立体校正的参数,即:给定两个相机的K、畸变参数、外参,计算出相应的R1,R2,P1,P2 矩阵. 作用如公式所示:

【OpenCV】双目相机测距及其深度恢复原理及其算法流程
在initUndistortRectifyMap的时候,对归一化平面上的点坐标进行旋转的修正,效果如下图,P1,P2矩阵与给定的alpha和newImageSize参数有关,alpha只控制有无黑边(即有效像素)newImageSize控制图像缩放.

【OpenCV】双目相机测距及其深度恢复原理及其算法流程
【OpenCV】双目相机测距及其深度恢复原理及其算法流程

3.3 reprojectImageTo3D函数

该函数将视差图,通过投影矩阵Q,得到一副映射图,图像大小与视差图相同,且每个像素具有三个通道,分别存储了该像素位置在相机坐标系下的三维点坐标在x, y, z,三个轴上的值,即每个像素的在相机坐标系下的三维坐标。

void cv::reprojectImageTo3D(
    InputArray disparity,
    OutputArray _3dImage,
    InputArray Q,
    bool handleMissingValues = false,
    int ddepth = -1
)

【OpenCV】双目相机测距及其深度恢复原理及其算法流程

4. 参考资料

  1. https://stackoverflow.com/questions/22418846/reprojectimageto3d-in-opencv
  2. https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga617b1685d4059c6040827800e72ad2b6
  3. https://www.coder.work/article/114127

Original: https://blog.csdn.net/Darlingqiang/article/details/126497609
Author: guoqiang_sunshine
Title: 【OpenCV】双目相机测距及其深度恢复原理及其算法流程

原创文章受到原创版权保护。转载请注明出处:https://www.johngo689.com/637523/

转载文章受原作者版权保护。转载请注明原作者出处!

(0)

大家都在看

亲爱的 Coder【最近整理,可免费获取】👉 最新必读书单  | 👏 面试题下载  | 🌎 免费的AI知识星球