前言
小編研究生的研究方向是視覺(jué)SLAM,目前在自學(xué),本篇文章為初學(xué)高翔老師課的第四次作業(yè)。
1.圖像去畸變
現(xiàn)實(shí)生活中的圖像總存在畸變。原則上來(lái)說(shuō),針孔透視相機(jī)應(yīng)該將三維世界中的直線投影成直線,但是當(dāng)我們使用廣角和魚眼鏡頭時(shí),由于畸變的原因,直線在圖像中看起來(lái)是扭曲的。本次作業(yè),你將嘗試如何對(duì)一張圖像去畸變,得到畸變前的圖像。
對(duì)于畸變,用兩張鮮明的照片來(lái)展示:
undistort_image.cpp:
//
// Created by ljh on 2023/11/5.
//
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
using namespace std;

// Path to the distorted test image -- adjust to your own machine.
string image_file = "/home/lih/video4_homework/homework1/test.png";

// Undistort a grayscale image by hand using the radial-tangential
// (Brown-Conrady) model, instead of calling cv::undistort, to make the
// distortion formulas explicit.
int main(int argc, char **argv) {
  // Distortion coefficients: radial (k1, k2) and tangential (p1, p2).
  double k1 = -0.28340811, k2 = 0.07395907, p1 = 0.00019359, p2 = 1.76187114e-05;
  // Pinhole intrinsics.
  double fx = 458.654, fy = 457.296, cx = 367.215, cy = 248.375;

  cv::Mat image = cv::imread(image_file, 0); // grayscale image, CV_8UC1
  if (image.empty()) {
    // Fail loudly instead of crashing on image.rows below.
    cerr << "cannot load image: " << image_file << endl;
    return 1;
  }
  int rows = image.rows, cols = image.cols;
  cv::Mat image_undistort = cv::Mat(rows, cols, CV_8UC1); // undistorted output

  // For each pixel of the undistorted image, compute where it lands in the
  // distorted input and copy that intensity (inverse mapping).
  for (int v = 0; v < rows; v++)
    for (int u = 0; u < cols; u++) {
      double u_distorted = 0, v_distorted = 0;
      // Normalized camera-plane coordinates of pixel (u, v).
      double x = (u - cx) / fx, y = (v - cy) / fy;
      // The Brown-Conrady model is a polynomial in the SQUARED radius r^2;
      // the original code wrongly used r = sqrt(x^2 + y^2) in both the
      // radial and tangential terms.
      double r2 = x * x + y * y;
      double radial = 1.0 + k1 * r2 + k2 * r2 * r2;
      double x_distorted = x * radial + 2.0 * p1 * x * y + p2 * (r2 + 2.0 * x * x);
      double y_distorted = y * radial + p1 * (r2 + 2.0 * y * y) + 2.0 * p2 * x * y;
      // Project the distorted normalized point back to pixel coordinates.
      u_distorted = fx * x_distorted + cx;
      v_distorted = fy * y_distorted + cy;

      // Nearest-neighbour lookup; pixels mapping outside the input are black.
      if (u_distorted >= 0 && v_distorted >= 0 && u_distorted < cols && v_distorted < rows) {
        image_undistort.at<uchar>(v, u) = image.at<uchar>((int) v_distorted, (int) u_distorted);
      } else {
        image_undistort.at<uchar>(v, u) = 0;
      }
    }

  // Display the undistorted result.
  cv::imshow("image undistorted", image_undistort);
  cv::waitKey();
  return 0;
}
string image_file = "/home/lih/video4_homework/homework1/test.png"; // 填寫你自己的圖片路徑(注意必須使用英文直引號(hào),中文引號(hào)無(wú)法編譯)
CMakeLists.txt:
cmake_minimum_required(VERSION 2.8)
PROJECT(undistort_image)
IF(NOT CMAKE_BUILD_TYPE) # (optional) default to Release when no build type was specified; keep the three-line form
SET(CMAKE_BUILD_TYPE Release)
ENDIF()
MESSAGE("Build type: " ${CMAKE_BUILD_TYPE}) # print the chosen build type
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -O3 -march=native ") # C compiler flags
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -O3 -march=native") # C++ compiler flags
# Check C++11 or C++0x support and pick the newest flag the compiler accepts
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
if(COMPILER_SUPPORTS_CXX11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
add_definitions(-DCOMPILEDWITHC11)
message(STATUS "Using flag -std=c++11.")
elseif(COMPILER_SUPPORTS_CXX0X)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
add_definitions(-DCOMPILEDWITHC0X)
message(STATUS "Using flag -std=c++0x.")
else()
message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
endif()
find_package(OpenCV 3.0 QUIET) # find_package(<Name>) first searches the module path for Find<Name>.cmake
if(NOT OpenCV_FOUND)
find_package(OpenCV 2.4.3 QUIET)
if(NOT OpenCV_FOUND)
message(FATAL_ERROR "OpenCV > 2.4.3 not found.")
endif()
endif()
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(image undistort_image.cpp)
# link the OpenCV libraries
target_link_libraries(image ${OpenCV_LIBS})
然后
mkdir build
cd build
cmake ..
make
./image
2.雙目視差的使用
雙目相機(jī)的一大好處是可以通過(guò)左右目的視差來(lái)恢復(fù)深度。課程中我們介紹了由視差計(jì)算深度的過(guò)程。本題,你需要根據(jù)視差計(jì)算深度,進(jìn)而生成點(diǎn)云數(shù)據(jù)。本題的數(shù)據(jù)來(lái)自 Kitti 數(shù)據(jù)集 [2]。Kitti 中的相機(jī)部分使用了一個(gè)雙目模型。雙目采集到左圖和右圖,然后我們可以通過(guò)左右視圖恢復(fù)出深度。經(jīng)典雙目恢復(fù)深度的算法有 BM(Block Matching)、SGBM(Semi-Global Matching)[3, 4] 等,但本題不探討立體視覺(jué)內(nèi)容(那是一個(gè)大問(wèn)題)。我們假設(shè)雙目計(jì)算的視差已經(jīng)給定,請(qǐng)你根據(jù)雙目模型,畫出圖像對(duì)應(yīng)的點(diǎn)云,并顯示到 Pangolin 中。題給定的左右圖見(jiàn) code/left.png 和 code/right.png,視差圖亦給定,見(jiàn) code/disparity.png。雙目的參數(shù)如下:
fx= 718.856; fy = 718.856; cx =607.1928; cy = 185.2157
且雙目左右間距(即基線)為:
d = 0.573 m
請(qǐng)根據(jù)以上參數(shù),計(jì)算相機(jī)數(shù)據(jù)對(duì)應(yīng)的點(diǎn)云,并顯示到 Pangolin 中。程序請(qǐng)寫在 code/disparity.cpp 文件中。
disparity.cpp:
// start your code here
// Back-project pixel (u, v) to a 3D point using the stereo pinhole model.
double x = (u - cx) / fx;   // normalized image coordinate x
double y = (v - cy) / fy;   // normalized image coordinate y
// Stereo depth: depth = fx * baseline / disparity.
// NOTE(review): assumes disparity > 0 at (v, u); a zero disparity would give
// an infinite depth -- confirm the disparity map contains no zero pixels.
double depth = fx * b / (disparity.at<float>(v, u));
point[0] = x * depth;
point[1] = y * depth;
point[2] = depth;
// end your code here
double x 和 double y 的計(jì)算方式和上一題一樣,depth 計(jì)算如下:
計(jì)算出depth后,那么point模仿課上五對(duì)圖片那個(gè)實(shí)踐仿寫即可。只不過(guò)實(shí)踐中的d(視差)沒(méi)有給出,而此題中視差d已給,所以公式寫出來(lái)略有不同。
CMakeLists.txt:
cmake_minimum_required( VERSION 2.8 )
project(stereoVision)
set( CMAKE_CXX_FLAGS "-std=c++11 -O3")
# Eigen is header-only; only its include directory is needed
include_directories("/usr/include/eigen3")
# Pangolin provides the 3D point-cloud viewer
find_package(Pangolin REQUIRED)
include_directories( ${Pangolin_INCLUDE_DIRS} )
find_package(OpenCV 3.0 QUIET) # find_package(<Name>) first searches the module path for Find<Name>.cmake
if(NOT OpenCV_FOUND)
find_package(OpenCV 2.4.3 QUIET)
if(NOT OpenCV_FOUND)
message(FATAL_ERROR "OpenCV > 2.4.3 not found.")
endif()
endif()
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(disparity disparity.cpp)
# link OpenCV and Pangolin into the executable
target_link_libraries(disparity ${OpenCV_LIBRARIES})
target_link_libraries(disparity ${Pangolin_LIBRARIES})
然后就是編譯五部曲:
mkdir build
cd build
cmake ..
make
./disparity
如果你出現(xiàn)了這張圖片,那就是你的disparity.cpp中的圖片位置沒(méi)有寫對(duì),找不到圖片所導(dǎo)致的!!
運(yùn)行成功如下:
3.矩陣微分
①第一問(wèn):如果大家有不理解的地方可以看看這個(gè)印度老哥的視頻,我認(rèn)為講的還是非常清晰的,至少我搜了很多國(guó)內(nèi)的都沒(méi)有這個(gè)講得好,雖然語(yǔ)言不通,但是一點(diǎn)都不影響學(xué)習(xí)。高博的清華PPT還是不適合我這種人看。
鏈接:
矩陣求導(dǎo)講解
②第二問(wèn):如果大家有不理解的地方可以看看這個(gè)印度老哥的視頻,我認(rèn)為講的還是非常清晰的,至少我搜了很多國(guó)內(nèi)的都沒(méi)有這個(gè)講得好,雖然語(yǔ)言不通,但是一點(diǎn)都不影響學(xué)習(xí)。高博的清華PPT還是不適合我這種人看。
鏈接:
矩陣求導(dǎo)講解
③第三問(wèn):
4.高斯牛頓法的曲線擬合實(shí)驗(yàn)
當(dāng)然我覺(jué)得大家很有必要了解一下這一塊的由來(lái),我的上一篇博客講述了海斯矩陣,凸函數(shù)等基本概念,大家看這個(gè)之前我認(rèn)為很必要學(xué)習(xí)一下: 鏈接:
SLAM第四講實(shí)踐中的最優(yōu)化知識(shí)
在做這道題之前我們非常有必要了解一下什么是牛頓法,因?yàn)楦咚古nD法是牛頓法的改進(jìn),我以一道最優(yōu)化的簡(jiǎn)單例題讓你明白什么是牛頓法:
下來(lái)我們?cè)倏锤咚古nD法,我搜查了很多資料,很難找到一道高斯牛頓法的數(shù)學(xué)題來(lái)讓大家理解,所以我只能找到一個(gè)更為詳細(xì)點(diǎn)的高斯牛頓法的計(jì)算步驟讓大家理解:
到這里,我們開(kāi)始做題:
gaussnewton.cpp:
#include <iostream>
#include <chrono>
#include <cmath>
#include <opencv2/opencv.hpp>
#include <Eigen/Core>
#include <Eigen/Dense>
using namespace std;
using namespace Eigen;

// Fit y = exp(a*x^2 + b*x + c) to noisy samples with a hand-written
// Gauss-Newton solver; prints the estimate after each iteration.
int main(int argc, char **argv) {
  double ar = 1.0, br = 2.0, cr = 1.0;  // ground-truth parameters
  double ae = 2.0, be = -1.0, ce = 5.0; // initial estimate
  int N = 100;                          // number of data points
  double w_sigma = 1.0;                 // stddev of the Gaussian noise
  double inv_sigma = 1.0 / w_sigma;     // inverse stddev (information weight)
  cv::RNG rng;                          // OpenCV random number generator

  vector<double> x_data, y_data; // synthetic samples
  for (int i = 0; i < N; i++) {
    double x = i / 100.0;
    x_data.push_back(x);
    // BUGFIX: cv::RNG::gaussian() takes the standard deviation, not the
    // variance. The original passed w_sigma * w_sigma, which only happens
    // to be identical because w_sigma == 1.0.
    y_data.push_back(exp(ar * x * x + br * x + cr) + rng.gaussian(w_sigma));
  }

  // Gauss-Newton iterations
  int iterations = 100;          // max iterations
  double cost = 0, lastCost = 0; // current and previous total cost

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  for (int iter = 0; iter < iterations; iter++) {
    Matrix3d H = Matrix3d::Zero(); // H = J^T W^{-1} J (Gauss-Newton approx. Hessian)
    Vector3d b = Vector3d::Zero(); // b = -J^T W^{-1} e
    cost = 0;

    for (int i = 0; i < N; i++) {
      double xi = x_data[i], yi = y_data[i]; // i-th data point
      double error = yi - exp(ae * xi * xi + be * xi + ce); // residual f(Xk)
      Vector3d J; // Jacobian of the residual w.r.t. (a, b, c)
      J[0] = -xi * xi * exp(ae * xi * xi + be * xi + ce); // de/da
      J[1] = -xi * exp(ae * xi * xi + be * xi + ce);      // de/db
      J[2] = -exp(ae * xi * xi + be * xi + ce);           // de/dc

      H += inv_sigma * inv_sigma * J * J.transpose();
      b += -inv_sigma * inv_sigma * error * J;
      cost += error * error;
    }

    // Solve the normal equations H * dx = b
    Vector3d dx = H.ldlt().solve(b);
    // std::isnan: the unqualified isnan is not guaranteed in namespace scope.
    if (std::isnan(dx[0])) {
      cout << "result is nan!" << endl;
      break;
    }
    // Stop once the cost no longer decreases.
    if (iter > 0 && cost >= lastCost) {
      cout << "cost: " << cost << ">= last cost: " << lastCost << ", break." << endl;
      break;
    }

    ae += dx[0];
    be += dx[1];
    ce += dx[2];
    lastCost = cost;

    cout << "total cost: " << cost << ", \t\tupdate: " << dx.transpose() <<
         "\t\testimated params: " << ae << "," << be << "," << ce << endl;
  }
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve time cost = " << time_used.count() << " seconds. " << endl;
  cout << "estimated abc = " << ae << ", " << be << ", " << ce << endl;
  return 0;
}
CMakeLists.txt:
cmake_minimum_required(VERSION 2.8)
project(homework4)
set(CMAKE_BUILD_TYPE Release)
set(CMAKE_CXX_FLAGS "-std=c++14 -O3")
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
# OpenCV (used for cv::RNG random number generation)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
# Eigen (header-only linear algebra)
include_directories("/usr/include/eigen3")
add_executable(homework4 gaussnewton.cpp)
target_link_libraries(homework4 ${OpenCV_LIBS})
然后老五套
mkdir build
cd build
cmake ..
make
./homework4文章來(lái)源:http://www.zghlxwxcb.cn/news/detail-752278.html
文章來(lái)源地址http://www.zghlxwxcb.cn/news/detail-752278.html
到了這里,關(guān)于自學(xué)SLAM(8)《第四講:相機(jī)模型與非線性優(yōu)化》作業(yè)的文章就介紹完了。如果您還想了解更多內(nèi)容,請(qǐng)?jiān)谟疑辖撬阉鱐OY模板網(wǎng)以前的文章或繼續(xù)瀏覽下面的相關(guān)文章,希望大家以后多多支持TOY模板網(wǎng)!