Introduction
Camera distortion correction comes up constantly in image-processing work, so it is worth summarizing how OpenCV implements it. OpenCV covers two cases: the ordinary pinhole camera model and the fisheye camera model. Pinhole-model correction corresponds to cv::initUndistortRectifyMap(), and fisheye-model correction corresponds to cv::fisheye::initUndistortRectifyMap(). Both functions compute the mapping tables mapx and mapy; the corrected image is then produced the same way in both cases, by interpolating with cv::remap().
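A minimal usage sketch of the fisheye pipeline (the calibration values, file names, and image below are placeholders; in practice K and D come from cv::fisheye::calibrate()):

#include <opencv2/opencv.hpp>

int main()
{
    // Placeholder intrinsics K and fisheye coefficients D = (k1, k2, k3, k4).
    cv::Mat K = (cv::Mat_<double>(3, 3) << 400, 0, 640, 0, 400, 360, 0, 0, 1);
    cv::Mat D = (cv::Mat_<double>(4, 1) << -0.05, 0.01, -0.002, 0.0005);

    cv::Mat src = cv::imread("fisheye.jpg");

    // Compute the maps once (empty R = no rectification, P = K keeps the original intrinsics).
    cv::Mat map1, map2;
    cv::fisheye::initUndistortRectifyMap(K, D, cv::Mat(), K, src.size(), CV_16SC2, map1, map2);

    // Bilinear interpolation produces the corrected image; the maps can be reused for every frame.
    cv::Mat dst;
    cv::remap(src, dst, map1, map2, cv::INTER_LINEAR);
    cv::imwrite("fisheye_undistorted.jpg", dst);
    return 0;
}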
Distortion Correction for the Fisheye Model
A real fisheye lens cannot be manufactured to follow its ideal projection model exactly, so to make fisheye calibration practical, Kannala-Brandt proposed a general polynomial approximation model for fisheye cameras. Keeping the first five terms provides enough degrees of freedom to approximate the various projection models well.
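In OpenCV's fisheye module this model is parameterized by four coefficients k1..k4: with r^2 = x^2 + y^2 on the normalized z = 1 plane and theta = atan(r), the distorted angle is

theta_d = theta * (1 + k1*theta^2 + k2*theta^4 + k3*theta^6 + k4*theta^8)

which is exactly the polynomial evaluated in the source code below.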
The procedure, in brief:
1. Compute the inverse matrix. Projecting a 3D point in camera coordinates onto the 2D image plane involves multiplying by the rotation matrix R and the camera matrix K, so back-projecting a 2D image coordinate means multiplying by the inverse of K*R.
2. Multiply every pixel coordinate (j, i) of the destination image by the inverse matrix iR from step 1 to obtain camera coordinates (_x, _y, _w), then normalize to get the 3D point (x, y, 1) on the z = 1 plane.
3. Compute the polar coordinates (r, theta) on the fisheye hemisphere that correspond to this point on the plane model.
4. Apply the fisheye distortion model to obtain theta_d, the angle with distortion applied.
5. Use theta_d to re-project the 3D point onto the 2D image plane, giving (u, v); this (u, v) is the pixel coordinate in the distorted source image that corresponds to the destination pixel (a compact sketch of steps 2 to 5 follows this list).
6. Call cv::remap(), which uses mapx and mapy to fetch the pixel value at the corresponding source position (usually with bilinear interpolation) and writes it to the destination image, yielding the distortion-corrected result.
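A simplified, self-contained sketch of steps 2 to 5 for a single destination pixel (the function name fisheyeMapPixel and the way its parameters are packaged are illustrative only, not part of OpenCV):

#include <opencv2/core.hpp>
#include <cmath>

// Illustrative helper: map one destination pixel (j, i) back to the distorted source
// image, given iR = (P*R)^-1, the focal lengths/principal point from K, and k1..k4.
static cv::Point2d fisheyeMapPixel(int j, int i,
                                   const cv::Matx33d& iR,
                                   const cv::Vec2d& f, const cv::Vec2d& c,
                                   const cv::Vec4d& k)
{
    // Step 2: back-project (j, i) into camera coordinates and normalize to z = 1.
    double _x = j*iR(0,0) + i*iR(0,1) + iR(0,2);
    double _y = j*iR(1,0) + i*iR(1,1) + iR(1,2);
    double _w = j*iR(2,0) + i*iR(2,1) + iR(2,2);
    double x = _x/_w, y = _y/_w;

    // Step 3: polar coordinates on the z = 1 plane.
    double r = std::sqrt(x*x + y*y);
    double theta = std::atan(r);

    // Step 4: distorted angle theta_d from the fisheye polynomial.
    double t2 = theta*theta, t4 = t2*t2, t6 = t4*t2, t8 = t4*t4;
    double theta_d = theta*(1 + k[0]*t2 + k[1]*t4 + k[2]*t6 + k[3]*t8);

    // Step 5: re-project onto the distorted image plane.
    double scale = (r == 0) ? 1.0 : theta_d/r;
    return cv::Point2d(f[0]*x*scale + c[0], f[1]*y*scale + c[1]);
}

The actual OpenCV implementation below does the same work, but additionally packs the results into the map1/map2 formats expected by cv::remap() and updates (_x, _y, _w) incrementally along each row instead of doing a full matrix multiply per pixel.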
#include <opencv2/opencv.hpp>
void cv::fisheye::initUndistortRectifyMap( InputArray K, InputArray D, InputArray R, InputArray P,
const cv::Size& size, int m1type, OutputArray map1, OutputArray map2 )
{
CV_Assert( m1type == CV_16SC2 || m1type == CV_32F || m1type <=0 );
map1.create( size, m1type <= 0 ? CV_16SC2 : m1type );
map2.create( size, map1.type() == CV_16SC2 ? CV_16UC1 : CV_32F );
CV_Assert((K.depth() == CV_32F || K.depth() == CV_64F) && (D.depth() == CV_32F || D.depth() == CV_64F));
CV_Assert((P.empty() || P.depth() == CV_32F || P.depth() == CV_64F) && (R.empty() || R.depth() == CV_32F || R.depth() == CV_64F));
CV_Assert(K.size() == Size(3, 3) && (D.empty() || D.total() == 4));
CV_Assert(R.empty() || R.size() == Size(3, 3) || R.total() * R.channels() == 3);
CV_Assert(P.empty() || P.size() == Size(3, 3) || P.size() == Size(4, 3));
//Extract the focal lengths fx, fy and the principal point cx, cy from the intrinsic matrix K
cv::Vec2d f, c;
if (K.depth() == CV_32F)
{
Matx33f camMat = K.getMat();
f = Vec2f(camMat(0, 0), camMat(1, 1));
c = Vec2f(camMat(0, 2), camMat(1, 2));
}
else
{
Matx33d camMat = K.getMat();
f = Vec2d(camMat(0, 0), camMat(1, 1));
c = Vec2d(camMat(0, 2), camMat(1, 2));
}
//Extract the distortion coefficients k1, k2, k3, k4 from D
Vec4d k = Vec4d::all(0);
if (!D.empty())
k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>();
//Convert the rotation matrix RR to CV_64F; if no rotation is needed, RR stays the identity matrix
cv::Matx33d RR = cv::Matx33d::eye();
if (!R.empty() && R.total() * R.channels() == 3)
{
cv::Vec3d rvec;
R.getMat().convertTo(rvec, CV_64F);
RR = Affine3d(rvec).rotation();
}
else if (!R.empty() && R.size() == Size(3, 3))
R.getMat().convertTo(RR, CV_64F);
//Convert the new camera matrix PP to CV_64F
cv::Matx33d PP = cv::Matx33d::eye();
if (!P.empty())
P.getMat().colRange(0, 3).convertTo(PP, CV_64F);
//Key step: multiply the new camera matrix by the rotation matrix, then invert (PP*RR) via SVD to obtain iR, used below
cv::Matx33d iR = (PP * RR).inv(cv::DECOMP_SVD);
//Inverse mapping: iterate over every pixel of the destination image, find the corresponding coordinate (u,v) in the distorted image, and store u and v in mapx and mapy respectively
for( int i = 0; i < size.height; ++i)
{
float* m1f = map1.getMat().ptr<float>(i);
float* m2f = map2.getMat().ptr<float>(i);
short* m1 = (short*)m1f;
ushort* m2 = (ushort*)m2f;
//2D image-plane coordinates -> camera coordinates
double _x = i*iR(0, 1) + iR(0, 2),
_y = i*iR(1, 1) + iR(1, 2),
_w = i*iR(2, 1) + iR(2, 2);
for( int j = 0; j < size.width; ++j)
{
//Normalize the camera coordinates, i.e. assume the point lies on the z = 1 plane
double x = _x/_w, y = _y/_w;
//Compute r, the radius on the normalized plane (the cross-section of the fisheye hemisphere)
double r = sqrt(x*x + y*y);
//Compute theta, the angle between the optical axis and the ray from the optical center to the point on the fisheye hemisphere
double theta = atan(r);
//Apply the distortion model to get theta_d, the angle with distortion applied
double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta4*theta4;
double theta_d = theta * (1 + k[0]*theta2 + k[1]*theta4 + k[2]*theta6 + k[3]*theta8);
//Use the distorted angle theta_d to re-project the normalized 3D camera-space point onto the 2D image plane, giving the coordinate (u,v) in the distorted image that corresponds to (j,i)
double scale = (r == 0) ? 1.0 : theta_d / r;
double u = f[0]*x*scale + c[0];
double v = f[1]*y*scale + c[1];
//Store the (u,v) coordinates in mapx and mapy
if( m1type == CV_16SC2 )
{
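//CV_16SC2 uses fixed-point packing: map1 holds the integer parts of (u,v), while map2 packs
//the fractional parts (in units of 1/INTER_TAB_SIZE, where INTER_TAB_SIZE = 32 and INTER_BITS = 5)
//into a single index into cv::remap()'s interpolation tables.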
int iu = cv::saturate_cast<int>(u*cv::INTER_TAB_SIZE);
int iv = cv::saturate_cast<int>(v*cv::INTER_TAB_SIZE);
m1[j*2+0] = (short)(iu >> cv::INTER_BITS);
m1[j*2+1] = (short)(iv >> cv::INTER_BITS);
m2[j] = (ushort)((iv & (cv::INTER_TAB_SIZE-1))*cv::INTER_TAB_SIZE + (iu & (cv::INTER_TAB_SIZE-1)));
}
else if( m1type == CV_32FC1 )
{
m1f[j] = (float)u;
m2f[j] = (float)v;
}
//These three increments belong to the "2D image-plane coordinates -> camera coordinates" step above: adding the first column of iR once per pixel avoids a full matrix multiply for every j
_x += iR(0, 0);
_y += iR(1, 0);
_w += iR(2, 0);
}
}
}
Distortion Correction for the Pinhole Model
The overall flow is almost the same as for the fisheye model above; only the distortion model in step 4 differs. The ordinary (pinhole) camera model is as follows, where k1, k2, k3 are the radial distortion coefficients and p1, p2 the tangential ones:

x_d = x*(1 + k1*r^2 + k2*r^4 + k3*r^6) + 2*p1*x*y + p2*(r^2 + 2*x^2)
y_d = y*(1 + k1*r^2 + k2*r^4 + k3*r^6) + p1*(r^2 + 2*y^2) + 2*p2*x*y

where r^2 = x^2 + y^2. (The implementation below additionally supports the rational radial terms k4, k5, k6, the thin-prism terms s1..s4, and the tilted-sensor parameters tauX, tauY; unused coefficients are simply zero.)
Projecting the distorted point (x_d, y_d) through the intrinsic matrix onto the pixel plane, u = fx*x_d + cx and v = fy*y_d + cy, gives the point's position in the distorted image.
#include <opencv2/opencv.hpp>
void cv::initUndistortRectifyMap( InputArray _cameraMatrix, InputArray _distCoeffs,
InputArray _matR, InputArray _newCameraMatrix,
Size size, int m1type, OutputArray _map1, OutputArray _map2 )
{
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
Mat matR = _matR.getMat(), newCameraMatrix = _newCameraMatrix.getMat();
if( m1type <= 0 )
m1type = CV_16SC2;
CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );
_map1.create( size, m1type );
Mat map1 = _map1.getMat(), map2;
if( m1type != CV_32FC2 )
{
_map2.create( size, m1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
map2 = _map2.getMat();
}
else
_map2.release();
Mat_<double> R = Mat_<double>::eye(3, 3);
Mat_<double> A = Mat_<double>(cameraMatrix), Ar;
if( !newCameraMatrix.empty() )
Ar = Mat_<double>(newCameraMatrix);
else
Ar = getDefaultNewCameraMatrix( A, size, true );
if( !matR.empty() )
R = Mat_<double>(matR);
if( !distCoeffs.empty() )
distCoeffs = Mat_<double>(distCoeffs);
else
{
distCoeffs.create(14, 1, CV_64F);
distCoeffs = 0.;
}
CV_Assert( A.size() == Size(3,3) && A.size() == R.size() );
CV_Assert( Ar.size() == Size(3,3) || Ar.size() == Size(4, 3));
//Invert the product of the new camera matrix Ar and the rotation matrix R via LU decomposition to obtain iR
Mat_<double> iR = (Ar.colRange(0,3)*R).inv(DECOMP_LU);
const double* ir = &iR(0,0);
//Extract the principal point u0, v0 and the focal lengths fx, fy from the original intrinsic matrix
double u0 = A(0, 2), v0 = A(1, 2);
double fx = A(0, 0), fy = A(1, 1);
//Up to 14 distortion coefficients are supported, but in most cases only (k1, k2, p1, p2) are used, possibly plus k3; unused coefficients are set to 0
CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) ||
distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) ||
distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1) ||
distCoeffs.size() == Size(1, 12) || distCoeffs.size() == Size(12, 1) ||
distCoeffs.size() == Size(1, 14) || distCoeffs.size() == Size(14, 1));
if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )
distCoeffs = distCoeffs.t();
const double* const distPtr = distCoeffs.ptr<double>();
double k1 = distPtr[0];
double k2 = distPtr[1];
double p1 = distPtr[2];
double p2 = distPtr[3];
double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? distPtr[4] : 0.;
double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[5] : 0.;
double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[6] : 0.;
double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[7] : 0.;
double s1 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[8] : 0.;
double s2 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[9] : 0.;
double s3 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[10] : 0.;
double s4 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[11] : 0.;
double tauX = distCoeffs.cols + distCoeffs.rows - 1 >= 14 ? distPtr[12] : 0.;
double tauY = distCoeffs.cols + distCoeffs.rows - 1 >= 14 ? distPtr[13] : 0.;
// Matrix for the trapezoidal distortion of a tilted image sensor (tauX, tauY); if unused, matTilt stays the identity
cv::Matx33d matTilt = cv::Matx33d::eye();
cv::detail::computeTiltProjectionMatrix(tauX, tauY, &matTilt);
for( int i = 0; i < size.height; i++ )
{
float* m1f = map1.ptr<float>(i);
float* m2f = map2.empty() ? 0 : map2.ptr<float>(i);
short* m1 = (short*)m1f;
ushort* m2 = (ushort*)m2f;
//Use the inverse matrix iR to transform the 2D image coordinate (j,i) into camera coordinates (_x,_y,_w)
double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];
for( int j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] )
{
//Normalize the camera coordinates onto the z = 1 plane
double w = 1./_w, x = _x*w, y = _y*w;
//Distortion model (see the distortion section of the official OpenCV documentation): kr is the rational radial factor, and xd, yd add the tangential (p1, p2) and thin-prism (s1..s4) terms
double x2 = x*x, y2 = y*y;
double r2 = x2 + y2, _2xy = 2*x*y;
double kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2)/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
double xd = (x*kr + p1*_2xy + p2*(r2 + 2*x2) + s1*r2+s2*r2*r2);
double yd = (y*kr + p1*(r2 + 2*y2) + p2*_2xy + s3*r2+s4*r2*r2);
//Use the computed xd, yd to re-project the 3D point onto the 2D distorted-image coordinate (u,v)
cv::Vec3d vecTilt = matTilt*cv::Vec3d(xd, yd, 1);
double invProj = vecTilt(2) ? 1./vecTilt(2) : 1;
double u = fx*invProj*vecTilt(0) + u0;
double v = fy*invProj*vecTilt(1) + v0;
//Store u and v in mapx and mapy
if( m1type == CV_16SC2 )
{
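//Same fixed-point packing as in the fisheye version above: integer parts in map1, interpolation-table index in map2.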
int iu = saturate_cast<int>(u*INTER_TAB_SIZE);
int iv = saturate_cast<int>(v*INTER_TAB_SIZE);
m1[j*2] = (short)(iu >> INTER_BITS);
m1[j*2+1] = (short)(iv >> INTER_BITS);
m2[j] = (ushort)((iv & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (iu & (INTER_TAB_SIZE-1)));
}
else if( m1type == CV_32FC1 )
{
m1f[j] = (float)u;
m2f[j] = (float)v;
}
else
{
m1f[j*2] = (float)u;
m1f[j*2+1] = (float)v;
}
}
}
}
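A matching usage sketch for the pinhole model (again, the calibration values and file names are placeholders; K and D would come from cv::calibrateCamera()):

#include <opencv2/opencv.hpp>

int main()
{
    // Placeholder intrinsics K and distortion coefficients D = (k1, k2, p1, p2, k3).
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 640, 0, 800, 360, 0, 0, 1);
    cv::Mat D = (cv::Mat_<double>(5, 1) << -0.3, 0.1, 0.001, -0.0005, 0.0);

    cv::Mat src = cv::imread("input.jpg");

    // Optionally compute a new camera matrix; alpha = 1 keeps all source pixels in view.
    cv::Mat newK = cv::getOptimalNewCameraMatrix(K, D, src.size(), 1.0);

    cv::Mat map1, map2;
    cv::initUndistortRectifyMap(K, D, cv::Mat(), newK, src.size(), CV_16SC2, map1, map2);

    cv::Mat dst;
    cv::remap(src, dst, map1, map2, cv::INTER_LINEAR);
    cv::imwrite("undistorted.jpg", dst);
    return 0;
}

Compared with calling cv::undistort() per frame, precomputing the maps and reusing them with cv::remap() avoids recomputing the mapping every time.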