I. The Wavelet Transform
Wavelet analysis is a relatively difficult branch of mathematics. With the wavelet transform you can implement image compression, decompose and reconstruct vibration signals, and more, so it sees wide use in practical engineering. Compared with the Fourier transform, the wavelet transform is localized in both the spatial and frequency domains, which lets it extract information from a signal effectively. Through basic operations such as dilation and translation, it decomposes and reconstructs a signal at multiple scales, resolving many of the difficulties the Fourier transform leaves behind.
As a young branch of mathematics, wavelet analysis is a synthesis of functional analysis, Fourier analysis, and numerical analysis. It is also a "time-scale" and multiresolution analysis technique, with broad applications in signal analysis, speech synthesis, image compression and recognition, and the analysis of atmospheric and ocean waves.
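The decompose-and-reconstruct round trip is easy to see with PyWavelets. Here is a minimal sketch (the toy array is made up purely for illustration):

import numpy as np
import pywt

# One level of a 2-D Haar transform splits an image into a low-frequency
# approximation cA plus three high-frequency detail bands (horizontal,
# vertical, diagonal), each half the original size in each dimension.
img = np.arange(16.0).reshape(4, 4)           # toy 4x4 "image"
cA, (cH, cV, cD) = pywt.dwt2(img, 'haar')     # each band has shape (2, 2)
rec = pywt.idwt2((cA, (cH, cV, cD)), 'haar')  # inverse transform
print(np.allclose(rec, img))                  # True: perfect reconstruction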
II. Image Fusion
1. Infrared image enhancement with contrast stretching
# Truncated linear RGB contrast stretch: discard values below the 2nd percentile
# and above the 98th (the two cut-offs are usually symmetric), and clamp the
# output to the requested range.
def truncated_linear_stretch(image, truncated_value=2, maxout=255, minout=0):
    def gray_process(gray, maxout=maxout, minout=minout):
        truncated_down = np.percentile(gray, truncated_value)
        truncated_up = np.percentile(gray, 100 - truncated_value)
        # Map [truncated_down, truncated_up] linearly onto [minout, maxout]
        gray_new = (gray - truncated_down) / (truncated_up - truncated_down) * (maxout - minout) + minout
        gray_new[gray_new < minout] = minout
        gray_new[gray_new > maxout] = maxout
        return np.uint8(gray_new)
    (b, g, r) = cv2.split(image)
    b = gray_process(b)
    g = gray_process(g)
    r = gray_process(r)
    result = cv2.merge((b, g, r))  # merge the channels back together
    return result
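A minimal usage sketch (the file names below are placeholders, not from this project):

# Hypothetical input path; any 3-channel BGR image works.
ir = cv2.imread('ir_frame.jpg')
enhanced = truncated_linear_stretch(ir, truncated_value=2)
cv2.imwrite('ir_frame_stretched.jpg', enhanced)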
2. Image registration with SURF feature matching
# RGB image registration: take a daytime visible image and an infrared image,
# detect their common SURF feature points, and estimate the homography between them.
def Images_matching(img_base, img_target):
    img_base = cv2.cvtColor(img_base, cv2.COLOR_BGR2GRAY)
    img_target = cv2.cvtColor(img_target, cv2.COLOR_BGR2GRAY)
    hessian = 400
    # Initialize the SURF detector
    surf = cv2.xfeatures2d.SURF_create(hessian)
    # surf = cv2.xfeatures2d_SURF.create(hessian)
    # surf = cv2.SIFT_create(hessian)
    # Detect keypoints and compute their descriptors
    kp1, des1 = surf.detectAndCompute(img_base, None)  # e.g. 1136 keypoints, descriptors (1136, 64)
    kp2, des2 = surf.detectAndCompute(img_target, None)
    # KNN feature matching with FLANN
    FLANN_INDEX_KDTREE = 0  # FLANN matcher parameter
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # index config: 5 KD-trees
    searchParams = dict(checks=50)  # number of times the trees are traversed
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)  # build the matcher
    matches = flann.knnMatch(des1, des2, k=2)  # two nearest neighbours per descriptor, e.g. list of 1136
    good = []
    # Keep only distinctive matches (Lowe's ratio test)
    for m, n in matches:
        if m.distance < 0.7 * n.distance:  # keep if the best match is clearly closer than the second best
            good.append(m)  # e.g. 134 matches survive
    src_pts = np.array([kp1[m.queryIdx].pt for m in good])  # keypoint coordinates in the query image, shape (N, 2)
    dst_pts = np.array([kp2[m.trainIdx].pt for m in good])  # keypoint coordinates in the train (template) image
    H = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC)  # H[0]: 3x3 homography, H[1]: inlier mask (N, 1)
    return H[0]
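Note that cv2.xfeatures2d.SURF_create is only available in opencv-contrib builds compiled with the non-free modules enabled, because SURF is patented. If your build lacks it, SIFT (patent expired, available in the main cv2 namespace since OpenCV 4.4) is a reasonable drop-in; a sketch under that assumption:

# Assumption: no non-free SURF module available. Beware that the first
# argument of SIFT_create is nfeatures (a cap on the keypoint count), not
# a Hessian threshold, so do not pass hessian=400 with the same intent.
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img_base, None)  # descriptors: (N, 128) float32
kp2, des2 = sift.detectAndCompute(img_target, None)
# The FLANN KD-tree matching above works unchanged with SIFT's float descriptors.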
3. Image fusion with a wavelet transform in the YUV domain
# YUV-domain wavelet fusion of RGB images: mean for the low-frequency band,
# maximum for the high-frequency bands.
def Images_fusion(img_base, img_target):  # visible image, registered infrared image
    # Fuse a pair of coefficient arrays according to the chosen rule
    def fuseCoeff(cooef1, cooef2, method):
        if method == 'mean':
            cooef = (abs(cooef1) + abs(cooef2)) / 2  # abs = absolute value
            # cooef = 0.5 * cooef1 + 1.5 * cooef2
        elif method == 'min':
            cooef = np.minimum(cooef1, cooef2)
        elif method == 'max':
            cooef = np.maximum(abs(cooef1), abs(cooef2))
        return cooef
    # Wavelet fusion in the YUV domain
    LOW_METHOD = 'mean'
    HIGH_METHOD = 'max'
    YUVimg_base = cv2.cvtColor(img_base, cv2.COLOR_BGR2YUV)  # convert the visible image to YUV
    # cv2.imwrite("D:/VS/vsprj/cuda/cudawavetest/2.jpg", YUVimg_base)
    grayimg_target = cv2.cvtColor(img_target, cv2.COLOR_BGR2GRAY)  # grayscale the infrared image
    # cv2.imwrite("D:/VS/vsprj/cuda/cudawavetest/1.jpg", grayimg_target)
    Yimg_base = YUVimg_base[:, :, 0]  # e.g. (1024, 1024)
    wavelet = 'haar'
    cooef_base = pywt.wavedec2(Yimg_base, wavelet, level=1)  # wavelet decomposition of the base Y channel, bands of shape (512, 512)
    start = time.time()
    cooef_target = pywt.wavedec2(grayimg_target, wavelet, level=1)  # wavelet decomposition of the target grayscale image
    end = time.time() - start
    print("Wavelet decomposition: {}".format(end))
    fusedCooef = []
    for i in range(len(cooef_base)):
        if i == 0:
            fusedCooef.append(fuseCoeff(cooef_base[0], cooef_target[0], LOW_METHOD))  # average the low-frequency band
        else:
            # take the maximum of the high-frequency bands
            c1 = fuseCoeff(cooef_base[i][0], cooef_target[i][0], HIGH_METHOD)
            c2 = fuseCoeff(cooef_base[i][1], cooef_target[i][1], HIGH_METHOD)
            c3 = fuseCoeff(cooef_base[i][2], cooef_target[i][2], HIGH_METHOD)
            fusedCooef.append((c1, c2, c3))  # merged high-frequency bands
    tempfusedImage = pywt.waverec2(fusedCooef, wavelet)  # inverse wavelet transform
    fusedImage = (tempfusedImage - np.min(tempfusedImage)) / (np.max(tempfusedImage) - np.min(tempfusedImage)) * 255  # normalize back to (0, 255)
    start = time.time()
    fusedImage = fusedImage.astype(np.uint8)
    Yimg_new = fusedImage
    fusedYUV = cv2.merge([Yimg_new, YUVimg_base[:, :, 1], YUVimg_base[:, :, 2]])  # replace the Y channel with the fused result
    end = time.time() - start
    print("Image reconstruction: {}".format(end))
    fusedBGR = cv2.cvtColor(fusedYUV, cv2.COLOR_YUV2BGR)  # convert the fused YUV image back to BGR
    return fusedBGR
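One pitfall worth guarding against: with 'haar' at level 1, pywt.waverec2 always returns even dimensions, so if the input height or width is odd, the reconstruction comes back one pixel larger than the Y channel and cv2.merge will fail. A small guard (my addition, not part of the original code) is to crop before merging:

# Crop the reconstruction back to the Y channel's shape before cv2.merge;
# waverec2 rounds odd input sizes up to the next even number.
h0, w0 = Yimg_base.shape
fusedImage = fusedImage[:h0, :w0]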
4. Complete code
import cv2
import numpy as np
import pywt  # PyWavelets, the wavelet module
import time
from PIL import Image
# Truncated linear RGB contrast stretch: discard values below the 2nd percentile
# and above the 98th (the two cut-offs are usually symmetric), and clamp the
# output to the requested range.
def truncated_linear_stretch(image, truncated_value=2, maxout=255, minout=0):
    def gray_process(gray, maxout=maxout, minout=minout):
        truncated_down = np.percentile(gray, truncated_value)
        truncated_up = np.percentile(gray, 100 - truncated_value)
        # Map [truncated_down, truncated_up] linearly onto [minout, maxout]
        gray_new = (gray - truncated_down) / (truncated_up - truncated_down) * (maxout - minout) + minout
        gray_new[gray_new < minout] = minout
        gray_new[gray_new > maxout] = maxout
        return np.uint8(gray_new)
    (b, g, r) = cv2.split(image)
    b = gray_process(b)
    g = gray_process(g)
    r = gray_process(r)
    result = cv2.merge((b, g, r))  # merge the channels back together
    return result
# RGB image registration: take a daytime visible image and an infrared image,
# detect their common SURF feature points, and estimate the homography between them.
def Images_matching(img_base, img_target):
    img_base = cv2.cvtColor(img_base, cv2.COLOR_BGR2GRAY)
    img_target = cv2.cvtColor(img_target, cv2.COLOR_BGR2GRAY)
    hessian = 400
    # Initialize the SURF detector
    surf = cv2.xfeatures2d.SURF_create(hessian)
    # surf = cv2.xfeatures2d_SURF.create(hessian)
    # surf = cv2.SIFT_create(hessian)
    # Detect keypoints and compute their descriptors
    kp1, des1 = surf.detectAndCompute(img_base, None)  # e.g. 1136 keypoints, descriptors (1136, 64)
    kp2, des2 = surf.detectAndCompute(img_target, None)
    # KNN feature matching with FLANN
    FLANN_INDEX_KDTREE = 0  # FLANN matcher parameter
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # index config: 5 KD-trees
    searchParams = dict(checks=50)  # number of times the trees are traversed
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)  # build the matcher
    matches = flann.knnMatch(des1, des2, k=2)  # two nearest neighbours per descriptor, e.g. list of 1136
    good = []
    # Keep only distinctive matches (Lowe's ratio test)
    for m, n in matches:
        if m.distance < 0.7 * n.distance:  # keep if the best match is clearly closer than the second best
            good.append(m)  # e.g. 134 matches survive
    src_pts = np.array([kp1[m.queryIdx].pt for m in good])  # keypoint coordinates in the query image, shape (N, 2)
    dst_pts = np.array([kp2[m.trainIdx].pt for m in good])  # keypoint coordinates in the train (template) image
    H = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC)  # H[0]: 3x3 homography, H[1]: inlier mask (N, 1)
    return H[0]
# YUV-domain wavelet fusion of RGB images: mean for the low-frequency band,
# maximum for the high-frequency bands.
def Images_fusion(img_base, img_target):  # visible image, registered infrared image
    # Fuse a pair of coefficient arrays according to the chosen rule
    def fuseCoeff(cooef1, cooef2, method):
        if method == 'mean':
            cooef = (abs(cooef1) + abs(cooef2)) / 2  # abs = absolute value
            # cooef = 0.5 * cooef1 + 1.5 * cooef2
        elif method == 'min':
            cooef = np.minimum(cooef1, cooef2)
        elif method == 'max':
            cooef = np.maximum(abs(cooef1), abs(cooef2))
        return cooef
    # Wavelet fusion in the YUV domain
    LOW_METHOD = 'mean'
    HIGH_METHOD = 'max'
    YUVimg_base = cv2.cvtColor(img_base, cv2.COLOR_BGR2YUV)  # convert the visible image to YUV
    # cv2.imwrite("D:/VS/vsprj/cuda/cudawavetest/2.jpg", YUVimg_base)
    grayimg_target = cv2.cvtColor(img_target, cv2.COLOR_BGR2GRAY)  # grayscale the infrared image
    # cv2.imwrite("D:/VS/vsprj/cuda/cudawavetest/1.jpg", grayimg_target)
    Yimg_base = YUVimg_base[:, :, 0]  # e.g. (1024, 1024)
    wavelet = 'haar'
    cooef_base = pywt.wavedec2(Yimg_base, wavelet, level=1)  # wavelet decomposition of the base Y channel, bands of shape (512, 512)
    start = time.time()
    cooef_target = pywt.wavedec2(grayimg_target, wavelet, level=1)  # wavelet decomposition of the target grayscale image
    end = time.time() - start
    print("Wavelet decomposition: {}".format(end))
    fusedCooef = []
    for i in range(len(cooef_base)):
        if i == 0:
            fusedCooef.append(fuseCoeff(cooef_base[0], cooef_target[0], LOW_METHOD))  # average the low-frequency band
        else:
            # take the maximum of the high-frequency bands
            c1 = fuseCoeff(cooef_base[i][0], cooef_target[i][0], HIGH_METHOD)
            c2 = fuseCoeff(cooef_base[i][1], cooef_target[i][1], HIGH_METHOD)
            c3 = fuseCoeff(cooef_base[i][2], cooef_target[i][2], HIGH_METHOD)
            fusedCooef.append((c1, c2, c3))  # merged high-frequency bands
    tempfusedImage = pywt.waverec2(fusedCooef, wavelet)  # inverse wavelet transform
    fusedImage = (tempfusedImage - np.min(tempfusedImage)) / (np.max(tempfusedImage) - np.min(tempfusedImage)) * 255  # normalize back to (0, 255)
    start = time.time()
    fusedImage = fusedImage.astype(np.uint8)
    Yimg_new = fusedImage
    fusedYUV = cv2.merge([Yimg_new, YUVimg_base[:, :, 1], YUVimg_base[:, :, 2]])  # replace the Y channel with the fused result
    end = time.time() - start
    print("Image reconstruction: {}".format(end))
    fusedBGR = cv2.cvtColor(fusedYUV, cv2.COLOR_YUV2BGR)  # convert the fused YUV image back to BGR
    return fusedBGR
# Demo: register with the daytime image pair, then fuse a night-time pair
def main():
    matchimg_di = cv2.imread('images/oripics/basic_day_infrared.jpg')  # (1080, 1920, 3)
    matchimg_dv = cv2.imread('images/oripics/basic_day_visual.jpg')
    # matchimg_di = cv2.imread('left_2020_11_30-15_15_31.jpg')  # (1080, 1920, 3)
    # matchimg_dv = cv2.imread('right_2020_11_30-15_15_31.jpg')
    orimg_nv = matchimg_dv
    orimg_ni = matchimg_di
    # orimg_nv = cv2.imread('images/oripics/night_visual_40m.jpg')
    # orimg_ni = cv2.imread('images/oripics/night_infrared_40m.jpg')
    # orimg_nv = cv2.imread('images/oripics/left_2020_11_30-20_06_54.jpg')
    # orimg_ni = cv2.imread('images/oripics/right_2020_11_30-20_06_54.jpg')
    # orimg_nv = cv2.imread('images/oripics/left_2020_11_30-20_08_09.jpg')
    # orimg_ni = cv2.imread('images/oripics/right_2020_11_30-20_08_09.jpg')
    orimg_nv = cv2.imread('images/oripics/left_video_2020_11_30-20_09_32_73.jpg')   # (1024, 1024, 3)
    orimg_ni = cv2.imread('images/oripics/right_video_2020_11_30-20_09_32_73.jpg')  # (1024, 1024, 3)
    # Registration uses the daytime images
    # enhance_matchimg_di = truncated_linear_stretch(matchimg_di)  # RGB enhancement of the infrared registration template
    h, w = orimg_nv.shape[:2]  # 1024, 1024
    H = Images_matching(matchimg_dv, matchimg_di)  # (3, 3)
    # enhance_orimg_ni = truncated_linear_stretch(orimg_ni)  # RGB enhancement of the infrared image to be fused
    matched_ni = cv2.warpPerspective(orimg_ni, H, (w, h))  # warp the infrared image with the homography, (1024, 1024, 3)
    cv2.imwrite("./1.jpg", matched_ni)
    start = time.time()
    fusion = Images_fusion(orimg_nv, matched_ni)
    cv2.imwrite("./2.jpg", fusion)
    end = time.time() - start
    print(end)
    # enhance = truncated_linear_stretch(fusion)  # RGB enhancement of the fused image
    # cv2.imshow('0', cv2.resize(orimg_nv, (600, 400)))
    # cv2.imshow('1', cv2.resize(orimg_ni, (600, 400)))
    cv2.imshow('2', cv2.resize(fusion, (1200, 800)))
    cv2.waitKey(0)
    # name_fusionfile = 'YUV_04.jpg'
    # path_fusionfile = 'images/fusion/' + name_fusionfile
    # cv2.imwrite(path_fusionfile, fusion)
if __name__ == '__main__':
    main()
5. Fusion results (images below)
Infrared: [image]
Visible: [image]
The fused result is only so-so, but it will do; the goal here is to get familiar with the fusion pipeline.
III. Video Fusion
With one visible-light camera and one infrared camera, you can fuse night-time imagery outside the vehicle in real time.
# Requires the imports (cv2, numpy, pywt, time) and the Images_matching /
# Images_fusion functions defined above.
video_path_infrared = "../videos/ir/video_2020_11_30-20_05_30.avi"
video_path_visible = "../videos/vi/video_2020_11_30-20_05_30.avi"
video_save_path = "../videos/out.avi"
video_fps = 25
# matchimg_di = cv2.imread('images/oripics/basic_day_infrared.jpg')  # (1080, 1920, 3)
# matchimg_dv = cv2.imread('images/oripics/basic_day_visual.jpg')
matchimg_di = cv2.imread('left_2020_11_30-15_15_31.jpg')  # (1080, 1920, 3)
matchimg_dv = cv2.imread('right_2020_11_30-15_15_31.jpg')
H = Images_matching(matchimg_dv, matchimg_di)
capture_in = cv2.VideoCapture(video_path_infrared)
capture_vi = cv2.VideoCapture(video_path_visible)
if video_save_path != "":
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # NOTE: the writer size is taken from the visible stream; it must match the
    # fused frame's size or the writer will silently drop frames.
    size = (int(capture_vi.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture_vi.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size)
fps = 0.0
while True:
    t1 = time.time()
    # Read one frame from each stream
    ref1, frame1 = capture_in.read()
    ref2, frame2 = capture_vi.read()
    if not (ref1 and ref2):  # stop when either stream runs out
        break
    # Frames stay in BGR throughout: cv2.VideoCapture delivers BGR and
    # Images_fusion expects BGR, so no BGR<->RGB round trip is needed.
    h, w = frame1.shape[:2]
    matched_ni = cv2.warpPerspective(frame2, H, (w, h))  # register the second stream onto the first
    cv2.imwrite("./1.jpg", matched_ni)  # debug dump of the registered frame
    # Fuse the frame pair
    frame = Images_fusion(frame1, matched_ni)
    end = time.time() - t1
    fps = (fps + (1. / end)) / 2
    print("fps= %.2f" % fps)
    frame = cv2.putText(frame, "fps= %.2f" % fps, (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("video", frame)
    c = cv2.waitKey(1) & 0xff
    if video_save_path != "":
        out.write(frame)
    if c == 27:  # Esc quits
        break
capture_in.release()
capture_vi.release()
if video_save_path != "":
    out.release()
cv2.destroyAllWindows()
Be sure to pick the right registration image pair, otherwise ghosting artifacts will appear.
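One way to catch a bad pair before it causes ghosting is to inspect the RANSAC inlier mask that cv2.findHomography also returns; the check and the thresholds below are my suggestion, not part of the original code:

# findHomography returns (H, mask), where mask flags the RANSAC inliers.
# Few inliers, or a low inlier ratio, usually means the registration pair
# is unsuitable and the warped result will show ghosting.
H, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC)
inliers = int(mask.sum())
if inliers < 20 or inliers / len(mask) < 0.5:  # illustrative thresholds
    raise ValueError("unreliable registration: only %d inliers" % inliers)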
IV. Summary
I have not yet worked out how to speed up this method for video fusion; if you have any good ideas, please share them with me!