1 Setting Up the Environment
conda create -n face python=3.7
conda activate face
pip install opencv-python
pip install pillow
pip install opencv-contrib-python
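Before moving on, a quick optional check (my own addition, not part of the original steps) confirms that the environment is ready, in particular that opencv-contrib-python provides the cv2.face module used later for LBPH training:

import cv2
from PIL import Image  # Pillow is used later to load training images

print(cv2.__version__)                       # OpenCV imports correctly
print(hasattr(cv2, "face"))                  # True only if opencv-contrib-python is installed
print(cv2.face.LBPHFaceRecognizer_create())  # the recognizer used for training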
2 Preparation
2.1 Create a members.csv file
Each row contains an id, First_name and Last_name, in that order, as shown below:
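The screenshot from the original post is not reproduced here; a hypothetical members.csv could look like the rows below. Note that the recognition code later indexes the name lists directly by the trained id, so keeping the header row in place means the ids line up naturally when they start at 1:

id,First_name,Last_name
1,San,Zhang
2,Si,Li
3,Wu,Wang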
2.2 Create a face folder
This folder holds the captured face images used for training. The number of samples collected per person is controlled by the count check in this excerpt from the capture loop:
k = cv2.waitKey(1)
if k == 27:  # press Esc to stop capturing
    break
elif count >= 200:  # stop after n samples (a larger sample count gives higher accuracy, but collection takes longer)
    break
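For reference, after one capture session for ID 1 the folder is filled by the Section 3 script with grayscale crops named Member.<id>.<count>.jpg, so its contents would look roughly like this (an illustrative listing, not output from the original article):

face/Member.1.1.jpg
face/Member.1.2.jpg
...
face/Member.1.200.jpg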
2.3 Notes
- Face detection cascade
  Path must point to haarcascade_frontalface_default.xml inside the cv2 package of the environment you created.
  Only the OpenCV frontal-face cascade is used; eye detection is not added (the cv2 package also ships eye cascades). A portable way to build this path is shown in the sketch after these notes.
  Path = r"C:\Users\11931\.conda\envs\face1\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml"
- Using a phone as the camera
  Install an IP camera app on your phone, connect the computer and the phone to the same wireless network (connecting the computer to the phone's hotspot is recommended), and change the VideoCapture parameter to the phone's stream address (see the same sketch below).
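As a rough sketch (not from the original article), the cascade path can also be resolved through cv2.data.haarcascades instead of hard-coding it, and a phone camera is opened by passing its stream URL to cv2.VideoCapture; the URL below is only a placeholder whose address and port depend on the app you use:

import cv2

# cv2.data.haarcascades points at the data folder of the installed cv2 package
Path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
face_detector = cv2.CascadeClassifier(Path)

# Phone as camera: pass the stream URL shown by the IP camera app instead of a device index
cap = cv2.VideoCapture("http://192.168.43.1:8081/video")  # placeholder URL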
3 Source Code
import cv2
import os
import numpy as np
from PIL import Image
import datetime
import csv

# Haar cascade shipped with the cv2 package of this environment (see Section 2.3)
Path = r"C:\Users\11931\.conda\envs\face1\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml"
face_detector = cv2.CascadeClassifier(Path)

# Load members.csv: column 1 (First_name) is written to the log, column 2 (Last_name) is shown on screen
names = []
zh_name = []
with open("members.csv", "r", encoding='UTF-8') as csv_file:
    reader = csv.reader(csv_file)
    for item in reader:
        names.append(item[2])
        zh_name.append(item[1])
def data_collection():
    # Built-in laptop camera is index 0; change to 1, 2, ... for another camera.
    # cv2.CAP_DSHOW is a Windows-specific (DirectShow) flag passed as part of the open call.
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    face_id = input('\n Enter your ID: ')
    print('\n Initializing, please look straight at the camera while data is collected...')
    count = 0
    while True:
        # Read a frame from the camera
        success, img = cap.read()
        # Convert it to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect faces
        faces = face_detector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0))
            count += 1
            # Save the grayscale face region into the face folder
            cv2.imwrite("face/Member." + str(face_id) + '.' + str(count) + '.jpg', gray[y: y + h, x: x + w])
        cv2.imshow('data collection', img)
        # Keep the window refreshing
        k = cv2.waitKey(1)
        if k == 27:  # press Esc to stop capturing
            break
        elif count >= 200:  # stop after n samples
            break
    cap.release()
    cv2.destroyAllWindows()
def face_training():
    # Folder holding the captured face images
    path = './face'
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    def getImagesAndLabels(path):
        # os.path.join combines the folder path with each file name
        imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
        faceSamples = []
        ids = []
        for imagePath in imagePaths:
            PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale
            img_numpy = np.array(PIL_img, 'uint8')
            # File names follow Member.<id>.<count>.jpg, so the id is the second dot-separated field
            id = int(os.path.split(imagePath)[-1].split(".")[1])
            faces = face_detector.detectMultiScale(img_numpy)
            for (x, y, w, h) in faces:
                faceSamples.append(img_numpy[y:y + h, x: x + w])
                ids.append(id)
        return faceSamples, ids
    print('Training on the collected data...')
    faces, ids = getImagesAndLabels(path)
    recognizer.train(faces, np.array(ids))
    recognizer.write(r'.\trainer.yml')
    # print("{0} faces trained. Exiting Program".format(len(np.unique(ids))))
def face_identification():
    # Load the trained LBPH model and the face cascade
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('./trainer.yml')
    faceCascade = cv2.CascadeClassifier(Path)
    font = cv2.FONT_HERSHEY_SIMPLEX
    idnum = 0
    global namess
    cam = cv2.VideoCapture(0)
    # Minimum detectable face size: 10% of the frame width and height
    minW = 0.1 * cam.get(3)
    minH = 0.1 * cam.get(4)
    while True:
        ret, img = cam.read()
        # Convert the frame to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect faces; each one is returned as a bounding rectangle (x, y, w, h)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,  # scale factor of the search window between two successive scans
            minNeighbors=5,  # minimum number of neighbouring rectangles that make up a detection (default is 3)
            minSize=(int(minW), int(minH))  # minSize and maxSize bound the size of the detected region
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            idnum, confidence = recognizer.predict(gray[y:y + h, x:x + w])
            if confidence < 100:
                namess = names[idnum]
                confidence = "{0}%".format(round(100 - confidence))
            else:
                namess = "unknown"
                confidence = "{0}%".format(round(100 - confidence))
            cv2.putText(img, str(namess), (x + 5, y - 5), font, 1, (0, 0, 255), 1)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (0, 0, 0), 1)  # show the match confidence
        cv2.imshow(u'Identification punch', img)
        k = cv2.waitKey(10)
        if k == 13:  # Enter: append a punch-in record to log.csv
            theTime = datetime.datetime.now()
            strings = [str(zh_name[idnum]), str(theTime)]
            print(strings)
            with open("log.csv", "a", newline="") as csvFile:
                writer = csv.writer(csvFile)
                writer.writerow([str(zh_name[idnum]), str(theTime)])
        elif k == 27:  # Esc: quit
            cam.release()
            cv2.destroyAllWindows()
            break
while True:
    a = int(input("Enter 1 to capture face data, 2 to punch in by recognition: "))
    if a == 1:
        data_collection()
        face_training()
    elif a == 2:
        face_identification()
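Note that recognizer.predict returns a label and a distance-like confidence where lower means a closer match, which is why the script converts it to a percentage with 100 - confidence. A quick sanity check after training (my own sketch, assuming a crop such as face/Member.1.1.jpg exists) is to run predict on one of the saved samples:

import cv2

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('./trainer.yml')

# The saved crops are already grayscale face regions, so they can be fed to predict directly
sample = cv2.imread('face/Member.1.1.jpg', cv2.IMREAD_GRAYSCALE)
label, distance = recognizer.predict(sample)
print(label, distance)  # the label should match the id in the file name, with a small distance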
4 How to Use
1. Capture a face: enter an id that matches the row order in the members.csv file created earlier; the script then collects samples and trains the model.
2. Recognition punch-in: press Enter to record a punch-in entry, press Esc to exit.
3. View the log: each Enter press during recognition appends a row (name, timestamp) to log.csv; the sketch below prints its contents.
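To inspect the log without opening a spreadsheet, a few lines of Python are enough (a minimal sketch, assuming log.csv sits next to the script and was written by the recognition step above):

import csv

# Each row was written as [name, timestamp] by the recognition loop
with open("log.csv", newline="") as f:
    for name, timestamp in csv.reader(f):
        print(name, timestamp)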