Blog index: Python | Face Recognition System — Blog Index
GitHub repository: Su-Face-Recognition
Note: before reading this post, please read the following first:
Tool installation and environment setup: Python | Face Recognition System — Introduction
UI design: Python | Face Recognition System — UI Design
UI event handling: Python | Face Recognition System — UI Event Handling
Camera feed display: Python | Face Recognition System — Camera Feed Display
I. Basic Approach
The code combines silent liveness detection with interactive liveness detection.
Silent liveness detection calls the Baidu API and decides pass/fail from the confidence score returned by the interface.
Interactive liveness detection asks the user to perform a set of actions and decides pass/fail from whether they are completed.
II. Initialization
Initialize the isFaceDetection_flag flag, which records whether liveness detection is currently running.
Bind the liveness-detection toggle handler detect_face_judge to the button.
The remaining attributes are explained later.
def __init__(self, parent=None):
    super(UserMainWindow, self).__init__(parent)
    self.setupUi(self)
    self.isFaceDetection_flag = False  # whether liveness detection is currently enabled
    self.biopsy_testing_button.clicked.connect(self.detect_face_judge)  # liveness detection
    self.detector = None  # face detector
    self.predictor = None  # facial landmark predictor
    # blink / mouth-opening thresholds
    self.EAR_THRESH = None
    self.MOUTH_THRESH = None
    # total action counters
    self.eye_flash_counter = None
    self.mouth_open_counter = None
    self.turn_left_counter = None
    self.turn_right_counter = None
    # consecutive-frame thresholds
    self.EAR_CONSTANT_FRAMES = None
    self.MOUTH_CONSTANT_FRAMES = None
    self.LEFT_CONSTANT_FRAMES = None
    self.RIGHT_CONSTANT_FRAMES = None
    # consecutive-frame counters
    self.eye_flash_continuous_frame = 0
    self.mouth_open_continuous_frame = 0
    self.turn_left_continuous_frame = 0
    self.turn_right_continuous_frame = 0
    # text color
    self.text_color = (255, 0, 0)
    # Baidu API tool class
    self.api = BaiduApiUtil
III. The Toggle Handler
# liveness-detection toggle, bound to the button
def detect_face_judge(self):
    if not self.cap.isOpened():
        QMessageBox.information(self, "提示", self.tr("請先打開攝像頭"))
    else:
        if not self.isFaceDetection_flag:
            self.isFaceDetection_flag = True
            self.biopsy_testing_button.setText("關閉活體檢測")
            self.detect_face()
            self.biopsy_testing_button.setText("活體檢測")
            self.isFaceDetection_flag = False
        elif self.isFaceDetection_flag:
            self.isFaceDetection_flag = False
            self.remind_label.setText("")
            self.biopsy_testing_button.setText("活體檢測")
            self.show_camera()
IV. The Detector
First check whether the machine is online (the connectivity check lives in the BaiduApiUtil tool class; its code is shown further below). When online, run silent liveness detection followed by interactive liveness detection (the online path); otherwise run interactive liveness detection alone (the local path).
# Baidu API
self.api = BaiduApiUtil

... ...

# overall liveness detection
def detect_face(self):
    if self.api.network_connect_judge():
        if not self.detect_face_network():
            return False
    if not self.detect_face_local():
        return False
    return True

# online liveness detection
def detect_face_network(self):
    ... ...

# local liveness detection
def detect_face_local(self):
    ... ...
1. Silent Liveness Detection
Silent liveness detection relies on the Baidu AI Cloud interface. We create a tool class, BaiduApiUtil, that implements the connectivity check, the request, and result parsing, and then call it from the user-interface logic.
For interface details, see Baidu AI Cloud — API reference.
For sample code, see Baidu AI Cloud — code samples.
Note: before use, register a Baidu AI Cloud account and apply for the interface (it is free) to obtain your own API_KEY and SECRET_KEY.
(1) The BaiduApiUtil tool class
a. Connectivity check
def network_connect_judge():
    """
    Connectivity check
    :return: whether the machine is online
    """
    # -n is the Windows ping flag for packet count (Linux/macOS use -c)
    ret = os.system("ping baidu.com -n 1")
    return ret == 0
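Since the -n flag makes this ping call Windows-specific, a cross-platform alternative is to attempt a short TCP connection instead. This is only a minimal sketch, not part of the original project; the function name, target host, and 3-second timeout are choices made for this post.

import socket

def network_connect_judge_socket(host="www.baidu.com", port=443, timeout=3):
    """Cross-platform connectivity check: a short TCP handshake instead of ping."""
    try:
        socket.create_connection((host, port), timeout=timeout).close()
        return True
    except OSError:
        return False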
b. Obtaining the access token
Save the API_KEY and the other Baidu interface parameters in a .conf configuration file (kept in the project's conf directory), then read them with ConfigParser.

[baidu_config]
app_id = XXXXXXXXXXXXXXXXXXXXXXXX
secret_key = XXXXXXXXXXXXXXXXXXXXXXXX
def get_access_token():
    """
    Obtain the access token
    :return: access token
    """
    conf = ConfigParser()
    path = os.path.join(os.path.dirname(__file__))
    conf.read(path[:path.rindex('util')] + "conf\\setting.conf", encoding='gbk')
    API_KEY = conf.get('baidu_config', 'app_id')
    SECRET_KEY = conf.get('baidu_config', 'secret_key')
    url = "https://aip.baidubce.com/oauth/2.0/token"
    params = {"grant_type": "client_credentials", "client_id": API_KEY, "client_secret": SECRET_KEY}
    return str(requests.post(url, params=params).json().get("access_token"))
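get_access_token fires an HTTP round trip on every call, even though the token stays valid far longer than a single session. A small cache avoids the repeated request. This wrapper is a sketch only, not part of the project; the names get_access_token_cached and _TOKEN_CACHE and the one-day refresh interval are assumptions made here.

import time

_TOKEN_CACHE = {"value": None, "fetched_at": 0.0}
_TOKEN_TTL_SECONDS = 24 * 3600  # assumed refresh interval, well inside the token's validity

def get_access_token_cached():
    """Wrap get_access_token() so the HTTP request happens at most once per day."""
    now = time.time()
    if _TOKEN_CACHE["value"] is None or now - _TOKEN_CACHE["fetched_at"] > _TOKEN_TTL_SECONDS:
        _TOKEN_CACHE["value"] = get_access_token()
        _TOKEN_CACHE["fetched_at"] = now
    return _TOKEN_CACHE["value"]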
c. Calling the interface
Note: the API expects the uploaded image as a base64 string, while the capture we pass in is a .jpg file, so the image must first be converted with base64.b64encode().
def face_api_invoke(path):
    """
    Invoke the face-verify API
    :param path: path of the image to check
    :return: whether silent liveness detection passed
    """
    with open(path, 'rb') as f:
        img_data = f.read()
        base64_data = base64.b64encode(img_data)
        base64_str = base64_data.decode('utf-8')
    url = "https://aip.baidubce.com/rest/2.0/face/v3/faceverify?access_token=" + get_access_token()
    headers = {'Content-Type': 'application/json'}
    payload = json.dumps([{
        "image": base64_str,
        "image_type": "BASE64"
    }])
    response = requests.request("POST", url, headers=headers, data=payload)
    print(response)
    result = json.loads(response.text)
    if result["error_msg"] == "SUCCESS":
        # frr_1e-3 is the score threshold for a false-rejection rate of 1/1000;
        # the liveness score must reach it for the silent check to pass
        frr_1e_3 = result["result"]["thresholds"]["frr_1e-3"]
        face_liveness = result["result"]["face_liveness"]
        if face_liveness >= frr_1e_3:
            return True
    # API error, or score below the pass threshold: treat as not passed
    return False
(2) Calling it from the user main window
# current file directory
curPath = os.path.abspath(os.path.dirname(__file__))
# project root path
rootPath = curPath[:curPath.rindex('logic')]  # 'logic' is the folder that holds the UI logic code
# configuration folder
CONF_FOLDER_PATH = rootPath + 'conf\\'
# photo folder
PHOTO_FOLDER_PATH = rootPath + 'photo\\'
# data folder
DATA_FOLDER_PATH = rootPath + 'data\\'

... ...
# 聯(lián)網(wǎng)活體檢測
def detect_face_network(self):
while self.cap.isOpened():
ret, frame = self.cap.read()
frame_location = face_recognition.face_locations(frame)
if len(frame_location) == 0:
QApplication.processEvents()
self.remind_label.setText("未檢測到人臉")
else:
global PHOTO_FOLDER_PATH
shot_path = PHOTO_FOLDER_PATH + datetime.now().strftime("%Y%m%d%H%M%S") + ".jpg"
self.show_image.save(shot_path)
QApplication.processEvents()
self.remind_label.setText("正在初始化\n請稍后")
# 百度API進(jìn)行活體檢測
QApplication.processEvents()
if not self.api.face_api_invoke(shot_path):
os.remove(shot_path)
QMessageBox.about(self, '警告', '未通過活體檢測')
self.remind_label.setText("")
return False
else:
os.remove(shot_path)
return True
show_video = cv2.cvtColor(cv2.resize(frame, (self.WIN_WIDTH, self.WIN_HEIGHT)), cv2.COLOR_BGR2RGB)
self.show_image = QImage(show_video.data, show_video.shape[1], show_video.shape[0], QImage.Format_RGB888)
self.camera_label.setPixmap(QPixmap.fromImage(self.show_image))
2. Interactive Liveness Detection
(1) How it works
We use dlib's shape_predictor_68_face_landmarks model to detect and locate 68 facial landmarks. The interactive check covers several actions: turning the head left, turning it right, blinking, opening the mouth, nodding, and so on, so it needs the landmark groups for the nose [32, 36], the left eye [37, 42], the right eye [43, 48], the inner edge of the upper lip [66, 68], and other regions.
Blink detection is based on the eye aspect ratio (EAR). While the eye is open, the EAR fluctuates around a roughly constant value; when the eye closes, the EAR drops sharply, in theory toward zero. In practice a threshold of about 0.25 separates the two states well, so this system sets the threshold at 0.25.
The EAR is computed as
EAR = (‖p2 − p6‖ + ‖p3 − p5‖) / (2 ‖p1 − p4‖)
where p1 through p6 are the six landmark points of one eye: p1 and p4 sit at the eye corners, p2 and p3 on the upper eyelid, and p5 and p6 on the lower eyelid.
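To make the 0.25 threshold concrete, here is a small numeric illustration; the landmark coordinates are made up for this post, not taken from the project.

from scipy.spatial import distance as dist

# six (x, y) landmarks ordered p1..p6 — fabricated coordinates for illustration only
open_eye = [(0, 3), (2, 5), (4, 5), (6, 3), (4, 1), (2, 1)]
closed_eye = [(0, 3), (2, 3.4), (4, 3.4), (6, 3), (4, 2.6), (2, 2.6)]

def ear(eye):
    A = dist.euclidean(eye[1], eye[5])  # ||p2 - p6||
    B = dist.euclidean(eye[2], eye[4])  # ||p3 - p5||
    C = dist.euclidean(eye[0], eye[3])  # ||p1 - p4||
    return (A + B) / (2.0 * C)

print(round(ear(open_eye), 2))    # 0.67 -> eye open, well above the 0.25 threshold
print(round(ear(closed_eye), 2))  # 0.13 -> eye closed, below the threshold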
(2) Implementation
Each frame from the camera is read and its EAR is computed. While the EAR stays below the threshold, a consecutive-frame counter is incremented; when the EAR rises back above the threshold after at least 2 consecutive sub-threshold frames, the movement is counted as one blink.
Mouth opening and left/right head turns are handled the same way: dlib supplies the landmarks of the relevant region, the corresponding ratio is computed and compared against a preset threshold, the consecutive-frame counter is incremented while the ratio is past the threshold, and once that counter exceeds the required number of frames the movement is recorded as one valid action.
Because the user has to actually perform the actions, printed or on-screen photos essentially cannot pass this liveness check.
A video is a different matter: an attacker could play back a pre-recorded clip that performs the actions in a fixed order. Our countermeasures are as follows:
The system requires the user to turn the head left, turn it right, blink, and open the mouth, with prescribed numbers of blinks and mouth openings. The order of the actions is shuffled randomly, and the required blink and mouth-open counts are drawn at random as well.
As a result, every interactive liveness check presents a different action sequence with different required counts. If the user does not finish within the system's time limit, the attempt is judged to have failed. If liveness detection fails more than 3 times at login, the system treats the account as at risk and locks it; a locked account can only be unlocked by an administrator through the admin system (a minimal sketch of this lockout rule follows below).
Together, these measures give the system a workable defense against video replay attacks as well.
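The lockout rule itself is not shown in the excerpts in this post. The following is a minimal sketch of the idea only; the attribute and method names (login_fail_counter, lock_current_user) are hypothetical, and the real project wires this into its own account logic.

MAX_LIVENESS_FAILURES = 3  # assumed limit, matching the "more than 3 failures" rule above

def handle_liveness_result(self, passed):
    """Track consecutive liveness failures at login; lock the account when the limit is hit."""
    if passed:
        self.login_fail_counter = 0
        return True
    self.login_fail_counter = getattr(self, "login_fail_counter", 0) + 1
    if self.login_fail_counter >= MAX_LIVENESS_FAILURES:
        self.lock_current_user()  # hypothetical: flag the account so only an admin can unlock it
    return False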
(3) Code Walkthrough
a. Initialization
The parameters to initialize are: the landmark predictor self.predictor and face detector self.detector, the blink/mouth thresholds, the required action counts, the consecutive-frame thresholds, the consecutive-frame counters, the running frame total, the random challenge values, and the facial-landmark index ranges.
Landmark predictor: the dlib shape_predictor_68_face_landmarks model locates the 68 facial landmarks and has to be loaded first. Because loading takes a while, the code checks whether the detector and predictor already exist; from the second run onward it reuses the loaded objects, which shortens initialization.
Facial-landmark index ranges: the index ranges of the landmark groups (left eye, right eye, mouth) used for the current face.
Thresholds and consecutive-frame counters: the EAR and MAR thresholds for blinking and mouth opening; whenever the current frame crosses a threshold, the matching consecutive-frame counter is incremented.
Consecutive-frame thresholds: once the consecutive-frame count reaches this many frames and the ratio returns past the threshold, the movement counts as one blink or one mouth opening.
Required action counts: how many times the user must perform each action.
Running frame total: the number of frames processed since detection started; when it exceeds the system limit, the liveness check is judged to have failed.
Random challenge values: the random blink and mouth-open counts plus a randomly shuffled action sequence, e.g. (turn right - blink - open mouth - turn left), (turn right - blink - turn left - open mouth), or (blink - open mouth - turn right - turn left).
The shape_predictor_68_face_landmarks.dat model file sits in the project's data directory (see the project layout in the repository).
# local liveness detection
def detect_face_local(self):
    self.detect_start_time = time()
    QApplication.processEvents()
    self.remind_label.setText("正在初始化\n請稍后")
    # the landmark predictor is slow to load the first time; skip reloading on later runs
    if self.detector is None:
        self.detector = dlib.get_frontal_face_detector()
    if self.predictor is None:
        self.predictor = dlib.shape_predictor('../data/shape_predictor_68_face_landmarks.dat')
    # blink / mouth-opening thresholds
    self.EAR_THRESH = 0.25
    self.MOUTH_THRESH = 0.7
    # total action counters
    self.eye_flash_counter = 0
    self.mouth_open_counter = 0
    self.turn_left_counter = 0
    self.turn_right_counter = 0
    # consecutive-frame thresholds
    self.EAR_CONSTANT_FRAMES = 2
    self.MOUTH_CONSTANT_FRAMES = 2
    self.LEFT_CONSTANT_FRAMES = 4
    self.RIGHT_CONSTANT_FRAMES = 4
    # consecutive-frame counters
    self.eye_flash_continuous_frame = 0
    self.mouth_open_continuous_frame = 0
    self.turn_left_continuous_frame = 0
    self.turn_right_continuous_frame = 0
    print("活體檢測 初始化時間:", time() - self.detect_start_time)
    # running frame total
    total_frame_counter = 0
    # random challenge values
    now_flag = 0
    random_type = [0, 1, 2, 3]
    random.shuffle(random_type)
    random_eye_flash_number = random.randint(4, 6)
    random_mouth_open_number = random.randint(2, 4)
    QMessageBox.about(self, '提示', '請按照指示執行相關動作')
    self.remind_label.setText("")
    # index ranges of the facial landmark groups
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
b. Computing the EAR, MAR, and related ratios
Taking the eye as an example: take the eye's landmark coordinates, plug them into the EAR formula above, and compute the EAR value. The MAR (mouth aspect ratio) and FR (left/right turn ratio) are computed in the same way from their own landmark groups.
# 計(jì)算眼長寬比例 EAR值
@staticmethod
def count_EAR(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
EAR = (A + B) / (2.0 * C)
return EAR
# 計(jì)算嘴長寬比例 MAR值
@staticmethod
def count_MAR(mouth):
A = dist.euclidean(mouth[1], mouth[11])
B = dist.euclidean(mouth[2], mouth[10])
C = dist.euclidean(mouth[3], mouth[9])
D = dist.euclidean(mouth[4], mouth[8])
E = dist.euclidean(mouth[5], mouth[7])
F = dist.euclidean(mouth[0], mouth[6]) # 水平歐幾里德距離
ratio = (A + B + C + D + E) / (5.0 * F)
return ratio
# 計(jì)算左右臉轉(zhuǎn)動(dòng)比例 FR值
@staticmethod
def count_FR(face):
rightA = dist.euclidean(face[0], face[27])
rightB = dist.euclidean(face[2], face[30])
rightC = dist.euclidean(face[4], face[48])
leftA = dist.euclidean(face[16], face[27])
leftB = dist.euclidean(face[14], face[30])
leftC = dist.euclidean(face[12], face[54])
ratioA = rightA / leftA
ratioB = rightB / leftB
ratioC = rightC / leftC
ratio = (ratioA + ratioB + ratioC) / 3
return ratio
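count_FR compares distances on the right side of the face against the mirrored distances on the left side. A rough numeric illustration (made-up distances, not project data) shows why the checks below treat FR ≤ 0.5 as a right turn and FR ≥ 2.0 as a left turn:

# roughly frontal face: left and right distances are similar, so FR is near 1
frontal = (10 / 10 + 9 / 9 + 8 / 8) / 3       # = 1.00 -> no turn registered
# head turned right: the right-side distances shrink, FR falls below 0.5
turned_right = (4 / 11 + 4 / 10 + 3 / 9) / 3  # ~= 0.37 -> counts toward "turn right"
# head turned left: the left-side distances shrink, FR rises above 2.0
turned_left = (11 / 4 + 10 / 4 + 9 / 3) / 3   # ~= 2.75 -> counts toward "turn left"
print(frontal, turned_right, turned_left)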
c. Checking the user's actions
def check_eye_flash(self, average_EAR):
    if average_EAR < self.EAR_THRESH:
        self.eye_flash_continuous_frame += 1
    else:
        if self.eye_flash_continuous_frame >= self.EAR_CONSTANT_FRAMES:
            self.eye_flash_counter += 1
        self.eye_flash_continuous_frame = 0

def check_mouth_open(self, mouth_MAR):
    if mouth_MAR > self.MOUTH_THRESH:
        self.mouth_open_continuous_frame += 1
    else:
        if self.mouth_open_continuous_frame >= self.MOUTH_CONSTANT_FRAMES:
            self.mouth_open_counter += 1
        self.mouth_open_continuous_frame = 0

def check_right_turn(self, leftRight_FR):
    if leftRight_FR <= 0.5:
        self.turn_right_continuous_frame += 1
    else:
        if self.turn_right_continuous_frame >= self.RIGHT_CONSTANT_FRAMES:
            self.turn_right_counter += 1
        self.turn_right_continuous_frame = 0

def check_left_turn(self, leftRight_FR):
    if leftRight_FR >= 2.0:
        self.turn_left_continuous_frame += 1
    else:
        if self.turn_left_continuous_frame >= self.LEFT_CONSTANT_FRAMES:
            self.turn_left_counter += 1
        self.turn_left_continuous_frame = 0
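As a quick sanity check of this counting pattern, the same logic as check_eye_flash can be run standalone on a made-up sequence of per-frame EAR values. This helper is illustrative only and not part of the project:

def simulate_blinks(ear_values, ear_thresh=0.25, constant_frames=2):
    """Standalone re-implementation of the check_eye_flash logic, for illustration."""
    continuous = 0
    blinks = 0
    for ear in ear_values:
        if ear < ear_thresh:
            continuous += 1
        else:
            if continuous >= constant_frames:
                blinks += 1
            continuous = 0
    return blinks

# open, open, then three closed frames in a row, then open again -> one blink
print(simulate_blinks([0.32, 0.30, 0.12, 0.10, 0.11, 0.31]))  # 1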
d. The liveness-detection loop
While the camera is open, the detection loop runs; it exits only when the user passes the liveness check or the attempt times out.
while self.cap.isOpened():
    ret, frame = self.cap.read()
    total_frame_counter += 1
    frame = imutils.resize(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = self.detector(gray, 0)
    if len(rects) == 1:
        QApplication.processEvents()
        shape = self.predictor(gray, rects[0])
        shape = face_utils.shape_to_np(shape)
        # extract the landmark coordinates
        left_eye = shape[lStart:lEnd]
        right_eye = shape[rStart:rEnd]
        mouth = shape[mStart:mEnd]
        # compute the aspect ratios
        left_EAR = self.count_EAR(left_eye)
        right_EAR = self.count_EAR(right_eye)
        mouth_MAR = self.count_MAR(mouth)
        leftRight_FR = self.count_FR(shape)
        average_EAR = (left_EAR + right_EAR) / 2.0
        # compute the convex hulls of the eyes and mouth
        left_eye_hull = cv2.convexHull(left_eye)
        right_eye_hull = cv2.convexHull(right_eye)
        mouth_hull = cv2.convexHull(mouth)
        # visualization
        cv2.drawContours(frame, [left_eye_hull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [right_eye_hull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [mouth_hull], -1, (0, 255, 0), 1)
        if now_flag >= 4:
            # all four random stages completed
            self.remind_label.setText("")
            QMessageBox.about(self, '提示', '已通過活體檢測')
            self.turn_right_counter = 0
            self.mouth_open_counter = 0
            self.eye_flash_counter = 0
            return True
        if random_type[now_flag] == 0:
            if self.turn_left_counter > 0:
                now_flag += 1
            else:
                self.remind_label.setText("請向左搖頭")
                self.check_left_turn(leftRight_FR)
                self.turn_right_counter = 0
                self.mouth_open_counter = 0
                self.eye_flash_counter = 0
        elif random_type[now_flag] == 1:
            if self.turn_right_counter > 0:
                now_flag += 1
            else:
                self.remind_label.setText("請向右搖頭")
                self.check_right_turn(leftRight_FR)
                self.turn_left_counter = 0
                self.mouth_open_counter = 0
                self.eye_flash_counter = 0
        elif random_type[now_flag] == 2:
            if self.mouth_open_counter >= random_mouth_open_number:
                now_flag += 1
            else:
                self.remind_label.setText("已張嘴{}次\n還需張嘴{}次".format(
                    self.mouth_open_counter, random_mouth_open_number - self.mouth_open_counter))
                self.check_mouth_open(mouth_MAR)
                self.turn_right_counter = 0
                self.turn_left_counter = 0
                self.eye_flash_counter = 0
        elif random_type[now_flag] == 3:
            if self.eye_flash_counter >= random_eye_flash_number:
                now_flag += 1
            else:
                self.remind_label.setText("已眨眼{}次\n還需眨眼{}次".format(
                    self.eye_flash_counter, random_eye_flash_number - self.eye_flash_counter))
                self.check_eye_flash(average_EAR)
                self.turn_right_counter = 0
                self.turn_left_counter = 0
                self.mouth_open_counter = 0
    elif len(rects) == 0:
        QApplication.processEvents()
        self.remind_label.setText("沒有檢測到人臉!")
    elif len(rects) > 1:
        QApplication.processEvents()
        self.remind_label.setText("檢測到超過一張人臉!")
    # refresh the camera preview
    show_video = cv2.cvtColor(cv2.resize(frame, (self.WIN_WIDTH, self.WIN_HEIGHT)), cv2.COLOR_BGR2RGB)
    self.show_image = QImage(show_video.data, show_video.shape[1], show_video.shape[0], QImage.Format_RGB888)
    self.camera_label.setPixmap(QPixmap.fromImage(self.show_image))
    # timeout: too many frames without completing the challenge
    if total_frame_counter >= 1000.0:
        QMessageBox.about(self, '警告', '已超時,未通過活體檢測')
        self.remind_label.setText("")
        return False
(4) Full Code
# 計(jì)算眼長寬比例 EAR值
@staticmethod
def count_EAR(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
EAR = (A + B) / (2.0 * C)
return EAR
# 計(jì)算嘴長寬比例 MAR值
@staticmethod
def count_MAR(mouth):
A = dist.euclidean(mouth[1], mouth[11])
B = dist.euclidean(mouth[2], mouth[10])
C = dist.euclidean(mouth[3], mouth[9])
D = dist.euclidean(mouth[4], mouth[8])
E = dist.euclidean(mouth[5], mouth[7])
F = dist.euclidean(mouth[0], mouth[6]) # 水平歐幾里德距離
ratio = (A + B + C + D + E) / (5.0 * F)
return ratio
# 計(jì)算左右臉轉(zhuǎn)動(dòng)比例 FR值
@staticmethod
def count_FR(face):
rightA = dist.euclidean(face[0], face[27])
rightB = dist.euclidean(face[2], face[30])
rightC = dist.euclidean(face[4], face[48])
leftA = dist.euclidean(face[16], face[27])
leftB = dist.euclidean(face[14], face[30])
leftC = dist.euclidean(face[12], face[54])
ratioA = rightA / leftA
ratioB = rightB / leftB
ratioC = rightC / leftC
ratio = (ratioA + ratioB + ratioC) / 3
return ratio
# local liveness detection
def detect_face_local(self):
    self.detect_start_time = time()
    QApplication.processEvents()
    self.remind_label.setText("正在初始化\n請稍后")
    # the landmark predictor is slow to load the first time; skip reloading on later runs
    if self.detector is None:
        self.detector = dlib.get_frontal_face_detector()
    if self.predictor is None:
        global DATA_FOLDER_PATH
        self.predictor = dlib.shape_predictor('../data/shape_predictor_68_face_landmarks.dat')
    # blink / mouth-opening thresholds
    self.EAR_THRESH = 0.25
    self.MOUTH_THRESH = 0.7
    # total action counters
    self.eye_flash_counter = 0
    self.mouth_open_counter = 0
    self.turn_left_counter = 0
    self.turn_right_counter = 0
    # consecutive-frame thresholds
    self.EAR_CONSTANT_FRAMES = 2
    self.MOUTH_CONSTANT_FRAMES = 2
    self.LEFT_CONSTANT_FRAMES = 4
    self.RIGHT_CONSTANT_FRAMES = 4
    # consecutive-frame counters
    self.eye_flash_continuous_frame = 0
    self.mouth_open_continuous_frame = 0
    self.turn_left_continuous_frame = 0
    self.turn_right_continuous_frame = 0
    print("活體檢測 初始化時間:", time() - self.detect_start_time)
    # running frame total
    total_frame_counter = 0
    # random challenge values
    now_flag = 0
    random_type = [0, 1, 2, 3]
    random.shuffle(random_type)
    random_eye_flash_number = random.randint(4, 6)
    random_mouth_open_number = random.randint(2, 4)
    QMessageBox.about(self, '提示', '請按照指示執行相關動作')
    self.remind_label.setText("")
    # index ranges of the facial landmark groups
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        total_frame_counter += 1
        frame = imutils.resize(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = self.detector(gray, 0)
        if len(rects) == 1:
            QApplication.processEvents()
            shape = self.predictor(gray, rects[0])
            shape = face_utils.shape_to_np(shape)
            # extract the landmark coordinates
            left_eye = shape[lStart:lEnd]
            right_eye = shape[rStart:rEnd]
            mouth = shape[mStart:mEnd]
            # compute the aspect ratios
            left_EAR = self.count_EAR(left_eye)
            right_EAR = self.count_EAR(right_eye)
            mouth_MAR = self.count_MAR(mouth)
            leftRight_FR = self.count_FR(shape)
            average_EAR = (left_EAR + right_EAR) / 2.0
            # compute the convex hulls of the eyes and mouth
            left_eye_hull = cv2.convexHull(left_eye)
            right_eye_hull = cv2.convexHull(right_eye)
            mouth_hull = cv2.convexHull(mouth)
            # visualization
            cv2.drawContours(frame, [left_eye_hull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [right_eye_hull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [mouth_hull], -1, (0, 255, 0), 1)
            if now_flag >= 4:
                # all four random stages completed
                self.remind_label.setText("")
                QMessageBox.about(self, '提示', '已通過活體檢測')
                self.turn_right_counter = 0
                self.mouth_open_counter = 0
                self.eye_flash_counter = 0
                return True
            if random_type[now_flag] == 0:
                if self.turn_left_counter > 0:
                    now_flag += 1
                else:
                    self.remind_label.setText("請向左搖頭")
                    self.check_left_turn(leftRight_FR)
                    self.turn_right_counter = 0
                    self.mouth_open_counter = 0
                    self.eye_flash_counter = 0
            elif random_type[now_flag] == 1:
                if self.turn_right_counter > 0:
                    now_flag += 1
                else:
                    self.remind_label.setText("請向右搖頭")
                    self.check_right_turn(leftRight_FR)
                    self.turn_left_counter = 0
                    self.mouth_open_counter = 0
                    self.eye_flash_counter = 0
            elif random_type[now_flag] == 2:
                if self.mouth_open_counter >= random_mouth_open_number:
                    now_flag += 1
                else:
                    self.remind_label.setText("已張嘴{}次\n還需張嘴{}次".format(
                        self.mouth_open_counter, random_mouth_open_number - self.mouth_open_counter))
                    self.check_mouth_open(mouth_MAR)
                    self.turn_right_counter = 0
                    self.turn_left_counter = 0
                    self.eye_flash_counter = 0
            elif random_type[now_flag] == 3:
                if self.eye_flash_counter >= random_eye_flash_number:
                    now_flag += 1
                else:
                    self.remind_label.setText("已眨眼{}次\n還需眨眼{}次".format(
                        self.eye_flash_counter, random_eye_flash_number - self.eye_flash_counter))
                    self.check_eye_flash(average_EAR)
                    self.turn_right_counter = 0
                    self.turn_left_counter = 0
                    self.mouth_open_counter = 0
        elif len(rects) == 0:
            QApplication.processEvents()
            self.remind_label.setText("沒有檢測到人臉!")
        elif len(rects) > 1:
            QApplication.processEvents()
            self.remind_label.setText("檢測到超過一張人臉!")
        # refresh the camera preview
        show_video = cv2.cvtColor(cv2.resize(frame, (self.WIN_WIDTH, self.WIN_HEIGHT)), cv2.COLOR_BGR2RGB)
        self.show_image = QImage(show_video.data, show_video.shape[1], show_video.shape[0], QImage.Format_RGB888)
        self.camera_label.setPixmap(QPixmap.fromImage(self.show_image))
        # timeout: too many frames without completing the challenge
        if total_frame_counter >= 1000.0:
            QMessageBox.about(self, '警告', '已超時,未通過活體檢測')
            self.remind_label.setText("")
            return False
def check_eye_flash(self, average_EAR):
    if average_EAR < self.EAR_THRESH:
        self.eye_flash_continuous_frame += 1
    else:
        if self.eye_flash_continuous_frame >= self.EAR_CONSTANT_FRAMES:
            self.eye_flash_counter += 1
        self.eye_flash_continuous_frame = 0

def check_mouth_open(self, mouth_MAR):
    if mouth_MAR > self.MOUTH_THRESH:
        self.mouth_open_continuous_frame += 1
    else:
        if self.mouth_open_continuous_frame >= self.MOUTH_CONSTANT_FRAMES:
            self.mouth_open_counter += 1
        self.mouth_open_continuous_frame = 0

def check_right_turn(self, leftRight_FR):
    if leftRight_FR <= 0.5:
        self.turn_right_continuous_frame += 1
    else:
        if self.turn_right_continuous_frame >= self.RIGHT_CONSTANT_FRAMES:
            self.turn_right_counter += 1
        self.turn_right_continuous_frame = 0

def check_left_turn(self, leftRight_FR):
    if leftRight_FR >= 2.0:
        self.turn_left_continuous_frame += 1
    else:
        if self.turn_left_continuous_frame >= self.LEFT_CONSTANT_FRAMES:
            self.turn_left_counter += 1
        self.turn_left_continuous_frame = 0
繼續(xù)閱讀:
用戶端邏輯:
- 人臉識別:Python | 人臉識別系統(tǒng) — 人臉識別
- 背景模糊:Python | 人臉識別系統(tǒng) — 背景模糊
- 姿態(tài)檢測:Python | 人臉識別系統(tǒng) — 姿態(tài)檢測
- 人臉比對:Python | 人臉識別系統(tǒng) — 人臉比對
- 用戶操作:Python | 人臉識別系統(tǒng) — 簡介
管理員端邏輯:文章來源:http://www.zghlxwxcb.cn/news/detail-434205.html
- 管理員操作:Python | 人臉識別系統(tǒng) —— 管理員操作
注:以上代碼僅為參考,若需要運(yùn)行,請參考項(xiàng)目GitHub完整源代碼:??Python | 人臉識別系統(tǒng) —— 管理員操作文章來源地址http://www.zghlxwxcb.cn/news/detail-434205.html