A comparison of face detection with OpenCV, dlib, and PaddleHub. dlib and PaddleHub give somewhat better results.
Note: this article covers face detection only, not face recognition. If you are looking for recognition, this article is not for you.
## Deployment

1. Install Python or conda.
2. Install the dependencies: pip install -r requirements.txt
3. Replace 192.168.1.41 with the IP of the machine you deploy on.
4. Start the service: python app_dlib.py
5. Try it in a browser: http://192.168.1.41:7049
6. API endpoint: http://192.168.1.41:7049/run/predict/
API parameters: send a POST request whose body is a JSON object containing one base64-encoded image; just swap in your own image. An example body is shown below, followed by a minimal Python client sketch.
{
  "fn_index": 0,
  "data": ["data:image/jpeg;base64,/9j/4AAQSkZJtXlnut7A8QOeSpiTO/DNIrhn3HpugKCATj590EhqShGP8VInOz6TrioYTyGR0oyiMh/dnEpkQ0Pu+Yy+QWamDMkbve9U6MyWdEa+MqHDn1zUtpCT4f/AC//2Q=="],
  "session_hash": "s1oy98lial"
}
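For a quick test from Python, here is a minimal client sketch, assuming the dlib service from this article is running on 192.168.1.41:7049, that a local test image named 1.png exists, and that the session_hash value from the example above is reused; the helper name to_data_uri is also just an illustration, not part of the deployment.

```python
# Minimal client sketch for the Gradio /run/predict/ endpoint.
# Assumptions: service reachable at 192.168.1.41:7049, local file "1.png" exists.
import base64
import requests

def to_data_uri(path, mime="image/png"):
    """Read a local image and return a data-URI string (mime should match the file type)."""
    with open(path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode("utf-8")
    return f"data:{mime};base64,{b64}"

payload = {
    "fn_index": 0,
    "data": [to_data_uri("1.png")],
    "session_hash": "s1oy98lial",
}

resp = requests.post("http://192.168.1.41:7049/run/predict/", json=payload)
print(resp.json())
```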
## Dependencies (only one of the three detectors is needed)
dlib requires a C++ compiler (gcc or Visual Studio).
gradio
opencv-python
dlib
paddlehub
## OpenCV detection
import gradio as gr
import cv2

# Load the Haar cascade face detector
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye_tree_eyeglasses.xml')

# UGC: Define the inference fn() for your models
def model_inference(image):
    # Load the image from disk instead if a path is passed
    # image = cv2.imread(image)
    # Convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Run face detection
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
    # Draw a rectangle around each detected face
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 3)
    # Show the result in a window (not needed for the Gradio demo)
    # cv2.imshow('Face Detection', image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    json_out = {"result": len(faces)}
    return image, json_out

def clear_all():
    return None, None, None

with gr.Blocks() as demo:
    gr.Markdown("Face detection")
    with gr.Column(scale=1, min_width=100):
        img_in = gr.Image(value="1.png", label="Input")
        with gr.Row():
            btn1 = gr.Button("Clear")
            btn2 = gr.Button("Submit")
        img_out = gr.Image(label="Output").style(height=400)
        json_out = gr.JSON(label="jsonOutput")
    btn2.click(fn=model_inference, inputs=img_in, outputs=[img_out, json_out])
    btn1.click(fn=clear_all, inputs=None, outputs=[img_in, img_out, json_out])

demo.launch(server_name='192.168.1.41', share=True, server_port=7048)
## dlib detection
import gradio as gr
import cv2
import dlib

# dlib's HOG-based frontal face detector
detector = dlib.get_frontal_face_detector()
# predictor = dlib.shape_predictor(
#     "dlib_model/shape_predictor_68_face_landmarks.dat"
# )

# UGC: Define the inference fn() for your models
def model_inference(image):
    # Load the image from disk instead if a path is passed
    # image = cv2.imread(image)
    # Convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Run face detection (1 = upsample the image once)
    faces = detector(gray, 1)
    for face in faces:
        # Draw a rectangle around each detected face
        left = face.left()
        top = face.top()
        right = face.right()
        bottom = face.bottom()
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
        # shape = predictor(image, face)  # locate the 68 facial landmarks
        # # Iterate over all landmark points and draw a small circle at each
        # for pt in shape.parts():
        #     pt_pos = (pt.x, pt.y)
        #     cv2.circle(image, pt_pos, 1, (0, 255, 0), 2)
    json_out = {"result": len(faces)}
    return image, json_out

def clear_all():
    return None, None, None

with gr.Blocks() as demo:
    gr.Markdown("Face detection")
    with gr.Column(scale=1, min_width=100):
        img_in = gr.Image(value="1.png", label="Input")
        with gr.Row():
            btn1 = gr.Button("Clear")
            btn2 = gr.Button("Submit")
        img_out = gr.Image(label="Output").style(height=400)
        json_out = gr.JSON(label="jsonOutput")
    btn2.click(fn=model_inference, inputs=img_in, outputs=[img_out, json_out])
    btn1.click(fn=clear_all, inputs=None, outputs=[img_in, img_out, json_out])

demo.launch(server_name='192.168.1.41', share=True, server_port=7049)
## PaddleHub detection
import gradio as gr
import paddlehub as hub
import cv2

# Use the face detection module shipped with PaddleHub
module = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_640")

def model_inference(image):
    # face_detection parameters:
    # images (list[numpy.ndarray]): image data, ndarray.shape is [H, W, C], BGR format;
    # paths (list[str]): image file paths;
    # batch_size (int): batch size;
    # use_gpu (bool): whether to use the GPU;
    # visualization (bool): whether to save the detection result as an image file;
    # output_dir (str): output directory; defaults to face_detector_640_predict_output when None;
    # confs_threshold (float): confidence threshold.
    faces = module.face_detection([image], visualization=False)[0]["data"]
    for face in faces:
        # Draw a rectangle around each detected face
        left = int(face["left"])
        top = int(face["top"])
        right = int(face["right"])
        bottom = int(face["bottom"])
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
    json_out = {"result": len(faces)}
    return image, json_out

def clear_all():
    return None, None, None

with gr.Blocks() as demo:
    gr.Markdown("Face detection")
    with gr.Column(scale=1, min_width=100):
        img_in = gr.Image(value="1.png", label="Input")
        with gr.Row():
            btn1 = gr.Button("Clear")
            btn2 = gr.Button("Submit")
        img_out = gr.Image(label="Output").style(height=400)
        json_out = gr.JSON(label="jsonOutput")
    btn2.click(fn=model_inference, inputs=img_in, outputs=[img_out, json_out])
    btn1.click(fn=clear_all, inputs=None, outputs=[img_in, img_out, json_out])

demo.launch(server_name='192.168.1.41', share=True, server_port=7050)
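If you want to sanity-check the PaddleHub detector without Gradio, a short sketch is shown below. It calls the module directly on a local file using the paths parameter listed in the parameter comments above; the file name 1.png is a placeholder.

```python
# Standalone sanity check for the PaddleHub detector (no Gradio).
# Assumption: a local test image named "1.png" exists.
import paddlehub as hub

module = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_640")
results = module.face_detection(paths=["1.png"], visualization=False)
# Each entry in "data" is one detected face box with left/top/right/bottom coordinates
print(results[0]["data"])
```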
## Testing the API with APIPOST
axios call example:
var axios = require("axios").default;

var options = {
  method: 'POST',
  url: 'http://192.168.1.41:7050/run/predict/',
  headers: {'content-type': 'application/json'},
  // Pass a plain object so axios serializes it to valid JSON
  data: {
    fn_index: 0,
    data: ["data:image/jpeg;base64,/9j/4gM5jj4ihEoiOUxSpDKSBjsPFBYRtXlnut7A8QOeSpiTO/DNIrhn3HpugKCATj590EhqShGP8VInOz6TrioYTyGR0oyiMh/dnEpkQ0Pu+Yy+QWamDMkbve9U6MyWdEa+MqHDn1zUtpCT4f/AC//2Q=="],
    session_hash: "s1oy98lial"
  }
};

axios.request(options).then(function (response) {
  console.log(response.data);
}).catch(function (error) {
  console.error(error);
});
jQuery call example:
const settings = {
  "async": true,
  "crossDomain": true,
  "url": "http://192.168.1.41:7050/run/predict/",
  "method": "POST",
  "headers": {
    "content-type": "application/json"
  },
  // Serialize the body to valid JSON (keys must be quoted)
  "data": JSON.stringify({
    fn_index: 0,
    data: ["data:image/jpeg;base64,/9j/4AAQSkZJUWYgM5jj4ihEoiOUxSpDKSBjsPFBYRtXlnut7A8QOeSpiTO/DNIrhn3HpugKCATj590EhqShGP8VInOz6TrioYTyGR0oyiMh/dnEpkQ0Pu+Yy+QWamDMkbve9U6MyWdEa+MqHDn1zUtpCT4f/AC//2Q=="],
    session_hash: "s1oy98lial"
  })
};

$.ajax(settings).done(function (response) {
  console.log(response);
});
That wraps up this comparison of face detection with OpenCV, dlib, and PaddleHub.