Deployment reference: https://github.com/PaddlePaddle/FastDeploy/blob/develop/tutorials/multi_thread/python/pipeline/README_CN.md
Installation
CPU: pip install fastdeploy-python
GPU: pip install fastdeploy-gpu-python
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/tutorials/multi_thread/python/pipeline
# Download the models, a test image, and the label dictionary
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
tar xvf ch_PP-OCRv3_det_infer.tar
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
tar xvf ch_PP-OCRv3_rec_infer.tar
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
Commands:
Multi-thread:
python multi_thread_process_ocr.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image_path xxx/xxx --device gpu --thread_num 3
Multi-process:
python multi_thread_process_ocr.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image_path xxx/xxx --device gpu --use_multi_process True --process_num 3
Issues
Bug in multi-process image distribution
File: multi_thread_process_ocr.py
Original code: line 270
Fix: change it as follows, removing the 1 (an off-by-one in the per-process slice)
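The original line isn't reproduced here; based on the "# - 1" marker left at the matching slice in the threaded code below, the off-by-one presumably looked like this (hypothetical reconstruction):

# Hypothetical reconstruction -- the note above only says "line 270: remove the 1".
imgs_list = ['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg']
process_num = 2
n = int(len(imgs_list) / process_num)
for i in range(process_num):
    # buggy: imgs_list[i * n:(i + 1) * n - 1] -> the last image of each chunk is never processed
    chunk = imgs_list[i * n:(i + 1) * n]  # fixed: drop the "- 1" so the chunks cover every image
    print(i, chunk)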
ModuleNotFoundError: No module named 'example'
Caused by installing the wrong package: fastdeploy and fastdeploy-python are not the same package.
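If you hit this, uninstall the wrong package and reinstall the correct wheel from the install step above, e.g. `pip uninstall fastdeploy` followed by `pip install fastdeploy-gpu-python` (or `fastdeploy-python` for CPU).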
CUDA error(3), initialization error.
----------------------
Error Message Summary:
----------------------
ExternalError: CUDA error(3), initialization error.
[Hint: Please search for the error code(3) on website (https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1g3f51e3575c2178246db0a94a430e0038) to get Nvidia's official solution and advice about CUDA Error.] (at /home/fastdeploy/develop/paddle_build/v0.0.0/Paddle/paddle/phi/backends/gpu/cuda/cuda_info.cc:251)
References:
PaddlePaddle troubleshooting: using Python multiprocessing raises CUDA error(3), initialization error.
https://github.com/PaddlePaddle/PaddleDetection/issues/2241
Paddle-related modules should only be imported and initialized inside the worker function; do not touch these modules at module level in the parent process before the workers are spawned.
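A minimal sketch of the safe pattern, mirroring what the server code below does with its Pool initializer: the parent process never initializes the GPU; each worker imports and builds the model itself. The fastdeploy calls are placeholders for the real load_model().

from multiprocessing import Pool

model = None  # set per-worker by the initializer

def init_worker():
    global model
    # Import paddle/fastdeploy and construct the model HERE, inside the child
    # process, so the first CUDA call happens after the fork.
    import fastdeploy as fd  # assumes fastdeploy is installed as above
    model = fd.RuntimeOption()  # placeholder; the real load_model() builds PPOCRv3

def work(path):
    return path  # placeholder; the real worker calls model.predict(...)

if __name__ == '__main__':
    with Pool(2, initializer=init_worker) as pool:
        print(pool.map(work, ['1.jpg', '2.jpg']))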
Flask deployment
The client sends a list of base64-encoded images; the server returns a list of recognized strings.
Note: place the server-side file under the FastDeploy/tutorials/multi_thread/python/pipeline directory.
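For reference, the request/response contract the server implements (field names taken from the code below):

# Request body: "img_base64" is a LIST of base64-encoded images;
# the response carries one recognized string per image, in order.
request_body = {'img_base64': ['<base64 of image 1>', '<base64 of image 2>']}
response_body = {'message': 'Data received', 'result': ['text of image 1', 'text of image 2']}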
Create the server side:
from threading import Thread
import cv2
import os
from multiprocessing import Pool
import fastdeploy as fd
import numpy as np
import base64
from PIL import Image
from io import BytesIO
from flask import Flask, request, jsonify
import argparse
import ast

app = Flask(__name__)  # required by the @app.route decorator below

# watch -n 0.1 nvidia-smi
def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--det_model",
        # required=True,
        type=str,
        default='ch_PP-OCRv3_det_infer',
        help="Path of Detection model of PPOCR.")
    parser.add_argument(
        "--cls_model",
        # required=True,
        type=str,
        default='ch_ppocr_mobile_v2.0_cls_infer',
        help="Path of Classification model of PPOCR.")
    parser.add_argument(
        "--rec_model",
        # required=True,
        type=str,
        default='ch_PP-OCRv3_rec_infer',
        help="Path of Recognition model of PPOCR.")
    parser.add_argument(
        "--rec_label_file",
        # required=True,
        type=str,
        default='ppocr_keys_v1.txt',
        help="Path of Recognition label file of PPOCR.")
    # parser.add_argument(
    #     "--image_path",
    #     type=str,
    #     required=True,
    #     help="The directory or path or file list of the images to be predicted."
    # )
    parser.add_argument(
        "--device",
        type=str,
        default='gpu',  # cpu
        help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
    parser.add_argument(
        "--backend",
        type=str,
        default="default",
        help="Type of inference backend, support ort/trt/paddle/openvino, default 'openvino' for cpu, 'tensorrt' for gpu"
    )
    parser.add_argument(
        "--device_id",
        type=int,
        default=0,
        help="Define which GPU card used to run model.")
    parser.add_argument(
        "--cpu_thread_num",
        type=int,
        default=9,
        help="Number of threads while inference on CPU.")
    parser.add_argument(
        "--cls_bs",
        type=int,
        default=1,
        help="Classification model inference batch size.")
    parser.add_argument(
        "--rec_bs",
        type=int,
        default=6,
        help="Recognition model inference batch size")
    parser.add_argument("--thread_num", type=int, default=1, help="thread num")
    parser.add_argument(
        "--use_multi_process",
        type=ast.literal_eval,
        default=True,
        help="Whether to use multi process.")
    parser.add_argument(
        "--process_num", type=int, default=5, help="process num")
    return parser.parse_args()
def get_image_list(image_path):
    image_list = []
    if os.path.isfile(image_path):
        image_list.append(image_path)
    # load images from a directory
    elif os.path.isdir(image_path):
        for root, dirs, files in os.walk(image_path):
            for f in files:
                image_list.append(os.path.join(root, f))
    else:
        raise FileNotFoundError(
            '{} is not found. It should be a path of an image, or a directory including images.'.
            format(image_path))
    if len(image_list) == 0:
        raise RuntimeError(
            'There are no image files in `--image_path`={}'.format(image_path))
    return image_list
def build_option(args):
    option = fd.RuntimeOption()
    if args.device.lower() == "gpu":
        option.use_gpu(args.device_id)
    option.set_cpu_thread_num(args.cpu_thread_num)
    if args.device.lower() == "kunlunxin":
        option.use_kunlunxin()
        return option
    if args.backend.lower() == "trt":
        assert args.device.lower(
        ) == "gpu", "TensorRT backend requires inference on device GPU."
        option.use_trt_backend()
    elif args.backend.lower() == "pptrt":
        assert args.device.lower(
        ) == "gpu", "Paddle-TensorRT backend requires inference on device GPU."
        option.use_trt_backend()
        option.enable_paddle_trt_collect_shape()
        option.enable_paddle_to_trt()
    elif args.backend.lower() == "ort":
        option.use_ort_backend()
    elif args.backend.lower() == "paddle":
        option.use_paddle_infer_backend()
    elif args.backend.lower() == "openvino":
        assert args.device.lower(
        ) == "cpu", "OpenVINO backend requires inference on device CPU."
        option.use_openvino_backend()
    return option
def load_model(args, runtime_option):
    # Detection model: detects text boxes
    det_model_file = os.path.join(args.det_model, "inference.pdmodel")
    det_params_file = os.path.join(args.det_model, "inference.pdiparams")
    # Classification model: text direction classification (optional)
    cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
    cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
    # Recognition model: text recognition
    rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
    rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
    rec_label_file = args.rec_label_file

    # The PPOCR cls and rec models now support batched inference.
    # The two variables below are used to set the TRT input shapes and, after
    # the PPOCR pipeline is initialized, to configure batched inference.
    cls_batch_size = 1
    rec_batch_size = 6

    # When using TRT, set a dynamic input shape on each model's runtime and
    # then create the model. Note: create the detection model first, then set
    # the classification model's dynamic input and create it; same for the
    # recognition model. If you want to change the detection model's input
    # shape yourself, keep the height and width as multiples of 32.
    det_option = runtime_option
    det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
                                   [1, 3, 960, 960])
    # The TRT engine file can be cached to disk:
    # det_option.set_trt_cache_file(args.det_model + "/det_trt_cache.trt")
    global det_model
    det_model = fd.vision.ocr.DBDetector(
        det_model_file, det_params_file, runtime_option=det_option)

    cls_option = runtime_option
    cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
                                   [cls_batch_size, 3, 48, 320],
                                   [cls_batch_size, 3, 48, 1024])
    # cls_option.set_trt_cache_file(args.cls_model + "/cls_trt_cache.trt")
    global cls_model
    cls_model = fd.vision.ocr.Classifier(
        cls_model_file, cls_params_file, runtime_option=cls_option)

    rec_option = runtime_option
    rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
                                   [rec_batch_size, 3, 48, 320],
                                   [rec_batch_size, 3, 48, 2304])
    # rec_option.set_trt_cache_file(args.rec_model + "/rec_trt_cache.trt")
    global rec_model
    rec_model = fd.vision.ocr.Recognizer(
        rec_model_file,
        rec_params_file,
        rec_label_file,
        runtime_option=rec_option)

    # Create the PP-OCR pipeline chaining the three models; cls_model is
    # optional and can be set to None if not needed.
    global ppocr_v3
    ppocr_v3 = fd.vision.ocr.PPOCRv3(
        det_model=det_model, cls_model=cls_model, rec_model=rec_model)

    # Set the inference batch size for the cls and rec models.
    # Valid values are -1 or any positive integer; with -1, the cls/rec batch
    # size defaults to the number of boxes found by the detection model.
    ppocr_v3.cls_batch_size = cls_batch_size
    ppocr_v3.rec_batch_size = rec_batch_size
def predict(model, img_list):
    result_list = []
    # predict ppocr result
    for image in img_list:
        im = cv2.imread(image)
        result = model.predict(im)
        result_list.append(result)
    return result_list


def process_predict(image):
    # predict ppocr result
    im = cv2.imread(image)
    result = ppocr_v3.predict(im)
    print(result)


def process_predict_text(base64_str):
    image = base64_to_bgr(base64_str)
    result = ppocr_v3.predict(image)
    # print(result)
    # Return a plain string: the OCR result object itself fails to serialize
    # across process boundaries.
    return ''.join(result.text)
def cv_show(img):
    '''
    Display an image in a resizable window.
    @param img: image to display
    @return: None
    '''
    cv2.namedWindow('name', cv2.WINDOW_KEEPRATIO)  # cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO
    cv2.imshow('name', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def base64_to_bgr(base64_str):
    base64_hex = base64.b64decode(base64_str)
    image = BytesIO(base64_hex)
    img = Image.open(image)
    if img.mode == 'RGBA':
        # Flatten transparency onto a white background before converting.
        width = img.width
        height = img.height
        img2 = Image.new('RGB', size=(width, height), color=(255, 255, 255))
        img2.paste(img, (0, 0), mask=img)
        image_array = np.array(img2)
    else:
        image_array = np.array(img)
    image = cv2.cvtColor(image_array, cv2.COLOR_RGB2BGR)
    return image
class WrapperThread(Thread):
    def __init__(self, func, args):
        super(WrapperThread, self).__init__()
        self.func = func
        self.args = args
        # self.result = self.func(*self.args)

    def run(self):
        self.result = self.func(*self.args)

    def get_result(self):
        return self.result
def ocr_image_list(imgs_list):
    args = parse_arguments()
    # All three models share the same deployment configuration here;
    # they can also be configured individually if needed.
    runtime_option = build_option(args)
    if args.use_multi_process:
        process_num = args.process_num
        with Pool(
                process_num,
                initializer=load_model,
                initargs=(args, runtime_option)) as pool:
            results = pool.map(process_predict_text, imgs_list)
            # pool.map(process_predict, imgs_list)
            # Post-process the results here if needed
            for i, result in enumerate(results):
                print(i, result)
    else:
        load_model(args, runtime_option)
        threads = []
        thread_num = args.thread_num
        image_num_each_thread = int(len(imgs_list) / thread_num)
        # Unless you want an independent model in each thread, model.clone()
        # behaves the same as model when creating threads because of Python's
        # GIL (Global Interpreter Lock). In addition, model.clone() consumes
        # additional memory to store independent member variables.
        for i in range(thread_num):
            if i == thread_num - 1:
                t = WrapperThread(
                    predict,
                    args=(ppocr_v3.clone(),
                          imgs_list[i * image_num_each_thread:]))
            else:
                t = WrapperThread(
                    predict,
                    args=(ppocr_v3.clone(),
                          imgs_list[i * image_num_each_thread:(i + 1) *
                                    image_num_each_thread]))  # "- 1" removed, see the fix above
            threads.append(t)
            t.start()
        for i in range(thread_num):
            threads[i].join()
        for i in range(thread_num):
            for result in threads[i].get_result():
                print('thread:', i, ', result: ', result)
@app.route('/ocr/submit', methods=['POST'])
def ocr():
    args = parse_arguments()
    process_num = 1  # args.process_num
    runtime_option = build_option(args)
    data = request.get_json()
    # Get the list of Base64-encoded images
    base64_str = data['img_base64']
    # Note: a new Pool (and a fresh model load via the initializer) is created
    # per request; simple, but the model load dominates the latency.
    with Pool(
            process_num,
            initializer=load_model,
            initargs=(args, runtime_option)) as pool:
        results = pool.map(process_predict_text, base64_str)
    # Build the response
    response = {'message': 'Data received', 'result': results}
    return jsonify(response)
if __name__ == '__main__':
    app.run(host='192.168.xxx.xxx', port=5000)
Client side:
import base64
import requests
import json

# Read the image file
with open('./pic/img.png', 'rb') as image_file:
    # Read the file content as a byte stream
    image_data = image_file.read()

# Base64-encode the image bytes
img_base64 = base64.b64encode(image_data)

data = {
    'img_base64': [img_base64.decode('utf-8')]
}
headers = {
    'Content-Type': 'application/json'
}
response = requests.post(
    "http://192.168.xxx.xxx:5000/ocr/submit",
    data=json.dumps(data),
    headers=headers)
if response.status_code == 200:
    result = response.json()
    print(result['result'])
else:
    print('Error:', response.status_code)