- Use a queue to implement the producer-consumer pattern
- Create threads by subclassing Thread to speed up the crawler
'''
https://sc.chinaz.com/tupian/index.html
https://sc.chinaz.com/tupian/index_2.html
https://sc.chinaz.com/tupian/index_3.html
'''
from threading import Thread
from queue import Queue, Empty
import requests
from bs4 import BeautifulSoup
import os
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.69',
}
class Put_Thread(Thread):
    """Producer: takes listing-page URLs from url_queue, parses out image info, pushes it to img_queue."""
    def __init__(self, url_queue, img_queue):
        super().__init__()
        self.url_queue = url_queue
        self.img_queue = img_queue

    def run(self):
        # Keep consuming page URLs until the URL queue is drained.
        while not self.url_queue.empty():
            url = self.url_queue.get()
            self.fetch_url(url)

    def fetch_url(self, url):
        response = requests.get(url, headers=headers)
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'lxml')
        # Thumbnails are lazy-loaded <img class="lazy"> tags.
        data_list = soup.find_all('img', class_='lazy')
        for i in data_list:
            title = i.get('alt')
            # Strip the "_s" thumbnail suffix to get the full-size image URL.
            href = 'https:' + i.get('data-original').replace('_s', '')
            self.img_queue.put((title, href))
class Get_Thread(Thread):
    """Consumer: takes (title, url) pairs from img_queue and writes the images to ./image."""
    def __init__(self, img_queue):
        super().__init__()
        self.img_queue = img_queue

    def run(self):
        while True:
            try:
                # If nothing arrives within 3 seconds, assume the producers have finished.
                img_data = self.img_queue.get(timeout=3)
            except Empty:
                break
            else:
                title, href = img_data
                # exist_ok avoids a race when several consumer threads create the folder at once.
                os.makedirs('./image', exist_ok=True)
                content = requests.get(href, headers=headers).content
                with open('./image/' + title + '.jpg', 'wb') as f:
                    f.write(content)
                print(title, 'saved successfully!')
def main():
    # Queue of listing-page URLs (producer input).
    url_queue = Queue()
    # Queue of (title, image URL) pairs (producer output / consumer input).
    img_queue = Queue()
    # Page 1 is index.html; pages 2-10 follow the index_N.html pattern shown in the docstring above.
    url_queue.put('https://sc.chinaz.com/tupian/index.html')
    for i in range(2, 11):
        url = 'https://sc.chinaz.com/tupian/index_{}.html'.format(i)
        url_queue.put(url)
    # Start 41 producer threads and 41 consumer threads.
    for i in range(41):
        t1 = Put_Thread(url_queue, img_queue)
        t1.start()
        t2 = Get_Thread(img_queue)
        t2.start()


if __name__ == '__main__':
    main()
    print('\n************ main thread finished ************\n')
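The consumer above stops when `img_queue.get(timeout=3)` times out, which ties shutdown to a guess about how slow the network might be. A common alternative is to `join()` the producers and then push one sentinel value per consumer. The sketch below is a minimal, self-contained illustration of that idea; the `Producer`/`Consumer` names and the fake work items are invented for the demo, not the article's code, and no real HTTP requests are made.

```python
from threading import Thread
from queue import Queue, Empty
import time

SENTINEL = None  # special value meaning "no more work"

class Producer(Thread):
    def __init__(self, url_queue, img_queue):
        super().__init__()
        self.url_queue, self.img_queue = url_queue, img_queue

    def run(self):
        while True:
            try:
                page = self.url_queue.get_nowait()
            except Empty:
                break
            # Pretend each page yields two images; a real crawler would parse HTML here.
            for n in range(2):
                self.img_queue.put(('{}-img{}'.format(page, n), n))

class Consumer(Thread):
    def __init__(self, img_queue):
        super().__init__()
        self.img_queue = img_queue

    def run(self):
        while True:
            item = self.img_queue.get()
            if item is SENTINEL:      # deterministic shutdown, no timeout guessing
                break
            title, _ = item
            time.sleep(0.01)          # stand-in for the real download
            print('downloaded', title)

def main():
    url_queue, img_queue = Queue(), Queue()
    for page in range(1, 4):
        url_queue.put('page{}'.format(page))

    producers = [Producer(url_queue, img_queue) for _ in range(3)]
    consumers = [Consumer(img_queue) for _ in range(3)]
    for t in producers + consumers:
        t.start()
    for t in producers:               # wait until every page has been "parsed"
        t.join()
    for _ in consumers:               # one sentinel per consumer thread
        img_queue.put(SENTINEL)
    for t in consumers:
        t.join()

if __name__ == '__main__':
    main()
```

With sentinels the program ends exactly when all queued work has been handled, however slow an individual download happens to be.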
- A queue lets threads communicate with each other safely, since Queue is thread-safe (a minimal demo follows below)
- Subclassing Thread creates a thread whose run() method is executed when start() is called
- Spawning this many threads eats CPU; the speed-up comes from overlapping network I/O, because the GIL prevents Python threads from running CPU-bound work in parallel
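As a concrete illustration of the first two points, here is a tiny standard-library-only sketch (the `Worker` class and the squaring task are made up for the demo): `run()` executes in the new thread once `start()` is called, and the two queues carry data between the worker threads and the main thread without any explicit locks.

```python
from threading import Thread, current_thread
from queue import Queue

class Worker(Thread):
    """run() is invoked automatically in a new thread when start() is called."""
    def __init__(self, tasks, results):
        super().__init__(daemon=True)
        self.tasks, self.results = tasks, results

    def run(self):
        while True:
            n = self.tasks.get()
            # Queue is thread-safe, so no extra locking is needed here.
            self.results.put((current_thread().name, n * n))
            self.tasks.task_done()

tasks, results = Queue(), Queue()
for _ in range(4):
    Worker(tasks, results).start()
for n in range(10):
    tasks.put(n)
tasks.join()                     # block until every task has been marked done
while not results.empty():
    print(results.get())         # e.g. ('Thread-2', 9): which thread squared what
```

`task_done()`/`join()` is another way to wait for outstanding work instead of relying on a timeout, and would also fit the image crawler above.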