Contents
1. bili_comment.py
2. bili_comment_pic.py
3. bilibili.py
4. bilihot_pic.py
5. bilisearch_pic.py
6. draw_cloud.py
7. weibo.py
8. weibo_comment.py
9. weibo_comment_pic.py
10. weibo_pic.py
11. weibo_top.py
12. weibo_top_pic.py
13. weibo_top_pie.py
14. pachong.py
15. File overview
1. bili_comment.py
import requests  # send HTTP requests
import pandas as pd  # save csv files
import os  # check whether files exist
import time
from time import sleep  # wait between requests to avoid anti-crawling blocks
import json
import random  # generate random numbers
import os.path
import csv
import re
import bili_comment_pic

def trans_date(v_timestamp):
    """Convert a 10-digit timestamp into a time string."""
    timeArray = time.localtime(v_timestamp)
    otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
    return otherStyleTime
def getoid(bv):
    resp = requests.get("https://www.bilibili.com/video/" + bv)
    # the aid ("oid") appears in the page source; extract it with a regex
    obj = re.compile(f'"aid":(?P<id>.*?),"bvid":"{bv}"')
    oid = obj.search(resp.text).group('id')
    print('oid是' + oid)  # confirm at runtime that the oid parameter was obtained
    return oid
def get_bili_comment(bv_list, max_page):
    for bvid in bv_list:
        # output file name
        bili_file = 'biliComment_{}pages_{}.csv'.format(max_page, bvid)
        # if the csv already exists, delete it first
        if os.path.exists(bili_file):
            os.remove(bili_file)
            print('存在,已刪除:{}'.format(bili_file))
        # # full request headers, kept for reference
        # headers = {
        #     'Authority': 'api.bilibili.com',
        #     'Accept': 'application/json, text/plain, */*',
        #     'Accept-Encoding': 'gzip, deflate, br',
        #     'Accept-Language': 'zh-CN,zh;q=0.9',
        #     # the cookie needs to be refreshed periodically
        #     'Cookie': 'buvid3=09193776-D54E-C4E9-D77E-A3CEC61048A052609infoc; b_nut=1666432252; i-wanna-go-back=-1; b_ut=7; _uuid=9837E983-2521-B3D3-E815-AF3877BF973253126infoc; buvid_fp=bca1b3ca8709dc8fafd31a3014e880cb; nostalgia_conf=-1; PVID=1; CURRENT_FNVAL=4048; rpdid=0z9ZwfQgnR|lkoRrAma|2ss|3w1Q0AxQ; sid=73446m9u; buvid4=FFE4C4F3-FFE7-4A1B-F2E9-BA77F904B1B753643-022102217-RoU6Io6eaXN5hT%2FTDpMpDggrSpyQiYXaOp1a506ie3QU%2FFwMxK3Zhw%3D%3D; b_lsid=E6E6D472_1883D6194B0',
        #     'Origin': 'https://www.bilibili.com',
        #     'Referer': 'https://www.bilibili.com/video/BV1zh4y1H7ZS/?spm_id_from=333.999.0.0&vd_source=7dd889e8bc19f867cf9a8b6d62c711ee',
        #     'Sec-Ch-Ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
        #     'Sec-Ch-Ua-Mobile': '?0',
        #     'Sec-Ch-Ua-Platform': '"macOS"',
        #     'Sec-Fetch-Dest': 'empty',
        #     'Sec-Fetch-Mode': 'cors',
        #     'Sec-Fetch-Site': 'same-site',
        #     'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36'
        # }
        # simpler request headers that currently suffice
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36",
            "referer": "https://www.bilibili.com/"
        }
        for page in range(1, max_page + 1):
            # request parameters
            params = {
                'jsonp': 'jsonp',
                'mode': '3',  # mode=3 sorts by popularity, mode=2 sorts by time
                'oid': getoid(bvid),
                'next': page,
                'type': '1',
            }
            # type: comment type, a fixed value of 1 here
            # oid: which video (its numeric aid)
            # next: which page of comments
            # mode: sort order, see above
            url = "https://api.bilibili.com/x/v2/reply/main"  # comment API endpoint
            response = requests.get(url, headers=headers, params=params)
            print(response.status_code)
            data_list = response.json()['data']['replies']  # parse the comment data
            comment_list = []     # comment texts
            time_list = []        # comment timestamps
            # location_list = []  # commenter IP regions
            user_list = []        # commenter names
            like_list = []        # like counts
            replyCount_list = []  # reply counts
            userid_list = []      # commenter ids
#循環(huán)爬取每一條評論數(shù)據(jù)
for a in data_list:
#評論內(nèi)容
comment=a['content']['message']
comment_list.append(comment)
#評論時間
time=a['ctime']
time_list.append(trans_date(time))
#time_list.append(trans_date(v_str=i) for i in range(time))
# #IP屬地(評論后一段時間會消失,所以不爬了)
# location = a['source']
# location_list.append(location)
#評論回復(fù)數(shù)
replyCount = a['rcount']
replyCount_list.append(replyCount)
#點贊數(shù)
like = a['like']
like_list.append(like)
# 評論用戶名
user = a['member']['uname']
user_list.append(user)
# 評論用戶名
userid = a['member']['mid']
userid_list.append(userid)
#把列表拼接為dataFrame數(shù)據(jù)
df=pd.DataFrame({
#'視頻鏈接':'https://www.bilibili.com/video/'+v_bid,
'評論頁碼':page,
'評論時間':time_list,
'評論作者':user_list,
'評論id': userid_list,
#'IP屬地':location_list,
'點贊數(shù)':like_list,
'評論回復(fù)數(shù)':replyCount_list,
'評論內(nèi)容':comment_list,
})
            # write the header row only if the csv does not exist yet
            if os.path.exists(bili_file):
                header = None
            else:
                header = ['評論頁碼', '評論時間', '評論作者', '評論id', '點贊數(shù)', '評論回復(fù)數(shù)', '評論內(nèi)容']
            column = ['評論頁碼', '評論時間', '評論作者', '評論id', '點贊數(shù)', '評論回復(fù)數(shù)', '評論內(nèi)容']
            # append this page to the csv file
            df.to_csv(bili_file, mode='a+', index=False, columns=column, header=header, encoding='utf-8-sig')
            print('第{}頁爬取完成'.format(page))
        # data cleaning: deduplicate
        df = pd.read_csv(bili_file, engine='python', encoding='utf-8-sig')
        os.remove(bili_file)
        # drop duplicate rows
        df.drop_duplicates(subset='評論內(nèi)容', inplace=True, keep='first')
        # save the cleaned csv
        column = header = ['評論頁碼', '評論時間', '評論作者', '評論id', '點贊數(shù)', '評論回復(fù)數(shù)', '評論內(nèi)容']
        df.to_csv(bili_file, mode='a+', index=False, columns=column, header=header, encoding='utf-8-sig')
        print('數(shù)據(jù)清洗完成')
        bili_comment_pic.main(bili_file)
if __name__ == '__main__':
    # video BV ids; comments from several videos can be crawled in one run
    # bv_list = ['BV1Ss4y1M7KT', 'BV1VM411N7qc']
    bv_list = [str(x) for x in input("請輸入視頻bv號(示例:BV1Ss4y1M7KT,BV1VM411N7qc),以逗號分隔:").split(',')]
    # maximum number of pages to crawl
    max_page = int(input("請輸入搜索的頁數(shù)"))
    # start crawling
    get_bili_comment(bv_list=bv_list, max_page=max_page)
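
For reference, here is a trimmed-down sketch of the same reply endpoint in isolation. The endpoint, parameters, and field names follow the script above; Bilibili can change any of them, and sustained crawling will likely need the fuller headers (and cookie) shown in the commented-out block.

import requests

def fetch_reply_page(oid, page=1, mode=3):
    """Fetch one page of top-level replies as (user, likes, text) tuples."""
    headers = {
        "User-Agent": "Mozilla/5.0",
        "referer": "https://www.bilibili.com/",
    }
    params = {"jsonp": "jsonp", "mode": mode, "oid": oid, "next": page, "type": 1}
    resp = requests.get("https://api.bilibili.com/x/v2/reply/main",
                        headers=headers, params=params, timeout=10)
    resp.raise_for_status()
    replies = resp.json()["data"]["replies"] or []  # replies can be null on the last page
    return [(r["member"]["uname"], r["like"], r["content"]["message"]) for r in replies]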
2. bili_comment_pic.py
# allow duplicate OpenMP runtimes so the plot libraries do not abort
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import font_manager
import numpy as np
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

def view(info, bili_file):
    my_font = font_manager.FontProperties(fname='./STHeiti-TC-Medium.ttf')  # Chinese font so chart text renders
    likes = info['點贊數(shù)']      # like counts
    reply = info['評論回復(fù)數(shù)']  # reply counts
    comment = info['評論內(nèi)容']  # comment texts
    # let the axes display Chinese characters
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # ******* bar chart of like counts
    fig, ax1 = plt.subplots()
    length = len(comment)
    plt.bar(x=np.arange(length), tick_label=comment, height=likes, color='red')
    plt.title('點贊數(shù)和評論數(shù)數(shù)據(jù)分析', fontproperties=my_font)  # chart title
    ax1.tick_params(labelsize=6)
    plt.xlabel('評論內(nèi)容')  # x-axis label
    plt.ylabel('點贊數(shù)')    # y-axis label
    plt.xticks(rotation=90, color='green')  # rotate and color the x tick labels
    # ******* line chart of reply counts
    ax2 = ax1.twinx()  # needed to combine two charts in one figure
    ax2.plot(reply, color='cyan')
    plt.ylabel('評論數(shù)')  # second y-axis label
    plt.plot(1, label='點贊數(shù)', color="red", linewidth=5.0)  # dummy artist so the legend gets an entry
    # plt.plot(1, label='評論回復(fù)數(shù)', color="cyan", linewidth=1.0, linestyle="-")
    plt.legend()
    plt.savefig('.\圖片\pic-{}.png'.format(bili_file), dpi=1000, bbox_inches='tight')  # save locally
    plt.show()

def main(bili_file):
    info = pd.read_csv(bili_file, engine='python', encoding='utf-8-sig')
    info = info.nlargest(60, '點贊數(shù)')
    info = info.reset_index(drop=True)
    view(info, bili_file)

if __name__ == '__main__':
    main('biliComment_15pages_BV1Ss4y1M7KT.csv')
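
The plt.plot(1, label=...) call above is a workaround: it draws a throwaway point just to register a legend entry. A cleaner pattern, sketched below with toy data, is to label the real artists and merge the handles from both twin axes into a single legend.

import numpy as np
import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()
x = np.arange(5)
ax1.bar(x, [10, 30, 20, 50, 40], color="red", label="likes")
ax2 = ax1.twinx()
ax2.plot(x, [3, 1, 4, 1, 5], color="cyan", label="replies")
# collect handles from both axes so one legend covers the bar and the line
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax1.legend(h1 + h2, l1 + l2, loc="upper left")
plt.show()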
3. bilibili.py
import requests
from urllib.parse import quote
import json
import time
from time import sleep
import pandas as pd
import hashlib
import bilihot_pic
import bilisearch_pic
"""
What the bilisearch class does
1. Parameters required at initialization
   search: the keyword to search for
   page: the number of result pages to fetch
2. Usage
   a = bilisearch(search, page)  # initialize the class
   a.findall()                   # save the crawled data to csv files
"""
class bilisearch():
    # the first parameter is the search keyword, the second the number of pages
    def __init__(self, search, page):
        # url-encode the input
        self.search = search
        self.searchurl = '&keyword=' + quote(search, 'utf-8')
        # browser-like request headers
        # the API apparently requires a cookie; test whether this one still works after a while
        self.head = {
            'authority': 'api.bilibili.com',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.44',
            'Referer': "https://search.bilibili.com/all?from_source=webtop_search&spm_id_from=333.1007&search_source=5keyword=",
            'referer': 'https://www.bilibili.com/',
            'cookie': 'buvid3=05746C34-6526-44A7-9132-4C0A7180E63C148796infoc; LIVE_BUVID=AUTO4216287558369376; i-wanna-go-back=-1; CURRENT_BLACKGAP=0; buvid4=CE2658E1-DE0F-1555-42F9-BBE8E7E701B973047-022012116-NXuDwzBl0l7IPmxDzx269g%3D%3D; buvid_fp_plain=undefined; blackside_state=0; is-2022-channel=1; _uuid=136F106D6-AA102-198A-C5DD-7351A72CFDE849203infoc; b_nut=100; rpdid=0zbfvWJdeE|54lJB1MA|2Ln|3w1OVksf; CURRENT_QUALITY=80; hit-new-style-dyn=1; CURRENT_PID=b98a29b0-cd2f-11ed-9194-494fac97dd7c; fingerprint=5050e9471226aa5c2be3ac56100522f8; header_theme_version=CLOSE; nostalgia_conf=-1; hit-dyn-v2=1; home_feed_column=5; CURRENT_FNVAL=4048; bp_video_offset_329341133=781400043392336000; SESSDATA=0948d8e9%2C1696396399%2Cef62d%2A42; bili_jct=cb7a5dbbd0153907fff4b713334d6833; DedeUserID=329341133; DedeUserID__ckMd5=acfa5c750e5b3e7f; PVID=1; b_ut=5; innersign=0; b_lsid=7C37E147_1875B2E5B1D; bsource=search_bing; buvid_fp=5050e9471226aa5c2be3ac56100522f8'
        }
        # number of pages to crawl
        self.page = page
        # crawled data
        # self.data = []
    def dataProcess(self, data):
        # rows to be written to the csv
        storedata = []
        # each page carries 30 entries (guarded in case a page is shorter)
        for i in range(min(30, len(data))):
            if data[i]['type'] == 'picture_ad_0':  # skip ad cards
                continue
            # author
            author = data[i]['author']
            # title; strip the <em class="keyword"> highlight tags
            title = data[i]['title'].replace('<em class="keyword">', '').replace('</em>', '')
            # play count
            play = data[i]['play']
            # description
            description = data[i]['description']
            # cover image
            pic = data[i]['pic']
            # video url
            arcurl = data[i]['arcurl']
            # id
            id = data[i]['id']
            # publish time
            pubdate = data[i]['pubdate']
            # convert the 10-digit timestamp to a time string
            timeArray = time.localtime(pubdate)
            pubdate = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
            # store the row
            # self.data.append({'author':author,'title':title,'play':play,'description':description,'pic':pic,'arcurl':arcurl,'id':id})
            storedata.append([author, title, play, description, pic, arcurl, id, pubdate])
        return storedata
    def reverse(self, page):
        # sign the query string: w_rid = md5(ordered query string + salt)
        timenow = int(time.time())
        if page == 1:
            an = f'refresh=true&_extra=&ad_resource=5646&context=&duration=&from_source=&from_spmid=333.337&highlight=1&keyword={self.search}&order=&page=1&page_size=42&platform=pc&qv_id=EfNJjEtrA0N5DxzPVKch7Kz6v33ezlFR&single_column=0&source_tag=3&web_location=1430654&wts={timenow}'
        else:
            an = f'refresh=true&_extra=&ad_resource=5654&category_id=&context=&dynamic_offset={str((page - 1) * 30)}&from_source=&from_spmid=333.337&gaia_vtoken=&highlight=1&keyword={self.search}&page={page}&page_size=42&platform=pc&qv_id=hJgZIEUY51fw9Pp7s8pidIVEJ7Z08KaS&search_type=video&single_column=0&source_tag=3&web_location=1430654&wts={timenow}'
        wt = '55540207d820a7368ab7e104169d409d'  # fixed salt; Bilibili rotates this periodically
        md = hashlib.md5((an + wt).encode('UTF-8'))
        return md.hexdigest(), timenow
    # search with the default ("comprehensive") ordering
    def findall(self):
        for pnum in range(1, int(self.page) + 1):
            # sign the query, then request the data
            w_rid, timenow = self.reverse(pnum)
            if pnum == 1:
                target = requests.get(
                    f'https://api.bilibili.com/x/web-interface/wbi/search/all/v2?__refresh__=true&_extra=&context=&page={pnum}&page_size=42&order=&duration=&from_source=&from_spmid=333.337&platform=pc&highlight=1&single_column=0&keyword={self.search}&qv_id=noyCOTfEBm8ZzMVGopKgzYbiqLFxoAn1&ad_resource=5646&source_tag=3&web_location=1430654&w_rid={w_rid}&wts={timenow}',
                    headers=self.head)
            else:
                target = requests.get(
                    f'https://api.bilibili.com/x/web-interface/wbi/search/all/v2?refresh=true&_extra=&ad_resource=5654&category_id=&context=&dynamic_offset={(pnum - 1) * 30}&from_source=&from_spmid=333.337&gaia_vtoken=&highlight=1&keyword={self.search}&page={pnum}&page_size=42&platform=pc&qv_id=hJgZIEUY51fw9Pp7s8pidIVEJ7Z08KaS&search_type=video&single_column=0&source_tag=3&web_location=1430654&w_rid={w_rid}&wts={timenow}',
                    headers=self.head)
            # parse the response into a Python object
            data = json.loads(target.text)
            # result[10] holds the video results on the "all" search page
            storedata = self.dataProcess(data['data']['result'][10]['data'])
            print('第', pnum, '頁完成')
            # write this page to its own csv
            self.storeCsvdata('b站清單_' + str(self.search) + '_第' + str(pnum) + '頁.csv', storedata, pnum)
            # wait 1 s between pages
            sleep(1)
    # write one page of results to a csv file
    def storeCsvdata(self, filename, storedata, pagenum):
        # column headers
        name = ['作者', '標題', '播放量', '簡介', '封面', '播放地址', 'id', '時間']
        # write via pandas (the original also opened the file by hand, which was redundant)
        writer = pd.DataFrame(storedata, columns=name)
        writer.to_csv(filename, index=False, encoding='utf-8-sig')
        bilisearch_pic.main(filename)
"""
bilihot類的功能
1.初始化需要的參數(shù)
無
2.使用方法
a = bilihot() 初始化
a.findall() 調(diào)用搜索
a.storeCsvdata() 儲存數(shù)據(jù)
a.data 可以查看數(shù)據(jù)
a.data[i][j] i為第幾個數(shù)據(jù)集合 j為['作者','標題','播放量','簡介','封面','id','播放地址','時間','分區(qū)']
"""
class bilihot():
def __init__(self):
# 構(gòu)造瀏覽器訪問請求頭
self.head = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.44',
'Referer': "https://search.bilibili.com/all?from_source=webtop_search&spm_id_from=333.1007&search_source=5keyword=",
'referer': 'https://www.bilibili.com/v/popular/rank/all',
'authority': 'api.bilibili.com',
}
# 保存一份數(shù)據(jù)
self.data = []
    def findall(self):
        # request the ranking data
        target = requests.get('https://api.bilibili.com/x/web-interface/ranking/v2?rid=0&type=all', headers=self.head)
        # parse the response into a Python object
        data = json.loads(target.text)
        for i in data['data']['list']:
            # author
            author = i['owner']['name']
            # title
            title = i['title']
            # play count
            play = i['stat']['view']
            # description
            desc = i['desc']
            # cover image
            pic = i['pic']
            # id
            id = i['aid']
            # video url
            arcurl = i['short_link_v2']
            # publish date
            pubdate = i['pubdate']
            # convert the 10-digit timestamp to a time string
            timeArray = time.localtime(pubdate)
            pubdate = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
            # category
            tname = i['tname']
            self.data.append([author, title, play, desc, pic, id, arcurl, pubdate, tname])
        print('請求數(shù)據(jù)成功')
    def storeCsvdata(self):
        # column headers
        name = ['作者', '標題', '播放量', '簡介', '封面', 'id', '播放地址', '時間', '分區(qū)']
        # write via pandas (the original also opened the file by hand, which was redundant)
        writer = pd.DataFrame(self.data, columns=name)
        writer.to_csv('b站排行榜.csv', index=False, encoding='utf-8-sig')
        print('寫入成功')
        bilihot_pic.main('b站排行榜.csv')
if __name__ == '__main__':
    # search: the keyword to search for
    search = input("請輸入搜索的關(guān)鍵詞")
    # page: the number of result pages to fetch
    page = int(input("請輸入搜索的頁數(shù)"))
    # initialize the class
    a = bilisearch(search, page)
    # save the crawled data to csv files
    a.findall()
    # initialize
    b = bilihot()
    # fetch the ranking
    b.findall()
    # save the data
    b.storeCsvdata()
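
The reverse() method implements Bilibili's WBI-style request signing: the w_rid parameter is the md5 of the alphabetically ordered query string concatenated with a salt. The sketch below generalizes that idea to any parameter dict; whether values must be percent-encoded before hashing is an assumption here (reverse() hashes a hand-assembled raw string), and the hard-coded salt rotates on Bilibili's side.

import hashlib
import time
from urllib.parse import urlencode

SALT = "55540207d820a7368ab7e104169d409d"  # value baked into reverse(); assumed still valid

def sign_params(params):
    """Return (w_rid, wts) for a dict of query parameters."""
    signed = dict(params, wts=int(time.time()))
    query = urlencode(sorted(signed.items()))  # alphabetical key order, percent-encoded
    w_rid = hashlib.md5((query + SALT).encode("utf-8")).hexdigest()
    return w_rid, signed["wts"]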
4. bilihot_pic.py
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import font_manager
import numpy as np

def view(info, bili_file):
    # Chinese font so chart text renders
    my_font = font_manager.FontProperties(fname='./STHeiti-TC-Medium.ttf')
    # let the axes display Chinese characters
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    title = info['標題']
    views = info['播放量']
    # ******* horizontal bar chart of play counts
    fig, ax1 = plt.subplots()
    length = len(title)
    plt.barh(y=np.arange(length), tick_label=title, width=views, color='cyan')
    plt.title('標題和播放量的數(shù)據(jù)分析', fontproperties=my_font)  # chart title
    ax1.tick_params(labelsize=6)
    plt.xlabel('播放量')  # x-axis label
    plt.ylabel('標題')    # y-axis label
    plt.yticks(color='green')  # color the y tick labels
    plt.plot(1, label='播放量', color="cyan", linewidth=5.0)  # dummy artist so the legend gets an entry
    plt.legend()
    plt.savefig('.\圖片\pic-{}.png'.format(bili_file), dpi=1000, bbox_inches='tight')  # save locally
    plt.show()

def main(bili_file):
    info = pd.read_csv(bili_file, engine='python', encoding='utf-8-sig')
    info = info.nlargest(50, '播放量')
    info = info.sort_values('播放量', ascending=True)
    view(info, bili_file)

if __name__ == '__main__':
    main('b站排行榜.csv')
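
Ranking-page play counts run into the millions, so the raw x-axis ticks get hard to read. One optional refinement (not in the original script) is a tick formatter; a minimal sketch with toy data:

import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

fig, ax = plt.subplots()
ax.barh(range(3), [1_200_000, 3_400_000, 560_000], color='cyan')
# render raw view counts as "1.2M"-style labels
ax.xaxis.set_major_formatter(FuncFormatter(lambda v, _: f"{v / 1e6:.1f}M"))
plt.show()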
5. bilisearch_pic.py
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import font_manager
import numpy as np

def view(info, bili_file):
    # Chinese font so chart text renders
    my_font = font_manager.FontProperties(fname='./STHeiti-TC-Medium.ttf')
    # let the axes display Chinese characters
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    title = info['標題']
    views = info['播放量']
    # ******* horizontal bar chart of play counts
    fig, ax1 = plt.subplots()
    length = len(title)
    plt.barh(y=np.arange(length), tick_label=title, width=views, color='green')
    plt.title('標題和播放量的數(shù)據(jù)分析', fontproperties=my_font)  # chart title
    ax1.tick_params(labelsize=6)
    plt.xlabel('播放量')  # x-axis label
    plt.ylabel('標題')    # y-axis label
    plt.yticks(color='blue')  # color the y tick labels
    plt.plot(1, label='播放量', color="green", linewidth=5.0)  # dummy artist so the legend gets an entry
    plt.legend()
    plt.savefig('.\圖片\pic-{}.png'.format(bili_file), dpi=1000, bbox_inches='tight')  # save locally
    plt.show()

def main(bili_file):
    info = pd.read_csv(bili_file, engine='python', encoding='utf-8-sig')
    info = info.sort_values('播放量', ascending=True)
    view(info, bili_file)

if __name__ == '__main__':
    main('b站清單_疫情_第1頁.csv')
6. draw_cloud.py
import numpy as np
import pandas as pd
from wordcloud import WordCloud, ImageColorGenerator
from PIL import Image

def draw_cloud(weibo_file):
    image = Image.open('.\\background.jpg')  # background silhouette image
    graph = np.array(image)
    # arguments: font, background color, maximum word count, mask shape
    wc = WordCloud(font_path='msyh.ttc', background_color='white', max_words=100, mask=graph)
    fp = pd.read_csv(weibo_file, engine='python', encoding='utf-8-sig')  # read the word-frequency file
    name = list(fp['熱搜內(nèi)容'])  # words
    value = fp['熱搜熱度']        # word frequencies
    for i in range(len(name)):
        name[i] = str(name[i])
    dic = dict(zip(name, value))  # store the frequencies as a dict
    print(dic)
    wc.generate_from_frequencies(dic)  # build the cloud from the given frequencies
    image_color = ImageColorGenerator(graph)  # colors sampled from the background image
    wc.recolor(color_func=image_color)  # apply them; the original computed image_color but never used it
    wc.to_file('.\圖片\draw_cloud-{}.png'.format(weibo_file))  # output file name

if __name__ == '__main__':
    draw_cloud('微博top_fun.csv')
7. weibo.py
import os.path
import re
from jsonpath import jsonpath
import requests
import pandas as pd
import datetime
from fake_useragent import UserAgent
import weibo_pic

def trans_time(v_str):
    """Convert a GMT-style time string to the standard format."""
    GMT_FORMAT = '%a %b %d %H:%M:%S +0800 %Y'
    timearray = datetime.datetime.strptime(v_str, GMT_FORMAT)
    ret_time = timearray.strftime("%Y-%m-%d %H:%M:%S")
    return ret_time
def get_weibo_list(v_keyword, v_max_page):
    """
    Crawl a list of Weibo posts.
    :param v_keyword: search keyword
    :param v_max_page: number of pages to crawl
    :return: None
    """
    # output file name
    v_weibo_file = '微博清單_{}_前{}頁.csv'.format(v_keyword, v_max_page)
    # if the csv already exists, delete it first
    if os.path.exists(v_weibo_file):
        os.remove(v_weibo_file)
        print('微博清單存在,已刪除:{}'.format(v_weibo_file))
    for page in range(1, v_max_page + 1):
        print('===開始爬取第{}頁微博==='.format(page))
        # request headers (ua is instantiated but unused; a static UA is sent instead)
        ua = UserAgent()
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.42",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-encoding": "gzip, deflate, br"
        }
        # request url
        url = 'https://m.weibo.cn/api/container/getIndex'
        # request parameters
        params = {
            "containerid": "100103type=1&q={}".format(v_keyword),
            "page_type": "searchall",
            "page": page
        }
        # send the request
        r = requests.get(url, headers=headers, params=params)
        print(r.status_code)
        # parse the json payload
        cards = r.json()["data"]["cards"]
        # post text
        text_list = jsonpath(cards, '$..mblog.text')
        # strip embedded html tags from the post text with a regex
        dr = re.compile(r'<[^>]+>', re.S)
        text2_list = []
        print('text_list is:')
        print(text_list)
        if not text_list:  # nothing found on this page, move on to the next one
            continue
        if type(text_list) == list and len(text_list) > 0:
            for text in text_list:
                text2 = dr.sub('', text)  # remove the tags
                print(text2)
                text2_list.append(text2)
        # post creation time
        time_list = jsonpath(cards, '$..mblog.created_at')
        time_list = [trans_time(v_str=i) for i in time_list]
        # post author
        author_list = jsonpath(cards, '$..mblog.user.screen_name')
        # author id
        id_list = jsonpath(cards, '$..mblog.user.id')
        # post bid
        bid_list = jsonpath(cards, '$..mblog.bid')
        # repost count
        reposts_count_list = jsonpath(cards, '$..mblog.reposts_count')
        # comment count
        comments_count_list = jsonpath(cards, '$..mblog.comments_count')
        # like count
        attitudes_count_list = jsonpath(cards, '$..mblog.attitudes_count')
        df = pd.DataFrame(
            {
                '頁碼': [page] * len(id_list),
                '微博id': id_list,
                '微博bid': bid_list,
                '微博作者': author_list,
                '發(fā)布時間': time_list,
                '微博內(nèi)容': text2_list,
                '轉(zhuǎn)發(fā)數(shù)': reposts_count_list,
                '評論數(shù)': comments_count_list,
                '點贊數(shù)': attitudes_count_list
            }
        )
        # write the header row only if the csv does not exist yet
        if os.path.exists(v_weibo_file):
            header = None
        else:
            header = ['頁碼', '微博id', '微博bid', '微博作者', '發(fā)布時間', '微博內(nèi)容', '轉(zhuǎn)發(fā)數(shù)', '評論數(shù)', '點贊數(shù)']
        column = ['頁碼', '微博id', '微博bid', '微博作者', '發(fā)布時間', '微博內(nèi)容', '轉(zhuǎn)發(fā)數(shù)', '評論數(shù)', '點贊數(shù)']
        # append this page to the csv file
        df.to_csv(v_weibo_file, mode='a+', index=False, columns=column, header=header, encoding='utf-8-sig')
        print('csv保存成功:{}'.format(v_weibo_file))
    # data cleaning: deduplicate
    df = pd.read_csv(v_weibo_file, engine='python', encoding='utf-8-sig')
    os.remove(v_weibo_file)
    # drop duplicate rows
    df.drop_duplicates(subset='微博bid', inplace=True, keep='first')
    # save the cleaned csv
    header = ['頁碼', '微博id', '微博bid', '微博作者', '發(fā)布時間', '微博內(nèi)容', '轉(zhuǎn)發(fā)數(shù)', '評論數(shù)', '點贊數(shù)']
    column = header
    df.to_csv(v_weibo_file, mode='a+', index=False, columns=column, header=header, encoding='utf-8-sig')
    print('數(shù)據(jù)清洗完成')
    weibo_pic.main(v_weibo_file)
if __name__ == '__main__':
    # search keyword
    search_keyword = input("請輸入搜索的關(guān)鍵詞")
    # number of pages to crawl
    max_search_page = int(input("請輸入搜索的頁數(shù)"))
    # start crawling
    get_weibo_list(v_keyword=search_keyword, v_max_page=max_search_page)
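
The jsonpath expressions above are what keep the parser short: '$..' is a recursive descent, so a field is collected no matter how deeply the mblog object is nested inside a card. A tiny self-contained demo of that behavior (toy data, same jsonpath package):

from jsonpath import jsonpath

cards = [
    {"mblog": {"text": "hello", "user": {"screen_name": "alice"}}},
    {"card_group": [{"mblog": {"text": "world", "user": {"screen_name": "bob"}}}]},
]
# '$..mblog.text' matches at any depth, so posts inside card_group are found too
print(jsonpath(cards, '$..mblog.text'))              # ['hello', 'world']
print(jsonpath(cards, '$..mblog.user.screen_name'))  # ['alice', 'bob']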
8. weibo_comment.py
import requests  # send HTTP requests
import pandas as pd  # save csv files
import os  # check whether files exist
import datetime
import time
from time import sleep  # wait between requests to avoid anti-crawling blocks
import json
import random  # generate random numbers
import os.path
import csv
import re
import weibo_comment_pic

def trans_time(v_str):
    """Convert a GMT-style time string to the standard format."""
    GMT_FORMAT = '%a %b %d %H:%M:%S +0800 %Y'
    timearray = datetime.datetime.strptime(v_str, GMT_FORMAT)
    ret_time = timearray.strftime("%Y-%m-%d %H:%M:%S")
    return ret_time
def get_weibo_comment(weiboID_list, max_page):  # renamed from get_bili_comment, a copy-paste leftover
    for weibo_id in weiboID_list:
        # output file name
        wbComment_file = 'weiboComment_{}pages_{}.csv'.format(max_page, weibo_id)
        # if the csv already exists, delete it first
        if os.path.exists(wbComment_file):
            os.remove(wbComment_file)
            print('存在,已刪除:{}'.format(wbComment_file))
        # request headers
        headers = {
            # without a cookie only the first page can be crawled
            'cookie': '__bid_n=1883c7fc76e10d57174207; FPTOKEN=IBsER/uKazbtpMIEgvaOTfAuHsmYQM5g0VL9U1G3ybs72PsWHEBbiKv0w+R59BrOvSwxDKJevIDwL0SSwPV5yWd3lIFsx6KXQ/qYPpPTjTRW5kFr+j74rsScC6MKc1G9142e5tEEf7atvY/zTxl9B6jy/y7MEo0ETLT0VjL6nbpzkWe/SnIw97Tjb+9lqYoGHS6lPqZ5yAhDPKn0KK4htwxqr0qMglAG6ZcT7mn+BUZAygRSrqWZwZ6KSE0r27qsR0bDTAI8dsQFq1gPfYONp5UHfw9FFsBiscLULixqm31wTHYziK8gxi0/R6yIQ8Tq3OQkNmx+Kw7E/8YknGOiVmpjfRn5FNShZs3/t8SNBJEcZ9qaQnw/iF/jwPoFkMXz87Tp22aQUmFgeQu/u0wAYQ==|wC9ITrusKUtoBk6wTqvs+jaY6iwSJyX4pD0y+hSvnOA=|10|acf98643db3def55913fefef5034d5ee; WEIBOCN_FROM=1110106030; loginScene=102003; SUB=_2A25JbkPWDeRhGeNH7FIV-SjKzjyIHXVqkW2erDV6PUJbkdAGLRbkkW1NSoXhCHcUhbni8gGXfjdc5HNqec9qABj_; MLOGIN=1; _T_WM=98495433469; XSRF-TOKEN=a62fb7; mweibo_short_token=9f0e28d6c9; M_WEIBOCN_PARAMS=oid%3D4903111417922777%26luicode%3D20000061%26lfid%3D4903111417922777%26uicode%3D20000061%26fid%3D4903111417922777',
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
            'X-Xsrf-Token': 'a62fb7'
        }
        max_id = ''
        for page in range(1, max_page + 1):
            if page == 1:  # the first page has no max_id parameter
                url = 'https://m.weibo.cn/comments/hotflow?id={}&mid={}&max_id_type=0'.format(weibo_id, weibo_id)
            else:
                if max_id == '0':  # max_id == '0' means there is no next page; stop
                    print('max_id==0,break now')
                    break
                url = 'https://m.weibo.cn/comments/hotflow?id={}&mid={}&max_id={}&max_id_type=0'.format(weibo_id, weibo_id, max_id)
            response = requests.get(url, headers=headers)
            print(response.status_code)
            # cursor for the next page; cast to str so the '0' sentinel check above actually matches
            max_id = str(response.json()['data']['max_id'])
            print(max_id)
            datas = response.json()['data']['data']
            page_list = []
            id_list = []
            text_list = []
            time_list = []
            like_count_list = []
            source_list = []
            username_list = []
            user_id_list = []
            user_gender_list = []
            follow_count_list = []
            followers_count_list = []
            for data in datas:
                page_list.append(page)
                id_list.append(data['id'])
                dr = re.compile(r'<[^>]+>', re.S)  # strip html tags from the comment with a regex
                text2 = dr.sub('', data['text'])
                text_list.append(text2)  # comment text
                time_list.append(trans_time(data['created_at']))  # comment time
                like_count_list.append(data['like_count'])  # likes
                source_list.append(data['source'])  # commenter region
                username_list.append(data['user']['screen_name'])  # commenter name
                user_id_list.append(data['user']['id'])
                user_gender_list.append(data['user']['gender'])  # commenter gender
                follow_count_list.append(data['user']['follow_count'])  # commenter following count
                followers_count = str(data['user']['followers_count'])
                if followers_count[-1] == '萬':
                    # e.g. '3.2萬' -> 32000; multiply before truncating so the fraction survives
                    followers_count = int(float(followers_count.strip('萬')) * 10000)
                followers_count_list.append(followers_count)  # commenter follower count
            # assemble the lists into a DataFrame
            df = pd.DataFrame({
                '評論頁碼': page_list,
                '微博id': [weibo_id] * len(time_list),
                '評論id': id_list,
                '評論內(nèi)容': text_list,
                '評論時間': time_list,
                '評論點贊數(shù)': like_count_list,
                '評論屬地': source_list,
                '評論者姓名': username_list,
                '評論者id': user_id_list,
                '評論者性別': user_gender_list,
                '評論者關(guān)注數(shù)': follow_count_list,
                '評論者粉絲數(shù)': followers_count_list,
            })
            # write the header row only if the csv does not exist yet
            if os.path.exists(wbComment_file):
                header = None
            else:
                header = ['評論頁碼', '微博id', '評論id', '評論內(nèi)容', '評論時間', '評論點贊數(shù)', '評論屬地', '評論者姓名', '評論者id', '評論者性別', '評論者關(guān)注數(shù)', '評論者粉絲數(shù)']
            column = ['評論頁碼', '微博id', '評論id', '評論內(nèi)容', '評論時間', '評論點贊數(shù)', '評論屬地', '評論者姓名', '評論者id', '評論者性別', '評論者關(guān)注數(shù)', '評論者粉絲數(shù)']
            # append this page to the csv file
            df.to_csv(wbComment_file, mode='a+', index=False, columns=column, header=header, encoding='utf-8-sig')
            print('第{}頁爬取完成'.format(page))
        # data cleaning: deduplicate
        df = pd.read_csv(wbComment_file, engine='python', encoding='utf-8-sig')
        os.remove(wbComment_file)
        # drop duplicate rows
        df.drop_duplicates(subset='評論內(nèi)容', inplace=True, keep='first')
        # save the cleaned csv
        column = header = ['評論頁碼', '微博id', '評論id', '評論內(nèi)容', '評論時間', '評論點贊數(shù)', '評論屬地', '評論者姓名',
                           '評論者id', '評論者性別', '評論者關(guān)注數(shù)', '評論者粉絲數(shù)']
        df.to_csv(wbComment_file, mode='a+', index=False, columns=column, header=header, encoding='utf-8-sig')
        print('數(shù)據(jù)清洗完成')
        weibo_comment_pic.main(wbComment_file)
if __name__ == '__main__':
    # target post: https://m.weibo.cn/detail/4903111417922777
    # target Weibo IDs; several can be crawled in one run
    weiboID_list = [str(x) for x in input("請輸入微博ID(示例:4903111417922777),以逗號分隔:").split(',')]
    # weiboID_list = ['4903111417922777']
    # maximum number of pages to crawl
    max_page = int(input("請輸入搜索的頁數(shù)"))
    # start crawling
    get_weibo_comment(weiboID_list=weiboID_list, max_page=max_page)
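
The follower-count normalization above only handles the '萬' suffix. If counts with an '億' suffix can appear (an assumption; the script only handles '萬'), a small helper keeps the parsing in one place:

def parse_count(raw):
    """Normalize Weibo-style counts such as '3.2萬' to plain integers.
    The '億' branch is speculative; only '萬' is handled in the script above."""
    s = str(raw)
    if s.endswith('萬'):
        return int(float(s[:-1]) * 10_000)
    if s.endswith('億'):
        return int(float(s[:-1]) * 100_000_000)
    return int(s)

print(parse_count('3.2萬'))  # 32000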
9. weibo_comment_pic.py
# allow duplicate OpenMP runtimes so the plot libraries do not abort
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import font_manager
import numpy as np
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

def view(info, weibo_file):
    my_font = font_manager.FontProperties(fname='./STHeiti-TC-Medium.ttf')  # Chinese font so chart text renders
    likes = info['評論點贊數(shù)']      # like counts
    reply = info['評論者粉絲數(shù)']    # follower counts
    forward = info['評論者關(guān)注數(shù)']  # following counts
    author = info['評論者姓名']     # commenter names (the comment texts are too long for tick labels)
    # let the axes display Chinese characters
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # ******* bar chart of comment like counts
    fig, ax1 = plt.subplots()
    length = len(author)
    plt.bar(x=np.arange(length), tick_label=author, height=likes, color='blue')
    plt.title('評論點贊數(shù)、粉絲數(shù)和關(guān)注數(shù)的數(shù)據(jù)分析', fontproperties=my_font)  # chart title
    ax1.tick_params(labelsize=6)
    plt.xlabel('評論者姓名')  # x-axis label (corrected: the ticks show commenter names)
    plt.ylabel('評論點贊數(shù)')  # y-axis label
    plt.xticks(rotation=90, color='green')  # rotate and color the x tick labels
    # ******* line chart of commenter follower counts
    ax2 = ax1.twinx()  # needed to combine two charts in one figure
    ax2.plot(reply, color='red')
    # ******* line chart of commenter following counts
    ax2.plot(forward, color='yellow')
    plt.ylabel('粉絲/關(guān)注數(shù)')  # second y-axis label
    plt.plot(1, label='評論點贊數(shù)', color="blue", linewidth=5.0)  # dummy artist so the legend gets an entry
    # plt.plot(1, label='評論者粉絲數(shù)', color="red", linewidth=1.0, linestyle="-")
    # plt.plot(1, label='評論者關(guān)注數(shù)', color="yellow", linewidth=1.0, linestyle="-")
    plt.legend()
    plt.savefig('.\圖片\pic-{}.png'.format(weibo_file), dpi=1000, bbox_inches='tight')  # save locally
    plt.show()

def main(weibo_file):
    info = pd.read_csv(weibo_file, engine='python', encoding='utf-8-sig')
    info = info.nlargest(100, '評論點贊數(shù)')
    info = info.reset_index(drop=True)
    view(info, weibo_file)

if __name__ == '__main__':
    main('weiboComment_15pages_4903111417922777.csv')
10. weibo_pic.py
# allow duplicate OpenMP runtimes so the plot libraries do not abort
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import font_manager
import numpy as np
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

def view(info, weibo_file):
    my_font = font_manager.FontProperties(fname='./STHeiti-TC-Medium.ttf')  # Chinese font so chart text renders
    likes = info['點贊數(shù)']    # like counts
    reply = info['評論數(shù)']    # comment counts
    forward = info['轉(zhuǎn)發(fā)數(shù)']  # repost counts
    author = info['微博作者']  # authors (the post texts are too long for tick labels)
    # let the axes display Chinese characters
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # ******* bar chart of like counts
    fig, ax1 = plt.subplots()
    length = len(author)
    plt.bar(x=np.arange(length), tick_label=author, height=likes, color='blue')
    plt.title('點贊數(shù)、評論數(shù)和轉(zhuǎn)發(fā)數(shù)的數(shù)據(jù)分析', fontproperties=my_font)  # chart title
    ax1.tick_params(labelsize=6)
    plt.xlabel('微博作者')  # x-axis label (corrected: the ticks show authors)
    plt.ylabel('點贊數(shù)')    # y-axis label
    plt.xticks(rotation=90, color='green')  # rotate and color the x tick labels
    # ******* line chart of comment counts
    ax2 = ax1.twinx()  # needed to combine two charts in one figure
    ax2.plot(reply, color='red')
    # ******* line chart of repost counts
    ax2.plot(forward, color='yellow')
    plt.ylabel('評論/轉(zhuǎn)發(fā)數(shù)')  # second y-axis label
    plt.plot(1, label='點贊數(shù)', color="blue", linewidth=5.0)  # dummy artist so the legend gets an entry
    # plt.plot(1, label='評論數(shù)', color="red", linewidth=1.0, linestyle="-")
    # plt.plot(1, label='轉(zhuǎn)發(fā)數(shù)', color="yellow", linewidth=1.0, linestyle="-")
    plt.legend()
    plt.savefig('.\圖片\pic-{}.png'.format(weibo_file), dpi=1000, bbox_inches='tight')  # save locally
    plt.show()

def main(weibo_file):
    info = pd.read_csv(weibo_file, engine='python', encoding='utf-8-sig')
    info = info.nlargest(100, '點贊數(shù)')
    info = info.reset_index(drop=True)
    view(info, weibo_file)

if __name__ == '__main__':
    main('微博清單_疫情_前10頁.csv')
11. weibo_top.py
import os.path
import re
from jsonpath import jsonpath
import requests
import pandas as pd
from fake_useragent import UserAgent
import weibo_top_pic
import weibo_top_pie
import draw_cloud

def get_weibo_top():
    # the filter_type values of the four boards to crawl
    keyword = list(['realtimehot', 'gym', 'game', 'fun'])
    for search_keyword in keyword:
        # output file name
        v_weibo_file = '微博top_{}.csv'.format(search_keyword)
        # if the csv already exists, delete it first
        if os.path.exists(v_weibo_file):
            os.remove(v_weibo_file)
            print('微博榜單存在,已刪除:{}'.format(v_weibo_file))
        print('===開始爬取{}微博榜單==='.format(search_keyword))
        # request headers (ua is instantiated but unused; a static UA is sent instead)
        ua = UserAgent()
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.42",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-encoding": "gzip, deflate, br"
        }
        # request url
        url = 'https://m.weibo.cn/api/container/getIndex'
        # request parameters
        params = {
            "containerid": "106003type=25&t=3&disable_hot=1&filter_type={}".format(search_keyword),
            "title": "微博熱搜",
            "show_cache_when_error": 1,
            "extparam": "seat=1&dgr=0&filter_type=realtimehot&region_relas_conf=0&pos=0_0&c_type=30&lcate=1001&mi_cid=100103&cate=10103&display_time=1684642048&pre_seqid=144917672",
            "luicode": 10000011,
            "lfid": 231583,
        }
        # send the request
        r = requests.get(url, headers=headers, params=params)
        print(r.status_code)
        # parse the json payload
        cards = r.json()["data"]["cards"][0]["card_group"]
        # topic text
        text_list = jsonpath(cards, '$..desc')
        print('text_list is:')
        print(text_list)
        # topic link
        href_list = jsonpath(cards, '$..scheme')
        # rank marker (an icon url for special entries)
        order_list = jsonpath(cards, '$..pic')
        # topic heat
        view_count_list = jsonpath(cards, '$..desc_extr')
        j = 1
        for i in range(0, len(order_list)):
            if order_list[i] == 'https://simg.s.weibo.com/20210408_search_point_orange.png':
                # promoted entry: no rank, heat forced to 0
                order_list[i] = '無'
                view_count_list[i] = 0
                continue
            if order_list[i] == "https://simg.s.weibo.com/20180205110043_img_search_stick%403x.png":
                # pinned entry: it carries no heat value, so insert a 0 to keep the lists aligned
                view_count_list.insert(0, 0)
                order_list[i] = '無'
                continue
            view_count_list[i] = str(view_count_list[i])
            view_count_list[i] = int(re.sub(r"\D", "", view_count_list[i]))  # keep digits only
            order_list[i] = j
            j = j + 1
        print(len(order_list), len(text_list), len(view_count_list), len(href_list))
        df = pd.DataFrame(
            {
                '熱搜排名': order_list,
                '熱搜內(nèi)容': text_list,
                '熱搜熱度': view_count_list,
                '熱搜連接地址': href_list,
            }
        )
        # write the header row only if the csv does not exist yet
        if os.path.exists(v_weibo_file):
            header = None
        else:
            header = ['熱搜排名', '熱搜內(nèi)容', '熱搜熱度', '熱搜連接地址']
        column = ['熱搜排名', '熱搜內(nèi)容', '熱搜熱度', '熱搜連接地址']
        # save to the csv file
        df.to_csv(v_weibo_file, mode='a+', index=False, columns=column, header=header, encoding='utf-8-sig')
        print('csv保存成功:{}'.format(v_weibo_file))
        weibo_top_pic.main(v_weibo_file)
        weibo_top_pie.pie(v_weibo_file)
        # draw_cloud.draw_cloud(v_weibo_file)

if __name__ == '__main__':
    # crawl the Weibo boards
    get_weibo_top()
12. weibo_top_pic.py
# allow duplicate OpenMP runtimes so the plot libraries do not abort
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import font_manager
import numpy as np
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

def view(info, weibo_file):
    my_font = font_manager.FontProperties(fname='./STHeiti-TC-Medium.ttf')  # Chinese font so chart text renders
    heat = info['熱搜熱度']
    content = info['熱搜內(nèi)容']
    # let the axes display Chinese characters
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # ******* bar chart of topic heat
    fig, ax1 = plt.subplots()
    length = len(content)
    plt.bar(x=np.arange(length), tick_label=content, height=heat, color='blue')
    plt.title('熱搜內(nèi)容和熱搜熱度的數(shù)據(jù)分析', fontproperties=my_font)  # chart title
    ax1.tick_params(labelsize=6)
    plt.xlabel('熱搜內(nèi)容')  # x-axis label
    plt.ylabel('熱搜熱度')  # y-axis label
    plt.xticks(rotation=90, color='green')  # rotate and color the x tick labels
    plt.plot(1, label='熱搜熱度', color="blue", linewidth=5.0)  # dummy artist so the legend gets an entry
    plt.legend()
    plt.savefig('.\圖片\pic-{}.png'.format(weibo_file), dpi=1000, bbox_inches='tight')  # save locally
    plt.show()
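
The listing ends at view(), but weibo_top.py calls weibo_top_pic.main(v_weibo_file), so the entry point was evidently lost when the post was assembled. A reconstruction that mirrors the sibling *_pic scripts (read the csv, keep the hottest rows, plot) would be:

def main(weibo_file):
    # reconstructed to match the other *_pic scripts; weibo_top.py calls this
    info = pd.read_csv(weibo_file, engine='python', encoding='utf-8-sig')
    info = info.nlargest(50, '熱搜熱度')
    info = info.reset_index(drop=True)
    view(info, weibo_file)

if __name__ == '__main__':
    main('微博top_realtimehot.csv')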
13. weibo_top_pie.py
import pandas as pd
import matplotlib.pyplot as plt

def pie(weibo_file):
    # let the chart display Chinese characters
    plt.rcParams['font.family'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    data = pd.read_csv(weibo_file, engine='python', encoding='utf-8-sig')
    df1 = data['熱搜內(nèi)容']
    df2 = data['熱搜熱度']
    X = df1
    Y = []
    s = sum(df2)
    # normalize each heat value to its share of the total
    for i in df2:
        a = i / s
        a = round(a, 2)
        Y.append(a)
    plt.figure(figsize=(12, 12))
    plt.pie(x=Y,
            labels=X,
            wedgeprops={'width': 0.4},  # width < 1 turns the pie into a donut
            startangle=90,
            autopct='%.2f%%',
            pctdistance=0.9
            )
    plt.title('熱搜對應(yīng)的熱度占比', fontsize=20)
    plt.savefig('.\圖片\pie-{}.png'.format(weibo_file), dpi=1000, bbox_inches='tight')  # save locally
    plt.show()

if __name__ == '__main__':
    pie('微博top_realtimehot.csv')
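
With fifty-odd topics the pie above becomes unreadable. An optional pre-processing step (not in the original script) is to keep the top slices and fold the remainder into a single '其他' row before calling pie():

import pandas as pd

def top_n_with_other(df, n=15):
    """Keep the n hottest topics and merge the rest into one 'other' slice."""
    top = df.nlargest(n, '熱搜熱度')
    rest = df['熱搜熱度'].sum() - top['熱搜熱度'].sum()
    other = pd.DataFrame([{'熱搜內(nèi)容': '其他', '熱搜熱度': rest}])
    return pd.concat([top, other], ignore_index=True)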
14. pachong.py
import weibo
import weibo_top
import weibo_comment
import bilibili
import bili_comment

net = int(input("請選擇爬取的網(wǎng)站:1.微博 2.b站 3.停止爬取"))
while net != 3:
    if net == 1:
        choice1 = int(input("請選擇爬取的方向:1.排行榜 2.關(guān)鍵詞 3.評論"))
        if choice1 == 1:
            # crawl the Weibo boards
            weibo_top.get_weibo_top()
        if choice1 == 2:
            # search keyword
            search_keyword = input("請輸入搜索的關(guān)鍵詞")
            # number of pages to crawl
            max_search_page = int(input("請輸入搜索的頁數(shù)"))
            # crawl the Weibo post list
            weibo.get_weibo_list(v_keyword=search_keyword, v_max_page=max_search_page)
        if choice1 == 3:
            # target Weibo IDs; several can be crawled in one run
            weiboID_list = [str(x) for x in input("請輸入微博ID(示例:4903111417922777),以逗號分隔:").split(',')]
            # maximum number of pages to crawl
            max_page = int(input("請輸入搜索的頁數(shù)"))
            # crawl the comments (renamed function, see weibo_comment.py above)
            weibo_comment.get_weibo_comment(weiboID_list=weiboID_list, max_page=max_page)
    if net == 2:
        choice2 = int(input("請選擇爬取的方向:1.排行榜 2.關(guān)鍵詞 3.評論"))
        if choice2 == 1:
            # initialize
            b = bilibili.bilihot()
            # fetch the ranking
            b.findall()
            # save the data
            b.storeCsvdata()
        if choice2 == 2:
            # search: the keyword to search for
            search = input("請輸入搜索的關(guān)鍵詞")
            # page: the number of result pages to fetch
            page = int(input("請輸入搜索的頁數(shù)"))
            # initialize the class
            a = bilibili.bilisearch(search, page)
            # save the crawled data to csv files
            a.findall()
        if choice2 == 3:
            # video BV ids; comments from several videos can be crawled in one run
            bv_list = [str(x) for x in input("請輸入視頻bv號(示例:BV1Ss4y1M7KT,BV1VM411N7qc),以逗號分隔:").split(',')]
            # maximum number of pages to crawl
            max_page = int(input("請輸入搜索的頁數(shù)"))
            # crawl the comments
            bili_comment.get_bili_comment(bv_list=bv_list, max_page=max_page)
    net = int(input("請選擇爬取的網(wǎng)站:1.微博 2.b站 3.停止爬取"))
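
The menu relies on bare int(input(...)) calls, which raise ValueError on any non-numeric entry. A small helper (not in the original) would make the loop survive typos:

def ask_int(prompt):
    """Re-prompt until the user enters an integer instead of crashing."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print('請輸入數(shù)字')

# usage: net = ask_int("請選擇爬取的網(wǎng)站:1.微博 2.b站 3.停止爬取")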
15. File overview
pachong: top-level program driving the Bilibili/Weibo crawlers and visualizations
Bilibili:
    bilibili          crawls the Bilibili ranking and keyword search
    bili_comment      crawls Bilibili comments
    bilihot_pic       visualizes the Bilibili ranking (bar chart)
    bilisearch_pic    visualizes Bilibili keyword-search results (bar chart)
    bili_comment_pic  visualizes Bilibili comments (bar + line charts)
Weibo:
    weibo_top         crawls the Weibo hot-search boards
    weibo             crawls Weibo keyword search
    weibo_comment     crawls Weibo comments
    weibo_top_pic     visualizes the Weibo hot-search board (bar chart)
    weibo_top_pie     visualizes the Weibo hot-search board (donut chart)
    weibo_pic         visualizes Weibo keyword-search results (bar + line charts)
    weibo_comment_pic visualizes Weibo comments (bar + line charts)
    draw_cloud        visualizes the Weibo hot-search board (word cloud)