I've seen plenty of Python crawler write-ups online that scrape Douban's Top 250 movies, so I figured I'd write one myself for fun.
1. Preparation:
Looking at the Douban URL, you can see that turning the page simply increases the start parameter by 25.
So all we need to do is bump the start value in the URL by 25 each time.
The fields we scrape are: movie title / rank / director and cast info / score / slogan.
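As a quick illustration of the pagination, the ten start offsets (and the URLs we will request) can be generated like this; this is just a sketch, the real crawler below builds the same URL inside its crawler() function:

# The Top 250 list spans ten pages of 25 movies each, so the "start"
# query parameter takes the values 0, 25, 50, ..., 225.
start_list = list(range(0, 250, 25))
for num in start_list:
    print("https://movie.douban.com/top250?start={}&filter=".format(num))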
import pandas as pd
import requests
from lxml import etree
# Crawl Douban's Top 250 movies and save them to an Excel sheet (a small crawler demo).
def crawler(num):
    """
    Fetch one page of the Top 250 list and return the parsed HTML tree.
    :return: lxml HTML element for the requested page
    """
    base_url = "https://movie.douban.com/top250?start={}&filter=".format(num)
    # Douban tends to block requests that use the default python-requests
    # User-Agent, so pretend to be a regular browser.
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
    r = requests.get(base_url, headers=headers)
    r.encoding = 'utf-8'
    res = r.text
    html = etree.HTML(res)
    return html
def analysis(html):
    """Extract rank, title, director/cast line, score and slogan for each of the 25 movies on one page."""
    film_list = []
    for j in range(1, 26):
        rank_data = html.xpath('//*[@id="content"]/div/div[1]/ol/li[{}]/div/div[1]/em/text()'.format(j))[0]
        film_name = html.xpath('//*[@id="content"]/div/div[1]/ol/li[{}]/div/div[2]/div[1]/a/span[1]/text()'.format(j))[0]
        film_star = html.xpath('normalize-space(//*[@id="content"]/div/div[1]/ol/li[{}]/div/div[2]/div[2]/p[1]/text()[1])'.format(j))
        score = html.xpath('//*[@id="content"]/div/div[1]/ol/li[{}]/div/div[2]/div[2]/div/span[2]/text()'.format(j))[0]
        slogan = html.xpath('//*[@id="content"]/div/div[1]/ol/li[{}]/div/div[2]/div[2]/p[2]/span/text()'.format(j))
        # Not every movie has a slogan, so guard against an empty result.
        if len(slogan) == 0:
            slogan_value = "没有slogan"
        else:
            slogan_value = slogan[0]
        film_list.append((rank_data, film_name, film_star, score, slogan_value))
    return film_list
def run_data(start_list):
    """Crawl every page in start_list, merge the results and write them to Excel."""
    all_film_list = []
    for i in start_list:
        crawler_data = crawler(i)
        analysis_data = analysis(crawler_data)
        for film_msg in analysis_data:
            all_film_list.append(film_msg)
    all_film_dataframe = pd.DataFrame(all_film_list, columns=["排名", "电影名称", "简介", "评分", "slogan"])
    print(all_film_dataframe)
    all_film_dataframe.to_excel("D:/work/film.xls", index=False)
    print("电影下载完毕")
def main():
    """
    Entry point: crawl the ten pages of the Top 250 list.
    :return:
    """
    start_list = [0, 25, 50, 75, 100, 125, 150, 175, 200, 225]
    run_data(start_list)

if __name__ == '__main__':
    main()
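One note on the Excel step: recent pandas releases deprecated and then removed the xlwt engine that wrote legacy .xls files, so if the to_excel call above fails on your setup, saving as .xlsx with openpyxl installed is the easy alternative. A minimal sketch (the path is just an example), replacing the to_excel line inside run_data:

# pip install openpyxl
all_film_dataframe.to_excel("D:/work/film.xlsx", index=False)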
The result looks like this:
[screenshot: 捕获.PNG]
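As a side note on the parsing: analysis() above indexes li[1] through li[25] with absolute XPaths. An alternative is to iterate over the <li> nodes and query each one with relative XPaths, which is shorter and a bit less brittle. A minimal sketch, assuming the class names Douban currently uses on the page (grid_view, hd, bd, rating_num, quote):

def analysis_relative(html):
    film_list = []
    # Each movie on the page is one <li> under <ol class="grid_view">.
    for item in html.xpath('//ol[@class="grid_view"]/li'):
        rank_data = item.xpath('.//em/text()')[0]
        film_name = item.xpath('.//div[@class="hd"]/a/span[1]/text()')[0]
        film_star = item.xpath('normalize-space(.//div[@class="bd"]/p[1]/text()[1])')
        score = item.xpath('.//span[@class="rating_num"]/text()')[0]
        slogan = item.xpath('.//p[@class="quote"]/span/text()')
        slogan_value = slogan[0] if slogan else "没有slogan"
        film_list.append((rank_data, film_name, film_star, score, slogan_value))
    return film_list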
Bye~