# -*- coding: utf-8 -*-
import pandas as pd
from pandas import DataFrame
from bs4 import BeautifulSoup
import requests
import time
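# The Top 250 list spans 10 pages of 25 movies each; build the URL for every page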
urls=['https://movie.douban.com/top250?start={}&filter='.format(str(i)) for i in range(0,250,25)]
#url='https://movie.douban.com/mine?status=wish'
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36',
'Cookie':'gr_user_id=1ba0f40c-adb5-4011-9f66-ef641710c42e; viewed="1419678_1786120"; RT=s=1466650874130&r=https%3A%2F%2Fmovie.douban.com%2Ftop250%3Fstart%3D75%26filter%3D; ll="108091"; __utmt_t1=1; __utma=30149280.1565775292.1453707266.1466481018.1466650178.19; __utmb=30149280.15.8.1466650881759; __utmc=30149280; __utmz=30149280.1466650178.19.18.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; bid="0K5aWKIHxEU"'
}
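# Accumulator lists: one entry per movie, appended page by page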
title=[]
image=[]
actor=[]
impression=[]
rate=[]
evalu_num=[]
# Scrape and display the Top 250 movies' titles, image URLs, actors, etc.
def allfilm(web_url):
    web_data=requests.get(web_url,headers=headers)
    soup=BeautifulSoup(web_data.text,'lxml')
    time.sleep(2)  # pause between pages so the site is not hit too quickly
    titles=soup.select('#content > div > div.article > ol > li > div > div.info > div.hd > a')
    images=soup.select('#content > div > div.article > ol > li > div > div.pic > a > img')
    actors=soup.select('#content > div > div.article > ol > li > div > div.info > div.bd > p:nth-of-type(1)')  # first <p> tag under div.bd
    impressions=soup.select('#content > div > div.article > ol > li > div > div.info > div.bd > p:nth-of-type(2)')
    rates=soup.select('#content > div > div.article > ol > li > div > div.info > div.bd > div > span.rating_num')
    evalu_nums=soup.select('#content > div > div.article > ol > li > div > div.info > div.bd > div > span:nth-of-type(4)')
    for title1,image1,actor1,impression1,rate1,evalu_num1 in zip(titles,images,actors,impressions,rates,evalu_nums):
        title.append(title1.get_text().replace('\xa0',' ').strip())
        image.append(image1.get('src'))
        actor.append(actor1.get_text().replace('\xa0',' ').strip())
        impression.append(impression1.get_text())
        rate.append(rate1.get_text())
        evalu_num.append(evalu_num1.get_text())
for single_url in urls:
    allfilm(single_url)
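# Assemble the scraped columns into a dict keyed by field name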
data={'title':title,
      'image':image,
      'actor':actor,
      'impression':impression,
      'rate':rate,
      'evalu_num':evalu_num}
#frame=DataFrame(data,columns=[u'电影名',u'图片链接',u'演员',u'印象',u'评分',u'评价数'])
print('done!!')
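# Build the DataFrame, then relabel its columns with Chinese headers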
frame=DataFrame(data,columns=['title','image','actor','impression','rate','evalu_num'])
frame.columns=[u'电影名',u'图片链接',u'演员',u'印象',u'评分',u'评价数']
# Write the DataFrame to a csv or xlsx file
frame.to_excel('C:\\Users\\zhchenjia\\Desktop\\doubanfilm.xlsx',index=True)
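# A CSV alternative (untested sketch, same desktop path assumed as above); 'utf-8-sig'
# keeps the Chinese column headers readable when the file is opened in Excel:
# frame.to_csv('C:\\Users\\zhchenjia\\Desktop\\doubanfilm.csv',index=True,encoding='utf-8-sig')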