# Import packages
import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
file_url = "http://maoyan.com/board/4?" + "offset="  # base URL of the Maoyan Top 100 list (first page)
url_setx = ["http://maoyan.com/board/4?"]  # kept for testing
url_set = ["http://maoyan.com/board/4?"]  # will hold the URLs of all ten pages
for i in range(10, 100, 10):
    url_set.append("http://maoyan.com/board/4?offset=" + str(i))
print(url_set)
# Build the ten Maoyan Top 100 page URLs by concatenating the offset parameter in a loop
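# An equivalent construction with a list comprehension, shown only as an
# alternative sketch (offset=0 corresponds to the first page, so the bare base
# URL is kept as the first element):
url_set = ["http://maoyan.com/board/4?"] + [
    "http://maoyan.com/board/4?offset=" + str(i) for i in range(10, 100, 10)
]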
name = []  # film names
star = []  # lead actors
date = []  # release dates
for url in url_set:
    html = requests.get(url).content
    x = BeautifulSoup(html, "html.parser")
    # Film title: <a class="image-link"> tags whose href points to /films/<id>
    y = x.find_all(name="a", attrs={"href": re.compile(r"/films/[0-9]{1,6}"), "class": "image-link"})
    print(y)
    for i in y:
        i0 = re.sub(string=i.attrs['title'], pattern="\n", repl="")
        name.append(i0)
    # Lead actors: <p class="star">, stripping the "主演:" prefix, spaces and newlines
    z = x.find_all(name="p", attrs={"class": "star"})
    for i in z:
        i1 = re.sub(string=i.string, pattern=u"主演:", repl="")
        i1 = re.sub(string=i1, pattern=" *", repl="")
        i1 = re.sub(string=i1, pattern="\n", repl="")
        star.append(i1)
    # Release date: <p class="releasetime">, stripping the "上映时间:" prefix and any region in parentheses
    p = x.find_all(name="p", attrs={"class": "releasetime"})
    for i in p:
        date.append(re.sub(string=re.sub(string=i.string, pattern=u"上映时间:", repl=""), pattern=r"\(.*\)", repl=""))
# Combine the three columns into a DataFrame
data = {"name": name, "star": star, "date": date}
x = pd.DataFrame(data)
print(x)
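# Optional follow-up: persist the table to disk. The file name
# "maoyan_top100.csv" is just an illustrative choice; utf-8-sig keeps the
# Chinese text readable when the CSV is opened in Excel.
x.to_csv("maoyan_top100.csv", index=False, encoding="utf-8-sig")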