from bs4 import BeautifulSoup
import requests
import time
import random

content_all = []
link_all = []
def get_content(url):
    # Fetch one detail page and pull out the fields we want with CSS selectors.
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    title = soup.select('div.pho_info > h4 > em')[0].get_text()
    address = soup.select('div.pho_info > p')[0].get('title').strip()
    price = soup.select('#pricePart > div.day_l > span')[0].get_text()
    house_img = soup.select('#curBigImage')[0].get('src')
    master_img = soup.select('#floatRightBox > div.js_box.clearfix > div.member_pic > a > img')[0].get('src')
    master_name = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > a')[0].get_text()
    master_gender = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > span')[0].get('class')[0]
    data = {
        'title': title,
        'address': address,
        'price': int(price),
        'house_img': house_img,
        'master_img': master_img,
        'master_name': master_name,
        'master_gender': master_gender
    }
    # The host's gender is only exposed as a CSS class on an icon, so map it to a label.
    if data['master_gender'] == 'member_girl_ico':
        data['master_gender'] = 'female'
    else:
        data['master_gender'] = 'male'
    return data
def get_single_web(url):
    # Fetch one listing page and collect the links to all of its detail pages.
    link_emp = []
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    urls = soup.select('a.resule_img_a')
    for i in urls:
        link_emp.append(i.get('href'))
    return link_emp
url_all = ['http://xa.*******.com/search-duanzufang-p{}-0/'.format(i) for i in range(1, 2)]
count = 0
for i in url_all:
    web_data_host_links = get_single_web(i)
    link_all += web_data_host_links  # gather every detail-page link into one list
    time.sleep(random.randrange(1, 3))
for j in link_all:
    count += 1
    detail = get_content(j)  # scrape the detail page once and reuse the result
    print('link is {} num is {} \n {}'.format(j, count, detail))  # print each link and its scraped content
    content_all.append(detail)  # each detail page becomes a dict, collected into a list
    time.sleep(random.randrange(1, 3))  # wait a random 1-2 seconds between requests
sort_content_all = sorted(content_all, key=lambda x: x['price'])
print(sort_content_all)
Approach:
1. Enumerate all of the listing pages (only one page is fetched above; see the sketch after this list).
2. For each listing page, collect the list of detail-page links.
3. Save each detail page's fields as a structured dictionary.
4. Locate the content to scrape on the detail page with CSS selectors.
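Step 1 is only stubbed in the script above: range(1, 2) builds a single listing URL. A minimal sketch of enumerating more pages, assuming the site keeps the same "-p{n}-0" URL pattern and that the total number of result pages is known; the page_count value here is hypothetical.

# Assumption: every listing page follows the "-p{n}-0" pattern used above,
# and page_count holds the real number of result pages (13 is a made-up value).
page_count = 13
url_all = ['http://xa.*******.com/search-duanzufang-p{}-0/'.format(i)
           for i in range(1, page_count + 1)]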
Key takeaways (see the short example below):
Using lambda x: x[...] as a sort key
str.format() for formatted output
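A small self-contained illustration of both points, using made-up data rather than the scraped results:

# sorted() with a lambda key: order the room dicts by their integer price.
rooms = [{'title': 'A', 'price': 128}, {'title': 'B', 'price': 98}]
cheapest_first = sorted(rooms, key=lambda x: x['price'])
print(cheapest_first)  # [{'title': 'B', 'price': 98}, {'title': 'A', 'price': 128}]

# str.format(): the same pattern as the progress line in the main loop.
print('link is {} num is {}'.format('http://example.com/room/1', 1))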