# coding: utf-8
"""Scrape the Huxiu homepage navigation menu to discover news channels."""
import json

import requests
from lxml import etree

root_url = 'https://www.huxiu.com'
# Desktop Chrome UA so the site serves the regular HTML page rather than a
# mobile or anti-bot variant.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}
# Fail fast on network stalls / HTTP errors instead of parsing an error page.
response = requests.get(root_url, headers=headers, timeout=10)
response.raise_for_status()
html = response.text
select = etree.HTML(html)
# Anchors in the "资讯" (news) column of the header menu; each one links to a
# channel page like /channel/104.html.
zixun_infos = select.xpath('//ul[@class="header-column header-column1 header-column-zx menu-box"]/li/a')
# Build one {channel_name, catId} record per navigation anchor.
items = []
for info in zixun_infos:
    texts = info.xpath('text()')
    hrefs = info.xpath('@href')
    if not texts or not hrefs:
        # Skip malformed anchors instead of crashing on an empty [0] index.
        continue
    channel_name = texts[0]
    # The href looks like "/channel/104.html"; the numeric middle part is the
    # catId the ajax endpoint expects.
    catId = hrefs[0].replace('/channel/', '').replace('.html', '')
    items.append({'channel_name': channel_name, 'catId': catId})
# Query each channel's paginated ajax endpoint and print a short summary of
# the first page of results.
post_url = 'https://www.huxiu.com/channel/ajaxGetMore'
for item in items:
    channel_name = item['channel_name']
    catId = item['catId']
    post_data = {
        # NOTE(review): hard-coded hash scraped from the live site; it will
        # likely expire — confirm it is still accepted before relying on it.
        'huxiu_hash_code': '073a0ed48ad2ab31d4d1a4675783dbd7',
        'page': '1',
        'catId': catId,
    }
    resp = requests.post(post_url, data=post_data, headers=headers, timeout=10)
    # The endpoint returns JSON; resp.json() replaces json.loads(resp.text).
    dict_data = resp.json()
    parse_data = dict_data['data']
    total_page = parse_data['total_page']
    # parse_data['data'] holds an HTML fragment with the page's articles;
    # print only its first 40 characters as a sanity check.
    data = parse_data['data']
    print(channel_name, catId, total_page, data.strip()[0:40])
# 网友评论 (netizen comments) — stray trailing text; commented out so the script parses.