Case 1
This is the site's URL.
My goal is to scrape all of the data, from both the list pages and the detail pages.
The site looks easy to scrape: everything is in a very standard format. The only wrinkles are that the detail page goes through one URL redirect, and that pagination carries no URL parameters but does send POST data. Everything looked straightforward.
Pagination approach 1
from selenium import webdriver
from bs4 import BeautifulSoup
import time

driver = webdriver.Firefox()
driver.get(response.url)
soup = BeautifulSoup(driver.page_source, 'lxml')
for i in range(300):
    # Parse every row of the current results table
    table = soup.find('table', class_='Winstar-table').find_all('tr')
    # ... parse and store each row ...
    # Click "next page", wait for it to load, then re-parse
    driver.find_element_by_link_text('[下一页]').click()
    time.sleep(2)
    soup = BeautifulSoup(driver.page_source, 'lxml')
driver.close()
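The fixed time.sleep(2) works but is fragile: if a page takes longer than two seconds to load, the next parse reads stale HTML. A sketch of a more robust variant using Selenium's explicit waits (my suggestion, not part of the original script; it assumes the old table element is replaced when the new page renders):

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Remember an element from the current page, click "next page",
# then wait until that element goes stale (i.e. the page has reloaded)
old_table = driver.find_element_by_class_name('Winstar-table')
driver.find_element_by_link_text('[下一页]').click()
WebDriverWait(driver, 10).until(EC.staleness_of(old_table))
soup = BeautifulSoup(driver.page_source, 'lxml')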
Pagination approach 2
import requests
from lxml import etree

# Grab the initial ASP.NET postback tokens from the first page
VIEWSTATE = response.xpath("//input[@id='__VIEWSTATE']/@value").extract_first()
EVENTVALIDATION = response.xpath("//input[@id='__EVENTVALIDATION']/@value").extract_first()
for i in range(392):
    data = {
        '__EVENTTARGET': '_ctl9$lbtnNextPage',
        '__EVENTARGUMENT': '',
        '__LASTFOCUS': '',
        '__VIEWSTATE': VIEWSTATE,
        '__EVENTVALIDATION': EVENTVALIDATION,
        '_ctl9:dprlEnterpriseType': '01',
        '_ctl9:dprlEnterpriseClassify': '',
        '_ctl9:txtEnterpriseName': '',
        '_ctl9:txtGotoPage': ''
    }
    # ... parse and store the current page ...
    res = requests.post(response.url, headers=self.headers1, data=data)
    html = etree.HTML(res.text)
    # The tokens change on every postback, so re-extract them for the next request
    VIEWSTATE = html.xpath("//input[@id='__VIEWSTATE']/@value")[0]
    EVENTVALIDATION = html.xpath("//input[@id='__EVENTVALIDATION']/@value")[0]
For approach 2 I initially parsed with BeautifulSoup, but it didn't work; a friend switched to XPath, and it turned out I had simply left out one of the form parameters.
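For reference, the same token extraction works fine with BeautifulSoup too; a minimal sketch (my reconstruction, not the original code):

from bs4 import BeautifulSoup

soup = BeautifulSoup(res.text, 'lxml')
# The hidden inputs carry the postback tokens; read them by id
VIEWSTATE = soup.find('input', id='__VIEWSTATE')['value']
EVENTVALIDATION = soup.find('input', id='__EVENTVALIDATION')['value']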
Next, on to the detail pages.
[Screenshot 1]
Screenshot 1 shows the detail-page URL obtained from the list page, but the content at that URL is incomplete; the real information lives below.
[Screenshot 2]
Judging from the shape of the request URLs, the whole problem reduces to finding a RelatingID, so I read the source of the requesting page:
function tonclick(id)
{
    if (id.length == 3)  // check id length: only length-3 ids are actual leaf nodes
    {
        var ran = randomnumber = Math.floor(Math.random() * 100);
        var itemtype = GetUrlParam("ItemType");
        var itemid = GetUrlParam("ItemID");
        var tabid = GetUrlParam("tabid");
        var moduleid = GetUrlParam("moduleid");
        var resourceID = GetUrlParam("ResourceID");
        window.open(InfoTable.OpenUrl(itemtype, itemid, id).value + "&tabid=" + tabid + "&moduleid=" + moduleid + "&ResourceID=" + resourceID + "&Ran=" + ran, 'main')
    }
};
function GetUrlParam(paramName)
{
    var oRegex = new RegExp('[\?&]' + paramName + '=([^&]+)', 'i');
    var oMatch = oRegex.exec(window.location.search);
    if (oMatch && oMatch.length > 1)
        return oMatch[1];
    else
        return '';
}
function loadxml(tableid)
{
    document.getElementById("treebox").innerHTML = "";  // clear the tree first
    tree = new dhtmlXTreeObject("treebox", "100%", "100%", 0);
    tree.setImagePath("Skin/Default/Image/");
    tree.setOnClickHandler(tonclick);
    tree.loadXMLString('<FolderGroup Id="0"><FolderItem Name="企业信息" Id="01" open="1" im0="books_close.gif" im1="books_open.gif" im2="books_close.gif"><FolderItem Id="011" Name="企业概况" Table="ConstructionAptitudes" im0="book_gray.gif" ShowAbout="2" LocalShow="00" LinkAddress="../../DesktopModules/BaseInfo/EnterpriseSurvey.aspx" Key="GUID" RelatingKey="EnterpriseTypeGUID" UserPurview="001111" System="111" IsDefault="true" /></FolderItem></FolderGroup>');
    tonclick(tableid);  // load the default page
}
function loadpage(itemtype)
{
    var itemid = GetUrlParam("ItemID");
    var tabid = GetUrlParam("tabid");
    var moduleid = GetUrlParam("moduleid");
    var resourceID = GetUrlParam("ResourceID");
    location.href = "InfoTable.aspx?ItemID=" + itemid + "&ItemType=" + itemtype + "&tabid=" + tabid + "&moduleid=" + moduleid + "&ResourceID=" + resourceID;
}
At first I figured I'd have to reverse-engineer this script to recover the RelatingID, but look a little closer and you'll see it isn't hidden anywhere complicated.
[Screenshot 3]
Opening the detail-page URL fires a request to /ajax/JSJG.CollectionLoad.WebUI.InfoTable,JSJG.CollectionLoad.WebUI.ashx, whose response yields the real detail URL.
Here comes the key point.
[Screenshot 4] The call to /ajax/JSJG.CollectionLoad.WebUI.InfoTable,JSJG.CollectionLoad.WebUI.ashx takes its parameters as a raw request payload.
Below is the code that fetches the RelatingID:
import re
import requests

# The onclick attribute on each list-page row holds the intermediate detail URL
referer = 'http://host/' + tr.find('a')['onclick'].split('(\'')[1].split('\'')[0]
ItemID = referer.split('ItemID=')[1].split('&')[0]
ItemType = '01'
url = 'http://host/ajax/JSJG.CollectionLoad.WebUI.InfoTable,JSJG.CollectionLoad.WebUI.ashx?_method=OpenUrl&_session=no'
# The body is plain text, one key=value pair per CRLF-separated line, not a form dict
payload = 'itemtype={}\r\nitemid={}\r\ntable=011'.format(ItemType, ItemID)
headers = {
    'Referer': str(referer),
    'Host': 'host',
    'Content-Type': 'text/plain;charset=UTF-8',
    'Origin': 'http://host',
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Content-Length': '67',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.18 Safari/537.36',
}
res = requests.post(url, headers=headers, data=payload)
RelatingID = re.search("RelatingID=(.*?)'", res.text).group(1)
A special reminder: pay close attention to how the payload is passed.
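The distinction that matters here: passing a str to requests' data parameter sends it verbatim as the request body, while passing a dict form-encodes it. A minimal illustration (the URL is a stand-in):

import requests

# data as a str: sent byte-for-byte as the body (what this ashx endpoint expects)
requests.post('http://host/example', data='itemtype=01\r\nitemid=42\r\ntable=011')
# body on the wire: itemtype=01\r\nitemid=42\r\ntable=011

# data as a dict: urlencoded, with Content-Type application/x-www-form-urlencoded
requests.post('http://host/example', data={'itemtype': '01', 'itemid': '42'})
# body on the wire: itemtype=01&itemid=42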
At this point we have the RelatingID; concatenate it into the detail URL, parse, save, and the job is done!
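The post doesn't show the concatenation itself; a plausible sketch, assuming the ashx response contains the full relative detail URL in single quotes (my reconstruction, the exact response format may differ):

from bs4 import BeautifulSoup
import re
import requests

# Pull the whole quoted URL (which already embeds the RelatingID) and fetch it
m = re.search(r"'([^']*RelatingID=[^']*)'", res.text)
detail_url = 'http://host/' + m.group(1).lstrip('./')
detail = requests.get(detail_url, headers=headers)
detail_soup = BeautifulSoup(detail.text, 'lxml')
# ... extract the fields and save ...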
But when the program finished, I found that only about six pages of data had been saved.
The cause: I had used two headers variables. The first headers was defined at the top, before all the methods; the second was inside one method.
# At the top, before all the methods:
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.18 Safari/537.36'}

# Inside one method:
self.headers['Referer'] = referer
self.headers['Host'] = 'host'
self.headers['Content-Type'] = 'text/plain;charset=UTF-8'
self.headers['Origin'] = 'http://host'
self.headers['Accept'] = '*/*'
self.headers['Accept-Encoding'] = 'gzip, deflate'
self.headers['Accept-Language'] = 'zh-CN,zh;q=0.9'
self.headers['Connection'] = 'keep-alive'
self.headers['Content-Length'] = '67'
The pit I fell into: the two share a name, but inside the method I accessed the dict through self, so those assignments mutated the shared class-level headers rather than a method-local copy. Every later request then went out with the contaminated global headers instead of the clean ones the method was supposed to use.
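A minimal reproduction of the pitfall, detached from the spider (class and method names are made up for illustration):

class Spider:
    # Class-level dict, shared by every request the spider makes
    headers = {'User-Agent': 'UA'}

    def fetch_detail(self):
        # Intended as a one-off header, but self.headers resolves to the shared
        # class attribute, so the mutation leaks into all later requests
        self.headers['Content-Length'] = '67'

    def fetch_list_page(self):
        # Now unexpectedly carries Content-Length: 67
        print(self.headers)

s = Spider()
s.fetch_detail()
s.fetch_list_page()   # {'User-Agent': 'UA', 'Content-Length': '67'}

The fix is to build a fresh dict inside the method, e.g. headers = dict(self.headers) plus the extra fields, and pass that to requests.post, leaving the shared dict untouched.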
After fixing that and rerunning, I got all 5k+ records. Happy!