之前在麦子学院看了黄老师关于爬虫的基础课程,自己试着练习一下,不过和黄老师的有些区别,我使用了BeautifulSoup包,等于是改写了一下黄老师的代码吧。
以下是实现代码,直接上干货
# coding:utf-8
import urllib2,re
from bs4 import BeautifulSoup
import bs4
def retrive_tangshi_300():
    """Scrape the gushiwen.org Tang-poem index page.

    Returns a list of dicts, one per poem link found inside the
    ``div.guwencont2`` blocks, each with keys:
        'url'    -- the href of the poem's <a> tag
        'title'  -- text before the parenthesis in "title(author)"
        'author' -- text inside the parenthesis
    """
    url = 'http://www.gushiwen.org/gushi/tangshi.aspx'
    r = urllib2.urlopen(url)
    try:
        soup = BeautifulSoup(r.read(), 'html.parser', from_encoding='utf-8')
    finally:
        # Close the HTTP response even if parsing fails.
        r.close()
    # Link text looks like "title(author)"; compile once, not per child node.
    pattern = re.compile(r'(.*)\((.*)\)')
    shige_list = []
    for tag in soup.find_all('div', class_="guwencont2"):
        for t in tag.children:
            # Children include NavigableString whitespace; only Tag nodes
            # (the <a> links) are of interest.
            if not isinstance(t, bs4.element.Tag):
                continue
            # t.string is None for tags with nested markup -- guard before
            # matching, otherwise pattern.match(None) raises TypeError.
            if t.string is None:
                continue
            m = pattern.match(t.string)
            if m:
                shige_list.append({
                    'url': t['href'],
                    'title': m.group(1),
                    'author': m.group(2),
                })
    return shige_list
if __name__ == '__main__':
r = retrive_tangshi_300()
print 'url: ',r[0]['url'],'\n 作者:',r[0]['author'],'\n 题目:',r[0]['title']
网友评论