I had long been curious about the web crawlers people keep mentioning and wanted to try the technique myself. My needs: downloading novels, literature abstracts, and scraping various bioinformatics database sites. As a warm-up, I picked my favorite novel, 魔道祖师 (Mo Dao Zu Shi).
If you know a little HTML/CSS/JS, the whole job reduces to locating the right tags in the page and reading them out, which is quite simple. A quick sketch of the tag-matching idea first, then the full script.
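To see the idea in isolation, here is a tiny self-contained sketch (the HTML snippet is invented to mirror the structure the script below expects, not copied from the site): BeautifulSoup parses the markup, and find_all pulls out tags by name, class, or id.

    from bs4 import BeautifulSoup

    # Invented snippet mimicking a chapter-list page: one wrapper div, one <a> per chapter.
    html = '''
    <div class="article_texttitleb">
      <a href="/33489/1.html">第一章</a>
      <a href="/33489/2.html">第二章</a>
    </div>
    '''

    soup = BeautifulSoup(html, 'html.parser')
    toc = soup.find_all('div', class_='article_texttitleb')[0]
    for a in toc.find_all('a'):
        print(a.string, a.get('href'))  # prints each chapter title and its relative link

The full downloader applies exactly this pattern to the real table of contents and chapter pages: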
import sys

import requests
from bs4 import BeautifulSoup


class downloader(object):

    def __init__(self):
        self.server = 'http://www.bequgew.com'
        self.target = 'http://www.bequgew.com/33489/'
        self.names = []  # chapter titles
        self.urls = []   # chapter links
        self.nums = 0    # chapter count

    def get_download_url(self):
        """Fetch the table of contents and collect each chapter's title and URL."""
        req = requests.get(url=self.target)
        req.encoding = req.apparent_encoding  # let requests guess the page encoding
        html = req.text
        div_bf = BeautifulSoup(html, 'html.parser')
        div = div_bf.find_all('div', class_='article_texttitleb')
        a = div[0].find_all('a')  # chapter links all live inside the first matching div
        # self.nums = len(a[15:])  # drop unwanted chapters, then count
        self.nums = len(a)  # I want them all
        for each in a:
            self.names.append(each.string)
            self.urls.append(self.server + each.get('href'))

    def get_contents(self, target):
        """Download one chapter page and return its body text."""
        req = requests.get(url=target)
        req.encoding = req.apparent_encoding
        html = req.text
        bf = BeautifulSoup(html, 'html.parser')
        texts = bf.find_all('div', id='book_text')
        # runs of four non-breaking spaces mark paragraph indents; turn them into newlines
        texts = texts[0].text.replace('\xa0' * 4, '\n')
        return texts

    def writer(self, name, path, text):
        """Append one chapter (title plus body) to the output file."""
        with open(path, 'a', encoding='utf-8') as f:
            f.write(name + '\n')
            f.writelines(text)
            f.write('\n\n')


if __name__ == "__main__":
    dl = downloader()
    dl.get_download_url()
    print('《魔道祖师》开始下载:')
    for i in range(dl.nums):
        print(dl.names[i])
        print(dl.urls[i])
        dl.writer(dl.names[i], '魔道祖师.txt', dl.get_contents(dl.urls[i]))
        sys.stdout.write("  已下载:%.3f%%" % (i / dl.nums * 100) + '\r')
        sys.stdout.flush()
    print('《魔道祖师》下载完成')
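One practical tweak worth considering (my own suggestion, not part of the original example): fetching hundreds of chapters in a tight loop can get you throttled or blocked, so it is politer and more robust to send a browser-like User-Agent and pause between requests. A minimal sketch, assuming the site tolerates slow sequential fetching:

    import time

    import requests

    HEADERS = {'User-Agent': 'Mozilla/5.0'}  # look like an ordinary browser

    def polite_get(url, delay=1.0):
        # requests.get with a friendlier header, a timeout, and a rest between calls
        resp = requests.get(url, headers=HEADERS, timeout=10)
        resp.encoding = resp.apparent_encoding
        time.sleep(delay)
        return resp.text

Swapping polite_get in for the two bare requests.get calls above leaves the rest of the script unchanged.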
Credit: adapted from an example by Jack-Cui.