流程框架
- 抓取单页内容:利用requests请求目标站点,得到单个网页HTML代码,返回结果
- 正则表达式分析:根据HTML代码分析得到电影的名称、主演、上映时间、评分、图片链接等信息
- 保存至文件:通过文件的形式将结果保存
- 开启循环及多线程:对多页内容遍历,开启多线程提高抓取速度
实例
import requests
from requests.exceptions import RequestException
import re
import json
from multiprocessing import Pool
def get_one_page(url):
    """Fetch a single page and return its HTML text.

    Returns None either when the server answers with a non-200 status
    or when the request fails at the requests level.
    """
    ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
          'AppleWebKit/537.36 (KHTML, like Gecko) '
          'Chrome/79.0.3945.130 Safari/537.36')
    try:
        response = requests.get(url, headers={'user-agent': ua})
    except RequestException:
        # Network-level failure (timeout, DNS, connection reset, ...).
        return None
    return response.text if response.status_code == 200 else None
def parse_one_page(html):
    """Extract movie records from one page of the Maoyan Top100 board.

    Yields one dict per ``<dd>`` entry with keys: index, image, title,
    actor, time, score (all values are strings).
    """
    # Raw strings: the original non-raw pattern made "\d" an invalid
    # escape sequence (DeprecationWarning today, SyntaxError in a
    # future Python release).
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name">'
        r'<a.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">'
        r'(.*?)</p>.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>',
        re.S)
    for item in re.findall(pattern, html):
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            # Drop the 3-character "主演:" prefix.
            'actor': item[3].strip()[3:],
            # Drop the 5-character "上映时间:" prefix.
            'time': item[4].strip()[5:],
            # Integer part and fraction part are captured separately.
            'score': item[5] + item[6],
        }
def write_to_file(content):
    """Append one record to Maoyan_Top100.txt as a single JSON line.

    The original called ``f.close()`` inside the ``with`` block — the
    context manager already closes the file, so the call was redundant
    and has been removed.
    """
    with open('Maoyan_Top100.txt', 'a', encoding='utf-8') as f:
        # ensure_ascii=False keeps the Chinese text human-readable.
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
def main(offset):
    """Crawl one page of the board at the given offset and persist each record.

    Fetches the page, parses every movie entry out of it, prints each
    record and appends it to the output file.
    """
    # Bug fix: the original built '...?offset10' (missing '='), so the
    # query parameter was never parsed and every request silently
    # fetched the first page.
    url = 'https://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:
        # Fetch failed; skip this page instead of crashing in re.findall.
        return
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)
# Script entry point. The original had TWO `if __name__ == '__main__'`
# guards that BOTH executed, crawling all ten pages twice — once
# serially, then again through the pool. Keep a single entry point;
# the serial variant is left as a commented alternative.
if __name__ == '__main__':
    # Serial alternative:
    # for i in range(10):
    #     main(i * 10)
    pool = Pool()
    try:
        pool.map(main, [i * 10 for i in range(10)])
    finally:
        # Release worker processes (the original leaked the pool).
        pool.close()
        pool.join()
网友评论