While playing games over the weekend I came across quite a few good-looking skins and thought it would be nice to download them as wallpapers. No sooner thought than done, so I got started right away.
It really comes down to two main steps:
- First, write a Python function that downloads an image from a given URL;
- Then, analyze the target page and write Selenium code that collects the download URLs in bulk.
Python code for downloading an image from the web
import urllib.request

def download_url_img(img_url, img_address):
    # Fetch the image bytes from the URL and write them to the given file path
    response = urllib.request.urlopen(img_url)
    img = response.read()
    with open(img_address, 'wb') as f:
        f.write(img)
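As a quick standalone check, the helper can be called with any direct image link; the URL and file name below are just placeholders for illustration, not addresses taken from the wallpaper site.
if __name__ == '__main__':
    # Placeholder URL and file name, only to exercise download_url_img by itself
    download_url_img('https://example.com/sample.jpg', 'sample.jpg')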
Using Selenium to locate the elements and collect the image URLs; the full code is below
import os
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from Utils.down_save_url_img import download_url_img  # the download helper from the first block, saved under Utils/down_save_url_img.py
def driver_wait(dr, xpath):
    # Wait up to 20 seconds, polling every 2 seconds, for the element located by xpath to be present
    WebDriverWait(dr, 20, 2).until(EC.presence_of_element_located((By.XPATH, xpath)))
iterms = []  # (skin name, image URL) tuples collected from every page
driver = webdriver.Chrome()
driver.get('https://pvp.qq.com/web201605/wallpaper.shtml###')
while True:
    sleep(2)
    # Every wallpaper card on the current page sits inside the p_hd container
    img_class_names = driver.find_element_by_class_name('p_hd').find_elements_by_class_name('p_newhero_item')
    for class_name in img_class_names:
        name = class_name.find_element_by_tag_name('h4').find_element_by_tag_name('a').text
        lis = class_name.find_element_by_tag_name('ul').find_elements_by_tag_name('li')  # all li elements under the ul
        img_url = lis[1].find_element_by_tag_name('a').get_attribute("href")  # pick whichever li you need
        iterm = name, img_url
        iterms.append(iterm)
    totalpage = driver.find_element_by_class_name('totalpage').text
    if int(totalpage.split('/')[0]) != int(totalpage.split('/')[1]):  # check whether this is the last page
        # click to go to the next page
        driver.find_element_by_class_name('downpage').click()
    else:
        break
driver.quit()
print(iterms)
for iterm in iterms:
    address = "/Users/Pictures/wzry/"
    os.chdir(address)  # switch to the directory where the wallpapers are saved
    img_address = iterm[0] + '_1280*720.jpg'
    download_url_img(iterm[1], img_address)
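The driver_wait helper defined above is never actually called in the script; below is a minimal sketch of how it could replace the fixed sleep(2) at the top of the while loop. The XPath is an assumption pieced together from the p_hd and p_newhero_item class names used above, so verify it against the live page. Also note that newer Selenium releases drop the find_element_by_* helpers in favor of find_element(By.CLASS_NAME, ...), so the script as written assumes a Selenium version that still ships them.
# Sketch: wait for the wallpaper list instead of sleeping a fixed 2 seconds.
# The XPath below is an assumption based on the class names used above.
def wait_for_wallpaper_list(dr):
    driver_wait(dr, "//*[contains(@class, 'p_hd')]//*[contains(@class, 'p_newhero_item')]")

# Inside the while loop, replace sleep(2) with:
# wait_for_wallpaper_list(driver)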