美文网首页
day69-Selenium爬取动态渲染页面

day69-Selenium爬取动态渲染页面

作者: barriers | 来源:发表于2019-02-21 20:13 被阅读0次

    1.chromedriver环境配置

    安装selenium:pip install selenium
    下载谷歌浏览器版本对应的chromedriver
    配置chromedriver环境变量:将文件路径添加到path
    python环境中启动浏览器:
    from selenium import webdriver
    brower = webdriver.Chrome()
    mac中退出python环境 quit()

    2.selenium的使用

    from selenium import webdriver
    from selenium.common.exceptions import TimeoutException
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver import ActionChains
    def f1(browser):
        """Demonstrate common Selenium node-lookup APIs on a video-detail page."""
        # Load the page
        browser.get('https://www.mkv99.com/vod-detail-id-9462.html')
        # HTML after JavaScript rendering
        print(browser.page_source)
        # Current URL
        print(browser.current_url)
        # Browser cookies
        print(browser.get_cookies())
        # Look up a single node by id
        input1 = browser.find_element_by_id('1thUrlid第01集')
        print(input1)
        # Read a node attribute
        print(input1.get_attribute('href'))
        # Look up multiple nodes with a CSS selector
        input_list = browser.find_elements_by_css_selector('.dwon2')
        for item in input_list:
            print(item.get_attribute('href'))
        # Node coordinates on the page
        print(input1.location)
        # Node width and height
        print(input1.size)
        # Single node via XPath (find_elements_by_xpath returns a list of nodes)
        input3 = browser.find_element_by_xpath('//*[@class="dwon2"]')
        print(input3.get_attribute('id'))
        # Single node by its name attribute
        input4 = browser.find_element_by_name('CopyAddr1')
        print(input4.tag_name)
        # Single node by exact link text
        input5 = browser.find_element_by_link_text('今日更新')
        # All nodes whose link text contains the given substring
        input6 = browser.find_elements_by_partial_link_text('下载')
        # Text content of a node
        print(input5.text)
        print(input6)
    def f2(browser):
        """Drag the #draggable element onto #droppable inside the demo iframe."""
        demo_url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
        browser.get(demo_url)
        # The drag-and-drop demo lives inside an iframe; switch into it first
        browser.switch_to.frame('iframeResult')
        drag_src = browser.find_element_by_css_selector('#draggable')
        drop_dst = browser.find_element_by_css_selector('#droppable')
        # Build an action chain that drags the source onto the target, then run it
        chain = ActionChains(browser)
        chain.drag_and_drop(drag_src, drop_dst)
        chain.perform()
    def main():
        """Launch a browser, run the drag-and-drop demo, and always clean up."""
        # Chrome is the driver actually used below. The other webdriver
        # classes work the same way, but instantiating them all would open
        # (and leak) four extra browsers, so they are shown commented out.
        browser = webdriver.Chrome()
        # browser = webdriver.Firefox()
        # browser = webdriver.Edge()
        # browser = webdriver.PhantomJS()   # headless browser (note: PhantomJS, not PhatomJS)
        # browser = webdriver.Safari()      # Apple's browser
        try:
            f2(browser)
        finally:
            # quit() shuts down the chromedriver process as well as the window
            # (close() only closes the current window)
            browser.quit()
    if __name__ == '__main__':
        main()
    

    browser.current_url:获取当前网址
    browser.find_element_by_id('1thUrlid第01集'):根据id获取某个节点
    input1.get_attribute('href'):获取节点属性
    input_list = browser.find_elements_by_css_selector('.dwon2'):用class选择
    browser.find_element_by_link_text('今日更新'):根据链接文字完全匹配
    browser.find_elements_by_partial_link_text('下载'):根据链接文字部分匹配(包含)
    input5.text:获取节点文本值
    actions.drag_and_drop(source, target):将source节点拖动到target节点的位置

    3.利用selenium获取京东商品信息

    import requests
    from selenium import webdriver
    from selenium.common.exceptions import TimeoutException
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    import time
    from lxml import etree    
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy import Column, Integer, String, ForeignKey, UniqueConstraint, Index
    from sqlalchemy.orm import sessionmaker, relationship
    from sqlalchemy import create_engine    
    from jd_models import Goods    
    # SQLAlchemy engine for the local MySQL "jd" database
    engine = create_engine("mysql+pymysql://root:123456@127.0.0.1/jd?charset=utf8", max_overflow=5)
    # Session factory / session used to persist parsed goods rows
    session_maker = sessionmaker(bind=engine)
    session = session_maker()
    # Launch Chrome with default options and a fixed window size
    chrome_options = webdriver.ChromeOptions()
    browser = webdriver.Chrome(chrome_options=chrome_options)
    browser.set_window_size(1400, 700)
    # Explicit wait with a 3-second timeout, shared by all element lookups
    wait = WebDriverWait(browser, 3)
    # Search keyword typed into JD's search box ("clothes")
    KEYWORD = '衣服'
    def get_page(page):
        """Load JD search results for *page* and return the rendered HTML.

        On page 1 it opens jd.com and submits the search keyword. For every
        page it scrolls through the results to force lazy-loaded goods to
        render, captures the page source, then drives the pagination widget
        to the next page so the following call captures it.
        """
        if page == 1:
            url = 'https://www.jd.com/'
            # Open the home page
            browser.get(url)
            # Wait for the search box, then type the keyword
            # (renamed from `input`, which shadowed the builtin)
            search_box = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#key')))
            search_box.clear()
            search_box.send_keys(KEYWORD)
            # Wait until the search button is clickable, then click it
            button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#search button.button')))
            button.click()
        # Give the results page time to load
        time.sleep(3)
        # Jump straight to the bottom so the page knows its full height
        str_js = 'var scrollHeight = document.body.scrollHeight;window.scrollTo(0, scrollHeight);'
        browser.execute_script(str_js)
        # Scroll back up in 16 steps: jumping directly would skip the
        # lazy-load triggers, so reveal the page slice by slice.
        for i in range(16, 0, -1):
            str_js = 'var scrollHeight = document.body.scrollHeight;window.scrollTo(0, (%d * scrollHeight / 16));' % i
            time.sleep(1)
            browser.execute_script(str_js)
        # Rendered HTML of the current results page
        html = browser.page_source
        # Bring the pagination controls into view: scroll to 50px above
        # the page-number input box
        page_box = browser.find_element_by_css_selector('#J_bottomPage input.input-txt')
        str_js = 'var scrollHeight = document.body.scrollHeight;window.scrollTo(0, %d);' % (page_box.location['y'] - 50)
        browser.execute_script(str_js)
        time.sleep(1)
        # Type the NEXT page number. The original typed `page` itself, which
        # re-loaded the page just captured (page 1 was saved twice and the
        # last page never). send_keys expects a string, so convert.
        page_box = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#J_bottomPage input.input-txt')))
        page_box.clear()
        page_box.send_keys(str(page + 1))
        # Click the "go" button to navigate there
        submit = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_bottomPage .btn.btn-default')))
        submit.click()
        return html
    def save_img(page):
        """Download one product image and write it into ./image/.

        *page* is a scheme-less URL (e.g. 'img14.360buyimg.com/.../x.jpg');
        the file keeps its original basename.
        """
        import os
        url = 'http://' + page
        # timeout keeps a dead image host from hanging the whole crawl
        # NOTE(review): consider response.raise_for_status() before writing
        response = requests.get(url, timeout=10)
        img = response.content
        filename = page.split('/')[-1]
        print(filename)
        # Make sure the target directory exists before writing
        os.makedirs('./image', exist_ok=True)
        with open('./image/%s' % filename, 'wb') as f:
            f.write(img)
    def get_paruse(html):
        """Parse a JD search-result page and persist every goods item.

        Extracts title, image, price, detail link and sku from each result
        card, downloads the image, stores one row per item in the goods
        table, and prints the collected list.
        """
        etree_html = etree.HTML(html)
        result_list = etree_html.xpath('//div[@id="J_goodsList"]//div[@class="gl-i-wrap"]')
        result = []
        for item in result_list:
            item_dict = {}
            title = item.xpath('./div[@class="p-img"]/a/@title')
            if not title:
                # Skip malformed cards instead of crashing on title[0]
                continue
            item_dict['title'] = title[0]
            # data-lazy-img == 'done' means the real URL already moved to src
            lazy = item.xpath('./div[@class="p-img"]/a/img/@data-lazy-img')
            if lazy and lazy[0] == 'done':
                img = item.xpath('./div[@class="p-img"]/a/img/@src')
            else:
                img = lazy
            item_dict['img'] = img[0].replace('//', '')
            # Download the product image into ./image/
            save_img(item_dict['img'])
            price = item.xpath('./div[@class="p-price"]//i/text()')
            item_dict['price'] = price[0]
            detail = item.xpath('./div[@class="p-img"]/a/@href')
            item_dict['detail'] = detail[0].replace('//', '')
            # data-sku lives on the parent of the card node
            sku = item.xpath('./../@data-sku')
            item_dict['sku'] = sku[0]
            result.append(item_dict)
            # Persist the cleaned scalar values. The original assigned the
            # raw xpath result lists (title, img, ...) to the columns, which
            # wrote reprs like "['...']" into MySQL.
            goods = Goods()
            goods.title = item_dict['title']
            goods.img = item_dict['img']
            goods.price = item_dict['price']
            goods.sku = item_dict['sku']
            goods.detail = item_dict['detail']
            session.add(goods)
            session.commit()
        print(result, len(result))
    def main():
        """Crawl JD result pages 1-100 and parse each one."""
        # range(1, 101) yields the 1-based page numbers directly,
        # replacing the `for page in range(100): page += 1` workaround
        for page in range(1, 101):
            html = get_page(page)
            get_paruse(html)
    if __name__ == '__main__':
        main()
    

    item.xpath('./../@data-sku') 获取当前节点的父节点的data-sku属性
    执行滚动js代码scrollTo(横向滚动,可设为0, 竖向滚动);表示滚动到哪个坐标;竖向直接设为scrollHeight则一下跳到末尾,故常分成n份利用for循环来连续显示
    str_js = 'var scrollHeight = document.body.scrollHeight;window.scrollTo(0, scrollHeight);'
    browser.execute_script(str_js)

    4.利用sqlalchemy创建模型向数据库中存数据

    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy import Column, Integer, String, ForeignKey, UniqueConstraint, Index
    from sqlalchemy.orm import sessionmaker, relationship
    from sqlalchemy import create_engine    
    # Engine for the same local MySQL "jd" database the crawler writes to
    engine = create_engine("mysql+pymysql://root:123456@127.0.0.1:3306/jd?charset=utf8", max_overflow=5,encoding='utf-8')
    Base = declarative_base()
    class Goods(Base):
        """ORM model for one scraped JD goods item (table: goods)."""
        __tablename__ = 'goods'
        id = Column(Integer, primary_key=True, autoincrement=True)    # primary key, auto-increment
        title = Column(String(512))     # product title
        img = Column(String(1024))      # product image URL
        price = Column(String(32))      # price text as scraped
        sku = Column(String(32))        # JD sku id
        detail = Column(String(1024))   # detail-page URL
    

    5.建表语句

    -- Create the jd database and the goods table targeted by the Goods model
    create database jd default character set='utf8';
    use jd;
    create table goods(
      id int primary key auto_increment,  -- primary key, auto-increment
      title varchar(512),                 -- product title
      img varchar(1024),                  -- product image URL
      price varchar(32),                  -- price text as scraped
      sku varchar(32),                    -- JD sku id
      detail varchar(1024)                -- detail-page URL
    );

    相关文章

      网友评论

          本文标题:day69-Selenium爬取动态渲染页面

          本文链接:https://www.haomeiwen.com/subject/zuidyqtx.html