Web Scraping Notes (5): Browser Automation with Selenium + chromedriver

Author: 拼了老命在学习 | Published 2020-07-21 15:46

1. Basic usage

    from selenium import webdriver

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"  # absolute path to chromedriver
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("http://www.baidu.com")
    driver.close()  # close the current window
    driver.quit()   # quit the browser entirely
    

2. Locating elements, using the Baidu page as an example

    # Locating elements, using the Baidu page as an example.
    # Each lookup below is an alternative way to find the search box.
    # 1. By id
    input = driver.find_element_by_id("kw")
    # 2. By name
    input = driver.find_element_by_name("wd")
    # 3. By class name
    input = driver.find_element_by_class_name("s_ipt")
    # 4. By XPath
    input = driver.find_element_by_xpath("//input[@id='kw']")
    # 5. By CSS selector
    input = driver.find_element_by_css_selector('.quickdelete-wrap > input')
    # 6. By tag name (note: find_elements_* returns a list of matches)
    inputs = driver.find_elements_by_tag_name('input')
    # Type into the located element
    input.send_keys('python')
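
    Note: in Selenium 4 the find_element_by_* helper family was deprecated and
    later removed, in favor of a single find_element method that takes a By
    locator, and the executable_path argument moved into a Service object. A
    minimal sketch of the same lookups, assuming a Selenium 4 install:

    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.chrome.service import Service

    # Selenium 4 style: the driver path is wrapped in a Service object
    driver = webdriver.Chrome(service=Service(r"D:\Python\chromedriver\chromedriver.exe"))
    driver.get("http://www.baidu.com")
    # One generic method plus a By locator replaces each helper
    input = driver.find_element(By.ID, "kw")
    input = driver.find_element(By.XPATH, "//input[@id='kw']")
    inputs = driver.find_elements(By.TAG_NAME, 'input')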
    

3. Operating common page elements with Selenium

    # Common form elements:
    # input type='text'/'password'/'number'  -- text-style input boxes
    # button, input type='submit'            -- buttons
    # input type='checkbox'                  -- checkboxes, e.g. a "remember me" box
    # select                                 -- drop-down lists
    
    # Operating an input box
    from selenium import webdriver
    import time

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("http://www.baidu.com")
    input = driver.find_element_by_id("kw")
    input.send_keys('python')
    time.sleep(2)
    # Clear the text box
    input.clear()
    
    # Operating a checkbox
    from selenium import webdriver
    import time

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("https://douban.com/")
    # Switch into the login iframe first
    driver.switch_to.frame(driver.find_elements_by_tag_name('iframe')[0])
    rememberBtn = driver.find_element_by_name("remember")
    rememberBtn.click()
    
    # Operating a select tag (a drop-down list)
    from selenium import webdriver
    from selenium.webdriver.support.ui import Select

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("https://dobai.cn/")
    # Wrap the select element in a Select helper
    selectBtn = Select(driver.find_element_by_name('jumpMenu'))
    # Select by index (here, the option at index 1)
    selectBtn.select_by_index(1)
    # Select by value
    selectBtn.select_by_value('http://www.95yueba.com')
    # Select by visible text
    selectBtn.select_by_visible_text('95秀客户端')
    # Deselect all options (only valid for multi-select lists)
    selectBtn.deselect_all()
    
    # Operating a submit button
    from selenium import webdriver

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("http://www.baidu.com")

    inputbar = driver.find_element_by_id('kw')
    inputbar.send_keys('python')
    # Locate the submit button and click it
    submitbar = driver.find_element_by_id('su')
    submitbar.click()
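
    As an aside, a WebElement's submit() method submits the form that contains
    it, which can save locating the button at all; a minimal sketch of the same
    search using it:

    # Alternative: submit the enclosing form straight from the input element
    inputbar = driver.find_element_by_id('kw')
    inputbar.send_keys('python')
    inputbar.submit()  # submits the search form without clicking the button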
    

4. Action chains

    An interaction on a page often takes several steps; the mouse action chain class ActionChains can carry them out in sequence.

    # Action chains
    from selenium import webdriver
    from selenium.webdriver.common.action_chains import ActionChains

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("http://www.baidu.com")
    # Locate the input box and the submit button
    inputtag = driver.find_element_by_id('kw')
    submittag = driver.find_element_by_id('su')

    actions = ActionChains(driver)
    actions.move_to_element(inputtag)   # move the mouse to the input box
    actions.send_keys('python')         # type the query
    actions.move_to_element(submittag)  # move to the submit button
    actions.click(submittag)            # click to submit the form
    actions.perform()                   # execute the queued actions in order
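
    Since each ActionChains method returns the chain itself, the same sequence
    can also be written as one fluent expression:

    # Equivalent fluent form of the chain above
    ActionChains(driver) \
        .move_to_element(inputtag) \
        .send_keys('python') \
        .click(submittag) \
        .perform()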
    

5. Working with cookies in Selenium

    # Working with cookies in Selenium
    from selenium import webdriver

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("http://www.baidu.com")
    # Get all cookies
    for cookie in driver.get_cookies():
        print(cookie)
    # Get a single cookie (as a dict) by name
    print(driver.get_cookie("PSTM"))
    # Delete a specific cookie
    driver.delete_cookie("PSTM")
    print(driver.get_cookie("PSTM"))  # now prints None
    # Delete all cookies
    driver.delete_all_cookies()
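
    The counterpart for writing is driver.add_cookie, which takes a dict with
    at least 'name' and 'value' keys and applies to the currently loaded
    domain; a minimal sketch with a made-up cookie:

    # Add a cookie for the current domain (name and value are invented here)
    driver.add_cookie({"name": "demo_cookie", "value": "hello"})
    print(driver.get_cookie("demo_cookie"))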
    

6. Implicit and explicit waits in Selenium

    # Implicit and explicit waits in Selenium
    from selenium import webdriver
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as ES
    from selenium.webdriver.common.by import By

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("https://douban.com/")
    # Switch into the login iframe
    driver.switch_to.frame(driver.find_elements_by_tag_name('iframe')[0])
    # Click the "log in with username and password" tab first,
    # otherwise the username and password inputs cannot be found
    bottom1 = driver.find_element_by_xpath('/html/body/div[1]/div[1]/ul[1]/li[2]')
    bottom1.click()
    # Implicit wait: every element lookup polls for up to 20 seconds before
    # raising NoSuchElementException. Uncomment to watch it wait on a bogus id:
    driver.implicitly_wait(20)
    # driver.find_element_by_id("123456")
    # Explicit wait: block until a condition holds, or time out after 10 seconds
    element = WebDriverWait(driver, 10).until(
        ES.presence_of_element_located((By.ID, "username"))  # must pass a tuple
    )
    print(element)
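
    expected_conditions ships many other ready-made predicates besides
    presence_of_element_located; element_to_be_clickable is a common choice
    when the next step is a click. A minimal sketch, reusing the id from above:

    # Wait until the element is present and enabled, then click it
    login_input = WebDriverWait(driver, 10).until(
        ES.element_to_be_clickable((By.ID, "username"))
    )
    login_input.click()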
    

7. Opening and switching between multiple windows

    # Opening multiple windows with Selenium
    from selenium import webdriver

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get("http://www.baidu.com")
    # Open a new window via JavaScript
    driver.execute_script("window.open('https://www.douban.com/')")
    # Note: the old driver.switch_to_window() is replaced by driver.switch_to.window();
    # driver.window_handles is a list of handles, one per open window
    driver.switch_to.window(driver.window_handles[1])
    print(driver.current_url)   # URL of the current window
    print(driver.page_source)   # HTML source of the current window
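
    Switching back is the same call with the original handle; index 0 is the
    first window that was opened:

    # Return to the original window
    driver.switch_to.window(driver.window_handles[0])
    print(driver.current_url)  # back on the Baidu page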
    

8. Using a proxy with Selenium

    # Using a proxy with Selenium
    from selenium import webdriver
    import time

    driver_path = r"D:\Python\chromedriver\chromedriver.exe"
    options = webdriver.ChromeOptions()
    options.add_argument("--proxy-server=http://175.42.68.11:9999")
    # Note: newer versions renamed the chrome_options parameter to options
    driver = webdriver.Chrome(executable_path=driver_path, options=options)
    driver.get('http://httpbin.org/ip')  # echoes the origin IP, so the proxy can be verified
    print(driver.page_source)
    time.sleep(10)
    driver.quit()
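
    Other Chrome flags go through the same add_argument call; a minimal
    sketch, assuming you also want Chrome to run without a visible window:

    options = webdriver.ChromeOptions()
    options.add_argument("--proxy-server=http://175.42.68.11:9999")
    options.add_argument("--headless")  # run Chrome without opening a window
    driver = webdriver.Chrome(executable_path=driver_path, options=options)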
    

9. OCR with Tesseract

    Installation: plenty of step-by-step guides are a quick Baidu search away.
    Installer: because of the firewall my own download was slow, so here is a copy if you need it.
    Link: https://pan.baidu.com/s/18RGUURHRMTlcpZS-NJlB0w  Extraction code: dfsf

    import pytesseract
    from PIL import Image
    
    # Point pytesseract at the tesseract.exe binary
    pytesseract.pytesseract.tesseract_cmd = r"D:\Python\tesseract\tesseract.exe"
    
    # Open the image to recognize
    image = Image.open(r"D:\Python\tesseract_demo\a.png")
    
    # Recognize text. The default language is English; another language can be
    # passed via lang, provided its trained data is in tesseract's tessdata folder.
    text = pytesseract.image_to_string(image, lang="chi_sim")
    
    print(text)
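
    This OCR step can be wired straight into Selenium for things like captcha
    images; a minimal sketch, assuming an open driver as in the earlier
    sections and a captcha <img> element with the hypothetical id 'captcha':

    import io

    # Screenshot just the captcha element and run OCR on it (the id is hypothetical)
    captcha_img = driver.find_element_by_id('captcha')
    image = Image.open(io.BytesIO(captcha_img.screenshot_as_png))
    print(pytesseract.image_to_string(image))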
    

Easter egg

    Automatically scraping the grade-query pages of the HBUT (Hubei University of Technology) academic affairs system (the captcha still has to be typed in by hand)

    from selenium import webdriver
    from bs4 import BeautifulSoup
    from selenium.webdriver.support.ui import Select
    from pyecharts.charts import Bar, Page
    from pyecharts import options as opts
    import time
    
    driver = None
    studies = []
    def get_page():
        global driver
        driver_path = r"D:\Python\chromedriver\chromedriver.exe"
        driver = webdriver.Chrome(executable_path=driver_path)
        url = 'http://run.hbut.edu.cn/Account/LogOn'
        driver.get(url)
        username = driver.find_element_by_id("UserName")
        # Put your student number between the quotes
        username.send_keys("")
        password = driver.find_element_by_id("Password")
        # Put your password between the quotes
        password.send_keys("")
        # Sleep 5 seconds; type the captcha into the opened page during this pause
        time.sleep(5)
        denglu = driver.find_element_by_xpath("//div[@class='b f']")
        denglu.click()
        time.sleep(1)
        # Open the grade-query page
        driver.get('http://run.hbut.edu.cn/StuGrade/Index')
        # Drive the select widget: pick each semester in turn, grab the
        # page HTML, and hand it to the parsing function
        for x in range(1,4):
            selectbtn = Select(driver.find_element_by_name("SemesterName"))
            # Sleep 5 seconds so rapid-fire actions are not flagged as a bot
            time.sleep(5)
            selectbtn.select_by_index(x)
            html = driver.page_source
            page_parse(html)
        time.sleep(5)
        driver.quit()
    
    def page_parse(html):
        soup = BeautifulSoup(html, 'lxml')
        # Grab every table row and drop the header row
        trs = soup.find_all('tr')[1:]
        study = []
        for tr in trs:
            # Take the text of each cell and drop the course number
            texts = list(tr.stripped_strings)[1:6]
            # Drop the course type; keep the rest in a dict
            del texts[1]
            grade = {
                'name': texts[0],           # course name
                'jidian': texts[1],         # grade point
                'content-score': texts[2],  # credits
                'score': texts[3]           # final score
            }
            # Collect every course of the semester as a dict
            study.append(grade)
        # Collect one list of course dicts per semester
        studies.append(study)
    
    if __name__ == '__main__':
        get_page()
        content_all = []
        score_all = []
        pscores_all = []
        for study in studies:
            scores = 0
            myscores = 0
            for x in study:
                # Sum the credits of every course in the semester
                a = float(x['content-score'])
                scores = scores + a
                # Sum grade point * credits over the semester's courses
                b = float(x['jidian']) * float(x['content-score'])
                myscores = myscores + b
            # Credit-weighted average grade point for the semester
            pscores = myscores / scores
            # Store the semester GPA in the list
            pscores_all.append(pscores)
            # Course names for the semester, as a list
            content_classes = list(map(lambda x: x["name"], study))
            content_all.append(content_classes)
            # Final scores for the semester, as a list
            score_classes = list(map(lambda x: x["score"], study))
            score_all.append(score_classes)
        # Simple visualization of the data
        bar0 = (Bar()
                .add_xaxis(content_all[0])
                .add_yaxis('', score_all[0])
                .set_global_opts(xaxis_opts=opts.AxisOpts(name='Course'),
                                 yaxis_opts=opts.AxisOpts(name='Score'),
                                 title_opts=opts.TitleOpts(title='2019 Semester 1',
                                                           subtitle='GPA: %f' % pscores_all[0],
                                                           pos_left='center',
                                                           pos_top='5%'))
                )
        bar1 = (Bar()
                .add_xaxis(content_all[1])
                .add_yaxis('', score_all[1])
                .set_global_opts(xaxis_opts=opts.AxisOpts(name='Course'),
                                 yaxis_opts=opts.AxisOpts(name='Score'),
                                 title_opts=opts.TitleOpts(title='2018 Semester 2',
                                                           subtitle='GPA: %f' % pscores_all[1],
                                                           pos_left='center',
                                                           pos_top='5%'))
                )
        bar2 = (Bar()
                .add_xaxis(content_all[2])
                .add_yaxis('', score_all[2])
                .set_global_opts(xaxis_opts=opts.AxisOpts(name='Course'),
                                 yaxis_opts=opts.AxisOpts(name='Score'),
                                 title_opts=opts.TitleOpts(title='2018 Semester 1',
                                                           subtitle='GPA: %f' % pscores_all[2],
                                                           pos_left='center',
                                                           pos_top='5%'))
                )
        page = Page()
        page.add(bar0,bar1,bar2)
        # Change this path for your own machine
        page.render('E:/Desktop/grades.html')
    
