美文网首页
Python爬虫练习(三)多线程爬取NCBI genome搜索结果

Python爬虫练习(三)多线程爬取NCBI genome搜索结果

作者: 精神秃头生信小伙儿 | 来源:发表于2021-01-26 02:31 被阅读0次

    由于ncbi的genome搜索结果的页面对应的翻页操作发送的请求是个post请求,并且其参数众多不太好用requests模块直接请求到页面,因此直接用selenium模拟翻页操作获得每页源码数据即可,由于结果比较多,因此考虑使用多线程/异步/多进程等等,这里用的是线程池的操作,我在这里调用8个线程,针对是哺乳类基因组页面的搜索结果,还是比较快的,一共22页,每个步骤/参数/函数的作用都标注出来了。

    from lxml import etree
    from selenium import webdriver
    from multiprocessing.dummy import Pool
    from functools import partial 
    import os
    import requests
    # 实现无可视化界面
    from selenium.webdriver.chrome.options import Options
    # 实现规避检测
    from selenium.webdriver import ChromeOptions
    
    
    def setoption():
        """
        Build the two Chrome configuration objects used for basic
        anti-anti-scraping.

        Returns:
            tuple: (chrome_options, option) — the first enables headless
            mode and silences Chrome's console logging; the second hides
            the "controlled by automated software" automation flag.
        """
        # Headless configuration: no visible browser window.
        headless_conf = Options()
        headless_conf.add_experimental_option(
            'excludeSwitches', ['enable-logging'])
        for flag in ("--headless", "--disable-gpu"):
            headless_conf.add_argument(flag)
        # Detection-evasion configuration: drop the automation switch.
        stealth_conf = ChromeOptions()
        stealth_conf.add_experimental_option("excludeSwitches",
                                             ["enable-automation"])
        return headless_conf, stealth_conf
    
    
    def getonepage(pagetext, filepath):
        """
        Scrape one result page: for every species entry, follow the
        detail link, extract the organism name and the genome summary
        table, print both, and append them to *filepath*.

        Args:
            pagetext (str): HTML source of one NCBI genome result page.
            filepath (str): path of the output text file (opened in
                append mode; caller is responsible for clearing it).
        """
        tree = etree.HTML(pagetext)
        initurl = "https://www.ncbi.nlm.nih.gov"
        div_list = tree.xpath(
            '//*[@id="maincontent"]/div/div[5]/div[@class="rprt"]')
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
                (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
        }
        # Open the output file once for the whole page instead of
        # re-opening it for every species entry.
        with open(filepath, "a", encoding="utf-8") as fp:
            for div in div_list:
                detail_url = initurl + div.xpath('.//p/a/@href')[0]
                response = requests.get(detail_url, headers=headers)
                # etree.HTML accepts raw bytes directly; the original
                # decoded to str and re-encoded back to UTF-8 bytes,
                # a no-op round trip.
                detail_tree = etree.HTML(response.content)
                name = detail_tree.xpath(
                    '//*[@id="maincontent"]/div/div[5]/div/div[2]/table[1]//tr//span[1]/text()')[0]
                summary = "".join(detail_tree.xpath(
                    '//*[@id="mmtest1"]/div/div/div/table//tr//text()'))
                print(name, summary, sep="\n")
                fp.write(name+"\n"+summary+"\n")
    
    
    def mainprocess(chrome_options, option, executable_path, filepath, thread=4):
        """
        Drive a headless Chrome through every result page of the NCBI
        genome search, collect each page's HTML source, then hand the
        pages to a thread pool that scrapes per-species details via
        getonepage().

        Args:
            chrome_options: Options object enabling headless operation.
            option: ChromeOptions object used to evade automation detection.
            executable_path (str): path to the chromedriver binary.
            filepath (str): output file; deleted first if it already exists,
                because getonepage() appends.
            thread (int): number of worker threads in the pool.
        """
        # Launch Chrome with both the headless and anti-detection configs.
        bro = webdriver.Chrome(executable_path=executable_path,
                               chrome_options=chrome_options,
                               options=option)
        bro.get("https://www.ncbi.nlm.nih.gov/genome/?term=txid40674%5BOrganism%3Aexp%5D")
        # Page sources collected here become the thread-pool work items.
        pagetext_list = []
        # Grab the first page's source before any paging.
        pagetext = bro.page_source
        print("Append page1 to the queue.")
        pagetext_list.append(pagetext)
        # Total number of result pages, read from the pager widget's
        # "last" attribute.
        allpagetree = etree.HTML(pagetext)
        allpage = int(allpagetree.xpath('//*[@id="pageno2"]/@last')[0])
        # Click "next" repeatedly and store each page's source.
        for pagenum in range(2, allpage+1):
            next_btn = bro.find_element_by_xpath(
                "/html/body/div[1]/div[1]/form/div[1]/div[4]/div/div[7]/div/a[1]")
            next_btn.click()
            pagetext = bro.page_source
            print(f"Append page{pagenum} to the queue.")
            pagetext_list.append(pagetext)
        # Remove output left over from a previous run (getonepage appends).
        if os.path.isfile(filepath):
            os.remove(filepath)
        # Fan the pages out to the thread pool; filepath is bound with
        # functools.partial so map() only varies the page source.
        pool = Pool(thread)
        pool.map(partial(getonepage, filepath=filepath), pagetext_list)
        pool.close()
        pool.join()
        bro.quit()
    
    
    if __name__ == "__main__":
        # Build the two Chrome configs, then run the full scrape with
        # 8 worker threads, writing results to genomeinfo.txt.
        headless_conf, stealth_conf = setoption()
        mainprocess(headless_conf, stealth_conf,
                    r"chromedriver.exe", "genomeinfo.txt", thread=8)
    

    相关文章

      网友评论

          本文标题:Python爬虫练习(三)多线程爬取NCBI genome搜索结果

          本文链接:https://www.haomeiwen.com/subject/chmpaktx.html