
Using a Crawler to Scrape Novels from 顶点网 (Dingdian)

Author: _三余无梦生_ | Published 2019-01-11 16:04
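
The script below downloads a complete novel from 顶点网: it fetches the novel's index page, reads the title from the page's <h1> tag, creates a directory for it on drive D:, collects every chapter link together with a sanitized chapter name, and finally writes each chapter to its own .txt file.
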
    import requests
    from bs4 import BeautifulSoup
    import os
    import re
    def get_Soup(Fiction_url):
        header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
            'Connection': 'keep-alive'
        }
        html_doc = requests.get(Fiction_url, headers=header)
        html_doc.encoding = 'utf-8'  # set the encoding explicitly so Chinese text is not garbled
        soup = BeautifulSoup(html_doc.text, 'html.parser')
        return soup   # return the parsed index page
    def get_FictionName(soup):
        fictionName = soup.find('h1').text   # the novel's title sits in the page's <h1>
        fiction = {'name': fictionName}
        return fiction
    
    def mkdir(fiction):
        save_path = 'D:\\' + fiction['name']
        isExists = os.path.exists(save_path)   # check whether the directory already exists
        if not isExists:
            os.makedirs(save_path)    # create the directory
            print('{} created successfully'.format(save_path))
            return save_path
        else:
            print("Directory already exists, please choose another name")
            save_path = 'D:\\' + input('Enter a new directory name: ')
            os.makedirs(save_path)
            print('{} created successfully'.format(save_path))
            return save_path
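    # Note (a suggested simplification, not from the original post): when reusing an
    # existing directory is acceptable, the existence check above can be replaced by
    # os.makedirs(save_path, exist_ok=True), available since Python 3.2.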
    
    def get_Fiction_AddAndName(soup):
        i = 1    # counter used to number the chapters
        chapter_add = []   # list of chapter URLs
        chapter_name = []  # list of chapter names
        rstr = r'[=(),/\\:*?"<>|\' ]'  # characters that are not allowed in Windows file names
        for chapter in soup.select('td'):
            href = chapter.select('a')[0]['href']
            chapter_add.append(href)
            name = re.sub(rstr, "", chapter.select('a')[0].text)   # strip illegal characters from the chapter title
            text_name = '第' + str(i) + '章' + name   # prefix each name with its chapter number
            chapter_name.append(text_name)
            i += 1
        return chapter_add, chapter_name
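    # Note (an assumption about the page layout): if any <td> cell holds no <a> tag,
    # chapter.select('a')[0] above raises IndexError; iterating soup.select('td a')
    # instead visits only the cells that actually contain a chapter link.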
    def get_Fiction(chapter_add, chapter_name, save_path):
        sum_chapter = len(chapter_add)
        i = 0
        for chapter in chapter_add:
            chapter_content = requests.get(chapter)
            chapter_content.encoding = 'utf-8'
            content_soup = BeautifulSoup(chapter_content.text, 'html.parser')
            try:
                text = content_soup.find('dd', id='contents').text
            except AttributeError:   # find() returned None: the chapter body was not found
                print('Chapter {} is empty'.format(i + 1))
                text = '空'   # write a placeholder so the file is not missing
            create_txt(text, save_path, chapter_name[i])
            i += 1
            print('{} chapters in total, writing chapter {}...'.format(sum_chapter, i))
    def create_txt(text, save_path, chapter_name):
        # 'w' opens the file for writing; encoding='utf-8' keeps Chinese text intact
        with open(save_path + '\\' + chapter_name + '.txt', 'w', encoding='utf-8') as f:
            f.write(text)
    
    
    Fiction_url = 'https://www.23us.so/files/article/html/15/15497/'   # index page of the novel

    soup = get_Soup(Fiction_url)                       # fetch and parse the index page
    fiction = get_FictionName(soup)                    # extract the novel's title
    save_path = mkdir(fiction)                         # create the save directory
    chapter_add, chapter_name = get_Fiction_AddAndName(soup)    # collect chapter URLs and file names
    get_Fiction(chapter_add, chapter_name, save_path)  # download every chapter to disk
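
One fragile spot worth noting: get_Fiction requests each href exactly as it appears on the index page, which only works while the site emits absolute chapter URLs, and it fires requests back-to-back with no pause. Below is a minimal hardening sketch, assuming the same page structure; the fetch_chapter helper and the 0.5-second delay are illustrative choices, not part of the original script.

    import time
    from urllib.parse import urljoin

    def fetch_chapter(session, index_url, href):
        url = urljoin(index_url, href)   # resolves relative hrefs; absolute URLs pass through unchanged
        resp = session.get(url, timeout=10)
        resp.raise_for_status()          # fail loudly on HTTP errors instead of parsing an error page
        resp.encoding = 'utf-8'
        return resp.text

    session = requests.Session()         # one connection pool and one set of headers for the whole crawl
    session.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0'})
    for href in chapter_add:
        html = fetch_chapter(session, Fiction_url, href)
        time.sleep(0.5)                  # throttle requests so the site is less likely to block the crawl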
