Web Page Information Collection
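The script below reads a list of film titles from a CSV file, builds a Douban search URL for each title, renders the search-result page with Selenium + PhantomJS, follows the result links to each film's detail page, extracts the genre tags, and finally writes one genre list per film to a new CSV file.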
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/14 11:47
# @Author : Liu
# @Site :
# @File : csv测试.py
# @Software: PyCharm
import csv
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
from selenium import webdriver

# import pandas as pd
# csv_file = pd.read_csv('111.csv', encoding='gb2312', usecols=[3])
names = []
with open('2222.csv', 'r') as csvfile:
    # read the film-title column (index 3) from the CSV
    rows = csv.reader(csvfile)
    for row in rows:
        names.append(row[3])
# film titles
print(names)
new_urls = []  # search URLs, one per title
for l in names:
    if not l.strip():
        continue  # skip blank titles (calling names.remove() while iterating would skip entries)
    # build the search URL; quote() percent-encodes the UTF-8 title
    # (an earlier pattern used http://baike.baidu.com/search/wsord?word=)
    name = urllib.parse.quote(l)
    url = 'https://movie.douban.com/subject_search?search_text=' + name
    new_urls.append(url)
zuizhong = []  # final result: one genre list per film
driver = webdriver.PhantomJS(executable_path='D:/phantomjs-2.1.1-windows/bin/phantomjs.exe')
for url1 in new_urls:
    d = []  # genres for this film
    driver.get(url1)  # render the search page, then parse it for result links
    soup = BeautifulSoup(driver.page_source, "html.parser")
    for tag in soup.select('div.item-root a'):
        a = tag.get('href')
        if a is not None:
            # fetch the detail page and collect its genre tags
            page = urllib.request.urlopen(a)
            contents = page.read()
            detail = BeautifulSoup(contents, "html.parser")
            for leixing in detail.find_all('span', property='v:genre'):
                d.append(leixing.string)
    zuizhong.append(d)
driver.quit()
print(zuizhong)
csvFile2 = open('leixing.csv', 'w', newline='')  # newline='' keeps csv from inserting blank rows
writer = csv.writer(csvFile2)
for row in zuizhong:
    writer.writerow(row)  # one genre list per row
csvFile2.close()
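
Note that PhantomJS support has been removed from recent Selenium releases, so the rendering step above may fail on a current install. A minimal sketch of the same step with headless Chrome instead, assuming Chrome and a matching chromedriver are available on the machine:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument('--headless')          # render without opening a browser window
driver = webdriver.Chrome(options=options)  # assumes chromedriver is on PATH
driver.get(url1)
html = driver.page_source                   # pass this to BeautifulSoup as above
driver.quit()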