How to crawl Baidu Baike content by keyword with Python 3
Preface
About the Python version: when I started, a lot of material said Python 2 was the better choice because many libraries did not yet support 3, but having used it until now I find Python 3 more convenient, mainly because of encoding issues; 2 is simply less pleasant to work with than 3 in that respect. Besides, most of the Python 2 material found online still works after minor changes.

Now, on to crawling Baidu Baike. The task is to crawl the complete entries of n scenic spots in Beijing, where the n spot names are given in a file. No API is used; the pages are simply scraped.

1. Getting the URL from a keyword
Since we only need to read the pages and there is no interaction involved, a simple request is enough and there is no need to simulate a browser. The search URL can be requested directly:

http://baike.baidu.com/search/word?word="guanjianci"

where guanjianci stands for the URL-encoded keyword:

for l in view_names:
    '''http://baike.baidu.com/search/word?word=''' # how the url is built
    name = urllib.parse.quote(l)
    name.encode('utf-8')
    url = 'http://baike.baidu.com/search/word?word=' + name

Note that the keywords are Chinese, so encoding matters: a URL cannot contain spaces or raw Chinese characters, which is why the keyword has to be percent-encoded with quote(). In Python 2.x this function is urllib.quote(); in Python 3 the urllib library provides it as urllib.parse.quote(). For example, the character 我 has the Unicode code point 0x6211 and the UTF-8 encoding 0xE6 0x88 0x91, so its URL-encoded form is %E6%88%91.

2. Downloading the URL
Easily done with urllib; see the download() method in the code below.

3. Parsing the HTML with BeautifulSoup
See the passer() method in the code below.

4. Analysing the data
The body of a Baike entry is a flat sequence of parallel paragraphs, so while crawling it cannot be stored by logical section in a natural way (everything sits at the same level), and regular expressions are needed instead. The basic idea is to treat the whole HTML document as one string, cut out the pieces you want with regexes, and then turn each piece back into a BeautifulSoup object for further parsing. It may take some time to get comfortable with regular expressions.

There are many more details in the code; I have forgotten some of them and will just have to look them up again. Next time I should definitely write the documentation while coding, or right after finishing. A few minimal sketches of the individual steps follow, and then the full code.
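As a quick check of the encoding step, here is a minimal sketch using only the standard library (the keyword 故宫 is just an example, not something from the original input file):

import urllib.parse

keyword = "故宫"                                   # example keyword
encoded = urllib.parse.quote(keyword)              # percent-encodes the keyword's UTF-8 bytes
url = 'http://baike.baidu.com/search/word?word=' + encoded
print(url)
print(urllib.parse.quote("我"))                    # prints %E6%88%91, matching the example above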
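Steps 2 and 3 need nothing beyond urllib and BeautifulSoup. A minimal sketch, assuming 颐和园 as an example keyword and relying on the lemma-summary class that the full code also uses:

import urllib.parse
import urllib.request
from bs4 import BeautifulSoup

keyword = "颐和园"                                         # example keyword
url = 'http://baike.baidu.com/search/word?word=' + urllib.parse.quote(keyword)

response = urllib.request.urlopen(url)                     # step 2: download the page
if response.getcode() == 200:
    soup = BeautifulSoup(response.read(), 'html.parser')   # step 3: parse it
    summary = soup.find('div', class_="lemma-summary")     # the entry's summary block
    if summary is not None:
        print(summary.get_text())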
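The regex idea in step 4 can also be sketched on its own: find the marker that opens each section, split the raw HTML on it, and hand every fragment back to BeautifulSoup. The SECTION_MARKER string below is a placeholder assumption, not the exact tag from the original code; substitute whatever tag opens a level-2 section title on the page:

import re
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup

SECTION_MARKER = '<div class="para-title level-2"'         # placeholder marker, adjust to the real page

keyword = "天坛"                                            # example keyword
url = 'http://baike.baidu.com/search/word?word=' + urllib.parse.quote(keyword)
html_text = urllib.request.urlopen(url).read().decode('utf-8')

m = re.search(re.escape(SECTION_MARKER), html_text)
if m is not None:
    body = html_text[m.start():]                            # drop everything before the first section
    for fragment in body.split(SECTION_MARKER):             # one fragment per level-2 section
        if not fragment.strip():
            continue
        section = BeautifulSoup(fragment, "html.parser")    # back to a parse tree
        paras = section.findAll("div", class_="para")       # paragraph blocks inside the section
        print(len(paras), "paragraph blocks in this section")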
# coding:utf-8
'''
function: crawl the Baidu Baike entries of all Beijing scenic spots
author: yi
'''
import urllib.request
from urllib.request import urlopen
from urllib.error import HTTPError
import urllib.parse
from bs4 import BeautifulSoup
import re
import codecs
import json
class BaikeCraw(object):
    def __init__(self):
        self.urls = set()
        self.view_datas = {}

    def craw(self, filename):
        urls = self.getUrls(filename)
        if urls == None:
            print("not found")
        else:
            for urll in urls:
                print(urll)
                try:
                    html_count = self.download(urll)
                    self.passer(urll, html_count)
                except:
                    print("view does not exist")
                '''file=self.view_datas["view_name"]
                self.craw_pic(urll,file,html_count)
                print(file)'''
    def getUrls(self, filename):
        new_urls = set()
        file_object = codecs.open(filename, encoding='utf-16', )
        try:
            all_text = file_object.read()
        except:
            print("failed to read the keyword file!")
            file_object.close()
            return None  # craw() treats None as "not found"
        file_object.close()
        view_names = all_text.split(" ")
        # drop entries containing '?' (filter into a new list instead of removing while iterating)
        view_names = [l for l in view_names if '?' not in l]
        for l in view_names:
            '''http://baike.baidu.com/search/word?word=''' # how the url is built
            name = urllib.parse.quote(l)
            name.encode('utf-8')
            url = 'http://baike.baidu.com/search/word?word=' + name
            new_urls.add(url)
        print(new_urls)
        return new_urls
    def manger(self):
        pass

    def passer(self, urll, html_count):
        soup = BeautifulSoup(html_count, 'html.parser', from_encoding='utf_8')
        self._get_new_data(urll, soup)
        return

    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()
    def _get_new_data(self, url, soup):  ## extract the structured data
        if soup.find('div', class_="main-content").find('h1') is not None:
            self.view_datas["view_name"] = soup.find('div', class_="main-content").find('h1').get_text()  # spot name
            print(self.view_datas["view_name"])
        else:
            self.view_datas["view_name"] = soup.find("div", class_="feature_poster").find("h1").get_text()
        self.view_datas["view_message"] = soup.find('div', class_="lemma-summary").get_text()  # summary
        self.view_datas["basic_message"] = soup.find('div', class_="basic-info cmn-clearfix").get_text()  # basic info box
        self.view_datas["basic_message"] = self.view_datas["basic_message"].split("\n")
        get = []
        for line in self.view_datas["basic_message"]:
            if line != "":
                get.append(line)
        self.view_datas["basic_message"] = get
        i = 1
        get2 = []
        tmp = "%%"
        for line in self.view_datas["basic_message"]:
            if i % 2 == 1:
                tmp = line
            else:
                a = tmp + ":" + line
                get2.append(a)
            i = i + 1
        self.view_datas["basic_message"] = get2
        self.view_datas["catalog"] = soup.find('div', class_="lemma-catalog").get_text().split("\n")  # whole table of contents
        get = []
        for line in self.view_datas["catalog"]:
            if line != "":
                get.append(line)
        self.view_datas["catalog"] = get
        ######################### entry body
        view_name = self.view_datas["view_name"]
        html = urllib.request.urlopen(url)
        soup2 = BeautifulSoup(html.read(), 'html.parser').decode('utf-8')
        # note: the literal tag strings below are assumptions; they stand for whatever markers
        # open a level-2 section title and the trailing photo-album block on the page
        p = re.compile(r'<div class="para-title level-2" label-module="para-title">', re.DOTALL)  # head
        r = p.search(soup2)
        content_data_node = soup2[r.span(0)[0]:]  # everything from the first level-2 title onwards
        p = re.compile(r'<div class="album-list">', re.DOTALL)  # tail
        r = p.search(content_data_node)
        content_data = content_data_node[0:r.span(0)[0]]
        lists = content_data.split('<div class="para-title level-2" label-module="para-title">')
        i = 1
        for list in lists:  # each level-2 section
            final_soup = BeautifulSoup(list, "html.parser")
            name_list = None
            try:
                part_name = final_soup.find('h2', class_="title-text").get_text().replace(view_name, '').strip()
                part_data = final_soup.get_text().replace(view_name, '').replace(part_name, '').replace('编辑', '')  # section text, e.g. the history section
                name_list = final_soup.findAll('h3', class_="title-text")
                all_name_list = {}
                na = "part_name" + str(i)
                all_name_list[na] = part_name
                final_name_list = []###########
                for nlist in name_list:
                    nlist = nlist.get_text().replace(view_name, '').strip()
                    final_name_list.append(nlist)
                fin = "final_name_list" + str(i)
                all_name_list[fin] = final_name_list
                print(all_name_list)
                i = i + 1
                # body text of the section
                try:
                    # note: the tag string is an assumption; it stands for whatever marker opens a level-3 sub-section title
                    p = re.compile(r'<div class="para-title level-3" label-module="para-title">', re.DOTALL)
                    final_soup = final_soup.decode('utf-8')
                    r = p.search(final_soup)
                    final_part_data = final_soup[r.span(0)[0]:]
                    part_lists = final_part_data.split('<div class="para-title level-3" label-module="para-title">')
                    for part_list in part_lists:
                        final_part_soup = BeautifulSoup(part_list, "html.parser")
                        content_lists = final_part_soup.findAll("div", class_="para")
                        for content_list in content_lists:  # each smallest paragraph
                            try:
                                pic_word = content_list.find("div", class_="lemma-picture text-pic layout-right").get_text()  # picture caption to strip from the text
                                try:
                                    pic_word2 = content_list.find("div", class_="description").get_text()  # picture description to strip from the text
                                    content_list = content_list.get_text().replace(pic_word, '').replace(pic_word2, '')
                                except:
                                    content_list = content_list.get_text().replace(pic_word, '')
                            except:
                                try:
                                    pic_word2 = content_list.find("div", class_="description").get_text()  # picture description to strip from the text
                                    content_list = content_list.get_text().replace(pic_word2, '')
                                except:
                                    content_list = content_list.get_text()
                            r_part = re.compile(r'\[\d.\]|\[\d\]')  # citation markers such as [1]
                            part_result, number = re.subn(r_part, "", content_list)
                            part_result = "".join(part_result.split())
                            #print(part_result)
                except:
                    final_part_soup = BeautifulSoup(list, "html.parser")
                    content_lists = final_part_soup.findAll("div", class_="para")
                    for content_list in content_lists:
                        try:
                            pic_word = content_list.find("div", class_="lemma-picture text-pic layout-right").get_text()  # picture caption to strip from the text
                            try:
                                pic_word2 = content_list.find("div", class_="description").get_text()  # picture description to strip from the text
                                content_list = content_list.get_text().replace(pic_word, '').replace(pic_word2, '')
                            except:
                                content_list = content_list.get_text().replace(pic_word, '')
                        except:
                            try:
                                pic_word2 = content_list.find("div", class_="description").get_text()  # picture description to strip from the text
                                content_list = content_list.get_text().replace(pic_word2, '')
                            except:
                                content_list = content_list.get_text()
                        r_part = re.compile(r'\[\d.\]|\[\d\]')  # citation markers such as [1]
                        part_result, number = re.subn(r_part, "", content_list)
                        part_result = "".join(part_result.split())
                        #print(part_result)
            except:
                print("error")
        return
    def output(self, filename):
        json_data = json.dumps(self.view_datas, ensure_ascii=False, indent=2)
        fout = codecs.open(filename + '.json', 'a', )
        fout.write(json_data)
        # print(json_data)
        return
    def craw_pic(self, url, filename, html_count):
        # parameter list assumed from the commented-out call self.craw_pic(urll, file, html_count) in craw()
        soup = BeautifulSoup(html_count, 'html.parser', from_encoding='utf_8')
        node_pic = soup.find('div', class_='banner').find("a", href=re.compile("/photo/poi/....."))
        if node_pic is None:
            return None
        else:
            part_url_pic = node_pic['href']
            full_url_pic = urllib.parse.urljoin(url, part_url_pic)
            #print(full_url_pic)
        try:
            html_pic = urlopen(full_url_pic)
        except HTTPError as e:
            return None
        soup_pic = BeautifulSoup(html_pic.read(), 'html.parser')
        pic_node = soup_pic.find('div', class_="album-list")
        print(pic_node)
        return
if __name__ == "__main__":
    spider = BaikeCraw()
    filename = "D:\\PyCharm\\view_spider\\view_points_part.txt"
    spider.craw(filename)
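One detail worth noting: output() serializes self.view_datas to JSON, but the main block never calls it, and the parser overwrites view_datas for every page it processes. A minimal sketch of one way to keep every spot (the choice of file name here is arbitrary) is to dump the data inside the crawl loop in craw(), right after each page is parsed:

                try:
                    html_count = self.download(urll)
                    self.passer(urll, html_count)
                    self.output(self.view_datas["view_name"])  # writes one <spot name>.json per spot
                except:
                    print("view does not exist")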
Summary
That is basically all there is to crawling Baidu Baike content by keyword with Python 3. I hope this article is of some help to anyone learning Python.
