前言
關于python版本,我一開始看很多資料說python2比較好,因為很多庫還不支持3,但是使用到現在為止覺得還是python3比較好用,因為編碼什么的問題,覺得2還是沒有3方便。而且在網上找到的2中的一些資料稍微改一下也還是可以用。
好了,開始說爬百度百科的事。
這里設定的需求是爬取北京地區n個景點的全部信息,n個景點的名稱是在文件中給出的。沒有用到api,只是單純的爬網頁信息。
1、根據關鍵字獲取url
由于只需要爬取信息,而且不涉及交互,可以使用簡單的方法而不需要模擬瀏覽器。
可以直接
1
|
http://baike.baidu.com/search/word?word="guanjianci"
1
2
3
4
5
|
for l in view_names:
    '''http://baike.baidu.com/search/word?word='''  # 得到url的方法
    name = urllib.parse.quote(l)
    name.encode('utf-8')
    url = 'http://baike.baidu.com/search/word?word=' + name
這里要注意關鍵詞是中文,所以要注意編碼問題,由于url中不能出現空格,所以需要用quote
函數處理一下。
關于quote():
在 Python2.x 中的用法是:urllib.quote(text)
。Python3.x 中是urllib.parse.quote(text)
。按照標準,URL只允許一部分ASCII 字符(數字字母和部分符號),其他的字符(如漢字)是不符合URL標準的。所以URL中使用其他字符就需要進行URL編碼。URL中傳參數的部分(query String),格式是:name1=value1&name2=value2&name3=value3
。假如你的name或者value值中的『&』或者『=』等符號,就當然會有問題。所以URL中的參數字符串也需要把『&=』等符號進行編碼。URL編碼的方式是把需要編碼的字符轉化為%xx的形式。通常URL編碼是基于UTF-8的(當然這和瀏覽器平臺有關)
例子:
比如『我』,unicode 為 0x6211,UTF-8 編碼為 0xE6 0x88 0x91,URL編碼就是 %E6%88%91。
Python的urllib庫中提供了quote
和quote_plus
兩種方法。這兩種方法的編碼范圍不同。不過不用深究,這里用quote
就夠了。
2、下載url
用urllib庫輕松實現,見下面的代碼中def download(self,url)
3、利用Beautifulsoup獲取html
4、數據分析
百科中的內容是并列的段,所以在爬的時候不能自然的按段邏輯存儲(因為全都是并列的)。所以必須用正則的方法。
基本的想法就是把整個html文件看做是str,然后用正則的方法截取想要的內容,在重新把這段內容轉換成beautifulsoup
對象,然后在進一步處理。
可能要花些時間看一下正則。
代碼中還有很多細節,忘了只能再查吧,下次絕對應該邊做邊寫文檔,或者做完馬上寫。。。
貼代碼!
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
|
# coding:utf-8 ''' function:爬取百度百科所有北京景點, author:yi ''' import urllib.request from urllib.request import urlopen from urllib.error import HTTPError import urllib.parse from bs4 import BeautifulSoup import re import codecs import json class BaikeCraw( object ): def __init__( self ): self .urls = set () self .view_datas = {} def craw( self ,filename): urls = self .getUrls(filename) if urls = = None : print ( "not found" ) else : for urll in urls: print (urll) try : html_count = self .download(urll) self .passer(urll, html_count) except : print ( "view do not exist" ) '''file=self.view_datas["view_name"] self.craw_pic(urll,file,html_count) print(file)''' def getUrls ( self , filename): new_urls = set () file_object = codecs. open (filename, encoding = 'utf-16' , ) try : all_text = file_object.read() except : print ( "文件打開異常!" ) file_object.close() file_object.close() view_names = all_text.split( " " ) for l in view_names: if '?' in l: view_names.remove(l) for l in view_names: '''http://baike.baidu.com/search/word?word=''' # 得到url的方法 name = urllib.parse.quote(l) name.encode( 'utf-8' ) url = 'http://baike.baidu.com/search/word?word=' + name new_urls.add(url) print (new_urls) return new_urls def manger( self ): pass def passer( self ,urll,html_count): soup = BeautifulSoup(html_count, 'html.parser' , from_encoding = 'utf_8' ) self ._get_new_data(urll, soup) return def download( self ,url): if url is None : return None response = urllib.request.urlopen(url) if response.getcode() ! 
= 200 : return None return response.read() def _get_new_data( self , url, soup): ##得到數據 if soup.find( 'div' , class_ = "main-content" ).find( 'h1' ) is not None : self .view_datas[ "view_name" ] = soup.find( 'div' , class_ = "main-content" ).find( 'h1' ).get_text() #景點名 print ( self .view_datas[ "view_name" ]) else : self .view_datas[ "view_name" ] = soup.find( "div" , class_ = "feature_poster" ).find( "h1" ).get_text() self .view_datas[ "view_message" ] = soup.find( 'div' , class_ = "lemma-summary" ).get_text() #簡介 self .view_datas[ "basic_message" ] = soup.find( 'div' , class_ = "basic-info cmn-clearfix" ).get_text() #基本信息 self .view_datas[ "basic_message" ] = self .view_datas[ "basic_message" ].split( "\n" ) get = [] for line in self .view_datas[ "basic_message" ]: if line ! = "": get.append(line) self .view_datas[ "basic_message" ] = get i = 1 get2 = [] tmp = "%%" for line in self .view_datas[ "basic_message" ]: if i % 2 = = 1 : tmp = line else : a = tmp + ":" + line get2.append(a) i = i + 1 self .view_datas[ "basic_message" ] = get2 self .view_datas[ "catalog" ] = soup.find( 'div' , class_ = "lemma-catalog" ).get_text().split( "\n" ) #目錄整體 get = [] for line in self .view_datas[ "catalog" ]: if line ! = "": get.append(line) self .view_datas[ "catalog" ] = get #########################百科內容 view_name = self .view_datas[ "view_name" ] html = urllib.request.urlopen(url) soup2 = BeautifulSoup(html.read(), 'html.parser' ).decode( 'utf-8' ) p = re. 
compile (r'', re.DOTALL) # 尾 r = p.search(content_data_node) content_data = content_data_node[ 0 :r.span( 0 )[ 0 ]] lists = content_data.split('') i = 1 for list in lists: #每一大塊 final_soup = BeautifulSoup( list , "html.parser" ) name_list = None try : part_name = final_soup.find( 'h2' , class_ = "title-text" ).get_text().replace(view_name, '').strip() part_data = final_soup.get_text().replace(view_name, ' ').replace(part_name, ' ').replace(' 編輯 ', ' ') # 歷史沿革 name_list = final_soup.findAll( 'h3' , class_ = "title-text" ) all_name_list = {} na = "part_name" + str (i) all_name_list[na] = part_name final_name_list = [] ########### for nlist in name_list: nlist = nlist.get_text().replace(view_name, '').strip() final_name_list.append(nlist) fin = "final_name_list" + str (i) all_name_list[fin] = final_name_list print (all_name_list) i = i + 1 #正文 try : p = re. compile (r'', re.DOTALL) final_soup = final_soup.decode( 'utf-8' ) r = p.search(final_soup) final_part_data = final_soup[r.span( 0 )[ 0 ]:] part_lists = final_part_data.split('') for part_list in part_lists: final_part_soup = BeautifulSoup(part_list, "html.parser" ) content_lists = final_part_soup.findAll( "div" , class_ = "para" ) for content_list in content_lists: # 每個最小段 try : pic_word = content_list.find( "div" , class_ = "lemma-picture text-pic layout-right" ).get_text() # 去掉文字中的圖片描述 try : pic_word2 = content_list.find( "div" , class_ = "description" ).get_text() # 去掉文字中的圖片描述 content_list = content_list.get_text().replace(pic_word, ' ').replace(pic_word2, ' ') except : content_list = content_list.get_text().replace(pic_word, '') except : try : pic_word2 = content_list.find( "div" , class_ = "description" ).get_text() # 去掉文字中的圖片描述 content_list = content_list.get_text().replace(pic_word2, '') except : content_list = content_list.get_text() r_part = re. 
compile (r '\[\d.\]|\[\d\]' ) part_result, number = re.subn(r_part, "", content_list) part_result = "".join(part_result.split()) #print(part_result) except : final_part_soup = BeautifulSoup( list , "html.parser" ) content_lists = final_part_soup.findAll( "div" , class_ = "para" ) for content_list in content_lists: try : pic_word = content_list.find( "div" , class_ = "lemma-picture text-pic layout-right" ).get_text() # 去掉文字中的圖片描述 try : pic_word2 = content_list.find( "div" , class_ = "description" ).get_text() # 去掉文字中的圖片描述 content_list = content_list.get_text().replace(pic_word, ' ').replace(pic_word2, ' ') except : content_list = content_list.get_text().replace(pic_word, '') except : try : pic_word2 = content_list.find( "div" , class_ = "description" ).get_text() # 去掉文字中的圖片描述 content_list = content_list.get_text().replace(pic_word2, '') except : content_list = content_list.get_text() r_part = re. compile (r '\[\d.\]|\[\d\]' ) part_result, number = re.subn(r_part, "", content_list) part_result = "".join(part_result.split()) #print(part_result) except : print ( "error" ) return def output( self ,filename): json_data = json.dumps( self .view_datas, ensure_ascii = False , indent = 2 ) fout = codecs. open (filename + '.json' , 'a' , encoding = 'utf-16' , ) fout.write( json_data) # print(json_data) return def craw_pic( self ,url,filename,html_count): soup = BeautifulSoup(html_count, 'html.parser' , from_encoding = 'utf_8' ) node_pic = soup.find( 'div' , class_ = 'banner' ).find( "a" , href = re. compile ( "/photo/poi/....\." 
)) if node_pic is None : return None else : part_url_pic = node_pic[ 'href' ] full_url_pic = urllib.parse.urljoin(url,part_url_pic) #print(full_url_pic) try : html_pic = urlopen(full_url_pic) except HTTPError as e: return None soup_pic = BeautifulSoup(html_pic.read()) pic_node = soup_pic.find( 'div' , class_ = "album-list" ) print (pic_node) return if __name__ = = "__main__" : spider = BaikeCraw() filename = "D:\PyCharm\\view_spider\\view_points_part.txt" spider.craw(filename) |
總結
用python3根據關鍵詞爬取百度百科的內容到這就基本結束了,希望這篇文章能對大家學習python有所幫助。