
Python Multithreaded, Async + Multiprocess Crawler Implementation
This article walks through implementation code for Python multithreaded, asynchronous and async + multiprocess crawlers; readers who need it can use it as a reference.
Install Tornado
To save effort you could simply use the grequests library (a minimal sketch follows the install command below); the code in this article uses tornado's asynchronous HTTP client instead. The async part relies on tornado: adapting the example from the official documentation gives a simple asynchronous spider class. Refer to the latest tornado documentation for more detail.
pip install tornado
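As a point of comparison only, here is a minimal grequests sketch; it is not part of the tornado spider below, assumes grequests has been installed (pip install grequests), and uses a throwaway URL list:

import grequests

urls = ['http://www.baidu.com?page=%s' % page for page in range(1, 100)]
reqs = (grequests.get(u, timeout=5) for u in urls)
for resp in grequests.map(reqs, size=10):  # size caps the number of concurrent requests
    if resp is not None:  # failed requests come back as None
        print(resp.url, resp.status_code)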
Asynchronous Crawler
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
from datetime import timedelta
from tornado import httpclient, gen, ioloop, queues
import traceback


class AsySpider(object):
    """A simple class of asynchronous spider."""

    def __init__(self, urls, concurrency=10, **kwargs):
        urls.reverse()
        self.urls = urls
        self.concurrency = concurrency
        self._q = queues.Queue()
        self._fetching = set()
        self._fetched = set()

    def fetch(self, url, **kwargs):
        fetch = getattr(httpclient.AsyncHTTPClient(), 'fetch')
        return fetch(url, **kwargs)

    def handle_html(self, url, html):
        """Handle one html page."""
        print(url)

    def handle_response(self, url, response):
        """Inherit and override this method."""
        if response.code == 200:
            self.handle_html(url, response.body)
        elif response.code == 599:  # retry
            self._fetching.remove(url)
            self._q.put(url)

    @gen.coroutine
    def get_page(self, url):
        try:
            response = yield self.fetch(url)
            print('######fetched %s' % url)
        except Exception as e:
            print('Exception: %s %s' % (e, url))
            raise gen.Return(e)
        raise gen.Return(response)

    @gen.coroutine
    def _run(self):
        @gen.coroutine
        def fetch_url():
            current_url = yield self._q.get()
            try:
                if current_url in self._fetching:
                    return
                print('fetching****** %s' % current_url)
                self._fetching.add(current_url)
                response = yield self.get_page(current_url)
                self.handle_response(current_url, response)  # handle response
                self._fetched.add(current_url)
                for i in range(self.concurrency):
                    if self.urls:
                        yield self._q.put(self.urls.pop())
            finally:
                self._q.task_done()

        @gen.coroutine
        def worker():
            while True:
                yield fetch_url()

        self._q.put(self.urls.pop())  # add the first url

        # Start workers, then wait for the work queue to be empty.
        for _ in range(self.concurrency):
            worker()
        yield self._q.join(timeout=timedelta(seconds=300000))
        assert self._fetching == self._fetched

    def run(self):
        io_loop = ioloop.IOLoop.current()
        io_loop.run_sync(self._run)


class MySpider(AsySpider):

    def fetch(self, url, **kwargs):
        """Override the parent fetch method to add cookies, headers, timeout and so on."""
        cookies_str = "PHPSESSID=j1tt66a829idnms56ppb70jri4; pspt=%7B%22id%22%3A%2233153%22%2C%22pswd%22%3A%228835d2c1351d221b4ab016fbf9e8253f%22%2C%22_code%22%3A%22f779dcd011f4e2581c716d1e1b945861%22%7D; key=%E9%87%8D%E5%BA%86%E5%95%84%E6%9C%A8%E9%B8%9F%E7%BD%91%E7%BB%9C%E7%A7%91%E6%8A%80%E6%9C%89%E9%99%90%E5%85%AC%E5%8F%B8; think_language=zh-cn; SERVERID=a66d7d08fa1c8b2e37dbdc6ffff82d9e|1444973193|1444967835; CNZZDATA1254842228=1433864393-1442810831-%7C1444972138"  # cookie string copied from the browser
        headers = {
            'User-Agent': 'mozilla/5.0 (compatible; baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
            'cookie': cookies_str
        }
        return super(MySpider, self).fetch(  # see the tornado documentation for the available arguments
            url, headers=headers, request_timeout=1
        )

    def handle_html(self, url, html):
        print(url, html)


def main():
    urls = []
    for page in range(1, 100):
        urls.append('http://www.baidu.com?page=%s' % page)
    s = MySpider(urls)
    s.run()


if __name__ == '__main__':
    main()
You can inherit from this class, feed it a list of URLs, and override handle_html to process the fetched pages, for example as in the sketch below.
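As an illustration, here is a small sketch of such a subclass that extracts each page's <title> with a regular expression (the class name, regex and utf-8 decoding are assumptions for the example, not part of the original code):

import re

class TitleSpider(AsySpider):
    """Illustrative subclass: print the <title> of every fetched page."""
    def handle_html(self, url, html):
        text = html.decode('utf-8', 'ignore')  # response.body arrives as bytes
        m = re.search(r'<title>(.*?)</title>', text, re.S | re.I)
        print(url, m.group(1).strip() if m else 'no title')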
Async + Multiprocess Crawler
To push things further, add a process pool on top using the multiprocessing module; the throughput is impressive.
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
from multiprocessing import Pool
from datetime import timedelta
from tornado import httpclient, gen, ioloop, queues


class AsySpider(object):
    """A simple class of asynchronous spider."""

    def __init__(self, urls, concurrency):
        urls.reverse()
        self.urls = urls
        self.concurrency = concurrency
        self._q = queues.Queue()
        self._fetching = set()
        self._fetched = set()

    def handle_page(self, url, html):
        filename = url.rsplit('/', 1)[1]
        with open(filename, 'wb') as f:  # response.body is bytes, so write in binary mode
            f.write(html)

    @gen.coroutine
    def get_page(self, url):
        try:
            response = yield httpclient.AsyncHTTPClient().fetch(url)
            print('######fetched %s' % url)
        except Exception as e:
            print('Exception: %s %s' % (e, url))
            raise gen.Return(b'')
        raise gen.Return(response.body)

    @gen.coroutine
    def _run(self):
        @gen.coroutine
        def fetch_url():
            current_url = yield self._q.get()
            try:
                if current_url in self._fetching:
                    return
                print('fetching****** %s' % current_url)
                self._fetching.add(current_url)
                html = yield self.get_page(current_url)
                self._fetched.add(current_url)
                self.handle_page(current_url, html)
                for i in range(self.concurrency):
                    if self.urls:
                        yield self._q.put(self.urls.pop())
            finally:
                self._q.task_done()

        @gen.coroutine
        def worker():
            while True:
                yield fetch_url()

        self._q.put(self.urls.pop())  # add the first url

        # Start workers, then wait for the work queue to be empty.
        for _ in range(self.concurrency):
            worker()
        yield self._q.join(timeout=timedelta(seconds=300000))
        assert self._fetching == self._fetched

    def run(self):
        io_loop = ioloop.IOLoop.current()
        io_loop.run_sync(self._run)


def run_spider(beg, end):
    """Crawl the pages numbered beg (inclusive) to end (exclusive) in one process."""
    urls = []
    for page in range(beg, end):
        urls.append('http://127.0.0.1/%s.htm' % page)
    s = AsySpider(urls, 10)
    s.run()


def main():
    _st = time.time()
    p = Pool()
    all_num = 73000
    num = 4  # number of cpu cores
    per_num, left = divmod(all_num, num)
    s = range(0, all_num, per_num)
    res = []
    for i in range(len(s) - 1):
        res.append((s[i], s[i + 1]))
    res.append((s[len(s) - 1], all_num))
    print(res)
    for i in res:
        p.apply_async(run_spider, args=(i[0], i[1],))
    p.close()
    p.join()
    print(time.time() - _st)


if __name__ == '__main__':
    main()
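Here main() simply splits the page range evenly across the pool: with all_num = 73000 and num = 4, divmod gives per_num = 18250, so the four worker processes receive the ranges (0, 18250), (18250, 36500), (36500, 54750) and (54750, 73000), and each process runs its own tornado IOLoop over its slice.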
Multithreaded Crawler
Implemented with a thread pool (this example is written for Python 2: it uses the Queue module and print statements).
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import Queue
import sys
import requests
import os
import threading
import time


class Worker(threading.Thread):  # worker thread that processes work requests
    def __init__(self, workQueue, resultQueue, **kwds):
        threading.Thread.__init__(self, **kwds)
        self.setDaemon(True)
        self.workQueue = workQueue
        self.resultQueue = resultQueue

    def run(self):
        while 1:
            try:
                callable, args, kwds = self.workQueue.get(False)  # get a task
                res = callable(*args, **kwds)
                self.resultQueue.put(res)  # put the result
            except Queue.Empty:
                break


class WorkManager:  # creates and manages the thread pool
    def __init__(self, num_of_workers=10):
        self.workQueue = Queue.Queue()  # work queue
        self.resultQueue = Queue.Queue()  # result queue
        self.workers = []
        self._recruitThreads(num_of_workers)

    def _recruitThreads(self, num_of_workers):
        for i in range(num_of_workers):
            worker = Worker(self.workQueue, self.resultQueue)  # create a worker thread
            self.workers.append(worker)  # add it to the pool

    def start(self):
        for w in self.workers:
            w.start()

    def wait_for_complete(self):
        while len(self.workers):
            worker = self.workers.pop()  # take one thread out of the pool
            worker.join()
            if worker.isAlive() and not self.workQueue.empty():
                self.workers.append(worker)  # put it back into the pool
        print 'All jobs were complete.'

    def add_job(self, callable, *args, **kwds):
        self.workQueue.put((callable, args, kwds))  # add a request to the work queue

    def get_result(self, *args, **kwds):
        return self.resultQueue.get(*args, **kwds)


def download_file(url):
    # print 'beg download', url
    requests.get(url).text


def main():
    try:
        num_of_threads = int(sys.argv[1])
    except:
        num_of_threads = 10
    _st = time.time()
    wm = WorkManager(num_of_threads)
    print num_of_threads
    urls = ['http://www.baidu.com'] * 1000
    for i in urls:
        wm.add_job(download_file, i)
    wm.start()
    wm.wait_for_complete()
    print time.time() - _st


if __name__ == '__main__':
    main()
Any one of these three approaches is very efficient, but running them flat out puts noticeable pressure on the target site's servers, especially small sites, so please show a little restraint; one easy mitigation is sketched below.
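One simple way to throttle the asynchronous spiders, shown here only as a sketch (it assumes tornado >= 4.1, where gen.sleep is available), is to lower the concurrency and pause briefly inside the worker loop of AsySpider:

        @gen.coroutine
        def worker():
            while True:
                yield fetch_url()
                yield gen.sleep(0.5)  # wait half a second between requests in each worker

In the threaded version, a time.sleep() call inside download_file together with fewer worker threads achieves the same effect.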