Beginner-Level Python Web Crawler (Repost)
Multi-threaded Python Crawler for Grabbing Proxy Servers
[Date: 2013-07-13] Source: Linux Community, Author: Linux
Python is a powerful scripting language that is often used to write crawlers. Below is a multi-threaded Python crawler that grabs proxy servers.

First, I used Google to find pages that list proxy server addresses, and chose to scrape http://www.88181.com/, pulling about 800 proxies from 8 of its pages.
#!/usr/bin/env python
#coding:utf-8
import urllib2
import re
import threading
import time

rawProxyList = []
checkedProxyList = []

# Proxy pages to scrape; the site obfuscates each port with
# document.write(":"+<letters>), and this table maps the letters
# back to their digits.
portdicts = {'v':"3",'m':"4",'a':"2",'l':"9",'q':"0",'b':"5",'i':"7",'w':"6",'r':"8",'c':"1"}
targets = []
for i in xrange(1, 9):
    target = r"http://www.88181.com/proxy%d.html" % i
    targets.append(target)
#print targets

# Regex: captures IP, encoded port, agent, and location from one table row
p = re.compile(r'''<tr><td>(.+?)<SCRIPT type=text/javascript>document.write\(":"\+(.+?)\)</SCRIPT></td><td>(.+?)</td><td>.+?</td><td>(.+?)</td></tr>''')
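# Worked example of the port de-obfuscation (the sample markup is an
# assumption reconstructed from the regex, not copied from the live site):
#   ...document.write(":"+m+a+l)...  ->  the regex captures "m+a+l",
#   port.split('+') gives ['m', 'a', 'l'], and portdicts maps
#   m->4, a->2, l->9, so the decoded port is "429".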
# Thread class that scrapes proxies from one target page
class ProxyGet(threading.Thread):
    def __init__(self, target):
        threading.Thread.__init__(self)
        self.target = target

    def getProxy(self):
        print "Target site: " + self.target
        req = urllib2.urlopen(self.target)
        result = req.read()
        #print chardet.detect(result)
        matchs = p.findall(result)
        for row in matchs:
            ip = row[0]
            port = row[1]
            # Decode the obfuscated port, e.g. "m+a+l" -> "429"
            port = map(lambda x: portdicts[x], port.split('+'))
            port = ''.join(port)
            agent = row[2]
            addr = row[3].decode("cp936").encode("utf-8")
            proxy = [ip, port, addr]
            #print proxy
            rawProxyList.append(proxy)

    def run(self):
        self.getProxy()
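# All ProxyGet threads append to the shared rawProxyList without a lock;
# this works because in CPython list.append is atomic under the GIL.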
# Thread class that validates one slice of the scraped proxies
class ProxyCheck(threading.Thread):
    def __init__(self, proxyList):
        threading.Thread.__init__(self)
        self.proxyList = proxyList
        self.timeout = 5
        self.testUrl = "http://www.baidu.com/"
        # Part of Baidu's ICP licence number shown in its page footer;
        # finding it confirms the proxy really returned Baidu's page
        self.testStr = "030173"

    def checkProxy(self):
        cookies = urllib2.HTTPCookieProcessor()
        for proxy in self.proxyList:
            proxyHandler = urllib2.ProxyHandler({"http": r'http://%s:%s' % (proxy[0], proxy[1])})
            #print r'http://%s:%s' % (proxy[0], proxy[1])
            opener = urllib2.build_opener(cookies, proxyHandler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')]
            #urllib2.install_opener(opener)
            t1 = time.time()
            try:
                req = opener.open(self.testUrl, timeout=self.timeout)
                #print "urlopen is ok...."
                result = req.read()
                #print "read html...."
                timeused = time.time() - t1
                pos = result.find(self.testStr)
                #print "pos is %s" % pos
                if pos > 1:
                    checkedProxyList.append((proxy[0], proxy[1], proxy[2], timeused))
                    #print "ok ip: %s %s %s %s" % (proxy[0], proxy[1], proxy[2], timeused)
                else:
                    continue
            except Exception, e:
                #print e.message
                continue

    def run(self):
        self.checkProxy()
if __name__ == "__main__":
    getThreads = []
    checkThreads = []

    # Start one scraper thread per target page
    for i in range(len(targets)):
        t = ProxyGet(targets[i])
        getThreads.append(t)

    for i in range(len(getThreads)):
        getThreads[i].start()

    for i in range(len(getThreads)):
        getThreads[i].join()

    print '.'*10 + "Fetched %s proxies in total" % len(rawProxyList) + '.'*10

    # Start 20 checker threads; split the scraped proxies into 20 slices,
    # one per thread ((len+19)/20 is integer ceiling division, so the
    # slices cover the whole list)
    for i in range(20):
        t = ProxyCheck(rawProxyList[((len(rawProxyList)+19)/20) * i:((len(rawProxyList)+19)/20) * (i+1)])
        checkThreads.append(t)

    for i in range(len(checkThreads)):
        checkThreads[i].start()

    for i in range(len(checkThreads)):
        checkThreads[i].join()

    print '.'*10 + "%s proxies passed the check" % len(checkedProxyList) + '.'*10

    # Persist the working proxies, sorted by response time
    f = open("proxy_list.txt", 'w+')
    for proxy in sorted(checkedProxyList, cmp=lambda x, y: cmp(x[3], y[3])):
        print "checked proxy is: %s:%s\t%s\t%s" % (proxy[0], proxy[1], proxy[2], proxy[3])
        f.write("%s:%s\t%s\t%s\n" % (proxy[0], proxy[1], proxy[2], proxy[3]))
    f.close()
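To put the harvested list to use, here is a minimal sketch (not part of the original post; it assumes the ip:port / location / time format written by the script above) that takes the fastest proxy from proxy_list.txt and fetches a page through it:

#!/usr/bin/env python
#coding:utf-8
# Minimal usage sketch: fetch a page through the fastest checked proxy.
# Assumes proxy_list.txt holds "ip:port\tlocation\ttime" lines sorted by
# response time, as produced by the script above.
import urllib2

f = open("proxy_list.txt")
hostport = f.readline().split('\t')[0]    # "ip:port" of the fastest proxy
f.close()

proxyHandler = urllib2.ProxyHandler({"http": "http://%s" % hostport})
opener = urllib2.build_opener(proxyHandler)
print opener.open("http://www.baidu.com/", timeout=5).read()[:200]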
Partial log:

Target site: http://www.88181.com/proxy1.html
Target site: http://www.88181.com/proxy2.html
Target site: http://www.88181.com/proxy3.html
Target site: http://www.88181.com/proxy4.html
Target site: http://www.88181.com/proxy5.html
Target site: http://www.88181.com/proxy6.html
Target site: http://www.88181.com/proxy7.html
Target site: http://www.88181.com/proxy8.html
..........Fetched 800 proxies in total..........
..........478 proxies passed the check..........
173.213.113.111:8089	United States	0.341555833817
173.213.113.111:3128	United States	0.347477912903
210.101.131.232:8080	South Korea, Seoul	0.418715000153