Queue.join() does not unblock

I am trying to write a Python script that crawls a website in parallel. I built a prototype that lets me crawl to a given depth.

However, join() does not seem to work, and I cannot understand why.

Here is my code:

from threading import Thread
import Queue
import urllib2
from BeautifulSoup import BeautifulSoup
from urlparse import urljoin


def doWork():
    while True:
        # Non-blocking get; spin until a URL is available.
        try:
            myUrl = q_start.get(False)
        except:
            continue
        try:
            c = urllib2.urlopen(myUrl)
        except:
            continue
        # Collect every absolute http link on the page and queue it.
        soup = BeautifulSoup(c.read())
        links = soup('a')
        for link in links:
            if 'href' in dict(link.attrs):
                url = urljoin(myUrl, link['href'])
                if url.find("'") != -1:
                    continue
                url = url.split('#')[0]  # strip the fragment
                if url[0:4] == 'http':
                    print url
                    q_new.put(url)


q_start = Queue.Queue()
q_new = Queue.Queue()

# Start 20 daemon worker threads.
for i in range(20):
    t = Thread(target=doWork)
    t.daemon = True
    t.start()

q_start.put("http://google.com")
print "loading"
q_start.join()
print "end"

Answer
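The likely cause: Queue.join() only unblocks once task_done() has been called for every item that was put() on the queue. The worker above consumes items with q_start.get(False) but never calls q_start.task_done(), so the queue's count of unfinished tasks never reaches zero and join() blocks forever.

Below is a minimal sketch of the fix, keeping the names from the question (q_start, q_new, doWork) and swapping the non-blocking busy-wait for a blocking get():

def doWork():
    while True:
        myUrl = q_start.get()  # blocking get; no busy-wait needed
        try:
            c = urllib2.urlopen(myUrl)
            soup = BeautifulSoup(c.read())
            for link in soup('a'):
                if 'href' in dict(link.attrs):
                    url = urljoin(myUrl, link['href'])
                    url = url.split('#')[0]
                    if url[0:4] == 'http':
                        q_new.put(url)
        except:
            pass  # a failed fetch still counts as a finished task
        finally:
            q_start.task_done()  # pair every get() with one task_done()

With each get() paired with exactly one task_done(), q_start.join() returns as soon as the seed URL has been processed (the newly found links go onto q_new, which join() does not wait for), and "end" is printed.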