2013-11-02 120 views
1

我要取出下面链接页面里的 javascript 表:http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml (Python Selenium,抓取网页中的 javascript 表)

# Fetch the league page with Selenium (Firefox) so that the
# JavaScript-rendered match tables are present in the DOM before
# handing the HTML to BeautifulSoup.
import codecs 
import lxml.html as lh 
from lxml import etree 
import requests 
from selenium import webdriver 
import urllib2 
from bs4 import BeautifulSoup 

URL = 'http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml' 

# Cap Firefox's parallel connections so the site is not hammered.
profile = webdriver.FirefoxProfile() 
profile.set_preference('network.http.max-connections', 30) 
profile.update_preferences() 
browser = webdriver.Firefox(profile) 
browser.get(URL) 

# page_source is already a single unicode string, so the original
# ''.join(content) was a no-op and has been dropped.
content = browser.page_source 
soup = BeautifulSoup(content) 

当我取得网页的内容之后,我需要知道该联赛总共有多少轮足球比赛。

下面的代码只能找到一个表,请问如何获得全部 38 轮足球比赛的表?谢谢。

# scrap the round of soccer matches 
soup.findAll('td', attrs={'class': 'lsm2'}) 

# print the soccer matches' result of default round, but there have 38 rounds (id from s1 to s38) 
print soup.find("div", {"id": "Match_Table"}).prettify() 

回答

1
# ============================================================ 
import codecs 
import lxml.html as lh 
from lxml import etree 
import requests 
from selenium import webdriver 
import urllib2 
from bs4 import BeautifulSoup 
from pandas import DataFrame, Series 
import html5lib 

URL = 'http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml' 
profile = webdriver.FirefoxProfile() 
profile.set_preference('network.http.max-connections', 30) 
profile.update_preferences() 
browser = webdriver.Firefox(profile) 
browser.get(URL) 

content = browser.page_source 
soup = BeautifulSoup(''.join(content)) 
# num = soup.findAll('td', attrs={'class': 'lsm2'}) 
# num = soup.findAll('table')[2].findAll('td')[37].text 
# soup.findAll('table',attrs={'class':'e_run_tb'}) 

    num1 = soup.findAll('table')[2].findAll('tr') 
    for i in range(1,len(num1)+1): 
     for j in range(1,len(num1[i-1])+1): 
      # click button on website 
      clickme = browser.find_element_by_xpath('//*[@id="e_run_tb"]/tbody/tr'+'['+str(i)+']'+'/td'+'['+str(j)+']') 
      clickme.click() 

      content = browser.page_source 
      soup = BeautifulSoup(''.join(content)) 

      table = soup.find('div', attrs={'class': 'e_matches'}) 
      rows = table.findAll('tr') 
#   for tr in rows: 
#    cols = tr.findAll('td') 
#    for td in cols: 
#     text = td.find(text=True) 
#     print text, 
#    print 
      for tr in rows[5:16]: #from row 5 to 16 
       cols = tr.findAll('td') 
       for td in cols: 
        text = td.find(text=True) 
        print text, 
       print 
      print 
0

最简单的办法可能是用 Selenium 依次点击第 2 到第 38 轮的 lsm2 链接(第 1 轮默认已加载),每次点击后抓取 id 为 Match_Table 的表,并在过程中累积结果。

+0

但这样似乎很耗时,因为有相当多的英国足球联赛需要抓取。有什么更好的想法吗?:) –