Python Selenium,抓取网页JavaScript表

Python Selenium, scraping webpage javascript table

本文关键字:JavaScript 网页 抓取 Selenium Python      更新时间:2023-09-26

我将从下面链接的网页中抓取 JavaScript 渲染的表格:http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml

# -- Open the league history page in a real browser and parse the rendered HTML --
import codecs
import lxml.html as lh
from lxml import etree
import requests
from selenium import webdriver
import urllib2
from bs4 import BeautifulSoup

# The fixtures table on this page is rendered by JavaScript, so a real
# browser (Selenium/Firefox) is required; plain requests/urllib2 would
# only see the empty page skeleton.
URL = 'http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml'

# Cap concurrent HTTP connections so the site is not hammered.
profile = webdriver.FirefoxProfile()
profile.set_preference('network.http.max-connections', 30)
profile.update_preferences()

browser = webdriver.Firefox(profile)
browser.get(URL)

# BUG FIX: page_source is already a single string -- ''.join(content)
# was a character-by-character no-op. Parse it directly.
content = browser.page_source
soup = BeautifulSoup(content)

当我获得网页的内容时,我需要知道该特定联赛的足球比赛轮数。

下面的代码只找到了其中一个表格,请问如何获得全部 38 轮足球比赛的表格?谢谢。

# scrap the round of soccer matches
# Collects the <td class="lsm2"> cells -- these are the round-selector
# buttons of the fixtures widget (presumably one per round, s1..s38 --
# TODO confirm against the live page). Note: the result is discarded;
# assign it to a variable to use it.
soup.findAll('td', attrs={'class': 'lsm2'})
# print the soccer matches' result of default round, but there have 38 rounds (id from s1 to s38)
# Only the currently selected round is present in the DOM, which is why
# a single findAll/find here cannot reach the other 37 rounds.
print soup.find("div", {"id": "Match_Table"}).prettify()
# ============================================================
import codecs
import lxml.html as lh
from lxml import etree
import requests
from selenium import webdriver
import urllib2
from bs4 import BeautifulSoup
from pandas import DataFrame, Series
import html5lib
URL = 'http://data2.7m.cn/history_Matches_Data/2009-2010/92/en/index.shtml'
profile = webdriver.FirefoxProfile()
profile.set_preference('network.http.max-connections', 30)
profile.update_preferences()
browser = webdriver.Firefox(profile)
browser.get(URL)
content = browser.page_source
soup = BeautifulSoup(''.join(content))
# num = soup.findAll('td', attrs={'class': 'lsm2'})
# num = soup.findAll('table')[2].findAll('td')[37].text
# soup.findAll('table',attrs={'class':'e_run_tb'})
    num1 = soup.findAll('table')[2].findAll('tr')
    for i in range(1,len(num1)+1):
        for j in range(1,len(num1[i-1])+1):
            # click button on website
            clickme = browser.find_element_by_xpath('//*[@id="e_run_tb"]/tbody/tr'+'['+str(i)+']'+'/td'+'['+str(j)+']')
            clickme.click()
            content = browser.page_source
            soup = BeautifulSoup(''.join(content))
            table = soup.find('div', attrs={'class': 'e_matches'})
            rows = table.findAll('tr')
#           for tr in rows:
#             cols = tr.findAll('td')
#             for td in cols:
#                    text = td.find(text=True)
#                    print text,
#                print
            for tr in rows[5:16]: #from row 5 to 16
                cols = tr.findAll('td')
                for td in cols:
                    text = td.find(text=True)
                    print text,
                print
            print
最简单的方法可能是用 Selenium 依次点击第 2 到第 38 轮的 lsm2 链接(第 1 轮默认已显示),每次点击后抓取 id 为 Match_Table 的表格,并随时把结果累积起来。