从 BeautifulSoup 页面检索所有信息

Rya*_*eld 2 python beautifulsoup web-crawler web-scraping selenium-webdriver

我正在尝试抓取 OldNavy 网页上产品的网址。然而,它只给出了产品列表的一部分,而不是整个列表(例如,当 URL 远远超过 8 个时,只给出 8 个)。我希望有人可以帮助并找出问题所在。

from bs4 import BeautifulSoup
from selenium import webdriver
import html5lib
import platform
import urllib
import urllib2
import json


# Listing page to scrape, and the site root used to absolutize product hrefs.
# BUG FIX: the URL must be a quoted string literal (was a bare SyntaxError).
link = "http://oldnavy.gap.com/browse/category.do?cid=1035712&sop=true"
base_url = "http://www.oldnavy.com"

# Headless browser renders the JavaScript-driven page before parsing.
driver = webdriver.PhantomJS()
driver.get(link)
html = driver.page_source
soup = BeautifulSoup(html, "html5lib")

# Each product tile sits in a <div class="sp_sm spacing_small">.
big_divs = soup.findAll("div", class_="sp_sm spacing_small")
j = 0  # BUG FIX: counter was used (j = j + 1) without ever being initialized
for div in big_divs:
    for anchor in div.findAll("a"):
        j += 1
        # hrefs are site-relative, so prefix the site root to get a full URL.
        productUrl = base_url + anchor["href"]
        print(productUrl)  # print() works on both Python 2 and 3
Run Code Online (Sandbox Code Playgroud)

fur*_*ras 5

此页面使用 JavaScript 加载元素,但只有当您向下滚动页面时才会加载更多内容。

它被称为"lazy loading"

您也必须滚动页面。

from selenium import webdriver
from bs4 import BeautifulSoup
import time

# Listing page to scrape, and the site root used to absolutize product hrefs.
link = "http://oldnavy.gap.com/browse/category.do?cid=1035712&sop=true"
base_url = "http://www.oldnavy.com"

driver = webdriver.PhantomJS()
driver.get(link)

# --- scrolling ---
# The page lazy-loads products as you scroll. Keep scrolling to the bottom
# until the document height stops growing, meaning everything is loaded.
last_height = driver.execute_script("return document.body.scrollHeight")

pause = 0.5  # seconds to wait for the lazy loader to fetch the next batch
while True:
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(pause)
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break  # no growth since last scroll: all products are loaded
    last_height = new_height

# --- parsing ---

html = driver.page_source
soup = BeautifulSoup(html, "html5lib")

divs = soup.find_all("div", class_="sp_sm spacing_small")
for div in divs:
    # NOTE: loop variable renamed from `link` to `anchor` so it no longer
    # shadows the module-level `link` URL above.
    for anchor in div.find_all("a"):
        # BUG FIX: this print was indented at the same level as its `for`
        # header (an IndentationError); it belongs inside the inner loop.
        print(base_url + anchor["href"])
Run Code Online (Sandbox Code Playgroud)

想法来源:https://stackoverflow.com/a/28928684/1832058