A quick comparison of several ways to grab the hot-search titles from Baidu's homepage.
Approach 1: selenium drives a real browser and reads the rendered elements directly.

```python
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('https://www.baidu.com')

# Locate every hot-search title by its CSS class and print its text
ele_hots = driver.find_elements(By.CSS_SELECTOR, '.title-content-title')
for ele_hot in ele_hots:
    print(ele_hot.text)
```
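The comparison table below notes that selenium can locate elements by either CSS or XPath. An equivalent XPath version of the same lookup might look like this (a minimal sketch, not part of the original code):

```python
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('https://www.baidu.com')

# Same lookup expressed as an XPath instead of a CSS selector
for ele_hot in driver.find_elements(By.XPATH, '//*[@class="title-content-title"]'):
    print(ele_hot.text)
```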
Approach 2: pyquery fetches and parses the page itself, so no browser is needed, but a browser-like user-agent header is required.

```python
from pyquery import PyQuery as pq
from local_fake_useragent import UserAgent

# Send a random Chrome user-agent so Baidu serves the normal page
ua_chrome = UserAgent('chrome')
baidu = pq(url='https://www.baidu.com/', headers={'user-agent': ua_chrome.rget})

# Select the hot-search titles with the same CSS class and print each one
hot_news = baidu('.title-content-title').items()
for hot in hot_news:
    print(hot.text())
```
Approach 3: requests downloads the HTML and lxml parses it with XPath; again, a browser-like user-agent header is required.

```python
import requests
from lxml import etree
from local_fake_useragent import UserAgent

# requests also needs a browser-like user-agent header
ua_chrome = UserAgent('chrome')
baidu_text = requests.get(url='https://www.baidu.com/',
                          headers={'user-agent': ua_chrome.rget}).text

# Parse the HTML and select the hot-search titles by class with XPath
hot_news = etree.HTML(baidu_text).xpath('//*[@class="title-content-title"]')
for hot in hot_news:
    print(hot.text)
```
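Under the same assumptions, lxml can also return the text nodes directly via `text()`, which avoids the per-element `.text` access; a small variant:

```python
import requests
from lxml import etree
from local_fake_useragent import UserAgent

ua_chrome = UserAgent('chrome')
baidu_text = requests.get('https://www.baidu.com/',
                          headers={'user-agent': ua_chrome.rget}).text

# Variant: let XPath return the title strings themselves
for title in etree.HTML(baidu_text).xpath('//*[@class="title-content-title"]/text()'):
    print(title)
```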
| | Approach 1 | Approach 2 | Approach 3 |
|---|---|---|---|
| Library | selenium | pyquery | requests + lxml |
| Opens a browser UI | √ | × | × |
| Needs a UA header | × | √ | √ |
| Locator syntax | CSS / XPath | CSS | XPath |
| Getting the data | requires driving the browser | no extra steps | no extra steps |
| Bypassing authentication | several built-in options | needs another library (see the sketch below) | needs another library (see the sketch below) |
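For the last row, one common way to combine libraries is to log in once with selenium and reuse its cookies in a requests session. This is a minimal sketch, not from the original post; the login URL and page flow are placeholders.

```python
import requests
from selenium import webdriver

driver = webdriver.Chrome()
driver.get('https://www.example.com/login')  # placeholder login page (assumption)
# ... perform the login steps in the browser here ...

# Copy the browser's cookies into a requests session
session = requests.Session()
for cookie in driver.get_cookies():          # selenium returns a list of cookie dicts
    session.cookies.set(cookie['name'], cookie['value'])

# Subsequent requests carry the authenticated cookies
resp = session.get('https://www.example.com/protected')  # placeholder URL (assumption)
print(resp.status_code)
```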