爬虫三(Bs4搜索、Selenium基本使用、无界面浏览器、切换选项卡、浏览器前进后退)
作者:YXN-python 阅读量:164 发布日期:2022-03-10
一、Bs4搜索文档树
# Bs4 search demo: parse a small HTML fragment into a soup object.
from bs4 import BeautifulSoup
# Sample document: one <strong> tag and three sister <a> links.
html_doc = """
asdfasdf<strong id="bbb" class="boldest">The Dormouse's story</strong>
Once upon a time there were three little sisters; and their names were
<a id="link1" class="sister" href="http://example.com/elsie">Elsie</a>,
<a id="link2" class="sister" href="http://example.com/lacie">Lacie</a> and
<a id="link3" class="sister" href="http://example.com/tillie">Tillie</a>;
and they lived at the bottom of a well....
"""
# 'lxml' selects the third-party lxml parser backend (must be installed).
soup = BeautifulSoup(html_doc, 'lxml')
字符串
# String filters: find() matches on tag name, id, href, CSS class,
# text content, or a raw attribute dict.
a_by_id = soup.find(name='a', id='link2')                   # <a> with id="link2"
a_by_href = soup.find(href='http://example.com/tillie')     # exact href match
a_by_class = soup.find(class_='story')                      # class_ avoids the keyword clash
first_p = soup.body.find('p')                               # first <p> under <body>
elsie_text = soup.body.find(string='Elsie')                 # matches the text node itself
first_sister = soup.find(attrs={'class': 'sister'})         # attrs dict alternative
print(a_by_id, a_by_href, a_by_class, first_p, elsie_text, first_sister)
正则表达式
import re  # FIX: the original article never imported re, so this snippet raised NameError

# Regex filters: find_all() accepts compiled patterns for tag names and attributes.
res = soup.find_all(name=re.compile('^b'))      # tags whose name starts with 'b'
res1 = soup.find_all(href=re.compile('^http'))  # links whose href starts with 'http'
for item in res1:                               # (loop body indentation restored)
    url = item.attrs.get('href')
    print(url)
# No href in the sample document starts with 'a', so this returns None.
res2 = soup.find(attrs={'href': re.compile('^a')})
print(res2)
列表
# List filters: a list means "match any of these values".
by_classes = soup.find_all(class_=['story', 'sister'])  # class is 'story' OR 'sister'
by_names = soup.find_all(name=['a', 'p'])               # tag is <a> OR <p>
print(by_classes)
布尔
# Boolean filter: name=True matches every tag in the document.
every_tag = soup.find_all(name=True)
print(every_tag)
方法
def has_class_but_no_id(tag):
    """Predicate for find_all: True for tags that have a class attribute but no id.

    FIX: the original snippet lost its indentation (the return statement sat at
    top level, a SyntaxError); the body is restored here.
    """
    return tag.has_attr('class') and not tag.has_attr('id')
# Method filter: pass the predicate itself; find_all calls it on every tag.
matches = soup.find_all(has_class_but_no_id)
print(matches)
二、CSS选择器
# CSS-selector demo: same sample document as above, parsed fresh.
from bs4 import BeautifulSoup
html_doc = """
asdfasdf<strong id="bbb" class="boldest">The Dormouse's story</strong>
Once upon a time there were three little sisters; and their names were
<a id="link1" class="sister" href="http://example.com/elsie">Elsie</a>,
<a id="link2" class="sister" href="http://example.com/lacie">Lacie</a> and
<a id="link3" class="sister" href="http://example.com/tillie">Tillie</a>;
and they lived at the bottom of a well....
"""
# 'lxml' selects the third-party lxml parser backend (must be installed).
soup = BeautifulSoup(html_doc, 'lxml')
# select() takes any CSS selector and always returns a list.
all_links = soup.select('a')                                     # by tag name
by_id = soup.select('#link1')                                    # by id
by_class = soup.select('.sister')                                # by class
nested = soup.select('body>p>a')                                 # direct-child chain
second_child = soup.select('body>p>a:nth-child(2)')              # positional pseudo-class
last_child = soup.select('body>p>a:nth-last-child(1)')           # counted from the end
by_attr = soup.select('a[href="http://example.com/tillie"]')     # attribute value match
print(by_attr)
三、selenium基本使用
Selenium最初是一个自动化测试工具,而爬虫中使用它主要是为了解决Requests无法直接执行JavaScript代码的问题 Selenium本质是通过驱动浏览器,完全模拟浏览器的操作,比如跳转、输入、点击、下拉等,来拿到网页渲染之后的结果,可支持多种浏览器。
安装selenium
pip3 install selenium
下载浏览器驱动 根据自己浏览器的版本进行下载
https://registry.npmmirror.com/binary.html?path=chromedriver/
模拟使用
from selenium import webdriver
import time

# Basic usage: drive a real Chrome via the chromedriver binary.
driver = webdriver.Chrome(executable_path='./chromedriver')  # driver binary in the working dir
driver.get('http://www.baidu.com')  # navigate to the target page
time.sleep(3)                       # let the page render for three seconds
driver.close()                      # close the current tab
driver.quit()                       # shut the whole browser down
四、无界面浏览器
做爬虫,不希望有一个浏览器打开,谷歌支持无头浏览器,后台运行,没有浏览器的图形化(GUI)界面
# Headless mode: run Chrome in the background with no GUI window.
from selenium import webdriver  # missing in the original snippet
from selenium.webdriver.chrome.options import Options
import time  # missing in the original snippet

chrome_options = Options()
# FIX: Chrome's switch takes "width,height", not "1920x3000".
chrome_options.add_argument('--window-size=1920,3000')  # fixed viewport size
chrome_options.add_argument('--disable-gpu')  # Google docs recommend this to work around a headless bug
chrome_options.add_argument('blink-settings=imagesEnabled=false')  # skip images for speed
# FIX: the original passed '--headers', which Chrome ignores; the switch that
# actually disables the visible window (required on GUI-less Linux) is '--headless'.
chrome_options.add_argument('--headless')
# chrome_options.binary_location = r"user/macbookpro/......"  # point at a specific Chrome binary
bro = webdriver.Chrome(executable_path='./chromedriver', options=chrome_options)
bro.get('https://www.jd.com/')  # target URL
print(bro.page_source)  # the HTML as rendered in the browser
time.sleep(3)
bro.close()  # close the tab
bro.quit()   # quit the browser
五、selenium其他使用
1)获取位置属性大小、文本
# Locator cheat-sheet: every By strategy accepted by find_element.
# NOTE(review): `bro` (the driver) and `code` (a previously located element)
# come from earlier snippets in the article; this fragment is reference
# material, not a runnable script.
bro.find_element(by=By.ID, value='id号')
bro.find_element(by=By.LINK_TEXT, value='a标签文本内容')
bro.find_element(by=By.PARTIAL_LINK_TEXT, value='a标签文本内容模糊匹配')
bro.find_element(by=By.CLASS_NAME, value='类名')
bro.find_element(by=By.TAG_NAME, value='标签名')
bro.find_element(by=By.NAME, value='属性name')
# ----- usable in any Selenium language binding -----
bro.find_element(by=By.CSS_SELECTOR, value='css选择器')
bro.find_element(by=By.XPATH, value='xpath选择器')
# Position and size of a located element
print(code.location)
print(code.size)
# ------- (FIX: was a bare run of dashes — invalid Python — now a comment)
print(code.tag_name)  # the element's tag name
print(code.id)        # WebDriver's internal element id
2)等待元素被加载
# Implicit wait: code runs faster than the page renders, so a just-requested
# element may not exist yet. implicitly_wait makes every find_element poll
# for up to the given number of seconds before giving up.
bro.implicitly_wait(10)  # wait up to 10 seconds
3)元素操作
点击
标签.click()
input输入文字
标签.send_keys('文字')
input清空文字
标签.clear()
模拟键盘操作
# Simulate keyboard input: Keys holds special-key constants.
from selenium.webdriver.common.keys import Keys
# Press Enter inside a previously located input element.
input_search.send_keys(Keys.ENTER)
4)执行Js代码
# Execute arbitrary JavaScript inside the page.
from selenium import webdriver  # missing in the original snippet
import time  # missing in the original snippet

bro = webdriver.Chrome(executable_path='./chromedriver')
bro.get('https://www.jd.com')
# FIX: the original passed 'alert("document.cookie")', which pops up the
# literal text "document.cookie"; to display the page's cookies the
# expression must not be quoted.
# NOTE(review): while the alert stays open, later execute_script calls may
# raise UnexpectedAlertPresentException — dismiss it first when testing.
bro.execute_script('alert(document.cookie)')
bro.execute_script('scrollTo(0, 600)')  # scroll down 600px
for step in range(10):  # scroll down in 400px increments (loop indentation restored)
    offset = 400 * (step + 1)
    bro.execute_script('scrollTo(0, %s)' % offset)
    time.sleep(1)
bro.execute_script('scrollTo(0, document.body.scrollHeight)')  # jump to the page bottom
time.sleep(3)
bro.close()
5)切换选项卡
# Tab switching: open a second tab and hop between the two.
driver = webdriver.Chrome(executable_path='./chromedriver')
driver.get('https://www.jd.com')  # first tab: JD
driver.execute_script('window.open()')  # open an empty second tab
driver.switch_to.window(driver.window_handles[1])  # focus the new tab
driver.get('http://www.taobao.com')  # load Taobao in it
time.sleep(2)
driver.switch_to.window(driver.window_handles[0])  # switch back to the JD tab
time.sleep(3)
driver.close()
driver.quit()
6)浏览器前进后退
# History navigation: back() and forward() walk the browser history.
driver = webdriver.Chrome(executable_path='./chromedriver')
driver.get('https://www.jd.com/')
time.sleep(2)
driver.get('https://www.taobao.com/')
time.sleep(2)
driver.get('https://www.baidu.com/')
driver.back()     # go back one page (to Taobao)
time.sleep(1)
driver.forward()  # go forward again (to Baidu)
time.sleep(3)
driver.close()
转载自:CSDN-MeiJin_
YXN-python
2022-03-10