## urllib.request
~~~
import urllib.request

url = 'http://www.baidu.com/'
response = urllib.request.urlopen(url).read()
# urllib.request.urlopen opens a URL; read() returns the page content as bytes
~~~
~~~
import urllib.request

url = 'https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1543822283&di=b327e6e2dc59105bcb73a174bff94919&imgtype=jpg&er=1&src=http%3A%2F%2Ftupian.qqjay.com%2Fu%2F2017%2F1201%2F2_161641_2.jpg'
res = urllib.request.urlopen(url)

# Download method 1: read the response yourself and write the bytes to a file
with open('fengjing.jpg', 'wb') as f:
    f.write(res.read())

# Download method 2: urllib.request.urlretrieve saves the resource straight to the given path
urllib.request.urlretrieve(url, 'tupian.jpg')
~~~
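`urlretrieve` also accepts an optional `reporthook` callback that is invoked as `(block_num, block_size, total_size)` while the download runs. A minimal sketch of a progress printer (`show_progress` and the logo URL are illustrative, not from the original):

~~~
import urllib.request

url = 'http://www.baidu.com/img/bd_logo1.png'  # any downloadable resource

# urlretrieve calls the hook as (blocks transferred so far, block size in bytes, total size)
def show_progress(block_num, block_size, total_size):
    if total_size > 0:
        percent = min(block_num * block_size / total_size * 100, 100)
        print('\r%.1f%%' % percent, end='')

urllib.request.urlretrieve(url, 'logo.png', reporthook=show_progress)
~~~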
~~~
import urllib.request

url = 'http://www.baidu.com/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36',
    'Accept-Language': 'zh-CN,zh;q=0.9',
}
# urllib.request.Request builds a request object that carries custom headers;
# pass it to urlopen to actually send the request
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
~~~
~~~
import urllib.request

# Route the program's HTTP traffic through a proxy server
handler = urllib.request.ProxyHandler({'http': '124.243.226.18:8888'})
opener = urllib.request.build_opener(handler)

url = 'http://www.baidu.com/s?wd=IP'
headers = {
    "Host": "www.baidu.com",
    "Connection": "keep-alive",
    "Cache-Control": "max-age=0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
    "Accept-Language": "zh-CN,zh;q=0.8",
}
request = urllib.request.Request(url=url, headers=headers)
response = opener.open(request)
print(response.read().decode())
~~~
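If every request in the program should go through the proxy, the opener can also be installed globally with `urllib.request.install_opener`, after which plain `urlopen` calls use it. A minimal sketch:

~~~
import urllib.request

handler = urllib.request.ProxyHandler({'http': '124.243.226.18:8888'})
opener = urllib.request.build_opener(handler)
# install_opener makes this opener the process-wide default,
# so every subsequent urlopen() call is routed through the proxy
urllib.request.install_opener(opener)
response = urllib.request.urlopen('http://www.baidu.com/s?wd=IP')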
## urllib.parse
~~~
import urllib.parse

url = 'http://www.baidu.com/s?wd=%E7%9F%A5%E4%B9%8E'
res = urllib.parse.urlparse(url)
print(res)
# Output:
# ParseResult(scheme='http', netloc='www.baidu.com', path='/s', params='', query='wd=%E7%9F%A5%E4%B9%8E', fragment='')
# urllib.parse.urlparse splits a URL into its components
~~~
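The `ParseResult` can be reassembled with `urllib.parse.urlunparse`, the inverse operation; a quick sketch:

~~~
import urllib.parse

res = urllib.parse.urlparse('http://www.baidu.com/s?wd=%E7%9F%A5%E4%B9%8E')
# urlunparse rebuilds the URL string from the six parsed components
print(urllib.parse.urlunparse(res))
# http://www.baidu.com/s?wd=%E7%9F%A5%E4%B9%8E
~~~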
~~~
import urllib.parse

word = '知乎'
res = urllib.parse.quote(word)
print(res)
res = urllib.parse.unquote(res)
print(res)
# Output:
# %E7%9F%A5%E4%B9%8E
# 知乎
# urllib.parse.quote percent-encodes a string; urllib.parse.unquote decodes it back
~~~
~~~
import urllib.parse

params = {
    'wd': '知乎',
}
word = urllib.parse.urlencode(params)
print(word)
# Output:
# wd=%E7%9F%A5%E4%B9%8E
# urllib.parse.urlencode turns a dict into a percent-encoded query string
~~~
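In practice the encoded query string is appended to a base URL to build a complete GET request; a minimal sketch combining `urlencode` with `urlopen`:

~~~
import urllib.request
import urllib.parse

base = 'http://www.baidu.com/s'
params = {'wd': '知乎'}
# Join the base URL and the encoded query string, then request it as usual
url = base + '?' + urllib.parse.urlencode(params)
response = urllib.request.urlopen(url)
~~~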
## requests
```
import requests

response = requests.get('http://www.baidu.com/')
print(response.url)               # final URL of the response
print(response.text)              # body decoded with the encoding requests guessed
print(response.status_code)      # HTTP status code
print(response.headers)          # response headers (dict-like)
print(response.cookies)          # cookies set by the server
print(response.content.decode()) # raw bytes decoded manually (UTF-8 by default)

Basic methods
```
```
http://www.baidu.com/
<!DOCTYPE html> <!--STATUS OK--><html> ... </html>   (response.text: garbled, because requests guessed the wrong encoding from the headers)
200
{'Cache-Control': 'private, no-cache, no-store, proxy-revalidate, no-transform', 'Connection': 'Keep-Alive', 'Content-Encoding': 'gzip', 'Content-Type': 'text/html', 'Date': 'Mon, 03 Dec 2018 13:39:39 GMT', 'Last-Modified': 'Mon, 23 Jan 2017 13:27:32 GMT', 'Pragma': 'no-cache', 'Server': 'bfe/1.0.8.18', 'Set-Cookie': 'BDORZ=27315; max-age=86400; domain=.baidu.com; path=/', 'Transfer-Encoding': 'chunked'}
<RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
<!DOCTYPE html> <!--STATUS OK--><html> ... <title>百度一下,你就知道</title> ... </html>   (response.content.decode(): the same page, decoded correctly as UTF-8)

Output
```
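The garbled `response.text` above appears because the `Content-Type` header carries no charset, so requests falls back to ISO-8859-1. A common fix is to point `response.encoding` at the encoding requests detects from the body; a minimal sketch:

```
import requests

response = requests.get('http://www.baidu.com/')
# apparent_encoding is requests' guess based on the body bytes;
# assigning it (or simply 'utf-8') makes response.text decode correctly
response.encoding = response.apparent_encoding
print(response.text)  # now matches response.content.decode()
```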
```
'''
Target site:
http://www.shicimingju.com/book/hongloumeng.html
Approach:
1. Decide on the url
2. Fake the browser headers
3. Send the request
4. Fetch the page the url points to
5. Parse the page for chapter titles and chapter links
6. Download each chapter's text to a local file
'''
import requests
from bs4 import BeautifulSoup

def handle_requests(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36',
    }
    # headers must be a keyword argument: the second positional
    # parameter of requests.get is params, not headers
    response = requests.get(url, headers=headers)
    return response

def download_text(title, href):
    response = handle_requests(href)
    soup = BeautifulSoup(response.text, 'lxml')
    res = soup.find('div', class_='chapter_content').find_all('p')
    with open('hongloumeng.txt', 'a', encoding='utf-8') as f:
        f.write(title)
        print(title)
        for p in res:
            f.write(p.text)

def parse_content(content):
    soup = BeautifulSoup(content, 'lxml')
    # Every chapter link sits under the .book-mulu table of contents
    res = soup.select('.book-mulu > ul > li > a')
    for i in res:
        title = i.text
        href = 'http://www.shicimingju.com' + i['href']
        download_text(title, href)
        print('Downloading...')

def main():
    url = 'http://www.shicimingju.com/book/hongloumeng.html'
    # Build the headers and send the request
    response = handle_requests(url)
    # Page content returned by the server
    content = response.text
    # Parse the page for chapter titles and links
    parse_content(content)

if __name__ == '__main__':
    main()

Scraping a novel with requests
```
```
import requests

url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
data = {
    'cname': '',
    'pid': '',
    'keyword': '杭州',
    'pageIndex': '1',
    'pageSize': '10',
}
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36',
}
# Form data goes in the data parameter of requests.post
response = requests.post(url, data=data, headers=headers)
print(response.text)

A POST request with requests
```
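Since this endpoint responds with JSON, `response.json()` can parse the body directly instead of printing raw text; a short sketch, assuming the interface behaves as above:

```
import requests

url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
data = {'cname': '', 'pid': '', 'keyword': '杭州', 'pageIndex': '1', 'pageSize': '10'}
response = requests.post(url, data=data)
# response.json() deserializes the JSON body into Python dicts/lists
result = response.json()
print(type(result), result)
```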
## selenium
selenium started out as a testing tool; crawlers use it mainly because requests cannot execute JavaScript.
selenium works by driving a real browser and fully simulating user actions such as navigating, typing, clicking, and scrolling, so it returns the page as rendered after JavaScript runs. It supports multiple browsers.
```
from selenium import webdriver
from bs4 import BeautifulSoup
import time

# Note: Selenium 4 replaces find_element_by_xpath with find_element(By.XPATH, ...)
path = r'D:\Python1806\spider\day5\chromedriver.exe'
url = 'https://so.gushiwen.org/user/login.aspx'
browser = webdriver.Chrome(executable_path=path)

# Open the login page
browser.get(url)
time.sleep(2)

# Locate the username and password inputs and fill them in
username = browser.find_element_by_xpath('//input[@id="email"]')
username.send_keys('wusir666666@163.com')
time.sleep(1)
pwd = browser.find_element_by_xpath("//input[@id='pwd']")
pwd.send_keys('ymmnxhwm13579')
time.sleep(1)

# page_source holds the HTML after JavaScript has run, ready for BeautifulSoup
soup = BeautifulSoup(browser.page_source, 'lxml')

# Save a screenshot so the captcha can be read and typed in by hand
browser.save_screenshot('login.png')
code = input('Enter the captcha: ')
checkcode = browser.find_element_by_xpath("//input[@id='code']")
checkcode.send_keys(code)

# Click the login button
login = browser.find_element_by_xpath("//input[@id='denglu']")
login.click()

Basic selenium usage
```
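The fixed `time.sleep` pauses above can be replaced with explicit waits, and Chrome can run without a visible window. A minimal sketch using the Selenium 4 API (assumes chromedriver is on PATH; the credentials are placeholders):

```
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

options = webdriver.ChromeOptions()
options.add_argument('--headless')  # run Chrome with no visible window
browser = webdriver.Chrome(options=options)

browser.get('https://so.gushiwen.org/user/login.aspx')
# Wait up to 10 seconds for the email input to appear, instead of a fixed sleep
email = WebDriverWait(browser, 10).until(
    EC.presence_of_element_located((By.XPATH, '//input[@id="email"]'))
)
email.send_keys('user@example.com')  # placeholder account
browser.quit()
```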