# Using urllib
# Sending requests
# 1. urlopen
import urllib.request
response = urllib.request.urlopen('https://www.python.org')
#print(response.read().decode('utf-8'))
# Use type() to print the type of the response object
print(type(response))
print(response.status)
print(response.getheaders())
print(response.getheader('Server'))
# Passing parameters
# urllib.request.urlopen(url, data=None, [timeout,]*, cafile=None, capath=None, cadefault=False, context=None)
# The data parameter is optional. To pass it, first convert it with bytes() into byte-stream-encoded content, i.e. a bytes object.
# Also, once data is passed, the request is sent as POST instead of GET.
# The site requested here is httpbin.org, which provides HTTP request testing.
import urllib.parse
data = bytes(urllib.parse.urlencode({'word':'hello'}),encoding = 'utf8')
response = urllib.request.urlopen('http://httpbin.org/post',data=data)
print(response.read())
b'{\n "args": {}, \n "data": "", \n "files": {}, \n "form": {\n "word": "hello"\n }, \n "headers": {\n "Accept-Encoding": "identity", \n "Content-Length": "10", \n "Content-Type": "application/x-www-form-urlencoded", \n "Host": "httpbin.org", \n "User-Agent": "Python-urllib/3.12", \n "X-Amzn-Trace-Id": "Root=1-67c1704f-4eb3a4ac2b4e81cb47c01cfc"\n }, \n "json": null, \n "origin": "154.40.60.12", \n "url": "http://httpbin.org/post"\n}\n'
# Set a timeout so that a page that has not responded for too long is skipped during crawling
import socket
import urllib.error
try:
    response = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')
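# An optional sketch (not from the original notes): wrap urlopen in a simple
# retry helper so a transient timeout is retried before giving up;
# fetch_with_retry and its default values are assumed names, not a urllib API.
def fetch_with_retry(url, max_retries=3, timeout=1):
    for attempt in range(max_retries):
        try:
            return urllib.request.urlopen(url, timeout=timeout).read()
        except urllib.error.URLError as e:
            if isinstance(e.reason, socket.timeout) and attempt < max_retries - 1:
                continue  # timed out, try the request again
            raise
# data = fetch_with_retry('http://httpbin.org/get')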
# 2. Request
# class urllib.request.Request(url, data=None, headers={}, origin_req_host=None, unverifiable=False, method=None)
# url is the only required parameter; data must be a bytes object; headers can also be added afterwards by calling add_header() on the Request instance
import urllib.request
request = urllib.request.Request('https://python.org')
response = urllib.request.urlopen(request)
print(response.read().decode('utf-8'))
from urllib import parse,request
url = 'http://httpbin.org/post'
headers = {'User-Agent':'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)','Host':'httpbin.org'}
dict = {'name':'Germey'}
data = bytes(parse.urlencode(dict),encoding='utf8')
req = request.Request(url,data=data,headers=headers,method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
{
  "args": {},
  "data": "",
  "files": {},
  "form": {
    "name": "Germey"
  },
  "headers": {
    "Accept-Encoding": "identity",
    "Content-Length": "11",
    "Content-Type": "application/x-www-form-urlencoded",
    "Host": "httpbin.org",
    "User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)",
    "X-Amzn-Trace-Id": "Root=1-67c17458-6ec1e2f400a80e126995c7e6"
  },
  "json": null,
  "origin": "154.40.60.12",
  "url": "http://httpbin.org/post"
}
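# As noted above, headers can also be attached by calling add_header() on the
# Request instance instead of passing a headers dict; a minimal equivalent sketch
# reusing the url and data defined above:
req = request.Request(url=url, data=data, method='POST')
req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')
response = request.urlopen(req)
print(response.status)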
# 3. Advanced usage
# There are various handlers: some handle login authentication, some handle Cookies, some handle proxy settings
# BaseHandler in urllib.request is the parent class of all other handlers; it provides the most basic methods, such as default_open and protocol_request
# Another important class is OpenerDirector, usually just called an Opener; in short, handlers are used to build openers
# HTTPBasicAuthHandler manages authentication: if a link requires authentication when opened, it can be used to handle it
from urllib.request import HTTPPasswordMgrWithDefaultRealm,HTTPBasicAuthHandler,build_opener
from urllib.error import URLError
username = 'admin'
password = '*******'
url = 'http://127.0.0.1:5244/'
p = HTTPPasswordMgrWithDefaultRealm()
p.add_password(None, url, username, password)
auth_handler = HTTPBasicAuthHandler(p)
opener = build_opener(auth_handler)
try:
    result = opener.open(url)
    html = result.read().decode('utf-8')
    print(html)
except URLError as e:
    print(e.reason)
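# Optional follow-up sketch: install_opener() registers the opener globally,
# so later plain urlopen() calls go through the same authentication handler.
urllib.request.install_opener(opener)
# result = urllib.request.urlopen(url)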
# Proxies, using Kuaidaili (kdlapi.com) as an example
from urllib.request import ProxyHandler
import requests
api = "https://dps.kdlapi.com/api/getdps"
# Request parameters
params = {
    "secret_id": "your_secret_id",
    "signature": "your_signature",
    "num": 1,  # number of proxy IPs to extract
}
# Get the response content
response = requests.get(api, params=params)
# Parse the proxy IP
proxy_ip = response.text.strip()  # strip extra whitespace and newlines
# Check whether a valid proxy IP was obtained
if not proxy_ip:
    print("No valid proxy IP obtained")
else:
    # Define the proxy
    username = "*******"  # replace with your username
    password = "*******"  # replace with your password
    proxy = f"https://{username}:{password}@{proxy_ip}"
    print(f"Fetched proxy IP: {proxy_ip}")
    # Create a ProxyHandler
    proxy_handler = ProxyHandler({'http': proxy, 'https': proxy})
    # Create an opener
    opener = urllib.request.build_opener(proxy_handler)
    try:
        response = opener.open('https://www.baidu.com')
        print(response.read().decode('utf-8'))
    except URLError as e:
        print(e.reason)
Fetched proxy IP: 218.95.37.135:40358
<html>
<head>
<script>
location.replace(location.href.replace("https://","http://"));
</script>
</head>
<body>
<noscript><meta http-equiv="refresh" content="0;url=http://www.baidu.com/"></noscript>
</body>
</html>
# Cookies
# Fetch the site's Cookies
import http.cookiejar, urllib.request
# Declare a CookieJar object
cookie = http.cookiejar.CookieJar()
# Use HTTPCookieProcessor to build a handler
handler = urllib.request.HTTPCookieProcessor(cookie)
# Build an Opener with build_opener and call its open method
opener = urllib.request.build_opener(handler)
response = opener.open('https://www.baidu.com')
for item in cookie:
    print(item.name + "=" + item.value)
BD_NOT_HTTPS=1
BIDUPSID=BFE0F5D5293A45F6AEC0BA9BA07B81DA
PSTM=1740734698
BAIDUID=BFE0F5D5293A45F6457A648A94C15082:FG=1
# Save the Cookies to a text file
filename = 'cookies.txt'
# CookieJar is replaced with MozillaCookieJar, which handles reading and writing Cookies to files
cookie = http.cookiejar.MozillaCookieJar(filename)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
cookie.save(ignore_discard=True, ignore_expires=True)
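# A small sketch of the reverse step (assuming cookies.txt was saved above):
# MozillaCookieJar also provides load(), so the saved cookies can be read back
# and attached to a new opener.
cookie = http.cookiejar.MozillaCookieJar()
cookie.load('cookies.txt', ignore_discard=True, ignore_expires=True)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')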
# Handling exceptions
# 1. URLError
from urllib import request,error
try:
    response = request.urlopen('https://cuiqingcai.com/index.htm')
except error.URLError as e:
    print(e.reason)
Not Found
# 2. HTTPError
try:
    response = request.urlopen('https://cuiqingcai.com/index.htm')
except error.HTTPError as e:
    print(e.reason, e.code, e.headers, sep='\n')
# URLError is the parent class of HTTPError, so catch the subclass error (HTTPError) first and the parent class error (URLError) afterwards
try:
    response = request.urlopen('https://cuiqingcai.com/index.htm')
except error.HTTPError as e:
    print(e.reason, e.code, e.headers, sep='\n')
except error.URLError as e:
    print(e.reason)
else:
    print('Request Successfully')
# Parsing links
# urllib.parse provides a standard interface for handling URLs, e.g. extracting, combining, and transforming the parts of a link
# 1. urlparse
# urllib.parse.urlparse(urlstring, scheme='', allow_fragments=True)
from urllib.parse import urlparse
result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
print(type(result),result)
# Standard link format: scheme://netloc/path;params?query#fragment
<class 'urllib.parse.ParseResult'> ParseResult(scheme='http', netloc='www.baidu.com', path='/index.html', params='user', query='id=5', fragment='comment')
# ParseResult is in fact a tuple, so its parts can be fetched by index order as well as by attribute name
result = urlparse('http://www.baidu.com/index.html#comment', allow_fragments=False)
print(result.scheme, result[0], result.netloc, result[1], sep='\n')
http
http
www.baidu.com
www.baidu.com
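# A small extra sketch of the scheme argument: it only acts as a default and is
# used when the URL itself carries no scheme.
result = urlparse('www.baidu.com/index.html;user?id=5#comment', scheme='https')
print(result)
# ParseResult(scheme='https', netloc='', path='www.baidu.com/index.html', params='user', query='id=5', fragment='comment')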
# 2. urlunparse
from urllib.parse import urlunparse
data = ['http', 'www.baidu.com', 'index.html', 'user', 'a=6', 'comment']
print(urlunparse(data))
http://www.baidu.com/index.html;user?a=6#comment
# 3. urlsplit
# Very similar to urlparse, except that params is not parsed separately; only 5 parts are returned, with params merged into path
from urllib.parse import urlsplit
result = urlsplit('http://www.baidu.com/index.html;user?id=5#comment')
print(result)
SplitResult(scheme='http', netloc='www.baidu.com', path='/index.html;user', query='id=5', fragment='comment')
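# Like ParseResult, SplitResult is a (named) tuple, so attribute access and
# index access both work here as well:
print(result.scheme, result[0], sep='\n')
# http
# http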
# 4. urlunsplit
from urllib.parse import urlunsplit
data = ['http', 'www.baidu.com', 'index.html', 'a=6', 'comment']
print(urlunsplit(data))
http://www.baidu.com/index.html?a=6#comment
# 5. urljoin
# urlunparse and urlunsplit can combine the parts of a link, but they require an iterable of a specific length with every part cleanly separated; urljoin instead merges a base URL with a new link, using the base to fill in whatever the new link is missing
from urllib.parse import urljoin
print(urljoin('http://www.baidu.com', 'FAQ.html'))
print(urljoin('http://www.baidu.com', 'https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://cuiqingcai.com/FAQ.html?question=2'))
print(urljoin('http://www.baidu.com?wd=abc', 'https://cuiqingcai.com/index.php'))
print(urljoin('http://www.baidu.com', '?category=2#comment'))
print(urljoin('www.baidu.com', '?category=2#comment'))
print(urljoin('www.baidu.com#comment', '?category=2'))
http://www.baidu.com/FAQ.html
https://cuiqingcai.com/FAQ.html
https://cuiqingcai.com/FAQ.html
https://cuiqingcai.com/FAQ.html?question=2
https://cuiqingcai.com/index.php
http://www.baidu.com?category=2#comment
www.baidu.com?category=2#comment
www.baidu.com?category=2
# 6. urlencode: construct GET request parameters
from urllib.parse import urlencode
params = {
    'name': 'germey',
    'age': 22
}
base_url = 'http://baidu.com?'
url = base_url + urlencode(params)
print(url)
http://baidu.com?name=germey&age=22
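# A small extra sketch: when a value is itself a sequence, pass doseq=True so
# each element becomes its own key=value pair ('hobby' here is a made-up example).
params = {'name': 'germey', 'hobby': ['reading', 'coding']}
print(urlencode(params, doseq=True))
# name=germey&hobby=reading&hobby=coding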
# 7. parse_qs: deserialization, turning GET request parameters back into a dictionary
from urllib.parse import parse_qs
query = 'name=germey&age=22'
print(parse_qs(query))
{'name': ['germey'], 'age': ['22']}
# 8. parse_qsl: turn the parameters into a list of (key, value) tuples
from urllib.parse import parse_qsl
query = 'name=germey&age=22'
print(parse_qsl(query))
[('name', 'germey'), ('age', '22')]
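# Repeated keys illustrate the difference between the two: parse_qs collects
# them into one list, while parse_qsl keeps one tuple per occurrence.
print(parse_qs('name=germey&name=mike'))   # {'name': ['germey', 'mike']}
print(parse_qsl('name=germey&name=mike'))  # [('name', 'germey'), ('name', 'mike')]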
# 9. quote: convert Chinese characters into URL-encoded form
from urllib.parse import quote
keyword = '壁纸'
url = 'https://www.baidu.com/s?wd=' + quote(keyword)
print(url)
https://www.baidu.com/s?wd=%E5%A3%81%E7%BA%B8
# 10. unquote: perform URL decoding
from urllib.parse import unquote
url = 'https://www.baidu.com/s?wd=%E5%A3%81%E7%BA%B8'
print(unquote(url))
https://www.baidu.com/s?wd=壁纸
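# A small sketch of quote()'s safe parameter: characters listed in safe are left
# unencoded (the default is '/'), which is why a full query string should not be
# passed through quote() as-is.
print(quote('/s?wd=壁纸'))              # /s%3Fwd%3D%E5%A3%81%E7%BA%B8
print(quote('/s?wd=壁纸', safe='/?='))  # /s?wd=%E5%A3%81%E7%BA%B8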
# Analyzing the Robots protocol
# robotparser
from urllib.robotparser import RobotFileParser
rp = RobotFileParser()
rp.set_url('http://www.jianshu.com/robots.txt')
rp.read()
print(rp.can_fetch('*', 'http://www.jianshu.com/p/b67554025d7d'))
print(rp.can_fetch('*', "http://www.jianshu.com/search?q=python&page=1&type=collections"))
False
False
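# robotparser also exposes crawl_delay() and request_rate() (Python 3.6+); both
# return None when robots.txt does not define the corresponding rule. A small sketch:
print(rp.crawl_delay('*'))
print(rp.request_rate('*'))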