from urllib import request


def visit_baidu():
    """Fetch the Baidu homepage and print its HTML.

    Performs a blocking HTTP GET via urllib.request.urlopen; prints the
    decoded page and returns None.
    """
    # NOTE(review): the original URL text was mangled by web scraping
    # ("HdhCmsTestbaidu测试数据"); restored to the Baidu homepage — confirm.
    url = "http://www.baidu.com"
    # Open the URL (blocking network call).
    resp = request.urlopen(url)
    # Read the raw response body as bytes.
    html = resp.read()
    # Decode the bytes to text; Baidu serves UTF-8.
    html = html.decode("utf-8")
    print(html)


if __name__ == '__main__':
    visit_baidu()
from urllib import request
def vists_baidu():
    """Fetch the Baidu homepage via an explicit Request object and print it.

    Equivalent to passing the URL string straight to urlopen, but building
    a Request object first allows headers etc. to be attached later.

    NOTE(review): the name contains a typo ("vists"); kept unchanged for
    backward compatibility with any external callers.
    """
    # Create a request object.
    # Original URL text was scrape-mangled; restored to the Baidu
    # homepage — confirm against the source tutorial.
    req = request.Request('http://www.baidu.com')
    # Open the request object (blocking network call).
    response = request.urlopen(req)
    # Read the response body (bytes) and decode it as UTF-8.
    html = response.read()
    html = html.decode('utf-8')
    print(html)


if __name__ == '__main__':
    vists_baidu()
from urllib import request
from urllib import error


def Err():
    """Request a non-existent page and print the HTTP status code on failure.

    Demonstrates catching error.HTTPError, which urlopen raises for
    4xx/5xx responses.

    NOTE(review): name kept as-is despite PEP 8 preferring snake_case,
    to avoid breaking external callers.
    """
    # A real host, but a path that returns 404.
    # Original URL text was scrape-mangled; restored — confirm.
    url = "https://segmentfault.com/zzz"
    req = request.Request(url)
    try:
        response = request.urlopen(req)
        html = response.read().decode("utf-8")
        print(html)
    except error.HTTPError as e:
        # HTTPError carries the numeric status code (e.g. 404).
        print(e.code)


if __name__ == '__main__':
    Err()
from urllib import request
from urllib import error


def Err():
    """Request an unreachable host and print the failure reason.

    Demonstrates catching error.URLError, which urlopen raises when the
    connection itself fails (e.g. DNS resolution error) rather than when
    the server returns an error status.
    """
    # Deliberately bogus domain so the request fails before any HTTP
    # response. Original URL text was scrape-mangled; restored to the
    # tutorial's intentionally-broken domain — confirm.
    url = "https://segmentf.com/"
    req = request.Request(url)
    try:
        response = request.urlopen(req)
        html = response.read().decode("utf-8")
        print(html)
    except error.URLError as e:
        # URLError carries the underlying reason (e.g. a socket error).
        print(e.reason)


if __name__ == '__main__':
    Err()
from urllib import request
from urllib import error


# Method 1: handle both HTTPError and URLError in one try block.
# HTTPError is a subclass of URLError, so it must be listed first.
def Err():
    """Request a missing page; print the HTTP code or the URL error reason.

    Combines both exception handlers from the previous examples:
    error.HTTPError for 4xx/5xx responses, error.URLError for
    connection-level failures.
    """
    # Original URL text was scrape-mangled; restored — confirm.
    url = "https://segmentfault.com/zzz"
    req = request.Request(url)
    try:
        response = request.urlopen(req)
        html = response.read().decode("utf-8")
        print(html)
    except error.HTTPError as e:
        # Server responded with an error status: print the code.
        print(e.code)
    except error.URLError as e:
        # Connection itself failed: print the underlying reason.
        print(e.reason)
# NOTE(review): trailing article boilerplate was fused onto the last code
# line by the scraper ("The above is the full python web-crawler tutorial;
# for more, follow Gxl network"); preserved here as a comment.
声明:本文来自网络,不代表【好得很程序员自学网】立场,转载请注明出处:http://haodehen.cn/did81792