Problem description: object detection and image segmentation need large amounts of data, and collecting images from the web by hand is slow. Instead, we can crawl them automatically and then filter the results ourselves (a short filtering sketch is given at the end of this post).
The code is below (don't forget to install the libraries it depends on first, e.g. pip install requests beautifulsoup4):
# -*- coding: utf-8 -*-
import re
import requests
from bs4 import BeautifulSoup
import os
num = 0
numPicture = 0
file = ''
List = []


def Find(url, A):
    """Probe the Baidu image 'flip' pages to count how many results exist."""
    global List
    print('Counting the available images, please wait...')
    t = 0
    s = 0
    while t < 1000:
        Url = url + str(t)
        try:
            Result = A.get(Url, timeout=7, allow_redirects=False)
        except requests.exceptions.RequestException:
            t = t + 60
            continue
        else:
            result = Result.text
            # extract every image URL from the page with a regular expression
            pic_url = re.findall('"objURL":"(.*?)",', result, re.S)
            s += len(pic_url)
            if len(pic_url) == 0:
                break
            else:
                List.append(pic_url)
                t = t + 60
    return s


def recommend(url):
    """Scrape the 'related searches' block (div#topRS) from the result page."""
    Re = []
    try:
        html = requests.get(url, allow_redirects=False)
    except requests.exceptions.RequestException:
        return Re
    html.encoding = 'utf-8'
    bsObj = BeautifulSoup(html.text, 'html.parser')
    div = bsObj.find('div', id='topRS')
    if div is not None:
        listA = div.findAll('a')
        for i in listA:
            if i is not None:
                Re.append(i.get_text())
    return Re


def downloadPicture(html, keyword):
    """Download every objURL found in one result page until numPicture is reached."""
    global num
    # extract every image URL from the page with a regular expression
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)
    print('Found images for keyword "' + keyword + '", starting download...')
    for each in pic_url:
        print('Downloading image ' + str(num + 1) + ', URL: ' + str(each))
        try:
            if each is not None:
                pic = requests.get(each, timeout=7)
            else:
                continue
        except BaseException:
            print('Error: this image cannot be downloaded')
            continue
        else:
            string = os.path.join(file, keyword + '_' + str(num) + '.jpg')
            with open(string, 'wb') as fp:
                fp.write(pic.content)
            num += 1
        if num >= numPicture:
            return


if __name__ == '__main__':  # entry point
    # browser-like headers so Baidu does not reject the requests
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
        'Upgrade-Insecure-Requests': '1'
    }
    A = requests.Session()
    A.headers = headers

    word = input('Enter a search keyword (a person, a place, etc.): ')
    # example of a full request URL:
    # add = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=%E5%BC%A0%E5%A4%A9%E7%88%B1&pn=120'
    url = 'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn='
    tot = Find(url, A)
    Recommend = recommend(url)  # remember the related-search suggestions
    print('Detected %d images in total for "%s"' % (tot, word))

    numPicture = int(input('How many images do you want to download? '))
    file = input('Name a folder to store the images in: ')
    while os.path.exists(file):
        print('That folder already exists, please enter a different name')
        file = input('Name a folder to store the images in: ')
    os.mkdir(file)

    # page through the results; each flip page holds 60 entries
    t = 0
    tmp = url
    while t < numPicture:
        try:
            url = tmp + str(t)
            result = A.get(url, timeout=10, allow_redirects=False)
        except requests.exceptions.RequestException:
            print('Network error, please check your connection and retry')
            t = t + 60
        else:
            downloadPicture(result.text, word)
            t = t + 60

    print('Search finished, thanks for using this script')
    print('You might also like:')
    for item in Recommend:
        print(item, end=' ')
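The crawler itself boils down to two steps: request the flip endpoint page by page (the pn parameter advances in steps of 60) and pull every "objURL" field out of the returned HTML with a regular expression. Below is a minimal self-contained sketch of just that extraction step, assuming image.baidu.com still embeds objURL fields in the flip pages; the keyword 'cat' is only an example.

# Minimal sketch of the core extraction step used by the script above
# (assumes the flip endpoint still embeds "objURL":"..." fields in its HTML).
import re
import requests

def fetch_image_urls(keyword, page=0):
    base = 'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word='
    resp = requests.get(base + keyword + '&pn=' + str(page * 60),
                        headers={'User-Agent': 'Mozilla/5.0'}, timeout=7)
    # every image entry carries an "objURL":"..." field; grab them all
    return re.findall('"objURL":"(.*?)",', resp.text, re.S)

if __name__ == '__main__':
    for u in fetch_image_urls('cat', page=0)[:5]:
        print(u)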
Here we take searching for photos of a celebrity as an example: run the script, then follow the prompts and enter the search keyword → the number of images to download → the name of the local folder to save them in.
Note: run the script over a direct domestic (mainland China) connection rather than through an overseas network or VPN, otherwise you will hit this error → requests.exceptions.SSLError: HTTPSConnectionPool(host='image.baidu.com', port=443): Max retries exceeded with url: /search/flip?tn=baiduimage&ie=utf-8&word=%E6%A1%82%E6%9E%97&pn= (Caused by SSLError(SSLZeroReturnError(6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1131)')))
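Once the download finishes, the images still need the manual filtering mentioned at the top. Before inspecting them by hand, a small cleanup pass can at least weed out files that are corrupt or too small to be useful for detection/segmentation. The sketch below is only an illustration: it assumes Pillow is installed (pip install pillow), 'my_folder' stands for whatever folder name you entered, and the 100-pixel minimum side length is an arbitrary example threshold.

# Hypothetical post-download cleanup: drop corrupt or tiny images before
# filtering the rest by hand. Assumes Pillow; 'my_folder' is a placeholder.
import os
from PIL import Image

folder = 'my_folder'   # replace with the folder you entered when running the crawler
min_side = 100         # example minimum acceptable width/height in pixels

for name in os.listdir(folder):
    path = os.path.join(folder, name)
    try:
        with Image.open(path) as img:
            img.verify()            # raises if the file is not a valid image
        with Image.open(path) as img:
            w, h = img.size
        if min(w, h) < min_side:
            os.remove(path)         # too small to be useful
    except Exception:
        os.remove(path)             # corrupt or truncated download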