
Image Crawling in Seconds: A Quick Roundup


Overview

When it comes to large-scale datasets, Kaggle and the like probably come to mind, but sometimes they just don't have the images you're after.

By running the code in this article, you can quickly and easily crawl images from:

  • Google
  • Yahoo
  • Bing
  • Baidu
  • Flickr

This post is simply a roundup of what I tried after digging through Qiita articles and the icrawler examples.

Google

https://qiita.com/Yuki-Takatsu/items/3f30727d5b21a83ea4ed
I used this article as a reference (i.e., copy-pasted it).

scrape_google.py
import requests
import random
import shutil
import bs4
import ssl
import os.path
# Disable SSL certificate verification (avoids certificate errors on some environments)
ssl._create_default_https_context = ssl._create_unverified_context

def image(keyword):
    # Fetch the Google image-search results page and return the src of one random <img>.
    res = requests.get("https://www.google.com/search?hl=jp&q=" + keyword + "&btnG=Google+Search&tbs=0&safe=off&tbm=isch")
    soup = bs4.BeautifulSoup(res.text, 'lxml')
    links = soup.find_all("img")
    link = random.choice(links).get("src")
    return link

def download_img(url, file_name, save_dir, img_num):
    # Stream the image at `url` into save_dir/file_name/<img_num>.png.
    try:
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(os.path.join(save_dir, file_name, str(img_num) + ".png"), 'wb') as f:
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
    except Exception:
        # Ignore download failures and move on to the next image.
        pass



if __name__ == "__main__":
    num = 10           # number of images to download
    keyword = "cat"    # search keyword (spaces allowed)
    save_dir = 'data'

    # Build a sub-directory name such as "cat" or "black_cat" from the keyword.
    data = keyword.lower()
    file_name_dir = '_'.join(keyword.split(' ')).lower()

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    if not os.path.exists(os.path.join(save_dir, file_name_dir)):
        os.mkdir(os.path.join(save_dir, file_name_dir))

    for i in range(int(num)):
        try:
            print(f'{i}th attempt to crawl...')
            link = image(data)
            download_img(link, file_name_dir, save_dir, i)
        except Exception:
            # Skip failed attempts so one bad response does not stop the loop.
            pass
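
Note that image() requests the results page once per download and then picks a single random thumbnail, so duplicates are likely. As a small variation, here is a sketch (assuming the same results-page HTML that image() already parses) that collects every http(s) thumbnail URL from a single request instead:

def image_links(keyword):
    # Same request and parsing as image(), but return all usable thumbnail URLs.
    res = requests.get("https://www.google.com/search?hl=jp&q=" + keyword + "&btnG=Google+Search&tbs=0&safe=off&tbm=isch")
    soup = bs4.BeautifulSoup(res.text, 'lxml')
    return [img.get("src") for img in soup.find_all("img")
            if img.get("src") and img.get("src").startswith("http")]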


Yahoo

https://qiita.com/ishiwara51/items/3979fbc1c69b4f8ee2d3
I used this article as a reference (copy-pasted it wholesale).

scrape_yahoo.py
import os
import sys
import traceback
from mimetypes import guess_extension
from time import time, sleep
from urllib.request import urlopen, Request
from urllib.parse import quote
from bs4 import BeautifulSoup

MY_EMAIL_ADDR = ''  # sent as the User-Agent header; put your e-mail address here

class Fetcher:
    def __init__(self, ua=''):
        self.ua = ua

    def fetch_img_direct(self, url):
        """
        yahoo画像検索画面に表示されている画像のbyte情報を抽出します。
        引数:
            url: yahoo画像検索画面のurlです。

        返り値:
            img_b_content: ウェブサイトリソースのbyteコードのリストです。
            mime: CONTENT_TYPEにより指定されている拡張子です。
        """
        req = Request(url, headers={'User-Agent': self.ua})
        try:
            with urlopen(req, timeout=3) as p:
                page_b_content = p.read()
                structured_page = BeautifulSoup(page_b_content.decode('UTF-8'), 'html.parser')
                img_link_elems = structured_page.find_all('img')
                img_urls = [e.get('src') for e in img_link_elems if e.get('src').startswith('http')]
                img_urls = list(set(img_urls)) #なぜset化しているのかは不明
        except:
            sys.stderr.write('Error in fetching {}\n'.format(url))
            sys.stderr.write(traceback.format_exc())
            return None, None

        img_b_content = []
        mime = []
        for i, img_url in enumerate(img_urls):
            req1 = Request(img_url, headers={'User-Agent': self.ua})
            try:
                with urlopen(req1, timeout=3) as p:
                    img_b_content.append(p.read())
                    mime.append(p.getheader('Content-Type'))
            except Exception:
                sys.stderr.write('Error in fetching {}\n'.format(img_url))
                sys.stderr.write(traceback.format_exc())
                continue

        return img_b_content, mime

fetcher = Fetcher(MY_EMAIL_ADDR)



def url_brancher(word):
    """
    yahoo画像検索画面のurlを、検索条件の組み合わせの数だけ取得します。

    引数:
        word : 検索語です。

    返り値:
        urllist : yahoo画像検索画面のurlのリストです。
    """
    constant = "https://search.yahoo.co.jp/image/search?p={}&n=60".format(quote(word))

    values = [
        ["", "small"],
        ["", "red"],
        ["", "face"]
    ]
    """
    Full filter sets (swap in to crawl every combination):
    values = [
        ["", "small", "medium", "large", "wallpaper", "widewallpaper"],
        ["", "red", "orange", "yellow", "green", "teal", "blue", "purple", "pink", "white", "gray", "black", "brown"],
        ["", "face", "photo", "clipart", "lineart"]
    ]
    """
    urllist = []

    for i in range(len(values[0])):
        for j in range(len(values[1])):
            for k in range(len(values[2])):
                urllist.append(constant + "&dim={}".format(values[0][i]) + "&imc={}".format(values[1][j]) + "&ctype={}".format(values[2][k]))
    return urllist



def main(word):
    """
    Fetchにより取得された情報をもとに画像ファイルを保存します。

    引数:
    word: 検索語です。

    返り値:
    """
    data_dir = 'data/'
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    yahoo_url_list = url_brancher(word)

    for i, yahoo_url in enumerate(yahoo_url_list):
        sleep(0.1)
        img, mime = fetcher.fetch_img_direct(yahoo_url)
        if not mime or not img:
            print('Error in fetching {}\n'.format(yahoo_url))
            continue

        for j, img_bytes in enumerate(img):
            ext = guess_extension(mime[j].split(';')[0])
            if ext in ('.jpe', '.jpeg'):
                ext = '.jpg'
            if not ext:
                print('Could not guess an extension for image {} from {}\n'.format(j, yahoo_url))
                continue

            result_file = os.path.join(data_dir, str(i) + str(j) + ext)
            with open(result_file, mode='wb') as f:
                f.write(img_bytes)  # img_bytes is the raw image content fetched by fetch_img_direct
            print('fetched', str(i) + str(j) + ext)



if __name__ == '__main__':
    word = 'cat'
    main(word)
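
Running the script with word = 'cat' walks through each filter-combination results page and saves every image it finds directly under data/, named by the page index followed by the image index (for example data/00.jpg, data/01.jpg, ...).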

icrawler

icrawler is a library that lets you crawl images from Google, Bing, Baidu, and Flickr.
That said, at the moment only the Google crawler seems not to work(?); crawling with Bing, Baidu, and Flickr worked without any problems.
Note that only Flickr requires you to obtain an API key (don't worry, it takes about two minutes).
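
If you only need a handful of images, a minimal call is enough. The snippet below is a small sketch (assuming icrawler is installed with pip install icrawler, and using a made-up output directory) that downloads a few Bing results for a keyword:

from icrawler.builtin import BingImageCrawler

# Minimal icrawler usage: download up to 10 Bing results for "cat".
bing_crawler = BingImageCrawler(storage={'root_dir': 'images/bing_quick'})
bing_crawler.crawl(keyword='cat', max_num=10)

The full test script below exercises all of the built-in crawlers in the same way.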

scrape_icrawler.py

import logging
import os.path as osp
from argparse import ArgumentParser

from icrawler.builtin import (BaiduImageCrawler, BingImageCrawler,
                              FlickrImageCrawler, GoogleImageCrawler,
                              GreedyImageCrawler, UrlListCrawler)


key_word = 'cat'
max_num = 200

def test_google():
    print('start testing GoogleImageCrawler')
    google_crawler = GoogleImageCrawler(
        downloader_threads=4,
        storage={'root_dir': 'images/google'},
        log_level=logging.INFO)
    search_filters = dict()
    # Example filters (swap in to use):
    # search_filters = dict(
    #     size='large',
    #     color='orange',
    #     license='commercial,modify',
    #     date=(None, (2017, 11, 30)))
    google_crawler.crawl(key_word, filters=search_filters, max_num=max_num)


def test_bing():
    print('start testing BingImageCrawler')
    bing_crawler = BingImageCrawler(
        downloader_threads=2,
        storage={'root_dir': 'images/bing'},
        log_level=logging.INFO)
    search_filters = dict()
    # Example filters (swap in to use):
    # search_filters = dict(
    #     type='photo',
    #     license='commercial',
    #     layout='wide',
    #     size='large',
    #     date='pastmonth')
    bing_crawler.crawl(key_word, max_num=max_num, filters=search_filters)


def test_baidu():
    print('start testing BaiduImageCrawler')
    # search_filters = dict(size='large', color='blue')
    search_filters = dict()
    baidu_crawler = BaiduImageCrawler(
        downloader_threads=4, storage={'root_dir': 'images/baidu'})
    baidu_crawler.crawl(key_word, filters=search_filters, max_num=max_num)


def test_flickr():
    print('start testing FlickrImageCrawler')
    flickr_crawler = FlickrImageCrawler(
        apikey=None,  # replace None with your Flickr API key
        parser_threads=2,
        downloader_threads=4,
        storage={'root_dir': 'images/flickr'})
    flickr_crawler.crawl(
        max_num=max_num,
        tags=key_word,
        tag_mode='all',
        )


def test_greedy():
    print('start testing GreedyImageCrawler')
    greedy_crawler = GreedyImageCrawler(
        parser_threads=4, storage={'root_dir': 'images/greedy'})
    greedy_crawler.crawl(
        'http://www.bbc.com/news', max_num=max_num, min_size=(100, 100))


def test_urllist():
    print('start testing UrlListCrawler')
    urllist_crawler = UrlListCrawler(
        downloader_threads=3, storage={'root_dir': 'images/urllist'})
    filelist = osp.join(osp.dirname(__file__), 'filelist_demo.txt')  # a text file listing one image URL per line
    urllist_crawler.crawl(filelist)


def main():
    parser = ArgumentParser(description='Test built-in crawlers')
    parser.add_argument(
        '--crawler',
        nargs='+',
        # default=['google', 'bing', 'baidu', 'flickr', 'greedy', 'urllist'],
        default=['google', 'bing', 'baidu', 'flickr'],
        help='which crawlers to test')
    args = parser.parse_args()
    for crawler in args.crawler:
        eval('test_{}()'.format(crawler))
        print('\n')


if __name__ == '__main__':
    main()
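
By default the script runs the Google, Bing, Baidu, and Flickr tests in turn; you can restrict it from the command line, e.g. python scrape_icrawler.py --crawler bing flickr.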

Summary

When you don't have a large-scale dataset to hand, it's nice to be able to crawl and collect images quickly. There are surely other good approaches and image sources out there, so I'd like to try those as well.
