LoginSignup
8

More than 5 years have passed since last update.

BINDのqueriesログをElasticsearchに保存する

Last updated at Posted at 2015-02-06

概要

DNSサーバーのqueriesログをElasticsearchに保存する

検証環境

用途 ip
マスターDNSサーバー 192.168.24.101
スレーブDNSサーバー 192.168.24.102
Elasticsearchサーバー 192.168.24.103

作業履歴

bind の queriesログをsyslogに出力させる設定を入れる

/var/named/chroot/etc/named.conf

    // (snip) -- the bare "()" placeholders in the original were not valid
    // named.conf syntax; elided sections are marked with comments instead.
    logging {

            // Route query logging to syslog facility local1 so rsyslog
            // (listening on the chroot socket) can forward it.
            channel "syslog_local1" {
                syslog   local1;
            };

            // Log every client query through the channel above.
            category queries {
                "syslog_local1";
            };
    };
    // (snip)

bind(chroot環境) の queries ログを syslog で Elasticsearch サーバーへ転送する

shell
# Forward BIND (chroot) query logs to the Elasticsearch host via rsyslog.
#
# BUGFIX: the heredoc delimiter is quoted ('EOF') so the rsyslog directive
# $AddUnixListenSocket is written literally. With the original unquoted EOF
# the shell expanded it (to an empty string), producing a broken config line.
write_bind_rsyslog_conf() {
    # $1 - target config path (defaults to the rsyslog drop-in directory)
    local conf=${1:-/etc/rsyslog.d/bind_chroot.conf}
    cat << 'EOF' > "$conf"
$AddUnixListenSocket /var/named/chroot/dev/log
local1.*    @192.168.24.103:42185
EOF
}
write_bind_rsyslog_conf
/etc/init.d/rsyslog restart

Elasticsearch の インストール

shell
# Install a JDK for Elasticsearch, then Elasticsearch 1.4.2 from the official
# RPM. BUGFIX: the original package name had a typo ("java-1.8.0-openjdk-deve"),
# which yum cannot resolve; the correct name is java-1.8.0-openjdk-devel.
yum install java-1.8.0-openjdk-devel
rpm -ivh https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.4.2.noarch.rpm
shell
# Allow the Elasticsearch/Kibana ports (8080 web front end, 9200 REST,
# 9300 transport) from localhost and the lab subnet only; drop the rest.
# Same three rules, same order, as the original.
es_ports='8080,9200,9300'
for allowed_src in 127.0.0.1 192.168.24.0/24; do
    iptables -A INPUT -p tcp -m multiport --dports "$es_ports" -s "$allowed_src" -j ACCEPT
done
iptables -A INPUT -p tcp -m multiport --dports "$es_ports" -j DROP

Elasticsearch 設定ファイル修正

shell
# Back up the original to elasticsearch.yml.org (-i.org), then uncomment and
# set the cluster name and disable multicast discovery in place.
es_config=/etc/elasticsearch/elasticsearch.yml
sed -i.org \
 -e 's/#discovery\.zen\.ping\.multicast\.enabled.*/discovery.zen.ping.multicast.enabled: false/g' \
 -e 's/#cluster\.name.*/cluster.name: named.elasticsearch/g' \
 "$es_config"
diff
--- /etc/elasticsearch/elasticsearch.yml.org    2015-02-07 02:19:24.338361576 +0900
+++ /etc/elasticsearch/elasticsearch.yml        2015-02-07 02:29:39.266361349 +0900
@@ -29,7 +29,7 @@
 # Cluster name identifies your cluster for auto-discovery. If you're running
 # multiple clusters on the same network, make sure you're using unique names.
 #
-#cluster.name: elasticsearch
+cluster.name: named.elasticsearch


 #################################### Node #####################################
@@ -319,7 +319,7 @@
 #
 # 1. Disable multicast discovery (enabled by default):
 #
-#discovery.zen.ping.multicast.enabled: false
+discovery.zen.ping.multicast.enabled: false
 #
 # 2. Configure an initial list of master nodes in the cluster
 #    to perform discovery when new nodes (master or data) are started:

Elasticsearch 起動と確認

shell
# Start Elasticsearch and verify the node answers on the REST port.
/etc/init.d/elasticsearch start
# A JSON banner with "status" : 200 (see the result below) confirms it is up.
curl -X GET http://localhost:9200/
result
{
  "status" : 200,
  "name" : "Vashti",
  "cluster_name" : "named.elasticsearch",
  "version" : {
    "number" : "1.4.2",
    "build_hash" : "927caff6f05403e936c20bf4529f144f0c89fd8c",
    "build_timestamp" : "2014-12-16T14:11:12Z",
    "build_snapshot" : false,
    "lucene_version" : "4.10.2"
  },
  "tagline" : "You Know, for Search"
}

Elasticsearch マッピング確認

shell
# Show the field mappings of every namedlog-* index.
# BUGFIX: the URL is quoted so the shell cannot glob-expand '*' or '?'
# against files in the current directory (the original was unquoted).
curl 'http://localhost:9200/namedlog-*/_mapping?pretty'

Elasticsearch テンプレート確認

shell
# Show the index template named template_1.
curl -XGET localhost:9200/_template/template_1

全テンプレートを確認する場合

shell
# List every index template whose name starts with "template".
# BUGFIX: quoted so the shell cannot glob-expand the '*' (original was unquoted).
curl -XGET 'localhost:9200/_template/template*'

template_1を消す場合

shell
# Delete the template_1 index template.
curl -XDELETE localhost:9200/_template/template_1

Elasticsearch テンプレート適用

shell
# Register the index template applied to every namedlog-* index that fluentd
# creates. All string fields use "not_analyzed" so Kibana aggregates on exact
# values (IP addresses, FQDNs) rather than analyzed tokens.
# BUGFIX: the original inline JSON had one extra closing brace and was not
# valid JSON; the braces are balanced here. The body is kept in a variable so
# it can be inspected/validated before sending.
template_json='
{
  "template" : "namedlog-*",
    "mappings" : {
      "fluentd" : {
        "properties" : {
          "@log_name" : {
            "type" : "string"
          },
          "@timestamp" : {
            "type" : "date",
            "format" : "dateOptionalTime"
          },
          "class_type" : {
            "type" : "string",
            "index" : "not_analyzed"
          },
          "country" : {
            "type" : "string",
            "index" : "not_analyzed"
          },
          "dst" : {
            "type" : "string",
            "index" : "not_analyzed"
          },
          "fqdn" : {
            "type" : "string",
            "index" : "not_analyzed"
          },
          "log_type" : {
            "type" : "string",
            "index" : "not_analyzed"
          },
          "src" : {
            "type" : "string",
            "index" : "not_analyzed"
          },
          "view" : {
            "type" : "string",
            "index" : "not_analyzed"
          }
        }
      }
    }
}'
curl -XPUT localhost:9200/_template/template_1 -d "$template_json"

Elasticsearch 全てのデータ(index)を削除する

shell
# DESTRUCTIVE: deletes every namedlog-* index (already correctly quoted so
# the shell does not glob-expand the '*').
curl -XDELETE 'http://localhost:9200/namedlog-*'

Elasticsearch の WEBフロントエンド インストール

shell
# Install the elasticsearch-head web front end as a site plugin.
/usr/share/elasticsearch/bin/plugin -install mobz/elasticsearch-head

Fluent のインストール

shell
# Install td-agent (fluentd) with Treasure Data's installer script.
# NOTE(review): piping curl straight into sh runs unverified remote code --
# fine for this lab setup, but download and inspect the script in production.
curl -L http://toolbelt.treasuredata.com/sh/install-redhat.sh | sh
# Accept syslog traffic on the fluentd listener port (42185/udp) from the
# two DNS servers only; drop everything else.
iptables -A INPUT -s 192.168.24.101 -p udp --dport 42185 -j ACCEPT
iptables -A INPUT -s 192.168.24.102 -p udp --dport 42185 -j ACCEPT
iptables -A INPUT -p udp --dport 42185 -j DROP
mkdir -p /etc/td-agent/conf.d

Fluent の plugin インストール

shell
# GeoIP C library headers are required to build fluent-plugin-geoip.
yum install geoip-devel --enablerepo=epel
# Install the fluentd plugins into td-agent's bundled Ruby (not system gem).
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-parser
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-geoip
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-flatten-hash
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-elasticsearch
tips
CentOS5系にインストールするときはremiのlibcurl-develが必要になる(公式やepelはダメ、バージョンが合わない)
yum install gcc libcurl-devel --enablerepo=remi
shell
# Keep td-agent.conf minimal and load per-service configs from conf.d/.
echo 'include conf.d/*.conf' > /etc/td-agent/td-agent.conf
# Create the pipeline config shown below.
vi /etc/td-agent/conf.d/bind_queries.conf

/etc/td-agent/conf.d/bind_queries.conf
#----------------------------------------------------------
# Receive logs forwarded by each DNS server's rsyslog
# local1.info -> syslog:42185 -> named.syslog.local1.info
#----------------------------------------------------------
<source>
    type syslog
    port 42185
    tag  named.rewrite
</source>

#----------------------------------------------------------
# Filtering stage
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-rewrite
#----------------------------------------------------------
<match named.rewrite.local1.info>

    type copy

    #----------------------------------------------------------
    # Test output (uncomment to debug)
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # named.rewrite.local1.info -> named.parser
    #----------------------------------------------------------
    <store>
        type            rewrite
        remove_prefix   named.rewrite.local1.info
        add_prefix      named.parser

        #--- QUERY: capture source IP, view, FQDN, class/type and the
        #--- destination from a BIND query log line and rewrite the
        #--- message as a JSON string.
        <rule>
            key         message
            pattern     client ([.0-9]+).[0-9]*: view ([^ ]*): [^ ]* ([^ ]*) ([^ ]* [^ ]* [^ ]*) \(([.0-9:]+)\)
            replace     {"log_type":"QUERY", "src":"\1","view":"\2","fqdn":"\3","class_type":"\4","dst":"\5"}
            last        true
        </rule>

        #--- Discard anything that did not match the pattern above
        <rule>
            key         message
            pattern     .*
            ignore      true
        </rule>
    </store>
</match>

#----------------------------------------------------------
# Parse the filtered JSON string into record fields
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-parser
#----------------------------------------------------------
<match named.parser>

    type copy

    #----------------------------------------------------------
    # Test output (uncomment to debug)
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # named.parser -> named.geoip
    #----------------------------------------------------------
    <store>
        type            parser
        tag             named.geoip
        key_name        message
        format          json
        reserve_data    yes
    </store>
</match>

#----------------------------------------------------------
# Attach the country code of the source IP via GeoIP
# yum install geoip-devel --enablerepo=epel
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-geoip
#----------------------------------------------------------
<match named.geoip>

    type copy

    #----------------------------------------------------------
    # Test output (uncomment to debug)
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # named.geoip -> named.flatten_hash
    #----------------------------------------------------------
    <store>
        type                geoip
        geoip_lookup_key    src

        <record>
            country         ${country_code['src']}
        </record>

        tag                 named.flatten_hash
        log_level           debug
        flush_interval      1s
    </store>
</match>

#----------------------------------------------------------
# Flatten nested JSON into top-level keys
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-flatten-hash
#----------------------------------------------------------
<match named.flatten_hash>

    type copy

    #----------------------------------------------------------
    # Test output (uncomment to debug)
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # named.flatten_hash -> named.record_reformer
    #----------------------------------------------------------
    <store>
        type flatten_hash
        tag  named.record_reformer
        separator _
    </store>
</match>

#----------------------------------------------------------
# Drop keys that are no longer needed
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-record-reformer
#----------------------------------------------------------
<match named.record_reformer>

    type copy

    #----------------------------------------------------------
    # Test output (uncomment to debug)
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # named.record_reformer -> named.elasticsearch
    #----------------------------------------------------------
    <store>
        type record_reformer
        tag  named.elasticsearch
        remove_keys message,ident,pid
    </store>
</match>

#----------------------------------------------------------
# Store the records into Elasticsearch
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-elasticsearch
#----------------------------------------------------------
<match named.elasticsearch>

    type copy

    #----------------------------------------------------------
    # Test output (left enabled in the original article)
    #----------------------------------------------------------
    <store>
        type stdout
    </store>

    #----------------------------------------------------------
    # named.elasticsearch -> elasticsearch [localhost:9200]
    #----------------------------------------------------------
    <store>
        type            elasticsearch
        include_tag_key true
        tag_key         @log_name
        host            localhost
        port            9200
        logstash_format true
        logstash_prefix namedlog
        flush_interval  10s
    </store>
</match>
shell
# Start td-agent with the configuration written above.
/etc/init.d/td-agent start

kibana インストール

shell
# Download and unpack Kibana 3 (static HTML/JS only; no daemon).
cd /usr/local/src/
wget https://download.elasticsearch.org/kibana/kibana/kibana-3.1.2.tar.gz
tar zxvf kibana-3.1.2.tar.gz
cd /usr/local/src/kibana-3.1.2 
# Serve Kibana on port 8080 with Python's built-in web server.
# NOTE(review): SimpleHTTPServer is Python 2 only; on Python 3 use
# "python3 -m http.server 8080".
python -m SimpleHTTPServer 8080

Register as a new user and use Qiita more conveniently

  1. You get articles that match your needs
  2. You can efficiently read back useful information
  3. You can use dark theme
What you can do with signing up
8