Storing BIND queries logs in InfluxDB

Posted at 2015-02-06

Overview

Store the DNS servers' queries logs in InfluxDB.

Test environment

| Role | IP |
|------|----|
| Master DNS server | 192.168.24.101 |
| Slave DNS server | 192.168.24.102 |
| InfluxDB server | 192.168.24.103 |

Work log

Configure BIND to write its queries log to syslog

/var/named/chroot/etc/named.conf

    (snip)
    logging {

            channel "syslog_local1" {
                syslog   local1;
            };

            category queries {
                "syslog_local1";
            };
    };
    (snip)
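
After adding the logging statement, reload BIND so the new channel takes effect. A minimal sketch, assuming the stock init script of the chroot package; `rndc reconfig` is an alternative if rndc is already configured:

shell
/etc/init.d/named restart
# or, without a full restart:
rndc reconfig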

Forward the BIND (chroot) queries log to the InfluxDB server via syslog

shell
cat << 'EOF' > /etc/rsyslog.d/bind_chroot.conf
$AddUnixListenSocket /var/named/chroot/dev/log
local1.*    @192.168.24.103:42185
EOF
/etc/init.d/rsyslog restart
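
To check that rsyslog actually forwards local1 messages to the InfluxDB host, a test message can be injected by hand on the DNS server (the message text is arbitrary):

shell
logger -p local1.info "bind_chroot forwarding test"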

Install InfluxDB

shell
rpm -ivh http://s3.amazonaws.com/influxdb/influxdb-latest-1.x86_64.rpm
/etc/init.d/influxdb start
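
A quick way to confirm that InfluxDB came up is to check that the admin UI (8083) and HTTP API (8086) ports are listening (a simple sketch using netstat):

shell
netstat -tnlp | egrep '8083|8086'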

shell
iptables -A INPUT -p tcp -m multiport --dports 8083,8086,8090,8099 -s 127.0.0.1 -j ACCEPT
iptables -A INPUT -p tcp -m multiport --dports 8083,8086,8090,8099 -s 192.168.24.0/24 -j ACCEPT
iptables -A INPUT -p tcp -m multiport --dports 8083,8086,8090,8099 -j DROP
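
If these rules should survive a reboot, save them as well (assuming the stock CentOS iptables service):

shell
/etc/init.d/iptables save
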
shell
#--- update cluster root password
curl -u root:root -X POST 'http://127.0.0.1:8086/cluster_admins/root' -d '{"password": "****"}'

#--- create a database
curl -u root:**** -X POST 'http://127.0.0.1:8086/db' -d '{"name": "named_log"}'

#--- add database user
curl -u root:**** -X POST 'http://127.0.0.1:8086/db/named_log/users' -d '{"name": "fluent", "password": "****"}'


#--- create a database
curl -u root:**** -X POST 'http://127.0.0.1:8086/db' -d '{"name": "grafana"}'

#--- add database user
curl -u root:**** -X POST 'http://127.0.0.1:8086/db/grafana/users' -d '{"name": "grafana", "password": "****"}'
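
Before wiring up Fluentd, the named_log database and the fluent user can be sanity-checked with a manual write and read. This assumes the InfluxDB 0.8-series HTTP API installed by the rpm above; the test series name is arbitrary:

shell
#--- write one test point
curl -u fluent:**** -X POST 'http://127.0.0.1:8086/db/named_log/series' \
     -d '[{"name":"conn_test","columns":["value"],"points":[[1]]}]'

#--- read it back
curl -G -u fluent:**** 'http://127.0.0.1:8086/db/named_log/series' \
     --data-urlencode 'q=select * from conn_test'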

Install Fluentd (td-agent)

shell
curl -L http://toolbelt.treasuredata.com/sh/install-redhat.sh | sh
iptables -A INPUT -s 192.168.24.101 -p udp --dport 42185 -j ACCEPT
iptables -A INPUT -s 192.168.24.102 -p udp --dport 42185 -j ACCEPT
iptables -A INPUT -p udp --dport 42185 -j DROP
mkdir -p /etc/td-agent/conf.d
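
The install script sets up the td-agent package and its init script; whether it landed can be checked before continuing (a quick sketch):

shell
rpm -q td-agent
/etc/init.d/td-agent status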

Install Fluentd plugins

shell
yum install geoip-devel --enablerepo=epel
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-rewrite
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-parser
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-geoip
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-flatten-hash
/usr/lib64/fluent/ruby/bin/gem install fluent-plugin-influxdb
shell
echo 'include conf.d/*.conf' > /etc/td-agent/td-agent.conf
vi /etc/td-agent/conf.d/bind_queries.conf
/etc/td-agent/conf.d/bind_queries.conf
#----------------------------------------------------------
# Receive the logs forwarded from each DNS server's rsyslog
# local1.info -> syslog:42185 -> named.syslog.local1.info
#----------------------------------------------------------
<source>
    type syslog
    port 42185
    tag  named.syslog
</source>

#----------------------------------------------------------
# Filtering stage
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-rewrite
#----------------------------------------------------------
<match named.syslog.local1.info>

    type copy

    #----------------------------------------------------------
    # Test output
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # Filter the messages and switch the tag
    # named.syslog.*.* -> named.filtered
    #----------------------------------------------------------
    <store>
        type            rewrite
        remove_prefix   named.syslog.local1.info
        add_prefix      named.filtered

        #--- QUERY
        <rule>
            key         message
            pattern     client ([.0-9]+).[0-9]*: view ([^ ]*): [^ ]* ([^ ]*) ([^ ]* [^ ]* [^ ]*) \(([.0-9:]+)\)
            replace     {"log_type":"QUERY", "src":"\1","view":"\2","fqdn":"\3","class_type":"\4","dst":"\5"}
            last        true
        </rule>

        #--- Drop anything that does not match the pattern
        <rule>
            key         message
            pattern     .*
            ignore      true
        </rule>
    </store>

</match>

#----------------------------------------------------------
# JSON parsing stage
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-parser
#----------------------------------------------------------
<match named.filtered>

    type copy

    #----------------------------------------------------------
    # Test output
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # Parse the message field as JSON and switch the tag
    # named.filtered -> named.json
    #----------------------------------------------------------
    <store>
        type            parser
        tag             named.json
        key_name        message
        format          json
        #reserve_data    yes
    </store>
</match>

#----------------------------------------------------------
# GeoIP stage
# yum install geoip-devel --enablerepo=epel
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-geoip
#----------------------------------------------------------
<match named.json>

    type copy

    #----------------------------------------------------------
    # Test output
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # Attach the country code for the source IP and switch the tag
    # named.json -> named.geoip-log
    #----------------------------------------------------------
    <store>
        type                geoip
        geoip_lookup_key    src

        <record>
            country         ${country_code['src']}
        </record>

        tag                 named.geoip-log
        log_level           debug
        flush_interval      1s
    </store>
</match>

#----------------------------------------------------------
# JSON flattening stage
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-flatten-hash
#----------------------------------------------------------
<match named.geoip-log>

    type copy

    #----------------------------------------------------------
    # Test output
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # Flatten the nested JSON
    # named.geoip-log -> named.flat-log
    #----------------------------------------------------------
    <store>
        type flatten_hash
        tag  named.flat-log
        separator _
    </store>
</match>

#----------------------------------------------------------
# named.flat-log -> influxdb[:8086]
# /usr/lib64/fluent/ruby/bin/gem install fluent-plugin-influxdb
#----------------------------------------------------------
<match named.flat-log>

    type copy

    #----------------------------------------------------------
    # Test output
    #----------------------------------------------------------
    #<store>
    #    type stdout
    #</store>

    #----------------------------------------------------------
    # Write to InfluxDB
    #----------------------------------------------------------
    <store>
        type           influxdb
        host           localhost
        port           8086
        dbname         named_log
        user           fluent
        password       ****
        time_precision s
        flush_interval 1s
    </store>
</match>
shell
/etc/init.d/td-agent start
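
Once td-agent is running, the pipeline can be verified by watching its log for errors and querying InfluxDB for the series written by the influxdb output. With this plugin the series name follows the tag, so it should appear as named.flat-log (an assumption worth confirming with `list series`):

shell
tail -n 50 /var/log/td-agent/td-agent.log

curl -G -u fluent:**** 'http://127.0.0.1:8086/db/named_log/series' \
     --data-urlencode 'q=list series'

curl -G -u fluent:**** 'http://127.0.0.1:8086/db/named_log/series' \
     --data-urlencode 'q=select * from "named.flat-log" limit 10'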

Install Grafana

shell
wget http://grafanarel.s3.amazonaws.com/grafana-1.9.1.tar.gz
tar zxvf grafana-1.9.1.tar.gz
cd grafana-1.9.1
cp -av config.sample.js config.js
vi config.js
config.js
// == Configuration
// config.js is where you will find the core Grafana configuration. This file contains parameters that
// must be set before Grafana is run for the first time.

define(['settings'], function(Settings) {

  return new Settings({

      /* Data sources
      * ========================================================
      * Datasources are used to fetch metrics, annotations, and serve as dashboard storage
      *  - You can have multiple of the same type.
      *  - grafanaDB: true    marks it for use for dashboard storage
      *  - default: true      marks the datasource as the default metric source (if you have multiple)
      *  - basic authentication: use url syntax http://username:password@domain:port
      */

      // InfluxDB example setup (the InfluxDB databases specified need to exist)
      datasources: {
        influxdb: {
          type: 'influxdb',
          url: "http://192.168.24.103:8086/db/named_log",
          username: 'fluent',
          password: '****',
        },
        grafana: {
          type: 'influxdb',
          url: "http://192.168.24.103:8086/db/grafana",
          username: 'grafana',
          password: '****',
          grafanaDB: true
        },
      },

      // Graphite & Elasticsearch example setup
      /*
      datasources: {
        graphite: {
          type: 'graphite',
          url: "http://my.graphite.server.com:8080",
        },
        elasticsearch: {
          type: 'elasticsearch',
          url: "http://my.elastic.server.com:9200",
          index: 'grafana-dash',
          grafanaDB: true,
        }
      },
      */

      // OpenTSDB & Elasticsearch example setup
      /*
      datasources: {
        opentsdb: {
          type: 'opentsdb',
          url: "http://opentsdb.server:4242",
        },
        elasticsearch: {
          type: 'elasticsearch',
          url: "http://my.elastic.server.com:9200",
          index: 'grafana-dash',
          grafanaDB: true,
        }
      },
      */

      /* Global configuration options
      * ========================================================
      */

      // specify the limit for dashboard search results
      search: {
        max_results: 100
      },

      // default home dashboard
      default_route: '/dashboard/file/default.json',

      // set to false to disable unsaved changes warning
      unsaved_changes_warning: true,

      // set the default timespan for the playlist feature
      // Example: "1m", "1h"
      playlist_timespan: "1m",

      // If you want to specify password before saving, please specify it below
      // The purpose of this password is not security, but to stop some users from accidentally changing dashboards
      admin: {
        password: ''
      },

      // Change window title prefix from 'Grafana - <dashboard title>'
      window_title_prefix: 'Grafana - ',

      // Add your own custom panels
      plugins: {
        // list of plugin panels
        panels: [],
        // requirejs modules in plugins folder that should be loaded
        // for example custom datasources
        dependencies: [],
      }

    });
});
shell
python -m SimpleHTTPServer 9000
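
SimpleHTTPServer serves the unpacked Grafana directory on port 9000; if iptables is active on this host, that port needs to be opened too (mirroring the earlier rules, restricted to the test subnet):

shell
iptables -A INPUT -s 192.168.24.0/24 -p tcp --dport 9000 -j ACCEPT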

Access Grafana in a browser at http://192.168.24.103:9000/
