
Building a multi-master Kubernetes cluster on CentOS 8

Posted at 2020-03-10

2020/03/11
Changed the CNI from kube-router to calico.

2020/03/17
Fixed typos and other errors.

2020/03/26
Added conntrack-tools to the packages installed in the OS setup section.
Note: if conntrack-tools is not installed, kubeadm fails on Kubernetes 1.18.0 with the following error:

error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR FileExisting-conntrack]: conntrack not found in system path 

2020/03/30
Changed to a procedure that creates the systemd unit files manually, because the Kubernetes 1.18.0 systemd unit configuration file is missing (broken link?).

2020/04/01
Fixed a typo in the sed command used to disable swap.


1. Environment

Build the following environment.

・Node layout

Node type  Hostname  IP address     Prefix  NIC
MASTER     VM100001  192.168.1.201  24      eth0
MASTER     VM100002  192.168.1.202  24      eth0
MASTER     VM100003  192.168.1.203  24      eth0
WORKER     VM100004  192.168.1.204  24      eth0
WORKER     VM100005  192.168.1.205  24      eth0
WORKER     VM100006  192.168.1.206  24      eth0

・Virtual IP address for the MASTER node API

Virtual IP address  Prefix
192.168.1.200       24

・Kubernetes networks

Network type  Segment     Prefix
pod CIDR      10.244.0.0  16
service CIDR  10.0.0.0    16

2. OS setup

Perform on all nodes.

2.1. Install conntrack-tools, socat, iproute-tc, ipvsadm, tar, and jq

# dnf install conntrack-tools socat iproute-tc ipvsadm tar jq

2.2. Uninstall podman, runc, and containernetworking-plugins

2.2.1. Uninstall

# dnf remove podman runc containernetworking-plugins

2.2.2. Prevent automatic reinstallation

# echo "exclude=podman* runc* containernetworking-plugins*"  >> /etc/yum.conf

2.3. Disable swap

# swapoff -a
# sed -i -e '/swap/d' /etc/fstab
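
To confirm swap is fully off (an optional check; swapon --show prints nothing when no swap device is active):

# swapon --show
# free -h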

2.4. Disable SELinux

# setenforce 0
# sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

2.5. Kernel parameters

# cat << EOF >  /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

# sysctl --system
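
Note: the two keys above exist only while the br_netfilter kernel module is loaded. If sysctl --system reports that they cannot be found, load the module and make it persist across reboots (a minimal sketch):

# modprobe br_netfilter
# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
# sysctl --system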

2.6. Disable firewalld and nftables

# systemctl stop firewalld
# systemctl stop nftables

# systemctl disable firewalld
# systemctl disable nftables

2.7. Configure /etc/hosts

Register all nodes in /etc/hosts, for example with the heredoc shown after the list.

192.168.1.201 VM100001
192.168.1.202 VM100002
192.168.1.203 VM100003
192.168.1.204 VM100004
192.168.1.205 VM100005
192.168.1.206 VM100006
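
For example, the entries can be appended with a heredoc (adjust the hostnames and addresses to your environment):

# cat << EOF >> /etc/hosts
192.168.1.201 VM100001
192.168.1.202 VM100002
192.168.1.203 VM100003
192.168.1.204 VM100004
192.168.1.205 VM100005
192.168.1.206 VM100006
EOF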

3. Install docker-ce

Perform on all nodes.

3.1. Create the docker group

# groupadd -g 2000 docker

3.2. Extract the binary package

# DOCKER_VERSION=19.03.7
# mkdir -p /opt/docker
# curl -L https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz | tar -C /opt/docker -xz
# cd /opt/docker
# chown -R root:docker docker
# mv docker ${DOCKER_VERSION}
# ln -nfs ${DOCKER_VERSION} stable
# ln -nfs /opt/docker/stable/* /usr/bin/

3.3. Create the systemd units

# cat << EOF > /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
# cat << EOF > /usr/lib/systemd/system/docker.socket
[Unit]
Description=Docker Socket for the API
PartOf=docker.service

[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF
# cat << EOF > /usr/lib/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
KillMode=process
Delegate=yes
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity

[Install]
WantedBy=multi-user.target
EOF

3.4. Create the CNI configuration directory

# mkdir -p /etc/cni/net.d/

3.5. Set the cgroup driver

# mkdir -p /etc/docker
# echo {\"exec-opts\":[\"native.cgroupdriver=systemd\"]} | jq . > /etc/docker/daemon.json

3.6. Start

# systemctl daemon-reload
# systemctl enable containerd.service
# systemctl enable docker.socket
# systemctl enable docker.service
# systemctl restart docker
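
To verify that the daemon is running and using the systemd cgroup driver (an optional check):

# docker info --format '{{.CgroupDriver}}'
systemd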

4. Install the CNI plugins

Perform on all nodes.

# CNI_VERSION="v0.8.5"
# mkdir -p /opt/cni/${CNI_VERSION}
# curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/${CNI_VERSION} -xz
# cd /opt/cni
# ln -nfs ${CNI_VERSION} bin
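
The plugin binaries (bridge, host-local, loopback, portmap, and so on) should now be reachable through the bin symlink (an optional check):

# ls /opt/cni/bin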

5. Install crictl

Perform on all nodes.

# CRICTL_VERSION="v1.17.0"
# mkdir -p /opt/crictl/${CRICTL_VERSION}
# curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/crictl/${CRICTL_VERSION} -xz
# cd /opt/crictl
# ln -nfs ${CRICTL_VERSION} stable
# ln -nfs /opt/crictl/stable/* /usr/bin/
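
crictl probes a list of default runtime endpoints, which is noisy on a Docker-based node; it can optionally be pinned to the dockershim socket (a minimal sketch; the socket only appears once kubelet is running):

# cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix:///var/run/dockershim.sock
EOF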

6. Install Kubernetes

Perform on all nodes.

6.1. Install

# RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)"

# mkdir -p /opt/kubernetes/${RELEASE}
# cd /opt/kubernetes/${RELEASE}
# curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
# chmod +x {kubeadm,kubelet,kubectl}
# cd /opt/kubernetes/
# ln -nfs ${RELEASE} stable
# ln -nfs /opt/kubernetes/stable/* /usr/bin/
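
A quick version check confirms the binaries are on the PATH (an optional check):

# kubeadm version -o short
# kubectl version --client --short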

6.2. Create the systemd units

# cat <<EOF > /etc/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=http://kubernetes.io/docs/

[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
# mkdir -p /etc/systemd/system/kubelet.service.d
# cat << EOF > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_KUBEADM_ARGS \$KUBELET_EXTRA_ARGS
EOF
# mkdir -p /etc/default
# echo KUBELET_EXTRA_ARGS= >  /etc/default/kubelet

6.3. Enable automatic startup

# systemctl daemon-reload
# systemctl enable kubelet
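
At this point kubelet starts but keeps restarting, because /var/lib/kubelet/config.yaml does not exist yet; this is expected and resolves itself once kubeadm init or kubeadm join runs. The restarts can be watched with:

# journalctl -u kubelet -f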

7. Install keepalived

Perform on MASTER nodes only.

7.1. Install

# dnf install keepalived

7.2. Configure

# cp -p /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
# cat << EOF > /etc/keepalived/keepalived.conf
global_defs {
}

vrrp_instance KUBERNETES_APISERVER {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    preempt
    authentication {
        auth_type PASS
        auth_pass kubernetes-apiserver-pass
    }
    virtual_ipaddress {
        192.168.1.200/24
    }
}
EOF

7.3. Stop keepalived (it will be started again later)

# systemctl stop keepalived

8. Build the first MASTER node

Perform only on the first MASTER node.

8.1. Start keepalived

# systemctl enable keepalived
# systemctl start keepalived
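
The virtual IP address should now be assigned to eth0 on this node (an optional check):

# ip -4 addr show dev eth0 | grep 192.168.1.200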

8.2. Initialize the MASTER node

# kubeadm init \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.0.0.0/16 \
--apiserver-advertise-address=192.168.1.200 \
--control-plane-endpoint=192.168.1.200

8.3. Install the CNI (calico)

・On RHEL 8 / CentOS 8 the firewall backend has changed to nftables, so set FELIX_IPTABLESBACKEND.
・Change CALICO_IPV4POOL_CIDR to the pod CIDR given to kubeadm init.

# export KUBECONFIG=/etc/kubernetes/admin.conf
# curl -L https://docs.projectcalico.org/manifests/calico.yaml | \
sed  '/            - name: CALICO_DISABLE_FILE_LOGGING/i\            # ADD' | \
sed  '/            - name: CALICO_DISABLE_FILE_LOGGING/i\            - name: FELIX_IPTABLESBACKEND' | \
sed  '/            - name: CALICO_DISABLE_FILE_LOGGING/i\              value: Auto'  | \
sed  '/            - name: CALICO_DISABLE_FILE_LOGGING/i\            # ADD' | \
sed  '/            - name: CALICO_DISABLE_FILE_LOGGING/i\            - name: CALICO_IPV4POOL_CIDR' | \
sed  '/            - name: CALICO_DISABLE_FILE_LOGGING/i\              value: \"10.244.0.0\/16\"' | \
kubectl apply -f -
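
To confirm that the calico DaemonSet has rolled out on every node (an optional check):

# kubectl -n kube-system get pods -l k8s-app=calico-node -o wide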

9. Add the remaining MASTER nodes

9.1. Copy the certificates from the first node

Perform only on the MASTER nodes being added.

# ssh root@192.168.1.200 \
"cd / ; tar zcf - \
./etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} \
./etc/kubernetes/pki/etcd/{ca.crt,ca.key} \
./etc/kubernetes/admin.conf" \
| tar -C / -xz

Note: If the root account of the first MASTER node cannot be reached over ssh, copy the following files from the first MASTER node to the MASTER node being added by some other means, for example with the scp sketch after the list.

Files
/etc/kubernetes/pki/ca.crt
/etc/kubernetes/pki/ca.key
/etc/kubernetes/pki/sa.key
/etc/kubernetes/pki/sa.pub
/etc/kubernetes/pki/front-proxy-ca.crt
/etc/kubernetes/pki/front-proxy-ca.key
/etc/kubernetes/pki/etcd/ca.crt
/etc/kubernetes/pki/etcd/ca.key
/etc/kubernetes/admin.conf
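
For example, with scp (a sketch assuming root ssh from the node being added to the first MASTER node at 192.168.1.201):

# mkdir -p /etc/kubernetes/pki/etcd
# scp root@192.168.1.201:/etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} /etc/kubernetes/pki/
# scp root@192.168.1.201:/etc/kubernetes/pki/etcd/{ca.crt,ca.key} /etc/kubernetes/pki/etcd/
# scp root@192.168.1.201:/etc/kubernetes/admin.conf /etc/kubernetes/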

9.2. Issue a token and get the CA certificate hash

Perform on the first MASTER node.

9.2.1. Issue a token

# kubeadm token create

・Example output

W0310 12:30:15.081490    5622 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0310 12:30:15.081543    5622 validation.go:28] Cannot validate kubelet config - no validator is available
abcd12.abcd1234efgh567 ★<- the issued token

9.2.2. Get the hash

# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
| openssl rsa -pubin -outform der 2>/dev/null \
| openssl dgst -sha256 -hex | sed 's/^.* //'

・Example output

1234567890f8abcdef1234567890f8abcdef1234567890f8abcdef1234567890 ★<- the hash value
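
Alternatively, kubeadm can issue a token and print the complete join command in one step (append --control-plane when joining a MASTER node):

# kubeadm token create --print-join-command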

9.3. Add the MASTER node

Perform only on the MASTER node being added.

# kubeadm join 192.168.1.200:6443 \
--token <token> \
--discovery-token-ca-cert-hash sha256:<hash> \
--control-plane

9.4. Start keepalived

# systemctl enable keepalived
# systemctl start keepalived

10. Add the WORKER nodes

10.1. Issue a token and get the CA certificate hash

Perform on the first MASTER node.

10.1.1. Issue a token

# kubeadm token create

・Example output

W0310 12:30:15.081490    5622 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0310 12:30:15.081543    5622 validation.go:28] Cannot validate kubelet config - no validator is available
abcd12.abcd1234efgh567 ★<- the issued token

10.1.2. Get the hash

# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
| openssl rsa -pubin -outform der 2>/dev/null \
| openssl dgst -sha256 -hex | sed 's/^.* //'

・Example output

1234567890f8abcdef1234567890f8abcdef1234567890f8abcdef1234567890 ★<- the hash value

10.2. Add the WORKER node

Perform only on the WORKER node being added.

# kubeadm join 192.168.1.200:6443 \
--token <token> \
--discovery-token-ca-cert-hash sha256:<hash>

11. Bonus: checking the cluster

[root@VM100002 ~]#
[root@VM100002 ~]# kubectl  get node -o wide
NAME       STATUS   ROLES    AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
vm100001   Ready    master   28m   v1.17.3   192.168.1.201   <none>        CentOS Linux 8 (Core)   4.18.0-147.5.1.el8_1.x86_64   docker://19.3.7
vm100002   Ready    master   31m   v1.17.3   192.168.1.202   <none>        CentOS Linux 8 (Core)   4.18.0-147.5.1.el8_1.x86_64   docker://19.3.7
vm100003   Ready    master   28m   v1.17.3   192.168.1.203   <none>        CentOS Linux 8 (Core)   4.18.0-147.5.1.el8_1.x86_64   docker://19.3.7
vm100004   Ready    <none>   27m   v1.17.3   192.168.1.204   <none>        CentOS Linux 8 (Core)   4.18.0-147.5.1.el8_1.x86_64   docker://19.3.7
vm100005   Ready    <none>   27m   v1.17.3   192.168.1.205   <none>        CentOS Linux 8 (Core)   4.18.0-147.5.1.el8_1.x86_64   docker://19.3.7
vm100006   Ready    <none>   27m   v1.17.3   192.168.1.206   <none>        CentOS Linux 8 (Core)   4.18.0-147.5.1.el8_1.x86_64   docker://19.3.7
[root@VM100002 ~]#
[root@VM100002 ~]# kubectl  get all -A -o wide
NAMESPACE     NAME                                           READY   STATUS    RESTARTS   AGE   IP              NODE       NOMINATED NODE   READINESS GATES
kube-system   pod/calico-kube-controllers-68dc4cf88f-h968r   1/1     Running   0          31m   10.244.27.3     vm100002   <none>           <none>
kube-system   pod/calico-node-6qlt9                          1/1     Running   0          28m   192.168.1.205   vm100005   <none>           <none>
kube-system   pod/calico-node-7sb9l                          1/1     Running   0          28m   192.168.1.206   vm100006   <none>           <none>
kube-system   pod/calico-node-mpwpc                          1/1     Running   0          28m   192.168.1.203   vm100003   <none>           <none>
kube-system   pod/calico-node-qz86s                          1/1     Running   0          28m   192.168.1.201   vm100001   <none>           <none>
kube-system   pod/calico-node-s9t7z                          1/1     Running   0          27m   192.168.1.204   vm100004   <none>           <none>
kube-system   pod/calico-node-zbbq6                          1/1     Running   0          31m   192.168.1.202   vm100002   <none>           <none>
kube-system   pod/coredns-6955765f44-mnnkb                   1/1     Running   0          31m   10.244.27.1     vm100002   <none>           <none>
kube-system   pod/coredns-6955765f44-st8s8                   1/1     Running   0          31m   10.244.27.2     vm100002   <none>           <none>
kube-system   pod/etcd-vm100001                              1/1     Running   0          28m   192.168.1.201   vm100001   <none>           <none>
kube-system   pod/etcd-vm100002                              1/1     Running   0          31m   192.168.1.202   vm100002   <none>           <none>
kube-system   pod/etcd-vm100003                              1/1     Running   0          28m   192.168.1.203   vm100003   <none>           <none>
kube-system   pod/kube-apiserver-vm100001                    1/1     Running   0          26m   192.168.1.201   vm100001   <none>           <none>
kube-system   pod/kube-apiserver-vm100002                    1/1     Running   0          31m   192.168.1.202   vm100002   <none>           <none>
kube-system   pod/kube-apiserver-vm100003                    1/1     Running   0          28m   192.168.1.203   vm100003   <none>           <none>
kube-system   pod/kube-controller-manager-vm100001           1/1     Running   0          26m   192.168.1.201   vm100001   <none>           <none>
kube-system   pod/kube-controller-manager-vm100002           1/1     Running   1          31m   192.168.1.202   vm100002   <none>           <none>
kube-system   pod/kube-controller-manager-vm100003           1/1     Running   0          28m   192.168.1.203   vm100003   <none>           <none>
kube-system   pod/kube-proxy-5bbd6                           1/1     Running   0          27m   192.168.1.204   vm100004   <none>           <none>
kube-system   pod/kube-proxy-8phr6                           1/1     Running   0          28m   192.168.1.206   vm100006   <none>           <none>
kube-system   pod/kube-proxy-hxxdz                           1/1     Running   0          28m   192.168.1.205   vm100005   <none>           <none>
kube-system   pod/kube-proxy-mq74l                           1/1     Running   0          28m   192.168.1.201   vm100001   <none>           <none>
kube-system   pod/kube-proxy-tlvjw                           1/1     Running   0          31m   192.168.1.202   vm100002   <none>           <none>
kube-system   pod/kube-proxy-xl225                           1/1     Running   0          28m   192.168.1.203   vm100003   <none>           <none>
kube-system   pod/kube-scheduler-vm100001                    1/1     Running   0          27m   192.168.1.201   vm100001   <none>           <none>
kube-system   pod/kube-scheduler-vm100002                    1/1     Running   1          31m   192.168.1.202   vm100002   <none>           <none>
kube-system   pod/kube-scheduler-vm100003                    1/1     Running   0          28m   192.168.1.203   vm100003   <none>           <none>

NAMESPACE     NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
default       service/kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP                  31m   <none>
kube-system   service/kube-dns     ClusterIP   10.0.0.10    <none>        53/UDP,53/TCP,9153/TCP   31m   k8s-app=kube-dns

NAMESPACE     NAME                         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE   CONTAINERS    IMAGES                          SELECTOR
kube-system   daemonset.apps/calico-node   6         6         6       6            6           kubernetes.io/os=linux        31m   calico-node   calico/node:v3.13.0             k8s-app=calico-node
kube-system   daemonset.apps/kube-proxy    6         6         6       6            6           beta.kubernetes.io/os=linux   31m   kube-proxy    k8s.gcr.io/kube-proxy:v1.17.3   k8s-app=kube-proxy

NAMESPACE     NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS                IMAGES                            SELECTOR
kube-system   deployment.apps/calico-kube-controllers   1/1     1            1           31m   calico-kube-controllers   calico/kube-controllers:v3.13.0   k8s-app=calico-kube-controllers
kube-system   deployment.apps/coredns                   2/2     2            2           31m   coredns                   k8s.gcr.io/coredns:1.6.5          k8s-app=kube-dns

NAMESPACE     NAME                                                 DESIRED   CURRENT   READY   AGE   CONTAINERS                IMAGES                            SELECTOR
kube-system   replicaset.apps/calico-kube-controllers-68dc4cf88f   1         1         1       31m   calico-kube-controllers   calico/kube-controllers:v3.13.0   k8s-app=calico-kube-controllers,pod-template-hash=68dc4cf88f
kube-system   replicaset.apps/coredns-6955765f44                   2         2         2       31m   coredns                   k8s.gcr.io/coredns:1.6.5          k8s-app=kube-dns,pod-template-hash=6955765f44
[root@VM100002 ~]#

References

Install Docker Engine - Community from binaries
Installing kubeadm - Kubernetes
