kubeadmでFlatcar Container LinuxにKubernetes 1.17を入れる場合はこちら
環境
Master Node
EC2 | OS | Kubernetes | Docker |
---|---|---|---|
t3.small | Flatcar Container Linux stable 2345.3.0 | 1.18.0 | 18.06.3-ce |
Flatcar Container Linux AMI のユーザー名は core です
Master Nodeは、CPUが2コア未満、またはメモリが2GB未満の場合、kubeadm init実行時にエラーになります。
ディスクサイズはデフォルトの8G
Master NodeとWorker Nodeは同じVPC
Master NodeのIPアドレスは172.31.20.57
Worker Node
EC2 | OS | Kubernetes | Docker |
---|---|---|---|
t3.small | Flatcar Container Linux stable 2345.3.0 | 1.18.0 | 18.06.3-ce |
Flatcar Container Linux AMI のユーザー名は core です
Master Nodeは、CPUが2コア未満、またはメモリが2GB未満の場合、kubeadm init実行時にエラーになります（Worker Nodeにはこの制限はありません）。
ディスクサイズはデフォルトの8G
Master NodeとWorker Nodeは同じVPC
Worker NodeのIPアドレスは172.31.27.87
SELinux設定確認
getenforce
# 実行結果
Permissive
swap確認
# ないので何もしない。ある場合はswapoff -a
free
# 実行結果
total used free shared buff/cache available
Mem: 2001376 75864 1496924 204416 428588 1579056
Swap: 0 0 0
Docker設定
docker -v
# 実行結果
Docker version 18.06.3-ce, build d7080c1
# Dockerの自動起動有効
sudo systemctl enable docker
# Write /etc/docker/daemon.json: use the systemd cgroup driver so Docker
# matches the kubelet's cgroup driver (kubeadm's recommended setup).
# NOTE: daemon.json is strict JSON, so no comments are possible inside it.
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
# Drop-in directory for systemd overrides (created per the k8s install docs;
# nothing is placed in it here).
sudo mkdir -p /etc/systemd/system/docker.service.d
sudo systemctl daemon-reload
sudo systemctl restart docker
# Verify the drivers took effect
docker info | grep -i driver
# 実行結果
Storage Driver: overlay2
Logging Driver: json-file
Cgroup Driver: systemd
kubelet、kubeadm、kubectlインストール
# Install the CNI plugins that pod networks (e.g. Calico) require.
CNI_VERSION="v0.8.5"
sudo mkdir -p /opt/cni/bin
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz
# Install crictl (needed by kubeadm).
CRICTL_VERSION="v1.17.0"
sudo mkdir -p /opt/bin
curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | sudo tar -C /opt/bin -xz
# Download kubeadm/kubelet/kubectl into /opt/bin instead of /usr/bin
# (the /usr tree is read-only on Flatcar — see the issues linked below).
RELEASE="v1.18.0"
sudo mkdir -p /opt/bin
cd /opt/bin
sudo curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
sudo chmod +x {kubeadm,kubelet,kubectl}
# kubelet.service and 10-kubeadm.conf were not published for k8s 1.18.0,
# so the 1.17.4 files are used instead; the sed rewrites /usr/bin -> /opt/bin
# to match the install location above.
RELEASE="v1.17.4"
cd
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ./kubelet.service
sudo mv ./kubelet.service /etc/systemd/system/
sudo mkdir -p /etc/systemd/system/kubelet.service.d
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ./10-kubeadm.conf
sudo mv 10-kubeadm.conf /etc/systemd/system/kubelet.service.d
kubelet 起動
sudo systemctl enable --now kubelet
Masterノードのセットアップ
Rogue One: Network Connection Tracing on Kubernetes Pods using conntrack on CoreOS Container Linux
kubeadm init時にconfigファイルを用意する必要があります。詳細は以下のissuesを見てください。
failed to join the cluster with /usr/libexec/kubernetes: read-only file system #88210
add workarounds for Fedora Coreos's R/O /usr/libexec/ #2031
# kubeadm initでエラー「[ERROR FileExisting-conntrack]:
# conntrack not found in system path」でるので、
# Dockerイメージ作成
cd
mkdir conntrack
cd conntrack
vi Dockerfile
FROM ubuntu:18.04
RUN apt update \
&& apt install -y --no-install-recommends \
conntrack \
&& apt -y clean \
&& rm -rf /var/lib/apt/lists/*
ENTRYPOINT ["conntrack"]
docker build -t conntrack:1.0 .
cd /opt/bin
# kubeadm's preflight check needs a `conntrack` binary on the PATH, so
# write a wrapper that runs conntrack from the image built above.
# Fixes vs. the original: a real shebang (`# !/bin/bash` with a space is
# just a comment, not a shebang) and "$@" so that arguments such as
# `conntrack -L` are actually forwarded into the container. A heredoc
# replaces the interactive `vi` so the step is copy-paste scriptable.
sudo tee conntrack > /dev/null <<'WRAPPER'
#!/bin/bash
exec docker run --net=host --privileged --rm conntrack:1.0 "$@"
WRAPPER
sudo chmod +x conntrack
# Generate the kubeadm init configuration. The volume-plugin-dir /
# flex-volume-plugin-dir overrides are required on Flatcar because the
# default /usr/libexec path is on a read-only filesystem (see the issues
# linked above). NOTE: YAML is indentation-sensitive — the nested keys
# below must be indented exactly as shown or kubeadm will reject the file.
cat <<EOF > ./kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  kubeletExtraArgs:
    volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
controllerManager:
  extraArgs:
    flex-volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
networking:
  podSubnet: 192.168.0.0/16
EOF
sudo kubeadm init --config kubeadm-config.yaml
kubectl 接続設定
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 確認
kubectl get po -A
# 実行結果
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-66bff467f8-glfcp 0/1 Pending 0 82s
kube-system coredns-66bff467f8-vmhxj 0/1 Pending 0 82s
kube-system etcd-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 98s
kube-system kube-apiserver-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 98s
kube-system kube-controller-manager-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 98s
kube-system kube-proxy-8q5l6 1/1 Running 0 82s
kube-system kube-scheduler-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 98s
Calicoデプロイ
curl -L -O https://docs.projectcalico.org/v3.13/manifests/calico.yaml
# Calico 3.8 fails to install under CoreOS stable #2712
# https://github.com/projectcalico/calico/issues/2712
# flexvol-driver-host の path を修正
sed -i -e "s?/usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds?/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds?g" calico.yaml
kubectl apply -f calico.yaml
# 確認
kubectl get po -A
# 実行結果
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-5b8b769fcd-6nn5h 1/1 Running 0 52s
kube-system calico-node-4zwrt 1/1 Running 0 52s
kube-system coredns-66bff467f8-glfcp 1/1 Running 0 3m13s
kube-system coredns-66bff467f8-vmhxj 1/1 Running 0 3m13s
kube-system etcd-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 3m29s
kube-system kube-apiserver-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 3m29s
kube-system kube-controller-manager-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 3m29s
kube-system kube-proxy-8q5l6 1/1 Running 0 3m13s
kube-system kube-scheduler-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 3m29s
kubectl get node
# 実行結果
NAME STATUS ROLES AGE VERSION
ip-172-31-20-57.ap-northeast-1.compute.internal Ready master 4m13s v1.18.0
kubectl get cs
# 実行結果
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
WorkerNodeを構築する場合
以下はMaster Nodeの手順を参照
・SELinux設定確認
・swap確認
・Docker設定
・kubelet、kubeadm、kubectlインストール
・kubelet 起動
クラスタに参加
Joinする時にconfigファイルを用意する必要があります。詳細は以下のissuesを見てください。
failed to join the cluster with /usr/libexec/kubernetes: read-only file system #88210
add workarounds for Fedora Coreos's R/O /usr/libexec/ #2031
# kubeadm joinでエラー「[ERROR FileExisting-conntrack]:
# conntrack not found in system path」でるので、
# Dockerイメージ作成
cd
mkdir conntrack
cd conntrack
vi Dockerfile
FROM ubuntu:18.04
RUN apt update \
&& apt install -y --no-install-recommends \
conntrack \
&& apt -y clean \
&& rm -rf /var/lib/apt/lists/*
ENTRYPOINT ["conntrack"]
docker build -t conntrack:1.0 .
cd /opt/bin
# Same wrapper as on the master: kubeadm join's preflight check needs
# `conntrack` on the PATH. Fixed shebang (no space after `#!`) and "$@"
# so arguments are forwarded into the container; heredoc instead of
# interactive `vi` so the step is scriptable.
sudo tee conntrack > /dev/null <<'WRAPPER'
#!/bin/bash
exec docker run --net=host --privileged --rm conntrack:1.0 "$@"
WRAPPER
sudo chmod +x conntrack
# Master Nodeで実行
kubeadm token create --print-join-command
# Create the join configuration file (heredoc instead of interactive vi,
# matching the master-node section). token / caCertHashes /
# apiServerEndpoint are all children of discovery.bootstrapToken, and the
# kubeletExtraArgs override works around Flatcar's read-only /usr/libexec.
# NOTE: YAML is indentation-sensitive — keep the nesting exactly as shown.
cd
cat <<'EOF' > ./node-join-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
  bootstrapToken:
    token: "kubeadm joinコマンドのトークンの値を指定"
    caCertHashes:
      - "kubeadm joinコマンドのdiscovery-token-ca-cert-hashを指定"
    apiServerEndpoint: "マスターノードのIPアドレスを指定:6443"
nodeRegistration:
  kubeletExtraArgs:
    volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
EOF
# Worker Nodeで、以下のコマンドの先頭に「sudo 」をつけて実行
sudo kubeadm join マスターノードのIPアドレスを指定:6443 --config=node-join-config.yaml
確認
kubectl get node
# 実行結果
NAME STATUS ROLES AGE VERSION
ip-172-31-20-57.ap-northeast-1.compute.internal Ready master 20m v1.18.0
ip-172-31-27-87.ap-northeast-1.compute.internal Ready <none> 21s v1.18.0
kubectl get po -A
# 実行結果
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-5b8b769fcd-6nn5h 1/1 Running 0 19m
kube-system calico-node-4zwrt 1/1 Running 0 19m
kube-system calico-node-fqh62 1/1 Running 0 102s
kube-system coredns-66bff467f8-glfcp 1/1 Running 0 21m
kube-system coredns-66bff467f8-vmhxj 1/1 Running 0 21m
kube-system etcd-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 22m
kube-system kube-apiserver-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 22m
kube-system kube-controller-manager-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 22m
kube-system kube-proxy-8q5l6 1/1 Running 0 21m
kube-system kube-proxy-d5kw2 1/1 Running 0 102s
kube-system kube-scheduler-ip-172-31-20-57.ap-northeast-1.compute.internal 1/1 Running 0 22m
Redisデプロイ・動作確認
# Redis manifest: headless Service + ConfigMap (redis.conf) + Deployment.
# Written via heredoc (instead of interactive vi) with the YAML nesting
# restored — the manifest is invalid without this indentation.
cat <<'EOF' > redis.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-svc
spec:
  ports:
    - port: 6379
      targetPort: 6379
  selector:
    app: redis
  clusterIP: None
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis.config
data:
  redis.conf: |
    requirepass password
    bind 0.0.0.0
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
spec:
  selector:
    matchLabels:
      app: redis
  replicas: 1
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - name: redis
          image: redis:5.0.8
          # Start redis-server with the config mounted from the ConfigMap.
          command:
            - "redis-server"
            - "/redis-master/redis.conf"
          ports:
            - name: redis
              containerPort: 6379
          volumeMounts:
            - name: data
              mountPath: /redis-master-data
            - name: config
              mountPath: /redis-master
      volumes:
        - name: data
          emptyDir: {}
        - name: config
          configMap:
            name: redis.config
EOF
kubectl apply -f redis.yaml
kubectl run -it redis-cli --rm --image redis:5.0.8 --restart=Never -- bash
If you don't see a command prompt, try pressing enter.
root@redis-cli:/data# redis-cli -c -h redis-svc -p 6379 -a password
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
redis-svc:6379> set a 1
OK
redis-svc:6379> get a
"1"
redis-svc:6379> exit
root@redis-cli:/data# exit
参考URL
Flatcar Container Linux
Flatcar Container Linux Documentation