

Automating self-installed Kubernetes cluster creation on AWS

Posted at 2021-11-01

About the self-installed Kubernetes cluster on AWS I wrote about the other day: I keep deleting it, then remembering I need it and building it again, so I decided to automate the process. It also doubles as aws cli practice. Use CloudFormation? No, no, PowerShell.
In the end, writing the script took about 8 hours, which is far too much effort for something that takes only about 30 minutes by hand, and I mostly regret it. But that is how automation usually goes.

Prerequisites

Uses PowerShell on Windows 10.
The aws cli must already be installed on Windows 10, with an API key set up via aws configure, roughly like this:

> aws configure
AWS Access Key ID [****************6EMR]:
AWS Secret Access Key [****************BTr/]:
Default region name [ap-northeast-1]:
Default output format [json]:

Also, I tested this with a user that has AdministratorAccess attached.

>aws iam list-attached-user-policies --user-name admin
{
    "AttachedPolicies": [
        {
            "PolicyName": "AdministratorAccess",
            "PolicyArn": "arn:aws:iam::aws:policy/AdministratorAccess"
        }
    ]
}
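As an extra sanity check (not required by the script), you can confirm which credentials the aws cli is actually using with aws sts get-caller-identity; the values below are placeholders.

> aws sts get-caller-identity
{
    "UserId": "<user-id>",
    "Account": "<account-id>",
    "Arn": "arn:aws:iam::<account-id>:user/admin"
}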

The script

Copy the script below and paste it into a PowerShell prompt (alternatively, save it to a file and dot-source it, as shown after the script).
It only defines functions, so pasting and running it does nothing by itself.
What it does largely follows the steps for building a Kubernetes cluster in my earlier article.

function Create-Cluster {
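  # Return the public IP address of the given EC2 instance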
  function Get-Public-IPAddr ($instid) {
    return aws ec2 describe-instances --instance-ids $instid | ConvertFrom-Json |% { $_.Reservations[0].Instances[0].PublicIpAddress }
  }

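  # Run a command on the instance over ssh using the kube.pem key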
  function Run-SSH($ipaddr, $command) {
    ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=error -i kube.pem ubuntu@$ipaddr -- $command
  }

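  # Create an EC2 instance tagged Name=$name, or reuse an existing instance with that tag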
  function Create-VM($ami,$type,$az,$name) {
    $i = aws ec2 describe-instances --filters Name=tag:Name,Values=$name | ConvertFrom-Json
    if ($i.Reservations.Count -ne 0) {
      return $i.Reservations[0].Instances[0].InstanceId
    }
    $i = aws ec2 run-instances `
      --image-id $ami --instance-type $type --security-groups kube `
      --key-name kube --placement AvailabilityZone=$az `
      --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=$name}]" `
      | ConvertFrom-Json
    return $i.Instances[0].InstanceId
  }

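  # Wait for the instance to become reachable, then install Docker and kubelet/kubeadm/kubectl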
  function Install-Kubeadm($instid) {
    echo "--- Starting to install kubeadm..."

    echo "--- Wating for the instance become running state..."
    $s = $null
    while ($s -ne "running") {
      echo .; sleep 2
      $i = aws ec2 describe-instance-status --instance-ids $instid | ConvertFrom-Json
      $s = $i.InstanceStatuses[0].InstanceState.Name
    }

    $ipaddr = Get-Public-IPAddr $instid
    echo "--- Instance IP address: $ipaddr"

    echo "--- Wating for the instance accept ssh..."
    $c = $False
    while (-not $c) {
      echo .; sleep 5
      $res = Run-SSH $ipaddr "echo hello"
      $c = ($res -eq "hello")
    }

    echo "--- Checking kubeadm..."
    $res = Run-SSH $ipaddr "dpkg -l | grep kubeadm"
    if ($res -match "kubeadm") {
      echo "--- kubeadm already installed"
      echo $res
      return
    }

    echo "--- Setting --no-source-dest-check..."
    aws ec2 modify-instance-attribute --instance-id=$instid --no-source-dest-check

    echo "--- Installing docker..."
    Run-SSH $ipaddr "sudo apt-get update"
    Run-SSH $ipaddr "sudo apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release"
    Run-SSH $ipaddr "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg"
    Run-SSH $ipaddr 'echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null'
    Run-SSH $ipaddr "sudo apt-get update"
    Run-SSH $ipaddr "sudo apt-get install -y docker-ce=5:19.03.11~3-0~ubuntu-focal docker-ce-cli=5:19.03.11~3-0~ubuntu-focal containerd.io"
    $daemon = @'
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
'@
    $daemon | % { [Text.Encoding]::UTF8.GetBytes($_) } | Set-Content -Path "daemon.json" -Encoding Byte
    scp -o StrictHostKeyChecking=no -i kube.pem daemon.json ubuntu@${ipaddr}:.
    rm daemon.json
    Run-SSH $ipaddr "sudo cp daemon.json /etc/docker/daemon.json"
    Run-SSH $ipaddr "sudo mkdir -p /etc/systemd/system/docker.service.d"
    Run-SSH $ipaddr "sudo systemctl restart docker"
    Run-SSH $ipaddr "docker --version"
    Run-SSH $ipaddr "sudo docker run hello-world"

    echo "--- Installing kubeadm..."
    Run-SSH $ipaddr "sudo apt-get update && sudo apt-get install -y apt-transport-https curl"
    Run-SSH $ipaddr "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -"
    $sources = @'
deb https://apt.kubernetes.io/ kubernetes-xenial main
'@
    $sources | % { [Text.Encoding]::UTF8.GetBytes($_) } | Set-Content -Path "sources.list" -Encoding Byte
    scp -o StrictHostKeyChecking=no -i kube.pem sources.list ubuntu@${ipaddr}:.
    rm sources.list
    Run-SSH $ipaddr "sudo cp sources.list /etc/apt/sources.list.d/kubernetes.list"
    Run-SSH $ipaddr "sudo apt-get update"
    Run-SSH $ipaddr "sudo apt-get install -y kubelet kubeadm kubectl"
    Run-SSH $ipaddr "sudo apt-mark hold kubelet kubeadm kubectl"

    echo "--- kubeadm installed"
  }

  echo "--- Creating EC2 key-pair..."
  if (-not (Test-Path .\kube.pem)) {
    $keypair = aws ec2 create-key-pair --key-name kube | ConvertFrom-Json
    $keypair.KeyMaterial | % { [Text.Encoding]::UTF8.GetBytes($_) } | Set-Content -Path "kube.pem" -Encoding Byte
    icacls.exe .\kube.pem /reset
    icacls.exe .\kube.pem /grant:r "$($env:USERNAME):(R)"
    icacls.exe .\kube.pem /inheritance:r
  }

  echo "--- Creating security-group..."
  $sg = aws ec2 describe-security-groups --group-names "kube" | ConvertFrom-Json
  if (-not ($sg.SecurityGroups.Count -eq 1)) {
    $sg = aws ec2 create-security-group --description kube --group-name kube | ConvertFrom-Json
    aws ec2 authorize-security-group-ingress --group-id $sg.GroupId --protocol all --source-group $sg.GroupId
    aws ec2 authorize-security-group-ingress --group-id $sg.GroupId --protocol tcp --port 22 --cidr 0.0.0.0/0
    aws ec2 authorize-security-group-ingress --group-id $sg.GroupId --protocol tcp --port 80 --cidr 0.0.0.0/0
  }

  echo "--- Creating master node VM..."
  # Ubuntu Server 20.04 LTS (HVM), SSD Volume Type - ami-0df99b3a8349462c6
  $ami = "ami-0df99b3a8349462c6"
  # VCPU=2, Memory=2GB
  $type = "t3.small"
  # AZ
  $az = "ap-northeast-1d"
  # Name
  $name = "kube-master"

  $master = Create-VM $ami $type $az $name
  Install-Kubeadm $master

  echo "--- Checking cluster already exist"
  $ipaddr = Get-Public-IPAddr $master
  $res = Run-SSH $ipaddr "kubectl get pod -A"
  if (($res -match "coredns.*1/1.*Running").Count -eq 2) {
    echo "--- Cluster already exists"
  } else {
    echo "--- Establishing kubernetes cluster..."
    $ipaddr = Get-Public-IPAddr $master
    Run-SSH $ipaddr "sudo kubeadm init --pod-network-cidr=192.168.0.0/16"

    Run-SSH $ipaddr 'mkdir -p $HOME/.kube'
    Run-SSH $ipaddr 'sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config'
    Run-SSH $ipaddr 'sudo chown $(id -u):$(id -g) $HOME/.kube/config'

    echo "--- Installing calico network driver..."
    Run-SSH $ipaddr "kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml"
    Run-SSH $ipaddr "kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml"

    echo "--- Wating for calico running..."
    $c = $False
    while (-not $c) {
      echo .; sleep 3
      $res = (Run-SSH $ipaddr "kubectl get pods -n calico-system")
      $c1 = ($res -match "^calico-kube-controllers.* 1/1 +Running")
      $c2 = ($res -match "^calico-node.* 1/1 +Running")
      $c3 = ($res -match "^calico-typha.* 1/1 +Running")
      $c = ($c1.Count -eq 1) -and ($c2.Count -eq 1) -and ($c3.Count -eq 1)
    }

    echo "--- Waiting for coredns running..."
    $c = $False
    while (-not $c) {
      echo .; sleep 3
      $res = (Run-SSH $ipaddr "kubectl get pod -A")
      $c1 = ($res -match "coredns.* 1/1 +Running")
      $c = ($c1.Count -eq 2)
    }
  }
  $join = Run-SSH $ipaddr "sudo kubeadm token create --print-join-command"
  echo "--- join-command: $join"


  echo "--- Creating worker node 1 VM..."
  # Ubuntu Server 20.04 LTS (HVM), SSD Volume Type - ami-0df99b3a8349462c6
  $ami = "ami-0df99b3a8349462c6"
  # VCPU=2, Memory=2GB
  $type = "t3.small"
  # AZ
  $az = "ap-northeast-1d"
  # Name
  $name = "kube-worker1"

  $worker1 = Create-VM $ami $type $az $name
  Install-Kubeadm $worker1

  echo "--- Joining to cluster..."
  $ipaddr = Get-Public-IPAddr $worker1
  Run-SSH $ipaddr "sudo $join"


  echo "--- Creating worker node 2 VM..."
  # Ubuntu Server 20.04 LTS (HVM), SSD Volume Type - ami-0df99b3a8349462c6
  $ami = "ami-0df99b3a8349462c6"
  # VCPU=2, Memory=2GB
  $type = "t3.small"
  # AZ
  $az = "ap-northeast-1d"
  # Name
  $name = "kube-worker2"

  $worker2 = Create-VM $ami $type $az $name
  Install-Kubeadm $worker2

  echo "--- Joining to cluster..."
  $ipaddr = Get-Public-IPAddr $worker2
  Run-SSH $ipaddr "sudo $join"


  echo "--- Wating all nodes ready..."
  $ipaddr = Get-Public-IPAddr $master
  $c = $False
  while (-not $c) {
    echo .; sleep 3
    $res = (Run-SSH $ipaddr "kubectl get nodes")
    $c = (($res -match "Ready").Count -eq 3)
  }

  echo "--- Relocating coredns..."
  Run-SSH $ipaddr "kubectl scale --replicas=0 -n kube-system deployment/coredns"
  Run-SSH $ipaddr "kubectl scale --replicas=2 -n kube-system deployment/coredns"

  echo "--- Installing ingress controller..."
  $res = Run-SSH $ipaddr "kubectl get deploy -n ingress-nginx ingress-nginx-controller -o yaml"
  if (-not ($res -match "hostPort: 80")) {
    Run-SSH $ipaddr "kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.4/deploy/static/provider/baremetal/deploy.yaml"
    Run-SSH $ipaddr ("kubectl get deploy -n ingress-nginx ingress-nginx-controller -o yaml " `
      + "| sed 's/        - containerPort: 80/&\n          hostPort: 80/' " `
      + "| kubectl apply -f -")
    Run-SSH $ipaddr "kubectl scale --replicas=2 -n ingress-nginx deploy/ingress-nginx-controller"

    Run-SSH $ipaddr ("kubectl get IngressClass nginx -o yaml " `
      + "| sed -e 's/  annotations:/&\n    ingressclass.kubernetes.io\/is-default-class: \`"true\`"/' " `
      + "| kubectl apply -f -")
  }

  echo "--- Ingress IP addresses: $(Get-Public-IPAddr $worker1),$(Get-Public-IPAddr $worker2)"
  echo "--- Run kubectl command: ssh -i kube.pem ubuntu@$ipaddr -- kubectl get pod -A"
  echo "--- Cluster created"
}

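# Terminate the cluster instances, then remove their Name tags, the security group, and the key pair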
function Clean-Up() {
  $insts = aws ec2 describe-instances --filters Name=tag:Name,Values=kube-master,kube-worker1,kube-worker2 | ConvertFrom-Json |% { $_.Reservations }
  aws ec2 terminate-instances --instance-ids $($insts.Instances[0].InstanceId) $($insts.Instances[1].InstanceId) $($insts.Instances[2].InstanceId) 
  $c = $False
  while (-not $c) {
    $insts = aws ec2 describe-instances --filters Name=tag:Name,Values=kube-master,kube-worker1,kube-worker2 | ConvertFrom-Json |% { $_.Reservations }
    $c1 = ($($insts.Instances[0].State.Name) -eq "terminated")
    $c2 = ($($insts.Instances[1].State.Name) -eq "terminated")
    $c3 = ($($insts.Instances[2].State.Name) -eq "terminated")
    $c = $c1 -and $c2 -and $c3
  }
  aws ec2 delete-tags --resources $($insts.Instances[0].InstanceId) $($insts.Instances[1].InstanceId) $($insts.Instances[2].InstanceId) --tags Key=Name
  aws ec2 delete-security-group --group-name "kube"
  aws ec2 delete-key-pair --key-name "kube"
  icacls.exe .\kube.pem /reset
  rm .\kube.pem
}
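Instead of pasting, you can also save the functions to a file and dot-source it; the file name here is just an example.

> . .\kube-cluster.ps1
> Create-Cluster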

Creating the cluster

Run the following in a PowerShell prompt.
Cluster creation is complete when "--- Cluster created" is printed at the end.
It finishes in about 10 minutes, although it is a little disappointing that doing it by hand takes about the same time.

> Create-Cluster
...
--- Ingress IP addresses: 18.181.232.74,18.183.118.202
--- Run kubectl command: ssh -i kube.pem ubuntu@18.181.111.31 -- kubectl get pod -A
--- Cluster created
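To check that the cluster is actually healthy, you can run kubectl on the master over ssh, using the IP address shown in the output above; all three nodes should report Ready.

> ssh -i kube.pem ubuntu@18.181.111.31 -- kubectl get nodes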

Deleting the cluster

Clean-Up deletes the EC2 instances, the security group, and the EC2 key pair that were created for the cluster.

> Clean-Up
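To confirm that nothing was left behind, the following optional check should return an empty list once the instances have been terminated and their Name tags removed:

> aws ec2 describe-instances --filters "Name=tag:Name,Values=kube-master,kube-worker1,kube-worker2" --query "Reservations[].Instances[].State.Name"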
