ls kiada-0.1
  Dockerfile  Makefile  app.js  html/
cat Dockerfile
  FROM node:16
  COPY app.js /app.js
  COPY html /html
  ENTRYPOINT ["node", "app.js"]
docker build -t kiada:latest .
docker run --name kiada-container -p 1234:8080 -d kiada
docker tag kiada jnuho/kiada:0.1

docker login -u jnuho docker.io
docker push jnuho/kiada:0.1

# Run the image on other hosts
docker run --name kiada-container -p 1234:8080 -d jnuho/kiada:0.1
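
# quick check that the container responds (host port 1234 maps to the app's port 8080 above):
curl localhost:1234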

kubectl api-resources
kubectl explain pod
kubectl explain configmap
kubectl explain service
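
# kubectl explain can also drill into nested fields, e.g.:
kubectl explain pod.spec.containers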

# Pod resource aliases: pods, pod, po
kubectl get pod --all-namespaces
kubectl get po --all-namespaces
### METHOD 1. Create a single-node cluster with minikube

# Create a single-node Kubernetes cluster to test Kubernetes functionality
# (testing features related to managing apps across multiple nodes is limited)

minikube start --driver=docker

kubectl cluster-info

### METHOD 2. kind
# Docs: https://kind.sigs.k8s.io/docs/user/quick-start/
# still less stable than other tools such as minikube

curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.12.0/kind-linux-amd64
chmod +x ./kind
mkdir -p /usr/local/kind
mv ./kind /usr/local/kind/
ls -al /usr/local
  drwxr-xr-x  2 root root 4096 Apr  5 15:20 kind

# add to ~ubuntu/.bashrc (vim ~ubuntu/.bashrc):
  export PATH=/usr/local/kind:$PATH
  alias k=kubectl
  complete -F __start_kubectl k
  source <(kubectl completion bash)
source ~/.bashrc

cat kind-multi-node.yaml
  kind: Cluster
  apiVersion: kind.x-k8s.io/v1alpha4
  nodes:
  - role: control-plane
  - role: worker
  - role: worker

# 3-node cluster
kind create cluster --config kind-multi-node.yaml
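
# kind sets the current kubectl context to kind-<cluster-name> ('kind' is the default name):
kubectl cluster-info --context kind-kind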

kind get nodes
  kind-worker
  kind-control-plane
  kind-worker2

# Instead of using Docker to run containers, nodes created by kind use the containerd runtime,
# so containers inside a node are listed with crictl, not with docker ps on the host
crictl ps
docker ps
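
# crictl runs inside a kind node (each node is a Docker container; node names from `kind get nodes`):
docker exec -it kind-worker crictl ps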


### METHOD 3. Create a multi-node cluster with GKE (requires a Google account and a credit card)

# Signing up for a Google account, in the unlikely case you don’t have one already.
# Creating a project in the Google Cloud Platform Console.
# Enabling billing. (This does require your credit card info, but Google provides a
#   12-month free trial, and they're nice enough to not start charging automatically after the free trial is over.)
# Enabling the Kubernetes Engine API.
# Downloading and installing Google Cloud SDK. (This includes the gcloud
#   command-line tool, which you’ll need to create a Kubernetes cluster.)
# Installing the kubectl command-line tool with `gcloud components install kubectl`

# After completing the installation, you can create a Kubernetes cluster with three
#   worker nodes using the command shown in the following listing.
# Then interact with the cluster using `kubectl`, which issues REST requests
# to the Kubernetes API server running on the master node

# set default zone
# region europe-west3 has three different zones
# but here we set all nodes to use the same zone 'europe-west3-c'
gcloud config set compute/zone europe-west3-c

# create kubernetes cluster
# three worker nodes in the same zone
# thus --num-nodes indicates the number of nodes per zone
# if the region contains three zones and you only want three nodes, set --num-nodes=1
gcloud container clusters create kiada --num-nodes 3
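
# verify the cluster was created:
gcloud container clusters list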

gcloud compute instances list

# scale the number of nodes
gcloud container clusters resize kiada --size 0

### Check cluster info

# explore what's running inside a node; you can check logs inside it
gcloud compute ssh <node-name>

# check if the cluster is up by listing cluster nodes
#   minikube : 1 node in the cluster
#   GKE : 3 nodes in the cluster
kubectl get nodes



### METHOD 4. EKS
# Install `eksctl` command-line tool
# https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html

# creates a three-node cluster in the eu-central-1 region
eksctl create cluster --name kiada --region eu-central-1 --nodes 3 --ssh-access
kubectl get nodes
kubectl describe nodes
kubectl describe node <node-name>
cat ~/.kube/config
export KUBECONFIG=/path/to/custom/kubeconfig
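
# list and switch kubeconfig contexts without changing KUBECONFIG:
kubectl config get-contexts
kubectl config use-context <context-name>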
### Kubernetes Dashboard
# https://github.com/kubernetes/dashboard
# apply a configuration to a resource by file name or stdin
# k apply [options]

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml

# This command runs a local proxy to the API server, allowing you to access
# the services through it. Leave the proxy process running and use the browser
# to open the dashboard at the following URL:

kubectl proxy

# Dashboard URL:
# http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

# A bearer token must be created first:
# https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md#getting-a-bearer-token
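
# A minimal sketch of the admin-user setup from the linked doc
# (a ServiceAccount plus a cluster-admin ClusterRoleBinding; the file name is illustrative):
cat dashboard-adminuser.yaml
  apiVersion: v1
  kind: ServiceAccount
  metadata:
    name: admin-user
    namespace: kubernetes-dashboard
  ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
    name: admin-user
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: cluster-admin
  subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kubernetes-dashboard
kubectl apply -f dashboard-adminuser.yaml

# on Kubernetes v1.24+ you can request a token directly instead of reading a secret:
kubectl -n kubernetes-dashboard create token admin-user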

# Bash:
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"

# PowerShell:
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | sls admin-user | ForEach-Object { $_ -Split '\s+' } | Select -First 1)


# Alternatively, install the dashboard using Helm. First install Helm:
# https://medium.com/@munza/local-kubernetes-with-kind-helm-dashboard-41152e4b3b3d
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh

helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm install dashboard kubernetes-dashboard/kubernetes-dashboard -n kubernetes-dashboard --create-namespace
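
# verify the Helm release and the dashboard pods:
helm list -n kubernetes-dashboard
kubectl get pods -n kubernetes-dashboard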

### Create a Pod

# OLD VERSION:
# the --generator=run/v1 option creates a ReplicationController instead of a Deployment
# kubectl run kubia --image=jnuho/kubia --port=8080

# Run a Docker Hub image on the Kubernetes cluster with kubectl
# create all the necessary components without having to deal with JSON or YAML
# Create a Deployment object called 'kiada'
# the deployment uses the container image jnuho/kiada:0.1
# the Deployment object is stored in the Kubernetes API
# now the jnuho/kiada container runs in the cluster

kubectl create deployment kiada --image=jnuho/kiada:0.1
  deployment.apps/kiada created

# List the created Deployments: NOT READY (containers not ready yet)
# Note: there is no command to list containers; e.g. 'k get containers' does not exist
# Pods, not containers, are the smallest deployable unit in Kubernetes!
# Containers within a pod share the network and UTS namespaces
# Each pod is a logical computer of its own that runs an application
# Even if multiple pods run on one node, a pod cannot see the processes of other pods
k get deployments
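
# The Deployment manages its pods through a ReplicaSet; the hash in the pod
# names below (e.g. kiada-7fc9cfcf4b-...) comes from it:
kubectl get replicasets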


# info about the created pods (Pending/ContainerCreating -> Running)
kubectl get pods
kubectl describe pods

# the pod's details confirm which worker node it is running on
kubectl describe pod <pod-name>
  Name:         kiada-7fc9cfcf4b-t8fcg
  Namespace:    default
  Priority:     0
  Node:         kind-worker/172.18.0.2
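
# check the application log of the container (pod name from the output above):
kubectl logs kiada-7fc9cfcf4b-t8fcg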


### Create a Service
# Exposing your application to the world

# Create a Service object
#   expose all pods that belong to the kiada Deployment as a new service
#   allow the pods to be accessed from outside the cluster via a load balancer
#   the application listens on port 8080, so you want to access it via that port
#   a LoadBalancer-type service is exposed outside the cluster via a public IP;
#   a ClusterIP service, by contrast, is reachable only inside the cluster

kubectl expose deployment kiada --type=LoadBalancer --name kiada-http --port 8080

# a load balancer is created and an EXTERNAL-IP is assigned
kubectl get services
kubectl get svc

# List all available resources - Kubernetes Objects
kubectl api-resources


# 32232 is the node port on the worker node
# that forwards connections to your service.
# The service is reachable on this port on every worker node,
# regardless of whether you're using minikube, kind, or any other Kubernetes cluster.
kubectl get svc kiada-http
  NAME         TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
  kiada-http   LoadBalancer   10.96.59.164   <pending>     8080:32232/TCP   83m
  kubernetes   ClusterIP      10.96.0.1      <none>        443/TCP          6h50m


# If the Kubernetes cluster is deployed on a cloud (AWS, GCloud),
# the cloud infrastructure provisions a load balancer that forwards
# traffic coming into the cluster to the running containers.
# The infrastructure tells Kubernetes the load balancer's IP,
# which becomes the external address of the service.


# Accessing your application through Load Balancer
curl <EXTERNAL-IP>:8080
curl 104.155.74.57:8080


# Accessing your application without Load Balancer
#   not all kubernetes clusters provide Load Balancer
#   minikube shows where to access the services
#     prints the url of the service
minikube service kiada-http --url
curl <ip:port>


# you can also access the service locally using kubectl port-forward
# (forward local port 8080 to the service's port 8080):
kubectl port-forward svc/kiada-http 8080:8080
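
# then, in another terminal:
curl localhost:8080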

# Deploying your application REQUIRES only two steps
# 'Deployment' is a representation of an application
# 'Service' exposes that deployment

kubectl create deployment [options]
kubectl expose deployment [options]
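
# The same two objects can also be created declaratively from YAML;
# a minimal sketch (the file name and labels are illustrative, not from the original session):
cat kiada-deploy.yaml
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: kiada
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: kiada
    template:
      metadata:
        labels:
          app: kiada
      spec:
        containers:
        - name: kiada
          image: jnuho/kiada:0.1
kubectl apply -f kiada-deploy.yaml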

# scale the deployment out to three replicas
kubectl scale deployment kiada --replicas=3

# the result of scaling out:
k get deploy
k get deployments.apps
k get deployment

  NAME    READY   UP-TO-DATE   AVAILABLE   AGE
  kiada   3/3     3            3           31h

k get pods

  NAME                     READY   STATUS    RESTARTS       AGE
  kiada-7fc9cfcf4b-64qvl   1/1     Running   0              3m3s
  kiada-7fc9cfcf4b-mf8ls   1/1     Running   0              3m3s
  kiada-7fc9cfcf4b-t8fcg   1/1     Running   1 (148m ago)   31h


k get pods -o wide

# observing requests hitting all three pods when using the service
k get svc
  NAME         TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
  kiada-http   LoadBalancer   10.96.59.164   <pending>     8080:32232/TCP   43h
  kubernetes   ClusterIP      10.96.0.1      <none>        443/TCP          2d

k describe pods | grep Node:
  Node:         kind-worker2/172.18.0.3
  Node:         kind-worker/172.18.0.4
  Node:         kind-worker/172.18.0.4

# Each request arrives at a different pod in random order.
# This is what services in Kubernetes do when more than one pod instance is behind them.
# They act as load balancers in front of the pods.

curl 172.18.0.3:32232
curl 172.18.0.4:32232


# get info about a node in a yaml format
kubectl get node <node-name> -o yaml
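
# individual fields can be extracted with jsonpath, e.g. the node's container runtime version:
kubectl get node <node-name> -o jsonpath='{.status.nodeInfo.containerRuntimeVersion}'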


# one can access the API directly through the proxy using plain HTTP
kubectl proxy
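
# with the proxy running (default port 8001), query the API directly, e.g.:
curl http://localhost:8001/api/v1/nodes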

kubectl explain node.spec
kubectl get node kind-control-plane -o yaml