Original content: https://gitee.com/dev-99cloud/training-kubernetes , with additions made on top of it.

Lesson 03: K8S Concepts

3.1 What is YAML?

  • How to read YAML: lists / dictionaries / numbers / strings / booleans (see the sample below)
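
    A minimal sample covering these YAML types (purely illustrative, not tied to any Kubernetes object):

    # a mapping (dictionary) of key/value pairs
    server:
      host: "127.0.0.1"    # string
      port: 8080           # number
      tls_enabled: false   # boolean
      # a sequence (list) of strings
      allowed_methods:
        - GET
        - POST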

3.2 What are Namespace & Quota?

  • Namespace & tenant isolation

  • Lab: namespace & quota

    # Create a namespace
    kubectl create namespace quota-mem-cpu-example
    
    # Apply a resource quota to this namespace
    kubectl apply -f https://k8s.io/examples/admin/resource/quota-mem-cpu.yaml --namespace=quota-mem-cpu-example
    
    # View the quota details
    kubectl get resourcequota mem-cpu-demo --namespace=quota-mem-cpu-example --output=yaml
    
    # Create a pod and constrain its resource usage (requests/limits)
    kubectl apply -f https://k8s.io/examples/admin/resource/quota-mem-cpu-pod.yaml --namespace=quota-mem-cpu-example
    
    # Confirm the pod is running
    kubectl get pod quota-mem-cpu-demo --namespace=quota-mem-cpu-example
    
    # Check the quota again and look at the used portion
    kubectl get resourcequota mem-cpu-demo --namespace=quota-mem-cpu-example --output=yaml
    
    # Try to start a second pod; it fails because of the quota
    kubectl apply -f https://k8s.io/examples/admin/resource/quota-mem-cpu-pod-2.yaml --namespace=quota-mem-cpu-example
    
    # Error from server (Forbidden): error when creating "examples/admin/resource/quota-mem-cpu-pod-2.yaml": pods "quota-mem-cpu-demo-2" is forbidden: exceeded quota: mem-cpu-demo, requested: requests.memory=700Mi, used: requests.memory=600Mi, limited: requests.memory=1Gi
    
    # Delete the namespace
    kubectl delete namespace quota-mem-cpu-example
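
    For reference, the quota manifest applied above (quota-mem-cpu.yaml) defines roughly the following; this is a sketch based on the upstream example, so check the URL for the authoritative values:

    apiVersion: v1
    kind: ResourceQuota
    metadata:
      name: mem-cpu-demo
    spec:
      hard:
        requests.cpu: "1"
        requests.memory: 1Gi
        limits.cpu: "2"
        limits.memory: 2Gi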
    

3.3 What are Deployment & ReplicaSet?

  • Lab: Pod labels and the replication controller (a ReplicaSet sketch follows below)
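
    The original notes do not include a manifest for this lab; the sketch below (names, labels, and the file name are illustrative) shows how a ReplicaSet selects pods purely by label and keeps the replica count:

    apiVersion: apps/v1
    kind: ReplicaSet
    metadata:
      name: nginx-rs            # illustrative name
    spec:
      replicas: 2               # the controller keeps exactly 2 pods carrying this label
      selector:
        matchLabels:
          app: nginx-rs-demo
      template:
        metadata:
          labels:
            app: nginx-rs-demo  # must match .spec.selector.matchLabels
        spec:
          containers:
          - name: nginx
            image: nginx

    # Apply it, delete one pod, and watch the ReplicaSet recreate it
    kubectl apply -f replicaset.yaml
    kubectl get pods -l app=nginx-rs-demo
    kubectl delete pod <one-of-the-pods>
    kubectl get pods -l app=nginx-rs-demo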

  • Lab: Deployments

    kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml
    kubectl get pods
    kubectl get deployments
    
    kubectl set image deployment/nginx-deployment nginx=nginx:1.16.1 --record
    kubectl get pods
    
    kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.161 --record=true
    kubectl get pods
    
    kubectl rollout history deployment.v1.apps/nginx-deployment
    kubectl rollout history deployment.v1.apps/nginx-deployment --revision=2
    kubectl rollout undo deployment.v1.apps/nginx-deployment --to-revision=2
    kubectl get pods
    # If the old pod stuck in ErrImagePull / ImagePullBackOff keeps failing, remove it with kubectl delete pod <pod-name>
    
    kubectl scale deployment.v1.apps/nginx-deployment --replicas=10
    kubectl get pods
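
    For reference, the nginx-deployment.yaml applied at the start of this lab is roughly the following (a sketch of the upstream example; see the URL for the authoritative version). Its app: nginx label is also what the Service in 3.4 selects:

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
      labels:
        app: nginx
    spec:
      replicas: 3
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx:1.14.2
            ports:
            - containerPort: 80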
    

3.4 What are Services?

  • Basic concept: Service

  • Lab: Service

    Create a file service.yaml with the following content:

    apiVersion: v1
    kind: Service
    metadata:
      name: hello-python-service
    spec:
      type: NodePort
      selector:
        app: nginx
      ports:
      - protocol: "TCP"
        port: 6000
        targetPort: 80
        nodePort: 31000
    
    kubectl apply -f service.yaml
    
    # After the Service is created, inspect the iptables rules
    $ iptables -t nat -n -L -v
    
    # You should see entries like these: traffic to the service IP is split across the pods by probability
    Chain KUBE-SVC-C5I534CP62HG2LN3 (2 references)
    pkts bytes target     prot opt in     out     source               destination
        0     0 KUBE-SEP-FBKE4RDEE4U4O7NI  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* default/hello-python-service */ statistic mode random probability 0.50000000000
        0     0 KUBE-SEP-ZIK7TOCY5OVWTBMA  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* default/hello-python-service */
    
    kubectl get pods -o wide
    kubectl run curl --image=radial/busyboxplus:curl -i --tty
    
    nslookup hello-python-service
    curl http://hello-python-service.default.svc.cluster.local:6000
    
    # From a different namespace, or from a host node, use the full FQDN (here queried against the cluster DNS at 10.96.0.10)
    nslookup hello-python-service.default.svc.cluster.local 10.96.0.10
    
  • LB-type Service: metallb
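
    A minimal sketch of an LB-type Service, assuming MetalLB (or another load-balancer implementation) is already installed and has an address pool configured; the selector reuses the app: nginx label from the Deployment lab, and the Service name is illustrative:

    apiVersion: v1
    kind: Service
    metadata:
      name: hello-python-lb      # illustrative name
    spec:
      type: LoadBalancer         # MetalLB assigns an EXTERNAL-IP from its address pool
      selector:
        app: nginx
      ports:
      - protocol: TCP
        port: 80
        targetPort: 80

    # Without a load-balancer controller the EXTERNAL-IP stays <pending>;
    # with MetalLB the assigned address shows up here
    kubectl get svc hello-python-lb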

3.5 Lab: K8S Dashboard
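
  • A sketch of the lab steps, assuming the upstream "recommended" manifest is used (the version v2.7.0 pinned below is an assumption; adjust to match your cluster):

    # Deploy the Dashboard
    kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

    # Check that the Dashboard pods are running
    kubectl get pods -n kubernetes-dashboard

    # Forward the Dashboard service locally, then open https://localhost:8443 in a browser
    kubectl port-forward -n kubernetes-dashboard svc/kubernetes-dashboard 8443:443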

3.6 Lab: How to publish a service and scale it in K8S?
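
  • A sketch of one way to publish a service and scale it with plain kubectl commands (the deployment name and image are illustrative):

    # Create a deployment and expose it as a NodePort service
    kubectl create deployment hello-nginx --image=nginx
    kubectl expose deployment hello-nginx --type=NodePort --port=80

    # Find the assigned node port, then curl http://<node-ip>:<node-port>
    kubectl get svc hello-nginx

    # Scale out, then scale back in
    kubectl scale deployment hello-nginx --replicas=5
    kubectl get pods -l app=hello-nginx
    kubectl scale deployment hello-nginx --replicas=2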

3.7 DaemonSet & StatefulSet

  • DaemonSet

    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: fluentd-elasticsearch
      namespace: kube-system
      labels:
        k8s-app: fluentd-logging
    spec:
      selector:
        matchLabels:
          name: fluentd-elasticsearch
      template:
        metadata:
          labels:
            name: fluentd-elasticsearch
        spec:
          containers:
          - name: fluentd-elasticsearch
            image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
            resources:
              limits:
                memory: 200Mi
              requests:
                cpu: 100m
                memory: 200Mi
          terminationGracePeriodSeconds: 30
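
    Assuming the manifest above is saved as fluentd-daemonset.yaml (the filename is an assumption), it can be applied and verified like this; a DaemonSet runs one pod on every schedulable node:

    kubectl apply -f fluentd-daemonset.yaml

    # DESIRED / READY should match the number of schedulable nodes
    kubectl get daemonset fluentd-elasticsearch -n kube-system
    kubectl get pods -n kube-system -l name=fluentd-elasticsearch -o wide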
    
  • StatefulSet

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      labels:
        app: statefulset
    spec:
      ports:
      - port: 80
        name: web
      clusterIP: None
      selector:
        app: statefulset
    ---
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: web
    spec:
      selector:
        matchLabels:
          app: statefulset # has to match .spec.template.metadata.labels
      serviceName: "nginx"
      replicas: 3 # by default is 1
      template:
        metadata:
          labels:
            app: statefulset # has to match .spec.selector.matchLabels
        spec:
          terminationGracePeriodSeconds: 10
          containers:
          - name: nginx
            image: nginx
            ports:
            - containerPort: 80
              name: web
    
    kubectl apply -f test-statefulset.yaml
    kubectl get pods
    
    # A headless Service has no service (cluster) IP
    kubectl run curl --image=radial/busyboxplus:curl -i --tty
    nslookup nginx
    
    # Run nslookup nginx a few times: the order of the returned records changes each time
    # A headless Service returns the pod IPs directly; the rotating DNS answer order gives simple client-side load balancing
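
    Each StatefulSet pod also gets a stable per-pod DNS name through the headless Service, of the form <pod-name>.<service-name>.<namespace>.svc.cluster.local; a quick check from the same curl pod (assuming the default namespace):

    nslookup web-0.nginx.default.svc.cluster.local
    curl http://web-0.nginx.default.svc.cluster.local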
    

3.8 Lab: ETCD operations

  • Set & Get

    # On Ubuntu, install with apt
    root@ckalab001:~# apt install etcd-client
    
    # On other platforms, download the binaries directly from https://github.com/etcd-io/etcd/releases
    
    root@ckalab001:~# ps -ef | grep api | grep -i etcd
    root       24761   24743  3 10:17 ?        00:06:53 kube-apiserver --advertise-address=172.31.43.206 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/pki/ca.crt --enable-admission-plugins=NodeRestriction --enable-bootstrap-token-auth=true --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/etc/kubernetes/pki/sa.pub --service-account-signing-key-file=/etc/kubernetes/pki/sa.key --service-cluster-ip-range=10.96.0.0/12 --tls-cert-file=/etc/kubernetes/pki/apiserver.crt --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    
    # Set the environment variable ETCDCTL_API=3
    root@ckalab001:~# export ETCDCTL_API=3
    
    root@ckalab001:~# etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" --endpoints=https://127.0.0.1:2379 put /firstkey trystack
    OK
    
    root@ckalab001:~# etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" --endpoints=https://127.0.0.1:2379 get /firstkey
    /firstkey
    trystack
    
    # List all keys
    etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" --endpoints=https://127.0.0.1:2379 get --prefix --keys-only ""
    
    # List all keys & values
    etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" --endpoints=https://127.0.0.1:2379 get --prefix ""
    
    # backup & restore
    etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" snapshot save a.txt
    
    # Steps to restore a backup:
    # 1. Stop the etcd service
    # 2. Delete the etcd data directory
    # 3. Run snapshot restore
    # 4. Restart the etcd service
    
    etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" snapshot restore a.txt
    

    Backup & restore lab:

    [root@ckalab003 ~]# etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" put /firstkey-1 test1
    OK
    
    [root@ckalab003 ~]# etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" get /firstkey-1
    /firstkey-1
    test1
    
    [root@ckalab003 ~]# etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt  --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key snapshot save backup1
    ...
    Snapshot saved at backup1
    
    [root@ckalab003 ~]# etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" put /firstkey-2 test2
    OK
    
    [root@ckalab003 ~]# etcdctl --cert="/etc/kubernetes/pki/apiserver-etcd-client.crt" --key="/etc/kubernetes/pki/apiserver-etcd-client.key" --cacert="/etc/kubernetes/pki/etcd/ca.crt" get /firstkey-2
    /firstkey-2
    test2
    
    [root@ckalab003 ~]# mv /etc/kubernetes/manifests/etcd.yaml .
    [root@ckalab003 ~]# rm -rf /var/lib/etcd/*
    [root@ckalab003 ~]# etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt  --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key snapshot restore backup1 --data-dir /var/lib/etcd
    ...
    2023-06-17T17:35:25+08:00	info	snapshot/v3_snapshot.go:272	restored snapshot	{"path": "backup1", "wal-dir": "/var/lib/etcd/member/wal", "data-dir": "/var/lib/etcd", "snap-dir": "/var/lib/etcd/member/snap"}
    
    [root@ckalab003 ~]# mv ./etcd.yaml /etc/kubernetes/manifests/
    
    [root@ckalab003 ~]# etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt  --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key get /firstkey-1
    /firstkey-1
    test1
    [root@ckalab003 ~]# etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt  --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key get /firstkey-2
    

    /firstkey-1 is visible while /firstkey-2 is not, which is the expected result.

    Restore for a multi-member etcd cluster:

    etcdctl --name ckalab001 --initial-cluster ckalab001=https://139.224.191.4:2380 --initial-cluster-token etcd-cluster --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key --cacert=/etc/kubernetes/pki/etcd/ca.crt --initial-advertise-peer-urls https://139.224.191.4:2380 snapshot restore /root/backup-1 --data-dir /var/lib/etcd
    

3.9 What is a static Pod?

  • Static Pod

  • /etc/kubernetes/manifests

    root@CKA003:~# ps -ef | grep kubelet
    root     10572     1  2 09:37 ?        00:07:41 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --cgroup-driver=cgroupfs --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.2 --resolv-conf=/run/systemd/resolve/resolv.conf
    
    root@CKA003:~# grep mani /var/lib/kubelet/config.yaml
    staticPodPath: /etc/kubernetes/manifests
    
    root@CKA003:~# cd /etc/kubernetes/manifests
    root@CKA003:/etc/kubernetes/manifests# ls
    etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml  static-web.yaml
    
    root@CKA003:/etc/kubernetes/manifests# cat static-web.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: static-web
      labels:
        role: myrole
    spec:
      containers:
      - name: web
        image: nginx
        ports:
          - name: web
            containerPort: 80
            protocol: TCP
    
    root@CKA003:/etc/kubernetes/manifests# kubectl get pods
    NAME                READY   STATUS    RESTARTS   AGE
    dnsutils            1/1     Running   4          4h17m
    static-web          1/1     Running   0          98m
    static-web-cka003   1/1     Running   0          98m
    web-0               0/1     Pending   0          116m
    
    root@CKA003:/etc/kubernetes/manifests# kubectl delete pod static-web
    pod "static-web" deleted
    root@CKA003:/etc/kubernetes/manifests# kubectl get pods
    NAME                READY   STATUS    RESTARTS   AGE
    dnsutils            1/1     Running   4          4h17m
    static-web-cka003   1/1     Running   0          99m
    web-0               0/1     Pending   0          117m
    
    root@CKA003:/etc/kubernetes/manifests# kubectl delete pod static-web-cka003
    pod "static-web-cka003" deleted
    root@CKA003:/etc/kubernetes/manifests# kubectl get pods
    NAME                READY   STATUS    RESTARTS   AGE
    dnsutils            1/1     Running   4          4h18m
    static-web-cka003   1/1     Running   0          10s
    web-0               0/1     Pending   0          117m
    
    root@CKA003:/etc/kubernetes/manifests# ls
    etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml  static-web.yaml
    root@CKA003:/etc/kubernetes/manifests# rm -rf static-web.yaml
    root@CKA003:/etc/kubernetes/manifests# kubectl get pods
    NAME       READY   STATUS    RESTARTS   AGE
    dnsutils   1/1     Running   4          4h18m
    web-0      0/1     Pending   0          117m