from notebook.auth import passwd   

 

If importing this module fails with a "not installed" error:

(base) root@jupyterlab-deployment-96d5cd489-kjrd4:~# jupyter notebook --generate-config
Writing default config to: /home/jovyan/.jupyter/jupyter_notebook_config.py

 

 

!pip3 install notebook==6.5.5

 

If that does not fix it,

 

!pip3 install --upgrade ipython 

try this as well.

 

Then run the import again:

from notebook.auth import passwd    

 

# the entire string below is the password hash

'argon2:$argon2id$v=19$m=10240,t=10,p=8$QbPT9aYvhdqRFecguJX5sg$ArunzaJ65DCEA6MnCL7N9Y2m0Y1XwVCR3N95PC3lGxE'
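
The hash above is what passwd() returns when called in the notebook/IPython session; it can also be generated non-interactively from a shell (a quick sketch assuming notebook 6.x; "yourpassword" is a placeholder):

python3 -c "from notebook.auth import passwd; print(passwd('yourpassword'))"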

 

 

(base) root@jupyterlab-deployment-96d5cd489-kjrd4:~/.jupyter# vi jupyter_notebook_config.py

 

Add the following lines if they are not already there:

 

c.ServerApp.password_required = True
c.PasswordIdentityProvider.hashed_password = 'argon2:$argon2id$v=19$m=10240,t=10,p=8$QbPT9aYvhdqRFecguJX5sg$ArunzaJ65DCEA6MnCL7N9Y2m0Y1XwVCR3N95PC3lGxE'

 

 

In deploy.yaml, change this part:

           chmod -R 777 /home/jovyan/work
              start.sh jupyter lab --LabApp.token='password' --LabApp.ip='0.0.0.0' --LabApp.allow_root=True


to:

start.sh jupyter lab --LabApp.token='' --LabApp.password='' 

 


 

 

 

Full deploy.yaml (attached to the original post as jupyter-deploy.txt):

 

 

apiVersion: apps/v1
kind: Deployment
metadata:
  name: jupyterlab-deployment
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jupyterlab
  template:
    metadata:
      labels:
        app: jupyterlab
    spec:
      securityContext:
        runAsUser: 0
        fsGroup: 0
      containers:
        - name: jupyterlab
          image: jupyter/datascience-notebook:latest
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 8888
          command:
            - /bin/bash
            - -c
            - |
              chmod -R 777 /home/jovyan/work
              start.sh jupyter lab --LabApp.token='' --LabApp.password='' --LabApp.ip='0.0.0.0' --LabApp.allow_root=True
              #start.sh jupyter lab --LabApp.token='password' --LabApp.ip='0.0.0.0' --LabApp.allow_root=True
          volumeMounts:
            - name: jupyterlab-data
              mountPath: /home/jovyan/work
              #mountPath: /data
            - name: jupyterlab-config
              mountPath: /home/jovyan/.jupyter
          securityContext:
              runAsUser: 0
          resources:
            requests:
              memory: 500Mi
              cpu: 250m
      restartPolicy: Always
      volumes:
      - name: jupyterlab-data
        #persistentVolumeClaim:
        #claimName: fileshare-pvc
        hostPath:
           path: /home/sysic/Data
      - name: jupyterlab-config
        hostPath:
           path: /home/sysic/config
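
To try the manifest out, apply it and reach the lab with a quick port-forward (a sketch; the file name jupyter-deploy.yaml is just an assumption, and since no Service is defined here a port-forward is the simplest check):

kubectl apply -f jupyter-deploy.yaml
kubectl rollout status deploy/jupyterlab-deployment
# no Service in this manifest, so port-forward for a quick look
kubectl port-forward deploy/jupyterlab-deployment 8888:8888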

 

################### Changing the home directory #########################################

 

(base) root@jupyterlab-deployment-6886db8858-rf68k:~/.jupyter# pwd
/home/jovyan/.jupyter
(base) root@jupyterlab-deployment-6886db8858-rf68k:~/.jupyter# more jupyter_notebook_config.py |grep jaeyon
c.ServerApp.root_dir = '/home/jaeyong'
(base) root@jupyterlab-deployment-6886db8858-rf68k:~/.jupyter#



apiVersion: apps/v1
kind: Deployment
metadata:
  name: jupyterlab-deployment
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jupyterlab
  template:
    metadata:
      labels:
        app: jupyterlab
    spec:
      securityContext:
        runAsUser: 0
        fsGroup: 0
      containers:
        - name: jupyterlab
          image: jupyter/datascience-notebook:latest
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 8888
          command:
            - /bin/bash
            - -c
            - |
              chmod -R 777 /home/jaeyong
              #chmod -R 777 /home/jovyan/work
              start.sh jupyter lab --LabApp.token='' --LabApp.password='' --LabApp.ip='0.0.0.0' --LabApp.allow_root=True
              #start.sh jupyter lab --LabApp.token='password' --LabApp.ip='0.0.0.0' --LabApp.allow_root=True
          volumeMounts:
            - name: jupyterlab-data
              mountPath: /home/jaeyong
              #mountPath: /home/jovyan/work
              #mountPath: /data
            - name: jupyterlab-config
              mountPath: /home/jovyan/.jupyter
          securityContext:
              runAsUser: 0
          resources:
            requests:
              memory: 500Mi
              cpu: 250m
      restartPolicy: Always
      volumes:
      - name: jupyterlab-data
        #persistentVolumeClaim:
        #claimName: fileshare-pvc
        hostPath:
           path: /home/sysic/Data
      - name: jupyterlab-config
        hostPath:
           path: /home/sysic/config
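
To double-check that the running container actually sees the new root_dir, something like this works (assuming kubectl is pointed at the cluster):

kubectl exec deploy/jupyterlab-deployment -- grep root_dir /home/jovyan/.jupyter/jupyter_notebook_config.py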

 

${__to:date:MMDDHH}

 ${__to}
 
The DB tables are split by name into the form TABLENAME-YYMMDDHH.
The idea is to take the value from the dashboard's time range and show both the currently selected hour and the previous hour (-1).
Create two queries,
one with the selected value and one with the -1 value.
The graph then shows two legend entries; give them the same color pattern (click the legend item on the chart).
Showing only one of them is something for later; finding the patterns below already ate too much time, and I do not know about older Grafana versions.
 
 
 Pick up the time range value as a variable:
 ${__from:date:YYMMDDHH}
 
Since this is 9 hours behind the current (local) time, use it when you need a value later than ${__from}; it is probably best to register it as a dashboard variable and use it that way:

SELECT to_char(to_timestamp(${__from:date:seconds}) + INTERVAL '8 hour', 'YYMMDDHH')

In the dashboard variable settings, Refresh must be set so the variable refreshes when the time range changes.
 
 ${__to:date:YYMMDDHH}
 
 "re-${__from:date:YYMMDDHH}"

 

 

For testing:

SELECT COALESCE(to_char(to_timestamp(${__from:date:seconds}) + INTERVAL '12 hour', 'YYMMDDHH24'), to_char(to_timestamp(${__from:date:seconds}), 'YYMMDDHH24'))
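
If the datasource is PostgreSQL, the suffix expression can be sanity-checked outside Grafana by substituting a literal epoch for ${__from:date:seconds} (a sketch; 1637712000 is just a sample value, and KST is UTC+9 so a 9-hour offset is shown here; adjust the interval to whatever the tables actually use):

psql -c "SELECT to_char(to_timestamp(1637712000) + INTERVAL '9 hour', 'YYMMDDHH24');"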

For reference, on minikube the certificates are under /var/lib/minikube/certs.

 

 

 

[root@minikube home]#  kubeadm certs check-expiration
[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'

CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Nov 24, 2022 14:07 UTC   364d                                    no
apiserver                  Nov 24, 2022 14:07 UTC   364d            ca                      no
apiserver-etcd-client      Nov 24, 2022 14:07 UTC   364d            etcd-ca                 no
apiserver-kubelet-client   Nov 24, 2022 14:07 UTC   364d            ca                      no
controller-manager.conf    Nov 24, 2022 14:07 UTC   364d                                    no
etcd-healthcheck-client    Nov 24, 2022 14:07 UTC   364d            etcd-ca                 no
etcd-peer                  Nov 24, 2022 14:07 UTC   364d            etcd-ca                 no
etcd-server                Nov 24, 2022 14:07 UTC   364d            etcd-ca                 no
front-proxy-client         Nov 24, 2022 14:07 UTC   364d            front-proxy-ca          no
scheduler.conf             Nov 24, 2022 14:07 UTC   364d                                    no

CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Nov 17, 2031 15:30 UTC   9y              no
etcd-ca                 Nov 17, 2031 15:32 UTC   9y              no
front-proxy-ca          Nov 17, 2031 15:32 UTC   9y              no


 kubeadm certs generate-csr  --cert-dir /home/GOOD --kubeconfig-dir /home/GOOD
 cp /etc/kubernetes/pki/ca.* /home/GOOD/
 
  openssl x509 -req -in apiserver.csr -CAcreateserial -CA ca.crt -CAkey ca.key -days 10000 -out apiserver.crt

cd /etc/kubernetes/pki/
rm  apiserver.crt
cp /home/GOOD/apiserver.crt .
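
Before relying on it, check the new expiry (and the SANs, since plain openssl x509 -req does not copy them from the CSR unless told to); the running kube-apiserver generally also has to be restarted before it serves the replaced certificate. An extra check, not part of the original steps:

openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -enddate
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep -A1 'Subject Alternative Name'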
 
 
 [root@minikube certs]#  kubeadm certs check-expiration
[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'

CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Nov 24, 2022 14:07 UTC   364d                                    no
apiserver                  Apr 12, 2049 13:32 UTC   27y             ca                      no
apiserver-etcd-client      Nov 24, 2022 14:07 UTC   364d            etcd-ca                 no
apiserver-kubelet-client   Nov 24, 2022 14:07 UTC   364d            ca                      no
controller-manager.conf    Nov 24, 2022 14:07 UTC   364d                                    no
etcd-healthcheck-client    Nov 24, 2022 14:07 UTC   364d            etcd-ca                 no
etcd-peer                  Nov 24, 2022 14:07 UTC   364d            etcd-ca                 no
etcd-server                Nov 24, 2022 14:07 UTC   364d            etcd-ca                 no
front-proxy-client         Nov 24, 2022 14:07 UTC   364d            front-proxy-ca          no
scheduler.conf             Nov 24, 2022 14:07 UTC   364d                                    no

CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Nov 17, 2031 15:30 UTC   9y              no
etcd-ca                 Nov 17, 2031 15:32 UTC   9y              no
front-proxy-ca          Nov 17, 2031 15:32 UTC   9y              no
[root@minikube certs]#

 

 

 

The Kubernetes cluster certificates have a lifespan of one year. If the Kubernetes cluster certificate expires on the Kubernetes master, then the kubelet service will fail. Issuing a kubectl command, such as kubectl get pods or kubectl exec -it container_name bash, will result in a message similar to Unable to connect to the server: x509: certificate has expired or is not yet valid.

Procedure

  1. Log on to the Kubernetes master node as the root user and run the following command to check when the Kubernetes certificates will expire.
    kubeadm alpha certs check-expiration
    The output will be similar to the following. In this case the certificates will expire in 273 days.
    CERTIFICATE                EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
    admin.conf                 Sep 17, 2020 21:24 UTC   273d            no
    apiserver                  Sep 17, 2020 21:24 UTC   273d            no
    apiserver-etcd-client      Sep 17, 2020 21:24 UTC   273d            no
    apiserver-kubelet-client   Sep 17, 2020 21:24 UTC   273d            no
    controller-manager.conf    Sep 17, 2020 21:24 UTC   273d            no
    etcd-healthcheck-client    Sep 17, 2020 21:24 UTC   273d            no
    etcd-peer                  Sep 17, 2020 21:24 UTC   273d            no
    etcd-server                Sep 17, 2020 21:24 UTC   273d            no
    front-proxy-client         Sep 17, 2020 21:24 UTC   273d            no
    scheduler.conf             Sep 17, 2020 21:24 UTC   273d            no
  2. Run the following commands to back up the existing Kubernetes certificates:
    mkdir -p $HOME/fcik8s-old-certs/pki
    /bin/cp -p /etc/kubernetes/pki/*.* $HOME/fcik8s-old-certs/pki
    ls -l $HOME/fcik8s-old-certs/pki/
    The output will be similar to the following:
    total 56
    -rw-r--r-- 1 root root 1261 Sep  4  2019 apiserver.crt
    -rw-r--r-- 1 root root 1090 Sep  4  2019 apiserver-etcd-client.crt
    -rw------- 1 root root 1679 Sep  4  2019 apiserver-etcd-client.key
    -rw------- 1 root root 1679 Sep  4  2019 apiserver.key
    -rw-r--r-- 1 root root 1099 Sep  4  2019 apiserver-kubelet-client.crt
    -rw------- 1 root root 1679 Sep  4  2019 apiserver-kubelet-client.key
    -rw-r--r-- 1 root root 1025 Sep  4  2019 ca.crt
    -rw------- 1 root root 1675 Sep  4  2019 ca.key
    -rw-r--r-- 1 root root 1038 Sep  4  2019 front-proxy-ca.crt
    -rw------- 1 root root 1675 Sep  4  2019 front-proxy-ca.key
    -rw-r--r-- 1 root root 1058 Sep  4  2019 front-proxy-client.crt
    -rw------- 1 root root 1679 Sep  4  2019 front-proxy-client.key
    -rw------- 1 root root 1675 Sep  4  2019 sa.key
    -rw------- 1 root root  451 Sep  4  2019 sa.pub
  3. Run the following commands to back up the existing configuration files:
    /bin/cp -p /etc/kubernetes/*.conf $HOME/fcik8s-old-certs
    ls -ltr $HOME/fcik8s-old-certs
    The output will be similar to the following:
    total 36
    -rw------- 1 root root 5451 Sep  4  2019 admin.conf
    -rw------- 1 root root 5595 Sep  4  2019 kubelet.conf
    -rw------- 1 root root 5483 Sep  4  2019 controller-manager.conf
    -rw------- 1 root root 5435 Sep  4  2019 scheduler.conf
    drwxr-xr-x 2 root root 4096 Dec 19 21:21 pki
  4. Run the following commands to back up your home configuration:
    mkdir -p $HOME/fcik8s-old-certs/.kube
    /bin/cp -p ~/.kube/config $HOME/fcik8s-old-certs/.kube/.
    ls -l $HOME/fcik8s-old-certs/.kube/.
    The output will be similar to the following:
    -rw------- 1 root root 5451 Sep  4  2019 config
  5. Run the following command to renew all the Kubernetes certificates:
    kubeadm alpha certs renew all
    The output of the command will be similar to the following:
    certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself renewed
    certificate for serving the Kubernetes API renewed
    certificate the apiserver uses to access etcd renewed
    certificate for the API server to connect to kubelet renewed
    certificate embedded in the kubeconfig file for the controller manager to use renewed
    certificate for liveness probes to healtcheck etcd renewed
    certificate for etcd nodes to communicate with each other renewed
    certificate for serving etcd renewed
    certificate for the front proxy client renewed
    certificate embedded in the kubeconfig file for the scheduler manager to use renewed
  6. Run the following command to confirm the certificates have been renewed and will expire in 364 days:
    kubeadm alpha certs check-expiration
    The output should look similar to the following:
    CERTIFICATE                EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
    admin.conf                 Dec 20, 2021 02:35 UTC   364d            no      
    apiserver                  Dec 20, 2021 02:35 UTC   364d            no      
    apiserver-etcd-client      Dec 20, 2021 02:35 UTC   364d            no      
    apiserver-kubelet-client   Dec 20, 2021 02:35 UTC   364d            no      
    controller-manager.conf    Dec 20, 2021 02:35 UTC   364d            no      
    etcd-healthcheck-client    Dec 20, 2021 02:35 UTC   364d            no      
    etcd-peer                  Dec 20, 2021 02:35 UTC   364d            no      
    etcd-server                Dec 20, 2021 02:35 UTC   364d            no      
    front-proxy-client         Dec 20, 2021 02:35 UTC   364d            no      
    scheduler.conf             Dec 20, 2021 02:35 UTC   364d            no
  7. Confirm the kubelet services are running and communication between the worker nodes and the Kubernetes master is working.
  8. After waiting a few minutes, run the following command from the Kubernetes master node to confirm that the worker nodes are available:
    kubectl get nodes
    If you get a response similar to the following:
    The connection to the server 9.37.21.119:6443 was refused - did you specify the right host or port?
    
    continue with the next steps to resolve the issue. Otherwise, your Kubernetes cluster certificates have been successfully renewed.
  9. Run the following command:
    diff $HOME/fcik8s-old-certs/kubelet.conf /etc/kubernetes/kubelet.conf
    If there is no output, the kubelet.conf file was not updated with the new certificate information.
  10. Update the /etc/kubernetes/kubelet.conf file and display the difference from the old version to the new one:
    cd /etc/kubernetes
    sudo kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:$(hostname) > kubelet.conf
    diff $HOME/fcik8s-old-certs/kubelet.conf /etc/kubernetes/kubelet.conf
    If the output shows a difference, the file kubelet.conf was updated with the new certificate information.
  11. Run the following command:
    diff ~/.kube/config $HOME/fcik8s-old-certs/.kube/config
    If there is no output, the config file still has the outdated keys and certificate values in it.
  12. Update client-certificate-data and client-key-data in ~/.kube/config with the values from the updated file in /etc/kubernetes/kubelet.conf (a grep sketch for pulling these values out follows after this list):
    • cat /etc/kubernetes/kubelet.conf

      Select and copy the output after client-key-data:.

    • In the ~/.kube/config file, replace the information after client-key-data: with the text copied in the previous step.
    • cat /etc/kubernetes/kubelet.conf

      Select and copy the output after client-certificate-data:.

    • In the ~/.kube/config file, replace the information after client-certificate-data: with the text copied in the previous step.
  13. Restart the kubelet service:
    systemctl daemon-reload&&systemctl restart kubelet
    This command is successful if there is no output.
  14. Verify master and worker nodes are available:
    kubectl get nodes
  15. Verify all pods are in the running state:
    kubectl get pods
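
For step 12, rather than copying the two values by hand, they can be pulled out of the regenerated kubelet.conf with standard grep/awk (a small sketch; paths as in the procedure above):

# print the new embedded credentials so they can be pasted into ~/.kube/config
grep 'client-certificate-data:' /etc/kubernetes/kubelet.conf | awk '{print $2}'
grep 'client-key-data:' /etc/kubernetes/kubelet.conf | awk '{print $2}'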

The reason this period needs checking: on versions 1.17 and below, renewing the certificates renews everything else,

but there is a bug where kubelet.conf is not renewed, so it is a good habit to always check it.

 

Checking the kubelet.conf certificate validity period

Hmm... first, there are two cases to check.
[root@minikube kubernetes]# pwd
/etc/kubernetes
[root@minikube kubernetes]# cat kubelet.conf

- name: system:node:minikube
  user:
    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem
    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem

This section may contain either embedded (encoded) data or, as above, a file path. If it is a file path it is probably minikube; otherwise it will be base64-encoded certificate data.

For the encoded case:
echo -n "<encoded content>" | base64 -d > test.txt
openssl x509 -in test.txt  -text -noout

then check the certificate dates.
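
When kubelet.conf points at a pem file rather than embedded data (the minikube case above), or to do the embedded check in one pipeline:

# file-path case (minikube)
openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates
# embedded base64 case, in one go
grep client-certificate-data /etc/kubernetes/kubelet.conf | awk '{print $2}' | base64 -d | openssl x509 -noout -dates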

 

 

 cd /etc/kubernetes

kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:$(hostname) > kubelet.conf

systemctl restart kubelet
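
A quick sanity check after the restart (not part of the original notes):

systemctl status kubelet --no-pager
kubectl get nodes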

 

========== Reference material =============

# On master - See https://kubernetes.io/docs/setup/certificates/#all-certificates

# Generate the new certificates - you may have to deal with AWS - see above re extra certificate SANs
sudo kubeadm alpha certs renew apiserver
sudo kubeadm alpha certs renew apiserver-etcd-client
sudo kubeadm alpha certs renew apiserver-kubelet-client
sudo kubeadm alpha certs renew front-proxy-client

# Generate new kube-configs with embedded certificates - Again you may need extra AWS specific content - see above
sudo kubeadm alpha kubeconfig user --org system:masters --client-name kubernetes-admin  > admin.conf
sudo kubeadm alpha kubeconfig user --client-name system:kube-controller-manager > controller-manager.conf
sudo kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:$(hostname) > kubelet.conf
sudo kubeadm alpha kubeconfig user --client-name system:kube-scheduler > scheduler.conf

# chown and chmod so they match existing files
sudo chown root:root {admin,controller-manager,kubelet,scheduler}.conf
sudo chmod 600 {admin,controller-manager,kubelet,scheduler}.conf

# Move to replace existing kubeconfigs
sudo mv admin.conf /etc/kubernetes/
sudo mv controller-manager.conf /etc/kubernetes/
sudo mv kubelet.conf /etc/kubernetes/
sudo mv scheduler.conf /etc/kubernetes/

# Restart the master components
sudo kill -s SIGHUP $(pidof kube-apiserver)
sudo kill -s SIGHUP $(pidof kube-controller-manager)
sudo kill -s SIGHUP $(pidof kube-scheduler)

# Verify master component certificates - should all be 1 year in the future
# Cert from api-server
echo -n | openssl s_client -connect localhost:6443 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not
# Cert from controller manager
echo -n | openssl s_client -connect localhost:10257 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not
# Cert from scheduler
echo -n | openssl s_client -connect localhost:10259 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not

# Generate kubelet.conf
sudo kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:$(hostname) > kubelet.conf
sudo chown root:root kubelet.conf
sudo chmod 600 kubelet.conf

# Drain
kubectl drain --ignore-daemonsets $(hostname)
# Stop kubelet
sudo systemctl stop kubelet
# Delete files
sudo rm /var/lib/kubelet/pki/*
# Copy file
sudo mv kubelet.conf /etc/kubernetes/
# Restart
sudo systemctl start kubelet
# Uncordon
kubectl uncordon $(hostname)

# Check kubelet
echo -n | openssl s_client -connect localhost:10250 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not


etcd containers that keep dying

To fix this, you need to use etcdctl to clear out the excessive keyspace data on the etcd cluster members and defragment the database so its size comes back within the quota. But the etcd containers kept dying every 2~3 minutes, which made it impossible to get the work done properly.

The containers keep cycling because a livenessProbe is set on them: when etcd is not working normally the health check fails and the container is restarted over and over. So the first step is to remove the livenessProbe from the etcd pod. Since etcd is one of the core Kubernetes components, its pod spec lives in the /etc/kubernetes/manifests/ directory; find it there and edit it.

 
# /etc/kubernetes/manifests/etcd.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://192.168.0.220:2379
    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd
    - --election-timeout=5000
    - --heartbeat-interval=250
    - --initial-advertise-peer-urls=https://192.168.0.220:2380
    - --initial-cluster=k8s-master1=https://192.168.0.220:2380
    - --key-file=/etc/kubernetes/pki/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379,https://192.168.0.220:2379
    - --listen-metrics-urls=http://127.0.0.1:2381
    - --listen-peer-urls=https://192.168.0.220:2380
    - --name=k8s-master1
    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    image: k8s.gcr.io/etcd:3.3.15-0
    imagePullPolicy: IfNotPresent
    # commented out so the failing health check stops restarting the container
    # livenessProbe:
    #   failureThreshold: 8
    #   httpGet:
    #     host: 127.0.0.1
    #     path: /health
    #     port: 2381
    #     scheme: HTTP
    #   initialDelaySeconds: 15
    #   timeoutSeconds: 15
    name: etcd
    resources: {}
    volumeMounts:
    - mountPath: /var/lib/etcd
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
    name: etcd-data
status: {}

Once this comment-out is applied to the yaml file at this path on every master node, the etcd containers stop dying.

The etcdctl command

On a Kubernetes cluster installed with Konvoy, the textbook way to use etcdctl is to exec into the etcd container and run it there, but I was lazy and decided to just find the binary outside the container and use it from the host. (Naturally, this has to be done on a master node where the etcd container is running.)

 
# bash
find / -type f -name etcdctl 2>/dev/null

# example output
[root@k8s-master1 manifests]# find / -type f -name etcdctl 2>/dev/null
/run/containerd/io.containerd.runtime.v1.linux/k8s.io/4fc80ceb99dfc0dca39e726d95104f5e424c53e618fd71d201b9b8b9c75a6d5d/rootfs/usr/local/bin/etcdctl
/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/9/fs/usr/local/bin/etcdctl

Pick either one and set up an alias for it. When defining the alias, also bake in the certificates etcdctl needs to talk to the cluster.

 
# bash
alias etcdctl="\
ETCDCTL_API=3 \
/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/9/fs/usr/local/bin/etcdctl \
--cacert='/etc/kubernetes/pki/etcd/ca.crt' \
--cert='/etc/kubernetes/pki/etcd/server.crt' \
--key='/etc/kubernetes/pki/etcd/server.key' "

Test

 
# bash
etcdctl member list

Fixing the problem

First, check the list of active alarms and the current cluster status.

 
# bash
etcdctl alarm list
etcdctl -w table endpoint status --cluster

Now to put etcd on a diet: to drop the old revisions other than the current state, fetch the current revision value.

 
# bash
c_revision=$(etcdctl endpoint status --write-out="json" | egrep -o '"revision":[0-9]*' | egrep -o '[0-9].*')
echo ${c_revision}

Compact away the old revisions.

 
# bash
etcdctl --endpoints=$(etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ',') compact $c_revision

Defragment. In my case this is the step where the size dropped dramatically.

 
# bash
etcdctl --endpoints=$(etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ',') defrag

Check the cluster status.

 
# bash
etcdctl -w table endpoint status --cluster

# output
+----------------------------+------------------+---------+---------+-----------+-----------+------------+
|          ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+----------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://192.168.0.221:2379 | 1806ccfb80e73faf |  3.3.15 |  7.8 MB |     false |       602 |   66877835 |
| https://192.168.0.222:2379 | e7c82e12168d0897 |  3.3.15 |  7.8 MB |     false |       602 |   66877835 |
| https://192.168.0.220:2379 | edabb0b65fe02a4c |  3.3.15 |  7.8 MB |      true |       602 |   66877835 |
+----------------------------+------------------+---------+---------+-----------+-----------+------------+

Disarm the alarm and check.

 
# bash
etcdctl alarm disarm
etcdctl alarm list



[root@k8s-master1 manifests]# find / -type f -name etcdctl 2>/dev/null
/run/containerd/io.containerd.runtime.v1.linux/k8s.io/4fc80ceb99dfc0dca39e726d95104f5e424c53e618fd71d201b9b8b9c75a6d5d/rootfs/usr/local/bin/etcdctl
/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/9/fs/usr/local/bin/etcdctl

Copy etcdctl under /usr/bin/.


#!/bin/bash

alias etcdctl3='ETCDCTL_API=3 etcdctl --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/server.crt --key=/var/lib/minikube/certs/etcd/server.key'

[root@minikube home]# etcdctl3 -w table endpoint status --cluster
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.45.100:2379 | d5b1b2d93f592f08 |   3.5.0 |  1.6 MB |      true |      false |         4 |      46687 |              46687 |        |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+


## Clearing alarms ##
etcdctl3 --endpoints="https://${endpoint}:2379" alarm disarm
etcdctl3 --endpoints="https://${endpoint}:2379" alarm list

To run it locally on each server:
etcdctl3 alarm disarm
etcdctl3 alarm list


########### Compaction ####

etcdctl3 endpoint status --write-out="json" | egrep -o '"revision":[0-9]*'| egrep -o '[0-9].*'
"revision":41713

etcdctl3 compact 41713
#######################
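
With the etcdctl3 alias the same two steps can be chained in an interactive shell (a sketch; the revision number will of course differ):

rev=$(etcdctl3 endpoint status --write-out=json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]+')
etcdctl3 compact "$rev"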

## Defragmenting ####
== Whole cluster at once ===
[root@minikube home]# etcdctl3 defrag --cluster    # the whole cluster at once
Finished defragmenting etcd member[https://192.168.45.100:2379]


[root@minikube home]# etcdctl3 -w table endpoint status --cluster
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.45.100:2379 | d5b1b2d93f592f08 |   3.5.0 |  856 kB |      true |      false |         4 |      48305 |              48305 |        |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

=== Defragmenting the etcd servers one at a time : check for service impact ==


etcdctl3 -w table endpoint status --cluster 

Check which member is the leader. Work through the non-leader members first and do the leader last.


etcdctl3 defrag --endpoints="https://${endpoint}:2379"
etcdctl3 --endpoints="https://192.168.45.100:2379" --write-out=table endpoint status
Confirm that the etcd pod comes back up normally on that server.
-- repeat --



# set a very small 16MB quota
# in etcd.yaml, set --quota-backend-bytes=$((16*1024*1024))   # put the computed value (16777216) into the manifest


# fill keyspace
$ while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024  | etcdctl3 put key  || break; done
...
Error:  rpc error: code = 8 desc = etcdserver: mvcc: database space exceeded
# confirm quota space is exceeded

[root@minikube manifests]# etcdctl3 -w table endpoint status --cluster
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------------------------------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX |             ERRORS             |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------------------------------+
| https://192.168.45.100:2379 | d5b1b2d93f592f08 |   3.5.0 |   16 MB |      true |      false |         5 |      50310 |              50310 |  memberID:15398285247096893192 |
|                             |                  |         |         |           |            |           |            |                    |                 alarm:NOSPACE  |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------------------------------+
[root@minikube manifests]# etcdctl3 alarm list
memberID:15398285247096893192 alarm:NOSPACE

[root@minikube manifests]# etcdctl3 alarm disarm
memberID:15398285247096893192 alarm:NOSPACE

[root@minikube manifests]# etcdctl3 alarm list

## Additions for etcd.yaml ###

============ Compaction ============
# keep one hour of history
--auto-compaction-retention=1


====== Increasing capacity ==================
Example: 8G. The default is 2G and the maximum is 8G.

--quota-backend-bytes=8589934592 
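
After editing the static pod manifest the kubelet recreates the etcd pod on its own; to confirm the flags actually landed (an extra check, not in the original notes):

grep -E 'auto-compaction-retention|quota-backend-bytes' /etc/kubernetes/manifests/etcd.yaml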


###################  Defragmenting with a Job : based on a 2G etcd quota   ########

[root@minikube manifests]#  etcdctl3 -w table endpoint status --cluster
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.45.100:2379 | d5b1b2d93f592f08 |   3.5.0 |  408 MB |      true |      false |         2 |       1144 |               1144 |        |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
kubectl label node minikube etcd="true"

[root@minikube home]#  etcdctl3 --endpoints="https://192.168.45.100:2379" --write-out=table endpoint status;
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.45.100:2379 | d5b1b2d93f592f08 |   3.5.0 |  176 MB |      true |      false |         2 |       2819 |               2819 |        |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

 kubectl apply -f minikube-etcd-defrag-job.yaml

(the Job manifest is attached to the original post as minikube-etcd-defrag-job.yaml)
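
The attached Job manifest is not reproduced here; roughly, its container runs the same defrag that was done by hand above. A sketch assuming the minikube certificate paths used earlier:

ETCDCTL_API=3 etcdctl \
  --cacert=/var/lib/minikube/certs/etcd/ca.crt \
  --cert=/var/lib/minikube/certs/etcd/server.crt \
  --key=/var/lib/minikube/certs/etcd/server.key \
  --endpoints=https://192.168.45.100:2379 \
  defrag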



[root@minikube home]# etcdctl --endpoints="https://192.168.45.100:2379" --write-out=table endpoint status;
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.45.100:2379 | d5b1b2d93f592f08 |   3.5.0 |  2.0 MB |      true |      false |         2 |       3456 |               3456 |        |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

## Viewing snapshot file info ##

$ etcdctl snapshot save backup.db
$ etcdctl --write-out=table snapshot status backup.db
+----------+----------+------------+------------+
|   HASH   | REVISION | TOTAL KEYS | TOTAL SIZE |
+----------+----------+------------+------------+
| fe01cf57 |       10 |          7 | 2.1 MB     |
+----------+----------+------------+------------+


## Defragmenting the db file directly
  - --data-dir=/var/lib/minikube/etcd


[root@minikube snap]# etcdctl3 --endpoints="https://192.168.45.100:2379" --write-out=table endpoint status;
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.45.100:2379 | d5b1b2d93f592f08 |   3.5.0 |  5.2 MB |      true |      false |         2 |      23546 |              23546 |        |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
[root@minikube snap]# etcdctl3 defrag  /var/lib/minikube/etcd
Finished defragmenting etcd member[127.0.0.1:2379]
[root@minikube snap]# etcdctl3 --endpoints="https://192.168.45.100:2379" --write-out=table endpoint status;
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.45.100:2379 | d5b1b2d93f592f08 |   3.5.0 |  1.8 MB |      true |      false |         2 |      23564 |              23564 |        |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
[root@minikube snap]# ls -al
total 3792
drwx------ 2 root root     108 Nov 21 15:26 .
drwx------ 4 root root      29 Nov 21 08:29 ..
-rw-r--r-- 1 root root    7148 Nov 21 11:15 0000000000000002-0000000000002711.snap
-rw-r--r-- 1 root root    7148 Nov 21 14:20 0000000000000002-0000000000004e22.snap
-rw------- 1 root root 1843200 Nov 21 15:27 db
[root@minikube snap]#


## What is this? ##
 ./etcdctl3 check datascale --load="s" --auto-compact=true --auto-defrag=true

apiVersion: v1
kind: Namespace
metadata:
  name: metallb-system
  labels:
    app: metallb

 

(full manifest attached to the original post as metallb.yaml)

 

 

etcd installation - brief document

Configured on a separate server.

 

(attached to the original post: Kubernetes-CKA-0900-Install-v1.4.pdf)


Article on Setting up Basic Authentication
Setup basic authentication on kubernetes
Note: This is not recommended in a production environment. This is only for learning purposes.
Follow the below instructions to configure basic authentication in a kubeadm setup.

Create a file with user details locally at /tmp/users/user-details.csv

# User File Contents
password123,user1,u0001
password123,user2,u0002
password123,user3,u0003
password123,user4,u0004
password123,user5,u0005


Edit the kube-apiserver static pod configured by kubeadm to pass in the user details. The file is located at /etc/kubernetes/manifests/kube-apiserver.yaml



apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
      <content-hidden>
    image: k8s.gcr.io/kube-apiserver-amd64:v1.11.3
    name: kube-apiserver
    volumeMounts:
    - mountPath: /tmp/users
      name: usr-details
      readOnly: true
  volumes:
  - hostPath:
      path: /tmp/users
      type: DirectoryOrCreate
    name: usr-details


Modify the kube-apiserver startup options to include the basic-auth file



apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --authorization-mode=Node,RBAC
      <content-hidden>
    - --basic-auth-file=/tmp/users/user-details.csv

Create the necessary roles and role bindings for these users:



---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: pod-reader
rules:
- apiGroups: [""] # "" indicates the core API group
  resources: ["pods"]
  verbs: ["get", "watch", "list"]

---
# This role binding allows "user1" to read pods in the "default" namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: read-pods
  namespace: default
subjects:
- kind: User
  name: user1 # Name is case sensitive
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role # this must be Role or ClusterRole
  name: pod-reader # this must match the name of the Role or ClusterRole you wish to bind to
  apiGroup: rbac.authorization.k8s.io

Once created, you can authenticate to the kube-apiserver using a user's credentials:

curl -v -k https://localhost:6443/api/v1/pods -u "user1:password123"

