Saturday, 21 September 2024

Bash Script: PostgreSQL Database Backup

Full Database Backup Script

#!/bin/bash

PATH_FILE=/var/lib/pgsql/12/backups # adjust to your directory; tested with PostgreSQL 12 on RHEL

LOG_PATH=/var/lib/pgsql/12/backups # adjust to your directory; tested with PostgreSQL 12 on RHEL

DATE=`date +%Y%m%d`

export PGPASSWORD=postgres_access_password
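# Note: hard-coding PGPASSWORD works, but a ~/.pgpass file is the safer option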

 

echo START `date` >> ${LOG_PATH}/backup_full_one_day.log

pg_dumpall -U postgres -h 127.0.0.1 -p 5432 -v --roles-only -f ${PATH_FILE}/full_backup/"all_roles_db_${DATE}_dc1.sql" 2>> ${LOG_PATH}/backup_full_one_day.log

 

echo START BACKUP database_A `date` >> ${LOG_PATH}/backup_full_one_day.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 -s database_A 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_A_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 -a database_A 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_A_${DATE}_dc1_data.sql.gz

echo DONE BACKUP database_A `date` >> ${LOG_PATH}/backup_full_one_day.log

 

echo START BACKUP database_B `date` >> ${LOG_PATH}/backup_full_one_day.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 -s database_B 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_B_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 -a database_B 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_B_${DATE}_dc1_data.sql.gz

echo DONE BACKUP database_B `date` >> ${LOG_PATH}/backup_full_one_day.log

 

echo START BACKUP database_C `date` >> ${LOG_PATH}/backup_full_one_day.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 -s database_C 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_C_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 -a database_C 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_C_${DATE}_dc1_data.sql.gz

echo DONE BACKUP database_C `date` >> ${LOG_PATH}/backup_full_one_day.log

 

echo START BACKUP database_D `date` >> ${LOG_PATH}/backup_full_one_day.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 -s database_D 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_D_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 -a database_D 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_D_${DATE}_dc1_data.sql.gz

echo DONE BACKUP database_D `date` >> ${LOG_PATH}/backup_full_one_day.log

 

echo START BACKUP database_E `date` >> ${LOG_PATH}/backup_full_one_day.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 -s database_E 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_E_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 -a database_E 2>> ${LOG_PATH}/backup_full_one_day.log | gzip -9 > ${PATH_FILE}/full_backup/database_E_${DATE}_dc1_data.sql.gz

echo DONE BACKUP database_E `date` >> ${LOG_PATH}/backup_full_one_day.log

 

cd ${PATH_FILE}/full_backup

gzip -9 all_roles_db_${DATE}_dc1.sql

 

echo END `date` >> ${LOG_PATH}/backup_full_one_day.log 2>&1

find ${PATH_FILE}/full_backup -type f -iname "*.sql.gz" -mtime +2 -exec rm {} \; # keep only the last 2 days of backups

 

echo ======================================================================================================================= >> ${LOG_PATH}/backup_full_one_day.log
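To run this backup automatically every night, a cron entry for the postgres user could look like the sketch below (the script path /var/lib/pgsql/backup_full_one_day.sh is a hypothetical example):

# crontab -e as the postgres user: run the full backup daily at 01:00
0 1 * * * /var/lib/pgsql/backup_full_one_day.sh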



Per-Schema Database Backup Script

#!/bin/bash

PATH_FILE=/var/lib/pgsql/12/backups

LOG_PATH=/var/lib/pgsql/12/backups

DATE=`date +%Y%m%d`

export PGPASSWORD=postgres_login_password

 

 

echo ======================================================================================================================= >> ${LOG_PATH}/backup_full.log

echo START `date` >> ${LOG_PATH}/backup_full.log

 

echo START BACKUP Schema DATABASE_NAME `date` >> ${LOG_PATH}/backup_full.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=public -s DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_public_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME -s DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME_2 -s DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_2_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME_3 -s DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_3_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME_4 -s DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_4_${DATE}_dc1_schema.sql.gz

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME_5 -s DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_5_${DATE}_dc1_schema.sql.gz

echo END BACKUP Schema DATABASE_NAME `date` >> ${LOG_PATH}/backup_full.log

echo START BACKUP Data DATABASE_NAME schema public `date` >> ${LOG_PATH}/backup_full.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=public -a DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_public_${DATE}_dc1_data.sql.gz

echo END BACKUP Data DATABASE_NAME schema public `date` >> ${LOG_PATH}/backup_full.log

echo START BACKUP Data DATABASE_NAME schema SCHEMA_NAME `date` >> ${LOG_PATH}/backup_full.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME -a DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_${DATE}_dc1_data.sql.gz

echo END BACKUP Data DATABASE_NAME schema SCHEMA_NAME `date` >> ${LOG_PATH}/backup_full.log

echo START BACKUP Data DATABASE_NAME schema SCHEMA_NAME_2 `date` >> ${LOG_PATH}/backup_full.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME_2 -a DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_2_${DATE}_dc1_data.sql.gz

echo END BACKUP Data DATABASE_NAME schema SCHEMA_NAME_2 `date` >> ${LOG_PATH}/backup_full.log

echo START BACKUP Data DATABASE_NAME schema SCHEMA_NAME_3 `date` >> ${LOG_PATH}/backup_full.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME_3 -a DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_3_${DATE}_dc1_data.sql.gz

echo END BACKUP Data DATABASE_NAME schema SCHEMA_NAME_3 `date` >> ${LOG_PATH}/backup_full.log

echo START BACKUP Data DATABASE_NAME schema SCHEMA_NAME_4 `date` >> ${LOG_PATH}/backup_full.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME_4 -a DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_4_${DATE}_dc1_data.sql.gz

echo END BACKUP Data DATABASE_NAME schema SCHEMA_NAME_4 `date` >> ${LOG_PATH}/backup_full.log

echo START BACKUP Data DATABASE_NAME schema SCHEMA_NAME_5 `date` >> ${LOG_PATH}/backup_full.log

pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=SCHEMA_NAME_5 -a DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_SCHEMA_NAME_5_${DATE}_dc1_data.sql.gz

echo END BACKUP Data DATABASE_NAME schema SCHEMA_NAME_5 `date` >> ${LOG_PATH}/backup_full.log

 

echo END `date` >> ${LOG_PATH}/backup_full.log

echo ======================================================================================================================= >> ${LOG_PATH}/backup_full.log
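The repeated per-schema commands above could also be collapsed into a loop; a minimal sketch, using the same placeholders (DATABASE_NAME, SCHEMA_NAME, ...):

#!/bin/bash
# Sketch: dump the schema definition (-s) and the data (-a) for each schema in one loop
PATH_FILE=/var/lib/pgsql/12/backups
LOG_PATH=/var/lib/pgsql/12/backups
DATE=`date +%Y%m%d`
for SCHEMA in public SCHEMA_NAME SCHEMA_NAME_2 SCHEMA_NAME_3 SCHEMA_NAME_4 SCHEMA_NAME_5; do
    pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=${SCHEMA} -s DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_${SCHEMA}_${DATE}_dc1_schema.sql.gz
    pg_dump -U postgres -h 127.0.0.1 -p 5432 --schema=${SCHEMA} -a DATABASE_NAME 2>> ${LOG_PATH}/backup_full.log | gzip -9 > ${PATH_FILE}/full_backup/DATABASE_NAME_${SCHEMA}_${DATE}_dc1_data.sql.gz
done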

Bash Script: Kubernetes Cluster Certificate Monitoring (Telegram Alert)

 

#!/bin/bash

 

HOST=$(hostname)

 

# Telegram credentials

TOKEN=telegram_token

CHAT_ID=telegram_chatId

 

# Check Kubernetes certificate expiration

cert_info=$(kubeadm certs check-expiration)

expires_date=$(echo "$cert_info" | awk 'NR>6 {print $2, $3, $4, $5; exit}')

days_remaining=$(echo "$cert_info" | awk 'NR>6 {print $7; exit}')

 

# Strip the trailing "d" from days_remaining so it becomes a plain integer

days_remaining="${days_remaining%d}"

 

# Check whether fewer than 90 days remain

if [ "$days_remaining" -lt 90 ]; then

  # Write the certificate details to the log

  echo "Cluster    : $HOST" > /opt/scripts/ssl_kuber.log

  echo "Expire on : $expires_date" >> /opt/scripts/ssl_kuber.log

  echo "Counting  : $days_remaining days" >> /opt/scripts/ssl_kuber.log

 

  # Send the notification

  MESSAGE="$(cat /opt/scriptss/ssl_kuber.log)"

  URL="https://api.telegram.org/bot$TOKEN/sendMessage"

  curl -s -X POST "$URL" -d "chat_id=$CHAT_ID" -d text="$(echo -e '\U0001f525 \U0001f514 \nNP2.1 Kubernetes SSL\n')$(echo -e '\n-----------------------------------\n')$(echo -e '\n'"$MESSAGE")"

else

  echo "check-expiration: OK: $days_remaining days"

fi

 

sleep 3

rm -f /opt/scripts/ssl_kuber.log
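When this alert fires, the kubeadm-managed certificates can be renewed and re-checked as below (the control-plane components must be restarted afterwards so they pick up the new certificates):

# Renew all kubeadm-managed certificates, then verify the new expiration dates
kubeadm certs renew all
kubeadm certs check-expiration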

Bash Script: Kubernetes CSR Monitoring (Telegram Alert)

 

#!/bin/bash

HOST=$(hostname)

TOKEN=telegram_token

CHAT_ID=telegram_chat_id

 

pending_csrs=$(kubectl get csr | awk '$4=="Pending"')

  if [ -z "$pending_csrs" ]; then

    echo "No pending CSRs."

  else

    # Write the CSR details to the log

    echo "Cluster     : $HOST" > /opt/scripts/csr_kuber.log

    echo "Status CSR : You have pending CSRs" >> /opt/scripts/csr_kuber.log

    sumpending=$(kubectl get csr | awk '$4=="Pending"'| wc -l)

    echo "Total      : $sumpending" >> /opt/scripts/csr_kuber.log

 

    # Send the Telegram alert

    MESSAGE="$(cat /opt/scripts/csr_kuber.log)"

    curl -s -X POST "https://api.telegram.org/bot$TOKEN/sendMessage" -d "chat_id=$CHAT_ID" -d "text="$(echo -e '\U0001f525 \U0001f514 \nNP2 Kubernetes SSLn\n')$(echo -e '\n-----------------------------------\n')$(echo -e '\n'"$MESSAGE")

  fi

 

sleep 3

rm -f /opt/scripts/csr_kuber.log
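If the pending CSRs are expected (for example kubelet serving certificates), they could be approved in bulk; a minimal sketch using the same column match as the script:

# Approve every CSR currently in Pending state
kubectl get csr | awk '$4=="Pending" {print $1}' | xargs -r kubectl certificate approve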

Bash Script: Kubernetes Service Monitoring (Telegram Alert)

#!/bin/bash

 

TOKEN=telegram_token

CHAT_ID=telegram_id

log=/var/log/svc.log

tgl=$(date +"%A %d-%m-%Y %T")

 

export PATH=/root/.local/bin:/root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin

export KUBECONFIG=/etc/kubernetes/admin.conf

 

function getsvc {

        echo "" > $log

        /usr/bin/kubectl get pods --all-namespaces --no-headers | grep 'ImagePullBackOff' | awk '{print "\nProject = " $1 "\nPods = " $2 "\nStatus =  " $4 "\n_____________________________"}' >> $log

        /usr/bin/kubectl get pods --all-namespaces --no-headers | grep 'CrashLoopBackOff' | awk '{print "\nProject = " $1 "\nPods = " $2 "\nStatus =  " $4 "\n_____________________________"}' >> $log

        /usr/bin/kubectl get pods --all-namespaces --no-headers | grep 'Pending' | awk '{print "\nProject = " $1 "\nPods = " $2 "\nStatus =  " $4 "\n_____________________________"}' >> $log

        /usr/bin/kubectl get pods --all-namespaces --no-headers | grep 'OutOfmemory' | awk '{print "\nProject = " $1 "\nPods = " $2 "\nStatus =  " $4 "\n_____________________________"}' >> $log

        /usr/bin/kubectl get pods --all-namespaces --no-headers | grep 'OutOfcpu' | awk '{print "\nProject = " $1 "\nPods = " $2 "\nStatus =  " $4 "\n_____________________________"}' >> $log

#       /usr/bin/kubectl get pods --all-namespaces --no-headers | grep 'Running' | awk '{print "\nProject = " $1 "\nPods = " $2 "\nStatus =  " $4 "\n_____________________________"}' >> $log

}

 

getsvc

 

message=$(<$log)

send() {

        curl  \

        -X POST \

        https://api.telegram.org/bot$TOKEN/sendMessage \

        -d chat_id=$CHAT_ID \

        -d text="`echo -e '\U0001f525 \u274C \U0001f514 \nNP2 Service Down\n.'`$tgl`echo -e '\n_____________________________\n'` $message"

}

 

if [[ -n "$message" ]]; then

        send

else

        echo "No failing pods found"

fi
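The five kubectl/grep passes in getsvc could also be merged into a single pass; a sketch using the same status strings and output format:

# One kubectl call, one awk filter covering all unhealthy states
/usr/bin/kubectl get pods --all-namespaces --no-headers | awk '$4 ~ /ImagePullBackOff|CrashLoopBackOff|Pending|OutOfmemory|OutOfcpu/ {print "\nProject = " $1 "\nPods = " $2 "\nStatus =  " $4 "\n_____________________________"}' >> $log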

 

Setup: Install Rancher onto Existing Kubernetes

Environment used
  • Kubernetes distribution: MicroK8s
  • Kubernetes version: v1.28.13
  • nginx to expose Rancher

Install & Setup Rancher
  1. Create a directory for the Rancher installation and change into it

    [root@yys-oci yoni]# mkdir -p rancher

    [root@yys-oci yoni]# cd rancher/


  2. Create a config file for generating the certificate that Rancher will use

    [root@yys-oci rancher]# cat rancher.conf

    [req]

    distinguished_name = req_distinguished_name

    x509_extensions = v3_req

    prompt = no

    [req_distinguished_name]

    C = ID

    ST = DIY

    L = Yogyakarta

    O = your-domain

    OU = yys

    CN = rancher.your-domain.com

    [v3_req]

    keyUsage = keyEncipherment, dataEncipherment

    extendedKeyUsage = serverAuth

    subjectAltName = @alt_names

    [alt_names]

    DNS.1 = rancher.your-domain.com

    DNS.2 = your-domain.com


  3. Generate the certificate with openssl

    [root@yys-oci rancher]# openssl req -x509 -nodes -days 365300 -newkey rsa:2048 -keyout tls.key -out tls.crt -config rancher.conf
    Generating a RSA private key

    ...+++++

    ............................................+++++

    writing new private key to 'tls.key'

    -----


  4. Verify the certificate was generated; there should be two files (tls.crt and tls.key)

    [root@yys-oci rancher]# ll

    total 12

    -rw-r--r--. 1 root root  357 Aug  7 20:06 rancher.conf

    -rw-r--r--. 1 root root 1350 Aug  9 21:56 tls.crt

    -rw-------. 1 root root 1704 Aug  9 21:56 tls.key
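
    Optionally, inspect the certificate to confirm the CN and SANs from rancher.conf were applied:

    openssl x509 -in tls.crt -noout -subject -dates
    openssl x509 -in tls.crt -noout -text | grep -A1 "Subject Alternative Name"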


  5. Create the namespace in Kubernetes

    [root@yys-oci rancher]# kubectl create namespace cattle-system

    namespace/cattle-system created


  6. Create a secret containing the certificate generated in step 3

    [root@yys-oci rancher]# kubectl -n cattle-system create secret tls tls-rancher-ingress --cert=tls.crt --key=tls.key

    secret/tls-rancher-ingress created


  7. Add the Rancher repo with helm

    [root@yys-oci rancher]# helm repo add rancher-stable https://releases.rancher.com/server-charts/stable

    "rancher-stable" has been added to your repositories


  8. Install Rancher

    [root@yys-oci rancher]# helm install rancher rancher-stable/rancher --namespace cattle-system --set hostname=rancher.your-domain.com --set bootstrapPassword=your-password --set ingress.tls.source=secret --set ingress.tls.secretName=tls-rancher-ingress --version=2.8.5

    NAME: rancher

    LAST DEPLOYED: Sun Aug 11 09:42:55 2024

    NAMESPACE: cattle-system

    STATUS: deployed

    REVISION: 1

    TEST SUITE: None

    NOTES:

    Rancher Server has been installed.

     

    NOTE: Rancher may take several minutes to fully initialize. Please standby while Certificates are being issued, Containers are started and the Ingress rule comes up.

     

    Check out our docs at https://rancher.com/docs/

     

    If you provided your own bootstrap password during installation, browse to https://rancher.your-domain.com to get started.

     

    If this is the first time you installed Rancher, get started by running this command and clicking the URL it generates:

     

    ```

    echo https://rancher.your-domain.com/dashboard/?setup=$(kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}')

    ```

     

    To get just the bootstrap password on its own, run:

     

    ```

    kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ "\n" }}'

    ```

     

     

    Happy Containering!

    [root@yys-oci rancher]#


  9. Check the Rancher pods

    [root@yys-oci rancher]# kubectl get pod -A | grep rancher

    cattle-system                     rancher-7d8b95f8-5mm77                                      1/1     Running   2 (19d ago)   22d

    cattle-system                     rancher-webhook-684fb7899b-kdrcv                            1/1     Running   2 (19d ago)   22d

    [root@yys-oci rancher]#


  10. Describe the rancher svc; it will be adjusted in the next step

    [root@yys-oci rancher]# kubectl describe svc rancher -n cattle-system

    Name:              rancher

    Namespace:         cattle-system

    Labels:            app=rancher

                       app.kubernetes.io/managed-by=Helm

                       chart=rancher-2.8.5

                       heritage=Helm

                       release=rancher

    Annotations:       meta.helm.sh/release-name: rancher

                       meta.helm.sh/release-namespace: cattle-system

    Selector:          app=rancher

    Type:              ClusterIP

    IP Family Policy:  SingleStack

    IP Families:       IPv4

    IP:                10.152.183.25

    IPs:               10.152.183.25

    Port:              http  80/TCP

    TargetPort:        80/TCP

    Endpoints:         10.1.138.134:80

    Port:              https-internal  443/TCP

    TargetPort:        444/TCP

    Endpoints:         10.1.138.134:444

    Session Affinity:  None

    Events:            <none>

    As shown, the rancher svc defaults to type ClusterIP.

  11. Adjust the rancher svc so it can be reached from a browser

    [root@yys-oci rancher]# kubectl edit svc rancher -n cattle-system

    service/rancher edited

     

    # Please edit the object below. Lines beginning with a '#' will be ignored,

    # and an empty file will abort the edit. If an error occurs while saving this file will be

    # reopened with the relevant failures.

    #

    apiVersion: v1

    kind: Service

    metadata:

      annotations:

        field.cattle.io/publicEndpoints: '[{"port":32141,"protocol":"TCP","serviceName":"cattle-system:rancher","allNodes":true},{"port":32443,"protocol":"TCP","serviceName":"cattle-system:rancher","allNodes":true}]'

        meta.helm.sh/release-name: rancher

        meta.helm.sh/release-namespace: cattle-system

      creationTimestamp: "2024-08-11T02:42:56Z"

      labels:

        app: rancher

        app.kubernetes.io/managed-by: Helm

        chart: rancher-2.8.5

        heritage: Helm

        release: rancher

      name: rancher

      namespace: cattle-system

      resourceVersion: "14610"

      uid: f66721b2-67c9-4e4f-88b4-2c8cd8675aa1

    spec:

      clusterIP: 10.152.183.25

      clusterIPs:

      - 10.152.183.25

      externalTrafficPolicy: Cluster

      internalTrafficPolicy: Cluster

      ipFamilies:

      - IPv4

      ipFamilyPolicy: SingleStack

      ports:

      - name: http

        nodePort: 32141         >>>> add a nodePort (any free port in the NodePort range, 30000-32767 by default)

        port: 80

        protocol: TCP

        targetPort: 80

      - name: https-internal

        nodePort: 32443         >>>> add a nodePort (any free port in the NodePort range, 30000-32767 by default)

        port: 443

        protocol: TCP

        targetPort: 444

      selector:

        app: rancher

      sessionAffinity: None

      type: NodePort             >>>> change the type from ClusterIP to NodePort

    status:

      loadBalancer: {}


  12. Check the rancher svc again and make sure the type change and nodePort additions took effect

    [root@yys-oci rancher]# kubectl describe svc rancher -n cattle-system

    Name:                     rancher

    Namespace:                cattle-system

    Labels:                   app=rancher

                              app.kubernetes.io/managed-by=Helm

                              chart=rancher-2.8.5

                              heritage=Helm

                              release=rancher

    Annotations:              field.cattle.io/publicEndpoints:

                                [{"port":32141,"protocol":"TCP","serviceName":"cattle-system:rancher","allNodes":true},{"port":32443,"protocol":"TCP","serviceName":"cattl...

                              meta.helm.sh/release-name: rancher

                              meta.helm.sh/release-namespace: cattle-system

    Selector:                 app=rancher

    Type:                     NodePort

    IP Family Policy:         SingleStack

    IP Families:              IPv4

    IP:                       10.152.183.25

    IPs:                      10.152.183.25

    Port:                     http  80/TCP

    TargetPort:               80/TCP

    NodePort:                 http  32141/TCP

    Endpoints:                10.1.138.134:80

    Port:                     https-internal  443/TCP

    TargetPort:               444/TCP

    NodePort:                 https-internal  32443/TCP

    Endpoints:                10.1.138.134:444

    Session Affinity:         None

    External Traffic Policy:  Cluster

    Events:                   <none>
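
    As an alternative to editing the svc interactively, the same change could be applied with a single kubectl patch (a sketch; same ports as configured above):

    kubectl -n cattle-system patch svc rancher --type merge -p '{"spec":{"type":"NodePort","ports":[{"name":"http","port":80,"protocol":"TCP","targetPort":80,"nodePort":32141},{"name":"https-internal","port":443,"protocol":"TCP","targetPort":444,"nodePort":32443}]}}'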


  13. Rancher can be accessed through an SSH tunnel or exposed via a web server (nginx/apache)
    Example using an SSH tunnel

    ┌──(yysyys)-[~]

    └─$ ssh -L 32443:localhost:32443 yoni@192.168.94.94      

    Activate the web console with: systemctl enable --now cockpit.socket

     

    Last login: Sat Sep 21 10:37:14 2024 from 124.40.251.124

    [yoni@yys-oci ~]$

    The first 32443 is the local port the browser will connect to (it can be changed to any free local port).
    localhost:32443 is the destination as seen from the server; this 32443 must be the https nodePort of the rancher svc.
    yoni@192.168.94.94 : yoni is the SSH user and 192.168.94.94 is the server's IP address.
    Since it is a NodePort, the port can be reached through any Kubernetes master node.

  14. Access it in a browser at https://localhost:32443
    Log in as admin with the password created during the Rancher installation, or retrieve it from the Rancher bootstrap secret in Kubernetes.

  15. --Finish--

Friday, 20 September 2024

Installing Istio on MicroK8s for AArch64 / ARM Processors

Issue: the Istio addon is not available in MicroK8s for the aarch64 / ARM architecture

[root@yys-oci ~]# microk8s.enable istio
Addon istio was not found in any repository
[root@yys-oci istio-1.23.0]# export PATH=$PWD/bin:$PATH
[root@yys-oci istio-1.23.0]# istioctl install --set profile=default -y
        |\          
        | \         
        |  \        
        |   \       
      /||    \      
     / ||     \     
    /  ||      \    
   /   ||       \                                                    
  /    ||        \                                                   
 /     ||         \                                                  
/______||__________\                                                 
  \__       _____/                                                   
     \_____/
Error: check minimum supported Kubernetes version: error getting Kubernetes version: Get "http://localhost:8080/version?timeout=5s": dial tcp [::1]:8080: connect: connection refused
[root@yys-oci istio-1.23.0]#
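
The "connection refused" above means istioctl cannot find a kubeconfig and falls back to localhost:8080. On MicroK8s this could also be fixed locally by exporting the kubeconfig (a sketch; the steps below use the Rancher kubectl shell instead):

# Point kubectl/istioctl at the MicroK8s API server
microk8s config > ~/.kube/config
export KUBECONFIG=~/.kube/config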


Solution

  1. Log in to Rancher

  2. Open the Rancher kubectl shell



  3. Download Istio

    > curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.23.0 TARGET_ARCH=aarch64 sh -
      % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                     Dload  Upload   Total   Spent    Left  Speed
    100   102  100   102    0     0   1767      0 --:--:-- --:--:-- --:--:--  1821
    100  4899  100  4899    0     0   6358      0 --:--:-- --:--:-- --:--:-- 14845 
    Downloading istio-1.23.0 from https://github.com/istio/istio/releases/download/1.23.0/istio-1.23.0-linux-arm64.tar.gz ...
    Istio 1.23.0 Download Complete!
    Istio has been successfully downloaded into the istio-1.23.0 folder on your system.
    Next Steps:
    See https://istio.io/latest/docs/setup/install/ to add Istio to your Kubernetes cluster.
    To configure the istioctl client tool for your workstation,
    add the /home/shell/istio-1.23.0/bin directory to your environment path variable with:
             export PATH="$PATH:/home/shell/istio-1.23.0/bin"
    Begin the Istio pre-installation check by running:
             istioctl x precheck
    Need more information? Visit https://istio.io/latest/docs/setup/install/


  4. Change into the istio directory and export the PATH

    > cd istio-1.23.0/
    > export PATH=$PWD/bin:$PATH
    >


  5. Install Istio

    > istioctl install --set profile=default -y
            |\
            | \
            |  \
            |   \
          /||    \
         / ||     \
        /  ||      \
       /   ||       \
      /    ||        \
     /     ||         \
    /______||__________\
    ____________________
      \__       _____/  
         \_____/        
     
    ✔ Istio core installed
    ✔ Istiod installed
    ✔ Ingress gateways installed
    ✔ Installation complete
    Made this installation the default for cluster-wide operations.
    >


  6. Check the Istio pods

    # Run kubectl commands inside here

    # e.g. kubectl get all

    > kubectl get pod -A | grep istio

    istio-system istio-ingressgateway-58bc8c7d89-l6pqj 1/1 Running   0             13d

    istio-system istiod-5df659df75-p6l5c               1/1 Running   0             13d

    >
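
    To verify the installation and start using the mesh, the deployed versions can be checked and automatic sidecar injection enabled per namespace; a minimal sketch:

    > istioctl version
    > kubectl label namespace default istio-injection=enabled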