From 9838757b8e47dd404fb1b7e323f4339ef23ea0b5 Mon Sep 17 00:00:00 2001 From: Pierre Ozoux Date: Tue, 30 Nov 2021 13:04:45 +0100 Subject: [PATCH 1/3] Adds migration to new cold minio. --- createLiiibre | 25 ++---- .../30-11-2021-move-dumps-to-new-minio.sh | 49 +++++++++++ scripts/functions.sh | 86 +++++++++++++++++++ 3 files changed, 143 insertions(+), 17 deletions(-) create mode 100644 migrations/30-11-2021-move-dumps-to-new-minio.sh create mode 100644 scripts/functions.sh diff --git a/createLiiibre b/createLiiibre index 1f5feff..5d7b1c5 100755 --- a/createLiiibre +++ b/createLiiibre @@ -13,6 +13,7 @@ if [ $# -ne 1 ] # we expect 1 arg fi cd /root/domains +source ./common/scripts/functions.sh export DOMAIN=${1} export NUAGE_SUBDOMAIN=${NUAGE_SUBDOMAIN:-nuage} @@ -76,32 +77,22 @@ kubectl -n ${tld} create secret generic ${CHAT_SUBDOMAIN}-${tld}-smtp --from-lit # Create Buckets ## Create dumps bucket +export STORAGE_CLASS=cold export AWS_ACCESS_KEY_ID=${NS}-dumps -export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 18` -mc admin user add cold ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY} -kubectl -n ${NS} create secret generic ${AWS_ACCESS_KEY_ID} --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} -mc admin policy set cold username-rw user=${AWS_ACCESS_KEY_ID} -mc mb cold/${AWS_ACCESS_KEY_ID} +create_bucket ## Create data buckets +export STORAGE_CLASS=hot ### For chats export AWS_ACCESS_KEY_ID=${CHAT_SUBDOMAIN}-${NS} -export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 18` -mc admin user add hot ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY} -kubectl -n ${NS} create secret generic ${AWS_ACCESS_KEY_ID}-s3 --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} -mc admin policy set hot username-rw user=${AWS_ACCESS_KEY_ID} -mc mb hot/${AWS_ACCESS_KEY_ID} -mc version enable hot/${AWS_ACCESS_KEY_ID} +create_bucket +mc version enable 
${STORAGE_CLASS}/${AWS_ACCESS_KEY_ID} ### For Nuage export AWS_ACCESS_KEY_ID=${NUAGE_SUBDOMAIN}-${NS} -export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 18` -mc admin user add hot ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY} -kubectl -n ${NS} create secret generic ${AWS_ACCESS_KEY_ID}-s3 --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} -mc admin policy set hot username-rw user=${AWS_ACCESS_KEY_ID} -mc mb hot/${AWS_ACCESS_KEY_ID} -mc version enable hot/${AWS_ACCESS_KEY_ID} +create_bucket +mc version enable ${STORAGE_CLASS}/${AWS_ACCESS_KEY_ID} # Create secrets diff --git a/migrations/30-11-2021-move-dumps-to-new-minio.sh b/migrations/30-11-2021-move-dumps-to-new-minio.sh new file mode 100644 index 0000000..727c0f3 --- /dev/null +++ b/migrations/30-11-2021-move-dumps-to-new-minio.sh @@ -0,0 +1,49 @@ +#!/bin/bash -eux + +source /root/domains/common/scripts/functions.sh + +export S3_COLD_ENDPOINT=https://cold-objects.liiib.re +export STORAGE_CLASS=cold + +for NS in `kubectl get ns --no-headers -o custom-columns=":metadata.name"`; do + # If secret for dump s3 bucket doesn't exists, skip this NS + if ! 
kubectl -n ${NS} get secret ${NS}-dumps 2>/dev/null; then + continue + fi + + echo "Working on NS: $NS" + + # Backup old dump secret + kubectl -n ${NS} get secret ${NS}-dumps --export -o yaml 2> /dev/null | sed "s/${NS}-dumps/${NS}-dumps-backup/g" | kubectl -n ${NS} apply -f - + kubectl -n ${NS} delete secret ${NS}-dumps + + # Create new cold dumps bucket + export AWS_ACCESS_KEY_ID=${NS}-dumps + create_bucket + + # If a PG is Running + if kubectl -n ${NS} get pg --no-headers | grep ${NS} | grep -q Running; then + # Update dump cronjob + export CJ=`kubectl -n $NS get cj --no-headers -o custom-columns=":metadata.name" | grep dump` + kubectl set env cj/$CJ LOGICAL_BACKUP_S3_ENDPOINT=${S3_COLD_ENDPOINT} + + # Patch pg config map + export ARCHIVE_MODE=off + pg_set_archive_mode_and_wait + kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"AWS_ACCESS_KEY_ID":"'${AWS_ACCESS_KEY_ID}'"}}' + kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"AWS_SECRET_ACCESS_KEY":"'${AWS_SECRET_ACCESS_KEY}'"}}' + kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"AWS_ENDPOINT":"'${S3_COLD_ENDPOINT}'"}}' + kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"WAL_S3_ENDPOINT":"'${S3_COLD_ENDPOINT}'"}}' + export ARCHIVE_MODE=on + pg_set_archive_mode_and_wait + else + echo "No PG in Running state" + fi + + # If a Mongo is Ready + if kubectl -n ${NS} get perconaservermongodbs.psmdb.percona.com --no-headers | grep -q ready; then + # Patch mongo + MONGO=`kubectl -n ${NS} get perconaservermongodbs.psmdb.percona.com --no-headers -o custom-columns=":metadata.name"` + kubectl -n ${NS} patch --type merge perconaservermongodbs.psmdb.percona.com ${MONGO} --patch '{"spec":{"backup":{"storages":{"backup":{"s3":{"endpointUrl":"'${S3_COLD_ENDPOINT}'"}}}}}}' + fi +done diff --git a/scripts/functions.sh b/scripts/functions.sh new file mode 100644 index 0000000..1f7dc4f --- /dev/null +++ 
b/scripts/functions.sh @@ -0,0 +1,86 @@ +function create_bucket() { + export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 18` + mc admin user add ${STORAGE_CLASS} ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY} + kubectl -n ${NS} create secret generic ${SECRET_NAME} --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} + mc mb ${STORAGE_CLASS}/${AWS_ACCESS_KEY_ID} + mc admin policy set ${STORAGE_CLASS} username-rw user=${AWS_ACCESS_KEY_ID} +} + +function pg_zero_lag() { + cat /tmp/patronictl_list |tail -n2| cut -d$'\t' -f 7 | grep -q 0 +} + +function pg_two_running() { + cat /tmp/patronictl_list |tail -n2| cut -d$'\t' -f 5 | grep running | wc -l | grep -q 2 +} + +function pg_consistent_tl() { + TL_ONE=`cat /tmp/patronictl_list |tail -n2 | head -n1 | cut -d$'\t' -f 6` + TL_TWO=`cat /tmp/patronictl_list |tail -n1 | cut -d$'\t' -f 6` + test "$TL_ONE" = "$TL_TWO" +} + +function pg_current_tl() { + if pg_consistent_tl; then + export PG_CURRENT_TL=`cat /tmp/patronictl_list |tail -n1 | cut -d$'\t' -f 6` + fi +} + +function pg_different_tl() { + if pg_consistent_tl; then + pg_current_tl + test "$PG_CURRENT_TL" != "$PG_PREVIOUS_TL" + fi +} + +function pg_one_leader() { + cat /tmp/patronictl_list |tail -n2| cut -d$'\t' -f 4 | grep -q Leader +} + +function pg_save_patronictl_list_to_temp_file() { + set -o pipefail + until kubectl -n ${NS} exec -it ${PG_CLUSTER}-0 -- patronictl list -f tsv 1> /tmp/patronictl_list 2> /dev/null + do + echo -n "." + sleep 2 + done +} + + +function pg_healthy() { + test pg_zero_lag && pg_two_running && pg_consistent_tl && pg_one_leader +} + +function pg_ensure_rolling_update_is_done() { + echo -n "Waiting pg to roll" + pg_save_patronictl_list_to_temp_file + pg_current_tl + export PG_PREVIOUS_TL=$PG_CURRENT_TL + until pg_healthy && pg_different_tl + do + pg_save_patronictl_list_to_temp_file + echo -n "." + sleep 2 + done + echo "Rolling is done and successful!" 
+ kubectl -n ${NS} exec -it ${PG_CLUSTER}-0 -- patronictl list + k logs ${PG_CLUSTER}-0 --tail=2 + k logs ${PG_CLUSTER}-1 --tail=2 +} + +function pg_set_archive_mode_and_wait() { + pg_save_patronictl_list_to_temp_file + until pg_healthy + do + echo -n "Waiting PG to be healthy" + pg_save_patronictl_list_to_temp_file + echo -n "." + sleep 2 + done + if kubectl -n ${NS} patch --type merge pg ${PG_CLUSTER} --patch '{"spec":{"postgresql":{"parameters":{"archive_mode":"'${ARCHIVE_MODE}'"}}}}' | grep -q "no change" + then + echo "PG not patched, going to next step." + else + pg_ensure_rolling_update_is_done + fi +} -- GitLab From 0dc5ff8d15ddf3cbd99370c34faab5e371e5c1a4 Mon Sep 17 00:00:00 2001 From: Pierre Ozoux Date: Tue, 30 Nov 2021 15:59:06 +0100 Subject: [PATCH 2/3] f --- migrations/30-11-2021-move-dumps-to-new-minio.sh | 4 +++- scripts/functions.sh | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/migrations/30-11-2021-move-dumps-to-new-minio.sh b/migrations/30-11-2021-move-dumps-to-new-minio.sh index 727c0f3..9f59083 100644 --- a/migrations/30-11-2021-move-dumps-to-new-minio.sh +++ b/migrations/30-11-2021-move-dumps-to-new-minio.sh @@ -1,4 +1,4 @@ -#!/bin/bash -eux +#!/bin/bash -eu source /root/domains/common/scripts/functions.sh @@ -19,6 +19,7 @@ for NS in `kubectl get ns --no-headers -o custom-columns=":metadata.name"`; do # Create new cold dumps bucket export AWS_ACCESS_KEY_ID=${NS}-dumps + export SECRET_NAME=${NS}-dumps create_bucket # If a PG is Running @@ -28,6 +29,7 @@ for NS in `kubectl get ns --no-headers -o custom-columns=":metadata.name"`; do kubectl set env cj/$CJ LOGICAL_BACKUP_S3_ENDPOINT=${S3_COLD_ENDPOINT} # Patch pg config map + export PG_CLUSTER=`kubectl -n ${NS} get pg --no-headers -o custom-columns=":metadata.name" | grep ${NS}` export ARCHIVE_MODE=off pg_set_archive_mode_and_wait kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"AWS_ACCESS_KEY_ID":"'${AWS_ACCESS_KEY_ID}'"}}' diff --git 
a/scripts/functions.sh b/scripts/functions.sh index 1f7dc4f..834bcfa 100644 --- a/scripts/functions.sh +++ b/scripts/functions.sh @@ -1,5 +1,5 @@ function create_bucket() { - export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 18` + export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 32` mc admin user add ${STORAGE_CLASS} ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY} kubectl -n ${NS} create secret generic ${SECRET_NAME} --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} mc mb ${STORAGE_CLASS}/${AWS_ACCESS_KEY_ID} @@ -64,8 +64,8 @@ function pg_ensure_rolling_update_is_done() { done echo "Rolling is done and successful!" kubectl -n ${NS} exec -it ${PG_CLUSTER}-0 -- patronictl list - k logs ${PG_CLUSTER}-0 --tail=2 - k logs ${PG_CLUSTER}-1 --tail=2 + kubectl logs ${PG_CLUSTER}-0 --tail=2 + kubectl logs ${PG_CLUSTER}-1 --tail=2 } function pg_set_archive_mode_and_wait() { -- GitLab From 01989c01141f0bc76adea9896aa8a744d54137c9 Mon Sep 17 00:00:00 2001 From: Pierre Ozoux Date: Tue, 30 Nov 2021 16:42:49 +0100 Subject: [PATCH 3/3] f --- migrations/30-11-2021-move-dumps-to-new-minio.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/migrations/30-11-2021-move-dumps-to-new-minio.sh b/migrations/30-11-2021-move-dumps-to-new-minio.sh index 9f59083..4e80728 100644 --- a/migrations/30-11-2021-move-dumps-to-new-minio.sh +++ b/migrations/30-11-2021-move-dumps-to-new-minio.sh @@ -5,7 +5,7 @@ source /root/domains/common/scripts/functions.sh export S3_COLD_ENDPOINT=https://cold-objects.liiib.re export STORAGE_CLASS=cold -for NS in `kubectl get ns --no-headers -o custom-columns=":metadata.name"`; do +for NS in `kubectl get ns --no-headers -o custom-columns=":metadata.name" | grep -v "liiib\|soci\|licoo"`; do # If secret for dump s3 bucket doesn't exists, skip this NS if ! 
kubectl -n ${NS} get secret ${NS}-dumps 2>/dev/null; then continue @@ -26,7 +26,7 @@ for NS in `kubectl get ns --no-headers -o custom-columns=":metadata.name"`; do if kubectl -n ${NS} get pg --no-headers | grep ${NS} | grep -q Running; then # Update dump cronjob export CJ=`kubectl -n $NS get cj --no-headers -o custom-columns=":metadata.name" | grep dump` - kubectl set env cj/$CJ LOGICAL_BACKUP_S3_ENDPOINT=${S3_COLD_ENDPOINT} + kubectl -n ${NS} set env cj/$CJ LOGICAL_BACKUP_S3_ENDPOINT=${S3_COLD_ENDPOINT} # Patch pg config map export PG_CLUSTER=`kubectl -n ${NS} get pg --no-headers -o custom-columns=":metadata.name" | grep ${NS}` -- GitLab