Commit 2d755069 authored by Pierre Ozoux

Adds migration to new cold minio.

parent deda8029
Merge request !1: Adds migration to new cold minio.
@@ -13,6 +13,7 @@ if [ $# -ne 1 ] # we expect 1 arg
fi
cd /root/domains
source ./common/scripts/functions.sh
export DOMAIN=${1}
export NUAGE_SUBDOMAIN=${NUAGE_SUBDOMAIN:-nuage}
@@ -76,32 +77,22 @@ kubectl -n ${tld} create secret generic ${CHAT_SUBDOMAIN}-${tld}-smtp --from-lit
# Create Buckets
## Create dumps bucket
export STORAGE_CLASS=cold
export AWS_ACCESS_KEY_ID=${NS}-dumps
export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 18`
mc admin user add cold ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY}
kubectl -n ${NS} create secret generic ${AWS_ACCESS_KEY_ID} --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
mc admin policy set cold username-rw user=${AWS_ACCESS_KEY_ID}
mc mb cold/${AWS_ACCESS_KEY_ID}
create_bucket
## Create data buckets
export STORAGE_CLASS=hot
### For chats
export AWS_ACCESS_KEY_ID=${CHAT_SUBDOMAIN}-${NS}
export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 18`
mc admin user add hot ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY}
kubectl -n ${NS} create secret generic ${AWS_ACCESS_KEY_ID}-s3 --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
mc admin policy set hot username-rw user=${AWS_ACCESS_KEY_ID}
mc mb hot/${AWS_ACCESS_KEY_ID}
mc version enable hot/${AWS_ACCESS_KEY_ID}
create_bucket
mc version enable ${STORAGE_CLASS}/${AWS_ACCESS_KEY_ID}
### For Nuage
export AWS_ACCESS_KEY_ID=${NUAGE_SUBDOMAIN}-${NS}
export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 18`
mc admin user add hot ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY}
kubectl -n ${NS} create secret generic ${AWS_ACCESS_KEY_ID}-s3 --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
mc admin policy set hot username-rw user=${AWS_ACCESS_KEY_ID}
mc mb hot/${AWS_ACCESS_KEY_ID}
mc version enable hot/${AWS_ACCESS_KEY_ID}
create_bucket
mc version enable ${STORAGE_CLASS}/${AWS_ACCESS_KEY_ID}
# Create secrets
#!/bin/bash -eu
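# Migration to the new cold MinIO: for every tenant namespace, recreate the dumps
# bucket and its credentials on the cold cluster, then point the dump cronjob, the
# Postgres backup/WAL settings and the Mongo backup storage at the new endpoint.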
source /root/domains/common/scripts/functions.sh
export S3_COLD_ENDPOINT=https://cold-objects.liiib.re
export STORAGE_CLASS=cold
for NS in `kubectl get ns --no-headers -o custom-columns=":metadata.name" | grep -v "liiib\|soci\|licoo"`; do
# If the secret for the dump S3 bucket doesn't exist, skip this NS
if ! kubectl -n ${NS} get secret ${NS}-dumps 2>/dev/null; then
continue
fi
echo "Working on NS: $NS"
# Backup old dump secret
kubectl -n ${NS} get secret ${NS}-dumps --export -o yaml 2> /dev/null | sed "s/${NS}-dumps/${NS}-dumps-backup/g" | kubectl -n ${NS} apply -f -
kubectl -n ${NS} delete secret ${NS}-dumps
# Create new cold dumps bucket
export AWS_ACCESS_KEY_ID=${NS}-dumps
export SECRET_NAME=${NS}-dumps
create_bucket
# If a PG is Running
if kubectl -n ${NS} get pg --no-headers | grep ${NS} | grep -q Running; then
# Update dump cronjob
export CJ=`kubectl -n $NS get cj --no-headers -o custom-columns=":metadata.name" | grep dump`
kubectl -n ${NS} set env cj/$CJ LOGICAL_BACKUP_S3_ENDPOINT=${S3_COLD_ENDPOINT}
# Patch pg config map
export PG_CLUSTER=`kubectl -n ${NS} get pg --no-headers -o custom-columns=":metadata.name" | grep ${NS}`
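# archive_mode is switched off while the S3 credentials and endpoints change, then
# switched back on below; pg_set_archive_mode_and_wait blocks until the cluster is
# healthy and any resulting rolling restart has finished.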
export ARCHIVE_MODE=off
pg_set_archive_mode_and_wait
kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"AWS_ACCESS_KEY_ID":"'${AWS_ACCESS_KEY_ID}'"}}'
kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"AWS_SECRET_ACCESS_KEY":"'${AWS_SECRET_ACCESS_KEY}'"}}'
kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"AWS_ENDPOINT":"'${S3_COLD_ENDPOINT}'"}}'
kubectl -n ${NS} patch --type merge cm postgres-pod-config --patch '{"data":{"WAL_S3_ENDPOINT":"'${S3_COLD_ENDPOINT}'"}}'
export ARCHIVE_MODE=on
pg_set_archive_mode_and_wait
else
echo "No PG in Running state"
fi
# If a Mongo is Ready
if kubectl -n ${NS} get perconaservermongodbs.psmdb.percona.com --no-headers | grep -q ready; then
# Point the Mongo backup storage at the cold endpoint
MONGO=`kubectl -n ${NS} get perconaservermongodbs.psmdb.percona.com --no-headers -o custom-columns=":metadata.name"`
kubectl -n ${NS} patch --type merge perconaservermongodbs.psmdb.percona.com ${MONGO} --patch '{"spec":{"backup":{"storages":{"backup":{"s3":{"endpointUrl":"'${S3_COLD_ENDPOINT}'"}}}}}}'
fi
done
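# The helpers below are presumably the additions to common/scripts/functions.sh,
# which the scripts above source.

# create_bucket: create a MinIO user on the ${STORAGE_CLASS} alias, store its
# credentials in the ${SECRET_NAME} Kubernetes secret, create the bucket and attach
# the username-rw policy. Expects STORAGE_CLASS, AWS_ACCESS_KEY_ID, SECRET_NAME and
# NS to be exported by the caller; AWS_SECRET_ACCESS_KEY is generated here and stays
# exported for the later config-map patches.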
function create_bucket() {
export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 32`
mc admin user add ${STORAGE_CLASS} ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY}
kubectl -n ${NS} create secret generic ${SECRET_NAME} --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
mc mb ${STORAGE_CLASS}/${AWS_ACCESS_KEY_ID}
mc admin policy set ${STORAGE_CLASS} username-rw user=${AWS_ACCESS_KEY_ID}
}
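# pg_zero_lag: the lag column (field 7 of the saved patronictl tsv) reports 0.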
function pg_zero_lag() {
cat /tmp/patronictl_list |tail -n2| cut -d$'\t' -f 7 | grep -q 0
}
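# pg_two_running: both of the last two cluster members report the running state.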
function pg_two_running() {
cat /tmp/patronictl_list |tail -n2| cut -d$'\t' -f 5 | grep running | wc -l | grep -q 2
}
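# pg_consistent_tl: the last two members are on the same timeline (field 6).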
function pg_consistent_tl() {
TL_ONE=`cat /tmp/patronictl_list |tail -n2 | head -n1 | cut -d$'\t' -f 6`
TL_TWO=`cat /tmp/patronictl_list |tail -n1 | cut -d$'\t' -f 6`
test "$TL_ONE" = "$TL_TWO"
}
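# pg_current_tl: record the cluster timeline in PG_CURRENT_TL once both members agree on it.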
function pg_current_tl() {
if pg_consistent_tl; then
export PG_CURRENT_TL=`cat /tmp/patronictl_list |tail -n1 | cut -d$'\t' -f 6`
fi
}
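# pg_different_tl: true once the cluster has moved to a new timeline compared to PG_PREVIOUS_TL.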
function pg_different_tl() {
if pg_consistent_tl; then
pg_current_tl
test "$PG_CURRENT_TL" != "$PG_PREVIOUS_TL"
fi
}
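# pg_one_leader: a Leader is present among the last two members.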
function pg_one_leader() {
cat /tmp/patronictl_list |tail -n2| cut -d$'\t' -f 4 | grep -q Leader
}
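# pg_save_patronictl_list_to_temp_file: retry patronictl list -f tsv on ${PG_CLUSTER}-0
# until it succeeds and save the output to /tmp/patronictl_list.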
function pg_save_patronictl_list_to_temp_file() {
set -o pipefail
until kubectl -n ${NS} exec -it ${PG_CLUSTER}-0 -- patronictl list -f tsv 1> /tmp/patronictl_list 2> /dev/null
do
echo -n "."
sleep 2
done
}
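# pg_healthy: all of the checks above pass on the saved patronictl output.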
function pg_healthy() {
pg_zero_lag && pg_two_running && pg_consistent_tl && pg_one_leader
}
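# pg_ensure_rolling_update_is_done: wait until the cluster is healthy again on a new
# timeline, then print the final patronictl status and the last log lines of both pods.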
function pg_ensure_rolling_update_is_done() {
echo -n "Waiting pg to roll"
pg_save_patronictl_list_to_temp_file
pg_current_tl
export PG_PREVIOUS_TL=$PG_CURRENT_TL
until pg_healthy && pg_different_tl
do
pg_save_patronictl_list_to_temp_file
echo -n "."
sleep 2
done
echo "Rolling is done and successful!"
kubectl -n ${NS} exec -it ${PG_CLUSTER}-0 -- patronictl list
kubectl -n ${NS} logs ${PG_CLUSTER}-0 --tail=2
kubectl -n ${NS} logs ${PG_CLUSTER}-1 --tail=2
}
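# pg_set_archive_mode_and_wait: wait for a healthy cluster, patch archive_mode to
# ${ARCHIVE_MODE}, and wait for the resulting rolling restart unless the patch was a no-op.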
function pg_set_archive_mode_and_wait() {
pg_save_patronictl_list_to_temp_file
until pg_healthy
do
echo -n "Waiting PG to be healthy"
pg_save_patronictl_list_to_temp_file
echo -n "."
sleep 2
done
if kubectl -n ${NS} patch --type merge pg ${PG_CLUSTER} --patch '{"spec":{"postgresql":{"parameters":{"archive_mode":"'${ARCHIVE_MODE}'"}}}}' | grep -q "no change"
then
echo "PG not patched, going to next step."
else
pg_ensure_rolling_update_is_done
fi
}
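# Usage sketch (hypothetical namespace and cluster names, for illustration only):
#   export NS=example-org PG_CLUSTER=example-org-pg ARCHIVE_MODE=off
#   pg_set_archive_mode_and_wait
#   # ...patch postgres-pod-config with the new credentials and endpoint...
#   export ARCHIVE_MODE=on
#   pg_set_archive_mode_and_wait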