Merge branch 'feature/minio-migration' into feature/minio-fixes-updates

Maximilian Kratz, 2023-03-30 08:26:31 +02:00
commit 17a4398828
11 changed files with 191 additions and 0 deletions

.gitignore (2 lines changed)

@ -18,3 +18,5 @@ vars/woodpecker_production.yml
vars/woodpecker_staging.yml
vars/backup.yml
s3-migration/local-storage/


@ -25,6 +25,7 @@ sshd_port: "50001"
staging_url: "staging.forgejo.dev"
prod_url: "forgejo.dev"
migration_url: "s3-mig.forgejo.dev"
#
# Forgejo config


@ -1,2 +1,3 @@
staging ansible_host=staging.forgejo.dev ansible_user=maxkratz ansible_port="{{ sshd_port }}"
production ansible_host=forgejo.dev ansible_user=maxkratz ansible_port="{{ sshd_port }}"
s3-mig ansible_host=s3-mig.forgejo.dev ansible_user=maxkratz ansible_port="{{ sshd_port }}"

main.tf (40 lines changed)

@ -128,3 +128,43 @@ output "server_ipv4_production" {
output "server_ipv6_production" {
value = "${hcloud_server.production.ipv6_address}"
}
#
# Create the s3 migration server
#
resource "hcloud_server" "s3-migration" {
name = "s3-migration"
image = "debian-11"
server_type = "cx11"
location = "nbg1"
ssh_keys = ["${data.hcloud_ssh_key.ssh_key.id}"]
user_data = templatefile("user_data.yml.tpl", {
ssh_pub_key = var.ssh_pub_key
passwd = var.passwd
fqdn = "s3-mig.forgejo.dev"
})
}
# Set RDNS entry of s3 migration server IPv4
resource "hcloud_rdns" "s3-migration-rdns-v4" {
server_id = hcloud_server.s3-migration.id
ip_address = hcloud_server.s3-migration.ipv4_address
dns_ptr = "s3-mig.forgejo.dev"
}
# Set RDNS entry of s3 migration server IPv6
resource "hcloud_rdns" "s3-migration-rdns-v6" {
server_id = hcloud_server.s3-migration.id
ip_address = hcloud_server.s3-migration.ipv6_address
dns_ptr = "s3-mig.forgejo.dev"
}
# Output Server Public IP address
output "server_ipv4_s3-migration" {
value = "${hcloud_server.s3-migration.ipv4_address}"
}
output "server_ipv6_s3-migration" {
value = "${hcloud_server.s3-migration.ipv6_address}"
}
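As the migration guide (`s3-migration/MIGRATION.md` below) shows, the temporary server is created and destroyed with targeted Terraform runs:

```bash
# Create only the temporary migration server
terraform apply -var-file="secrets.tfvars" -target hcloud_server.s3-migration

# Destroy it again once the migration is done
terraform destroy -var-file="secrets.tfvars" -target hcloud_server.s3-migration
```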

s3-mig.yaml (new file, 13 lines)

@ -0,0 +1,13 @@
---
- name: Check SSH port
  import_playbook: ssh.yaml

- name: Basic infrastructure
  import_playbook: infra.yaml

- name: (Re-)Deploy Minio S3
  hosts: s3-mig
  roles:
    - reverse-proxy
    - minio-dc
  become: true
  vars:
    instance_url: "{{ migration_url }}"
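The migration guide below invokes this playbook limited to the `s3-mig` host:

```bash
ansible-playbook s3-mig.yaml -l s3-mig
```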

s3-migration/.gitignore (new file, 4 lines)

@ -0,0 +1,4 @@
*.json
*.zip
*.zip*
*.txt

s3-migration/MIGRATION.md (new file, 52 lines)

@ -0,0 +1,52 @@
# Minio S3 Migration
In order to migrate the Minio S3 data and metadata, one has to spin up a temporary (newer) Minio S3 instance.

Steps (in theory):

- Create a new VM
- Deploy a temporary Minio S3 instance
- Migrate metadata + bucket data from the old instance to the temporary instance
- Shut down the old instance
  - Also clear its data/config mounts
- Re-deploy a new instance with the new backend version to replace the old instance
- Migrate metadata + bucket data from the temporary instance to the new instance

Please note: `mc` copies the whole data set over the network. Due to slow connections at home/office, it is recommended to run the migration on one of the servers.
## Commands/Steps to run
- `$ terraform apply -var-file="secrets.tfvars" -target hcloud_server.s3-migration`
- Adjust the DNS settings of these zones:
  - `s3-mig.forgejo.dev`
  - `s3.s3-mig.forgejo.dev`
  - `console.s3.s3-mig.forgejo.dev`
- `$ ansible-playbook s3-mig.yaml -l s3-mig`
- Verify that the temporary instance is available at https://console.s3.s3-mig.forgejo.dev
- [Install `mc`](https://min.io/docs/minio/linux/reference/minio-mc.html) on your local machine if not already done
- Set the environment variables (the key values are placeholders):
  - `$ export ACCESSKEY=123`
  - `$ export SECRETKEY=456`
  - `$ export SOURCE=https://s3.forgejo.dev`
  - `$ export TARGET=https://s3.s3-mig.forgejo.dev`
- `$ cd s3-migration && chmod +x migrate-s3.sh && ./migrate-s3.sh`
- Wait until the initial mirror pass has finished (the script runs `mc mirror` with `--watch`, so it keeps running; stop it manually once the source is idle)
- Stop the old instance: `$ cd /srv/docker-compose/minio && docker-compose down`
- Delete or rename the old data folders: `$ cd /srv/docker-compose/minio && mv minio-config minio-config_old && mv minio-data minio-data_old`
- Re-deploy Minio S3 with the new backend: `$ ansible-playbook s3.yaml -l production`
- Set the environment variables again, but swap `SOURCE` and `TARGET`:
  - `$ export ACCESSKEY=123`
  - `$ export SECRETKEY=456`
  - `$ export SOURCE=https://s3.s3-mig.forgejo.dev`
  - `$ export TARGET=https://s3.forgejo.dev`
- `$ cd s3-migration && chmod +x migrate-s3.sh && ./migrate-s3.sh`
- Wait until the initial mirror pass has finished (again, stop the `--watch` process manually once the source is idle)
- Verify that all data has successfully been migrated to the new instance (see the verification sketch after this list)
- Stop the temporary instance and destroy the VM: `$ terraform destroy -var-file="secrets.tfvars" -target hcloud_server.s3-migration`
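A minimal verification sketch, assuming the `mc` aliases `s3src` and `s3trg` set by `migrate-s3.sh` still point at the source and the new target instance:

```bash
# Compare bucket sizes on both sides; the numbers should match
mc du s3src/gitea
mc du s3trg/gitea

# List objects that differ between source and target; empty output means in sync
mc diff s3src/gitea s3trg/gitea
```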
## References
- https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html
- https://min.io/docs/minio/linux/operations/install-deploy-manage/deploy-minio-single-node-single-drive.html#minio-snsd
- https://min.io/docs/minio/linux/reference/minio-mc.html

s3-migration/install-mc.sh (new executable file, 10 lines)

@ -0,0 +1,10 @@
#!/bin/bash
set -e

# Download the Minio client (mc) binary
curl https://dl.min.io/client/mc/release/linux-amd64/mc \
  --create-dirs \
  -o "$HOME/minio-binaries/mc"

chmod +x "$HOME/minio-binaries/mc"

# Note: this only takes effect in the calling shell if the script is sourced
export PATH=$PATH:$HOME/minio-binaries/
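Because the `PATH` export only persists in the current shell, the script is presumably meant to be sourced rather than executed:

```bash
source ./install-mc.sh
mc --version
```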

s3-migration/migrate-s3.sh (new executable file, 34 lines)

@ -0,0 +1,34 @@
#!/bin/bash
set -e

# Check for the required environment variables
if [[ -z "$ACCESSKEY" ]]; then
  echo "=> No ACCESSKEY ENV found. Exit."
  exit 1
fi
if [[ -z "$SECRETKEY" ]]; then
  echo "=> No SECRETKEY ENV found. Exit."
  exit 1
fi
if [[ -z "$SOURCE" ]]; then
  echo "=> No SOURCE ENV found. Exit."
  exit 1
fi
if [[ -z "$TARGET" ]]; then
  echo "=> No TARGET ENV found. Exit."
  exit 1
fi

# Register both instances as mc aliases
mc alias set s3src "$SOURCE" "$ACCESSKEY" "$SECRETKEY"
mc alias set s3trg "$TARGET" "$ACCESSKEY" "$SECRETKEY"

# Migrate the server configuration
mc admin config export s3src > config.txt
mc admin config import s3trg < config.txt
mc admin service restart s3trg

# Migrate the bucket metadata (written to/read from cluster-metadata.zip)
mc admin cluster bucket export s3src
mc admin cluster bucket import s3trg cluster-metadata.zip

# Mirror the existing bucket -> migration server/bucket
mc mirror --preserve --watch s3src/gitea s3trg/gitea
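An example run, using the endpoints from the migration guide (the key values are placeholders):

```bash
export ACCESSKEY=123
export SECRETKEY=456
export SOURCE=https://s3.forgejo.dev
export TARGET=https://s3.s3-mig.forgejo.dev
./migrate-s3.sh
```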


@ -0,0 +1,24 @@
#!/bin/bash
set -e

# Check for the required environment variables
if [[ -z "$ACCESSKEY" ]]; then
  echo "=> No ACCESSKEY ENV found. Exit."
  exit 1
fi
if [[ -z "$SECRETKEY" ]]; then
  echo "=> No SECRETKEY ENV found. Exit."
  exit 1
fi
if [[ -z "$SOURCE" ]]; then
  echo "=> No SOURCE ENV found. Exit."
  exit 1
fi
if [[ -z "$TARGET" ]]; then
  echo "=> No TARGET ENV found. Exit."
  exit 1
fi

# Register only the source instance as an mc alias; TARGET is used directly
mc alias set s3src "$SOURCE" "$ACCESSKEY" "$SECRETKEY"

# Mirror the bucket to the target
mc mirror --preserve --watch s3src/gitea "$TARGET"
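Unlike `migrate-s3.sh`, this variant does not register `TARGET` as an alias, so it can be any destination `mc mirror` accepts, including a local directory. Given the `s3-migration/local-storage/` entry in `.gitignore` above, a local mirror is presumably the intended use; a hypothetical invocation (the script's actual filename is not shown in this diff):

```bash
export ACCESSKEY=123
export SECRETKEY=456
export SOURCE=https://s3.forgejo.dev
export TARGET=./local-storage
./mirror-to-local.sh   # hypothetical filename
```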

s3.yaml (new file, 10 lines)

@ -0,0 +1,10 @@
---
- name: Check SSH port
  import_playbook: ssh.yaml

- name: (Re-)Deploy Minio S3
  hosts: all
  roles:
    - minio-dc
  become: true
  vars:
    instance_url: "{{ prod_url }}"
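Per the migration guide, this playbook is run against the production host only:

```bash
ansible-playbook s3.yaml -l production
```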