# infrastructure/databases/postgres.yaml
# Backing storage for the postgres StatefulSet below.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-pvc
  namespace: databases
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    # PVCs only honor `requests`; a `limits` entry here is ignored by
    # Kubernetes (and dropped from the PVC resource schema in newer APIs),
    # so the previous `limits.storage: 50Gi` has been removed.
    requests:
      storage: 50Gi
---
# Single-replica PostgreSQL 16 instance. State lives on postgres-pvc; the
# long termination grace period gives postgres time to flush and shut down
# cleanly before the kubelet sends SIGKILL.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
  namespace: databases
spec:
  serviceName: postgres
  replicas: 1
  selector:
    matchLabels:
      name: postgres
  template:
    metadata:
      labels:
        name: postgres
    spec:
      terminationGracePeriodSeconds: 120
      containers:
        - name: postgres
          image: postgres:16.1
          # Server tuning passed straight to the postgres binary.
          args:
            - "-c"
            - "max_connections=1000"
            - "-c"
            - "listen_addresses=*"
            - "-c"
            - "shared_preload_libraries=pg_stat_statements,pg_buffercache,auto_explain"
          ports:
            - containerPort: 5432
          volumeMounts:
            - name: data
              mountPath: /var/lib/postgresql/data
              # subPath avoids postgres choking on the volume's lost+found dir.
              subPath: postgres
          env:
            # Keep the data directory inside the mounted subPath.
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: secrets
                  key: POSTGRES_PASSWORD
      volumes:
        # The previous manifest had a `capacity: {storage: 50Gi}` field here.
        # `capacity` belongs on a PersistentVolume, not a pod-spec volume, and
        # is rejected by strict (server-side) validation — removed. Sizing is
        # governed by the PVC itself.
        - name: data
          persistentVolumeClaim:
            claimName: postgres-pvc
---
# In-cluster (and node-level) endpoint for postgres.
apiVersion: v1
kind: Service
metadata:
  name: postgres
  namespace: databases
spec:
  # NOTE(review): NodePort publishes Postgres on a high port of EVERY node's
  # IP. If external access is not actually required, ClusterIP would be the
  # safer choice for a database — confirm intent before changing.
  type: NodePort
  selector:
    name: postgres
  ports:
    - name: postgres
      port: 5432
      targetPort: 5432
---
# Prometheus exporter for postgres metrics, scraped via pod annotations.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres-exporter
  namespace: databases
spec:
  replicas: 1
  selector:
    matchLabels:
      name: postgres-exporter
  template:
    metadata:
      labels:
        name: postgres-exporter
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9187"
    spec:
      containers:
        - name: postgres-exporter
          # Pinned: the previous untagged image floated on :latest, so every
          # pod restart could silently pull a different exporter version.
          image: quay.io/prometheuscommunity/postgres-exporter:v0.15.0
          ports:
            - containerPort: 9187
          env:
            # Order matters: $(VAR) substitution in `value` only resolves
            # variables defined EARLIER in this list, so POSTGRES_PASSWORD
            # must stay above DATA_SOURCE_NAME.
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: secrets
                  key: POSTGRES_PASSWORD
            # NOTE(review): a password containing URL specials (@ : / % etc.)
            # will break this DSN — confirm the secret is URL-safe or
            # percent-encode it.
            - name: DATA_SOURCE_NAME
              value: postgresql://postgres:$(POSTGRES_PASSWORD)@postgres.databases:5432/postgres?sslmode=disable
---
# Daily logical backups: pg_dump each database, gzip, push to Contabo S3 via
# rclone, then delete the local copy (emptyDir scratch space).
apiVersion: batch/v1
kind: CronJob
metadata:
  name: postgres-backup
  namespace: databases
spec:
  schedule: "0 12 * * *"
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      backoffLimit: 0
      # NOTE(review): a 60s TTL deletes finished Jobs almost immediately,
      # which makes the history limits above largely moot — confirm intended.
      ttlSecondsAfterFinished: 60
      template:
        spec:
          restartPolicy: Never
          containers:
            - name: postgres-backup
              # NOTE(review): untagged private image — consider pinning a tag
              # so backups don't silently change behavior on repull.
              image: container-registry.nocodelytics.com/postgres-s3
              command:
                - /bin/sh
                - -c
                # `set -e` preserves the original fail-fast && chaining; the
                # loop replaces four copy-pasted dump/copy/rm triples.
                - |
                  set -e
                  # Compute the date ONCE. The original re-ran $(date) in every
                  # command, so a job crossing midnight could dump to one
                  # filename and then upload/rm a different, nonexistent one.
                  stamp=$(date +%Y-%m-%d)
                  for db in nocodelytics_production grafana gitea drone; do
                    file="/backup/${db}_${stamp}.sql.gzip"
                    # NOTE(review): the pipe's exit status is gzip's, so a
                    # failed pg_dump can go unnoticed (same as the original);
                    # add `set -o pipefail` if the image's /bin/sh supports it.
                    pg_dump -U postgres -h postgres.databases "$db" | gzip > "$file"
                    rclone copy "$file" contabo:postgres
                    rm "$file"
                  done
              env:
                - name: PGPASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: secrets
                      key: POSTGRES_PASSWORD
                # rclone remote "contabo" configured entirely via env vars.
                - name: RCLONE_CONFIG_CONTABO_TYPE
                  value: "s3"
                - name: RCLONE_CONFIG_CONTABO_PROVIDER
                  value: "Other"
                - name: RCLONE_CONFIG_CONTABO_ENV_AUTH
                  value: "false"
                - name: RCLONE_CONFIG_CONTABO_ENDPOINT
                  value: "https://eu2.contabostorage.com"
                - name: RCLONE_CONFIG_CONTABO_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: secrets
                      key: AWS_ACCESS_KEY_ID
                - name: RCLONE_CONFIG_CONTABO_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: secrets
                      key: AWS_SECRET_ACCESS_KEY
              volumeMounts:
                - mountPath: /backup
                  name: backup-volume
          volumes:
            - name: backup-volume
              emptyDir: {}