commit 8c0dbb0cb6 (parent dca0e13c53)
Florian Herrengt, 2023-12-01 17:14:07 +00:00
35 changed files with 3890 additions and 3672 deletions

.DS_Store vendored (new binary file, not shown)

@@ -1,25 +0,0 @@
name: Deploy app
on:
push:
branches:
- main
env:
ENCRYPTION_KEY: ${{ secrets.ENCRYPTION_KEY }}
DOCKERCONFIG_JSON: ${{ secrets.DOCKERCONFIG_JSON }}
NATS_STAGING_PASSWORD: ${{ secrets.NATS_STAGING_PASSWORD }}
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Download kubectl
run: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
- name: chmod kubectl
run: chmod +x ./kubectl
- name: Setting up kubeconfig
run: echo "${{ secrets.KUBE_CONFIG }}" > kube.config
- name: Setting up secrets
working-directory: kustomization/bases/secrets/
run: ./generate.sh
- name: Deploy
run: ./kubectl --kubeconfig ./kube.config apply -k ./kustomization/overlays/staging
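
For reference, the removed workflow reduces to one command. Assuming a checkout of this repo, a valid kube.config, and the secrets already generated via kustomization/bases/secrets/generate.sh, the same deploy can be run by hand:

# Manual equivalent of the staging deploy step (sketch)
kubectl --kubeconfig ./kube.config apply -k ./kustomization/overlays/staging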

@@ -1,22 +0,0 @@
name: Deploy app
on: workflow_dispatch
env:
ENCRYPTION_KEY: ${{ secrets.ENCRYPTION_KEY }}
DOCKERCONFIG_JSON: ${{ secrets.DOCKERCONFIG_JSON }}
NATS_PRODUCTION_PASSWORD: ${{ secrets.NATS_PRODUCTION_PASSWORD }}
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Download kubectl
run: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
- name: chmod kubectl
run: chmod +x ./kubectl
- name: Setting up kubeconfig
run: echo "${{ secrets.KUBE_CONFIG }}" > kube.config
- name: Setting up secrets
working-directory: kustomization/bases/secrets/
run: ./generate.sh
- name: Deploy
run: ./kubectl --kubeconfig ./kube.config apply -k ./kustomization/overlays/production

.gitignore vendored (3 changes)

@@ -1,2 +1,3 @@
**/*.env
kustomization/bases/charts
*.tar.gz

Dockerfile.PostgresS3 (new file, 12 lines)

@@ -0,0 +1,12 @@
# Use the official PostgreSQL image as the base image
FROM postgres:latest
# Install rclone for S3 interactions
RUN apt-get update && apt-get install -y curl \
&& curl -O https://downloads.rclone.org/rclone-current-linux-amd64.deb \
&& dpkg -i rclone-current-linux-amd64.deb \
&& rm rclone-current-linux-amd64.deb \
&& rm -rf /var/lib/apt/lists/*
# Set a default command or an entrypoint as needed
CMD ["echo", "command needed"]
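
This image is what the postgres-backup CronJob further down pulls as container-registry.nocodelytics.com/postgres-s3; a plausible build-and-push sequence (tag taken from that manifest):

docker build -f Dockerfile.PostgresS3 -t container-registry.nocodelytics.com/postgres-s3 .
docker push container-registry.nocodelytics.com/postgres-s3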

@@ -6,3 +6,5 @@
2. Add base64 encoded secrets
3. `export $(cat .env | xargs)`
4. `cat secrets.yaml | envsubst | kubectl apply -f -`
Caddyfile location: `/etc/caddy/Caddyfile`

caddy/Caddyfile (new file, 11 lines)

@@ -0,0 +1,11 @@
:80 {
respond "OK"
}
grafana.nocodelytics.com {
reverse_proxy :5478
}
prometheus.nocodelytics.com {
reverse_proxy :9090
}
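
Before uploading, the file can be checked locally; caddy validate parses the config without serving it:

caddy validate --config ./Caddyfile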

caddy/upload_caddy_conf.sh (new executable file, 12 lines)

@@ -0,0 +1,12 @@
#!/bin/bash
PRIVATE_KEY="${PRIVATE_KEY}"
SSH_PORT="${SSH_PORT}"
REMOTE_USER="${REMOTE_USER}"
SERVER_IP="${SERVER_IP}"
TMP_DIR_UPLOAD_FILE="/home/$REMOTE_USER/"
scp -i $PRIVATE_KEY -P $SSH_PORT ./Caddyfile $REMOTE_USER@$SERVER_IP:$TMP_DIR_UPLOAD_FILE/Caddyfile
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo cp /etc/caddy/Caddyfile /etc/caddy/Caddyfile.bak"
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo cp $TMP_DIR_UPLOAD_FILE/Caddyfile /etc/caddy/Caddyfile"
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo systemctl restart caddy"
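
The script reads its connection details from the environment; an example invocation with placeholder values:

PRIVATE_KEY=~/.ssh/id_ed25519 SSH_PORT=22 REMOTE_USER=admin SERVER_IP=203.0.113.10 ./upload_caddy_conf.sh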

cadvisor.yaml (new file, 44 lines)

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cadvisor
namespace: kube-system
spec:
selector:
matchLabels:
name: cadvisor
template:
metadata:
labels:
name: cadvisor
spec:
automountServiceAccountToken: false
containers:
- name: cadvisor
image: gcr.io/cadvisor/cadvisor:v0.47.2
volumeMounts:
- name: rootfs
mountPath: /rootfs
readOnly: true
- name: var-run
mountPath: /var/run
readOnly: true
- name: sys
mountPath: /sys
readOnly: true
- name: docker
mountPath: /var/lib/docker
readOnly: true
volumes:
- name: rootfs
hostPath:
path: /
- name: var-run
hostPath:
path: /var/run
- name: sys
hostPath:
path: /sys
- name: docker
hostPath:
path: /var/lib/docker
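
Once applied, the DaemonSet should report one ready pod per node:

kubectl -n kube-system get daemonset cadvisor
kubectl -n kube-system logs -l name=cadvisor --tail=20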

(file diff suppressed because it is too large)

clickhouse.yaml (new file, 173 lines)

@@ -0,0 +1,173 @@
apiVersion: v1
kind: Namespace
metadata:
name: databases
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: clickhouse-data-pvc
namespace: databases
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 100Gi
limits:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: clickhouse-logs-pvc
namespace: databases
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 1Gi
limits:
storage: 1Gi
---
apiVersion: v1
kind: ConfigMap
metadata:
name: clickhouse-configmap
namespace: databases
data:
custom-config.xml: |
<clickhouse>
<listen_host>0.0.0.0</listen_host>
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<prometheus>
<endpoint>/metrics</endpoint>
<port>9363</port>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>true</asynchronous_metrics>
</prometheus>
<backups>
<allowed_disk>s3_plain</allowed_disk>
</backups>
</clickhouse>
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: clickhouse
namespace: databases
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
ms: clickhouse
template:
metadata:
labels:
ms: clickhouse
spec:
terminationGracePeriodSeconds: 120
containers:
- name: clickhouse
image: clickhouse/clickhouse-server:23.10
ports:
- containerPort: 8123
- containerPort: 9000
- containerPort: 9363
volumeMounts:
- name: data
mountPath: /var/lib/clickhouse/
- name: logs
mountPath: /var/log/clickhouse-server/
- name: config
mountPath: /etc/clickhouse-server/config.d/
env:
- name: CLICKHOUSE_PASSWORD
valueFrom:
secretKeyRef:
name: secrets
key: CLICKHOUSE_PASSWORD
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: secrets
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: secrets
key: AWS_SECRET_ACCESS_KEY
volumes:
- name: data
persistentVolumeClaim:
claimName: clickhouse-data-pvc
- name: logs
persistentVolumeClaim:
claimName: clickhouse-logs-pvc
- name: config
configMap:
name: clickhouse-configmap
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: clickhouse-backup
namespace: databases
spec:
schedule: "0 0 * * *"
jobTemplate:
spec:
template:
spec:
restartPolicy: Never
containers:
- name: clickhouse-backup
image: clickhouse/clickhouse-server:23.10
env:
- name: CLICKHOUSE_PASSWORD
valueFrom:
secretKeyRef:
name: secrets
key: CLICKHOUSE_PASSWORD
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: secrets
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: secrets
key: AWS_SECRET_ACCESS_KEY
command:
- /bin/sh
- -c
- >
clickhouse client --host clickhouse.databases --user default --password $CLICKHOUSE_PASSWORD --query="BACKUP TABLE nocodelytics_production.events TO S3('https://eu2.contabostorage.com/clickhouse/backup/events/$(date +\%Y-\%m-\%d).zip', '$AWS_ACCESS_KEY_ID', '$AWS_SECRET_ACCESS_KEY');"
---
apiVersion: v1
kind: Service
metadata:
name: clickhouse
namespace: databases
spec:
type: ClusterIP
selector:
ms: clickhouse
ports:
- name: http
port: 8123
targetPort: 8123
- name: native
port: 9000
targetPort: 9000
- name: prometheus
port: 9363
targetPort: 9363
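
The CronJob only covers the backup direction; a matching restore would look like this (a sketch: the object key must point at an existing dated backup, and the same credentials must be exported locally):

clickhouse client --host clickhouse.databases --user default --password "$CLICKHOUSE_PASSWORD" \
  --query="RESTORE TABLE nocodelytics_production.events FROM S3('https://eu2.contabostorage.com/clickhouse/backup/events/2023-12-01.zip', '$AWS_ACCESS_KEY_ID', '$AWS_SECRET_ACCESS_KEY');"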

@@ -1,72 +1,8 @@
apiVersion: v1
kind: Namespace
metadata:
name: container-registry
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt-prod
namespace: container-registry
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: florian@nocodelytics.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: container-registry-server
namespace: container-registry
spec:
secretName: container-registry-server-net-tls
issuerRef:
name: letsencrypt-prod
kind: Issuer
commonName: container-registry.nocodelytics.com
dnsNames:
- container-registry.nocodelytics.com
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: container-registry-server-pvc
namespace: container-registry
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-path
resources:
requests:
storage: 1Gi
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt-prod
namespace: container-registry
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: florian@nocodelytics.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: v1
kind: ConfigMap
metadata:
name: container-registry-server-config
namespace: container-registry
namespace: sysadmin
data:
config.yml: |
version: 0.1
@@ -77,8 +13,9 @@ data:
cache:
blobdescriptor: inmemory
s3:
region: eu-west-1
region: eu
bucket: container-registry
regionendpoint: https://eu2.contabostorage.com
http:
addr: :5000
headers:
@@ -88,7 +25,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: container-registry-server
namespace: container-registry
namespace: sysadmin
spec:
replicas: 1
selector:
@@ -103,8 +40,6 @@ spec:
- name: container-registry-server
image: registry:2
volumeMounts:
- name: volv
mountPath: /var/lib/registry
- name: config-volume
mountPath: /etc/docker/registry/config.yml
subPath: config.yml
@@ -128,14 +63,7 @@ spec:
secretKeyRef:
name: secrets
key: AWS_SECRET_ACCESS_KEY
resources:
limits:
memory: "512Mi"
cpu: "100m"
volumes:
- name: volv
persistentVolumeClaim:
claimName: container-registry-server-pvc
- name: config-volume
configMap:
name: container-registry-server-config
@@ -148,7 +76,7 @@ apiVersion: v1
kind: Service
metadata:
name: container-registry-server
namespace: container-registry
namespace: sysadmin
spec:
type: NodePort
selector:
@@ -157,16 +85,43 @@ spec:
- port: 5000
targetPort: 5000
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: sysadmin
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: florian@nocodelytics.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: sysadmin
name: container-registry
spec:
secretName: container-registry-net-tls
issuerRef:
name: letsencrypt-prod
kind: Issuer
commonName: container-registry.nocodelytics.com
dnsNames:
- container-registry.nocodelytics.com
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: sysadmin
name: container-registry-nginx-ingress
namespace: container-registry
annotations:
kubernetes.io/ingress.class: "traefik"
cert-manager.io/issuer: letsencrypt-prod
traefik.ingress.kubernetes.io/redirect-entry-point: https
cert-manager.io/acme-challenge-type: http01
traefik.ingress.kubernetes.io/router.middlewares: default-https-redirect@kubernetescrd
spec:
rules:
- host: container-registry.nocodelytics.com
@@ -189,4 +144,4 @@ spec:
tls:
- hosts:
- container-registry.nocodelytics.com
secretName: container-registry-server-net-tls
secretName: container-registry-net-tls
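
A quick smoke test of the re-namespaced registry through the new ingress; /v2/_catalog is part of the standard registry HTTP API:

curl https://container-registry.nocodelytics.com/v2/_catalog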

grafana.yaml (new file, 150 lines)

@@ -0,0 +1,150 @@
apiVersion: v1
kind: Namespace
metadata:
name: sysadmin
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-pvc
namespace: sysadmin
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 10Gi
limits:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana
namespace: sysadmin
spec:
replicas: 1
selector:
matchLabels:
ms: grafana
template:
metadata:
labels:
ms: grafana
spec:
securityContext:
fsGroup: 472
supplementalGroups:
- 0
containers:
- name: grafana
image: grafana/grafana:latest
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
name: http-grafana
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /robots.txt
port: 3000
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 3000
timeoutSeconds: 1
resources:
requests:
cpu: 250m
memory: 750Mi
volumeMounts:
- mountPath: /var/lib/grafana
name: grafana-pv
volumes:
- name: grafana-pv
persistentVolumeClaim:
claimName: grafana-pvc
---
apiVersion: v1
kind: Service
metadata:
name: grafana
namespace: sysadmin
spec:
type: NodePort
selector:
ms: grafana
ports:
- port: 3000
targetPort: 3000
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: sysadmin
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: florian@nocodelytics.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: sysadmin
name: grafana
spec:
secretName: grafana-net-tls
issuerRef:
name: letsencrypt-prod
kind: Issuer
commonName: grafana.nocodelytics.com
dnsNames:
- grafana.nocodelytics.com
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: sysadmin
name: grafana-nginx-ingress
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-https-redirect@kubernetescrd,default-http-auth@kubernetescrd
spec:
rules:
- host: grafana.nocodelytics.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ssl-redirect
port:
name: use-annotation
- path: /
pathType: Prefix
backend:
service:
name: grafana
port:
number: 3000
tls:
- hosts:
- grafana.nocodelytics.com
secretName: grafana-net-tls
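
While DNS or the certificate is still propagating, the service can be reached through a port-forward instead of the ingress:

kubectl -n sysadmin port-forward svc/grafana 3000:3000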

@@ -0,0 +1,53 @@
[Unit]
Description=Grafana instance
Documentation=http://docs.grafana.org
Wants=network-online.target
After=network-online.target
After=postgresql.service mariadb.service mysql.service influxdb.service
[Service]
EnvironmentFile=/etc/default/grafana-server
User=grafana
Group=grafana
Type=simple
Restart=on-failure
WorkingDirectory=/usr/share/grafana
RuntimeDirectory=grafana
RuntimeDirectoryMode=0750
ExecStart=/usr/share/grafana/bin/grafana server \
--config=${CONF_FILE} \
--pidfile=${PID_FILE_DIR}/grafana-server.pid \
--packaging=deb \
cfg:default.paths.logs=${LOG_DIR} \
cfg:default.paths.data=${DATA_DIR} \
cfg:default.paths.plugins=${PLUGINS_DIR} \
cfg:default.paths.provisioning=${PROVISIONING_CFG_DIR}
LimitNOFILE=10000
TimeoutStopSec=20
CapabilityBoundingSet=
DeviceAllow=
LockPersonality=true
MemoryDenyWriteExecute=false
NoNewPrivileges=true
PrivateDevices=true
PrivateTmp=true
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true
ProtectKernelTunables=true
ProtectProc=invisible
ProtectSystem=full
RemoveIPC=true
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
SystemCallArchitectures=native
UMask=0027
[Install]
WantedBy=multi-user.target
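
After replacing the unit file on the host, systemd needs a daemon-reload before the new settings take effect:

sudo systemctl daemon-reload
sudo systemctl restart grafana-server
sudo systemctl status grafana-server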

grafana/grafana.ini (new executable file, 1468 lines)

(file diff suppressed because it is too large)

grafana/upload_grafana_conf.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/bin/bash
PRIVATE_KEY="${PRIVATE_KEY}"
SSH_PORT="${SSH_PORT}"
REMOTE_USER="${REMOTE_USER}"
SERVER_IP="${SERVER_IP}"
TMP_DIR_UPLOAD_FILE="/home/$REMOTE_USER/"
scp -i $PRIVATE_KEY -P $SSH_PORT ./grafana.ini $REMOTE_USER@$SERVER_IP:$TMP_DIR_UPLOAD_FILE/grafana.ini
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo cp /etc/grafana/grafana.ini /etc/grafana/grafana.ini.bak"
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo cp $TMP_DIR_UPLOAD_FILE/grafana.ini /etc/grafana/grafana.ini"
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo systemctl restart grafana-server"
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo systemctl status grafana-server"

htpasswd (new file, 1 line)

@@ -0,0 +1 @@
nocodelytics:$2y$05$fSiLFOq4Pl.t06uSXjw5nui6M2eMc5oKjFuy0mElgsjn3IxhGDB0W
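
This is a bcrypt ($2y$) htpasswd entry, presumably consumed by the default-http-auth middleware referenced in the ingresses; a replacement entry can be generated with htpasswd from apache2-utils:

htpasswd -nbB nocodelytics 'new-password-here'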

@@ -23,9 +23,8 @@ metadata:
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 30003
- port: 9090
targetPort: 9090
selector:
k8s-app: kubernetes-dashboard
@@ -179,15 +178,15 @@ spec:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.6.1
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 8443
- containerPort: 9090
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
- --token-ttl=0
- --enable-insecure-login=true
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
@@ -195,14 +194,13 @@ spec:
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
scheme: HTTP
path: /
port: 8443
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
@@ -217,17 +215,6 @@ spec:
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
kubernetes.io/arch: arm64
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
---
kind: Service
@@ -288,17 +275,6 @@ spec:
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
kubernetes.io/arch: arm64
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
volumes:
- name: tmp-volume
emptyDir: {}
@@ -321,3 +297,65 @@ kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: kubernetes-dashboard
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: florian@nocodelytics.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: kubernetes-dashboard
name: kubernetes-dashboard
spec:
secretName: kubernetes-dashboard-net-tls
issuerRef:
name: letsencrypt-prod
kind: Issuer
commonName: k3s-dashboard.nocodelytics.com
dnsNames:
- k3s-dashboard.nocodelytics.com
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: kubernetes-dashboard
name: kubernetes-dashboard-nginx-ingress
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-https-redirect@kubernetescrd
spec:
rules:
- host: k3s-dashboard.nocodelytics.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ssl-redirect
port:
name: use-annotation
- path: /
pathType: Prefix
backend:
service:
name: kubernetes-dashboard
port:
number: 9090
tls:
- hosts:
- k3s-dashboard.nocodelytics.com
secretName: kubernetes-dashboard-net-tls
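
With insecure login enabled the dashboard still accepts tokens; one for the admin-user ServiceAccount defined above can be minted with (kubectl 1.24+):

kubectl -n kubernetes-dashboard create token admin-user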

loki.yaml (new file, 108 lines)

@@ -0,0 +1,108 @@
apiVersion: v1
kind: Namespace
metadata:
name: sysadmin
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: loki-pvc
namespace: sysadmin
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 10Gi
limits:
storage: 10Gi
---
apiVersion: v1
kind: ConfigMap
metadata:
name: loki-configmap
namespace: sysadmin
data:
loki-config.yaml: |
auth_enabled: false
server:
http_listen_port: 3100
common:
instance_addr: 127.0.0.1
path_prefix: /loki
storage:
filesystem:
chunks_directory: /loki/chunks
rules_directory: /loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
schema_config:
configs:
- from: 2020-10-24
store: tsdb
object_store: filesystem
schema: v12
index:
prefix: index_
period: 24h
ruler:
alertmanager_url: http://localhost:9093
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: loki
namespace: sysadmin
spec:
replicas: 1
selector:
matchLabels:
ms: loki
template:
metadata:
labels:
ms: loki
spec:
containers:
- name: loki
image: grafana/loki:2.8.6
imagePullPolicy: IfNotPresent
args: ["-config.file=/etc/loki/config/loki-config.yaml"]
ports:
- containerPort: 3100
protocol: TCP
volumeMounts:
- mountPath: /loki
name: loki-pv
- name: config
mountPath: /etc/loki/config/
securityContext:
runAsUser: 1000
fsGroup: 2000
volumes:
- name: loki-pv
persistentVolumeClaim:
claimName: loki-pvc
- name: config
configMap:
name: loki-configmap
---
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: sysadmin
spec:
type: NodePort
selector:
ms: loki
ports:
- port: 3100
targetPort: 3100
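
Loki exposes /ready once its config has loaded; a throwaway curl pod is enough to check it from inside the cluster:

kubectl run loki-check --rm -it --restart=Never --image=curlimages/curl -- curl -s http://loki.sysadmin:3100/ready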

longhorn.yaml (new file, 62 lines)

@@ -0,0 +1,62 @@
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: longhorn-system
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: florian@nocodelytics.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: longhorn-system
name: longhorn-system
spec:
secretName: longhorn-system-net-tls
issuerRef:
name: letsencrypt-prod
kind: Issuer
commonName: longhorn.nocodelytics.com
dnsNames:
- longhorn.nocodelytics.com
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: longhorn-system
name: longhorn-system-nginx-ingress
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-https-redirect@kubernetescrd,default-http-auth@kubernetescrd
spec:
rules:
- host: longhorn.nocodelytics.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ssl-redirect
port:
name: use-annotation
- path: /
pathType: Prefix
backend:
service:
name: longhorn-frontend
port:
number: 80
tls:
- hosts:
- longhorn.nocodelytics.com
secretName: longhorn-system-net-tls

nats.yaml (new file, 141 lines)

@@ -0,0 +1,141 @@
apiVersion: v1
kind: Namespace
metadata:
name: databases
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nats-pvc
namespace: databases
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 10Gi
limits:
storage: 10Gi
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nats-configmap
namespace: databases
data:
server.conf: |
port: 4222
monitor_port: 8222
    jetstream {
      # persist JetStream data on the PVC mounted at /data
      store_dir: /data
# 1GB
max_memory_store: 1073741824
# 10GB
max_file_store: 10737418240
}
authorization: {
users: [
{user: $SYS, password: $NATS_PASSWORD},
{user: default, password: $NATS_PASSWORD}
]
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nats
namespace: databases
spec:
replicas: 1
selector:
matchLabels:
ms: nats
template:
metadata:
labels:
ms: nats
spec:
containers:
      - name: nats
        image: nats:2.10.5
        # Load server.conf from the ConfigMap; it is mounted at /etc/nats-config
        # so it does not shadow the image's default config directory.
        args: ["-c", "/etc/nats-config/server.conf"]
        ports:
        - containerPort: 4222
        - containerPort: 8222
        volumeMounts:
        - name: data
          mountPath: /data
        - name: config
          mountPath: /etc/nats-config
env:
- name: NATS_PASSWORD
valueFrom:
secretKeyRef:
name: secrets
key: NATS_PASSWORD
volumes:
- name: data
persistentVolumeClaim:
claimName: nats-pvc
- name: config
configMap:
name: nats-configmap
---
apiVersion: v1
kind: Service
metadata:
name: nats
namespace: databases
spec:
type: NodePort
selector:
ms: nats
ports:
- port: 4222
name: server
targetPort: 4222
- port: 8222
name: monitor
targetPort: 8222
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nats-exporter
namespace: databases
spec:
replicas: 1
selector:
matchLabels:
ms: nats-exporter
template:
metadata:
labels:
ms: nats-exporter
spec:
containers:
- name: nats-exporter
image: natsio/prometheus-nats-exporter
args:
- -channelz
- -connz
- -connz_detailed
- -healthz
- -jsz=all
- -serverz
- -subz
- -varz
- http://nats.databases:8222
ports:
- containerPort: 7777
---
apiVersion: v1
kind: Service
metadata:
name: nats-exporter
namespace: databases
spec:
type: NodePort
selector:
ms: nats-exporter
ports:
- port: 7777
targetPort: 7777
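
Both the monitoring port and the exporter can be spot-checked the same way:

kubectl run nats-check --rm -it --restart=Never --image=curlimages/curl -- curl -s http://nats.databases:8222/varz
kubectl run nats-exporter-check --rm -it --restart=Never --image=curlimages/curl -- curl -s http://nats-exporter.databases:7777/metrics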

postgres.yaml (new file, 221 lines)

@@ -0,0 +1,221 @@
apiVersion: v1
kind: Namespace
metadata:
name: databases
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-pvc
namespace: databases
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 10Gi
limits:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: postgres
namespace: databases
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
ms: postgres
template:
metadata:
labels:
ms: postgres
spec:
terminationGracePeriodSeconds: 120
containers:
- name: postgres
image: postgres:16.1
args:
[
"-c",
"max_connections=200",
"-c",
"listen_addresses=*",
"-c",
"shared_preload_libraries=pg_stat_statements,pg_buffercache,auto_explain",
]
ports:
- containerPort: 5432
volumeMounts:
- name: data
mountPath: /var/lib/postgresql/data
subPath: postgres
env:
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: secrets
key: POSTGRES_PASSWORD
volumes:
- name: data
persistentVolumeClaim:
claimName: postgres-pvc
---
apiVersion: v1
kind: Service
metadata:
name: postgres
namespace: databases
spec:
type: NodePort
selector:
ms: postgres
ports:
- port: 5432
targetPort: 5432
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: postgres-exporter
namespace: databases
spec:
replicas: 1
selector:
matchLabels:
ms: postgres-exporter
template:
metadata:
labels:
ms: postgres-exporter
spec:
containers:
- name: postgres-exporter
image: quay.io/prometheuscommunity/postgres-exporter
ports:
- containerPort: 9187
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: secrets
key: POSTGRES_PASSWORD
- name: DATA_SOURCE_NAME
value: postgresql://postgres:$(POSTGRES_PASSWORD)@postgres.databases:5432/postgres?sslmode=disable
---
apiVersion: v1
kind: Service
metadata:
name: postgres-exporter
namespace: databases
spec:
type: NodePort
selector:
ms: postgres-exporter
ports:
- port: 9187
targetPort: 9187
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: postgres-backup
namespace: databases
spec:
schedule: "0 0 * * *"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
jobTemplate:
spec:
backoffLimit: 0
ttlSecondsAfterFinished: 60
template:
spec:
restartPolicy: Never
containers:
- name: postgres-backup
image: container-registry.nocodelytics.com/postgres-s3
command:
- /bin/sh
- -c
- >
pg_dump -U postgres -h postgres.databases nocodelytics_production | gzip > /backup/nocodelytics_production_$(date +'%Y-%m-%d').sql.gzip &&
rclone copy /backup/nocodelytics_production_$(date '+%Y-%m-%d').sql.gzip contabo:postgres
env:
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: secrets
key: POSTGRES_PASSWORD
- name: RCLONE_CONFIG_CONTABO_TYPE
value: "s3"
- name: RCLONE_CONFIG_CONTABO_PROVIDER
value: "Other"
- name: RCLONE_CONFIG_CONTABO_ENV_AUTH
value: "false"
- name: RCLONE_CONFIG_CONTABO_ENDPOINT
value: "https://eu2.contabostorage.com"
- name: RCLONE_CONFIG_CONTABO_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: secrets
key: AWS_ACCESS_KEY_ID
- name: RCLONE_CONFIG_CONTABO_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: secrets
key: AWS_SECRET_ACCESS_KEY
volumeMounts:
- mountPath: /backup
name: backup-volume
volumes:
- name: backup-volume
emptyDir: {}
---
apiVersion: v1
kind: Pod
metadata:
name: test-pod
namespace: databases
spec:
containers:
- name: test-container
image: container-registry.nocodelytics.com/postgres-s3
command: ["sh", "-c", "echo Image pulled successfully && sleep 3600"]
env:
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: secrets
key: POSTGRES_PASSWORD
- name: RCLONE_CONFIG_CONTABO_TYPE
value: "s3"
- name: RCLONE_CONFIG_CONTABO_PROVIDER
value: "Other"
- name: RCLONE_CONFIG_CONTABO_ENV_AUTH
value: "false"
- name: RCLONE_CONFIG_CONTABO_ENDPOINT
value: "https://eu2.contabostorage.com"
- name: RCLONE_CONFIG_CONTABO_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: secrets
key: AWS_ACCESS_KEY_ID
- name: RCLONE_CONFIG_CONTABO_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: secrets
key: AWS_SECRET_ACCESS_KEY
volumeMounts:
- mountPath: /backup
name: backup-volume
volumes:
- name: backup-volume
emptyDir: {}
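
The schedule only fires at midnight; to verify the credentials and the rclone remote right away, the CronJob can be run once by hand:

kubectl -n databases create job --from=cronjob/postgres-backup postgres-backup-manual
kubectl -n databases logs job/postgres-backup-manual -f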

postgres/postgresql.conf (new file, 798 lines)

@@ -0,0 +1,798 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 6389 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 32GB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
work_mem = 10485kB # min 64kB
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
maintenance_work_mem = 2GB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
effective_io_concurrency = 200 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
max_worker_processes = 32
max_parallel_workers_per_gather = 16 # taken from max_parallel_workers
max_parallel_maintenance_workers = 4 # taken from max_parallel_workers
max_parallel_workers = 32 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enable compression of full-page writes
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
wal_buffers = 16MB # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 16GB
min_wal_size = 4GB
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
random_page_cost = 1.1 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
effective_cache_size = 96GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
default_statistics_target = 500 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1 # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Europe/Berlin'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '14/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Europe/Berlin'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.UTF-8' # locale for system error message
# strings
lc_monetary = 'en_US.UTF-8' # locale for monetary formatting
lc_numeric = 'en_US.UTF-8' # locale for number formatting
lc_time = 'en_US.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here
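
After the file is uploaded and PostgreSQL restarted (see the scripts below), the tuned values can be confirmed from psql on the database host; note the non-default port 6389 set above:

psql -h 127.0.0.1 -p 6389 -U postgres -c 'SHOW shared_buffers;' -c 'SHOW effective_cache_size;'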

postgres/queries.sql (new file, 6 lines)

@@ -0,0 +1,6 @@
-- https://vault.bitwarden.com/#/vault?search=post&itemId=89d31444-68a6-4fd5-b03d-af61010f87d1&cipherId=7295df35-fe24-403d-b8c8-afc6017bf42f
CREATE USER grafana WITH PASSWORD 'yourpassword';
CREATE DATABASE grafana;
ALTER DATABASE grafana OWNER TO grafana;
CREATE USER nocodelytics_prod WITH PASSWORD 'yourpassword';
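
The placeholder passwords must be replaced first (the Bitwarden link above points at the real ones); the file can then be run as the superuser, assuming the non-default port from postgresql.conf is in use:

psql -h 127.0.0.1 -p 6389 -U postgres -f queries.sql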

postgres/restart_postgres.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@
#!/bin/bash
PRIVATE_KEY="${PRIVATE_KEY}"
SSH_PORT="${SSH_PORT}"
REMOTE_USER="${REMOTE_USER}"
SERVER_IP="${SERVER_IP}"
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo systemctl restart postgresql.service"

@@ -0,0 +1,11 @@
#!/bin/bash
PRIVATE_KEY="${PRIVATE_KEY}"
SSH_PORT="${SSH_PORT}"
REMOTE_USER="${REMOTE_USER}"
SERVER_IP="${SERVER_IP}"
TMP_UPLOAD_FILE="/home/$REMOTE_USER/postgresql.conf"
scp -i $PRIVATE_KEY -P $SSH_PORT ./postgresql.conf $REMOTE_USER@$SERVER_IP:$TMP_UPLOAD_FILE
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo cp /etc/postgresql/14/main/postgresql.conf /etc/postgresql/14/main/postgresql.conf.bak"
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo cp $TMP_UPLOAD_FILE /etc/postgresql/14/main/postgresql.conf"

prometheus-deployment.yaml (new file, 221 lines)

@@ -0,0 +1,221 @@
apiVersion: v1
kind: Namespace
metadata:
name: sysadmin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- nodes/proxy
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus
subjects:
- kind: ServiceAccount
name: default
namespace: sysadmin
roleRef:
kind: ClusterRole
name: prometheus
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-configmap
namespace: sysadmin
data:
prometheus.yml: |
global:
scrape_interval: 60s
scrape_configs:
- job_name: "node_exporter"
static_configs:
- targets: ["144.76.186.182:9100"]
- job_name: "postgres_exporter"
static_configs:
- targets: ["postgres-exporter.databases:9187"]
- job_name: "clickhouse_exporter"
static_configs:
- targets: ["clickhouse.databases:9363"]
- job_name: "nats_exporter"
static_configs:
- targets: ["nats-exporter.databases:7777"]
- job_name: "kube_exporter"
static_configs:
- targets: ["kube-state-metrics.kube-system.svc.cluster.local:8080"]
- job_name: "kubernetes-cadvisor"
scheme: https
kubernetes_sd_configs:
- role: node
tls_config:
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
- job_name: "kubelet"
scheme: https
kubernetes_sd_configs:
- role: node
tls_config:
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
- job_name: "longhorn_exporter"
static_configs:
- targets: ["longhorn-backend.longhorn-system:9500"]
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: prometheus-pvc
namespace: sysadmin
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 10Gi
limits:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
namespace: sysadmin
spec:
replicas: 1
selector:
matchLabels:
ms: prometheus
template:
metadata:
labels:
ms: prometheus
spec:
containers:
- name: prometheus
image: prom/prometheus
args:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.retention.size=8GB
volumeMounts:
- name: data
mountPath: /prometheus/
- name: config
mountPath: /etc/prometheus/
securityContext:
runAsUser: 1000
fsGroup: 2000
volumes:
- name: data
persistentVolumeClaim:
claimName: prometheus-pvc
- name: config
configMap:
name: prometheus-configmap
---
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: sysadmin
spec:
type: NodePort
selector:
ms: prometheus
ports:
- port: 9090
targetPort: 9090
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: sysadmin
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: florian@nocodelytics.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: sysadmin
name: prometheus
spec:
secretName: prometheus-net-tls
issuerRef:
name: letsencrypt-prod
kind: Issuer
commonName: prometheus.nocodelytics.com
dnsNames:
- prometheus.nocodelytics.com
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: sysadmin
name: prometheus-nginx-ingress
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-https-redirect@kubernetescrd,default-http-auth@kubernetescrd
spec:
rules:
- host: prometheus.nocodelytics.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: prometheus
port:
number: 9090
tls:
- hosts:
- prometheus.nocodelytics.com
secretName: prometheus-net-tls
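A quick way to confirm the stack is healthy before relying on the Traefik ingress; a sketch assuming kubectl already points at the cluster:

```bash
kubectl apply -f prometheus-deployment.yaml
kubectl -n sysadmin rollout status deployment/prometheus

# Bypass the ingress and hit Prometheus directly
kubectl -n sysadmin port-forward svc/prometheus 9090:9090 &
curl -s http://localhost:9090/-/healthy   # built-in health endpoint
```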

138
promtail.yaml Normal file
View File

@ -0,0 +1,138 @@
--- # Daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: promtail-daemonset
namespace: sysadmin
spec:
selector:
matchLabels:
name: promtail
template:
metadata:
labels:
name: promtail
spec:
serviceAccount: promtail-serviceaccount
containers:
- name: promtail-container
image: grafana/promtail
args:
- -config.file=/etc/promtail/promtail.yaml
env:
- name: "HOSTNAME" # needed when using kubernetes_sd_configs
valueFrom:
fieldRef:
fieldPath: "spec.nodeName"
volumeMounts:
- name: logs
mountPath: /var/log
- name: promtail-config
mountPath: /etc/promtail
- mountPath: /var/lib/docker/containers
name: varlibdockercontainers
readOnly: true
volumes:
- name: logs
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: promtail-config
configMap:
name: promtail-config
--- # configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: promtail-config
namespace: sysadmin
data:
promtail.yaml: |
server:
http_listen_port: 9080
grpc_listen_port: 0
clients:
- url: http://loki.sysadmin:3100/loki/api/v1/push
positions:
filename: /tmp/positions.yaml
target_config:
sync_period: 10s
scrape_configs:
- job_name: pod-logs
kubernetes_sd_configs:
- role: pod
pipeline_stages:
- docker: {}
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_node_name
target_label: __host__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
replacement: $1
separator: /
source_labels:
- __meta_kubernetes_namespace
- __meta_kubernetes_pod_name
target_label: job
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: pod
- action: replace
source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
- replacement: /var/log/pods/*$1/*.log
separator: /
source_labels:
- __meta_kubernetes_pod_uid
- __meta_kubernetes_pod_container_name
target_label: __path__
--- # Clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: promtail-clusterrole
namespace: sysadmin
rules:
- apiGroups: [""]
resources:
- nodes
- services
- pods
verbs:
- get
- watch
- list
--- # ServiceAccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: promtail-serviceaccount
namespace: sysadmin
--- # Rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: promtail-clusterrolebinding
namespace: sysadmin
subjects:
- kind: ServiceAccount
name: promtail-serviceaccount
namespace: sysadmin
roleRef:
kind: ClusterRole
name: promtail-clusterrole
apiGroup: rbac.authorization.k8s.io
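Once applied, the DaemonSet should put one promtail pod on every node, each pushing to the Loki URL in the client config above. A quick verification sketch:

```bash
kubectl apply -f promtail.yaml

# One pod per node: DESIRED/READY should match the node count
kubectl -n sysadmin get daemonset promtail-daemonset
kubectl get nodes

# Scan recent logs for scrape or push errors
kubectl -n sysadmin logs daemonset/promtail-daemonset --tail=50
```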

BIN
scripts/.DS_Store vendored Normal file

Binary file not shown.

31
scripts/README.md Normal file
View File

@ -0,0 +1,31 @@
# Server configuration
## All servers
- Create a new nocodelytics user
- `useradd nocodelytics`
- `usermod -aG sudo nocodelytics`
- Use `visudo` to add `NOPASSWD:ALL` to sudo users
- Transfer SSH keys
- `mkdir /home/$USER/.ssh`
- `chmod 700 /home/$USER/.ssh`
- `sudo cp /root/.ssh/authorized_keys /home/$USER/.ssh/authorized_keys`
- `sudo chown -R $USER:$USER /home/$USER/.ssh`
- `sudo chmod 600 /home/$USER/.ssh/authorized_keys`
- Replace `/etc/ssh/sshd_config` with the provided sshd_config and run `service ssh restart`
## Main server
- Install all the requirements in ./install.sh
- Configure postgres
- Configure grafana
## Add the storage box
- `vim /etc/fstab` and add the following line:
```
//u363334.your-storagebox.de/backup /mnt/backup-server cifs iocharset=utf8,rw,credentials=/etc/backup-credentials.txt,uid=1000,gid=1000,file_mode=0660,dir_mode=0770 0 0
```
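After adding the entry, the share can be mounted and verified without a reboot; a minimal sketch, assuming the mount point does not exist yet:

```bash
sudo mkdir -p /mnt/backup-server
sudo mount -a              # mounts every fstab entry, including the new one
df -h /mnt/backup-server   # confirm the storage box is attached
```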

15
scripts/install.sh Normal file
View File

@ -0,0 +1,15 @@
#!/bin/bash"
apt update
apt upgrade
apt install -y ufw
apt install -y fail2ban
apt install -y debian-keyring debian-archive-keyring apt-transport-https
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list
apt update
apt install -y caddy
curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash - &&\
apt install -y nodejs
apt install -y postgresql postgresql-contrib
apt install -y cifs-utils
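The script installs ufw and fail2ban but never enables them. A hardening sketch; the SSH port is a placeholder and should match the provided sshd_config:

```bash
# Default-deny inbound, then open only what the host serves
ufw default deny incoming
ufw default allow outgoing
ufw allow 22/tcp    # placeholder: use the port from sshd_config
ufw allow 80/tcp    # Caddy: ACME HTTP challenge
ufw allow 443/tcp   # Caddy: TLS
ufw --force enable

# fail2ban ships with an sshd jail enabled by default
systemctl enable --now fail2ban
```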

35
scripts/system_config_backup.sh Executable file
View File

@ -0,0 +1,35 @@
#!/bin/bash
# Retrieve environment variables
PRIVATE_KEY="${PRIVATE_KEY}"
SSH_PORT="${SSH_PORT}"
REMOTE_USER="${REMOTE_USER}"
SERVER_IP="${SERVER_IP}"
BACKUP_DIR="/home/$REMOTE_USER/system_config_backup"
set -e # Exit on error
# Create the backup directory remotely
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "mkdir -p $BACKUP_DIR"
# Backup UFW configurations
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "mkdir -p $BACKUP_DIR/ufw && sudo cp -r /etc/ufw/* $BACKUP_DIR/ufw/"
# Backup Fail2Ban configurations
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "mkdir -p $BACKUP_DIR/fail2ban && sudo cp -r /etc/fail2ban/* $BACKUP_DIR/fail2ban/"
# Backup SSH configurations
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo cp /etc/ssh/sshd_config $BACKUP_DIR/"
# Backup Systemd configurations
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "mkdir -p $BACKUP_DIR/systemd && sudo cp -r /etc/systemd/system/* $BACKUP_DIR/systemd/"
# Backup Caddy data (TLS certificates)
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "mkdir -p $BACKUP_DIR/caddy && sudo cp -r /var/lib/caddy/.local/share/caddy/. $BACKUP_DIR/caddy/"
# Tar the backup directory
ssh -i $PRIVATE_KEY -p $SSH_PORT $REMOTE_USER@$SERVER_IP "sudo tar czvf ~/system_config_backup.tar.gz -C ~ system_config_backup/"
# Fetch the backup archive to the local machine
scp -i $PRIVATE_KEY -P $SSH_PORT $REMOTE_USER@$SERVER_IP:~/system_config_backup.tar.gz .
echo "Backup process completed and fetched to local machine!"

12
tracker/Caddyfile Normal file
View File

@ -0,0 +1,12 @@
:80 {
respond /healthz "OK" 200
}
tracker.nocodelytics.com {
reverse_proxy localhost:8000 {
lb_try_duration 10s
health_uri /healthz
health_interval 1s
health_timeout 5s
}
}
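The config can be checked before upload, and the health path exercised after deployment; a sketch assuming the tracker itself serves `/healthz` on port 8000, as `health_uri` implies:

```bash
caddy validate --config ./Caddyfile       # syntax/semantic check, no reload
curl -fsS http://localhost:8000/healthz   # run on the server; assumes the app exposes /healthz
```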

View File

@ -0,0 +1,17 @@
[Unit]
Description=Tracker API
Wants=network-online.target
After=network-online.target
[Service]
User=app
Group=app
Type=simple
ExecStart=/home/app/.nvm/versions/node/v18.17.1/bin/node /home/app/projects/nocodelytics/api/.build/src/tracker/index.js
Environment="ENCRYPTION_KEY=KmHZYNf53RGkhfzSWqdhj64br2z3FqN4"
Environment="NODE_ENV=production"
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target
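The unit's filename is not shown in this diff; an install sketch assuming it is saved as `tracker.service`:

```bash
sudo cp tracker.service /etc/systemd/system/tracker.service   # filename is an assumption
sudo systemctl daemon-reload
sudo systemctl enable --now tracker.service
journalctl -u tracker.service -f   # follow the service logs
```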

18
traefik-middlewares.yaml Normal file
View File

@ -0,0 +1,18 @@
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: https-redirect
namespace: default
spec:
redirectScheme:
scheme: https
permanent: true
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: http-auth
namespace: default
spec:
basicAuth:
secret: http-auth-secret
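The basicAuth middleware references an `http-auth-secret` that this change does not create. A sketch generating it with an htpasswd entry under the `users` key Traefik expects; the username and password are placeholders:

```bash
# bcrypt entry via htpasswd (apache2-utils), stored where the middleware looks
kubectl -n default create secret generic http-auth-secret \
  --from-literal=users="$(htpasswd -nbB admin 'changeme')"
```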