
Adding ntfy service and using it from diun and backup jobs - ntfy.bodicsek.host

bodicsek 1 year ago
parent
commit
2eec0b275b
7 changed files with 211 additions and 97 deletions
  1. README.org (+8 -8)
  2. backup.cronjob.yaml (+50 -6)
  3. backup.job.yaml (+0 -29)
  4. backup.remote.cronjob.yaml (+54 -10)
  5. backup.remote.job.yaml (+0 -44)
  6. docker-image-update-notifier.yaml (+6 -0)
  7. ntfy.yaml (+93 -0)
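
All of the new notifications go to ntfy over plain HTTP inside the cluster. A quick manual check of the wiring, as a sketch: run this from any pod in the same namespace (so the ntfy service name resolves), reusing the backup topic and Bearer token that appear in the cron jobs below.

    # Publish a test message the same way the notify containers do.
    curl -X POST \
      -H "Authorization: Bearer tk_up3glyzhhojl1w5lt32jq5vqjzdgb" \
      -d "Test message from the cluster." \
      http://ntfy/backup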

+ 8 - 8
README.org

@@ -12,14 +12,14 @@ localhost:/dockervol /mnt glusterfs defaults,_netdev,noauto,x-systemd.automount
 192.168.1.1:/mnt/sda1/backup /data/backup nfs defaults 0 0
 #+end_src
 
-** One time backup
-
-Apply =backup.job.yaml=.
-
 ** Scheduled backup
 
 Apply =backup.cronjob.yaml=.
 
+** One time backup
+
+Run =kubectl create job --from=cronjob/backup backup-manual=.
+
 ** Restore
 
 Apply =backup.pod.yaml=.
@@ -34,14 +34,14 @@ Run =restic --repo /data/repo restore <snapshot_id> /data/glusterfs --insecure-n
 Remote destination is currently an Oracle Object Storage called ~backup~ in ~davidnabraczkyhajos~ tenancy.
 The API key secret and the rclone/oci config maps are created in =backup.remote.cronjob.yaml=. Apply this before anything else, it will create a cron job that runs every Sunday at 8:00 am.
 
-** One time backup
-
-Apply =backup.remote.job.yaml=.
-
 ** Scheduled backup
 
 Apply =backup.remote.cronjob.yaml=.
 
+** One time backup
+
+Run =kubectl create job --from=cronjob/backup-remote backup-remote-manual=.
+
 ** Restore
 
 Apply =backup.remote.pod.yaml=.
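
The README now creates one-off backups by cloning the CronJob spec instead of keeping separate Job manifests. A hedged sketch of running and observing one (the container names restic and notify come from backup.cronjob.yaml below; the job name is arbitrary):

    # Spawn a one-off Job from the CronJob template and follow the backup.
    kubectl create job --from=cronjob/backup backup-manual
    kubectl logs -f job/backup-manual -c restic
    # The notify container reports the outcome to ntfy once the exit code lands.
    kubectl logs job/backup-manual -c notify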

+ 50 - 6
backup.cronjob.yaml

@@ -8,16 +8,58 @@ spec:
     spec:
       template:
         spec:
+          initContainers:
+          - name: prepare-wrapper
+            image: busybox
+            command: ["/bin/sh", "-c"]
+            args:
+            - |
+              cat > /shared/wrapper.sh << 'EOF'
+              #!/bin/sh
+              # Execute the original entrypoint with all arguments
+              /usr/bin/restic "$@"
+              # Capture the exit code
+              echo $? > /shared/exit-code
+              EOF
+              chmod +x /shared/wrapper.sh
+            volumeMounts:
+            - name: shared-data
+              mountPath: /shared
           containers:
-          - args: ["backup", "--repo", "/data/repo", "--insecure-no-password", "/data/glusterfs"]
+          - name: restic
             image: restic/restic:0.17.2
+            command: ["/shared/wrapper.sh"]
+            args: ["backup", "--repo", "/data/repo", "--insecure-no-password", "/data/glusterfs"]
             imagePullPolicy: IfNotPresent
-            name: restic
             volumeMounts:
-              - name: restic-repo-vol
-                mountPath: /data/repo
-              - name: backup-data-vol
-                mountPath: /data/glusterfs
+            - name: restic-repo-vol
+              mountPath: /data/repo
+            - name: backup-data-vol
+              mountPath: /data/glusterfs
+            - name: shared-data
+              mountPath: /shared
+          - name: notify
+            image: curlimages/curl
+            command: ["/bin/sh"]
+            args:
+            - -c
+            - |
+              while [ ! -f /shared/exit-code ]; do sleep 1; done
+              
+              AUTH_HEADER="Authorization: Bearer tk_up3glyzhhojl1w5lt32jq5vqjzdgb"
+              URL="http://ntfy/backup"
+              MESSAGE="Local restic backup"
+
+              if [ "$(cat /shared/exit-code)" -eq 0 ]; then
+                STATUS="was successful"
+              else
+                STATUS="FAILED"
+              fi
+
+              curl -X POST -H "$AUTH_HEADER" -d "${MESSAGE} ${STATUS}." "$URL"
+            volumeMounts:
+            - name: shared-data
+              mountPath: /shared
           volumes:
           - name: restic-repo-vol
             hostPath:
@@ -27,6 +69,8 @@ spec:
             hostPath:
               path: /mnt
               type: Directory
+          - name: shared-data
+            emptyDir: {}
           restartPolicy: Never
           nodeSelector:
             kubernetes.io/hostname: raspberrypi4
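
One design note on the wrapper pattern above: the last command in wrapper.sh is the echo that records restic's exit status, so the restic container itself always exits 0 and the pod runs to completion either way; success or failure reaches the notify container only through the shared emptyDir. A minimal local demo of that handoff (paths are hypothetical, and false stands in for a failing restic run):

    # Simulate the wrapper: run the payload, persist its exit code.
    mkdir -p /tmp/shared
    sh -c 'false; echo $? > /tmp/shared/exit-code'
    # Simulate the notify container: poll for the file, then report.
    while [ ! -f /tmp/shared/exit-code ]; do sleep 1; done
    if [ "$(cat /tmp/shared/exit-code)" -eq 0 ]; then
      echo "Local restic backup was successful."
    else
      echo "Local restic backup FAILED."
    fi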

+ 0 - 29
backup.job.yaml

@@ -1,29 +0,0 @@
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: backup
-spec:
-  template:
-    spec:
-      containers:
-      - args: ["backup", "--repo", "/data/repo", "--insecure-no-password", "/data/glusterfs"]
-        image: restic/restic:0.17.2
-        imagePullPolicy: IfNotPresent
-        name: restic
-        volumeMounts:
-          - name: restic-repo-vol
-            mountPath: /data/repo
-          - name: backup-data-vol
-            mountPath: /data/glusterfs
-      volumes:
-      - name: restic-repo-vol
-        hostPath:
-          path: /data/backup
-          type: Directory
-      - name: backup-data-vol
-        hostPath:
-          path: /mnt
-          type: Directory
-      restartPolicy: Never
-      nodeSelector:
-        kubernetes.io/hostname: raspberrypi4

+ 54 - 10
backup.remote.cronjob.yaml

@@ -38,20 +38,62 @@ spec:
     spec:
       template:
         spec:
+          initContainers:
+          - name: prepare-wrapper
+            image: busybox
+            command: ["/bin/sh", "-c"]
+            args:
+            - |
+              cat > /shared/wrapper.sh << 'EOF'
+              #!/bin/sh
+              # Execute the original entrypoint with all arguments
+              /usr/local/bin/rclone "$@"
+              # Capture the exit code
+              echo $? > /shared/exit-code
+              EOF
+              chmod +x /shared/wrapper.sh
+            volumeMounts:
+            - name: shared-data
+              mountPath: /shared
           containers:
-          - args: ["sync", "--verbose", "--verbose", "--cache-workers", "4", "--transfers", "4", "--retries", "32", "--oos-attempt-resume-upload", "--oos-leave-parts-on-error", "/data/repo", "oos:backup"]
+          - name: rclone
             image: rclone/rclone:1.68
+            command: ["/shared/wrapper.sh"]
+            args: ["sync", "--verbose", "--verbose", "--cache-workers", "4", "--transfers", "4", "--retries", "32", "--oos-attempt-resume-upload", "--oos-leave-parts-on-error", "/data/repo", "oos:backup"]
             imagePullPolicy: IfNotPresent
-            name: rclone-cli
             volumeMounts:
-              - name: restic-repo-vol
-                mountPath: /data/repo
-              - name: rclone-conf-vol
-                mountPath: /config/rclone
-              - name: oci-conf-vol
-                mountPath: /config/oci
-              - name: oci-api-key-vol
-                mountPath: /config/oci-keys
+            - name: restic-repo-vol
+              mountPath: /data/repo
+            - name: rclone-conf-vol
+              mountPath: /config/rclone
+            - name: oci-conf-vol
+              mountPath: /config/oci
+            - name: oci-api-key-vol
+              mountPath: /config/oci-keys
+            - name: shared-data
+              mountPath: /shared
+          - name: notify
+            image: curlimages/curl
+            command: ["/bin/sh"]
+            args:
+            - -c
+            - |
+              while [ ! -f /shared/exit-code ]; do sleep 1; done
+              
+              AUTH_HEADER="Authorization: Bearer tk_up3glyzhhojl1w5lt32jq5vqjzdgb"
+              URL="http://ntfy/backup"
+              MESSAGE="Remote restic backup"
+
+              if [ "$(cat /shared/exit-code)" -eq 0 ]; then
+                STATUS="was successful"
+              else
+                STATUS="FAILED"
+              fi
+
+              curl -X POST -H "$AUTH_HEADER" -d "${MESSAGE} ${STATUS}." "$URL"
+            volumeMounts:
+            - name: shared-data
+              mountPath: /shared
           volumes:
           - name: restic-repo-vol
             hostPath:
@@ -72,6 +114,8 @@ spec:
           - name: oci-api-key-vol
             secret:
               secretName: rclone-remote-backup-api-key
+          - name: shared-data
+            emptyDir: {}
           restartPolicy: Never
           nodeSelector:
             kubernetes.io/hostname: raspberrypi4
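
The rclone-config-oos ConfigMap and the API-key Secret mounted above are defined in the earlier, unchanged part of backup.remote.cronjob.yaml and are not shown in this hunk. For orientation only, a hypothetical rclone.conf for an Oracle Object Storage remote named oos, following rclone's documented oracleobjectstorage backend (every value below is a placeholder):

    [oos]
    type = oracleobjectstorage
    provider = user_principal_auth
    namespace = <object-storage-namespace>
    compartment = <compartment-ocid>
    region = <region>
    config_file = /config/oci/config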

+ 0 - 44
backup.remote.job.yaml

@@ -1,44 +0,0 @@
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: backup-remote
-spec:
-  template:
-    spec:
-      containers:
-      - args: ["sync", "--verbose", "--verbose", "--cache-workers", "4", "--transfers", "4", "--retries", "32", "--oos-attempt-resume-upload", "--oos-leave-parts-on-error", "/data/repo", "oos:backup"]
-        image: rclone/rclone:1.68
-        imagePullPolicy: IfNotPresent
-        name: rclone-cli
-        volumeMounts:
-          - name: restic-repo-vol
-            mountPath: /data/repo
-          - name: rclone-conf-vol
-            mountPath: /config/rclone
-          - name: oci-conf-vol
-            mountPath: /config/oci
-          - name: oci-api-key-vol
-            mountPath: /config/oci-keys
-      volumes:
-      - name: restic-repo-vol
-        hostPath:
-          path: /data/backup
-          type: Directory
-      - name: rclone-conf-vol
-        configMap:
-          name: rclone-config-oos
-          items:
-          - key: rclone.conf
-            path: rclone.conf
-      - name: oci-conf-vol
-        configMap:
-          name: rclone-config-oos
-          items:
-          - key: oci.conf
-            path: config
-      - name: oci-api-key-vol
-        secret:
-          secretName: rclone-remote-backup-api-key
-      restartPolicy: Never
-      nodeSelector:
-        kubernetes.io/hostname: raspberrypi4

+ 6 - 0
docker-image-update-notifier.yaml

@@ -71,6 +71,12 @@ spec:
               value: "true"
             - name: DIUN_PROVIDERS_KUBERNETES_WATCHBYDEFAULT
               value: "true"
+            - name: DIUN_NOTIF_NTFY_ENDPOINT
+              value: "http://ntfy"
+            - name: DIUN_NOTIF_NTFY_TOKEN
+              value: "tk_vlw5tjsiwnb8b91w2lv4hh9ad6q5b"
+            - name: DIUN_NOTIF_NTFY_TOPIC
+              value: "image-update"
           volumeMounts:
             - mountPath: "/data"
               name: "data"

+ 93 - 0
ntfy.yaml

@@ -0,0 +1,93 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ntfy
+data:
+  server.yml: |
+    # Template: https://github.com/binwiederhier/ntfy/blob/main/server/server.yml
+    base-url: https://ntfy.bodicsek.host
+    behind-proxy: true
+    auth-file: /config/user.db
+    auth-default-access: deny-all
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ntfy
+spec:
+  selector:
+    matchLabels:
+      app: ntfy
+  template:
+    metadata:
+      labels:
+        app: ntfy
+    spec:
+      containers:
+      - name: ntfy
+        image: binwiederhier/ntfy:v2.11.0
+        args: ["serve"]
+        resources:
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
+        ports:
+        - containerPort: 80
+          name: http
+        volumeMounts:
+        - name: config
+          mountPath: /config
+        - name: config-file
+          mountPath: /etc/ntfy
+          readOnly: true
+      volumes:
+        - name: config
+          hostPath:
+            path: /mnt/ntfy/config
+            type: Directory
+        - name: config-file
+          configMap:
+            name: ntfy
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ntfy
+spec:
+  selector:
+    app: ntfy
+  ports:
+  - port: 80
+    targetPort: 80
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: ntfy.bodicsek.host
+spec:
+  secretName: ntfy.bodicsek.host
+  issuerRef:
+    name: letsencrypt-prod
+    kind: ClusterIssuer
+  dnsNames:
+    - ntfy.bodicsek.host
+---
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute
+metadata:
+  name: ntfy
+  namespace: default
+spec:
+  entryPoints:
+    - websecure
+  routes:
+  - kind: Rule
+    match: Host(`ntfy.bodicsek.host`)
+    services:
+    - kind: Service
+      name: ntfy
+      namespace: default
+      passHostHeader: true
+      port: 80
+  tls:
+    secretName: ntfy.bodicsek.host
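
Because server.yml sets auth-default-access: deny-all, every publisher needs a user, a topic grant, and an access token; the tk_... values used by diun and the backup jobs would have been minted along these lines. A hedged sketch with the ntfy CLI inside the pod (user and topic names are illustrative; ntfy user add prompts for a password, hence -it):

    # Create a user, grant write-only access to the backup topic, mint a token.
    kubectl exec -it deploy/ntfy -- ntfy user add backup-publisher
    kubectl exec -it deploy/ntfy -- ntfy access backup-publisher backup write-only
    kubectl exec -it deploy/ntfy -- ntfy token add backup-publisher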