starship: enable
diff --git a/hosts/oignon/backup.nix b/hosts/oignon/backup.nix
index 3425e52432d3c1e62a08aa9d2d114c08f5a15b73..f625f123aa87dd41507bd95e36d39fc1cb59b928 100644
--- a/hosts/oignon/backup.nix
+++ b/hosts/oignon/backup.nix
-{ pkgs, lib, config, hostName, ... }:
+{ pkgs, lib, hostName, ... }:
 with builtins;
 {
-# syncoid --create-bookmark --no-privilege-elevation --no-sync-snap --recvoptions '' --sendoptions raw --recursive oignon/home off2/julm/backup/oignon/home
-# zfs list -t snapshot -o name | grep ^oignon/home | while read -r snap; do zfs bookmark "$snap" "${snap//@/#}"; done
-# Take regular snapshots, and prune old ones
-services.sanoid = {
-  enable = true;
-  extraArgs = [ "--verbose" ];
-  datasets = {
-    "${hostName}/home" = {
-      autosnap = true;
-      autoprune = true;
-      hourly = 12;
-      daily = 3;
-      monthly = 0;
-      yearly = 0;
-      recursive = true;
+  # syncoid --create-bookmark --no-privilege-elevation --no-sync-snap --recvoptions '' --sendoptions raw --recursive oignon/home off2/julm/backup/oignon/home
+  # zfs list -t snapshot -o name | grep ^oignon/home | while read -r snap; do zfs bookmark "$snap" "${snap//@/#}"; done
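+  # (a ZFS bookmark remembers a snapshot's birth point, so it can still seed
+  # incremental sends after the snapshot itself has been pruned)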
+  # Take regular snapshots, and prune old ones
+  services.sanoid = {
+    enable = true;
+    extraArgs = [ "--verbose" ];
+    datasets = {
+      "${hostName}/home" = {
+        autosnap = true;
+        autoprune = true;
+        hourly = 12;
+        daily = 3;
+        monthly = 0;
+        yearly = 0;
+        recursive = true;
+      };
+      "${hostName}/var" = {
+        autosnap = true;
+        autoprune = true;
+        hourly = 12;
+        daily = 1;
+        monthly = 0;
+        yearly = 0;
+        recursive = true;
+      };
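+      # the backup target below only receives snapshots (sent by syncoid),
+      # so it is never snapshotted here, only pruned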
+      "off2/julm/backup/oignon" = {
+        autosnap = false;
+        autoprune = true;
+        hourly = 0;
+        daily = 7;
+        monthly = 3;
+        yearly = 0;
+        recursive = true;
+      };
     };
-    "${hostName}/var" = {
-      autosnap = true;
-      autoprune = true;
-      hourly = 12;
-      daily = 1;
-      monthly = 0;
-      yearly = 0;
-      recursive = true;
-    };
-    "off2/julm/backup/oignon" = {
-      autosnap = false;
-      autoprune = true;
-      hourly = 0;
-      daily = 7;
-      monthly = 3;
-      yearly = 0;
-      recursive = true;
-    };
-  };
-};
-# Trigger backups when disks are plugged
-services.udev.extraRules = ''
-  ACTION=="add",    SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
-  # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
-  ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
-'';
-# Show what's happening to the user
-systemd.services."zfs-term@" = {
-  description = "ZFS terminal for: %I";
-  unitConfig.StopWhenUnneeded = false;
-  environment.DISPLAY = ":0";
-  environment.XAUTHORITY = "/home/julm/.Xauthority";
-  after = [ "graphical.target" ];
-  bindsTo = [ "sys-subsystem-usb-%i.device" ];
-  serviceConfig = {
-    Type = "simple";
-    PrivateTmp = true;
-    ExecStart = pkgs.writeShellScript "zfs-term" ''
-      DESTPOOL=$1
-      set -eux
-      ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
-        -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
-        -u zfs-force-import@$DESTPOOL \
-        -u zfs-local-backup-home@$DESTPOOL"
-    '' + " %I";
   };
-};
-# Force zpool import, even if the disk has not been exported, or has been imported on another computer
-systemd.services."zfs-force-import@" = {
-  description = "ZFS force import: %I";
-  unitConfig = {
-    StartLimitBurst = 5;
-    StartLimitInterval = 200;
-    StopWhenUnneeded = true;
+  # Trigger backups when disks are plugged
+  services.udev.extraRules = ''
+    ACTION=="add",    SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
+    # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
+    ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
+  '';
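+  # The ID_SERIAL used above can be read from udev (device path hypothetical):
+  #   udevadm info --query=property --name=/dev/sdb | grep ^ID_SERIAL=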
+  # Show what's happening to the user
+  systemd.services."zfs-term@" = {
+    description = "ZFS terminal for: %I";
+    unitConfig.StopWhenUnneeded = false;
+    environment.DISPLAY = ":0";
+    environment.XAUTHORITY = "/home/julm/.Xauthority";
+    after = [ "graphical.target" ];
+    bindsTo = [ "sys-subsystem-usb-%i.device" ];
+    serviceConfig = {
+      Type = "simple";
+      PrivateTmp = true;
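+      # the " %I" appended below passes the unescaped instance name
+      # (the zpool name, e.g. WD10JPVT) to the script as $1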
+      ExecStart = pkgs.writeShellScript "zfs-term" ''
+        DESTPOOL=$1
+        set -eux
+        ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
+          -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
+          -u zfs-force-import@$DESTPOOL \
+          -u zfs-local-backup-home@$DESTPOOL"
+      '' + " %I";
+    };
   };
-  wants = [ "zfs-term@%i.service" ];
-  bindsTo = [ "sys-subsystem-usb-%i.device" ];
-  path = lib.mkBefore [ "/run/booted-system/sw" ];
-  serviceConfig = {
-    Type = "oneshot";
-    RemainAfterExit = true;
-    PrivateTmp = true;
-    SyslogIdentifier = "zfs-force-import@%i";
-    Restart = "on-failure";
-    ExecStart = pkgs.writeShellScript "zfs-force-import" ''
-      DESTPOOL=$1
-      set -eux
-      # Import the zpool, using stable paths
-      zpool import -d /dev/disk/by-id/ || true
-      zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
-      zpool reopen "$DESTPOOL" ||
-      zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
-      zpool clear -nFX "$DESTPOOL"
-    '' + " %I";
+  # Force zpool import, even if the disk has not been exported, or has been imported on another computer
+  systemd.services."zfs-force-import@" = {
+    description = "ZFS force import: %I";
+    unitConfig = {
+      StartLimitBurst = 5;
+      StartLimitInterval = 200;
+      StopWhenUnneeded = true;
+    };
+    wants = [ "zfs-term@%i.service" ];
+    bindsTo = [ "sys-subsystem-usb-%i.device" ];
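+    # put the booted system's tools first, so zpool/zfs match the loaded kernel module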
+    path = lib.mkBefore [ "/run/booted-system/sw" ];
+    serviceConfig = {
+      Type = "oneshot";
+      RemainAfterExit = true;
+      PrivateTmp = true;
+      SyslogIdentifier = "zfs-force-import@%i";
+      Restart = "on-failure";
+      ExecStart = pkgs.writeShellScript "zfs-force-import" ''
+        DESTPOOL=$1
+        set -eux
+        # Import the zpool, using stable paths
+        zpool import -d /dev/disk/by-id/ || true
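+        # Progressively more forceful fallbacks: -l loads encryption keys,
+        # -F attempts recovery by rewinding; clear -nFX only checks whether
+        # an extreme rewind could recover the pool, without performing it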
+        zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
+        zpool reopen "$DESTPOOL" ||
+        zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
+        zpool clear -nFX "$DESTPOOL"
+      '' + " %I";
+    };
   };
-};
-# Prune old snapshots on the backup and send new ones
-systemd.services."zfs-local-backup-home@" = {
-  description = "ZFS backup home, on: %I";
-  wants = [ "zfs-term@%i.service" ];
-  after = [ "zfs-force-import@%i.service" ];
-  requires = [ "zfs-force-import@%i.service" ];
-  bindsTo = [ "sys-subsystem-usb-%i.device" ];
-  path = lib.mkBefore [ "/run/booted-system/sw" ];
-  serviceConfig = rec {
-    Type = "oneshot";
-    PrivateTmp = true;
-    CacheDirectory = [ "zfs-usb-backup/%I" ];
-    RuntimeDirectory = [ "zfs-usb-backup/%I" ];
-    User = "julm";
-    Group = "users";
-    SyslogIdentifier = "zfs-local-backup-home@%i";
-    ExecStartPre = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
-      DESTPOOL=$1
-      set -eux
-      if zpool status "$DESTPOOL"; then
-        zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
-        zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
-        zpool scrub -p "$DESTPOOL" || true
-      fi
-    '' + " %I";
-    ExecStart = pkgs.writeShellScript "zfs-local-backup-home" ''
-      set -eu
-      DESTPOOL=$1
-      # sanoid is quite conservative:
-      # with hourly=24 set, a snapshot must be >24 hours old,
-      # and there must be >24 total hourly snapshots,
-      # otherwise nothing is pruned.
-      install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
-        [template_remote]
-        autoprune=true
-        autosnap=false
-        process_children_only=false
+  # Prune old snapshots on the backup and send new ones
+  systemd.services."zfs-local-backup-home@" = {
+    description = "ZFS backup home, on: %I";
+    wants = [ "zfs-term@%i.service" ];
+    after = [ "zfs-force-import@%i.service" ];
+    requires = [ "zfs-force-import@%i.service" ];
+    bindsTo = [ "sys-subsystem-usb-%i.device" ];
+    path = lib.mkBefore [ "/run/booted-system/sw" ];
+    serviceConfig = rec {
+      Type = "oneshot";
+      PrivateTmp = true;
+      CacheDirectory = [ "zfs-usb-backup/%I" ];
+      RuntimeDirectory = [ "zfs-usb-backup/%I" ];
+      User = "julm";
+      Group = "users";
+      SyslogIdentifier = "zfs-local-backup-home@%i";
+      ExecStartPre = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
+        DESTPOOL=$1
+        set -eux
+        if zpool status "$DESTPOOL"; then
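+          # delegate just enough ZFS rights for the unprivileged syncoid run in ExecStart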
+          zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
+          zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
+          zpool scrub -p "$DESTPOOL" || true
+        fi
+      '' + " %I";
+      ExecStart = pkgs.writeShellScript "zfs-local-backup-home" ''
+        set -eu
+        DESTPOOL=$1
+        # sanoid is quite conservative:
+        # with hourly=24 set, a snapshot must be >24 hours old,
+        # and there must be >24 total hourly snapshots,
+        # otherwise nothing is pruned.
+        install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
+          [template_remote]
+          autoprune=true
+          autosnap=false
+          process_children_only=false
 
-        [$DESTPOOL/${User}/backup/${hostName}/home]
-        hourly=6
-        daily=31
-        monthly=3
-        recursive=true
-        use_template=remote
-      EOF
-      set -x
-      ${pkgs.sanoid}/bin/sanoid \
-        --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
-        --configdir /tmp/sanoid \
-        --prune-snapshots \
-        --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
-        --verbose
+          [$DESTPOOL/${User}/backup/${hostName}/home]
+          hourly=6
+          daily=31
+          monthly=3
+          recursive=true
+          use_template=remote
+        EOF
+        set -x
+        ${pkgs.sanoid}/bin/sanoid \
+          --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
+          --configdir /tmp/sanoid \
+          --prune-snapshots \
+          --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
+          --verbose
 
-      for dataset in ${hostName}/home; do
-        ${pkgs.sanoid}/bin/syncoid \
-          --create-bookmark \
-          --exclude "home/room" \
-          --force-delete \
-          --no-privilege-elevation \
-          --no-sync-snap \
-          --recursive \
-          --recvoptions "" \
-          --sendoptions raw  \
-          --skip-parent \
-          "$dataset" \
-          "$DESTPOOL"/${User}/backup/"$dataset"
-      done
-    '' + " %I";
-    ExecStartPost = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
-      DESTPOOL=$1
+        for dataset in ${hostName}/home; do
+          ${pkgs.sanoid}/bin/syncoid \
+            --create-bookmark \
+            --exclude "home/room" \
+            --force-delete \
+            --no-privilege-elevation \
+            --no-sync-snap \
+            --recursive \
+            --recvoptions "" \
+            --sendoptions raw  \
+            --skip-parent \
+            "$dataset" \
+            "$DESTPOOL"/${User}/backup/"$dataset"
+        done
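+
+        # what landed on the backup can be checked by hand with:
+        #   zfs list -r -t snapshot "$DESTPOOL"/${User}/backup/${hostName}/home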
+      '' + " %I";
+      ExecStartPost = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
+        DESTPOOL=$1
+        set -eux
+        # Only if the zpool still exists, to avoid hanging uninterruptibly
+        if zpool status -v "$DESTPOOL"; then
+          # Scrub the zpool for 1 minute (the scrub runs in the background)
+          zpool scrub "$DESTPOOL"
+          sleep 60
+        fi
+        while zpool status -v "$DESTPOOL"; do
+          zpool scrub -p "$DESTPOOL" || true
+          sleep 20
+          # Export the zpool (to avoid a forced import later on)
+          zpool export "$DESTPOOL" || true
+        done
+        systemctl --no-block stop zfs-term@"$DESTPOOL"
+      '' + " %I";
+    };
+  };
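+  # the udev rule above pulls this unit in when the disk is plugged;
+  # it can also be started by hand while the disk is present:
+  #   systemctl start zfs-local-backup-home@WD10JPVT.service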
+  programs.bash.interactiveShellInit = ''
+    mount-zfs-backup () {
+      (
       set -eux
-      # Only if the zpool still exists, to avoid hanging uninterruptibly
-      if zpool status -v "$DESTPOOL"; then
-        # Scrub the zpool for 1 minute (the scrub runs in the background)
-        zpool scrub "$DESTPOOL"
-        sleep 60
-      fi
-      while zpool status -v "$DESTPOOL"; do
-        zpool scrub -p "$DESTPOOL" || true
-        sleep 20
-        # Export the zpool (to avoid a forced import later on)
-        zpool export "$DESTPOOL" || true
-      done
-      systemctl --no-block stop zfs-term@"$DESTPOOL"
-    '' + " %I";
+      zpool="$1"
+      zpool status "$zpool" 2>/dev/null ||
+      sudo zpool import -d /dev/disk/by-id/ "$zpool"
+      trap "sudo zpool export $zpool" EXIT
+      zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
+      grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
+      ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
+      )
+    }
+  '';
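+  # shorthand for the only backup disk configured above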
+  programs.bash.shellAliases = {
+    mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";
   };
-};
-programs.bash.interactiveShellInit = ''
-  mount-zfs-backup () {
-    (
-    set -eux
-    zpool="$1"
-    zpool status "$zpool" 2>/dev/null ||
-    sudo zpool import -d /dev/disk/by-id/ "$zpool"
-    trap "sudo zpool export $zpool" EXIT
-    zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
-    grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
-    ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
-    )
-  }
-'';
-programs.bash.shellAliases = {
-  mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";
-};
 }