{ pkgs, lib, config, hostName, ... }:
{
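  # Presumably needed so that sanoid's zfs invocations, running as an
  # unprivileged dynamic user, can reach the pool's device nodes.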
  systemd.services.sanoid.serviceConfig.SupplementaryGroups = [
    config.users.groups."disk".name
  ];
  services.sanoid = {
    enable = true;
    extraArgs = [ "--verbose" ];
    datasets = {
      "${hostName}/root" = {
        autosnap = true;
        autoprune = true;
        frequently = 0;
        hourly = 12;
        daily = 7;
        monthly = 0;
        yearly = 0;
        recursive = true;
      };
      "${hostName}/root/nix" = {
        autosnap = false;
      };
      "${hostName}/root/var/cache" = {
        autosnap = false;
      };
      "${hostName}/root/var/log" = {
        autosnap = false;
      };
      "${hostName}/root/home/julm/.cache" = {
        autosnap = false;
      };
      "${hostName}/root/home/julm/.local" = {
        hourly = 0;
        daily = 1;
      };
      "${hostName}/root/home/julm/.mozilla" = {
        hourly = 0;
        daily = 1;
      };
      "off2/julm/backup/${hostName}" = {
        autosnap = false;
        autoprune = true;
        hourly = 12;
        daily = 14;
        monthly = 3;
        yearly = 0;
      };
    };
  };
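  # A sketch of the sanoid.conf the module renders from the options above
  # (assuming hostName = "pumpkin"; not necessarily the exact generated file):
  #   [pumpkin/root]
  #   autosnap=true
  #   autoprune=true
  #   frequently=0
  #   hourly=12
  #   daily=7
  #   monthly=0
  #   yearly=0
  #   recursive=true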

  programs.bash.interactiveShellInit = ''
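    # Replicate sanoid's snapshots of pumpkin/root onto the off2 zpool:
    # directly when off2 is imported locally, otherwise over SSH
    # to the host aubergine.sp.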
    backup-pumpkin () {
      local -
      set -x
      local dst=
      if ! zpool list off2; then
        dst=aubergine.sp:
      fi
      sudo syncoid --sshkey ~julm/.ssh/id_ed25519 \
        --create-bookmark --no-sync-snap --no-privilege-elevation \
        --preserve-properties --preserve-recordsize \
        --recursive --sendoptions=w --recvoptions=u \
        --exclude pumpkin/root/nix \
        --exclude pumpkin/root/var/cache \
        --exclude pumpkin/root/var/log \
        --exclude pumpkin/root/home/julm/.cache \
        pumpkin/root \
        ''${dst}off2/julm/backup/pumpkin
      zfs-fix-bookmarks pumpkin 2>/dev/null
    }
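    # Import a backup zpool from its stable device path, mount its
    # not-yet-mounted backup datasets, and browse them with caja;
    # the trap exports the zpool again when the subshell exits.
    # xargs -o keeps stdin on the tty so zfs mount -l can prompt
    # for encryption passphrases.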
    mount-zfs-backup () {
      (
      set -eux
      zpool="$1"
      zpool status "$zpool" 2>/dev/null ||
      sudo zpool import -d /dev/disk/by-id/ "$zpool"
      trap "sudo zpool export $zpool" EXIT
      zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
      grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
      ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
      )
    }
  '';
  # Trigger backups when the backup disks are plugged in
  services.udev.extraRules = ''
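    # SYSTEMD_WANTS starts the backup unit when the disk appears, and
    # SYSTEMD_ALIAS exposes it as sys-subsystem-usb-WD10JPVT.device,
    # which the zfs-* units below bind to.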
    ACTION=="add",    SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
    # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
    ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
  '';
  # Show the user what is happening
  systemd.services."zfs-term@" = {
    description = "ZFS terminal for: %I";
    unitConfig.StopWhenUnneeded = false;
    environment.DISPLAY = ":0";
    environment.XAUTHORITY = "/home/julm/.Xauthority";
    after = [ "graphical.target" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    serviceConfig = {
      Type = "simple";
      PrivateTmp = true;
      ExecStart = pkgs.writeShellScript "zfs-force-import" ''
        DESTPOOL=$1
        set -eux
        ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
          -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
          -u zfs-force-import@$DESTPOOL \
          -u zfs-local-backup-home@$DESTPOOL"
      '' + " %I";
    };
  };
  # Force the zpool import, even if the pool was not cleanly exported, or was last imported on another computer
  systemd.services."zfs-force-import@" = {
    description = "ZFS force import: %I";
    unitConfig = {
      StartLimitBurst = 5;
      StartLimitInterval = 200;
      StopWhenUnneeded = true;
    };
    wants = [ "zfs-term@%i.service" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    path = lib.mkBefore [ "/run/booted-system/sw" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      PrivateTmp = true;
      SyslogIdentifier = "zfs-force-import@%i";
      Restart = "on-failure";
      ExecStart = pkgs.writeShellScript "zfs-force-import" ''
        DESTPOOL=$1
        set -eux
        # Scan for importable zpools using stable device paths
        zpool import -d /dev/disk/by-id/ || true
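        # Fall back through increasingly forceful recoveries:
        # import with rewind and key loading (-l -F), reopen a pool
        # that is already imported, force import (-f), and finally
        # a dry-run extreme rewind check (zpool clear -nFX)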
        zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
        zpool reopen "$DESTPOOL" ||
        zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
        zpool clear -nFX "$DESTPOOL"
      '' + " %I";
    };
  };
  # Prune old snapshots on the backup and send new ones
  systemd.services."zfs-local-backup-home@" = {
    description = "ZFS backup home, on: %I";
    wants = [ "zfs-term@%i.service" ];
    after = [ "zfs-force-import@%i.service" ];
    requires = [ "zfs-force-import@%i.service" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    path = lib.mkBefore [ "/run/booted-system/sw" ];
    serviceConfig = rec {
      Type = "oneshot";
      PrivateTmp = true;
      CacheDirectory = [ "zfs-usb-backup/%I" ];
      RuntimeDirectory = [ "zfs-usb-backup/%I" ];
      User = "julm";
      Group = "users";
      SyslogIdentifier = "zfs-local-backup-home@%i";
      ExecStartPre = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
        DESTPOOL=$1
        set -eux
        if zpool status "$DESTPOOL"; then
          zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
          zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
          zpool scrub -p "$DESTPOOL" || true
        fi
      '' + " %I";
      ExecStart = pkgs.writeShellScript "zfs-local-backup-home" ''
        set -eu
        DESTPOOL=$1
        # sanoid is quite conservative:
        # by setting hourly=24, a snapshot must be >24 hours old
        # and there must be >24 hourly snapshots in total,
        # or nothing is pruned.
        install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
          [template_remote]
          autoprune=true
          autosnap=false
          process_children_only=false

          [$DESTPOOL/${User}/backup/${hostName}/home]
          hourly=6
          daily=31
          monthly=3
          recursive=true
          use_template=remote
        EOF
        set -x
        ${pkgs.sanoid}/bin/sanoid \
          --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
          --configdir /tmp/sanoid \
          --prune-snapshots \
          --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
          --verbose

        for dataset in ${hostName}/home; do
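          # Rely on sanoid's snapshots (--no-sync-snap); raw sends (-w)
          # keep encrypted datasets encrypted on the backup pool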
          ${pkgs.sanoid}/bin/syncoid \
            --create-bookmark \
            --exclude "home/room" \
            --force-delete \
            --no-privilege-elevation \
            --no-sync-snap \
            --recursive \
            --recvoptions "" \
            --sendoptions w \
            --skip-parent \
            "$dataset" \
            "$DESTPOOL"/${User}/backup/"$dataset"
        done
      '' + " %I";
      ExecStartPost = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
        DESTPOOL=$1
        set -eux
        # Proceed only if the zpool still exists, to avoid hanging
        # uninterruptibly on a device that has already vanished
        if zpool status -v "$DESTPOOL"; then
          # Let the scrub run for 1 minute (zpool scrub runs in the background)
          zpool scrub "$DESTPOOL"
          sleep 60
        fi
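        # Pause the scrub (-p) and retry the export until it succeeds:
        # zpool export fails while the pool is still busy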
        while zpool status -v "$DESTPOOL"; do
          zpool scrub -p "$DESTPOOL" || true
          sleep 20
          # Export the zpool (to avoid a forced import later on)
          zpool export "$DESTPOOL" || true
        done
        systemctl --no-block stop zfs-term@"$DESTPOOL"
      '' + " %I";
    };
  };
  programs.bash.shellAliases = {
    mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";
  };
}