{ pkgs, lib, hostName, ... }:
{
  # Take regular snapshots, and prune old ones
  services.sanoid = {
    enable = true;
    extraArgs = [ "--verbose" ];
    datasets = {
19 "${hostName}/root/nix" = {
22 "${hostName}/root/var/cache" = {
25 "${hostName}/root/var/log" = {
28 "${hostName}/root/home/julm/.cache" = {
31 "${hostName}/root/home/julm/.local" = {
35 "${hostName}/root/home/julm/.mozilla" = {
39 "off2/julm/backup/${hostName}" = {
  programs.bash.interactiveShellInit = ''
    # Replicate this host onto the off2 backup pool, going through SSH
    # when off2 is plugged into aubergine instead of this machine.
    # (The function name, the hostname test and the pumpkin source
    # argument are assumptions.)
    backup-pumpkin () {
      local dst=
      if [ "$(hostname)" != aubergine ]
      then dst=aubergine.sp:
      fi
      sudo syncoid --sshkey ~julm/.ssh/id_ed25519 \
       --create-bookmark --no-sync-snap --no-privilege-elevation \
       --preserve-properties --preserve-recordsize \
       --recursive --sendoptions=w --recvoptions=u \
       --exclude pumpkin/root/nix \
       --exclude pumpkin/root/var/cache \
       --exclude pumpkin/root/var/log \
       --exclude pumpkin/root/home/julm/.cache \
       pumpkin \
       ''${dst}off2/julm/backup/pumpkin
      zfs-fix-bookmarks pumpkin 2>/dev/null
    }
    # Import a backup zpool, mount the datasets it holds, and browse
    # them; the subshell's EXIT trap exports the pool again when the
    # function returns.
    mount-zfs-backup () {
      (
      zpool=$1
      zpool status "$zpool" 2>/dev/null ||
      sudo zpool import -d /dev/disk/by-id/ "$zpool"
      trap "sudo zpool export $zpool" EXIT
      # Mount the not-yet-mounted datasets, loading their keys (-l);
      # xargs -o reopens stdin on the terminal so that key passphrases
      # can still be prompted for inside the pipeline.
      zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
      grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
      ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
      )
    }
  '';
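
  # Example session: plugging the WD10JPVT disk in lets the services
  # below run the backup on their own; to inspect a backup disk by hand,
  # use the mount-backup-WD10JPVT alias defined at the end of this
  # module.
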
  # Trigger backups when disks are plugged
  services.udev.extraRules = ''
    ACTION=="add", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
    # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
    ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
  '';
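
  # SYSTEMD_ALIAS exposes the disk as the device unit
  # sys-subsystem-usb-WD10JPVT.device, which the bindsTo= of the services
  # below latches onto, so they are stopped when the disk is unplugged;
  # tagging the "remove" event for systemd is what makes that unplugging
  # visible (see the issue linked above).
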
  # Show what's happening to the user
  systemd.services."zfs-term@" = {
    description = "ZFS terminal for: %I";
    # Keep running until explicitly stopped by zfs-local-backup-home@
    unitConfig.StopWhenUnneeded = false;
    environment.DISPLAY = ":0";
    environment.XAUTHORITY = "/home/julm/.Xauthority";
    # Assumption: how the script below learns the pool name (%I)
    environment.DESTPOOL = "%I";
    after = [ "graphical.target" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    serviceConfig = {
      ExecStart = pkgs.writeShellScript "zfs-term" ''
        ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
          -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
          -u zfs-force-import@$DESTPOOL \
          -u zfs-local-backup-home@$DESTPOOL"
      '';
    };
  };

  # Force zpool import, even if the disk has not been exported,
  # or was last imported on another computer
  systemd.services."zfs-force-import@" = {
    description = "ZFS force import: %I";
    unitConfig = {
      StartLimitInterval = 200;
      StopWhenUnneeded = true;
    };
    wants = [ "zfs-term@%i.service" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    path = lib.mkBefore [ "/run/booted-system/sw" ];
    environment.DESTPOOL = "%I"; # assumed, as in zfs-term@
    serviceConfig = {
      RemainAfterExit = true;
      SyslogIdentifier = "zfs-force-import@%i";
      Restart = "on-failure";
      ExecStart = pkgs.writeShellScript "zfs-force-import" ''
        # Import the zpool, using stable paths
        zpool import -d /dev/disk/by-id/ || true
        zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
        zpool reopen "$DESTPOOL" ||
        zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
        zpool clear -nFX "$DESTPOOL"
      '';
    };
  };
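
  # The ExecStart above tries increasingly drastic measures: a recovery
  # import (-F) that also loads encryption keys (-l), reopening the pool
  # in case it is already imported, a forced import (-f) of a pool last
  # imported elsewhere, and finally a dry-run extreme-rewind recovery
  # check (clear -nFX); Restart=on-failure then retries the whole ladder.
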
  # Prune old snapshots on the backup and send new ones
  systemd.services."zfs-local-backup-home@" = {
    description = "ZFS backup home, on: %I";
    wants = [ "zfs-term@%i.service" ];
    after = [ "zfs-force-import@%i.service" ];
    requires = [ "zfs-force-import@%i.service" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    path = lib.mkBefore [ "/run/booted-system/sw" ];
    environment.DESTPOOL = "%I"; # assumed, as in zfs-term@
    serviceConfig = rec {
      # The backup itself runs unprivileged, hence the rec-referenced
      # User below (the user name is an assumption)
      User = "julm";
      CacheDirectory = [ "zfs-usb-backup/%I" ];
      RuntimeDirectory = [ "zfs-usb-backup/%I" ];
      SyslogIdentifier = "zfs-local-backup-home@%i";
      # The leading "+" runs this hook with full privileges,
      # to delegate the needed ZFS permissions to the User beforehand
      ExecStartPre = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
        if zpool status "$DESTPOOL"; then
          zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
          zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
          # Pause any scrub left over from a previous plugging
          zpool scrub -p "$DESTPOOL" || true
        fi
      '';
      ExecStart = pkgs.writeShellScript "zfs-local-backup-home" ''
        # sanoid is quite conservative:
        # with hourly=24, a snapshot is only pruned once it is more than
        # 24 hours old *and* more than 24 hourly snapshots exist in
        # total; until then, nothing is pruned.
        # (Retention here is abridged; values other than hourly=24 and
        # process_children_only are assumptions.)
        install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
        [$DESTPOOL/${User}/backup/${hostName}]
        process_children_only=false

        [$DESTPOOL/${User}/backup/${hostName}/home]
        autosnap=false
        hourly=24
        EOF
        # Prune old snapshots of the received backups
        # (--prune-snapshots and --verbose are assumed flags)
        ${pkgs.sanoid}/bin/sanoid \
          --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
          --configdir /tmp/sanoid \
          --prune-snapshots \
          --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
          --verbose
        # Send the new snapshots; the option set is assumed to mirror
        # the interactive syncoid invocation earlier in this module
        for dataset in ${hostName}/home; do
          ${pkgs.sanoid}/bin/syncoid \
            --create-bookmark --no-sync-snap \
            --exclude "home/room" \
            --recursive --sendoptions=w --recvoptions=u \
            --no-privilege-elevation \
            "$dataset" \
            "$DESTPOOL"/${User}/backup/"$dataset"
        done
      '';
      ExecStartPost = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
        # Only if the zpool still exists, to avoid hanging uninterruptibly
        if zpool status -v "$DESTPOOL"; then
          # Scrub the zpool for 1 minute (in the background)
          zpool scrub "$DESTPOOL"
          sleep 60
          # Pause the scrub (it will resume at the next plugging), then
          # export the zpool (to avoid a forced import later on),
          # retrying for as long as the pool is still around
          while zpool status -v "$DESTPOOL"; do
            zpool scrub -p "$DESTPOOL" || true
            zpool export "$DESTPOOL" || true
          done
        fi
        systemctl --no-block stop zfs-term@"$DESTPOOL"
      '';
    };
  };
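
  # For reference, the same backup can be triggered by hand once the
  # disk is plugged in:
  #
  #   systemctl start zfs-local-backup-home@WD10JPVT
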
  programs.bash.shellAliases = {
    mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";
  };
}