systemd.services.sanoid.serviceConfig.SupplementaryGroups = [
  config.users.groups."disk".name
];
services.sanoid = {
  enable = true;
  extraArgs = [ "--verbose" ];
  datasets = {
    "${hostName}/root" = {
      # … (retention settings elided)
    };
    "${hostName}/root/nix" = {
      # …
    };
    "${hostName}/root/var/cache" = {
      # …
    };
    "${hostName}/root/var/log" = {
      # …
    };
    "${hostName}/root/home/julm/.cache" = {
      # …
    };
    "${hostName}/root/home/julm/.local" = {
      # …
    };
    "${hostName}/root/home/julm/.mozilla" = {
      # …
    };
    "off2/julm/backup/${hostName}" = {
      # …
    };
  };
};
programs.bash.interactiveShellInit = ''
  # Replicate this pool onto the off2 backup pool, over SSH when off2
  # is not imported locally (function name, test, and source dataset
  # are reconstructed; the original lines are elided)
  backup-pumpkin () {
    local dst=
    if ! zpool status off2 >/dev/null 2>&1
    then dst=aubergine.sp:
    fi
    sudo syncoid --sshkey ~julm/.ssh/id_ed25519 \
      --create-bookmark --no-sync-snap --no-privilege-elevation \
      --preserve-properties --preserve-recordsize \
      --recursive --sendoptions=w --recvoptions=u \
      --exclude pumpkin/root/nix \
      --exclude pumpkin/root/var/cache \
      --exclude pumpkin/root/var/log \
      --exclude pumpkin/root/home/julm/.cache \
      --exclude pumpkin/root/home/julm/games \
      pumpkin \
      ''${dst}off2/julm/backup/pumpkin
    zfs-fix-bookmarks pumpkin 2>/dev/null
  }
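  # About the flags above: --sendoptions=w sends raw (still encrypted)
  # streams, so the destination never needs the encryption keys;
  # --recvoptions=u receives the datasets without mounting them; and
  # --create-bookmark leaves a bookmark on the source so the sent
  # snapshot can later be pruned locally without breaking incrementals.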
  # Import, mount, and browse a backup pool; export it again on exit
  mount-zfs-backup () {
    (
    set -eu
    zpool=$1
    zpool status "$zpool" 2>/dev/null ||
    sudo zpool import -d /dev/disk/by-id/ "$zpool"
    trap "sudo zpool export $zpool" EXIT
    # Mount the datasets not mounted yet (-O overlay, -l load keys)
    zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
    grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
    ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
    )
  }
'';
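# Example (matching the alias defined at the end of this file):
#   $ mount-zfs-backup WD10JPVT
# imports the pool if needed, mounts the not-yet-mounted datasets under
# /mnt/WD10JPVT/$USER/backup, opens a file browser there, and the EXIT
# trap exports the pool again when the subshell terminates.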
# Trigger a backup when the backup disk is plugged in
services.udev.extraRules = ''
  ACTION=="add", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
  # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
  ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
'';
# Show the user what's happening
systemd.services."zfs-term@" = {
  description = "ZFS terminal for: %I";
  unitConfig.StopWhenUnneeded = false;
  environment.DISPLAY = ":0";
  environment.XAUTHORITY = "/home/julm/.Xauthority";
  after = [ "graphical.target" ];
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  serviceConfig = {
    ExecStart =
      # (script renamed from "zfs-force-import": that name was a copy-paste
      # error, this script only opens the logging terminal)
      pkgs.writeShellScript "zfs-term" ''
        set -eu
        DESTPOOL=$1
        ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
          -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
            -u zfs-force-import@$DESTPOOL \
            -u zfs-local-backup-home@$DESTPOOL"
      ''
      + " %I";
  };
};
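# %i/%I expand to the unit instance name, so plugging the disk starts
# zfs-term@WD10JPVT.service: an xterm following the journal of the two
# units below while they work on that pool.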
# Force the zpool import, even if the disk was not cleanly exported,
# or was last imported on another machine
systemd.services."zfs-force-import@" = {
  description = "ZFS force import: %I";
  unitConfig = {
    StartLimitInterval = 200;
    StopWhenUnneeded = true;
  };
  wants = [ "zfs-term@%i.service" ];
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  path = lib.mkBefore [ "/run/booted-system/sw" ];
  serviceConfig = {
    RemainAfterExit = true;
    SyslogIdentifier = "zfs-force-import@%i";
    Restart = "on-failure";
    ExecStart =
      pkgs.writeShellScript "zfs-force-import" ''
        set -eux
        DESTPOOL=$1
        # Import the zpool, using stable device paths;
        # each step is only tried if the previous one failed
        zpool import -d /dev/disk/by-id/ || true
        # -l loads the encryption keys, -F rewinds to the last good txg if needed
        zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
        # already imported? just reopen its devices
        zpool reopen "$DESTPOOL" ||
        # force the import even if the pool was last used by another system
        zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
        # last resort: dry-run (-n) check of an extreme-rewind (-X) recovery
        zpool clear -nFX "$DESTPOOL"
      ''
      + " %I";
  };
};
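# The units can also be driven by hand once the disk is attached, e.g.:
#   systemctl start zfs-local-backup-home@WD10JPVT.service
#   journalctl -f -u zfs-force-import@WD10JPVT -u zfs-local-backup-home@WD10JPVT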
# Prune old snapshots on the backup pool and send the new ones
systemd.services."zfs-local-backup-home@" = {
  description = "ZFS backup home, on: %I";
  wants = [ "zfs-term@%i.service" ];
  after = [ "zfs-force-import@%i.service" ];
  requires = [ "zfs-force-import@%i.service" ];
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  path = lib.mkBefore [ "/run/booted-system/sw" ];
  serviceConfig = rec {
    # The backup itself runs unprivileged, as the owner of the data
    User = "julm";
    CacheDirectory = [ "zfs-usb-backup/%I" ];
    RuntimeDirectory = [ "zfs-usb-backup/%I" ];
    SyslogIdentifier = "zfs-local-backup-home@%i";
    ExecStartPre =
      "!"
      + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
          set -eux
          DESTPOOL=$1
          if zpool status "$DESTPOOL"; then
            # Delegate just enough ZFS permissions to ${User}
            # for the unprivileged syncoid run below
            zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
            zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
            # Pause any running scrub while the backup is sent
            zpool scrub -p "$DESTPOOL" || true
          fi
        ''
      + " %I";
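    # The "!" prefix makes systemd run ExecStartPre with full privileges
    # even though User= is set; the delegation can be checked by hand with:
    #   zfs allow ${hostName}/home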
    ExecStart =
      pkgs.writeShellScript "zfs-local-backup-home" ''
        set -eux
        DESTPOOL=$1
        # sanoid is quite conservative:
        # with hourly=24, a snapshot must be more than 24 hours old,
        # *and* more than 24 hourly snapshots must exist in total,
        # before anything is pruned.
        # (template name and retention values below are partly reconstructed)
        install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
        [template_backup]
        autoprune=true
        autosnap=false
        process_children_only=false

        [$DESTPOOL/${User}/backup/${hostName}/home]
        use_template=backup
        hourly=24
        # … (other retention settings elided)
        EOF
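        # The config is generated at run time because its section name
        # embeds $DESTPOOL, which is only known from the unit instance (%I).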
        ${pkgs.sanoid}/bin/sanoid \
          --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
          --configdir /tmp/sanoid \
          --prune-snapshots \
          --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
          --verbose
        for dataset in ${hostName}/home; do
          ${pkgs.sanoid}/bin/syncoid \
            --create-bookmark \
            --exclude "home/room" \
            --no-sync-snap \
            --no-privilege-elevation \
            --preserve-properties \
            --preserve-recordsize \
            --recursive \
            --sendoptions=w \
            --recvoptions=u \
            "$dataset" \
            "$DESTPOOL"/${User}/backup/"$dataset"
        done
      ''
      + " %I";
    ExecStartPost =
      "!"
      + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
          set -eux
          DESTPOOL=$1
          # Only if the zpool still exists, to avoid hanging in uninterruptible sleep
          if zpool status -v "$DESTPOOL"; then
            # Scrub the zpool for 1 minute (in the background)
            zpool scrub "$DESTPOOL"
            sleep 60
          fi
          while zpool status -v "$DESTPOOL"; do
            # Pause the scrub, then retry the export until it succeeds
            zpool scrub -p "$DESTPOOL" || true
            # Export the zpool (to avoid a forced import later on)
            zpool export "$DESTPOOL" || true
          done
          systemctl --no-block stop zfs-term@"$DESTPOOL"
        ''
      + " %I";
  };
};
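# Note: `zpool scrub -p` pauses rather than cancels the scrub, and the
# plain `zpool scrub` issued on the next backup resumes it where it
# stopped, so the disk eventually gets fully scrubbed across many short
# plug-in sessions.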
programs.bash.shellAliases = {
  mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";
};