systemd.services.sanoid.serviceConfig.SupplementaryGroups = [
  config.users.groups."disk".name
];
extraArgs = [ "--verbose" ];
17 "${hostName}/root" = {
27 "${hostName}/root/nix" = {
30 "${hostName}/root/var/cache" = {
33 "${hostName}/root/var/log" = {
36 "${hostName}/root/home/julm/.cache" = {
39 "${hostName}/root/home/julm/.local" = {
43 "${hostName}/root/home/julm/.mozilla" = {
47 "off2/julm/backup/${hostName}" = {
programs.bash.interactiveShellInit = ''
  then dst=aubergine.sp:
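    # --sendoptions=w passes `zfs send -w` (raw), so encrypted datasets
    # are replicated without loading their keys; --recvoptions=u passes
    # `zfs recv -u`, leaving received datasets unmounted.
    # --no-sync-snap replicates sanoid's existing snapshots rather than
    # creating a syncoid-specific one for the transfer.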
    sudo syncoid --sshkey ~julm/.ssh/id_ed25519 \
      --create-bookmark --no-sync-snap --no-privilege-elevation \
      --preserve-properties --preserve-recordsize \
      --recursive --sendoptions=w --recvoptions=u \
      --exclude pumpkin/root/nix \
      --exclude pumpkin/root/var/cache \
      --exclude pumpkin/root/var/log \
      --exclude pumpkin/root/home/julm/.cache \
      ''${dst}off2/julm/backup/pumpkin
    zfs-fix-bookmarks pumpkin 2>/dev/null
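    # zfs-fix-bookmarks is not a stock ZFS or sanoid command; presumably
    # a helper script defined elsewhere in this configuration.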
  zpool status "$zpool" 2>/dev/null ||
    sudo zpool import -d /dev/disk/by-id/ "$zpool"
  trap "sudo zpool export $zpool" EXIT
  zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
    grep "^no\\s*/" | cut -f 3 |
    xargs -o -r -t -L1 sudo zfs mount -Olv || true
  ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
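  # Example invocation, matching the shell alias defined at the end of
  # this file:
  #   mount-zfs-backup WD10JPVT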
# Trigger backups when disks are plugged
services.udev.extraRules = ''
  ACTION=="add", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
  # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
  ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
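# How this fits together: SYSTEMD_WANTS pulls in the backup service when
# the disk appears, and SYSTEMD_ALIAS="/sys/subsystem/usb/WD10JPVT" makes
# systemd expose the disk as sys-subsystem-usb-WD10JPVT.device, the unit
# that the bindsTo = [ "sys-subsystem-usb-%i.device" ] clauses below
# track, so everything is stopped again when the disk is unplugged.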
# Show what's happening to the user
systemd.services."zfs-term@" = {
  description = "ZFS terminal for: %I";
  unitConfig.StopWhenUnneeded = false;
  environment.DISPLAY = ":0";
  environment.XAUTHORITY = "/home/julm/.Xauthority";
  after = [ "graphical.target" ];
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  pkgs.writeShellScript "zfs-term" ''
    ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
      -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
        -u zfs-force-import@$DESTPOOL \
        -u zfs-local-backup-home@$DESTPOOL"
# Force zpool import, even if the disk has not been exported or has been imported on another computer
systemd.services."zfs-force-import@" = {
  description = "ZFS force import: %I";
  StartLimitInterval = 200;
  StopWhenUnneeded = true;
  wants = [ "zfs-term@%i.service" ];
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  path = lib.mkBefore [ "/run/booted-system/sw" ];
  RemainAfterExit = true;
  SyslogIdentifier = "zfs-force-import@%i";
  Restart = "on-failure";
  pkgs.writeShellScript "zfs-force-import" ''
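    # Fallback ladder, as read from the chain below (-X is an
    # undocumented OpenZFS option, so treat this gloss as approximate):
    # 1. scan /dev/disk/by-id/ and import whatever pools show up there;
    # 2. import -lF: load keys, rewinding to the last good txg if needed;
    # 3. reopen, in case the pool is somehow already imported;
    # 4. import -f: steal the pool if it was last used on another host;
    # 5. clear -nFX: dry-run of an extreme rewind, as a last resort.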
    # Import the zpool, using stable paths
    zpool import -d /dev/disk/by-id/ || true
    zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
    zpool reopen "$DESTPOOL" ||
    zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
    zpool clear -nFX "$DESTPOOL"
# Prune old snapshots on the backup and send new ones
systemd.services."zfs-local-backup-home@" = {
  description = "ZFS backup home, on: %I";
  wants = [ "zfs-term@%i.service" ];
  after = [ "zfs-force-import@%i.service" ];
  requires = [ "zfs-force-import@%i.service" ];
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  path = lib.mkBefore [ "/run/booted-system/sw" ];
  serviceConfig = rec {
    CacheDirectory = [ "zfs-usb-backup/%I" ];
    RuntimeDirectory = [ "zfs-usb-backup/%I" ];
    SyslogIdentifier = "zfs-local-backup-home@%i";
    + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
      if zpool status "$DESTPOOL"; then
        zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
        zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
        zpool scrub -p "$DESTPOOL" || true
    pkgs.writeShellScript "zfs-local-backup-home" ''
      # sanoid is quite conservative:
      # by setting hourly=24, a snapshot must be >24 hours old
      # and there must be >24 hourly snapshots in total,
      # or nothing is pruned.
      install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
      process_children_only=false
      [$DESTPOOL/${User}/backup/${hostName}/home]
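      # (retention settings elided; judging from the comment above they
      #  presumably resemble the following, the values being assumptions:)
      # hourly=24
      # daily=31
      # autosnap=no
      # autoprune=yes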
      ${pkgs.sanoid}/bin/sanoid \
        --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
        --configdir /tmp/sanoid \
        --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
      for dataset in ${hostName}/home; do
        ${pkgs.sanoid}/bin/syncoid \
          --exclude "home/room" \
          --no-privilege-elevation \
          "$DESTPOOL"/${User}/backup/"$dataset"
    + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
      # Only if the zpool still exists, to avoid hanging uninterruptibly
      if zpool status -v "$DESTPOOL"; then
        # Scrub the zpool for 1 minute (in the background)
        zpool scrub "$DESTPOOL"
        while zpool status -v "$DESTPOOL"; do
          zpool scrub -p "$DESTPOOL" || true
        # Export the zpool (to avoid a forced import later on)
        zpool export "$DESTPOOL" || true
        systemctl --no-block stop zfs-term@"$DESTPOOL"
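        # --no-block matters here: this runs from within the unit's own
        # start transaction, and synchronously waiting on another unit's
        # stop job from inside it could deadlock.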
programs.bash.shellAliases = {
  mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";