9 # syncoid --create-bookmark --no-privilege-elevation --no-sync-snap --recvoptions '' --sendoptions raw --recursive oignon/home off2/julm/backup/oignon/home
10 # zfs list -t snapshot -o name | grep ^oignon/home | while read -r snap; do zfs bookmark "$snap" "${snap//@/#}"; done
11 # Take regular snapshots, and prune old ones
14 extraArgs = [ "--verbose" ];
16 "${hostName}/home" = {
34 "off2/julm/backup/oignon" = {
45 # Trigger backups when disks are plugged
46 services.udev.extraRules = ''
47 ACTION=="add", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
48 # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
49 ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
51 # Show what's happening to the user
52 systemd.services."zfs-term@" = {
53 description = "ZFS terminal for: %I";
54 unitConfig.StopWhenUnneeded = false;
55 environment.DISPLAY = ":0";
56 environment.XAUTHORITY = "/home/julm/.Xauthority";
57 after = [ "graphical.target" ];
58 bindsTo = [ "sys-subsystem-usb-%i.device" ];
63 pkgs.writeShellScript "zfs-force-import" ''
66 ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
67 -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
68 -u zfs-force-import@$DESTPOOL \
69 -u zfs-local-backup-home@$DESTPOOL"
74 # Force zpool import, even if the disk has not been exported, or has been imported on another computer
75 systemd.services."zfs-force-import@" = {
76 description = "ZFS force import: %I";
79 StartLimitInterval = 200;
80 StopWhenUnneeded = true;
82 wants = [ "zfs-term@%i.service" ];
83 bindsTo = [ "sys-subsystem-usb-%i.device" ];
84 path = lib.mkBefore [ "/run/booted-system/sw" ];
87 RemainAfterExit = true;
89 SyslogIdentifier = "zfs-force-import@%i";
90 Restart = "on-failure";
92 pkgs.writeShellScript "zfs-force-import" ''
95 # Import the zpool, using stable paths
96 zpool import -d /dev/disk/by-id/ || true
97 zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
98 zpool reopen "$DESTPOOL" ||
99 zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
100 zpool clear -nFX "$DESTPOOL"
105 # Prune old snapshots on the backup and send new ones
106 systemd.services."zfs-local-backup-home@" = {
107 description = "ZFS backup home, on: %I";
108 wants = [ "zfs-term@%i.service" ];
109 after = [ "zfs-force-import@%i.service" ];
110 requires = [ "zfs-force-import@%i.service" ];
111 bindsTo = [ "sys-subsystem-usb-%i.device" ];
112 path = lib.mkBefore [ "/run/booted-system/sw" ];
113 serviceConfig = rec {
116 CacheDirectory = [ "zfs-usb-backup/%I" ];
117 RuntimeDirectory = [ "zfs-usb-backup/%I" ];
120 SyslogIdentifier = "zfs-local-backup-home@%i";
123 + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
126 if zpool status "$DESTPOOL"; then
127 zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
128 zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
129 zpool scrub -p "$DESTPOOL" || true
134 pkgs.writeShellScript "zfs-local-backup-home" ''
137 # sanoid is quite conservative:
138 # by setting hourly=24, a snapshot must be >24 hours old
139 # and there must be >24 total hourly snapshots,
140 # or nothing is pruned.
141 install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
145 process_children_only=false
147 [$DESTPOOL/${User}/backup/${hostName}/home]
155 ${pkgs.sanoid}/bin/sanoid \
156 --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
157 --configdir /tmp/sanoid \
159 --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
162 for dataset in ${hostName}/home; do
163 ${pkgs.sanoid}/bin/syncoid \
165 --exclude "home/room" \
167 --no-privilege-elevation \
174 "$DESTPOOL"/${User}/backup/"$dataset"
180 + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
183 # Only if the zpool still exists, to avoid uninterruptible hanging
184 if zpool status -v "$DESTPOOL"; then
185 # Scrub the zpool for 1 minute (in the background)
186 zpool scrub "$DESTPOOL"
189 while zpool status -v "$DESTPOOL"; do
190 zpool scrub -p "$DESTPOOL" || true
192 # Export the zpool (to avoid a forced import later on)
193 zpool export "$DESTPOOL" || true
195 systemctl --no-block stop zfs-term@"$DESTPOOL"
200 programs.bash.interactiveShellInit = ''
201 mount-zfs-backup () {
205 zpool status "$zpool" 2>/dev/null ||
206 sudo zpool import -d /dev/disk/by-id/ "$zpool"
207 trap "sudo zpool export $zpool" EXIT
208 zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
209 grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
210 ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
214 programs.bash.shellAliases = {
215 mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";