# hosts/pumpkin/backup.nix — julm/julm-nix.git
# pumpkin: sanoid: use recursive=zfs
1 { pkgs, lib, hostName, ... }:
2 with builtins;
3 {
# Take regular snapshots, and prune old ones
services.sanoid = {
  enable = true;
  extraArgs = [ "--verbose" ];
  datasets =
    let
      # Pruning settings shared by every dataset:
      # prune expired snapshots, keep no yearly ones,
      # and let sanoid recurse using ZFS-native recursive snapshots.
      pruning = {
        autoprune = true;
        yearly = 0;
        recursive = "zfs";
      };
    in
    {
      # Snapshot the live root of this host.
      "${hostName}/root" = pruning // {
        autosnap = true;
        frequently = 0;
        hourly = 12;
        daily = 7;
        monthly = 0;
      };
      # Only prune (never snapshot) the received backups of oignon.
      "off2/julm/backup/oignon" = pruning // {
        autosnap = false;
        hourly = 0;
        daily = 7;
        monthly = 3;
      };
    };
};
# Interactive helpers for manual backups and for browsing a backup disk.
programs.bash.interactiveShellInit = ''
  # Send ${"$"}{hostName}/root (minus /nix, rebuildable) to aubergine,
  # reusing sanoid's snapshots (--no-sync-snap) and sending a raw
  # replication stream (--sendoptions=Rw) received unmounted (-u).
  backup-pumpkin () {
    sudo syncoid --sshkey ~julm/.ssh/id_ed25519 \
     --create-bookmark --no-sync-snap --no-privilege-elevation \
     --sendoptions=Rw --recvoptions=u \
     --exclude pumpkin/root/nix \
     pumpkin/root \
     aubergine.sp:off2/julm/backup/pumpkin
  }
  # Import the given zpool (if not already imported), mount the
  # not-yet-mounted backup filesystems under it, and open a file browser.
  # Runs in a subshell so set -eux and the trap stay local;
  # the trap exports the zpool again when the subshell exits.
  mount-zfs-backup () {
    (
    set -eux
    zpool="$1"
    zpool status "$zpool" 2>/dev/null ||
    sudo zpool import -d /dev/disk/by-id/ "$zpool"
    trap "sudo zpool export $zpool" EXIT
    # Select filesystems below $zpool/$USER/backup whose "mounted" column
    # is "no", then mount each one (xargs: -o reattach a tty for the
    # passphrase prompt, -r skip if empty, -t trace, -L1 one per line;
    # zfs mount: -O overlay, -l load keys, -v verbose).
    zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
    grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
    ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
    )
  }
'';
52 # Trigger backups when disks are plugged
53 services.udev.extraRules = ''
54 ACTION=="add", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
55 # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
56 ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
57 '';
# Show what's happening to the user:
# an xterm on the local X display tailing the journal of the
# force-import and backup units for the plugged-in pool (%I).
systemd.services."zfs-term@" = {
  description = "ZFS terminal for: %I";
  unitConfig.StopWhenUnneeded = false;
  # Display on julm's local X session — assumes :0 is active and
  # ~julm/.Xauthority authorizes root; TODO confirm on multi-seat setups.
  environment.DISPLAY = ":0";
  environment.XAUTHORITY = "/home/julm/.Xauthority";
  after = [ "graphical.target" ];
  # Tear the terminal down when the USB disk device (udev alias) vanishes.
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  serviceConfig = {
    Type = "simple";
    PrivateTmp = true;
    # Fixed: this script was misnamed "zfs-force-import" (copy-paste from
    # the zfs-force-import@ service below), which mislabeled its store
    # path and journal entries; name it after the unit it serves.
    ExecStart = pkgs.writeShellScript "zfs-term" ''
      DESTPOOL=$1
      set -eux
      ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
        -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
          -u zfs-force-import@$DESTPOOL \
          -u zfs-local-backup-home@$DESTPOOL"
    '' + " %I";
  };
};
# Force zpool import, even if the disk has not been exported, or has been imported on another computer
systemd.services."zfs-force-import@" = {
  description = "ZFS force import: %I";
  unitConfig = {
    # With Restart=on-failure below, retry the import at most 5 times
    # within 200 seconds before giving up.
    StartLimitBurst = 5;
    StartLimitInterval = 200;
    StopWhenUnneeded = true;
  };
  wants = [ "zfs-term@%i.service" ];
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  # Prefer the booted system's tools so zfs/zpool match the running kernel module.
  path = lib.mkBefore [ "/run/booted-system/sw" ];
  serviceConfig = {
    # oneshot + RemainAfterExit: dependent units see this as "active"
    # after a successful import, without a long-running process.
    Type = "oneshot";
    RemainAfterExit = true;
    PrivateTmp = true;
    SyslogIdentifier = "zfs-force-import@%i";
    Restart = "on-failure";
    ExecStart = pkgs.writeShellScript "zfs-force-import" ''
      DESTPOOL=$1
      set -eux
      # Import the zpool, using stable paths
      # (bare "zpool import" only lists importable pools; best-effort).
      zpool import -d /dev/disk/by-id/ || true
      # Escalating fallback chain: normal import with key-load and
      # recovery rewind (-lF); else reopen if already imported; else
      # force import (-f) despite a foreign/unclean hostid; else check
      # whether an extreme rewind could recover it (-nFX is non-destructive
      # — presumably left as a diagnostic last resort; TODO confirm).
      zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
      zpool reopen "$DESTPOOL" ||
      zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
      zpool clear -nFX "$DESTPOOL"
    '' + " %I";
  };
};
# Prune old snapshots on the backup and send new ones.
# %I/%i is the destination zpool name (e.g. WD10JPVT), injected by the
# udev rule above; zfs-force-import@%i must have imported it first.
systemd.services."zfs-local-backup-home@" = {
  description = "ZFS backup home, on: %I";
  wants = [ "zfs-term@%i.service" ];
  after = [ "zfs-force-import@%i.service" ];
  requires = [ "zfs-force-import@%i.service" ];
  bindsTo = [ "sys-subsystem-usb-%i.device" ];
  path = lib.mkBefore [ "/run/booted-system/sw" ];
  # rec so ExecStart* scripts can splice ${User} into zfs allow/paths.
  serviceConfig = rec {
    Type = "oneshot";
    PrivateTmp = true;
    # Writable state for sanoid under /var/cache and /run (see --cache-dir
    # and --run-dir below), since the main process runs unprivileged.
    CacheDirectory = [ "zfs-usb-backup/%I" ];
    RuntimeDirectory = [ "zfs-usb-backup/%I" ];
    User = "julm";
    Group = "users";
    SyslogIdentifier = "zfs-local-backup-home@%i";
    # "+" prefix: run as root to delegate the needed ZFS permissions
    # to ${User}, so ExecStart can run without privilege elevation.
    ExecStartPre = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
      DESTPOOL=$1
      set -eux
      if zpool status "$DESTPOOL"; then
        zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
        zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
        # Pause any scrub in progress so it does not slow down the send.
        zpool scrub -p "$DESTPOOL" || true
      fi
    '' + " %I";
    # Runs as ${User}: prune old snapshots on the backup disk with a
    # transient sanoid config, then send new ones with syncoid.
    ExecStart = pkgs.writeShellScript "zfs-local-backup-home" ''
      set -eu
      DESTPOOL=$1
      # sanoid is quite conservative:
      # by setting hourly=24, a snapshot must be >24 hours old
      # and there must been >24 total hourly snapshots,
      # or nothing is pruned.
      install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
      [template_remote]
      autoprune=true
      autosnap=false
      process_children_only=false

      [$DESTPOOL/${User}/backup/${hostName}/home]
      hourly=6
      daily=31
      monthly=3
      recursive=true
      use_template=remote
      EOF
      set -x
      ${pkgs.sanoid}/bin/sanoid \
        --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
        --configdir /tmp/sanoid \
        --prune-snapshots \
        --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
        --verbose

      # Raw sends (--sendoptions raw) keep encrypted datasets encrypted;
      # --no-sync-snap reuses sanoid's snapshots instead of creating new ones.
      for dataset in ${hostName}/home; do
        ${pkgs.sanoid}/bin/syncoid \
          --create-bookmark \
          --exclude "home/room" \
          --force-delete \
          --no-privilege-elevation \
          --no-sync-snap \
          --recursive \
          --recvoptions "" \
          --sendoptions raw \
          --skip-parent \
          "$dataset" \
          "$DESTPOOL"/${User}/backup/"$dataset"
      done
    '' + " %I";
    # "+" prefix: back to root to scrub and cleanly export the disk.
    ExecStartPost = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
      DESTPOOL=$1
      set -eux
      # Only if the zpool still exists to avoid uninterruptible hanging
      if zpool status -v "$DESTPOOL"; then
        # Let the zpool scrub for 1 minute (in the background)
        zpool scrub "$DESTPOOL"
        sleep 60
      fi
      # Pause the scrub and retry exporting until the disk is gone
      # (export fails while datasets are busy, hence the loop).
      while zpool status -v "$DESTPOOL"; do
        zpool scrub -p "$DESTPOOL" || true
        sleep 20
        # Export the zpool (to avoid a forced import later on)
        zpool export "$DESTPOOL" || true
      done
      systemctl --no-block stop zfs-term@"$DESTPOOL"
    '' + " %I";
  };
};
# Shortcut for browsing the WD10JPVT backup disk (see mount-zfs-backup above).
programs.bash.shellAliases.mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";
198 }