# none is the recommended I/O scheduler (elevator) for ZFS, which has its own
# scheduler, and for SSDs in general; HDDs can use mq-deadline instead.
services.udev.extraRules = ''
- # set none scheduler for non-rotating disks
- ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
- ACTION=="add|change", KERNEL=="nvme[0-9]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
+ ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
+ ACTION=="add|change", KERNEL=="sd[a-z][0-9]*", ATTR{../queue/rotational}=="0", ATTR{../queue/scheduler}="none"
+ ACTION=="add|change", KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ATTR{../queue/rotational}=="0", ATTR{../queue/scheduler}="none"
'';
boot.supportedFilesystems = [ "zfs" ];
boot.initrd.supportedFilesystems = [ "zfs" ];
- boot.zfs.requestEncryptionCredentials = lib.mkDefault [ hostName ];
# Using ZFS together with hibernation (suspend to disk)
# may cause filesystem corruption.
# See https://github.com/openzfs/zfs/issues/260
boot.kernelParams = [ "nohibernate" ];
- # Ensure extra safeguards are active that zfs uses to protect zfs pools.
+ # Stable enough, clearer, and faster than the default /dev/disk/by-id
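+ # (by-partlabel only works if the pool's member partitions carry GPT partition labels)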
+ boot.zfs.devNodes = "/dev/disk/by-partlabel";
+ # Force-importing all pools has not been useful so far.
boot.zfs.forceImportAll = false;
- boot.zfs.forceImportRoot = false;
+ # Forcing the root pool import is more resilient for remote hosts,
+ # though it may call zpool clear.
+ boot.zfs.forceImportRoot = true;
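+ # Only prompt for the key of the host pool's encrypted root dataset.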
+ boot.zfs.requestEncryptionCredentials = lib.mkDefault [ "${hostName}/root" ];
boot.zfs.enableUnstable = false;
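+ # First Sunday of every month at 00:15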
services.zfs.trim.interval = "Sun *-*-01..07 00:15:00";
environment.systemPackages = [
+ pkgs.lzop # For remote syncoid
+ pkgs.mbuffer # For remote syncoid
pkgs.sanoid
];
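+ # Hedged sketch (commented out): syncoid compresses with lzop and buffers with
+ # mbuffer by default when they are available; a push job via the NixOS
+ # services.syncoid module could look roughly like this, where the backup host
+ # and target pool are placeholders rather than part of this config:
+ #   services.syncoid = {
+ #     enable = true;
+ #     commands."${hostName}/root".target = "backup-host:backup/${hostName}";
+ #   };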
}