user = "sevy";
#proxyJump = "mermet.wg";
};
+ "courge.wg" = {
+ user = "mo";
+ #proxyJump = "mermet.wg";
+ };
} //
lib.genAttrs [ "lan.losurdo.sourcephile.fr" "losurdo.wg" ]
(_: {
--- /dev/null
+{ pkgs, lib, config, hostName, ... }:
+{
+ imports = [
+ ../home-manager/profiles/essential.nix
+ ../home-manager/profiles/vim.nix
+ ../home-manager/options.nix
+ (import (mo/hosts + "/${hostName}.nix"))
+ ];
+ programs.firefox.profiles =
+ let
+ defaultProfile = {
+ settings = {
+ "browser.bookmarks.showMobileBookmarks" = true;
+ "browser.compactmode.show" = true;
+ "browser.search.isUS" = false;
+ "browser.search.region" = "FR";
+ "distribution.searchplugins.defaultLocale" = "fr-FR";
+ "dom.security.https_first" = true;
+ "dom.security.https_only_mode" = true;
+ "general.useragent.locale" = "fr-FR";
+ "privacy.globalprivacycontrol.enabled" = true;
+ "privacy.globalprivacycontrol.functionality.enabled" = true;
+ "security.identityblock.show_extended_validation" = true;
+ "toolkit.legacyUserProfileCustomizations.stylesheets" = true;
+ #"privacy.firstparty.isolate" = true;
+ };
+ userChrome = lib.readFile ../home-manager/profiles/firefox/userChrome.css;
+ };
+ in
+ {
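+      # lib.mkMerge combines the shared defaultProfile with the
+      # per-profile overrides below; the module system merges both attrsets.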
+ "646f487e.default" = lib.mkMerge [
+ defaultProfile
+ {
+ id = 0;
+ name = "default";
+ settings = {
+ "browser.startup.homepage" = "https://google.fr";
+ };
+ }
+ ];
+ };
+}
--- /dev/null
+{ pkgs, lib, config, ... }:
+{
+ imports = [
+ ../../../home-manager/profiles/gnupg.nix
+ ../../../home-manager/profiles/graphical.nix
+ ../../../home-manager/profiles/networking.nix
+ ../../../home-manager/profiles/office.nix
+ ../../../home-manager/profiles/science.nix
+ ../../../home-manager/profiles/sharing.nix
+ ../../../home-manager/profiles/video.nix
+ ../../../home-manager/profiles/wireless.nix
+
+ ../../../home-manager/profiles/firefox.nix
+ ];
+ home.sessionVariables = { };
+ home.packages = [
+ pkgs.gthumb
+ pkgs.dino
+ #pkgs.chromium
+ pkgs.fluidsynth
+ pkgs.gpsbabel
+ #(pkgs.qgis.override { extraPythonPackages = (ps: [
+ # ps.pyqt5_with_qtwebkit
+ #]); })
+ #pkgs.libva-utils
+ #pkgs.ristretto
+ pkgs.xfce.mousepad
+ #pkgs.mate.pluma
+ pkgs.wxmaxima
+ pkgs.espeak-ng
+ pkgs.iodine
+ #pkgs.qsynth
+ ];
+
+ /* Cannot be automounted
+ systemd.user.mounts = {
+ mnt-aubergine = {
+ Unit = {
+ Wants = [
+ "network-online.target"
+ "wireguard-wg-intra.target"
+ ];
+ After = [
+ "network-online.target"
+ "wireguard-wg-intra.target"
+ ];
+ };
+ Install = {
+ WantedBy = ["default.target"];
+ };
+ Mount = {
+ What = "julm@aubergine.wg:/";
+ Where = "/mnt/aubergine";
+ Type = "fuse.sshfs";
+ Options = lib.concatStringsSep "," [
+ "user"
+ "uid=julm"
+ "gid=users"
+ "allow_other"
+ "exec" # Override "user"'s noexec
+ "noatime"
+ "nosuid"
+ "noauto"
+ "dir_cache=no"
+ #"reconnect"
+ "x-gvfs-hide"
+ # Does not work for user mounts
+ #"x-systemd.automount"
+ "IdentityFile=/home/julm/.ssh/id_ed25519"
+ #"Compression=yes" # YMMV
+ # Disconnect approximately 2*15=30 seconds after a network failure
+ "ServerAliveCountMax=1"
+ "ServerAliveInterval=15"
+ ];
+ };
+ };
+ };
+ */
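+  # Manual fallback (a sketch; same options as the unit above):
+  #   sshfs julm@aubergine.wg:/ /mnt/aubergine -o IdentityFile=/home/julm/.ssh/id_ed25519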
+ /*
+ Automounting does not work without root privileges
+ systemd.user.automounts = {
+ mnt-aubergine = {
+ Install = {
+ WantedBy = ["user.target"];
+ };
+ Unit = {
+ };
+ Automount = {
+ Where = "/mnt/aubergine";
+ TimeoutIdleSec = "5 min";
+ };
+ };
+ };
+ */
+}
--- /dev/null
+{ config, pkgs, lib, inputs, hostName, ... }:
+{
+ imports = [
+ ../nixos/profiles/graphical.nix
+ ../nixos/profiles/lang-fr.nix
+ ../nixos/profiles/printing.nix
+ courge/backup.nix
+ courge/hardware.nix
+ courge/networking.nix
+ ];
+
+  # Lower the kernel's security for better performance
+ boot.kernelParams = [ "mitigations=off" ];
+
+ home-manager.users.mo = {
+ imports = [ ../homes/mo.nix ];
+ };
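+  # Prune old home-manager generations after each activation;
+  # nix-env's "+1" keeps only the most recent profile generation.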
+ systemd.services.home-manager-mo.postStart = ''
+ ${pkgs.nix}/bin/nix-env --delete-generations +1 --profile /nix/var/nix/profiles/per-user/mo/home-manager
+ '';
+ users.users.root = {
+ openssh.authorizedKeys.keys = map lib.readFile [
+ ../users/julm/ssh/oignon.pub
+ ../users/julm/ssh/losurdo.pub
+ ];
+ };
+ users.users.mo = {
+ isNormalUser = true;
+ uid = 1000;
+    # Note: this puts the hashedPassword into the world-readable /nix/store,
+    # whereas /etc/shadow is not world-readable.
+    # Generate it with: printf %s $(mkpasswd -m md5crypt)
+ hashedPassword = lib.readFile oignon/users/mo/login/hashedPassword.clear;
+ extraGroups = [
+ "adbusers"
+ "dialout"
+ "lp"
+ "networkmanager"
+ #"plugdev" # For rtl-sdr
+ "scanner"
+ #"tor"
+ "video"
+ "wheel"
+ #"ipfs"
+ config.services.davfs2.davGroup
+ #"vboxusers"
+ ];
+    # If createHome were enabled, zfs-mount.service would require:
+    #   zfs set overlay=yes ${hostName}/home
+ createHome = false;
+ openssh.authorizedKeys.keys = map lib.readFile [
+ ../users/julm/ssh/oignon.pub
+ ../users/julm/ssh/losurdo.pub
+ ];
+ };
+
+ environment.systemPackages = [
+ #pkgs.riseup-vpn # Can't be installed by home-manager because it needs to install policy-kit rules
+ ];
+
+ boot.extraModulePackages = [
+ ];
+
+ #programs.fuse.userAllowOther = true;
+
+ services.physlock = {
+ enable = true;
+ allowAnyUser = true;
+ # NOTE: xfconf-query -c xfce4-session -p /general/LockCommand -s "physlock" --create -t string
+ };
+
+ services.xserver = {
+ desktopManager = {
+ mate.enable = true;
+ xfce.enable = false;
+ xterm.enable = false;
+ };
+ displayManager = {
+ defaultSession = "mate";
+ autoLogin = {
+ user = config.users.users.mo.name;
+ };
+ };
+ };
+
+ # This value determines the NixOS release with which your system is to be
+ # compatible, in order to avoid breaking some software such as database
+ # servers. You should change this only after NixOS release notes say you should.
+ system.stateVersion = "23.05"; # Did you read the comment?
+}
#cwd := $(notdir $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST))))))
hostName := courge
-disk_ssd := /dev/disk/by-id/ata-Samsung_SSD_850_PRO_128GB_S1SMNSAFC36436X
+disk_ssd := /dev/disk/by-id/FIXME
zpool := $(hostName)
cipher := aes-128-gcm
autotrim := on
part: wipe
# https://wiki.archlinux.org/index.php/BIOS_boot_partition
- sudo $$(which sgdisk) -a1 -n0:34:2047 -t0:EF02 -c0:"$(hostName)_ssd_bios" $(disk_ssd)
- sudo $$(which sgdisk) -n0:1M:+32M -t0:EF00 -c0:"$(hostName)_ssd_efi" $(disk_ssd)
- sudo $$(which sgdisk) -n0:0:+256M -t0:8300 -c0:"$(hostName)_ssd_boot" $(disk_ssd)
+ #sudo $$(which sgdisk) -a1 -n0:34:2047 -t0:EF02 -c0:"$(hostName)_ssd_bios" $(disk_ssd)
+ sudo $$(which sgdisk) -n0::+256M -t0:EF00 -c0:"$(hostName)_ssd_efi" $(disk_ssd)
+ #sudo $$(which sgdisk) -n0:0:+256M -t0:8300 -c0:"$(hostName)_ssd_boot" $(disk_ssd)
sudo $$(which sgdisk) -n0:0:+4G -t0:8200 -c0:"$(hostName)_ssd_swap" $(disk_ssd)
sudo $$(which sgdisk) -n0:0:0 -t0:BF01 -c0:"$(hostName)_ssd_zpool" $(disk_ssd)
# https://wiki.archlinux.org/index.php/Partitioning#Tricking_old_BIOS_into_booting_from_GPT
- printf '\200\0\0\0\0\0\0\0\0\0\0\0\001\0\0\0' | sudo dd of=$(disk_ssd) bs=1 seek=462
+ #printf '\200\0\0\0\0\0\0\0\0\0\0\0\001\0\0\0' | sudo dd of=$(disk_ssd) bs=1 seek=462
sudo $$(which sgdisk) --randomize-guids $(disk_ssd)
sudo $$(which sgdisk) --backup=$(hostName)_ssd.sgdisk $(disk_ssd)
+ sudo partprobe
+ sudo udevadm settle
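+	# Optional sanity check: print the resulting partition table
+	#sudo $$(which sgdisk) -p $(disk_ssd)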
-format: umount format-efi format-boot format-zpool
+format: umount format-efi format-zpool
format-efi:
sudo blkid /dev/disk/by-partlabel/$(hostName)_ssd_efi -t TYPE=vfat || \
- sudo mkfs.vfat -F 16 -s 1 -n EFI /dev/disk/by-partlabel/$(hostName)_ssd_efi
-format-boot:
- sudo mkdir -p /mnt/$(hostName)
- sudo blkid -t TYPE=ext2 /dev/disk/by-partlabel/$(hostName)_ssd_boot; test $$? != 2 || \
- sudo mkfs.ext2 /dev/disk/by-partlabel/$(hostName)_ssd_boot
+ sudo mkfs.vfat -F 32 -s 1 -n EFI /dev/disk/by-partlabel/$(hostName)_ssd_efi
format-zpool:
sudo zpool list $(zpool) 2>/dev/null || \
  sudo zpool create -o ashift=12 \
   -O dnodesize=auto \
   -O relatime=on \
   -O xattr=off \
-   -O mountpoint=/ \
+   -O mountpoint=none \
   $(zpool)
# https://nixos.wiki/wiki/NixOS_on_ZFS#Reservations
sudo zfs list $(zpool)/reserved 2>/dev/null || \
sudo zfs create -o canmount=off -o mountpoint=none $(zpool)/reserved
sudo zfs set refreservation=$(reservation) $(zpool)/reserved
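+# Deleting files can fail on a 100%-full copy-on-write pool;
+# shrinking this refreservation then frees enough space to recover.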
# /
- # mountpoint=legacy is required to let NixOS mount the ZFS filesystems.
sudo zfs list $(zpool)/root 2>/dev/null || \
- sudo zfs create \
- -o canmount=on \
- -o mountpoint=legacy \
- $(zpool)/root
+ sudo zfs create -o canmount=on -o mountpoint=/ $(zpool)/root
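+# Non-legacy mountpoints: NixOS later mounts these with `-o zfsutil`
+# (see the fileSystems options in courge/hardware.nix).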
# /*
- for p in \
- nix \
- var \
- ; do \
- sudo zfs list $(zpool)/"$$p" 2>/dev/null || \
- sudo zfs create \
- -o canmount=on \
- -o mountpoint=legacy \
- $(zpool)/"$$p" ; \
+ for p in nix home var; do \
+ sudo zfs list $(zpool)/root/"$$p" 2>/dev/null || \
+ sudo zfs create $(zpool)/root/"$$p" ; \
done
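+	# Children inherit their mountpoint from $(zpool)/root (/),
+	# so e.g. $(zpool)/root/nix ends up at /nix.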
- #sudo zfs set sync=disabled $(zpool)/var/tmp
- #sudo zfs set copies=2 $(zpool)/home/files
+ #sudo zfs set sync=disabled $(zpool)/root/var/tmp
+ #sudo zfs set copies=2 $(zpool)/root/home/files
-mount: mount-zpool mount-boot mount-efi
+mount: mount-zpool mount-efi
mount-zpool:
# scan needed zpools
sudo zpool list $(zpool) || \
sudo zpool import -f $(zpool)
# load encryption key
- sudo zfs get -H encryption $(zpool) | \
- grep -q '^$(zpool)\s*encryption\s*off' || \
- sudo zfs get -H keystatus $(zpool) | \
- grep -q '^$(zpool)\s*keystatus\s*available' || \
- sudo zfs load-key $(zpool)
+ sudo zfs get -H encryption $(zpool)/root | \
+ grep -q '^$(zpool)/root\s*encryption\s*off' || \
+ sudo zfs get -H keystatus $(zpool)/root | \
+ grep -q '^$(zpool)/root\s*keystatus\s*available' || \
+ sudo zfs load-key $(zpool)/root
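+	# i.e. prompt for the passphrase only when the dataset is encrypted
+	# and its key is not yet loaded.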
# /
sudo mkdir -p /mnt/$(hostName)
sudo mountpoint /mnt/$(hostName) || \
- sudo mount -v -t zfs $(zpool)/root /mnt/$(hostName)
+	sudo mount -v -o zfsutil -t zfs $(zpool)/root /mnt/$(hostName)
# /*
- for p in \
- nix \
- var \
- ; do \
+ for p in nix var ; do \
sudo mkdir -p /mnt/$(hostName)/"$$p"; \
sudo mountpoint /mnt/$(hostName)/"$$p" || \
- sudo mount -v -t zfs $(zpool)/"$$p" /mnt/$(hostName)/"$$p" ; \
+	sudo mount -v -o zfsutil -t zfs $(zpool)/root/"$$p" /mnt/$(hostName)/"$$p" ; \
done
#sudo chmod 1777 /mnt/$(hostName)/var/tmp
-mount-boot:
- sudo mkdir -p /mnt/$(hostName)/boot
- sudo mountpoint /mnt/$(hostName)/boot || \
- sudo mount -v /dev/disk/by-partlabel/$(hostName)_ssd_boot /mnt/$(hostName)/boot
- #sudo mount -v -t zfs bpool/boot /mnt/$(hostName)/boot
-mount-efi: | mount-boot
+mount-efi:
sudo mkdir -p /mnt/$(hostName)/boot/efi
sudo mountpoint /mnt/$(hostName)/boot/efi || \
sudo mount -v /dev/disk/by-partlabel/$(hostName)_ssd_efi /mnt/$(hostName)/boot/efi
#sudo zpool export bpool
! sudo zpool list $(zpool) 2>/dev/null || \
sudo zpool export $(zpool)
-
-unlock:
- pass hostNames/$(hostName)/zfs/zpool | \
- NIXOPS_DEPLOYMENT="$${NIXOPS_DEPLOYMENT:-$(LOSURDO_DEPLOYMENT)}" \
- nixops ssh $(hostName) -p 2222 'zfs load-key $(zpool) && pkill zfs'
{ pkgs, lib, hostName, ... }:
with builtins;
{
- # syncoid --create-bookmark --no-privilege-elevation --no-sync-snap --recvoptions '' --sendoptions raw --recursive oignon/home off2/julm/backup/oignon/home
- # zfs list -t snapshot -o name | grep ^oignon/home | while read -r snap; do zfs bookmark "$snap" "${snap//@/#}"; done
- # Take regular snapshots, and prune old ones
services.sanoid = {
enable = true;
extraArgs = [ "--verbose" ];
datasets = {
- "${hostName}/home" = {
+ "${hostName}/root" = {
autosnap = true;
autoprune = true;
hourly = 12;
- daily = 3;
+ daily = 31;
monthly = 0;
yearly = 0;
recursive = true;
};
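+      # Sanoid names the snapshots it takes like:
+      #   ${hostName}/root@autosnap_2024-01-01_00:00:02_hourly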
- "${hostName}/var" = {
- autosnap = true;
- autoprune = true;
- hourly = 12;
- daily = 1;
- monthly = 0;
- yearly = 0;
- recursive = true;
- };
- "off2/julm/backup/oignon" = {
- autosnap = false;
- autoprune = true;
- hourly = 0;
- daily = 7;
- monthly = 3;
- yearly = 0;
- recursive = true;
- };
- };
- };
- # Trigger backups when disks are plugged
- services.udev.extraRules = ''
- ACTION=="add", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
- # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
- ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
- '';
- # Show what's happening to the user
- systemd.services."zfs-term@" = {
- description = "ZFS terminal for: %I";
- unitConfig.StopWhenUnneeded = false;
- environment.DISPLAY = ":0";
- environment.XAUTHORITY = "/home/julm/.Xauthority";
- after = [ "graphical.target" ];
- bindsTo = [ "sys-subsystem-usb-%i.device" ];
- serviceConfig = {
- Type = "simple";
- PrivateTmp = true;
- ExecStart = pkgs.writeShellScript "zfs-force-import" ''
- DESTPOOL=$1
- set -eux
- ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
- -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
- -u zfs-force-import@$DESTPOOL \
- -u zfs-local-backup-home@$DESTPOOL"
- '' + " %I";
- };
- };
- # Force zpool import, even if the disk has not been exported, or has been imported on another computer
- systemd.services."zfs-force-import@" = {
- description = "ZFS force import: %I";
- unitConfig = {
- StartLimitBurst = 5;
- StartLimitInterval = 200;
- StopWhenUnneeded = true;
};
- wants = [ "zfs-term@%i.service" ];
- bindsTo = [ "sys-subsystem-usb-%i.device" ];
- path = lib.mkBefore [ "/run/booted-system/sw" ];
- serviceConfig = {
- Type = "oneshot";
- RemainAfterExit = true;
- PrivateTmp = true;
- SyslogIdentifier = "zfs-force-import@%i";
- Restart = "on-failure";
- ExecStart = pkgs.writeShellScript "zfs-force-import" ''
- DESTPOOL=$1
- set -eux
- # Import the zpool, using stable paths
- zpool import -d /dev/disk/by-id/ || true
- zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
- zpool reopen "$DESTPOOL" ||
- zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
- zpool clear -nFX "$DESTPOOL"
- '' + " %I";
- };
- };
- # Prune old snapshots on the backup and send new ones
- systemd.services."zfs-local-backup-home@" = {
- description = "ZFS backup home, on: %I";
- wants = [ "zfs-term@%i.service" ];
- after = [ "zfs-force-import@%i.service" ];
- requires = [ "zfs-force-import@%i.service" ];
- bindsTo = [ "sys-subsystem-usb-%i.device" ];
- path = lib.mkBefore [ "/run/booted-system/sw" ];
- serviceConfig = rec {
- Type = "oneshot";
- PrivateTmp = true;
- CacheDirectory = [ "zfs-usb-backup/%I" ];
- RuntimeDirectory = [ "zfs-usb-backup/%I" ];
- User = "julm";
- Group = "users";
- SyslogIdentifier = "zfs-local-backup-home@%i";
- ExecStartPre = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
- DESTPOOL=$1
- set -eux
- if zpool status "$DESTPOOL"; then
- zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
- zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
- zpool scrub -p "$DESTPOOL" || true
- fi
- '' + " %I";
- ExecStart = pkgs.writeShellScript "zfs-local-backup-home" ''
- set -eu
- DESTPOOL=$1
- # sanoid is quite conservative:
- # by setting hourly=24, a snapshot must be >24 hours old
-      # and there must be >24 total hourly snapshots,
- # or nothing is pruned.
- install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
- [template_remote]
- autoprune=true
- autosnap=false
- process_children_only=false
-
- [$DESTPOOL/${User}/backup/${hostName}/home]
- hourly=6
- daily=31
- monthly=3
- recursive=true
- use_template=remote
- EOF
- set -x
- ${pkgs.sanoid}/bin/sanoid \
- --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
- --configdir /tmp/sanoid \
- --prune-snapshots \
- --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
- --verbose
-
- for dataset in ${hostName}/home; do
- ${pkgs.sanoid}/bin/syncoid \
- --create-bookmark \
- --exclude "home/room" \
- --force-delete \
- --no-privilege-elevation \
- --no-sync-snap \
- --recursive \
- --recvoptions "" \
- --sendoptions raw \
- --skip-parent \
- "$dataset" \
- "$DESTPOOL"/${User}/backup/"$dataset"
- done
- '' + " %I";
- ExecStartPost = "+" + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
- DESTPOOL=$1
- set -eux
-      # Only if the zpool still exists, to avoid hanging uninterruptibly
-      if zpool status -v "$DESTPOOL"; then
-        # Scrub the zpool for 1 minute (in the background)
- zpool scrub "$DESTPOOL"
- sleep 60
- fi
- while zpool status -v "$DESTPOOL"; do
- zpool scrub -p "$DESTPOOL" || true
- sleep 20
- # Export the zpool (to avoid a forced import later on)
- zpool export "$DESTPOOL" || true
- done
- systemctl --no-block stop zfs-term@"$DESTPOOL"
- '' + " %I";
- };
- };
- programs.bash.interactiveShellInit = ''
- mount-zfs-backup () {
- (
- set -eux
- zpool="$1"
- zpool status "$zpool" 2>/dev/null ||
- sudo zpool import -d /dev/disk/by-id/ "$zpool"
- trap "sudo zpool export $zpool" EXIT
- zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
- grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
- ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
- )
- }
- '';
- programs.bash.shellAliases = {
- mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";
};
}
-{ pkgs, hostName, ... }:
+{ pkgs, config, hostName, ... }:
{
imports = [
- ../../nixos/profiles/hardware/X201.nix
+ ../../nixos/profiles/hardware/T480.nix
../../nixos/profiles/zfs.nix
../../nixos/profiles/zramSwap.nix
];
# The 32-bit host id of the host, formatted as 8 hexadecimal characters.
# You should try to make this id unique among your hosts.
  # Manually generated with: uuidgen | head -c8
- networking.hostId = "ce53d0c3";
+ networking.hostId = "e6eba6c4";
- /*
- boot.loader.efi = {
- canTouchEfiVariables = true;
- efiSysMountPoint = "/boot/efi";
+ boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
+
+ boot.loader = {
+ efi = {
+ canTouchEfiVariables = true;
+ efiSysMountPoint = "/boot/efi";
+ };
+ systemd-boot = {
+ enable = true;
+ #editor = false;
+ # Roughly 25MiB (initrd) + 9MiB (kernel) per configuration
+ configurationLimit = 6;
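+      # 6 generations x ~34MiB is about 204MiB, which fits in the
+      # 256MiB ESP created by the Makefile's part target.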
+ memtest86.enable = true;
+ /*
+ extraInstallCommands = ''
+ rm -rf /efiboot/efi2
+ cp -r /efiboot/efi1 /efiboot/efi2
+ '';
+ */
+ # FIXME: needs https://github.com/NixOS/nixpkgs/pull/246897
+ #mirroredBoots = [ ];
};
- */
- boot.loader.grub = {
- enable = true;
- memtest86.enable = true;
- devices = [
- "/dev/disk/by-id/ata-Samsung_SSD_850_PRO_128GB_S1SMNSAFC36436X"
- ];
- configurationLimit = 16;
- #zfsSupport = true;
- #efiSupport = true;
- #enableCryptodisk = true;
+ #generationsDir.copyKernels = true;
};
hardware.enableRedistributableFirmware = true;
- # Note that gobi_loader -2000 has to be rerun if the SIM is hot swapped
- services.udev.extraRules = ''
- ACTION=="add", SUBSYSTEM=="tty", KERNEL=="ttyUSB*", ATTRS{idVendor}=="05c6", ATTRS{idProduct}=="9204", RUN+="${pkgs.gobi_loader}/lib/udev/gobi_loader -2000 $env{DEVNAME} /home/julm/files/thinkpad-x201/gobi"
- '';
-
- fileSystems."/boot" =
- {
- device = "/dev/disk/by-partlabel/${hostName}_ssd_boot";
- fsType = "ext2";
- };
fileSystems."/boot/efi" =
{
device = "/dev/disk/by-partlabel/${hostName}_ssd_efi";
fsType = "vfat";
+ options = [ "rw" "noexec" "nodev" "nofail" "X-mount.mkdir" "iocharset=iso8859-1" ];
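+      # `nofail` lets boot proceed if the ESP is missing;
+      # `X-mount.mkdir` creates /boot/efi on first mount.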
};
  swapDevices = [
    {
      device = "/dev/disk/by-partlabel/${hostName}_ssd_swap";
    }
  ];
  fileSystems."/" =
    {
device = "${hostName}/root";
fsType = "zfs";
+ options = [ "zfsutil" ];
};
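+  # `zfsutil` makes mount(8) call the ZFS tools, allowing datasets with
+  # native (non-legacy) mountpoints to be declared in fileSystems.*.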
fileSystems."/nix" =
{
- device = "${hostName}/nix";
+ device = "${hostName}/root/nix";
fsType = "zfs";
+ options = [ "X-mount.mkdir" "zfsutil" ];
};
fileSystems."/var" =
{
- device = "${hostName}/var";
+ device = "${hostName}/root/var";
fsType = "zfs";
+ options = [ "X-mount.mkdir" "zfsutil" ];
};
services.pipewire.jack.enable = true;
../../nixos/profiles/wireguard/wg-intra.nix
../../nixos/profiles/networking/ssh.nix
../../nixos/profiles/networking/wifi.nix
- ../../nixos/profiles/openvpn/calyx.nix
./wireguard.nix
networking/nftables.nix
];
install.substituteOnDestination = false;
- #networking.domain = "sourcephile.fr";
+ #networking.domain = "wg";
networking.useDHCP = false;
networking.nftables.ruleset = lib.mkAfter ''
}
'';
- networking.hosts = {
- #"80.67.180.129" = ["salons.sourcephile.fr"];
- };
-
networking.interfaces = { };
networking.networkmanager = {
unmanaged = [
];
};
- environment.etc."NetworkManager/system-connections/Prixtel.nmconnection" = {
- mode = "600";
- text = ''
- [connection]
- id=Prixtel
- uuid=b223f550-dff1-4ba3-9755-cd4557faaa5a
- type=gsm
- autoconnect=false
- permissions=user:julm:;
-
- [gsm]
- apn=sl2sfr
- number=*99#
- home-only=true
-
- [ppp]
-
- [ipv4]
- method=auto
-
- [ipv6]
- addr-gen-mode=stable-privacy
- method=disabled
-
- [proxy]
- '';
- };
- environment.systemPackages = [
- pkgs.iw
- pkgs.modem-manager-gui
- ];
+ services.avahi.enable = true;
+ services.avahi.openFirewall = false;
+ services.avahi.publish.enable = false;
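+  # mDNS name resolution only: nothing is published,
+  # and the mDNS port stays closed in the firewall.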
+ services.openssh.settings.X11Forwarding = true;
systemd.services.sshd.serviceConfig.LoadCredentialEncrypted = [
"host.key:${ssh/host.key.cred}"
];
+
+ services.vnstat.enable = true;
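+  # vnStat records per-interface traffic statistics over time.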
}