{ config, lib, pkgs, hostName, ... }:
{
  hardware.cpu.intel.updateMicrocode = true;
  powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";
  services.thinkfan = {
    enable = true;
    smartSupport = true;
    # FIXME: when updating to NixOS 21.05, replace the string form below
    # with the commented-out list form (the levels option becomes a list).
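    # For reference, each entry below is (FANLEVEL LOW HIGH) in thinkfan's
    # configuration format: the fan is raised to FANLEVEL once the temperature
    # exceeds HIGH and lowered again once it drops below LOW; 127 stands for
    # full speed ("disengaged").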
    levels = ''
      (0, 0, 57)
      (1, 56, 62)
      (2, 61, 65)
      (3, 64, 66)
      (4, 65, 68)
      (5, 67, 71)
      (6, 70, 76)
      (7, 75, 81)
      (127, 80, 32767)
    '';
    /*
    levels = [
      [0 0 57]
      [1 56 62]
      [2 61 65]
      [3 64 66]
      [4 65 68]
      [5 67 71]
      [6 70 76]
      [7 75 81]
      [127 80 32767]
    ];
    */
  };
  hardware.acpilight.enable = true;
  services.acpid = {
    enable = true;
    handlers = {
      brightnessDown = {
        event = "video/brightnessdown.*";
        action = "${pkgs.acpilight}/bin/xbacklight -dec 5";
      };
      brightnessUp = {
        event = "video/brightnessup.*";
        action = "${pkgs.acpilight}/bin/xbacklight -inc 5";
      };
      acAdapter = {
        event = "ac_adapter/*";
        action = ''
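          # acpid passes the whole event string as $1; for an AC adapter it
          # typically looks like "ac_adapter ACPI0003:00 00000080 00000001"
          # (device names vary), so the fourth field carries the plug state.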
          vals=($1)
          case ''${vals[3]} in
            00000000) # unplugged
              ${pkgs.linuxPackages.cpupower}/bin/cpupower frequency-set -g powersave;;
            00000001) # plugged in
              ${pkgs.linuxPackages.cpupower}/bin/cpupower frequency-set -g ondemand;;
          esac
        '';
      };
    };
  };
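  # The event patterns above can be checked against what the kernel actually
  # emits by running `acpi_listen` (shipped with acpid) on a console.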

  # intel_pstate=no_hwp disables HardWare P-states; see:
  # https://bugzilla.kernel.org/show_bug.cgi?id=110941
  boot.kernelParams = [ "intel_pstate=no_hwp" ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.cleanTmpDir = true;
  boot.tmpOnTmpfs = true;
  boot.extraModulePackages = [
    config.boot.kernelPackages.exfat-nofuse
  ];
  boot.loader.grub = {
    enable = true;
    version = 2;
    device = "/dev/disk/by-id/ata-Samsung_SSD_850_PRO_128GB_S1SMNSAFC36436X";
    configurationLimit = 3;
    #zfsSupport = true;
    /*
    efiSupport = true;
    efi = {
      canTouchEfiVariables = false;
      efiSysMountPoint = "/boot/efi";
    };
    */
    #enableCryptodisk = true;
  };

  fileSystems."/boot" =
    { device = "/dev/disk/by-partlabel/${hostName}_ssd_boot";
      fsType = "ext2";
    };
  fileSystems."/boot/efi" =
    { device = "/dev/disk/by-partlabel/${hostName}_ssd_efi";
      fsType = "vfat";
    };
  swapDevices = [
    { device = "/dev/disk/by-partlabel/${hostName}_ssd_swap";
      randomEncryption = {
        enable = true;
        cipher = "aes-xts-plain64";
        source = "/dev/urandom";
      };
    }
  ];
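  # Note: randomEncryption keys the swap from /dev/urandom at every boot,
  # so its contents never survive a reboot and this swap cannot be used
  # to resume from hibernation.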
  zramSwap = {
    enable = true;
    algorithm = lib.mkDefault "zstd";
    # There is little point creating a zram device greater than twice
    # the size of memory, since we expect a 2:1 compression ratio.
    # Note that zram uses about 0.1% of its size when not in use,
    # so a huge zram is wasteful.
    memoryPercent = lib.mkDefault 150;
    # Linux supports multithreaded compression for 1 device since 3.15.
    # See https://lkml.org/lkml/2014/2/28/404 for details.
    swapDevices = lib.mkDefault 1;
  };
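  # Worked example (assuming 4 GiB of RAM): memoryPercent = 150 creates a
  # 6 GiB zram device; at the expected 2:1 compression ratio, a full device
  # holds 6 GiB of swapped pages in about 3 GiB of actual RAM.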
  boot.kernel.sysctl = {
    # Increase cache pressure, which increases the tendency of the kernel to
    # reclaim memory used for caching directory and inode objects. Caches
    # then hold less memory over time; the earlier swapping this causes is
    # cheap, because pages go to zram first.
    "vm.vfs_cache_pressure" = lib.mkDefault 500;

    # Swap memory pages more aggressively, since zram (configured above)
    # is the first swap device.
    "vm.swappiness" = lib.mkDefault 100;

    # Start background writeback as soon as 1% of memory is dirty.
    "vm.dirty_background_ratio" = lib.mkDefault 1;

    # Do not force synchronous I/O until 50% of memory is dirty.
    "vm.dirty_ratio" = lib.mkDefault 50;
  };
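  # The effective values can be inspected on the running system with e.g.:
  #   sysctl vm.swappiness vm.vfs_cache_pressure vm.dirty_background_ratio vm.dirty_ratio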

  # The 32-bit host id of the host, formatted as 8 hexadecimal characters.
  # You should try to make this id unique among your hosts.
  # Manually generated with: head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
  networking.hostId = "ce53d0c3";
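  # The id in effect can be verified on the running system with the
  # `hostid` command, which reads it back from /etc/hostid.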

  # "none" is the recommended elevator for ZFS (which has its own I/O
  # scheduler) and for SSDs, whereas HDDs could use "mq-deadline".
  services.udev.extraRules = ''
    # set the none scheduler for non-rotating disks
    ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
  '';
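  # The scheduler actually selected can be checked after boot with e.g.:
  #   cat /sys/block/sda/queue/scheduler
  # (the active one is shown between square brackets)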

  boot.supportedFilesystems = [ "ntfs" "vfat" "zfs" ];
  boot.initrd.supportedFilesystems = [ "zfs" ];
  boot.initrd.availableKernelModules = [
    "ahci"
    "drbg"
    "ehci_pci"
    "gf128mul"
    "hmac"
    "sd_mod"
  ];

  boot.zfs.forceImportAll = false;
  boot.zfs.forceImportRoot = false;
  boot.zfs.enableUnstable = false;
  boot.zfs.requestEncryptionCredentials = [ hostName ];
  services.zfs.autoScrub.enable = true;

  fileSystems."/" =
    { device = "${hostName}/root";
      fsType = "zfs";
    };
  fileSystems."/nix" =
    { device = "${hostName}/nix";
      fsType = "zfs";
    };
  fileSystems."/var" =
    { device = "${hostName}/var";
      fsType = "zfs";
    };

}