Merge branch 'main' of ssh://git.0x76.dev:42/v/infrastructure

This commit is contained in:
Vivian 2022-10-03 10:44:35 +02:00
commit b3dab11a6b
9 changed files with 287 additions and 8 deletions

View file

@ -111,6 +111,7 @@
buildInputs = with pkgs; [
  apply-local
  colmena.packages.${system}.colmena
  cachix
  fluxcd
  k9s
  kubectl

View file

@ -41,6 +41,7 @@
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKME+A5zu36tMIsY+PBoboizgAzt6xReUNrKRBkxvl3i victor@null"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC8llUcEBHsLqotFZc++LNP2fjItuuzeUsu5ObXecYNj victor@eevee"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICBhJAp7NWlHgwDYd2z6VNROy5RkeZHRINFLsFvwT4b3 victor@bastion"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMMbdjysLnmwJD5Fs/SjBPstdIQNUxy8zFHP0GlhHMJB victor@bastion"
"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIM3TqXaApX2JZsgfZd7PKVFMecDgqTHKibpSzgdXNpYAAAAABHNzaDo= solov2-le"
];

View file

@ -50,13 +50,11 @@
hostname = "rtorrent";
ip = "192.168.0.111";
mac = "7a:5f:9b:62:49:91";
nix = false;
}
{
hostname = "minio";
ip = "192.168.0.112";
mac = "ae:c6:94:bb:c5:d9";
nix = false;
}
{
hostname = "cshub2";
@ -132,7 +130,6 @@
hostname = "docker-registry-proxy";
ip = "192.168.0.128";
mac = "0e:11:65:62:66:9f";
nix = false;
}
{
hostname = "hassio";
@ -144,7 +141,6 @@
hostname = "docker-registry";
ip = "192.168.0.130";
mac = "5e:0e:a6:cf:64:70";
nix = false;
}
{
hostname = "minecraft";

View file

@ -0,0 +1,40 @@
{ config, pkgs, lib, ... }:
let vs = config.vault-secrets.secrets; in
{
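  # Pull-through cache for Docker Hub, storing layers in the S3 (MinIO) bucket
  # configured below; the S3 credentials come from the Vault-managed environment file.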
  system.stateVersion = "22.05";
  networking.interfaces.eth0.useDHCP = true;

  # The registry port and the metrics port
  networking.firewall.allowedTCPPorts = [ config.services.dockerRegistry.port 5001 ];

  vault-secrets.secrets.docker-registry = { };
  # Sets the MinIO user and password for the S3 backend
  systemd.services.docker-registry.serviceConfig.EnvironmentFile = "${vs.docker-registry}/environment";

  services.dockerRegistry = {
    enable = true;
    enableDelete = true;
    enableGarbageCollect = true;
    listenAddress = "0.0.0.0";
    storagePath = null; # We want to store in S3 instead
    garbageCollectDates = "weekly";

    extraConfig = {
      # S3 storage
      storage.s3 = {
        regionendpoint = "https://o.xirion.net";
        bucket = "docker-registry-proxy";
        region = "us-east-1"; # Fake, but the S3 driver requires it
      };

      # The actual proxy
      proxy.remoteurl = "https://registry-1.docker.io";

      # Expose Prometheus metrics under :5001/metrics
      http.debug.addr = "0.0.0.0:5001";
      http.debug.prometheus.enabled = true;
    };
  };
}

View file

@ -0,0 +1,49 @@
{ config, pkgs, lib, ... }:
let vs = config.vault-secrets.secrets; in
{
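  # Private registry backed by the same S3 (MinIO) endpoint; pushed images are
  # reported to keel via the webhook endpoint configured below.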
  system.stateVersion = "22.05";
  networking.interfaces.eth0.useDHCP = true;

  # The registry port and the metrics port
  networking.firewall.allowedTCPPorts = [ config.services.dockerRegistry.port 5001 ];

  vault-secrets.secrets.docker-registry = { };
  # Sets the MinIO user and password for the S3 backend
  systemd.services.docker-registry.serviceConfig.EnvironmentFile = "${vs.docker-registry}/environment";

  services.dockerRegistry = {
    enable = true;
    enableDelete = true;
    enableGarbageCollect = true;
    listenAddress = "0.0.0.0";
    storagePath = null; # We want to store in S3 instead
    garbageCollectDates = "weekly";

    extraConfig = {
      # S3 storage
      storage.s3 = {
        regionendpoint = "https://o.xirion.net";
        bucket = "docker-registry";
        region = "us-east-1"; # Fake, but the S3 driver requires it
      };

      # Expose Prometheus metrics under :5001/metrics
      http.debug.addr = "0.0.0.0:5001";
      http.debug.prometheus.enabled = true;

      # Webhooks
      notifications.endpoints = [
        {
          name = "keel";
          url = "http://10.10.10.17:9300/v1/webhooks/registry";
          timeout = "500ms";
          threshold = 5;
          backoff = "1s";
        }
      ];
    };
  };
}

View file

@ -1,6 +1,9 @@
{ config, pkgs, lib, ... }:
let vs = config.vault-secrets.secrets;
in {
let
  vs = config.vault-secrets.secrets;
  cfg = config.services.mastodon;
in
{
  system.stateVersion = "21.05";
  # Use DHCP with static leases
  networking.interfaces.eth0.useDHCP = true;
@ -16,6 +19,8 @@ in {
  vault-secrets.secrets.mastodon = {
    services = [ "mastodon-init-dirs" "mastodon" ];
    user = cfg.user;
    group = cfg.group;
  };
  # Append the init-dirs script to add AWS/Minio secrets
@ -87,6 +92,7 @@ in {
    };
  };
  networking.firewall = let cfg = config.services.mastodon;
  in { allowedTCPPorts = [ cfg.streamingPort cfg.webPort ]; };
  networking.firewall =
    let cfg = config.services.mastodon;
    in { allowedTCPPorts = [ cfg.streamingPort cfg.webPort ]; };
}

View file

@ -0,0 +1,18 @@
{ config, pkgs, ... }:
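# MinIO object storage: the firewall opens the S3 API port (9000) and, by
# default, the web console port (9001); root credentials come from Vault.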
let
  vs = config.vault-secrets.secrets;
in
{
  system.stateVersion = "22.11";

  networking.firewall.allowedTCPPorts = [ 9000 9001 ];
  networking.interfaces.eth0.useDHCP = true;

  vault-secrets.secrets.minio = { };

  services.minio = {
    enable = true;
    rootCredentialsFile = "${vs.minio}/environment";
  };
}

View file

@ -0,0 +1,52 @@
{ config, pkgs, ... }:
let vs = config.vault-secrets.secrets; in
{
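  # rtorrent with the flood web UI; downloads go to the NFS storage mount and
  # torrent traffic leaves through the Mullvad WireGuard tunnel configured below.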
  imports = [
    ./rtorrent.nix
  ];

  networking.interfaces.eth0.useDHCP = true;
  system.stateVersion = "22.05";

  fileSystems."/mnt/storage" = {
    device = "storage:/mnt/storage";
    fsType = "nfs";
  };

  services.flood = {
    enable = true;
    host = "0.0.0.0";
    openFirewall = true;
    downloadDir = config.services.rtorrent.downloadDir;
  };

  vault-secrets.secrets.rtorrent = {
    services = [ "wg-quick-wg0" ];
  };

  # Kept for reference: bypass the VPN and route the olympus IPs via the router.
  # This is now handled by the postUp rule on wg0 below.
  # networking.interfaces.eth0.ipv4.routes = [{
  #   address = "10.42.42.0";
  #   prefixLength = 23;
  #   via = "192.168.0.1";
  # }];

  # Mullvad VPN
  networking.wg-quick.interfaces = {
    wg0 = {
      address = [ "10.66.153.191/32" "fc00:bbbb:bbbb:bb01::3:99be/128" ];
      dns = [ "193.138.218.74" ];
      privateKeyFile = "${vs.rtorrent}/wireguardKey";
      # Route the olympus IPs (10.42.42.0/23) via the router instead of the tunnel
      postUp = "${pkgs.iproute2}/bin/ip route add 10.42.42.0/23 via 192.168.0.1";
      peers = [
        {
          publicKey = "hnRorSW0YHlHAzGb4Uc/sjOqQIrqDnpJnTQi/n7Rp1c=";
          allowedIPs = [ "0.0.0.0/0" "::/0" ];
          endpoint = "185.65.134.223:51820";
          persistentKeepalive = 25;
        }
      ];
    };
  };
}

View file

@ -0,0 +1,116 @@
{ config, lib, pkgs, ... }:
{
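  # Embedded rtorrent.rc for the jesec rtorrent fork, run as a daemon;
  # pkgs.lib.mkForce makes this text replace the module's generated config entirely.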
  services.rtorrent = {
    enable = true;
    port = 54945; # Port forwarded in Mullvad
    downloadDir = "/mnt/storage/torrents/r";
    package = pkgs.jesec-rtorrent;
    configText = let cfg = config.services.rtorrent; in
      pkgs.lib.mkForce ''
# rTorrent runtime directory (cfg.basedir) [default: "$HOME/.local/share/rtorrent"]
method.insert = cfg.basedir, private|const|string, (cat,"${cfg.dataDir}/")
# Default download directory (cfg.download) [default: "$(cfg.basedir)/download"]
method.insert = cfg.download, private|const|string, (cat,"${cfg.downloadDir}")
# RPC Socket
method.insert = cfg.rpcsock, private|const|string, (cat,"${cfg.rpcSocket}")
# Log directory (cfg.logs) [default: "$(cfg.basedir)/log"]
method.insert = cfg.logs, private|const|string, (cat,(cfg.basedir),"log/")
method.insert = cfg.logfile, private|const|string, (cat,(cfg.logs),"rtorrent-",(system.time),".log")
# Torrent session directory (cfg.session) [default: "$(cfg.basedir)/.session"]
method.insert = cfg.session, private|const|string, (cat,(cfg.basedir),".session/")
# Watch (drop to add) directories (cfg.watch) [default: "$(cfg.basedir)/watch"]
method.insert = cfg.watch, private|const|string, (cat,(cfg.basedir),"watch/")
# Create directories
fs.mkdir.recursive = (cat,(cfg.basedir))
fs.mkdir = (cat,(cfg.download))
fs.mkdir = (cat,(cfg.logs))
fs.mkdir = (cat,(cfg.session))
fs.mkdir = (cat,(cfg.watch))
fs.mkdir = (cat,(cfg.watch),"/load")
fs.mkdir = (cat,(cfg.watch),"/start")
# Drop to "$(cfg.watch)/load" to add torrent
schedule2 = watch_load, 11, 10, ((load.verbose, (cat, (cfg.watch), "load/*.torrent")))
# Drop to "$(cfg.watch)/start" to add torrent and start downloading
schedule2 = watch_start, 10, 10, ((load.start_verbose, (cat, (cfg.watch), "start/*.torrent")))
# Listening port for incoming peer traffic
network.port_range.set = ${toString cfg.port}-${toString cfg.port}
network.port_random.set = no
# Distributed Hash Table and Peer EXchange
dht.mode.set = disable
dht.port.set = 6881
protocol.pex.set = yes
# UDP tracker support
trackers.use_udp.set = yes
# Peer settings
throttle.max_uploads.set = 100
throttle.max_uploads.global.set = 250
throttle.min_peers.normal.set = 20
throttle.max_peers.normal.set = 60
throttle.min_peers.seed.set = 30
throttle.max_peers.seed.set = 80
trackers.numwant.set = 80
protocol.encryption.set = allow_incoming,try_outgoing,enable_retry
# Limits for file handle resources; this is optimized for
# an `ulimit` of 1024 (a common default). You MUST leave
# a ceiling of handles reserved for rTorrent's internal needs!
network.max_open_files.set = 600
network.max_open_sockets.set = 300
# Memory resource usage (increase if you have a large number of items loaded,
# and/or the available resources to spend)
pieces.memory.max.set = 1800M
network.xmlrpc.size_limit.set = 32M
# Basic operational settings
session.path.set = (cat, (cfg.session))
directory.default.set = (cat, (cfg.download))
log.execute = (cat, (cfg.logs), "execute.log")
# Other operational settings
encoding.add = utf8
system.umask.set = 0027
system.cwd.set = (directory.default)
#schedule2 = low_diskspace, 5, 60, ((close_low_diskspace, 500M))
#pieces.hash.on_completion.set = no
# HTTP and SSL
network.http.max_open.set = 50
network.http.dns_cache_timeout.set = 25
#network.http.ssl_verify_peer.set = 1
#network.http.ssl_verify_host.set = 1
# Run the rTorrent process as a daemon in the background
system.daemon.set = true
# XML-RPC interface
network.scgi.open_local = (cat,(cfg.rpcsock))
schedule = scgi_group,0,0,"execute.nothrow=chown,\":rtorrent\",(cfg.rpcsock)"
schedule = scgi_permission,0,0,"execute.nothrow=chmod,\"g+w,o=\",(cfg.rpcsock)"
# Logging:
# Levels = critical error warn notice info debug
# Groups = connection_* dht_* peer_* rpc_* storage_* thread_* tracker_* torrent_*
print = (cat, "Logging to ", (cfg.logfile))
log.open_file = "log", (cfg.logfile)
log.add_output = "debug", "log"
'';
  };
}