# Development VM for hacking on Tangled: a NixOS system (intended to be run via
# the vmVariant) that hosts the full stack — did-method-plc, Bluesky PDS/relay/
# jetstream, and the Tangled knot + spindle services — behind a local Caddy
# with an internal CA. Configuration is parameterized through TANGLED_VM_*
# environment variables read at evaluation time.
{
  nixpkgs,
  system,
  hostSystem,
  self,
}: let
  # Read a required environment variable; evaluation aborts with a pointer to
  # the docs when it is unset (builtins.getEnv yields "" for unset variables,
  # so an explicitly-empty value is treated the same as unset).
  envVar = name: let
    var = builtins.getEnv name;
  in
    if var == ""
    then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
    else var;
  # Read an optional environment variable, falling back to `default` when it
  # is unset or empty.
  envVarOr = name: default: let
    var = builtins.getEnv name;
  in
    if var != ""
    then var
    else default;
  # Shared endpoints passed to both knot and spindle below.
  plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory";
  jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe";
in
  nixpkgs.lib.nixosSystem {
    inherit system;
    modules = [
      self.nixosModules.did-method-plc
      self.nixosModules.bluesky-jetstream
      self.nixosModules.bluesky-relay
      self.nixosModules.knot
      self.nixosModules.spindle
      ({
        lib,
        config,
        pkgs,
        ...
      }: {
        virtualisation.vmVariant.virtualisation = {
          # Build the VM runner with the host's package set so the VM can be
          # launched from a machine whose platform differs from `system`.
          host.pkgs = import nixpkgs {system = hostSystem;};
          graphics = false;
          memorySize = 2048; # MiB
          diskSize = 10 * 1024; # MiB
          cores = 2;
          forwardPorts = [
            # caddy
            {
              from = "host";
              host.port = 80;
              guest.port = 80;
            }
            {
              from = "host";
              host.port = 443;
              guest.port = 443;
            }
            # UDP 443 as well (Caddy serves HTTP/3 over QUIC on UDP).
            {
              from = "host";
              proto = "udp";
              host.port = 443;
              guest.port = 443;
            }
            # ssh
            {
              from = "host";
              host.port = 2222;
              guest.port = 22;
            }
            # knot
            {
              from = "host";
              host.port = 6444;
              guest.port = 6444;
            }
            # spindle
            {
              from = "host";
              host.port = 6555;
              guest.port = 6555;
            }
          ];
          sharedDirectories = {
            # We can't use the 9p mounts directly for most of these
            # as SQLite is incompatible with them. So instead we
            # mount the shared directories to a different location
            # and copy the contents around on service start/stop.
            # (See mkDataSyncScripts at the bottom for the copy logic.)
            caddyData = {
              source = "$TANGLED_VM_DATA_DIR/caddy";
              target = config.services.caddy.dataDir;
            };
            knotData = {
              source = "$TANGLED_VM_DATA_DIR/knot";
              target = "/mnt/knot-data";
            };
            spindleData = {
              source = "$TANGLED_VM_DATA_DIR/spindle";
              target = "/mnt/spindle-data";
            };
            spindleLogs = {
              source = "$TANGLED_VM_DATA_DIR/spindle-logs";
              target = "/var/log/spindle";
            };
          };
        };

        # This is fine because any and all ports that are forwarded to host are
        # explicitly marked above, we don't need a separate guest firewall
        networking.firewall.enable = false;

        # resolve `*.tngl.boltless.dev` to host
        # (10.0.2.2 is how the guest reaches the host in QEMU user networking)
        services.dnsmasq.enable = true;
        services.dnsmasq.settings.address = "/tngl.boltless.dev/10.0.2.2";

        # Trust the repo-local development root CA so the guest accepts the
        # `tls internal` certificates issued by Caddy below.
        security.pki.certificates = [
          (builtins.readFile ../contrib/certs/root.crt)
        ];

        time.timeZone = "Europe/London";
        services.timesyncd.enable = lib.mkVMOverride true;

        # Convenience for interactive debugging of the VM.
        services.getty.autologinUser = "root";
        environment.systemPackages = with pkgs; [curl vim git sqlite litecli];

        # NOTE(review): presumably points containers at a host-side resolver on
        # the docker bridge — confirm against the docker setup this pairs with.
        virtualisation.docker.extraOptions = ''
          --dns 172.17.0.1
        '';

        services.tangled.knot = {
          enable = true;
          motd = "Welcome to the development knot!\n";
          server = {
            owner = envVar "TANGLED_VM_KNOT_OWNER";
            hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6444";
            plcUrl = plcUrl;
            jetstreamEndpoint = jetstream;
            # Bind on all interfaces so the host-side port forward works.
            listenAddr = "0.0.0.0:6444";
          };
        };

        services.tangled.spindle = {
          enable = true;
          server = {
            owner = envVar "TANGLED_VM_SPINDLE_OWNER";
            hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
            plcUrl = plcUrl;
            jetstreamEndpoint = jetstream;
            listenAddr = "0.0.0.0:6555";
            dev = true;
            queueSize = 100;
            maxJobCount = 2;
            secrets = {
              provider = "sqlite";
            };
          };
        };

        services.did-method-plc.enable = true;

        services.bluesky-pds = {
          enable = true;
          # overriding package version to support emails
          package = pkgs.bluesky-pds.overrideAttrs (old: rec {
            version = "0.4.188";
            src = pkgs.fetchFromGitHub {
              owner = "bluesky-social";
              repo = "pds";
              tag = "v${version}";
              hash = "sha256-t8KdyEygXdbj/5Rhj8W40e1o8mXprELpjsKddHExmo0=";
            };
            # The pnpm dependency set must be re-fetched for the new source.
            pnpmDeps = pkgs.fetchPnpmDeps {
              inherit version src;
              pname = old.pname;
              sourceRoot = old.sourceRoot;
              fetcherVersion = 2;
              hash = "sha256-lQie7f8JbWKSpoavnMjHegBzH3GB9teXsn+S2SLJHHU=";
            };
          });
          settings = {
            LOG_ENABLED = "true";
            # Static development-only credentials for this throwaway VM —
            # NOTE(review): not suitable for any non-local deployment.
            PDS_JWT_SECRET = "8cae8bffcc73d9932819650791e4e89a";
            PDS_ADMIN_PASSWORD = "d6a902588cd93bee1af83f924f60cfd3";
            PDS_PLC_ROTATION_KEY_K256_PRIVATE_KEY_HEX = "2e92e336a50a618458e1097d94a1db86ec3fd8829d7735020cbae80625c761d7";
            # Optional SMTP wiring; left null (unset) unless provided.
            PDS_EMAIL_SMTP_URL = envVarOr "TANGLED_VM_PDS_EMAIL_SMTP_URL" null;
            PDS_EMAIL_FROM_ADDRESS = envVarOr "TANGLED_VM_PDS_EMAIL_FROM_ADDRESS" null;
            # Talk to the in-VM PLC directly, not through Caddy.
            PDS_DID_PLC_URL = "http://localhost:8080";
            PDS_CRAWLERS = "https://relay.tngl.boltless.dev";
            PDS_HOSTNAME = "pds.tngl.boltless.dev";
            PDS_PORT = 3000;
          };
        };

        services.bluesky-relay = {
          enable = true;
        };

        services.bluesky-jetstream = {
          enable = true;
          livenessTtl = 300;
          # Subscribe to the local PDS firehose.
          websocketUrl = "ws://localhost:3000/xrpc/com.atproto.sync.subscribeRepos";
        };

        # Reverse proxy fronting every service, issuing certificates from a
        # local internal CA (trusted via security.pki.certificates above).
        services.caddy = {
          enable = true;
          configFile = pkgs.writeText "Caddyfile" ''
            {
              debug
              cert_lifetime 3601d
              pki {
                ca local {
                  intermediate_lifetime 3599d
                }
              }
            }
            plc.tngl.boltless.dev {
              tls internal
              reverse_proxy http://localhost:8080
            }
            *.pds.tngl.boltless.dev, pds.tngl.boltless.dev {
              tls internal
              reverse_proxy http://localhost:3000
            }
            jetstream.tngl.boltless.dev {
              tls internal
              reverse_proxy http://localhost:6008
            }
            relay.tngl.boltless.dev {
              tls internal
              reverse_proxy http://localhost:2470
            }
            knot.tngl.boltless.dev {
              tls internal
              reverse_proxy http://localhost:6444
            }
            spindle.tngl.boltless.dev {
              tls internal
              reverse_proxy http://localhost:6555
            }
          '';
        };

        users = {
          # So we don't have to deal with permission clashing between
          # blank disk VMs and existing state
          users.${config.services.tangled.knot.gitUser}.uid = 666;
          groups.${config.services.tangled.knot.gitUser}.gid = 666;
          # TODO: separate spindle user
        };

        systemd.services = let
          # Wrap a service with pre/post hooks that copy its state between the
          # 9p shared mount (`source`) and its real working directory
          # (`target`): rsync in before start, rsync back out after stop.
          mkDataSyncScripts = source: target: {
            enableStrictShellChecks = true;
            preStart = lib.mkBefore ''
              mkdir -p ${target}
              ${lib.getExe pkgs.rsync} -a ${source}/ ${target}
            '';
            postStop = lib.mkAfter ''
              ${lib.getExe pkgs.rsync} -a ${target}/ ${source}
            '';
            # Run the hooks as root even though the service itself may drop
            # privileges, since the mount points are root-owned.
            serviceConfig.PermissionsStartOnly = true;
          };
        in {
          knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled.knot.stateDir;
          spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled.spindle.server.dbPath);
        };
      })
    ];
  }