summary refs log tree commit diff
path: root/nixos
diff options
context:
space:
mode:
authorDaniel Peebles <copumpkin@users.noreply.github.com>2017-07-21 10:49:20 -0400
committerGitHub <noreply@github.com>2017-07-21 10:49:20 -0400
commit1698bda6b3acc600fbd8e1cf6577c68ec5f2e852 (patch)
tree573686dc1adb811a7ec40ce4365b734bf4689061 /nixos
parent8e37e3b7f19ecb8f74a033843d0fbf6203c45a92 (diff)
parent4b42fc4b8a969dc4621c76d556309021591a17a2 (diff)
downloadnixlib-1698bda6b3acc600fbd8e1cf6577c68ec5f2e852.tar
nixlib-1698bda6b3acc600fbd8e1cf6577c68ec5f2e852.tar.gz
nixlib-1698bda6b3acc600fbd8e1cf6577c68ec5f2e852.tar.bz2
nixlib-1698bda6b3acc600fbd8e1cf6577c68ec5f2e852.tar.lz
nixlib-1698bda6b3acc600fbd8e1cf6577c68ec5f2e852.tar.xz
nixlib-1698bda6b3acc600fbd8e1cf6577c68ec5f2e852.tar.zst
nixlib-1698bda6b3acc600fbd8e1cf6577c68ec5f2e852.zip
Merge pull request #27508 from joelthompson/exhibitor
exhibitor: init at 3.4.9
Diffstat (limited to 'nixos')
-rw-r--r--nixos/modules/module-list.nix1
-rw-r--r--nixos/modules/services/misc/exhibitor.nix361
2 files changed, 362 insertions, 0 deletions
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 8f78652fbfd6..b97c3b0d816b 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -287,6 +287,7 @@
   ./services/misc/emby.nix
   ./services/misc/errbot.nix
   ./services/misc/etcd.nix
+  ./services/misc/exhibitor.nix
   ./services/misc/felix.nix
   ./services/misc/folding-at-home.nix
   ./services/misc/fstrim.nix
diff --git a/nixos/modules/services/misc/exhibitor.nix b/nixos/modules/services/misc/exhibitor.nix
new file mode 100644
index 000000000000..33580962bf71
--- /dev/null
+++ b/nixos/modules/services/misc/exhibitor.nix
@@ -0,0 +1,361 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.exhibitor;
+  exhibitor = pkgs.exhibitor; # no `services.exhibitor.package` option is declared; cfg.package would fail evaluation
+  exhibitorConfig = ''
+    zookeeper-install-directory=${cfg.baseDir}/zookeeper
+    zookeeper-data-directory=${cfg.zkDataDir}
+    zookeeper-log-directory=${cfg.zkLogDir}
+    zoo-cfg-extra=${cfg.zkExtraCfg}
+    client-port=${toString cfg.zkClientPort}
+    connect-port=${toString cfg.zkConnectPort}
+    election-port=${toString cfg.zkElectionPort}
+    cleanup-period-ms=${toString cfg.zkCleanupPeriod}
+    servers-spec=${concatStringsSep "," cfg.zkServersSpec}
+    auto-manage-instances=${toString cfg.autoManageInstances}
+    ${cfg.extraConf}
+  '';
+  configDir = pkgs.writeTextDir "exhibitor.properties" exhibitorConfig;
+  cliOptionsCommon = {
+    configtype = cfg.configType;
+    defaultconfig = "${configDir}/exhibitor.properties";
+    port = toString cfg.port;
+    hostname = cfg.hostname;
+  };
+  s3CommonOptions = { s3region = cfg.s3Region; s3credentials = cfg.s3Credentials; };
+  cliOptionsPerConfig = {
+    s3 = {
+      s3config = "${cfg.s3Config.bucketName}:${cfg.s3Config.objectKey}";
+      s3configprefix = cfg.s3Config.configPrefix;
+    };
+    zookeeper = {
+      zkconfigconnect = concatStringsSep "," cfg.zkConfigConnect;
+      zkconfigexhibitorpath = cfg.zkConfigExhibitorPath;
+      zkconfigpollms = toString cfg.zkConfigPollMs;
+      zkconfigretry = "${toString cfg.zkConfigRetry.sleepMs}:${toString cfg.zkConfigRetry.retryQuantity}";
+      zkconfigzpath = cfg.zkConfigZPath;
+      zkconfigexhibitorport = toString cfg.zkConfigExhibitorPort; # NB: This might be null
+    };
+    file = {
+      fsconfigdir = cfg.fsConfigDir;
+      fsconfiglockprefix = cfg.fsConfigLockPrefix;
+      fsconfigname = cfg.fsConfigName; # was `fsConfigName = fsConfigName;` — undefined identifier, and CLI flags are lowercase
+    };
+    none = {
+      noneconfigdir = configDir;
+    };
+  };
+  cliOptions = concatStringsSep " " (mapAttrsToList (k: v: "--${k} ${v}") (filterAttrs (k: v: v != null && v != "") (cliOptionsCommon //
+               cliOptionsPerConfig."${cfg.configType}" //
+               s3CommonOptions //
+               optionalAttrs cfg.s3Backup { s3backup = "true"; } //
+               optionalAttrs cfg.fileSystemBackup { filesystembackup = "true"; }
+               )));
+in
+{
+  options = {
+    services.exhibitor = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "
+          Whether to enable the exhibitor server.
+        ";
+      };
+      # See https://github.com/soabase/exhibitor/wiki/Running-Exhibitor for what these mean
+      # General options for any type of config
+      port = mkOption {
+        type = types.int;
+        default = 8080;
+        description = ''
+          The port for exhibitor to listen on and communicate with other exhibitors.
+        '';
+      };
+      baseDir = mkOption {
+        type = types.str;
+        default = "/var/exhibitor";
+        description = ''
+          Baseline directory for exhibitor runtime config.
+        '';
+      };
+      configType = mkOption {
+        type = types.enum [ "file" "s3" "zookeeper" "none" ];
+        description = ''
+          Which configuration type you want to use. Additional config will be
+          required depending on which type you are using.
+        '';
+      };
+      hostname = mkOption {
+        type = types.nullOr types.str;
+        description = ''
+          Hostname to use and advertise
+        '';
+        default = null;
+      };
+      autoManageInstances = mkOption {
+        type = types.bool;
+        description = ''
+          Automatically manage ZooKeeper instances in the ensemble
+        '';
+        default = false;
+      };
+      zkDataDir = mkOption {
+        type = types.str;
+        default = "${cfg.baseDir}/zkData";
+        description = ''
+          The Zookeeper data directory
+        '';
+      };
+      zkLogDir = mkOption {
+        type = types.path;
+        default = "${cfg.baseDir}/zkLogs";
+        description = ''
+          The Zookeeper logs directory
+        '';
+      };
+      extraConf = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          Extra Exhibitor configuration to put in the ZooKeeper config file.
+        '';
+      };
+      zkExtraCfg = mkOption {
+        type = types.str;
+        default = ''initLimit=5&syncLimit=2&tickTime=2000'';
+        description = ''
+          Extra options to pass into Zookeeper
+        '';
+      };
+      zkClientPort = mkOption {
+        type = types.int;
+        default = 2181;
+        description = ''
+          Zookeeper client port
+        '';
+      };
+      zkConnectPort = mkOption {
+        type = types.int;
+        default = 2888;
+        description = ''
+          The port to use for followers to talk to each other.
+        '';
+      };
+      zkElectionPort = mkOption {
+        type = types.int;
+        default = 3888;
+        description = ''
+          The port for Zookeepers to use for leader election.
+        '';
+      };
+      zkCleanupPeriod = mkOption {
+        type = types.int;
+        default = 0;
+        description = ''
+          How often (in milliseconds) to run the Zookeeper log cleanup task.
+        '';
+      };
+      zkServersSpec = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Zookeeper server spec for all servers in the ensemble.
+        '';
+        example = [ "S:1:zk1.example.com" "S:2:zk2.example.com" "S:3:zk3.example.com" "O:4:zk-observer.example.com" ];
+      };
+
+      # Backup options
+      s3Backup = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable backups to S3
+        '';
+      };
+      fileSystemBackup = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enables file system backup of ZooKeeper log files
+        '';
+      };
+
+      # Options for using zookeeper configType
+      zkConfigConnect = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          The initial connection string for ZooKeeper shared config storage
+        '';
+        example = ["host1:2181" "host2:2181"];
+      };
+      zkConfigExhibitorPath = mkOption {
+        type = types.str;
+        description = ''
+          If the ZooKeeper shared config is also running Exhibitor, the URI path for the REST call
+        '';
+        default = "/";
+      };
+      zkConfigExhibitorPort = mkOption {
+        type = types.nullOr types.int;
+        description = ''
+          If the ZooKeeper shared config is also running Exhibitor, the port that
+          Exhibitor is listening on. IMPORTANT: if this value is not set it implies
+          that Exhibitor is not being used on the ZooKeeper shared config.
+        '';
+      };
+      zkConfigPollMs = mkOption {
+        type = types.int;
+        description = ''
+          The period in ms to check for changes in the config ensemble
+        '';
+        default = 10000;
+      };
+      zkConfigRetry = mkOption {
+        type = types.submodule {
+          options = {
+            sleepMs = mkOption {
+              type = types.int;
+            };
+            retryQuantity = mkOption {
+              type = types.int;
+            };
+          };
+        };
+        description = ''
+          The retry values to use
+        '';
+        default = { sleepMs = 1000; retryQuantity = 3; };
+      };
+      zkConfigZPath = mkOption {
+        type = types.str;
+        description = ''
+          The base ZPath that Exhibitor should use
+        '';
+        example = "/exhibitor/config";
+      };
+
+      # Config options for s3 configType
+      s3Config = mkOption {
+        type = types.submodule {
+          options = {
+            bucketName = mkOption {
+              type = types.str;
+              description = ''
+                Bucket name to store config
+              '';
+            };
+            objectKey = mkOption {
+              type = types.str;
+              description = ''
+                S3 key name to store the config
+              '';
+            };
+            configPrefix = mkOption {
+              type = types.str;
+              description = ''
+                When using AWS S3 shared config files, the prefix to use for values such as locks
+              '';
+              default = "exhibitor-";
+            };
+          };
+        };
+      };
+
+      # The next two are used for either s3backup or s3 configType
+      s3Credentials = mkOption {
+        type = types.nullOr types.path;
+        description = ''
+          Optional credentials to use for s3backup or s3config. Argument is the path
+          to an AWS credential properties file with two properties:
+          com.netflix.exhibitor.s3.access-key-id and com.netflix.exhibitor.s3.access-secret-key
+        '';
+        default = null;
+      };
+      s3Region = mkOption {
+        type = types.nullOr types.str;
+        description = ''
+          Optional region for S3 calls
+        '';
+        default = null;
+      };
+
+      # Config options for file config type
+      fsConfigDir = mkOption {
+        type = types.path;
+        description = ''
+          Directory to store Exhibitor properties (cannot be used with s3config).
+          Exhibitor uses file system locks so you can specify a shared location
+          so as to enable complete ensemble management.
+        '';
+      };
+      fsConfigLockPrefix = mkOption {
+        type = types.str;
+        description = ''
+          A prefix for a locking mechanism used in conjunction with fsconfigdir
+        '';
+        default = "exhibitor-lock-";
+      };
+      fsConfigName = mkOption {
+        type = types.str;
+        description = ''
+          The name of the file to store config in
+        '';
+        default = "exhibitor.properties";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.exhibitor = {
+      description = "Exhibitor Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment = {
+        ZOO_LOG_DIR = cfg.baseDir;
+      };
+      serviceConfig = {
+        /***
+          Exhibitor is a bit un-nixy. It wants to present to you a user interface in order to
+          mutate the configuration of both itself and ZooKeeper, and to coordinate changes
+          among the members of the Zookeeper ensemble. I'm going for a different approach here,
+          which is to manage all the configuration via nix and have it write out the configuration
+          files that exhibitor will use, and to reduce the amount of inter-exhibitor orchestration.
+        ***/
+        ExecStart = ''
+          ${pkgs.exhibitor}/bin/startExhibitor.sh ${cliOptions}
+        '';
+        User = "zookeeper";
+        PermissionsStartOnly = true;
+      };
+      # This is a bit wonky, but the reason for this is that Exhibitor tries to write to
+      # ${cfg.baseDir}/zookeeper/bin/../conf/zoo.cfg
+      # I want everything but the conf directory to be in the immutable nix store, and I want defaults
+      # from the nix store
+      # If I symlink the bin directory in, then bin/../ will resolve to the parent of the symlink in the
+      # immutable nix store. Bind mounting a writable conf over the existing conf might work, but it gets very
+      # messy with trying to copy the existing out into a mutable store.
+      # Another option is to try to patch upstream exhibitor, but the current package just pulls down the
+      # prebuild JARs off of Maven, rather than building them ourselves, as Maven support in Nix isn't
+      # very mature. So, it seems like a reasonable compromise is to just copy out of the immutable store
+      # just before starting the service, so we're running binaries from the immutable store, but we work around
+      # Exhibitor's desire to mutate its current installation.
+      preStart = ''
+        mkdir -m 0700 -p ${cfg.baseDir}/zookeeper
+        # Not doing a chown -R to keep the base ZK files owned by root
+        chown zookeeper ${cfg.baseDir} ${cfg.baseDir}/zookeeper
+        cp -Rf ${pkgs.zookeeper}/* ${cfg.baseDir}/zookeeper
+        chown -R zookeeper ${cfg.baseDir}/zookeeper/conf
+        chmod -R u+w ${cfg.baseDir}/zookeeper/conf
+      '';
+    };
+    users.extraUsers = singleton {
+      name = "zookeeper";
+      uid = config.ids.uids.zookeeper;
+      description = "Zookeeper daemon user";
+      home = cfg.baseDir;
+    };
+  };
+}