diff --git a/.drone.yml b/.drone.yml
new file mode 100644
index 00000000..10cad109
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,38 @@
+kind: pipeline
+type: docker
+name: default
+platform:
+ arch: amd64
+ os: linux
+steps:
+ - name: build_golang
+ image: golang:1.16.4-buster
+ commands:
+ - export SRC=$DRONE_WORKSPACE
+ - chmod 755 $SRC/docker/docker-compose.build.sh
+ - $SRC/docker/docker-compose.build.sh
+ - echo ${DRONE_STAGE_OS}-${DRONE_STAGE_ARCH}-${DRONE_REPO_BRANCH}-${DRONE_COMMIT_SHA:0:8}>version
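+ # the echo above writes a build tag such as
+ # "linux-amd64-master-1a2b3c4d" to ./version (illustrative value)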
+
+ # - name: deploy
+ # image: appleboy/drone-scp:linux-amd64
+ # settings:
+ # host: 10.10.14.176
+ # port: 22
+ # username: root
+ # password: dsideal
+ # target: /root/publish/bigdata
+ # source: ./docker
+ # rm: false
+
+ # - name: restart
+ # image: appleboy/drone-ssh:linux-amd64
+ # settings:
+ # host: 10.10.14.176
+ # port: 22
+ # username: root
+ # password: dsideal
+ # script:
+ # - cd /root/publish/bigdata/docker
+ # - chmod 755 start.sh
+ # - chmod 755 stop.sh
+ # - ./start.sh
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 10e7f48d..634e57b8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,4 +43,6 @@
/dsSupport/WinBuild/
/dsSupport/Logs/
-/dsSdsf/.idea/
\ No newline at end of file
+/dsSdsf/.idea/
+
+*.log
\ No newline at end of file
diff --git a/.idea/runConfigurations/2345.xml b/.idea/runConfigurations/2345.xml
new file mode 100644
index 00000000..6b8bd746
--- /dev/null
+++ b/.idea/runConfigurations/2345.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/runConfigurations/2346.xml b/.idea/runConfigurations/2346.xml
new file mode 100644
index 00000000..ddc521e4
--- /dev/null
+++ b/.idea/runConfigurations/2346.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/runConfigurations/2347.xml b/.idea/runConfigurations/2347.xml
new file mode 100644
index 00000000..d6b53439
--- /dev/null
+++ b/.idea/runConfigurations/2347.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/runConfigurations/2348.xml b/.idea/runConfigurations/2348.xml
new file mode 100644
index 00000000..ed0d0436
--- /dev/null
+++ b/.idea/runConfigurations/2348.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/runConfigurations/2349.xml b/.idea/runConfigurations/2349.xml
new file mode 100644
index 00000000..cd720814
--- /dev/null
+++ b/.idea/runConfigurations/2349.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/runConfigurations/docker__Compose_dev.xml b/.idea/runConfigurations/docker__Compose_dev.xml
new file mode 100644
index 00000000..6904bd03
--- /dev/null
+++ b/.idea/runConfigurations/docker__Compose_dev.xml
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docker/.env.example b/docker/.env.example
new file mode 100644
index 00000000..05c82e93
--- /dev/null
+++ b/docker/.env.example
@@ -0,0 +1,38 @@
+DATA=../data
+LOG=../log
+TZ=Asia/Shanghai
+
+# IP
+IP=10.10.14.179
+
+# windows:DOCKER_SOCK=//var/run/docker.sock
+DOCKER_SOCK=/var/run/docker.sock
+
+# PORTAINER
+PORTAINER_PORT=9000
+PORTAINER_DATA=${DATA}/portainer
+
+# REDIS
+REDIS_PORT=18890
+REDIS_DATA=${DATA}/redis
+
+# MYSQL
+MYSQL_PORT=22066
+MYSQL_ROOT_PASSWORD=DsideaL147258369
+MYSQL_DATA=${DATA}/mariadb
+
+# NGINX
+NGINX_HTTP_PORT=80
+NGINX_SSL_PORT=443
+NGINX_LOG=${LOG}/openresty
+
+# ELASTICSEARCH
+ELASTICSEARCH_DATA=${DATA}/elasticsearch
+
+# KAFKA
+KAFKA_DATA=${DATA}/kafka
+KAFKA_LOG=${LOG}/kafka
+
+# GREENPLUM
+GREENPLUM_DATA=${DATA}/greenplum
+GREENPLUM_LOG=${LOG}/greenplum
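+
+# Usage sketch (assumed workflow; docker-compose reads ".env" automatically):
+# cp .env.example .env   # then adjust IP/ports for the target host
+# docker-compose up -d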
\ No newline at end of file
diff --git a/docker/.gitignore b/docker/.gitignore
new file mode 100644
index 00000000..02b38524
--- /dev/null
+++ b/docker/.gitignore
@@ -0,0 +1,5 @@
+tmp
+log
+data
+app
+.env
\ No newline at end of file
diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev
new file mode 100644
index 00000000..c096cb67
--- /dev/null
+++ b/docker/Dockerfile.dev
@@ -0,0 +1,23 @@
+FROM golang:1.16.5-buster as build
+
+RUN go env -w GOPROXY=https://goproxy.cn
+RUN go get github.com/go-delve/delve/cmd/dlv
+
+ARG MODULE
+ADD . /go/src/$MODULE
+WORKDIR /go/src/$MODULE
+
+RUN go build -gcflags "all=-N -l" -o /example main.go
+
+FROM debian:buster-20210511 as final
+
+WORKDIR /app
+COPY --from=build /go/bin/dlv /
+COPY --from=build /example .
+COPY *Config ./Config
+COPY *Xml ./Xml
+COPY *Sql ./Sql
+COPY *docs ./docs
+COPY *Shell ./
+
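+# dlv listens headless on :2345 (see CMD below); once the container runs you
+# can attach with e.g. "dlv connect localhost:2345", assuming the port is
+# published by the compose file.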
+CMD /dlv --headless --listen=:2345 --api-version=2 --accept-multiclient exec /app/example
\ No newline at end of file
diff --git a/docker/build.cmd b/docker/build.cmd
new file mode 100644
index 00000000..d543f556
--- /dev/null
+++ b/docker/build.cmd
@@ -0,0 +1 @@
+docker-compose -f docker-compose.build.yml up
\ No newline at end of file
diff --git a/docker/build.sh b/docker/build.sh
new file mode 100644
index 00000000..d543f556
--- /dev/null
+++ b/docker/build.sh
@@ -0,0 +1 @@
+docker-compose -f docker-compose.build.yml up
\ No newline at end of file
diff --git a/docker/conf/kafka/server.properties b/docker/conf/kafka/server.properties
new file mode 100644
index 00000000..eea6d553
--- /dev/null
+++ b/docker/conf/kafka/server.properties
@@ -0,0 +1,22 @@
+process.roles=broker,controller
+node.id=1
+controller.quorum.voters=1@localhost:9093
+listeners=PLAINTEXT://:9092,CONTROLLER://:9093
+inter.broker.listener.name=PLAINTEXT
+advertised.listeners=PLAINTEXT://kafka:9092
+controller.listener.names=CONTROLLER
+listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+num.network.threads=3
+num.io.threads=8
+socket.send.buffer.bytes=102400
+socket.receive.buffer.bytes=102400
+socket.request.max.bytes=104857600
+log.dirs=/tmp/kraft-combined-logs
+num.partitions=1
+num.recovery.threads.per.data.dir=1
+offsets.topic.replication.factor=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+log.retention.hours=168
+log.segment.bytes=1073741824
+log.retention.check.interval.ms=300000
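+
+# First-run bootstrap sketch for KRaft mode (assumed Kafka 2.8+ tooling; the
+# uuid is illustrative):
+#   bin/kafka-storage.sh random-uuid
+#   bin/kafka-storage.sh format -t <uuid> -c server.properties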
\ No newline at end of file
diff --git a/docker/conf/mariadb/initdb.d/init.sql b/docker/conf/mariadb/initdb.d/init.sql
new file mode 100644
index 00000000..7778dc21
--- /dev/null
+++ b/docker/conf/mariadb/initdb.d/init.sql
@@ -0,0 +1,4 @@
+CREATE DATABASE exampledb;
+CREATE USER 'slave'@'%' IDENTIFIED BY 'aA123456!';
+GRANT SELECT, RELOAD, SHOW DATABASES, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'slave'@'%';
+FLUSH PRIVILEGES;
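+-- The grants above are what an external replica or binlog/CDC reader needs.
+-- A hedged example of attaching a replica (log file and position are
+-- illustrative; host/port taken from docker/.env.example):
+--   CHANGE MASTER TO MASTER_HOST='10.10.14.179', MASTER_PORT=22066,
+--     MASTER_USER='slave', MASTER_PASSWORD='aA123456!',
+--     MASTER_LOG_FILE='mysql-bin.000001', MASTER_LOG_POS=4;
+--   START SLAVE;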
\ No newline at end of file
diff --git a/docker/conf/mariadb/my.cnf b/docker/conf/mariadb/my.cnf
new file mode 100644
index 00000000..4ee98f18
--- /dev/null
+++ b/docker/conf/mariadb/my.cnf
@@ -0,0 +1,37 @@
+[client]
+
+# The MySQL server
+[mysqld]
+skip-external-locking
+skip-name-resolve
+back_log = 50
+max_connections = 2048
+max_connect_errors = 1000
+table_open_cache = 1024
+open_files_limit = 16384
+# max_allowed_packet = 16M  (superseded by the 1G setting below)
+read_buffer_size = 8M
+read_rnd_buffer_size = 32M
+sort_buffer_size = 2M
+join_buffer_size = 2M
+thread_cache_size = 64
+query_cache_size = 64M
+query_cache_limit = 4M
+slow_query_log = 1
+long_query_time = 2
+lower_case_table_names = 1
+innodb_file_per_table = 1
+max_allowed_packet = 1G
+server-id = 1
+log-bin = mysql-bin
+expire_logs_days = 7
+binlog_format = ROW
+
+innodb_data_file_path = ibdata1:12M:autoextend
+innodb_buffer_pool_size = 2G
+innodb_write_io_threads = 12
+innodb_read_io_threads = 8
+innodb_flush_log_at_trx_commit = 2
+innodb_log_buffer_size = 16M
+innodb_log_file_size = 170M
+innodb_lock_wait_timeout = 60
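+
+# Quick sanity check once the container is up (hedged example; host port from
+# docker/.env.example):
+#   mysql -h127.0.0.1 -P22066 -uroot -p -e "SHOW VARIABLES LIKE 'innodb_buffer_pool_size'"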
diff --git a/docker/conf/openresty/nginx.conf b/docker/conf/openresty/nginx.conf
new file mode 100644
index 00000000..3f34c049
--- /dev/null
+++ b/docker/conf/openresty/nginx.conf
@@ -0,0 +1,68 @@
+
+#user nobody;
+worker_processes 8;
+
+#error_log logs/error.log;
+#error_log logs/error.log notice;
+#error_log logs/error.log info;
+
+#pid logs/nginx.pid;
+
+
+events {
+ worker_connections 1024;
+}
+
+
+http {
+ include mime.types;
+ default_type text/html;
+
+ #log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ # '$status $body_bytes_sent "$http_referer" '
+ # '"$http_user_agent" "$http_x_forwarded_for"';
+
+ #access_log logs/access.log main;
+
+ sendfile on;
+ #tcp_nopush on;
+
+ #keepalive_timeout 0;
+ keepalive_timeout 65;
+
+ #init_by_lua_file lua/lua_script/init.lua;
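+ # To require conf/openresty/rsa.lua from handlers such as addInfo.lua
+ # below, a search path entry is needed, e.g. (mount path assumed):
+ #lua_package_path "/usr/local/openresty/nginx/lua/?.lua;;";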
+
+ gzip_static on;
+ gzip on;
+ gzip_min_length 1k;
+ gzip_buffers 4 16k;
+ gzip_comp_level 4;
+ gzip_types text/plain text/css application/javascript application/x-javascript text/xml application/xml text/javascript application/json;
+ gzip_http_version 1.1;
+ gzip_disable "MSIE [1-6].";
+ gzip_vary on;
+
+ server {
+ listen 80;
+ server_name localhost;
+
+ charset utf-8;
+
+ #access_log logs/host.access.log main;
+
+ # location /sdsf/addInfo {
+ # content_by_lua_file /usr/local/openresty/nginx/lua/lua_script/addInfo.lua;
+ # }
+
+ # location ~ /sdsf/.*\.(html|css|js|eot|svg|ttf|woff)$ {
+ # root /usr/local/openresty/nginx/html/;
+ # expires -1d;
+ # }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
+
+}
diff --git a/docker/conf/openresty/rsa.lua b/docker/conf/openresty/rsa.lua
new file mode 100644
index 00000000..4a006156
--- /dev/null
+++ b/docker/conf/openresty/rsa.lua
@@ -0,0 +1,440 @@
+-- Copyright (C) by Zhu Dejiang (doujiang24)
+-- Copyright (C) by Zexuan Luo (spacewander)
+
+
+local bit = require "bit"
+local band = bit.band
+local ffi = require "ffi"
+local ffi_new = ffi.new
+local ffi_gc = ffi.gc
+local ffi_copy = ffi.copy
+local ffi_str = ffi.string
+local C = ffi.C
+local tab_concat = table.concat
+local setmetatable = setmetatable
+
+
+local _M = { _VERSION = '0.05-dev' }
+
+local mt = { __index = _M }
+
+
+local PADDING = {
+ RSA_PKCS1_PADDING = 1, -- RSA_size - 11
+ RSA_SSLV23_PADDING = 2, -- RSA_size - 11
+ RSA_NO_PADDING = 3, -- RSA_size
+ RSA_PKCS1_OAEP_PADDING = 4, -- RSA_size - 42
+}
+_M.PADDING = PADDING
+
+local KEY_TYPE = {
+ PKCS1 = "PKCS#1",
+ PKCS8 = "PKCS#8",
+}
+_M.KEY_TYPE = KEY_TYPE
+
+
+ffi.cdef[[
+typedef struct bio_st BIO;
+typedef struct bio_method_st BIO_METHOD;
+BIO_METHOD *BIO_s_mem(void);
+BIO * BIO_new(BIO_METHOD *type);
+int BIO_puts(BIO *bp, const char *buf);
+void BIO_vfree(BIO *a);
+
+typedef struct rsa_st RSA;
+RSA *RSA_new(void);
+void RSA_free(RSA *rsa);
+typedef int pem_password_cb(char *buf, int size, int rwflag, void *userdata);
+RSA * PEM_read_bio_RSAPrivateKey(BIO *bp, RSA **rsa, pem_password_cb *cb,
+ void *u);
+RSA * PEM_read_bio_RSAPublicKey(BIO *bp, RSA **rsa, pem_password_cb *cb,
+ void *u);
+RSA * PEM_read_bio_RSA_PUBKEY(BIO *bp, RSA **rsa, pem_password_cb *cb,
+ void *u);
+
+unsigned long ERR_get_error_line_data(const char **file, int *line,
+ const char **data, int *flags);
+const char * ERR_reason_error_string(unsigned long e);
+
+typedef struct bignum_st BIGNUM;
+BIGNUM *BN_new(void);
+void BN_free(BIGNUM *a);
+typedef unsigned long BN_ULONG;
+int BN_set_word(BIGNUM *a, BN_ULONG w);
+typedef struct bn_gencb_st BN_GENCB;
+int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb);
+
+typedef struct evp_cipher_st EVP_CIPHER;
+int PEM_write_bio_RSAPrivateKey(BIO *bp, RSA *x, const EVP_CIPHER *enc,
+ unsigned char *kstr, int klen,
+ pem_password_cb *cb, void *u);
+int PEM_write_bio_RSAPublicKey(BIO *bp, RSA *x);
+int PEM_write_bio_RSA_PUBKEY(BIO *bp, RSA *x);
+
+long BIO_ctrl(BIO *bp, int cmd, long larg, void *parg);
+int BIO_read(BIO *b, void *data, int len);
+
+typedef struct evp_pkey_st EVP_PKEY;
+typedef struct engine_st ENGINE;
+typedef struct evp_pkey_ctx_st EVP_PKEY_CTX;
+
+EVP_PKEY *EVP_PKEY_new(void);
+void EVP_PKEY_free(EVP_PKEY *key);
+
+EVP_PKEY_CTX *EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e);
+void EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx);
+
+int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype,
+ int cmd, int p1, void *p2);
+
+int EVP_PKEY_size(EVP_PKEY *pkey);
+
+int EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx);
+int EVP_PKEY_encrypt(EVP_PKEY_CTX *ctx,
+ unsigned char *out, size_t *outlen,
+ const unsigned char *in, size_t inlen);
+
+int EVP_PKEY_decrypt_init(EVP_PKEY_CTX *ctx);
+int EVP_PKEY_decrypt(EVP_PKEY_CTX *ctx,
+ unsigned char *out, size_t *outlen,
+ const unsigned char *in, size_t inlen);
+
+int EVP_PKEY_set1_RSA(EVP_PKEY *pkey, RSA *key);
+int PEM_write_bio_PKCS8PrivateKey(BIO *bp, EVP_PKEY *x, const EVP_CIPHER *enc,
+ char *kstr, int klen, pem_password_cb *cb,
+ void *u);
+
+void OpenSSL_add_all_digests(void);
+typedef struct env_md_st EVP_MD;
+typedef struct env_md_ctx_st EVP_MD_CTX;
+const EVP_MD *EVP_get_digestbyname(const char *name);
+
+/* EVP_MD_CTX methods for OpenSSL < 1.1.0 */
+EVP_MD_CTX *EVP_MD_CTX_create(void);
+void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx);
+
+/* EVP_MD_CTX methods for OpenSSL >= 1.1.0 */
+EVP_MD_CTX *EVP_MD_CTX_new(void);
+void EVP_MD_CTX_free(EVP_MD_CTX *ctx);
+
+int EVP_DigestInit(EVP_MD_CTX *ctx, const EVP_MD *type);
+int EVP_DigestUpdate(EVP_MD_CTX *ctx, const unsigned char *in, int inl);
+int EVP_SignFinal(EVP_MD_CTX *ctx,unsigned char *sig,unsigned int *s,
+ EVP_PKEY *pkey);
+int EVP_VerifyFinal(EVP_MD_CTX *ctx,unsigned char *sigbuf, unsigned int siglen,
+ EVP_PKEY *pkey);
+int EVP_PKEY_set1_RSA(EVP_PKEY *e, RSA *r);
+
+void ERR_set_error_data(char *data, int flags);
+]]
+--[[
+# define EVP_PKEY_CTX_set_rsa_padding(ctx, pad) \
+ EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, -1, EVP_PKEY_CTRL_RSA_PADDING, \
+ pad, NULL)
+# define EVP_SignInit(a,b) EVP_DigestInit(a,b)
+# define EVP_SignUpdate(a,b,c) EVP_DigestUpdate(a,b,c)
+--]]
+
+
+local EVP_PKEY_ALG_CTRL = 0x1000
+local EVP_PKEY_CTRL_RSA_PADDING = EVP_PKEY_ALG_CTRL + 1
+local NID_rsaEncryption = 6
+local EVP_PKEY_RSA = NID_rsaEncryption
+local ERR_TXT_STRING = 0x02
+
+local evp_md_ctx_new
+local evp_md_ctx_free
+if not pcall(function () return C.EVP_MD_CTX_create end) then
+ evp_md_ctx_new = C.EVP_MD_CTX_new
+ evp_md_ctx_free = C.EVP_MD_CTX_free
+else
+ evp_md_ctx_new = C.EVP_MD_CTX_create
+ evp_md_ctx_free = C.EVP_MD_CTX_destroy
+end
+
+local function ssl_err()
+ local err_queue = {}
+ local i = 1
+ local data = ffi_new("const char*[1]")
+ local flags = ffi_new("int[1]")
+
+ while true do
+ local code = C.ERR_get_error_line_data(nil, nil, data, flags)
+ if code == 0 then
+ break
+ end
+
+ local err = C.ERR_reason_error_string(code)
+ err_queue[i] = ffi_str(err)
+ i = i + 1
+
+ if data[0] ~= nil and band(flags[0], ERR_TXT_STRING) > 0 then
+ err_queue[i] = ffi_str(data[0])
+ i = i + 1
+ end
+ end
+
+ return nil, tab_concat(err_queue, ": ", 1, i - 1)
+end
+
+local function read_bio(bio)
+ local BIO_CTRL_PENDING = 10
+ local keylen = C.BIO_ctrl(bio, BIO_CTRL_PENDING, 0, nil);
+ local key = ffi.new("char[?]", keylen)
+ if C.BIO_read(bio, key, keylen) < 0 then
+ return ssl_err()
+ end
+ return ffi_str(key, keylen)
+end
+
+-- Follow the calling style to avoid careless mistakes.
+function _M.generate_rsa_keys(_, bits, pkcs8)
+ local rsa = C.RSA_new()
+ ffi_gc(rsa, C.RSA_free)
+ local bn = C.BN_new()
+ ffi_gc(bn, C.BN_free)
+
+ -- Set public exponent to 65537
+ if C.BN_set_word(bn, 65537) ~= 1 then
+ return nil, ssl_err()
+ end
+
+ -- Generate key
+ if C.RSA_generate_key_ex(rsa, bits, bn, nil) ~= 1 then
+ return nil, ssl_err()
+ end
+
+ local pub_key_bio = C.BIO_new(C.BIO_s_mem())
+ ffi_gc(pub_key_bio, C.BIO_vfree)
+ if pkcs8 == true then
+ if C.PEM_write_bio_RSA_PUBKEY(pub_key_bio, rsa) ~= 1 then
+ return nil, ssl_err()
+ end
+ else
+ if C.PEM_write_bio_RSAPublicKey(pub_key_bio, rsa) ~= 1 then
+ return nil, ssl_err()
+ end
+ end
+
+ local public_key, err = read_bio(pub_key_bio)
+ if not public_key then
+ return nil, nil, err
+ end
+
+ local priv_key_bio = C.BIO_new(C.BIO_s_mem())
+ ffi_gc(priv_key_bio, C.BIO_vfree)
+ if pkcs8 == true then
+ local pk = C.EVP_PKEY_new()
+ ffi_gc(pk, C.EVP_PKEY_free)
+ if C.EVP_PKEY_set1_RSA(pk,rsa) ~= 1 then
+ return nil, ssl_err()
+ end
+ if C.PEM_write_bio_PKCS8PrivateKey(priv_key_bio, pk,
+ nil, nil, 0, nil, nil) ~= 1 then
+ return nil, ssl_err()
+ end
+ else
+ if C.PEM_write_bio_RSAPrivateKey(priv_key_bio, rsa,
+ nil, nil, 0, nil, nil) ~= 1 then
+ return nil, ssl_err()
+ end
+ end
+
+ local private_key
+ private_key, err = read_bio(priv_key_bio)
+ if not private_key then
+ return nil, nil, err
+ end
+
+ return public_key, private_key
+end
+
+function _M.new(_, opts)
+ local key, read_func, is_pub, md
+
+ if opts.public_key then
+ key = opts.public_key
+ if opts.key_type == KEY_TYPE.PKCS8 then
+ read_func = C.PEM_read_bio_RSA_PUBKEY
+ else
+ read_func = C.PEM_read_bio_RSAPublicKey
+ end
+ is_pub = true
+
+ elseif opts.private_key then
+ key = opts.private_key
+ read_func = C.PEM_read_bio_RSAPrivateKey
+
+ else
+ return nil, "public_key or private_key not found"
+ end
+
+ local bio_method = C.BIO_s_mem()
+ local bio = C.BIO_new(bio_method)
+ ffi_gc(bio, C.BIO_vfree)
+
+ local len = C.BIO_puts(bio, key)
+ if len < 0 then
+ return ssl_err()
+ end
+
+ local pass
+ if opts.password then
+ local plen = #opts.password
+ pass = ffi_new("unsigned char[?]", plen + 1)
+ ffi_copy(pass, opts.password, plen)
+ end
+
+ local rsa = read_func(bio, nil, nil, pass)
+ if rsa == nil then
+ return ssl_err()
+ end
+ ffi_gc(rsa, C.RSA_free)
+
+ -- EVP_PKEY
+ local pkey = C.EVP_PKEY_new()
+ ffi_gc(pkey, C.EVP_PKEY_free)
+ if C.EVP_PKEY_set1_RSA(pkey, rsa) == 0 then
+ return ssl_err()
+ end
+
+ --EVP_PKEY_CTX
+ local ctx = C.EVP_PKEY_CTX_new(pkey, nil)
+ if ctx == nil then
+ return ssl_err()
+ end
+ ffi_gc(ctx, C.EVP_PKEY_CTX_free)
+
+ -- md_ctx init for sign or verify; used if a signature algorithm is set
+ if opts.algorithm then
+ md = C.EVP_get_digestbyname(opts.algorithm)
+ if md == nil then
+ return nil, "Unknown message digest"
+ end
+
+ end
+
+ -- ctx init for encrypt or decrypt
+ -- default for encrypt/decrypt if nothing is set
+ if opts.padding or not opts.digest then
+ local init_func = is_pub and C.EVP_PKEY_encrypt_init
+ or C.EVP_PKEY_decrypt_init
+ if init_func(ctx) <= 0 then
+ return ssl_err()
+ end
+
+ if C.EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, -1, EVP_PKEY_CTRL_RSA_PADDING,
+ opts.padding or PADDING.RSA_PKCS1_PADDING, nil) <= 0 then
+ return ssl_err()
+ end
+ end
+
+ local size = C.EVP_PKEY_size(pkey)
+ return setmetatable({
+ pkey = pkey,
+ size = size,
+ buf = ffi_new("unsigned char[?]", size),
+ _encrypt_ctx = is_pub and ctx or nil,
+ _decrypt_ctx = not is_pub and ctx or nil,
+ is_pub = is_pub,
+ md = md,
+ }, mt)
+end
+
+
+function _M.decrypt(self, str)
+ local ctx = self._decrypt_ctx
+ if not ctx then
+ return nil, "not inited for decrypt"
+ end
+
+ local len = ffi_new("size_t [1]")
+ if C.EVP_PKEY_decrypt(ctx, nil, len, str, #str) <= 0 then
+ return ssl_err()
+ end
+
+ local buf = self.buf
+ if C.EVP_PKEY_decrypt(ctx, buf, len, str, #str) <= 0 then
+ return ssl_err()
+ end
+
+ return ffi_str(buf, len[0])
+end
+
+
+function _M.encrypt(self, str)
+ local ctx = self._encrypt_ctx
+ if not ctx then
+ return nil, "not inited for encrypt"
+ end
+
+ local len = ffi_new("size_t [1]")
+ if C.EVP_PKEY_encrypt(ctx, nil, len, str, #str) <= 0 then
+ return ssl_err()
+ end
+
+ local buf = self.buf
+ if C.EVP_PKEY_encrypt(ctx, buf, len, str, #str) <= 0 then
+ return ssl_err()
+ end
+
+ return ffi_str(buf, len[0])
+end
+
+
+function _M.sign(self, str)
+ if self.is_pub then
+ return nil, "not inited for sign"
+ end
+
+ local md_ctx = evp_md_ctx_new()
+ ffi_gc(md_ctx, evp_md_ctx_free)
+
+ if C.EVP_DigestInit(md_ctx, self.md) <= 0 then
+ return ssl_err()
+ end
+
+ if C.EVP_DigestUpdate(md_ctx, str, #str) <= 0 then
+ return ssl_err()
+ end
+
+ local buf = self.buf
+ local len = ffi_new("unsigned int[1]")
+ if C.EVP_SignFinal(md_ctx, self.buf, len, self.pkey) <= 0 then
+ return ssl_err()
+ end
+
+ return ffi_str(buf, len[0])
+end
+
+
+function _M.verify(self, str, sig)
+ if not self.is_pub then
+ return nil, "not inited for verify"
+ end
+
+ local md_ctx = evp_md_ctx_new()
+ ffi_gc(md_ctx, evp_md_ctx_free)
+
+ if C.EVP_DigestInit(md_ctx, self.md) <= 0 then
+ return ssl_err()
+ end
+
+ if C.EVP_DigestUpdate(md_ctx, str, #str) <= 0 then
+ return ssl_err()
+ end
+
+ local siglen = #sig
+ local buf = siglen <= self.size and self.buf
+ or ffi_new("unsigned char[?]", siglen)
+ ffi_copy(buf, sig, siglen)
+ if C.EVP_VerifyFinal(md_ctx, buf, siglen, self.pkey) <= 0 then
+ return ssl_err()
+ end
+
+ return true
+end
+
+
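+-- Minimal usage sketch (assumes this file is reachable on lua_package_path
+-- as module "rsa"; names below are illustrative, not part of this module):
+--   local rsa = require "rsa"
+--   local pub, priv = rsa:generate_rsa_keys(2048)
+--   local enc = rsa:new({ public_key = pub })
+--   local cipher = enc:encrypt("hello")
+--   local dec = rsa:new({ private_key = priv })
+--   ngx.say(dec:decrypt(cipher))
+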
+return _M
diff --git a/docker/conf/redis/redis.conf b/docker/conf/redis/redis.conf
new file mode 100644
index 00000000..46f96662
--- /dev/null
+++ b/docker/conf/redis/redis.conf
@@ -0,0 +1,2051 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Note that option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule /path/to/my_module.so
+# loadmodule /path/to/other_module.so
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all available network interfaces on the host machine.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+# Each address can be prefixed by "-", which means that redis will not fail to
+# start if the address is not available. Being not available only refers to
+# addresses that do not correspond to any network interface. Addresses that
+# are already in use will always fail, and unsupported protocols will always be
+# silently skipped.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses
+# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6
+# bind * -::* # like the default, all available interfaces
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on the
+# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis
+# will only be able to accept client connections from the same host that it is
+# running on).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT OUT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 127.0.0.1 -::1
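+# NOTE: inside a container, binding only to loopback makes a published port
+# unreachable from other containers or the host network; "bind 0.0.0.0" may be
+# required depending on how this file is mounted (assumption about the setup).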
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+# "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 18890
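+# (18890 matches REDIS_PORT in docker/.env.example)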
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow clients connection issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /run/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Force network equipment in the middle to consider the connection to be
+# alive.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# TLS/SSL #####################################
+
+# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
+# directive can be used to define TLS-listening ports. To enable TLS on the
+# default port, use:
+#
+# port 0
+# tls-port 6379
+
+# Configure a X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers. These files should be
+# PEM formatted.
+#
+# tls-cert-file redis.crt
+# tls-key-file redis.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-key-file-pass secret
+
+# Normally Redis uses the same certificate for both server functions (accepting
+# connections) and client functions (replicating from a master, establishing
+# cluster bus connections, etc.).
+#
+# Sometimes certificates are issued with attributes that designate them as
+# client-only or server-only certificates. In that case it may be desired to use
+# different certificates for incoming (server) and outgoing (client)
+# connections. To do that, use the following directives:
+#
+# tls-client-cert-file client.crt
+# tls-client-key-file client.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-client-key-file-pass secret
+
+# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
+#
+# tls-dh-params-file redis.dh
+
+# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
+# clients and peers. Redis requires an explicit configuration of at least one
+# of these, and will not implicitly use the system wide configuration.
+#
+# tls-ca-cert-file ca.crt
+# tls-ca-cert-dir /etc/ssl/certs
+
+# By default, clients (including replica servers) on a TLS port are required
+# to authenticate using valid client side certificates.
+#
+# If "no" is specified, client certificates are not required and not accepted.
+# If "optional" is specified, client certificates are accepted and must be
+# valid if provided, but are not required.
+#
+# tls-auth-clients no
+# tls-auth-clients optional
+
+# By default, a Redis replica does not attempt to establish a TLS connection
+# with its master.
+#
+# Use the following directive to enable TLS on replication links.
+#
+# tls-replication yes
+
+# By default, the Redis Cluster bus uses a plain TCP connection. To enable
+# TLS for the bus protocol, use the following directive:
+#
+# tls-cluster yes
+
+# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
+# that older formally deprecated versions are kept disabled to reduce the attack surface.
+# You can explicitly specify TLS versions to support.
+# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
+# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
+# To enable only TLSv1.2 and TLSv1.3, use:
+#
+# tls-protocols "TLSv1.2 TLSv1.3"
+
+# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
+# about the syntax of this string.
+#
+# Note: this configuration applies only to <= TLSv1.2.
+#
+# tls-ciphers DEFAULT:!MEDIUM
+
+# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
+# information about the syntax of this string, and specifically for TLSv1.3
+# ciphersuites.
+#
+# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
+
+# When choosing a cipher, use the server's preference instead of the client
+# preference. By default, the server follows the client's preference.
+#
+# tls-prefer-server-ciphers yes
+
+# By default, TLS session caching is enabled to allow faster and less expensive
+# reconnections by clients that support it. Use the following directive to disable
+# caching.
+#
+# tls-session-caching no
+
+# Change the default number of TLS sessions cached. A zero value sets the cache
+# to unlimited size. The default size is 20480.
+#
+# tls-session-cache-size 5000
+
+# Change the default timeout of cached TLS sessions. The default timeout is 300
+# seconds.
+#
+# tls-session-cache-timeout 60
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+# When Redis is supervised by upstart or systemd, this parameter has no impact.
+daemonize yes
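+# NOTE: if redis-server runs as the container's PID 1 with this file,
+# "daemonize yes" makes it fork and the container exits immediately;
+# "daemonize no" is the usual choice under Docker (assumption about usage).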
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+# supervised no - no supervision interaction
+# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+# requires "expect stop" in your upstart job config
+# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+# on startup, and updating Redis status on a regular
+# basis.
+# supervised auto - detect upstart or systemd method based on
+# UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+# They do not enable continuous pings back to your supervisor.
+#
+# The default is "no". To run under upstart/systemd, you can simply uncomment
+# the line below:
+#
+# supervised auto
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+#
+# Note that on modern Linux systems "/run/redis.pid" is more conforming
+# and should be used instead.
+pidfile /var/run/redis.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ""
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# To disable the built in crash log, which will possibly produce cleaner core
+# dumps when they are needed, uncomment the following:
+#
+# crash-log-enabled no
+
+# To disable the fast memory check that's run as part of the crash log, which
+# will possibly let redis terminate sooner, uncomment the following:
+#
+# crash-memcheck-enabled no
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY and syslog logging is
+# disabled. Basically this means that normally a logo is displayed only in
+# interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show a
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo no
+
+# By default, Redis modifies the process title (as seen in 'top' and 'ps') to
+# provide some runtime information. It is possible to disable this and leave
+# the process name as executed by setting the following to no.
+set-proc-title yes
+
+# When changing the process title, Redis uses the following template to construct
+# the modified title.
+#
+# Template variables are specified in curly brackets. The following variables are
+# supported:
+#
+# {title} Name of process as executed if parent, or type of child process.
+# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or
+# Unix socket if only that's available.
+# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]".
+# {port} TCP port listening on, or 0.
+# {tls-port} TLS port listening on, or 0.
+# {unixsocket} Unix domain socket listening on, or "".
+# {config-file} Name of configuration file used.
+#
+proc-title-template "{title} {listen-addr} {server-mode}"
+
+################################ SNAPSHOTTING ################################
+
+# Save the DB to disk.
+#
+# save <seconds> <changes>
+#
+# Redis will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# Snapshotting can be completely disabled with a single empty string argument
+# as in following example:
+#
+save ""
+#
+# Unless specified otherwise, by default Redis will save the DB:
+# * After 3600 seconds (an hour) if at least 1 key changed
+# * After 300 seconds (5 minutes) if at least 100 keys changed
+# * After 60 seconds if at least 10000 keys changed
+#
+# You can set these explicitly by uncommenting the three following lines.
+#
+# save 3600 1
+# save 300 100
+# save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dump .rdb databases?
+# By default compression is enabled as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# Enables or disables full sanitation checks for ziplist and listpack etc when
+# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
+# crash later on while processing commands.
+# Options:
+# no - Never perform full sanitation
+# yes - Always perform full sanitation
+# clients - Perform full sanitation only for user connections.
+# Excludes: RDB files, RESTORE commands received from the master
+# connection, and client connections which have the
+# skip-sanitize-payload ACL flag.
+# The default should be 'clients' but since it currently affects cluster
+# resharding via MIGRATE, it is temporarily set to 'no' by default.
+#
+# sanitize-dump-payload no
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replicas instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+# +------------------+ +---------------+
+# | Master | ---> | Replica |
+# | (receive writes) | | (exact copy) |
+# +------------------+ +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition replicas automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+#
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <username> <password>.
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) If replica-serve-stale-data is set to 'no' the replica will reply with
+# an error "SYNC with master in progress" to all commands except:
+# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
+# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
+# HOST and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
+#
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the replicas.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new replicas arriving, that will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
+#
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb was
+# received. For this reason we have the following options:
+#
+# "disabled" - Don't use diskless load (store the rdb file to the disk first)
+# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb" - Keep a copy of the current db contents in RAM while parsing
+# the data directly from the socket. note that this requires
+# sufficient memory, if you don't have it, you risk an OOM kill.
+repl-diskless-load disabled
+
+# Replicas send PINGs to server in a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings).
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-replica-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the replica. The default
+# value is 60 seconds.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the replica socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to replicas. But this can add a delay for
+# the data to appear on the replica side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the replica side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and replicas are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# replica data when replicas are disconnected for some time, so that when a
+# replica wants to reconnect again, often a full resync is not needed, but a
+# partial resync is enough, just passing the portion of data the replica
+# missed while disconnected.
+#
+# The bigger the replication backlog, the longer the replica can endure the
+# disconnect and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated if there is at least one replica connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no connected replicas for some time, the backlog will be
+# freed. The following option configures the amount of seconds that need to
+# elapse, starting from the time the last replica disconnected, for the backlog
+# buffer to be freed.
+#
+# Note that replicas never free the backlog for timeout, since they may be
+# promoted to masters later, and should be able to correctly "partially
+# resynchronize" with other replicas: hence they should always accumulate backlog.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The replica priority is an integer number published by Redis in the INFO
+# output. It is used by Redis Sentinel in order to select a replica to promote
+# into a master if the master is no longer working correctly.
+#
+# A replica with a low priority number is considered better for promotion, so
+# for instance if there are three replicas with priority 10, 100, 25 Sentinel
+# will pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the replica as not able to perform the
+# role of master, so a replica with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+replica-priority 100
+
+# -----------------------------------------------------------------------------
+# By default, Redis Sentinel includes all replicas in its reports. A replica
+# can be excluded from Redis Sentinel's announcements. An unannounced replica
+# will be ignored by the 'sentinel replicas <master>' command and won't be
+# exposed to Redis Sentinel's clients.
+#
+# This option does not change the behavior of replica-priority. Even with
+# replica-announced set to 'no', the replica can be promoted to master. To
+# prevent this behavior, set replica-priority to 0.
+#
+# replica-announced yes
+
+# It is possible for a master to stop accepting writes if there are less than
+# N replicas connected, having a lag less or equal than M seconds.
+#
+# The N replicas need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the replica, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough replicas
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 replicas with a lag <= 10 seconds use:
+#
+# min-replicas-to-write 3
+# min-replicas-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-replicas-to-write is set to 0 (feature disabled) and
+# min-replicas-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# replicas in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover replica instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP address and port normally reported by a replica is
+# obtained in the following way:
+#
+# IP: The address is auto detected by checking the peer address
+# of the socket used by the replica to connect with the master.
+#
+# Port: The port is communicated by the replica during the replication
+# handshake, and is normally the port that the replica is using to
+# listen for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the replica may actually be reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
+############################### KEYS TRACKING #################################
+
+# Redis implements server assisted support for client side caching of values.
+# This is implemented using an invalidation table that remembers, using
+# a radix key indexed by key name, what clients have which keys. In turn
+# this is used in order to send invalidation messages to clients. Please
+# check this page to understand more about the feature:
+#
+# https://redis.io/topics/client-side-caching
+#
+# When tracking is enabled for a client, all the read only queries are assumed
+# to be cached: this will force Redis to store information in the invalidation
+# table. When keys are modified, such information is flushed away, and
+# invalidation messages are sent to the clients. However if the workload is
+# heavily dominated by reads, Redis could use more and more memory in order
+# to track the keys fetched by many clients.
+#
+# For this reason it is possible to configure a maximum fill value for the
+# invalidation table. By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# in the server side so this setting is useless.
+#
+# tracking-table-max-keys 1000000
+
+################################## SECURITY ###################################
+
+# Warning: since Redis is pretty fast, an outside user can try up to
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+# user <username> ... acl rules ...
+#
+# For example:
+#
+# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+# on Enable the user: it is possible to authenticate as this user.
+# off Disable the user: it's no longer possible to authenticate
+# with this user, however the already authenticated connections
+# will still work.
+# skip-sanitize-payload RESTORE dump-payload sanitation is skipped.
+# sanitize-payload RESTORE dump-payload is sanitized (default).
+# +<command> Allow the execution of that command
+# -<command> Disallow the execution of that command
+# +@<category> Allow the execution of all the commands in such category
+# with valid categories are like @admin, @set, @sortedset, ...
+# and so forth, see the full list in the server.c file where
+# the Redis command table is described and defined.
+# The special category @all means all the commands, both the ones
+# currently present in the server and the ones that will be loaded
+# in the future via modules.
+# +<command>|subcommand Allow a specific subcommand of an otherwise
+# disabled command. Note that this form is not
+# allowed as negative like -DEBUG|SEGFAULT, but
+# only additive starting with "+".
+# allcommands Alias for +@all. Note that it implies the ability to execute
+# all the future commands loaded via the modules system.
+# nocommands Alias for -@all.
+# ~<pattern> Add a pattern of keys that can be mentioned as part of
+# commands. For instance ~* allows all the keys. The pattern
+# is a glob-style pattern like the one of KEYS.
+# It is possible to specify multiple patterns.
+# allkeys Alias for ~*
+# resetkeys Flush the list of allowed keys patterns.
+# &<pattern> Add a glob-style pattern of Pub/Sub channels that can be
+# accessed by the user. It is possible to specify multiple channel
+# patterns.
+# allchannels Alias for &*
+# resetchannels Flush the list of allowed channel patterns.
+# ><password> Add this password to the list of valid passwords for the user.
+# For example >mypass will add "mypass" to the list.
+# This directive clears the "nopass" flag (see later).
+# < Remove this password from the list of valid passwords.
+# nopass All the set passwords of the user are removed, and the user
+# is flagged as requiring no password: it means that every
+# password will work against this user. If this directive is
+# used for the default user, every new connection will be
+# immediately authenticated with the default user without
+# any explicit AUTH command required. Note that the "resetpass"
+# directive will clear this condition.
+# resetpass Flush the list of allowed passwords. Moreover removes the
+# "nopass" status. After "resetpass" the user has no associated
+# passwords and there is no way to authenticate without adding
+# some password (or setting it as "nopass" later).
+# reset Performs the following actions: resetpass, resetkeys, off,
+# -@all. The user returns to the same state it has immediately
+# after its creation.
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+# user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+# user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice still had no commands in the set of allowed
+# commands; later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
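+#
+# As an illustrative sketch (the "reporter" username and password are
+# hypothetical), a read-only user limited to keys under "report:" could be
+# defined as:
+#
+# user reporter on >reporter-password ~report:* +@read
+#
+# Here only the relative order of additive/subtractive command rules matters;
+# swapping +@read with the key pattern would not change the result.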
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
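+#
+# A minimal users.acl (hypothetical users, one rule per line, same syntax as
+# the "user" directive above) might look like this:
+#
+# user default on >some-long-password ~* &* +@all
+# user worker on >another-password ~jobs:* +@list +@connection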
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option's effect will just be to set
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usual, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# The requirepass option is not compatible with the aclfile option and the
+# ACL LOAD command; these will cause requirepass to be ignored.
+#
+# requirepass foobared
+
+# New users are initialized with restrictive permissions by default, via the
+# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
+# is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channel permissions for new users are controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default
+# defaults to the 'allchannels' permission.
+#
+# Future compatibility note: it is very likely that in a future version of Redis
+# the directive's default of 'allchannels' will be changed to 'resetchannels' in
+# order to provide better out-of-the-box Pub/Sub security. Therefore, it is
+# recommended that you explicitly define Pub/Sub permissions for all users
+# rather than rely on implicit default values. Once you've set explicit
+# Pub/Sub permissions for all existing users, you should uncomment the
+# following line.
+#
+# acl-pubsub-default resetchannels
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to replicas may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all new connections, sending
+# the error 'max number of clients reached'.
+#
+# IMPORTANT: When Redis Cluster is used, the max number of connections is also
+# shared with the cluster bus: every node in the cluster will use two
+# connections, one incoming and another outgoing. It is important to size the
+# limit accordingly in case of very large clusters.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas is subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of replicas fills up with DELs of evicted keys, triggering the
+# deletion of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select one from the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key having an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# LRU, LFU and volatile-ttl are all implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, when there are no suitable keys for
+# eviction, Redis will return an error on write operations that require
+# more memory. These are usually commands that create new keys, add data or
+# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,
+# SORT (due to the STORE argument), and EXEC (if the transaction includes any
+# command that requires memory).
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune them for speed or
+# accuracy. By default Redis will check five keys and pick the one that was
+# used least recently; you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 approximates true LRU
+# very closely but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
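+#
+# For instance, a pure-cache deployment (a sketch; the sizes shown are
+# illustrative, not recommendations) could combine these directives:
+#
+# maxmemory 2gb
+# maxmemory-policy allkeys-lru
+# maxmemory-samples 10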
+
+# Eviction processing is designed to function well with the default setting.
+# If there is an unusually large amount of write traffic, this value may need to
+# be increased. Decreasing this value may reduce latency at the risk of less
+# effective eviction processing.
+# 0 = minimum latency, 10 = default, 100 = process without regard to latency
+#
+# maxmemory-eviction-tenacity 10
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover, or manually). It means
+# that the eviction of keys will be handled just by the master, which sends
+# DEL commands to the replica as keys are evicted on the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is
+# usually what you want. However, if your replica is writable, or you want the
+# replica to have a different memory setting, and you are sure all the writes
+# performed to the replica are idempotent, then you may change this default
+# (but be sure to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end up using
+# more memory than the amount set via maxmemory (there are certain buffers
+# that may be larger on the replica, or data structures may sometimes take
+# more memory and so forth). So make sure you monitor your replicas and make
+# sure they have enough memory to never hit a real out-of-memory condition
+# before the master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+# Redis reclaims expired keys in two ways: upon access when those keys are
+# found to be expired, and also in background, in what is called the
+# "active expire" cycle. The key space is slowly and incrementally scanned,
+# looking for expired keys to reclaim, so that it is possible to free the
+# memory of keys that are expired and will never be accessed again in a
+# short time.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory or adding latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate fewer already-expired keys still present
+# in the system. It's a tradeoff between memory, CPU and latency.
+#
+# active-expire-effort 1
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new commands
+# in order to reclaim all the memory associated with an object in a synchronous
+# way. If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis. However if the key is associated with an
+# aggregated value containing millions of elements, the server can block for
+# a long time (even seconds) in order to complete the operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and the ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other operations.
+# Specifically Redis deletes objects independently of a user call in the
+# following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+# in order to make room for new data, without going over the specified
+# memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+# EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+# already exist. For example the RENAME command may delete the old key
+# content when it is replaced with another one. Similarly SUNIONSTORE
+# or SORT with STORE option may delete existing keys. The SET command
+# itself removes any old content of the specified key in order to replace
+# it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with
+# its master, the content of the whole database is removed in order to
+# load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# like if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way like if UNLINK
+# was called, using the following configuration directives.
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+
+# It is also possible, for cases where replacing the user code's DEL calls
+# with UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
+# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
+# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
+# commands. When neither flag is passed, this directive will be used to determine
+# if the data should be deleted asynchronously.
+
+lazyfree-lazy-user-flush no
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis clients socket reads and writes
+# in different I/O threads. Since writing in particular is slow, normally
+# Redis users use pipelining in order to speed up the per-core Redis
+# performance, and spawn multiple instances in order to scale more. Using I/O
+# threads it is possible to easily speed up Redis by up to two times without
+# resorting to pipelining or sharding the instance.
+#
+# By default threading is disabled; we suggest enabling it only on machines
+# that have at least 4 cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four-core box, try to use 2 or 3 I/O
+# threads; if you have 8 cores, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also, this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
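+#
+# For example (a sketch; the numbers are illustrative), a 4-thread server
+# could be exercised with a matching 4-thread benchmark run:
+#
+# io-threads 4                                 (in redis.conf, needs restart)
+# redis-benchmark --threads 4 -t set,get -n 1000000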
+
+############################ KERNEL OOM CONTROL ##############################
+
+# On Linux, it is possible to hint the kernel OOM killer on what processes
+# should be killed first when out of memory.
+#
+# Enabling this feature makes Redis actively control the oom_score_adj value
+# for all its processes, depending on their role. The default scores will
+# attempt to have background child processes killed before all others, and
+# replicas killed before masters.
+#
+# Redis supports three options:
+#
+# no: Don't make changes to oom-score-adj (default).
+# yes: Alias to "relative" see below.
+# absolute: Values in oom-score-adj-values are written as is to the kernel.
+# relative: Values are used relative to the initial value of oom_score_adj when
+# the server starts and are then clamped to a range of -1000 to 1000.
+# Because typically the initial value is 0, they will often match the
+# absolute values.
+oom-score-adj no
+
+# When oom-score-adj is used, this directive controls the specific values used
+# for master, replica and background child processes. Values range -2000 to
+# 2000 (higher means more likely to be killed).
+#
+# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
+# can freely increase their value, but not decrease it below its initial
+# settings. This means that setting oom-score-adj to "relative" and setting the
+# oom-score-adj-values to positive values will always succeed.
+oom-score-adj-values 0 200 800
+
+
+#################### KERNEL transparent hugepage CONTROL ######################
+
+# Usually the kernel Transparent Huge Pages control is set to "madvise"
+# or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
+# case this config has no effect. On systems in which it is set to "always",
+# redis will attempt to disable it specifically for the redis process in order
+# to avoid latency problems specifically with fork(2) and CoW.
+# If for some reason you prefer to keep it enabled, you can set this config to
+# "no" and the kernel global to "always".
+
+disable-thp yes
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of lost writes (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# goes wrong with the Redis process itself, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check https://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OSes will really
+# flush data on disk, some others will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file, implicitly calling
+# BGREWRITEAOF, when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the base size by the specified percentage, the rewrite is
+# triggered. You also need to specify a minimal size for the AOF file to be
+# rewritten; this is useful to avoid rewriting the AOF file even if the
+# percentage increase is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
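+
+# Worked example with the values above: after a rewrite leaves a 64mb AOF, a
+# percentage of 100 triggers the next BGREWRITEAOF once the file grows past
+# 64mb + 100% = 128mb; the 64mb floor avoids rewriting files that remain
+# small in absolute terms.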
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts, emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle, the
+# server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, then continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet call any write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node, enable cluster support by uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the number of milliseconds a node must be unreachable
+# for it to be considered in a failure state.
+# Most other internal time limits are a multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+# in order to try to give an advantage to the replica with the best
+# replication offset (more data from the master processed).
+# Replicas will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the replica will not try to failover
+# at all.
+#
+# The point "2" can be tuned by user. Specifically a replica will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large cluster-replica-validity-factor may allow replicas with too-old data
+# to failover a master, while too small a value may prevent the cluster from
+# being able to elect a replica at all.
+#
+# For maximum availability, it is possible to set the cluster-replica-validity-factor
+# to a value of 0, which means that replicas will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that is, masters
+# that are left without working replicas. This improves the cluster's ability
+# to resist failures, as otherwise an orphaned master can't be failed over
+# if it fails while having no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value or
+# set cluster-allow-replica-migration to 'no'.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# Turning off this option allows the use of less automatic cluster
+# configuration. It both disables migration to orphaned masters and
+# migration from masters that became empty.
+#
+# Default is 'yes' (allow automatic migrations).
+#
+# cluster-allow-replica-migration yes
+
+# By default Redis Cluster nodes stop accepting queries if they detect that
+# at least one hash slot is uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# is no longer covered) the whole cluster eventually becomes unavailable.
+# It automatically becomes available again as soon as all the slots are covered.
+#
+# However sometimes you want the subset of the cluster which is working
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to failover their
+# master during master failures. However the replica can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted except
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. Without this
+# option set, a master outage in a 1 or 2 shard configuration causes a
+# read/write outage for the entire cluster; with it set, only a write outage
+# occurs. Without a quorum of masters, slot ownership will not change
+# automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to set up your cluster, make sure to read the documentation
+# available at the https://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster node address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following four options are used for this purpose, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-tls-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if cluster-tls is set to no.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# client port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
+# cluster-announce-bus-port 6380
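+#
+# For instance (a sketch; the IP and port mappings are illustrative), a node
+# started with "docker run -p 7000:6379 -p 17000:16379" on host 10.1.1.5
+# could announce itself as:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-port 7000
+# cluster-announce-bus-port 17000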
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# the execution time, in microseconds, that a command must exceed in order
+# to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
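+
+# Entries can then be inspected and cleared from redis-cli, for example:
+#
+# SLOWLOG GET 10      (show the ten most recent slow commands)
+# SLOWLOG RESET       (discard the log and reclaim its memory)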
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
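+
+# A typical interactive session (a sketch) could be:
+#
+# CONFIG SET latency-monitor-threshold 100
+# LATENCY LATEST      (latest event per source)
+# LATENCY DOCTOR      (human readable analysis and advice)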
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# t Stream commands
+# d Module key type events
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxetd, so that the "AKE" string means all the events
+# (Except key-miss events which are excluded from 'A' due to their
+# unique nature).
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of expired keys, subscribing to the channel
+# name __keyevent@0__:expired, use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
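+
+# For example (a sketch), to watch expirations on database 0 at runtime:
+#
+# redis-cli config set notify-keyspace-events Ex
+# redis-cli psubscribe '__keyevent@0__:expired'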
+
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation, both server and client side, is so
+# simple that the Redis server needs just 100 lines of code to implement
+# this support.
+#
+# What do you do with Gopher nowadays? Well Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler internet, others
+# believe that the mainstream internet became too controlled, and it's cool
+# to create an alternative space for people that want a bit of fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were otherwise illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual as well.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
+# talking), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet at a publicly accessible address
+# to serve Gopher pages, MAKE SURE TO SET A PASSWORD for the instance.
+# Once a password is set:
+#
+# 1. The Gopher server (when enabled, not by default) will still serve
+# content via Gopher.
+# 2. However other commands cannot be called before the client
+#    authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# Note that Gopher is not currently supported when 'io-threads-do-reads'
+# is enabled.
+#
+# To enable Gopher support, uncomment the following line and set the option
+# from no (the default) to yes.
+#
+# gopher-enabled no
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 byte header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run on a hash table
+# that is rehashing, the more rehashing "steps" are performed; so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with a 2 millisecond delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously exceeds
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to prevent a protocol desynchronization (for
+# instance due to a bug in the client) from leading to unbounded memory usage
+# in the query buffer. However you can configure it here if you have very
+# special needs, such as huge multi/exec requests or the like.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here; it must be 1mb or greater.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful, for instance, in order to
+# avoid processing too many clients for each background task invocation,
+# avoiding latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When redis saves RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see the maxmemory setting) can be tuned. However it is a
+# good idea to start with the default settings and only change them after
+# investigating how to improve performance and how the keys' LFU values change
+# over time, which can be inspected via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
+# +--------+------------+------------+------------+------------+------------+
+# | 0 | 104 | 255 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 1 | 18 | 49 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 10 | 10 | 18 | 142 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 100 | 8 | 11 | 49 | 143 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if it has a value
+# <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing to reclaim back memory.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the
+# keys, will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+# to use the copy of Jemalloc we ship with the source code of Redis.
+# This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+# issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+# needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Enable active defragmentation
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both to pin different Redis threads to different CPUs, and
+# to make sure that multiple Redis instances running on the same host are
+# pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to this via Redis configuration directly, both in Linux and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. The syntax to specify the cpu list is the same as
+# the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11
+# bgsave_cpulist 1,10-11
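+#
+# For comparison (not a directive in this file), the equivalent one-off
+# pinning with the taskset command would be, e.g.:
+#   taskset -c 0,2,4,6 redis-server /path/to/redis.conf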
+
+# In some cases Redis will emit warnings and even refuse to start if it
+# detects that the system is in a bad state. It is possible to suppress these
+# warnings by setting the following config, which takes a space-delimited
+# list of warnings to suppress:
+#
+# ignore-warnings ARM64-COW-BUG
diff --git a/docker/docker-compose.build.sh b/docker/docker-compose.build.sh
new file mode 100644
index 00000000..2ab3f5bf
--- /dev/null
+++ b/docker/docker-compose.build.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+set -e
+
+# Builds each Go module and stages its binary plus runtime assets under
+# docker/app/<module>, which docker-compose.pro.yml mounts into the containers.
+# The -gcflags "all=-N -l" builds disable optimizations and inlining so the
+# binaries stay debugger-friendly.
+
+go env -w GOPROXY=https://goproxy.cn  # use the goproxy.cn Go module mirror
+
+rm -rf $SRC/docker/app
+mkdir -p $SRC/docker/app
+
+# dsBaseRpc
+cd $SRC/dsBaseRpc
+go build -gcflags "all=-N -l" -o ./build/dsBaseRpc main.go
+cp -r ./Shell ./build
+cp -r ./Config ./build/Config
+#cp -r ./Xml ./build/Xml
+cp -r ./Sql ./build/Sql
+mv -f ./build ../docker/app/dsBaseRpc
+
+# dsBaseWeb
+cd $SRC/dsBaseWeb
+go build -gcflags "all=-N -l" -o ./build/dsBaseWeb main.go
+cp -r ./Shell ./build
+cp -r ./docs ./build/docs
+cp -r ./Config ./build/Config
+#cp -r ./Xml ./build/Xml
+mv -f ./build ../docker/app/dsBaseWeb
+
+# dsBigData
+cd $SRC/dsBigData
+go build -o ./build/dsBigData main.go
+cp -r ./Shell ./build
+cp -r ./Config ./build/Config
+#cp -r ./Xml ./build/Xml
+mv -f ./build ../docker/app/dsBigData
+
+# dsSso
+cd $SRC/dsSso
+go build -gcflags "all=-N -l" -o ./build/dsSso main.go
+cp -r ./Shell ./build
+cp -r ./docs ./build/docs
+cp -r ./Config ./build/Config
+#cp -r ./Xml ./build/Xml
+mv -f ./build ../docker/app/dsSso
+
+# dsSupport
+cd $SRC/dsSupport
+go build -gcflags "all=-N -l" -o ./build/dsSupport main.go
+cp -r ./Shell ./build
+cp -r ./docs ./build/docs
+cp -r ./Config ./build/Config
+mv -f ./build ../docker/app/dsSupport
diff --git a/docker/docker-compose.build.yml b/docker/docker-compose.build.yml
new file mode 100644
index 00000000..ee49aeab
--- /dev/null
+++ b/docker/docker-compose.build.yml
@@ -0,0 +1,9 @@
+version: "3.8"
+services:
+ build:
+ image: golang:1.16.5-buster
+ volumes:
+ - ../:/go/src
+ environment:
+ - SRC=/go/src
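+ # "$$" escapes "$" for docker-compose, so ${SRC} below is expanded by the
+ # container's shell at runtime rather than by compose itself.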
+ command: bash -c "$${SRC}/docker/docker-compose.build.sh"
\ No newline at end of file
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
new file mode 100644
index 00000000..92036da4
--- /dev/null
+++ b/docker/docker-compose.dev.yml
@@ -0,0 +1,79 @@
+version: "3.8"
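+# Development override. Each service is built from Dockerfile.dev and, judging
+# by the settings, runs under a remote debugger: SYS_PTRACE plus
+# seccomp:unconfined are what a ptrace-based debugger such as Delve needs to
+# attach, and host ports 2345-2349 all map to container port 2345 (Delve's
+# default listen port).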
+services:
+ dsbaserpc:
+ security_opt:
+ - seccomp:unconfined
+ cap_add:
+ - SYS_PTRACE
+ build:
+ context: ../dsBaseRpc
+ dockerfile: ../docker/Dockerfile.dev
+ args:
+ - MODULE=dsBaseRpc
+ ports:
+ - 8001:8001
+ - 2345:2345
+ volumes:
+ - ../dsBaseRpc/Config/Config.docker.ini:/app/Config/Config.ini
+ dsbaseweb:
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - seccomp:unconfined
+ build:
+ context: ../dsBaseWeb
+ dockerfile: ../docker/Dockerfile.dev
+ args:
+ - MODULE=dsBaseWeb
+ ports:
+ - 8002:8002
+ - 2346:2345
+ volumes:
+ - ../dsBaseWeb/Config/Config.docker.ini:/app/Config/Config.ini
+ dsbigdata:
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - seccomp:unconfined
+ build:
+ context: ../dsBigData
+ dockerfile: ../docker/Dockerfile.dev
+ args:
+ - MODULE=dsBigData
+ ports:
+ - 8004:8004
+ - 2347:2345
+ volumes:
+ - ../dsBigData/Config/Config.docker.ini:/app/Config/Config.ini
+ dssso:
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - seccomp:unconfined
+ build:
+ context: ../dsSso
+ dockerfile: ../docker/Dockerfile.dev
+ args:
+ - MODULE=dsSso
+ ports:
+ - 8000:8000
+ - 2348:2345
+ volumes:
+ - ../dsSso/Config/Config.docker.ini:/app/Config/Config.ini
+# dssupport:
+# cap_add:
+# - SYS_PTRACE
+# security_opt:
+# - seccomp:unconfined
+# build:
+# context: ../dsSupport
+# dockerfile: ../docker/Dockerfile.dev
+# args:
+# - MODULE=dsSupport
+# ports:
+# - 8005:8005
+# - 2349:2345
+# volumes:
+# - ../dsSupport/Config/Config.docker.ini:/app/Config/Config.ini
+# depends_on:
+# - elasticsearch
diff --git a/docker/docker-compose.pro.yml b/docker/docker-compose.pro.yml
new file mode 100644
index 00000000..f705988e
--- /dev/null
+++ b/docker/docker-compose.pro.yml
@@ -0,0 +1,60 @@
+version: "3.8"
+services:
+ dsbaserpc:
+ image: debian:buster-20210511
+ restart: always
+ ports:
+ - 8001:8001
+ volumes:
+ - ./app/dsBaseRpc:/app
+ - ./app/dsBaseRpc/Config/Config.docker.ini:/app/Config/Config.ini
+ working_dir: /app
+ command: bash -c "./dsBaseRpc"
+ dsbaseweb:
+ image: debian:buster-20210511
+ restart: always
+ ports:
+ - 8002:8002
+ volumes:
+ - ./app/dsBaseWeb:/app
+ - ./app/dsBaseWeb/Config/Config.docker.ini:/app/Config/Config.ini
+ working_dir: /app
+ command: bash -c "./dsBaseWeb"
+ depends_on:
+ - kafka
+ dsbigdata:
+ image: debian:buster-20210511
+ restart: always
+ ports:
+ - 8004:8004
+ volumes:
+ - ./app/dsBigData:/app
+ - ./app/dsBigData/Config/Config.docker.ini:/app/Config/Config.ini
+ working_dir: /app
+ command: bash -c "./dsBigData"
+ depends_on:
+ - elasticsearch
+ dssso:
+ image: debian:buster-20210511
+ restart: always
+ ports:
+ - 8000:8000
+ volumes:
+ - ./app/dsSso:/app
+ - ./app/dsSso/Config/Config.docker.ini:/app/Config/Config.ini
+ working_dir: /app
+ command: bash -c "./dsSso"
+ depends_on:
+ - kafka
+ dssupport:
+ image: debian:buster-20210511
+ restart: always
+ ports:
+ - 8005:8005
+ volumes:
+ - ./app/dsSupport:/app
+ - ./app/dsSupport/Config/Config.docker.ini:/app/Config/Config.ini
+ working_dir: /app
+ command: bash -c "./dsSupport"
+ depends_on:
+ - elasticsearch
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
new file mode 100644
index 00000000..9bf22743
--- /dev/null
+++ b/docker/docker-compose.yml
@@ -0,0 +1,93 @@
+version: "3.8"
+services:
+ portainer:
+ image: portainer/portainer:1.24.2
+ restart: always
+ ports:
+ - ${PORTAINER_PORT}:9000
+ volumes:
+ - ${DOCKER_SOCK}:/var/run/docker.sock
+ - ${PORTAINER_DATA}:/data
+ redis:
+ image: redis:6.2.4-alpine3.13
+ restart: always
+ environment:
+ - TZ=${TZ}
+ ports:
+ - ${REDIS_PORT}:6379
+ volumes:
+ - ./conf/redis/redis.conf:/usr/local/etc/redis/redis.conf
+ - ${REDIS_DATA}:/data
+ mariadb:
+ image: mariadb:10.5.10
+ restart: always
+ environment:
+ - TZ=${TZ}
+ - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
+ command: --default-authentication-plugin=mysql_native_password
+ ports:
+ - ${MYSQL_PORT}:3306
+ volumes:
+ - ./conf/mariadb/my.cnf:/etc/mysql/conf.d/my.cnf:ro
+ - ./conf/mariadb/initdb.d:/docker-entrypoint-initdb.d
+ - ${MYSQL_DATA}:/var/lib/mysql
+ healthcheck:
+ test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
+ interval: 10s
+ timeout: 10s
+ retries: 10
+ openresty:
+ image: openresty/openresty:1.19.3.1-8-alpine
+ restart: always
+ ports:
+ - ${NGINX_HTTP_PORT}:80
+ - ${NGINX_SSL_PORT}:443
+ volumes:
+ - ./conf/openresty/nginx.conf:/usr/local/openresty/nginx/conf/nginx.conf
+ - ./conf/openresty/rsa.lua:/usr/local/openresty/lualib/rsa.lua
+ - ${NGINX_LOG}:/usr/local/openresty/nginx/logs
+ kafka:
+ image: 76527413/kafka:2.8.0
+ restart: always
+ volumes:
+ - ./conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties
+ - ${KAFKA_DATA}:/tmp/kraft-combined-logs
+ - ${KAFKA_LOG}:/opt/kafka/logs
+ ports:
+ - 9092:9092
+ - 9093:9093
+ healthcheck:
+ test: ["CMD", "nc", "-vz", "localhost", "9092"]
+ interval: 10s
+ timeout: 10s
+ retries: 10
+ elasticsearch:
+ image: elasticsearch:7.12.0
+ healthcheck:
+ test: ["CMD", "nc", "-vz", "localhost", "9200"]
+ interval: 10s
+ timeout: 10s
+ retries: 10
+ restart: always
+ environment:
+ - ES_JAVA_OPTS=-Xms512m -Xmx512m
+ - discovery.type=single-node
+ - http.cors.enabled=true
+ - http.cors.allow-origin=*
+ ports:
+ - 9200:9200
+ - 9300:9300
+ volumes:
+ # note: chmod 777 the elasticsearch data directory on the host first,
+ # otherwise the container cannot write to it
+ - ${ELASTICSEARCH_DATA}:/usr/share/elasticsearch/data
+ # greenplum:
+ # image: 76527413/greenplum:6.16.2
+ # restart: always
+ # hostname: greenplum_singlenode
+ # environment:
+ # - DATABASE=exampledb
+ # ports:
+ # - 5432:5432
+ # volumes:
+ # - ${GREENPLUM_DATA}:/data
+ # - ${GREENPLUM_LOG}:/home/gpadmin/gpAdminLogs
diff --git a/docker/start.cmd b/docker/start.cmd
new file mode 100644
index 00000000..cf06070c
--- /dev/null
+++ b/docker/start.cmd
@@ -0,0 +1 @@
+docker-compose --compatibility -f docker-compose.yml -f docker-compose.pro.yml up --remove-orphans -d
\ No newline at end of file
diff --git a/docker/start.dev.cmd b/docker/start.dev.cmd
new file mode 100644
index 00000000..1166383d
--- /dev/null
+++ b/docker/start.dev.cmd
@@ -0,0 +1 @@
+docker-compose --compatibility -f docker-compose.yml -f docker-compose.dev.yml up --remove-orphans -d
\ No newline at end of file
diff --git a/docker/start.sh b/docker/start.sh
new file mode 100644
index 00000000..cf06070c
--- /dev/null
+++ b/docker/start.sh
@@ -0,0 +1 @@
+docker-compose --compatibility -f docker-compose.yml -f docker-compose.pro.yml up --remove-orphans -d
\ No newline at end of file
diff --git a/docker/stop.cmd b/docker/stop.cmd
new file mode 100644
index 00000000..356959e4
--- /dev/null
+++ b/docker/stop.cmd
@@ -0,0 +1 @@
+docker-compose down --remove-orphans
\ No newline at end of file
diff --git a/docker/stop.sh b/docker/stop.sh
new file mode 100644
index 00000000..356959e4
--- /dev/null
+++ b/docker/stop.sh
@@ -0,0 +1 @@
+docker-compose down --remove-orphans
\ No newline at end of file
diff --git a/dsBaseRpc/Config/Config.docker.ini b/dsBaseRpc/Config/Config.docker.ini
new file mode 100644
index 00000000..fe9d3fdb
--- /dev/null
+++ b/dsBaseRpc/Config/Config.docker.ini
@@ -0,0 +1,37 @@
+[mysql] # mysql configuration
+ip = mariadb
+port = 3306
+database = base_db_dev
+user = root
+pwd = DsideaL147258369
+
+[distribute] # deployment settings
+ip = server.dsmin.com
+port = 22
+user = root
+pwd = dsideal
+remotePath = /usr/local/dsMin/dsBaseRpc/
+localPath = E:\Work\dsMin\dsBaseRpc\build
+
+[redis]
+ip = redis
+port = 6379
+db = 0
+expireTime = 86400
+
+# rpc server registration
+[rpcServer]
+port = 8001
+
+# project name, used when writing logs
+[project]
+project_name = dsBaseRpc
+
+# address of the data exchange service
+[dataExchange]
+#host = http://10.10.14.186:9009
+host = http://10.10.14.213:9009
+exchangeUrl = /v1/dataex/DataexSet
+SystemAuthUrl = /v1/dataex/SystemAuth
+SystemId = BASE_GO
+SystemKey = 96fa57b8-ac44-11ea-bd48-f48e38f73cf7
diff --git a/dsBaseWeb/Config/Config.docker.ini b/dsBaseWeb/Config/Config.docker.ini
new file mode 100644
index 00000000..abeebdf7
--- /dev/null
+++ b/dsBaseWeb/Config/Config.docker.ini
@@ -0,0 +1,67 @@
+[distribute] # deployment settings
+ip = 10.10.14.187
+port = 22
+user = root
+pwd = dsideal
+remotePath = /usr/local/dsMin/dsBaseWeb/
+localPath = E:/Work/dsMin/dsBaseWeb
+
+
+[mysql]
+ip = mariadb
+port = 3306
+db_name = base_db_dev
+
+[redis]
+ip = redis
+port = 6379
+db = 0
+expireTime = 86400
+
+# rpc server registration
+[rpcServer]
+ip = dsbaserpc
+port = 8001
+
+# gin server port
+[server]
+port = 8002
+
+# project name, used when writing logs
+[project]
+project_name = dsBaseWeb
+
+[kafka]
+brokers = kafka:9092
+partition = 20
+replication = 1
+KafkaAccessLogTopic = log_baseweb
+
+[sso]
+ssoServerNw = http://10.10.14.187
+#ssoServerNw = http://10.10.24.116:8000
+ssoServerWw = http://fort.edusoa.com:7777
+authCodeURI = /oauth2/authorize
+authTokenURI = /oauth2/access_token
+authLogoutURI = /oauth2/logout
+clientIdNw = br888ra563ugbm2ov77g
+clientSecretNw = 01e9f8ak57e3j03vrjq34zg6ws
+#clientIdNw = bu3pcf7cl45ug8dup8og
+#clientSecretNw = 01emmt5qdyszh9ndbpbg4zk4tn
+clientIdWw = bpomac2563uj213q1g2g
+clientSecretWw = 01e3nf6hnr2eysdxhc2n80w9ca
+responseType = code
+grantType = authorization_code
+redirectURINw = http://10.10.14.187/base/sso/CheckSsoCode
+#redirectURINw = http://127.0.0.1:8002/base/sso/CheckSsoCode
+redirectURIWw = http://fort.edusoa.com:7777/base/sso/CheckSsoCode
+accessTokenKey = ds_access_token
+
+[deleteXlsx]
+# delete unused template files older than 7 days
+diffTime = 7
+
+
+# first octets of internal IP ranges
+[IP]
+intranetIP=192,172,10,127
diff --git a/dsBigData/build/Config/Config.ini b/dsBigData/Config/Config.docker.ini
similarity index 85%
rename from dsBigData/build/Config/Config.ini
rename to dsBigData/Config/Config.docker.ini
index 6fcda16c..ec2bead3 100644
--- a/dsBigData/build/Config/Config.ini
+++ b/dsBigData/Config/Config.docker.ini
@@ -7,8 +7,8 @@ remotePath = /usr/local/dsMin/dsBigData/
localPath = E:/Work/dsMin/dsBigData
[redis]
-ip = 127.0.0.1
-port = 18890
+ip = redis
+port = 6379
db = 0
expireTime = 86400
@@ -21,4 +21,4 @@ port = 8004
project_name = dsBigData
[elasticsearch]
-url = http://10.10.14.212:9200
+url = http://elasticsearch:9200
\ No newline at end of file
diff --git a/dsBigData/build/Config/White.txt b/dsBigData/build/Config/White.txt
deleted file mode 100644
index 3108d70c..00000000
--- a/dsBigData/build/Config/White.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-.ico
-.html
-.css
-.jpg
-.jpeg
-.png
-/base/sso/CheckSsoCode
\ No newline at end of file
diff --git a/dsBigData/build/Config/logo.txt b/dsBigData/build/Config/logo.txt
deleted file mode 100644
index 68d7f4a8..00000000
--- a/dsBigData/build/Config/logo.txt
+++ /dev/null
@@ -1,12 +0,0 @@
- _ ____ _ _____ _
- | | | _ \(_) | __ \ | |
- __| |___| |_) |_ __ _| | | | __ _| |_ __ _
- / _` / __| _ <| |/ _` | | | |/ _` | __/ _` |
- | (_| \__ \ |_) | | (_| | |__| | (_| | || (_| |
- \__,_|___/____/|_|\__, |_____/ \__,_|\__\__,_|
- __/ |
- |___/
-Created By HuangHai 2020-01-20
-http://patorjk.com/software/taag/#p=display&f=Big&t=dsBigData
-
-
diff --git a/dsBigData/build/debug.sh b/dsBigData/build/debug.sh
deleted file mode 100644
index ff54e018..00000000
--- a/dsBigData/build/debug.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-# kill the running process
-kill -9 `pgrep -f dsBigData` 2>/dev/null
-sleep 3
-
-cd /usr/local/dsMin/dsBigData
-chmod +x dsBigData
-# run as a background process
-/usr/local/dsMin/dsBigData/dsBigData
diff --git a/dsBigData/build/dsBigData b/dsBigData/build/dsBigData
deleted file mode 100644
index 51166bae..00000000
Binary files a/dsBigData/build/dsBigData and /dev/null differ
diff --git a/dsBigData/build/start.sh b/dsBigData/build/start.sh
deleted file mode 100644
index 87d65894..00000000
--- a/dsBigData/build/start.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-
-# kill the running process
-kill -9 `pgrep -f dsBigData` 2>/dev/null
-sleep 3
-
-chmod +x dsBigData
-# run as a background process
-nohup /usr/local/dsMin/dsBigData/dsBigData >> /usr/local/dsMin/dsBigData/dsBigData.log 2>&1 &
diff --git a/dsBigData/build/stop.sh b/dsBigData/build/stop.sh
deleted file mode 100644
index 8bf6ece1..00000000
--- a/dsBigData/build/stop.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-# without -9, kill invokes the onStop() callback; use -9 when the callback is not needed
-kill -9 `pgrep -f dsBigData` 2>/dev/null
diff --git a/dsSso/Config/Config.docker.ini b/dsSso/Config/Config.docker.ini
new file mode 100644
index 00000000..db8e286a
--- /dev/null
+++ b/dsSso/Config/Config.docker.ini
@@ -0,0 +1,33 @@
+[mysql] # mysql configuration
+ip = mariadb
+port = 3306
+database = base_db_dev
+user = root
+pwd = DsideaL147258369
+
+[distribute] # deployment settings
+ip = 10.10.14.187
+port = 22
+user = root
+pwd = dsideal
+remotePath = /usr/local/dsMin/dsSso/
+localPath = E:\Work\dsMin\dsSso\build
+
+[redis]
+ip = redis
+port = 6379
+db = 0
+
+[kafka]
+KafkaAddress = kafka:9092
+KafkaAccessLogTopic = dsAccessLog
+# ----------------------------------------------------------------------------------------------------
+
+[server] # gin server port
+port = 8000
+
+# captcha validity period, in seconds
+CaptchaExpireTime = 120
+
+[install_area]
+code = changchun
\ No newline at end of file
diff --git a/dsSso/Utils/RedisUtil/RedisUtil.go b/dsSso/Utils/RedisUtil/RedisUtil.go
index 39aa76ee..91c73c3d 100644
--- a/dsSso/Utils/RedisUtil/RedisUtil.go
+++ b/dsSso/Utils/RedisUtil/RedisUtil.go
@@ -44,7 +44,11 @@ func init() {
LogUtil.Error(ErrorConst.CreateRedisError, "Redis异常")
} else if err != nil {
LogUtil.Error(ErrorConst.CreateRedisError, err.Error())
- }
+ }
+}
+
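+// FlushAll removes every key from the connected Redis instance (FLUSHALL);
+// dsSso calls it once when the OAuth2 server starts.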
+func FlushAll() {
+ RedisClient.FlushAll()
}
 //====== The code below was added by HuangHai on 2020-02-18 ======================================
diff --git a/dsSso/go.mod b/dsSso/go.mod
index f72f8574..7d7c2b5d 100644
--- a/dsSso/go.mod
+++ b/dsSso/go.mod
@@ -18,20 +18,20 @@ require (
github.com/fatih/structs v1.1.0 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/garyburd/redigo v1.6.0
- github.com/gin-gonic/gin v1.6.3
+ github.com/gin-gonic/gin v1.7.2
github.com/go-openapi/spec v0.19.6 // indirect
github.com/go-openapi/swag v0.19.7 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
- github.com/go-playground/validator/v10 v10.4.1 // indirect
+ github.com/go-playground/validator/v10 v10.6.1 // indirect
github.com/go-redis/redis/v7 v7.2.0
github.com/go-sql-driver/mysql v1.5.0
github.com/go-xorm/cmd/xorm v0.0.0-20190426080617-f87981e709a1 // indirect
- github.com/golang/protobuf v1.4.3 // indirect
- github.com/json-iterator/go v1.1.10 // indirect
+ github.com/golang/protobuf v1.5.2 // indirect
+ github.com/json-iterator/go v1.1.11 // indirect
github.com/klauspost/compress v1.10.2 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
- github.com/mattn/go-isatty v0.0.12 // indirect
+ github.com/mattn/go-isatty v0.0.13 // indirect
github.com/mattn/go-sqlite3 v1.10.0 // indirect
github.com/oklog/ulid v1.3.1
github.com/pborman/uuid v1.2.0 // indirect
@@ -48,15 +48,15 @@ require (
github.com/tidwall/gjson v1.6.0
github.com/tidwall/pretty v1.0.1 // indirect
github.com/tracer0tong/kafkalogrus v0.0.0-20180816014403-290bb4d4d549
- github.com/ugorji/go v1.2.3 // indirect
+ github.com/ugorji/go v1.2.6 // indirect
github.com/xormplus/builder v0.0.0-20200331055651-240ff40009be // indirect
github.com/xormplus/core v0.0.0-20200308074340-f3bce19d5f31
github.com/xormplus/xorm v0.0.0-20200529061552-7d0d26c6f81c
- golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
- golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect
- golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect
+ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
+ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
+ golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.0.0-20200213050514-49b8ac185c84 // indirect
- google.golang.org/protobuf v1.25.0 // indirect
+ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
gopkg.in/flosch/pongo2.v3 v3.0.0-20141028000813-5e81b817a0c4 // indirect
gopkg.in/go-playground/validator.v9 v9.31.0 // indirect
gopkg.in/ini.v1 v1.42.0
diff --git a/dsSso/go.sum b/dsSso/go.sum
index 8273b8dc..efc61c1a 100644
--- a/dsSso/go.sum
+++ b/dsSso/go.sum
@@ -93,6 +93,8 @@ github.com/gin-gonic/gin v1.5.0 h1:fi+bqFAx/oLK54somfCtEZs9HeH1LHVoEPUgARpTqyc=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
+github.com/gin-gonic/gin v1.7.2 h1:Tg03T9yM2xa8j6I3Z3oqLaQRSmKvxPd6g/2HJ6zICFA=
+github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
@@ -125,6 +127,8 @@ github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-playground/validator/v10 v10.6.1 h1:W6TRDXt4WcWp4c4nf/G+6BkGdhiIo0k417gfr+V6u4I=
+github.com/go-playground/validator/v10 v10.6.1/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-sql-driver/mysql v0.0.0-20180719071942-99ff426eb706/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@@ -161,6 +165,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -171,6 +178,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -201,6 +209,8 @@ github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGn
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -244,6 +254,8 @@ github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
+github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
@@ -362,12 +374,16 @@ github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go v1.2.3 h1:WbFSXLxDFKVN69Sk8t+XHGzVCD7R8UoAATR8NqZgTbk=
github.com/ugorji/go v1.2.3/go.mod h1:5l8GZ8hZvmL4uMdy+mhCO1LjswGRYco9Q3HfuisB21A=
+github.com/ugorji/go v1.2.6 h1:tGiWC9HENWE2tqYycIqFTNorMmFRVhNwCpDOpWqnk8E=
+github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0=
github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.3 h1:/mVYEV+Jo3IZKeA5gBngN0AvNnQltEDkR+eQikkWQu0=
github.com/ugorji/go/codec v1.2.3/go.mod h1:5FxzDJIgeiWJZslYHPj+LS1dq1ZBQVelZFnjsFGI/Uc=
+github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
+github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
@@ -397,6 +413,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnk
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -424,6 +442,7 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjut
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -451,13 +470,21 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -507,6 +534,9 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
diff --git a/dsSso/main.go b/dsSso/main.go
index 820981f4..ec1ea92a 100644
--- a/dsSso/main.go
+++ b/dsSso/main.go
@@ -27,6 +27,9 @@ import (
 Date: 2020-03-25
*/
func startOAuth2Server() {
+	// clear Redis
+ RedisUtil.FlushAll()
+
 	// declare the OAuth2 Redis storage
RedisStorage.OAuth2RedisStorage = &RedisStorage.RedisStorage{
Pool: RedisUtil.Pool,
diff --git a/dsSupport/Config/Config.docker.ini b/dsSupport/Config/Config.docker.ini
new file mode 100644
index 00000000..ee013de3
--- /dev/null
+++ b/dsSupport/Config/Config.docker.ini
@@ -0,0 +1,56 @@
+[distribute] # deployment settings
+ip = 10.10.14.187
+port = 22
+user = root
+pwd = dsideal
+remotePath = /usr/local/dsMin/dsSupport/
+localPath = E:/Work/dsMin/dsSupport
+
+[mysql]
+;ip = 10.10.6.200
+;port = 22066
+;database = base_db_zhangjun
+;user = root
+;pwd = DsideaL147258369
+ip = mariadb
+port = 3306
+database = base_db_dev
+user = root
+pwd = DsideaL147258369
+
+[redis]
+;ip = 127.0.0.1
+;port = 6379
+;db = 1
+;expireTime = 86400
+ip = redis
+port = 6379
+db = 1
+expireTime = 86400
+
+# gin server port
+[server]
+port = 8005
+
+[elasticsearch]
+;nodes = http://10.10.14.188:9200
+##nodes = http://10.10.14.61:9200,http://10.10.14.62:9200,http://10.10.14.63:9200
+##nodes = http://192.168.0.200:9200,http://192.168.0.200:9201,http://192.168.0.200:9202
+nodes = http://elasticsearch:9200
+;http://10.10.14.188:9200,
+;nodes = http://192.168.0.200:9200,http://192.168.0.200:9201,http://192.168.0.200:9202
+;user = root
+;pwd = dsideal
+
+# project name, used when writing logs
+[project]
+project_name = dsSupport
+
+[rpcServer]
+ip = 127.0.0.1
+port = 8001
+
+# admin console login credentials
+[account]
+users = admin:dsideal,administrator:dsideal123@321,datams:data123
+