ToB企服应用市场:ToB评测及商务社交产业平台

标题: DockerCompose安装postgres、nginx、redis、rabbitmq、mongodb、nacos、yapi [打印本页]

作者: 商道如狼道    时间: 2024-9-5 14:42
标题: DockerCompose安装postgres、nginx、redis、rabbitmq、mongodb、nacos、yapi
dockercompose汇总

在工作中用到各种中间件,以下配置均为单体部署,没有采用集群,集群的配置后续加上
  1. version: '3'
  2. services:
  3.   gogs:
  4.     image: gogs/gogs # 指定使用gogs/gogs官方镜像
  5.     container_name: gogs
  6.     environment:
  7.       - TZ=Asia/Shanghai # 设置时区环境变量
  8.     #restart: always # 确保容器在停止后自动重启
  9.     privileged: true # Docker 容器默认以非特权用户的身份运行,这是出于安全考虑。特权模式允许容器拥有与主机相同的权限
  10.     volumes:
  11.       - ./data/gogs:/data # 映射本地的./data/gogs目录到容器的/data目录,用于存储Gogs数据
  12.     ports:
  13.       - "10022:22"
  14.       - "3001:3000" # 映射本地的端口3000到容器的SSH端口22和本地的端口3000到容器的HTTP端口3000
  15.   minio:
  16.     image: minio/minio
  17.     container_name: minio
  18.     volumes:
  19.       - ./data/minio:/data
  20.     ports:
  21.       - "9000:9000"
  22.       - "9001:9001"
  23.     environment:
  24.       MINIO_ROOT_USER: minio
  25.       MINIO_ROOT_PASSWORD: minio123
  26.     command: server /data --console-address ":9001" # command指令中添加--console-address参数,否则浏览器访问控制台自动跳转端口导致无法访问
  27.     #restart: always
  28.   nginx:
  29.     image: nginx:latest
  30.     #image: nginx:1.22.1
  31.     container_name: nginx
  32.     #restart: always
  33.     ports:
  34.       - "80:80"
  35.       - "443:443"
  36.     environment:
  37.       - NGINX_PORT=80
  38.     volumes:
  39.       - ./data/nginx/html:/usr/share/nginx/html  # 将本地的html目录挂载到容器的Nginx根目录
  40.       - ./data/nginx/nginx.conf:/etc/nginx/nginx.conf
  41.       - ./data/nginx/logs:/var/log/nginx
  42.   redis:
  43.     # 指定服务名称,命令:--name redis
  44.     # 如果不指定,则将默认用docker-compose.yml所在文件夹名_服务名称_n命名
  45.     container_name: redis
  46.     #docker启动,容器启动
  47.     #restart: always
  48.     # 指定镜像:命令 redis:latest
  49.     image: redis:7.2.4
  50.     # 指定端口:命令 -p 主机端口:容器端口
  51.     ports:
  52.       - "16379:16379"
  53.     volumes:
  54.       - ./data/redis/conf/redis.conf:/etc/redis/redis.conf
  55.       - ./data/redis/data:/data
  56.     # 运行命令
  57.     command: redis-server /etc/redis/redis.conf
  58.   rabbitmq:
  59.     # 镜像 如果想访问图形管理 docker exec -it 9c217810b91c rabbitmq-plugins enable rabbitmq_management
  60.     # image: rabbitmq:3.13
  61.     # 带图形管理的镜像
  62.     image: rabbitmq:3.13-management
  63.     # 容器名称
  64.     container_name: rabbitmq
  65.     # 主机名称
  66.     #hostname: node_one
  67.     # 端口
  68.     ports:
  69.       # (epmd)epmd代表 Erlang端口映射守护进程,erlang发现口
  70.       #- "4369:4369"
  71.       # 下面两个AMQP 0-9-1 without and with TLSclient端通信口
  72.       #- "5671:5671"
  73.       - "5672:5672"
  74.       # 管理监听端口
  75.       #- "15671:15671"
  76.       # 管理界面ui使用的端口
  77.       - "15672:15672"
  78.       # ( Erlang distribution) server间内部通信口
  79.       #- "25672:25672"
  80.     #docker启动,容器启动
  81.     #restart: always
  82.     volumes:
  83.       #数据文件目录
  84.       - ./data/rabbitmq/lib:/var/lib/rabbitmq
  85.       # 配置文件目录
  86.       #- ./data/rabbitmq/etc:/etc/rabbitmq
  87.       # 日志文件目录
  88.       - ./data/rabbitmq/log:/var/log/rabbitmq
  89.     #environment:
  90.       # 默认虚拟机名
  91.       #- RABBITMQ_DEFAULT_VHOST=rabbitmq-one
  92.       # 用户名
  93.       #- RABBITMQ_DEFAULT_USER=root
  94.       # 密码
  95.       #- RABBITMQ_DEFAULT_PASS=root
  96.       # 设置时区
  97.       #- Asia/Shanghai
  98.   mongodb:
  99.     container_name: mongodb
  100.     image: mongo:latest
  101.     #restart: always
  102.     environment:
  103.       MONGO_INITDB_ROOT_USERNAME: admin
  104.       MONGO_INITDB_ROOT_PASSWORD: admin
  105.     ports:
  106.       - "27017:27017"
  107.     volumes:
  108.       - ./data/mongo/db:/data/db
  109.   nacos:
  110.     # 容器名称
  111.     container_name: nacos
  112.     image: nacos/nacos-server:v2.1.1
  113.     #hostname: nacos
  114.     #restart: always
  115.     environment:
  116.       - MODE=standalone ##注意这里使用的是单机模式
  117.       - NACOS_AUTH_ENABLE=true
  118.       - NACOS_AUTH_IDENTITY_KEY=nacos
  119.       - NACOS_AUTH_IDENTITY_VALUE=nacos
  120.       - NACOS_AUTH_TOKEN=SecretKey012345678901234567890123456789012345678901234567890123456789
  121.     volumes:
  122.       - ./data/nacos/logs:/home/nacos/logs
  123.       - ./data/nacos/data:/home/nacos/data
  124.     ports:
  125.       - "8848:8848"
  126.       #- "9848:9848"
  127.       #- "9849:9849"
  128.   yapi:
  129.     image: jayfong/yapi:latest
  130.     container_name: yapi
  131.     #restart: always
  132.     ports:
  133.       - 3000:3000
  134.     environment:
  135.       # 随便设置一个账号(账号是邮箱的格式)
  136.       - YAPI_ADMIN_ACCOUNT=gufanbiao@163.com
  137.       # 设置账号的密码
  138.       - YAPI_ADMIN_PASSWORD=123456
  139.       # 禁用注册功能
  140.       - YAPI_CLOSE_REGISTER=true
  141.       # 设置连接 mongodb 的服务器地址,可以使用容器内部的 mongodb 服务名称
  142.       - YAPI_DB_SERVERNAME=mongodb
  143.       - YAPI_DB_PORT=27017
  144.       - YAPI_DB_DATABASE=yapidb
  145.       - YAPI_DB_USER=admin
  146.       - YAPI_DB_PASS=admin
  147.       - YAPI_DB_AUTH_SOURCE=admin
  148.       #- YAPI_DB_CONNECT_STRING="mongodb://jobs:123456@mongodb:27017/yapidb?authSource=admin"
  149.       # 禁用发送邮件的功能
  150.       - YAPI_MAIL_ENABLE=false
  151.       # 禁用 LDAP 登录功能
  152.       - YAPI_LDAP_LOGIN_ENABLE=false
  153.       # 不使用任何插件
  154.       - YAPI_PLUGINS=[]
  155.     #networks:
  156.       #- yapi_net
  157.     volumes:
  158.       #- ./data/yapi/config.json:/yapi/config.json  # 宿主机配置文件路径:/容器内部配置文件路径
  159.       #- ./data/yapi/config:/yapi/config  # 挂载配置文件目录
  160.       - ./data/yapi/log:/yapi/log  # 挂载日志文件目录
  161.       #- ./data/yapi/init:/yapi/init  # 挂载初始化脚本目录
  162.       #- ./data/mongo/db:/data/db  # 挂载MongoDB数据目录
  163.     depends_on:
  164.       - mongodb
  165.   jenkins:
  166.     image: jenkins/jenkins
  167.     container_name: jenkins
  168.     ports:
  169.       - 8080:8080
  170.       - 50000:50000
  171.     volumes:
  172.       - ./data/jenkins/jenkins_home:/var/jenkins_home/
  173.       - ./data/jenkins/jenkins_logs:/var/log/jenkins # 挂载日志目录
  174.     environment:
  175.       JAVA_OPTS: '-Djava.util.logging.config.file=/var/jenkins_home/log.properties'
  176.   mysql8:
  177.     container_name: mysql8
  178.     image: mysql:8.0.31
  179.     #docker启动,容器启动
  180.     #restart: always
  181.     ports:
  182.       - "13306:3306"
  183.     # 防止被OOM kill, -1000为最低优先级
  184.     #oom_score_adj: -1000
  185.     environment:
  186.       # 等同于 -e MYSQL_ROOT_PASSWORD指定root的登录密码
  187.       MYSQL_ROOT_PASSWORD: 'root'
  188.       MYSQL_ALLOW_EMPTY_PASSWORD: 'no'
  189.       # 这里这个指令compose启动成功后会自动创建名为docker的数据库
  190.       MYSQL_DATABASE: 'totograin'
  191.       # 此处就是相当于 mysql create user,创建了数据库的登录用户
  192.       #MYSQL_USER: 'gufanbiao'
  193.       #MYSQL_PASSWORD: 'gufanbiao'  
  194.     volumes:
  195.       - ./data/mysql8/data:/var/lib/mysql
  196.       # 这里的my.cnf配置下面有简单实例
  197.       - ./data/mysql8/conf.d:/etc/mysql/conf.d # 映射配置目录,宿主机:容器
  198.       - ./data/mysql8/log:/var/log/mysql
  199.       - ./data/mysql8/mysql-files:/var/lib/mysql-files
  200.       #- /etc/localtime:/etc/localtime:ro # 让容器的时钟与宿主机时钟同步,避免时间的问题,ro是read only的意思,就是只读。
  201.     #command:
  202.       # 使用指定的配置文件启动
  203.       #- --defaults-file=/etc/mysql/my.cnf
  204.   mysql5:
  205.     # 使用 MySQL 5.7.44 镜像
  206.     image: mysql:5.7.44
  207.     # 容器名称为 mysql5
  208.     container_name: mysql5
  209.     ports:
  210.       - "23306:3306"
  211.     # 容器退出时自动重启
  212.     #restart: always
  213.     # 防止被OOM kill, -1000为最低优先级
  214.     #oom_score_adj: -1000
  215.     environment:
  216.       # 等同于 -e MYSQL_ROOT_PASSWORD指定root的登录密码
  217.       MYSQL_ROOT_PASSWORD: 'root'
  218.       MYSQL_ALLOW_EMPTY_PASSWORD: 'no'
  219.       # 这里这个指令compose启动成功后会自动创建名为docker的数据库
  220.       MYSQL_DATABASE: 'totograin'
  221.       # 此处就是相当于 mysql create user,创建了数据库的登录用户
  222.       #MYSQL_USER: 'gufanbiao'
  223.       #MYSQL_PASSWORD: 'gufanbiao'  
  224.     volumes:
  225.       # 挂载数据目录
  226.       - ./data/mysql5/data:/var/lib/mysql
  227.       # 这里的my.cnf配置下面有简单实例
  228.       - ./data/mysql5/conf.d:/etc/mysql/conf.d # 映射配置目录,宿主机:容器
  229.       - ./data/mysql5/log:/var/log/mysql
  230.       - ./data/mysql5/mysql-files:/var/lib/mysql-files
  231.       #- /etc/localtime:/etc/localtime:ro # 让容器的时钟与宿主机时钟同步,避免时间的问题,ro是read only的意思,就是只读。
  232.   postgres:
  233.     image: postgres:14
  234.     #restart: always
  235.     container_name: postgres
  236.     ports:
  237.       - 5432:5432
  238.     environment:
  239.       POSTGRES_DB: grain
  240.       POSTGRES_USER: postgres
  241.       POSTGRES_PASSWORD: postgres
  242.       PGDATA: /var/lib/postgresql/data/pgdata
  243.     volumes:
  244.       - ./data/postgres/data:/var/lib/postgresql/data/pgdata
  245.       #- ./data/postgres/logs:/var/log/postgresql
  246.       #- /etc/localtime:/etc/localtime
  247.   elasticsearch:
  248.     image: docker.elastic.co/elasticsearch/elasticsearch:8.12.2
  249.     container_name: elasticsearch
  250.     #restart: always
  251.     volumes:
  252.       - ./data/es/data:/usr/share/elasticsearch/data
  253.       - ./data/es/logs:/usr/share/elasticsearch/logs
  254.       # 挂载分词器的目录
  255.       - ./data/es/plugins:/usr/share/elasticsearch/plugins
  256.       #- ./data/es/config/jvm.options:/usr/share/elasticsearch/config/jvm.options
  257.       - ./data/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
  258.     ports:
  259.       - "9200:9200"
  260.     environment:
  261.       - discovery.type=single-node
  262.       - bootstrap.memory_lock=true
  263.       - "ES_JAVA_OPTS=-Xms1024m -Xmx2048m"
  264.       - TZ=Asia/Shanghai
  265.       - ELASTIC_PASSWORD=elastic # elastic账号密码
  266.   kibana:
  267.     image: docker.elastic.co/kibana/kibana:8.12.2
  268.     container_name: kibana
  269.     #restart: always
  270.     ports:
  271.       - "5601:5601"
  272.     #environment:在配置文件中设置
  273.     #  - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
  274.     #  - ELASTICSEARCH_USERNAME=kibana
  275.     #  - ELASTICSEARCH_PASSWORD=your_password
  276.     volumes:
  277.       - ./data/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
  278.       - ./data/kibana/data:/usr/share/kibana/data
  279.     depends_on:
  280.       - elasticsearch
  281.   tomcat8:
  282.     image: tomcat:8.5.40
  283.     #image: tomcat:latest
  284.     container_name: tomcat8
  285.     #restart: always
  286.     ports:
  287.       - "8081:8080" #8080和其他服务冲突使用8081
  288.     volumes:
  289.       - ./data/tomcat/logs:/usr/local/tomcat/logs
  290.       - ./data/tomcat/webapps:/usr/local/tomcat/webapps
  291.       #- ./data/tomcat/conf:/usr/local/tomcat/conf
  292.     environment:
  293.       TZ: Asia/Shanghai
  294.   #skywalking 不是单一的组件,无法通过 docker-compose 一个服务直接安装,它由 OAP 服务器、UI 和存储后端(这里使用 elasticsearch)组成
  295.   skyworking-oap:
  296.     image: apache/skywalking-oap-server:9.0.0
  297.     container_name: skywalking-oap
  298.     depends_on:
  299.       - elasticsearch
  300.     links:
  301.       - elasticsearch
  302.     #restart: always
  303.     ports:
  304.       - 11800:11800
  305.       - 12800:12800
  306.     environment:
  307.       SW_STORAGE: elasticsearch
  308.       SW_STORAGE_ES_CLUSTER_NODES: elasticsearch:9200
  309.       SW_HEALTH_CHECKER: default
  310.       SW_TELEMETRY: prometheus
  311.       SW_ES_USER: elastic
  312.       SW_ES_PASSWORD: elastic
  313.     healthcheck:
  314.       test: ["CMD", "./bin/swctl", "ch"]
  315.       interval: 30s
  316.       timeout: 10s
  317.       retries: 3
  318.       start_period: 40s
  319.   skyworking-ui:
  320.     image: apache/skywalking-ui:9.0.0
  321.     container_name: skywalking-ui
  322.     depends_on:
  323.       - skyworking-oap
  324.     links:
  325.       - skyworking-oap
  326.     #restart: always
  327.     ports:
  328.       - 8083:8080
  329.     environment:
  330.       SW_OAP_ADDRESS: skyworking-oap:12800
  331.   #rocketmq
  332.   rmqnamesrv:
  333.     image: foxiswho/rocketmq:server
  334.     container_name: rocketmq-namesrv
  335.     ports:
  336.       - 9876:9876
  337.     volumes:
  338.       - ./data/rocketmq/srv_logs:/opt/logs
  339.       - ./data/rocketmq/srv_store:/opt/store
  340.   rmqbroker:
  341.     image: foxiswho/rocketmq:broker
  342.     container_name: rocketmq-broker
  343.     ports:
  344.       - 10909:10909
  345.       - 10911:10911
  346.     volumes:
  347.       - ./data/rocketmq/broker_logs:/opt/logs
  348.       - ./data/rocketmq/broker_store:/opt/store
  349.       - ./data/rocketmq/broker_conf/broker.conf:/etc/rocketmq/broker.conf
  350.     environment:
  351.         NAMESRV_ADDR: "rmqnamesrv:9876"
  352.         JAVA_OPT_EXT: "-server -Xms128m -Xmx128m -Xmn128m"
  353.     command: mqbroker -c /etc/rocketmq/broker.conf
  354.     depends_on:
  355.       - rmqnamesrv
  356.   rmqconsole:
  357.     image: apacherocketmq/rocketmq-dashboard:latest
  358.     container_name: rocketmq-console
  359.     ports:
  360.       - 8082:8080
  361.     environment:
  362.         JAVA_OPTS: "-Drocketmq.namesrv.addr=rmqnamesrv:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false"
  363.     depends_on:
  364.       - rmqnamesrv
  365.       - rmqbroker
  366.   #kafka
  367.   zookeeper:    #服务名,可自定义
  368.     image: wurstmeister/zookeeper  # 指定使用的镜像名及标签
  369.     container_name: zookeeper    # 指定实例化后的容器名
  370.     #restart: always    # 设置无论遇到什么错,重启容器
  371.     #privileged: true   #让docker 应用容器 获取宿主机root权限
  372.     ports:  # 容器内的映射端口,本地端口:容器内端口
  373.         - 2181:2181  # zookeeper对外的端口
  374.     volumes: # 设置数据卷挂载路径,本地目录:容器内目录,挂载本地文件到容器里面目录,实现数据持久化到宿主机
  375.         - ./data/zk/data:/data #本地目录使用相对路径
  376.         - ./data/zk/datalog:/datalog
  377.         - ./data/zk/conf:/conf
  378.         # - /etc/timezone:/etc/timezone # 指定时区
  379.         # - /etc/localtime:/etc/localtime
  380.     environment:  # 定义环境变量
  381.         ZOO_MY_ID: 1  # zk服务器唯一Id,不能和其它服务器myid一样
  382.         ZOO_SERVERS: server.1=zookeeper:2181    # zk集群的服务器
  383.   kafka:
  384.     image: wurstmeister/kafka
  385.     container_name: kafka
  386.     #restart: always
  387.     privileged: true   
  388.     ports:
  389.       - 9092:9092
  390.     environment:
  391.       KAFKA_ADVERTISED_HOST_NAME: kafka
  392.       KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
  393.       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
  394.     volumes:   
  395.       - ./data/kafka/data:/kafka  
  396.       #- /var/run/docker.sock:/var/run/docker.sock
  397.   kafka-manager:
  398.     image: sheepkiller/kafka-manager:latest
  399.     #restart: always
  400.     container_name: kafa-manager
  401.     hostname: kafka-manager
  402.     #privileged: true
  403.     ports:
  404.       - "9002:9000"
  405.     environment:
  406.       #ZK_HOSTS: 192.168.1.102:2181,192.168.1.102:2182,192.168.1.102:2183
  407.       ZK_HOSTS: zookeeper:2181
  408.       #KAFKA_BROKERS: 192.168.1.102:9092,192.168.1.102:9093,192.168.1.102:9094 # 修改宿主机IP
  409.       KAFKA_BROKERS: kafka:9092
  410.       APPLICATION_SECRET: "random-secret"
  411.       KAFKA_MANAGER_AUTH_ENABLED: "true"
  412.       KAFKA_MANAGER_USERNAME: admin      # web端账号
  413.       KAFKA_MANAGER_PASSWORD: password   # web端密码
复制代码
额外配置图


配置详情

es配置

   es之elasticsearch.yml配置
  1. http.port: 9200
  2. http.host: 0.0.0.0
  3. http.cors.enabled: true
  4. http.cors.allow-origin: "*"
  5. http.cors.allow-headers: Authorization
  6. # 开启安全控制
  7. xpack.security.enabled: true
  8. #xpack.security.transport.ssl.enabled: true
  9. #xpack.security.transport.ssl.keystore.type: PKCS12
  10. #xpack.security.transport.ssl.verification_mode: certificate
  11. #xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
  12. #xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
  13. #xpack.security.transport.ssl.truststore.type: PKCS12
  14. #xpack.security.audit.enabled: true
  15. path.logs: /usr/share/elasticsearch/logs
复制代码
  es8版本的密码和之前的版本有些许区别,需要单独设置
    es之jvm.options配置
  1. ## JVM configuration
  2. ################################################################
  3. ## IMPORTANT: JVM heap size
  4. ################################################################
  5. ##
  6. ## You should always set the min and max JVM heap
  7. ## size to the same value. For example, to set
  8. ## the heap to 4 GB, set:
  9. ##
  10. ## -Xms4g
  11. ## -Xmx4g
  12. ##
  13. ## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
  14. ## for more information
  15. ##
  16. ################################################################
  17. # Xms represents the initial size of total heap space
  18. # Xmx represents the maximum size of total heap space
  19. -Xms1g
  20. -Xmx1g
  21. -XX:+IgnoreUnrecognizedVMOptions
  22. ################################################################
  23. ## Expert settings
  24. ################################################################
  25. ##
  26. ## All settings below this section are considered
  27. ## expert settings. Don't tamper with them unless
  28. ## you understand what you are doing
  29. ##
  30. ################################################################
  31. ## GC configuration
  32. -XX:+UseConcMarkSweepGC
  33. -XX:CMSInitiatingOccupancyFraction=75
  34. -XX:+UseCMSInitiatingOccupancyOnly
  35. ## G1GC Configuration
  36. # NOTE: G1GC is only supported on JDK version 10 or later.
  37. # To use G1GC uncomment the lines below.
  38. # -XX:-UseConcMarkSweepGC
  39. # -XX:-UseCMSInitiatingOccupancyOnly
  40. # -XX:+UseG1GC
  41. # -XX:InitiatingHeapOccupancyPercent=75
  42. ## optimizations
  43. # pre-touch memory pages used by the JVM during initialization
  44. -XX:+AlwaysPreTouch
  45. ## basic
  46. # explicitly set the stack size
  47. -Xss1m
  48. # set to headless, just in case
  49. -Djava.awt.headless=true
  50. # ensure UTF-8 encoding by default (e.g. filenames)
  51. -Dfile.encoding=UTF-8
  52. # use our provided JNA always versus the system one
  53. -Djna.nosys=true
  54. # turn off a JDK optimization that throws away stack traces for common
  55. # exceptions because stack traces are important for debugging
  56. -XX:-OmitStackTraceInFastThrow
  57. # flags to configure Netty
  58. -Dio.netty.noUnsafe=true
  59. -Dio.netty.noKeySetOptimization=true
  60. -Dio.netty.recycler.maxCapacityPerThread=0
  61. # log4j 2
  62. -Dlog4j.shutdownHookEnabled=false
  63. -Dlog4j2.disable.jmx=true
  64. -Djava.io.tmpdir=${ES_TMPDIR}
  65. ## heap dumps
  66. # generate a heap dump when an allocation from the Java heap fails
  67. # heap dumps are created in the working directory of the JVM
  68. -XX:+HeapDumpOnOutOfMemoryError
  69. # specify an alternative path for heap dumps; ensure the directory exists and
  70. # has sufficient space
  71. -XX:HeapDumpPath=data
  72. # specify an alternative path for JVM fatal error logs
  73. -XX:ErrorFile=logs/hs_err_pid%p.log
  74. ## JDK 8 GC logging
  75. -XX:+PrintGCDetails
  76. -XX:+PrintGCDateStamps
  77. -XX:+PrintTenuringDistribution
  78. -XX:+PrintGCApplicationStoppedTime
  79. -Xloggc:logs/gc.log
  80. -XX:+UseGCLogFileRotation
  81. -XX:NumberOfGCLogFiles=32
  82. -XX:GCLogFileSize=64m
  83. # JDK 9+ GC logging
  84. 9-:-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m
  85. # due to internationalization enhancements in JDK 9 Elasticsearch need to set the provider to COMPAT otherwise
  86. # time/date parsing will break in an incompatible way for some date patterns and locals
  87. -Djava.locale.providers=COMPAT
  88. # temporary workaround for C2 bug with JDK 10 on hardware with AVX-512
  89. -XX:UseAVX=2
复制代码
kibana配置

   kibana之kibana.yml配置
  1. server.name: kibana
  2. # kibana的主机地址 0.0.0.0可表示监听所有IP
  3. server.host: "0.0.0.0"
  4. #
  5. # 这边设置自己es的地址,
  6. #elasticsearch.hosts: [ "http://127.0.0.1:9200" ]
  7. elasticsearch.hosts: [ "http://elasticsearch:9200" ]
  8. elasticsearch.username: 'elastic'
  9. elasticsearch.password: 'elastic'
  10. # # 显示登陆页面
  11. xpack.monitoring.ui.container.elasticsearch.enabled: true
  12. # 开启中文模式
  13. i18n.locale: "zh-CN"
复制代码
mysql5配置

   mysql5之my.cnf配置
  1. [mysql]
  2. # 默认字符集
  3. default-character-set=utf8mb4
  4. [client]
  5. # 客户端使用的端口号
  6. port=3306
  7. socket=/var/run/mysqld/mysqld.sock
  8. default-character-set=utf8mb4
  9. [mysqld]
  10. # 限制 MySQL 服务器只能从 /var/lib/mysql-files 目录读取文件或将文件写入该目录
  11. secure-file-priv=/var/lib/mysql-files
  12. # docker mysql 默认配置
  13. datadir=/var/lib/mysql
  14. # 开启二进制日志功能
  15. log-bin=/var/lib/mysql/mysql-bin
  16. # InnoDB 数据文件存放目录
  17. innodb_data_home_dir=/var/lib/mysql
  18. # InnoDB 日志文件存放目录
  19. innodb_log_group_home_dir=/var/lib/mysql
  20. # MySQL 错误日志文件路径
  21. log-error=/var/lib/mysql/mysql.log
  22. # 存放 MySQL 进程 ID 的文件路径
  23. pid-file=/var/lib/mysql/mysql.pid
  24. socket=/var/run/mysqld/mysqld.sock
  25. user=mysql
  26. # 用于控制是否允许 MySQL 服务器使用符号链接
  27. symbolic-links=0
  29. # 禁用内部主机信息缓存(skip-host-cache 表示跳过缓存,而非使用缓存)
  29. skip-host-cache
  30. # 权限验证时不将客户端主机名解析为 IP 地址,仅使用 IP 进行验证
  31. skip-name-resolve
  32. #数据库服务器id,这个id用来在主从服务器中标记唯一mysql服务器
  33. server-id=1
  34. #系统数据库编码设置,排序规则
  35. character_set_server=utf8mb4
  36. collation_server=utf8mb4_bin
  37. # 日志时间系统时间
  38. log_timestamps=SYSTEM
  39. # 默认时区东八区
  40. default-time_zone='+8:00'
  41. # 表名大小写不敏感
  42. lower_case_table_names=1
  43. # 自动提交所有事务
  44. autocommit=1
  45. # 跳过外部锁定(external locking)
  46. skip-external-locking
  47. # 启用显式默认时间戳
  48. explicit_defaults_for_timestamp=ON
  49. #默认sql模式,严格模式
  50. #sql_mode = ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,
  51. #NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
  52. #ONLY_FULL_GROUP_BY
  53. #NO_ZERO_IN_DATE 不允许年月为0
  54. #NO_ZERO_DATE 不允许插入年月为0的日期
  55. #ERROR_FOR_DIVISION_BY_ZERO 在INSERT或UPDATE过程中,如果数据被零除,则产生错误而非警告。如 果未给出该模式,那么数据被零除时MySQL返回NULL
  56. #NO_ENGINE_SUBSTITUTION 不使用默认的存储引擎替代
  57. sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
  58. #是MySQL执行排序使用的缓冲大小。如果想要增加ORDER BY的速度,首先看是否可以让MySQL使用索引而不是额外的排序阶段
  59. #如果不能,可以尝试增加sort_buffer_size变量的大小
  60. sort_buffer_size=16M
  61. #应用程序经常会出现一些两表(或多表)Join的操作需求,MySQL在完成某些 Join 需求的时候(all/index join),
  62. #为了减少参与Join的“被驱动表”的读取次数以提高性能,需要使用到 Join Buffer 来协助完成 Join操作。
  63. #当 Join Buffer 太小,MySQL 不会将该 Buffer 存入磁盘文件,而是先将Join Buffer中的结果集与需要 Join 的表进行 Join 操作
  64. #然后清空 Join Buffer 中的数据,继续将剩余的结果集写入此 Buffer 中,
  65. #如此往复。这势必会造成被驱动表需要被多次读取,成倍增加 IO 访问,降低效率。
  66. #若果多表连接需求大,则这个值要设置大一点。
  67. join_buffer_size=16M
  68. #索引块的缓冲区大默认16M
  69. key_buffer_size=64M
  70. # 消息缓冲区会用到该列,该值太小则会在处理大包时产生错误。如果使用大的text,BLOB列,必须增加该值
  71. max_allowed_packet=16M
  72. # 最大连接数
  73. max_connections=3000
  74. # 连接错误最大数量
  75. max_connect_errors=100
  76. #表描述符缓存大小,可减少文件打开/关闭次数,一般max_connections*2。
  77. table_open_cache=6000
  78. #MySQL 缓存 table 句柄的分区的个数,每个cache_instance<=table_open_cache/table_open_cache_instances
  79. table_open_cache_instances=32
  80. #mysql打开最大文件数
  81. open_files_limit=65535
  82. #慢查询,开发调式阶段才需要开启慢日志功能。上线后关闭
  83. slow_query_log=OFF
  84. # 创建表时使用的默认存储引擎
  85. default_storage_engine=InnoDB
  86. # InnoDB 数据文件路径设置
  87. innodb_data_file_path=ibdata1:10M:autoextend
  88. # InnoDB 缓冲池大小
  89. innodb_buffer_pool_size=2G
  90. # InnoDB 日志文件大小
  91. innodb_log_file_size=512M
  92. # InnoDB 日志缓冲区大小
  93. innodb_log_buffer_size=16M
  94. # InnoDB 每次提交时刷新日志
  95. innodb_flush_log_at_trx_commit=1
  96. # InnoDB 加锁等待超时时间(秒)
  97. innodb_lock_wait_timeout=60
  98. # 网络缓冲区长度
  99. net_buffer_length=32K
  100. # 读取缓冲区大小
  101. read_buffer_size=16M
  102. # 随机读取缓冲区大小
  103. read_rnd_buffer_size=1024K
  104. # MyISAM 排序缓冲区大小
  105. myisam_sort_buffer_size=265M
  106. # 线程缓存大小
  107. thread_cache_size=512
  108. # 临时表大小
  109. tmp_table_size=512M
  110. [mysqldump]
  111. # 快速导出数据
  112. quick
  113. # 允许的最大数据包大小
  114. max_allowed_packet=16M
  115. [myisamchk]
  116. # 键缓存大小
  117. key_buffer_size=512M
  118. # 排序缓冲区大小
  119. sort_buffer_size=16M
  120. # 读取缓冲区大小
  121. read_buffer=16M
  122. # 写入缓冲区大小
  123. write_buffer=16M
  124. [mysqlhotcopy]
  125. # 交互式超时时间
  126. interactive-timeout
复制代码
mysql8配置

   mysql8之my.cnf配置
  1. ###### [client]配置模块 ######
  2. [client]
  3. default-character-set=utf8mb4
  4. socket=/var/lib/mysql/mysql.sock
  5. ###### [mysql]配置模块 ######
  6. [mysql]
  7. # 设置MySQL客户端默认字符集
  8. default-character-set=utf8mb4
  9. socket=/var/lib/mysql/mysql.sock
  10. ###### [mysqld]配置模块 ######
  11. [mysqld]
  12. port=3306
  13. user=mysql
  14. # 设置sql模式 sql_mode模式引起的分组查询出现*this is incompatible with sql_mode=only_full_group_by,这里最好剔除ONLY_FULL_GROUP_BY
  15. sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
  16. datadir=/var/lib/mysql
  17. socket=/var/lib/mysql/mysql.sock
  18. server-id = 1
  19. # MySQL8 的密码认证插件 如果不设置低版本navicat无法连接
  20. default_authentication_plugin=mysql_native_password
  21. # 禁用符号链接以防止各种安全风险
  22. symbolic-links=0
  23. # 允许最大连接数
  24. max_connections=1000
  25. # 服务端使用的字符集默认为8比特编码的latin1字符集
  26. character-set-server=utf8mb4
  27. # 创建新表时将使用的默认存储引擎
  28. default-storage-engine=INNODB
  29. # 0:表名按创建时的大小写存储,比较时区分大小写(设为 1 才是小写存储且比较不区分大小写)
  30. lower_case_table_names=0
  31. max_allowed_packet=16M
  32. # 设置时区
  33. default-time_zone='+8:00'
复制代码
nginx配置

   nginx之nginx.conf配置
  1. worker_processes  1;
  2. events {
  3.     worker_connections  1024;
  4. }
  5. http {
  6.     include       mime.types;
  7.     default_type  application/octet-stream;
  8.     sendfile        on;
  9.     keepalive_timeout  65;
  10.     server {
  11.         listen       80;
  12.         server_name  localhost;
  13.         location / {
  14.                         client_max_body_size 100m;
  15.                         root /usr/share/nginx/html/totodoc;
  16.             index  index.html index.htm;
  17.         }
  18.         error_page   500 502 503 504  /50x.html;
  19.         location = /50x.html {
  20.             root /usr/share/nginx/html;
  21.         }
  22.     }
  23. }
复制代码
Redis配置

   Redis之redis.conf配置
  1. # Redis configuration file example.
  2. #
  3. # Note that in order to read the configuration file, Redis must be
  4. # started with the file path as first argument:
  5. #
  6. # ./redis-server /path/to/redis.conf
  7. # Note on units: when memory size is needed, it is possible to specify
  8. # it in the usual form of 1k 5GB 4M and so forth:
  9. #
  10. # 1k => 1000 bytes
  11. # 1kb => 1024 bytes
  12. # 1m => 1000000 bytes
  13. # 1mb => 1024*1024 bytes
  14. # 1g => 1000000000 bytes
  15. # 1gb => 1024*1024*1024 bytes
  16. #
  17. # units are case insensitive so 1GB 1Gb 1gB are all the same.
  18. ################################## INCLUDES ###################################
  19. # Include one or more other config files here.  This is useful if you
  20. # have a standard template that goes to all Redis servers but also need
  21. # to customize a few per-server settings.  Include files can include
  22. # other files, so use this wisely.
  23. #
  24. # Note that option "include" won't be rewritten by command "CONFIG REWRITE"
  25. # from admin or Redis Sentinel. Since Redis always uses the last processed
  26. # line as value of a configuration directive, you'd better put includes
  27. # at the beginning of this file to avoid overwriting config change at runtime.
  28. #
  29. # If instead you are interested in using includes to override configuration
  30. # options, it is better to use include as the last line.
  31. #
  32. # Included paths may contain wildcards. All files matching the wildcards will
  33. # be included in alphabetical order.
  34. # Note that if an include path contains a wildcards but no files match it when
  35. # the server is started, the include statement will be ignored and no error will
  36. # be emitted.  It is safe, therefore, to include wildcard files from empty
  37. # directories.
  38. #
  39. # include /path/to/local.conf
  40. # include /path/to/other.conf
  41. # include /path/to/fragments/*.conf
  42. #
  43. ################################## MODULES #####################################
  44. # Load modules at startup. If the server is not able to load modules
  45. # it will abort. It is possible to use multiple loadmodule directives.
  46. #
  47. # loadmodule /path/to/my_module.so
  48. # loadmodule /path/to/other_module.so
  49. ################################## NETWORK #####################################
  50. # By default, if no "bind" configuration directive is specified, Redis listens
  51. # for connections from all available network interfaces on the host machine.
  52. # It is possible to listen to just one or multiple selected interfaces using
  53. # the "bind" configuration directive, followed by one or more IP addresses.
  54. # Each address can be prefixed by "-", which means that redis will not fail to
  55. # start if the address is not available. Being not available only refers to
  56. # addresses that does not correspond to any network interface. Addresses that
  57. # are already in use will always fail, and unsupported protocols will always BE
  58. # silently skipped.
  59. #
  60. # Examples:
  61. #
  62. # bind 192.168.1.100 10.0.0.1     # listens on two specific IPv4 addresses
  63. # bind 127.0.0.1 ::1              # listens on loopback IPv4 and IPv6
  64. # bind * -::*                     # like the default, all available interfaces
  65. #
  66. # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
  67. # internet, binding to all the interfaces is dangerous and will expose the
  68. # instance to everybody on the internet. So by default we uncomment the
  69. # following bind directive, that will force Redis to listen only on the
  70. # IPv4 and IPv6 (if available) loopback interface addresses (this means Redis
  71. # will only be able to accept client connections from the same host that it is
  72. # running on).
  73. #
  74. # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
  75. # COMMENT OUT THE FOLLOWING LINE.
  76. #
  77. # You will also need to set a password unless you explicitly disable protected
  78. # mode.
  79. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  80. bind 0.0.0.0
  81. # By default, outgoing connections (from replica to master, from Sentinel to
  82. # instances, cluster bus, etc.) are not bound to a specific local address. In
  83. # most cases, this means the operating system will handle that based on routing
  84. # and the interface through which the connection goes out.
  85. #
  86. # Using bind-source-addr it is possible to configure a specific address to bind
  87. # to, which may also affect how the connection gets routed.
  88. #
  89. # Example:
  90. #
  91. # bind-source-addr 10.0.0.1
  92. # Protected mode is a layer of security protection, in order to avoid that
  93. # Redis instances left open on the internet are accessed and exploited.
  94. #
  95. # When protected mode is on and the default user has no password, the server
  96. # only accepts local connections from the IPv4 address (127.0.0.1), IPv6 address
  97. # (::1) or Unix domain sockets.
  98. #
  99. # By default protected mode is enabled. You should disable it only if
  100. # you are sure you want clients from other hosts to connect to Redis
  101. # even if no authentication is configured.
  102. protected-mode no
  103. # Redis uses default hardened security configuration directives to reduce the
  104. # attack surface on innocent users. Therefore, several sensitive configuration
  105. # directives are immutable, and some potentially-dangerous commands are blocked.
  106. #
  107. # Configuration directives that control files that Redis writes to (e.g., 'dir'
  108. # and 'dbfilename') and that aren't usually modified during runtime
  109. # are protected by making them immutable.
  110. #
  111. # Commands that can increase the attack surface of Redis and that aren't usually
  112. # called by users are blocked by default.
  113. #
  114. # These can be exposed to either all connections or just local ones by setting
  115. # each of the configs listed below to either of these values:
  116. #
  117. # no    - Block for any connection (remain immutable)
  118. # yes   - Allow for any connection (no protection)
  119. # local - Allow only for local connections. Ones originating from the
  120. #         IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets.
  121. #
  122. # enable-protected-configs no
  123. # enable-debug-command no
  124. # enable-module-command no
  125. # Accept connections on the specified port, default is 6379 (IANA #815344).
  126. # If port 0 is specified Redis will not listen on a TCP socket.
# NOTE(review): the docker-compose file publishes "16379:16379", but the server
# listens on 6379 here — presumably the mapping should be "16379:6379" (or this
# should read "port 16379"); verify and align the two.
  127. port 6379
  128. # TCP listen() backlog.
  129. #
  130. # In high requests-per-second environments you need a high backlog in order
  131. # to avoid slow clients connection issues. Note that the Linux kernel
  132. # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
  133. # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
  134. # in order to get the desired effect.
  135. tcp-backlog 511
  136. # Unix socket.
  137. #
  138. # Specify the path for the Unix socket that will be used to listen for
  139. # incoming connections. There is no default, so Redis will not listen
  140. # on a unix socket when not specified.
  141. #
  142. # unixsocket /run/redis.sock
  143. # unixsocketperm 700
  144. # Close the connection after a client is idle for N seconds (0 to disable)
  145. timeout 0
  146. # TCP keepalive.
  147. #
  148. # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
  149. # of communication. This is useful for two reasons:
  150. #
  151. # 1) Detect dead peers.
  152. # 2) Force network equipment in the middle to consider the connection to be
  153. #    alive.
  154. #
  155. # On Linux, the specified value (in seconds) is the period used to send ACKs.
  156. # Note that to close the connection the double of the time is needed.
  157. # On other kernels the period depends on the kernel configuration.
  158. #
  159. # A reasonable value for this option is 300 seconds, which is the new
  160. # Redis default starting with Redis 3.2.1.
  161. tcp-keepalive 300
  162. # Apply OS-specific mechanism to mark the listening socket with the specified
  163. # ID, to support advanced routing and filtering capabilities.
  164. #
  165. # On Linux, the ID represents a connection mark.
  166. # On FreeBSD, the ID represents a socket cookie ID.
  167. # On OpenBSD, the ID represents a route table ID.
  168. #
  169. # The default value is 0, which implies no marking is required.
  170. # socket-mark-id 0
  171. ################################# TLS/SSL #####################################
  172. # By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
  173. # directive can be used to define TLS-listening ports. To enable TLS on the
  174. # default port, use:
  175. #
  176. # port 0
  177. # tls-port 6379
  178. # Configure a X.509 certificate and private key to use for authenticating the
  179. # server to connected clients, masters or cluster peers.  These files should be
  180. # PEM formatted.
  181. #
  182. # tls-cert-file redis.crt
  183. # tls-key-file redis.key
  184. #
  185. # If the key file is encrypted using a passphrase, it can be included here
  186. # as well.
  187. #
  188. # tls-key-file-pass secret
  189. # Normally Redis uses the same certificate for both server functions (accepting
  190. # connections) and client functions (replicating from a master, establishing
  191. # cluster bus connections, etc.).
  192. #
  193. # Sometimes certificates are issued with attributes that designate them as
  194. # client-only or server-only certificates. In that case it may be desired to use
  195. # different certificates for incoming (server) and outgoing (client)
  196. # connections. To do that, use the following directives:
  197. #
  198. # tls-client-cert-file client.crt
  199. # tls-client-key-file client.key
  200. #
  201. # If the key file is encrypted using a passphrase, it can be included here
  202. # as well.
  203. #
  204. # tls-client-key-file-pass secret
  205. # Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange,
  206. # required by older versions of OpenSSL (<3.0). Newer versions do not require
  207. # this configuration and recommend against it.
  208. #
  209. # tls-dh-params-file redis.dh
  210. # Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
  211. # clients and peers.  Redis requires an explicit configuration of at least one
  212. # of these, and will not implicitly use the system wide configuration.
  213. #
  214. # tls-ca-cert-file ca.crt
  215. # tls-ca-cert-dir /etc/ssl/certs
  216. # By default, clients (including replica servers) on a TLS port are required
  217. # to authenticate using valid client side certificates.
  218. #
  219. # If "no" is specified, client certificates are not required and not accepted.
  220. # If "optional" is specified, client certificates are accepted and must be
  221. # valid if provided, but are not required.
  222. #
  223. # tls-auth-clients no
  224. # tls-auth-clients optional
  225. # By default, a Redis replica does not attempt to establish a TLS connection
  226. # with its master.
  227. #
  228. # Use the following directive to enable TLS on replication links.
  229. #
  230. # tls-replication yes
  231. # By default, the Redis Cluster bus uses a plain TCP connection. To enable
  232. # TLS for the bus protocol, use the following directive:
  233. #
  234. # tls-cluster yes
  235. # By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
  236. # that older formally deprecated versions are kept disabled to reduce the attack surface.
  237. # You can explicitly specify TLS versions to support.
  238. # Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
  239. # "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
  240. # To enable only TLSv1.2 and TLSv1.3, use:
  241. #
  242. # tls-protocols "TLSv1.2 TLSv1.3"
  243. # Configure allowed ciphers.  See the ciphers(1ssl) manpage for more information
  244. # about the syntax of this string.
  245. #
  246. # Note: this configuration applies only to <= TLSv1.2.
  247. #
  248. # tls-ciphers DEFAULT:!MEDIUM
  249. # Configure allowed TLSv1.3 ciphersuites.  See the ciphers(1ssl) manpage for more
  250. # information about the syntax of this string, and specifically for TLSv1.3
  251. # ciphersuites.
  252. #
  253. # tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
  254. # When choosing a cipher, use the server's preference instead of the client
  255. # preference. By default, the server follows the client's preference.
  256. #
  257. # tls-prefer-server-ciphers yes
  258. # By default, TLS session caching is enabled to allow faster and less expensive
  259. # reconnections by clients that support it. Use the following directive to disable
  260. # caching.
  261. #
  262. # tls-session-caching no
  263. # Change the default number of TLS sessions cached. A zero value sets the cache
  264. # to unlimited size. The default size is 20480.
  265. #
  266. # tls-session-cache-size 5000
  267. # Change the default timeout of cached TLS sessions. The default timeout is 300
  268. # seconds.
  269. #
  270. # tls-session-cache-timeout 60
  271. ################################# GENERAL #####################################
  272. # By default Redis does not run as a daemon. Use 'yes' if you need it.
  273. # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
  274. # When Redis is supervised by upstart or systemd, this parameter has no impact.
  275. daemonize no
  276. # If you run Redis from upstart or systemd, Redis can interact with your
  277. # supervision tree. Options:
  278. #   supervised no      - no supervision interaction
  279. #   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
  280. #                        requires "expect stop" in your upstart job config
  281. #   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
  282. #                        on startup, and updating Redis status on a regular
  283. #                        basis.
  284. #   supervised auto    - detect upstart or systemd method based on
  285. #                        UPSTART_JOB or NOTIFY_SOCKET environment variables
  286. # Note: these supervision methods only signal "process is ready."
  287. #       They do not enable continuous pings back to your supervisor.
  288. #
  289. # The default is "no". To run under upstart/systemd, you can simply uncomment
  290. # the line below:
  291. #
  292. # supervised auto
  293. # If a pid file is specified, Redis writes it where specified at startup
  294. # and removes it at exit.
  295. #
  296. # When the server runs non daemonized, no pid file is created if none is
  297. # specified in the configuration. When the server is daemonized, the pid file
  298. # is used even if not specified, defaulting to "/var/run/redis.pid".
  299. #
  300. # Creating a pid file is best effort: if Redis is not able to create it
  301. # nothing bad happens, the server will start and run normally.
  302. #
  303. # Note that on modern Linux systems "/run/redis.pid" is more conforming
  304. # and should be used instead.
  305. pidfile /var/run/redis_6379.pid
  306. # Specify the server verbosity level.
  307. # This can be one of:
  308. # debug (a lot of information, useful for development/testing)
  309. # verbose (many rarely useful info, but not a mess like the debug level)
  310. # notice (moderately verbose, what you want in production probably)
  311. # warning (only very important / critical messages are logged)
  312. # nothing (nothing is logged)
  313. loglevel notice
  314. # Specify the log file name. Also the empty string can be used to force
  315. # Redis to log on the standard output. Note that if you use standard
  316. # output for logging but daemonize, logs will be sent to /dev/null
  317. logfile ""
  318. # To enable logging to the system logger, just set 'syslog-enabled' to yes,
  319. # and optionally update the other syslog parameters to suit your needs.
  320. # syslog-enabled no
  321. # Specify the syslog identity.
  322. # syslog-ident redis
  323. # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
  324. # syslog-facility local0
  325. # To disable the built in crash log, which will possibly produce cleaner core
  326. # dumps when they are needed, uncomment the following:
  327. #
  328. # crash-log-enabled no
  329. # To disable the fast memory check that's run as part of the crash log, which
  330. # will possibly let redis terminate sooner, uncomment the following:
  331. #
  332. # crash-memcheck-enabled no
  333. # Set the number of databases. The default database is DB 0, you can select
  334. # a different one on a per-connection basis using SELECT <dbid> where
  335. # dbid is a number between 0 and 'databases'-1
  336. databases 16
  337. # By default Redis shows an ASCII art logo only when started to log to the
  338. # standard output and if the standard output is a TTY and syslog logging is
  339. # disabled. Basically this means that normally a logo is displayed only in
  340. # interactive sessions.
  341. #
  342. # However it is possible to force the pre-4.0 behavior and always show a
  343. # ASCII art logo in startup logs by setting the following option to yes.
  344. always-show-logo no
  345. # By default, Redis modifies the process title (as seen in 'top' and 'ps') to
  346. # provide some runtime information. It is possible to disable this and leave
  347. # the process name as executed by setting the following to no.
  348. set-proc-title yes
  349. # When changing the process title, Redis uses the following template to construct
  350. # the modified title.
  351. #
  352. # Template variables are specified in curly brackets. The following variables are
  353. # supported:
  354. #
  355. # {title}           Name of process as executed if parent, or type of child process.
  356. # {listen-addr}     Bind address or '*' followed by TCP or TLS port listening on, or
  357. #                   Unix socket if only that's available.
  358. # {server-mode}     Special mode, i.e. "[sentinel]" or "[cluster]".
  359. # {port}            TCP port listening on, or 0.
  360. # {tls-port}        TLS port listening on, or 0.
  361. # {unixsocket}      Unix domain socket listening on, or "".
  362. # {config-file}     Name of configuration file used.
  363. #
  364. proc-title-template "{title} {listen-addr} {server-mode}"
  365. # Set the local environment which is used for string comparison operations, and
  366. # also affect the performance of Lua scripts. Empty String indicates the locale
  367. # is derived from the environment variables.
  368. locale-collate ""
  369. ################################ SNAPSHOTTING  ################################
  370. # Save the DB to disk.
  371. #
  372. # save <seconds> <changes> [<seconds> <changes> ...]
  373. #
  374. # Redis will save the DB if the given number of seconds elapsed and it
  375. # surpassed the given number of write operations against the DB.
  376. #
  377. # Snapshotting can be completely disabled with a single empty string argument
  378. # as in following example:
  379. #
  380. # save ""
  381. #
  382. # Unless specified otherwise, by default Redis will save the DB:
  383. #   * After 3600 seconds (an hour) if at least 1 change was performed
  384. #   * After 300 seconds (5 minutes) if at least 100 changes were performed
  385. #   * After 60 seconds if at least 10000 changes were performed
  386. #
  387. # You can set these explicitly by uncommenting the following line.
  388. #
  389. # save 3600 1 300 100 60 10000
  390. # By default Redis will stop accepting writes if RDB snapshots are enabled
  391. # (at least one save point) and the latest background save failed.
  392. # This will make the user aware (in a hard way) that data is not persisting
  393. # on disk properly, otherwise chances are that no one will notice and some
  394. # disaster will happen.
  395. #
  396. # If the background saving process will start working again Redis will
  397. # automatically allow writes again.
  398. #
  399. # However if you have setup your proper monitoring of the Redis server
  400. # and persistence, you may want to disable this feature so that Redis will
  401. # continue to work as usual even if there are problems with disk,
  402. # permissions, and so forth.
  403. stop-writes-on-bgsave-error yes
  404. # Compress string objects using LZF when dump .rdb databases?
  405. # By default compression is enabled as it's almost always a win.
  406. # If you want to save some CPU in the saving child set it to 'no' but
  407. # the dataset will likely be bigger if you have compressible values or keys.
  408. rdbcompression yes
  409. # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
  410. # This makes the format more resistant to corruption but there is a performance
  411. # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
  412. # for maximum performances.
  413. #
  414. # RDB files created with checksum disabled have a checksum of zero that will
  415. # tell the loading code to skip the check.
  416. rdbchecksum yes
  417. # Enables or disables full sanitization checks for ziplist and listpack etc when
  418. # loading an RDB or RESTORE payload. This reduces the chances of an assertion or
  419. # crash later on while processing commands.
  420. # Options:
  421. #   no         - Never perform full sanitization
  422. #   yes        - Always perform full sanitization
  423. #   clients    - Perform full sanitization only for user connections.
  424. #                Excludes: RDB files, RESTORE commands received from the master
  425. #                connection, and client connections which have the
  426. #                skip-sanitize-payload ACL flag.
  427. # The default should be 'clients' but since it currently affects cluster
  428. # resharding via MIGRATE, it is temporarily set to 'no' by default.
  429. #
  430. # sanitize-dump-payload no
  431. # The filename where to dump the DB
  432. dbfilename dump.rdb
  433. # Remove RDB files used by replication in instances without persistence
  434. # enabled. By default this option is disabled, however there are environments
  435. # where for regulations or other security concerns, RDB files persisted on
  436. # disk by masters in order to feed replicas, or stored on disk by replicas
  437. # in order to load them for the initial synchronization, should be deleted
  438. # ASAP. Note that this option ONLY WORKS in instances that have both AOF
  439. # and RDB persistence disabled, otherwise is completely ignored.
  440. #
  441. # An alternative (and sometimes better) way to obtain the same effect is
  442. # to use diskless replication on both master and replicas instances. However
  443. # in the case of replicas, diskless is not always an option.
  444. rdb-del-sync-files no
  445. # The working directory.
  446. #
  447. # The DB will be written inside this directory, with the filename specified
  448. # above using the 'dbfilename' configuration directive.
  449. #
  450. # The Append Only File will also be created inside this directory.
  451. #
  452. # Note that you must specify a directory here, not a file name.
  453. dir ./
  454. ################################# REPLICATION #################################
  455. # Master-Replica replication. Use replicaof to make a Redis instance a copy of
  456. # another Redis server. A few things to understand ASAP about Redis replication.
  457. #
  458. #   +------------------+      +---------------+
  459. #   |      Master      | ---> |    Replica    |
  460. #   | (receive writes) |      |  (exact copy) |
  461. #   +------------------+      +---------------+
  462. #
  463. # 1) Redis replication is asynchronous, but you can configure a master to
  464. #    stop accepting writes if it appears to be not connected with at least
  465. #    a given number of replicas.
  466. # 2) Redis replicas are able to perform a partial resynchronization with the
  467. #    master if the replication link is lost for a relatively small amount of
  468. #    time. You may want to configure the replication backlog size (see the next
  469. #    sections of this file) with a sensible value depending on your needs.
  470. # 3) Replication is automatic and does not need user intervention. After a
  471. #    network partition replicas automatically try to reconnect to masters
  472. #    and resynchronize with them.
  473. #
  474. # replicaof <masterip> <masterport>
  475. # If the master is password protected (using the "requirepass" configuration
  476. # directive below) it is possible to tell the replica to authenticate before
  477. # starting the replication synchronization process, otherwise the master will
  478. # refuse the replica request.
  479. #
  480. # masterauth <master-password>
  481. #
  482. # However this is not enough if you are using Redis ACLs (for Redis version
  483. # 6 or greater), and the default user is not capable of running the PSYNC
  484. # command and/or other commands needed for replication. In this case it's
  485. # better to configure a special user to use with replication, and specify the
  486. # masteruser configuration as such:
  487. #
  488. # masteruser <username>
  489. #
  490. # When masteruser is specified, the replica will authenticate against its
  491. # master using the new AUTH form: AUTH <username> <password>.
  492. # When a replica loses its connection with the master, or when the replication
  493. # is still in progress, the replica can act in two different ways:
  494. #
  495. # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
  496. #    still reply to client requests, possibly with out of date data, or the
  497. #    data set may just be empty if this is the first synchronization.
  498. #
  499. # 2) If replica-serve-stale-data is set to 'no' the replica will reply with error
  500. #    "MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'"
  501. #    to all data access commands, excluding commands such as:
  502. #    INFO, REPLICAOF, AUTH, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
  503. #    UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
  504. #    HOST and LATENCY.
  505. #
  506. replica-serve-stale-data yes
  507. # You can configure a replica instance to accept writes or not. Writing against
  508. # a replica instance may be useful to store some ephemeral data (because data
  509. # written on a replica will be easily deleted after resync with the master) but
  510. # may also cause problems if clients are writing to it because of a
  511. # misconfiguration.
  512. #
  513. # Since Redis 2.6 by default replicas are read-only.
  514. #
  515. # Note: read only replicas are not designed to be exposed to untrusted clients
  516. # on the internet. It's just a protection layer against misuse of the instance.
  517. # Still a read only replica exports by default all the administrative commands
  518. # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
  519. # security of read only replicas using 'rename-command' to shadow all the
  520. # administrative / dangerous commands.
  521. replica-read-only yes
  522. # Replication SYNC strategy: disk or socket.
  523. #
  524. # New replicas and reconnecting replicas that are not able to continue the
  525. # replication process just receiving differences, need to do what is called a
  526. # "full synchronization". An RDB file is transmitted from the master to the
  527. # replicas.
  528. #
  529. # The transmission can happen in two different ways:
  530. #
  531. # 1) Disk-backed: The Redis master creates a new process that writes the RDB
  532. #                 file on disk. Later the file is transferred by the parent
  533. #                 process to the replicas incrementally.
  534. # 2) Diskless: The Redis master creates a new process that directly writes the
  535. #              RDB file to replica sockets, without touching the disk at all.
  536. #
  537. # With disk-backed replication, while the RDB file is generated, more replicas
  538. # can be queued and served with the RDB file as soon as the current child
  539. # producing the RDB file finishes its work. With diskless replication instead
  540. # once the transfer starts, new replicas arriving will be queued and a new
  541. # transfer will start when the current one terminates.
  542. #
  543. # When diskless replication is used, the master waits a configurable amount of
  544. # time (in seconds) before starting the transfer in the hope that multiple
  545. # replicas will arrive and the transfer can be parallelized.
  546. #
  547. # With slow disks and fast (large bandwidth) networks, diskless replication
  548. # works better.
  549. repl-diskless-sync yes
  550. # When diskless replication is enabled, it is possible to configure the delay
  551. # the server waits in order to spawn the child that transfers the RDB via socket
  552. # to the replicas.
  553. #
  554. # This is important since once the transfer starts, it is not possible to serve
  555. # new replicas arriving, that will be queued for the next RDB transfer, so the
  556. # server waits a delay in order to let more replicas arrive.
  557. #
  558. # The delay is specified in seconds, and by default is 5 seconds. To disable
  559. # it entirely just set it to 0 seconds and the transfer will start ASAP.
  560. repl-diskless-sync-delay 5
  561. # When diskless replication is enabled with a delay, it is possible to let
  562. # the replication start before the maximum delay is reached if the maximum
  563. # number of replicas expected have connected. Default of 0 means that the
  564. # maximum is not defined and Redis will wait the full delay.
  565. repl-diskless-sync-max-replicas 0
  566. # -----------------------------------------------------------------------------
  567. # WARNING: Since in this setup the replica does not immediately store an RDB on
  568. # disk, it may cause data loss during failovers. RDB diskless load + Redis
  569. # modules not handling I/O reads may cause Redis to abort in case of I/O errors
  570. # during the initial synchronization stage with the master.
  571. # -----------------------------------------------------------------------------
  572. #
  573. # Replica can load the RDB it reads from the replication link directly from the
  574. # socket, or store the RDB to a file and read that file after it was completely
  575. # received from the master.
  576. #
  577. # In many cases the disk is slower than the network, and storing and loading
  578. # the RDB file may increase replication time (and even increase the master's
  579. # Copy on Write memory and replica buffers).
  580. # However, when parsing the RDB file directly from the socket, in order to avoid
  581. # data loss it's only safe to flush the current dataset when the new dataset is
  582. # fully loaded in memory, resulting in higher memory usage.
  583. # For this reason we have the following options:
  584. #
  585. # "disabled"    - Don't use diskless load (store the rdb file to the disk first)
  586. # "swapdb"      - Keep current db contents in RAM while parsing the data directly
  587. #                 from the socket. Replicas in this mode can keep serving current
  588. #                 dataset while replication is in progress, except for cases where
  589. #                 they can't recognize master as having a data set from same
  590. #                 replication history.
  591. #                 Note that this requires sufficient memory, if you don't have it,
  592. #                 you risk an OOM kill.
  593. # "on-empty-db" - Use diskless load only when current dataset is empty. This is
  594. #                 safer and avoid having old and new dataset loaded side by side
  595. #                 during replication.
  596. repl-diskless-load disabled
  597. # Master send PINGs to its replicas in a predefined interval. It's possible to
  598. # change this interval with the repl_ping_replica_period option. The default
  599. # value is 10 seconds.
  600. #
  601. # repl-ping-replica-period 10
  602. # The following option sets the replication timeout for:
  603. #
  604. # 1) Bulk transfer I/O during SYNC, from the point of view of replica.
  605. # 2) Master timeout from the point of view of replicas (data, pings).
  606. # 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
  607. #
  608. # It is important to make sure that this value is greater than the value
  609. # specified for repl-ping-replica-period otherwise a timeout will be detected
  610. # every time there is low traffic between the master and the replica. The default
  611. # value is 60 seconds.
  612. #
  613. # repl-timeout 60
  614. # Disable TCP_NODELAY on the replica socket after SYNC?
  615. #
  616. # If you select "yes" Redis will use a smaller number of TCP packets and
  617. # less bandwidth to send data to replicas. But this can add a delay for
  618. # the data to appear on the replica side, up to 40 milliseconds with
  619. # Linux kernels using a default configuration.
  620. #
  621. # If you select "no" the delay for data to appear on the replica side will
  622. # be reduced but more bandwidth will be used for replication.
  623. #
  624. # By default we optimize for low latency, but in very high traffic conditions
  625. # or when the master and replicas are many hops away, turning this to "yes" may
  626. # be a good idea.
  627. repl-disable-tcp-nodelay no
  628. # Set the replication backlog size. The backlog is a buffer that accumulates
  629. # replica data when replicas are disconnected for some time, so that when a
  630. # replica wants to reconnect again, often a full resync is not needed, but a
  631. # partial resync is enough, just passing the portion of data the replica
  632. # missed while disconnected.
  633. #
  634. # The bigger the replication backlog, the longer the replica can endure the
  635. # disconnect and later be able to perform a partial resynchronization.
  636. #
  637. # The backlog is only allocated if there is at least one replica connected.
  638. #
  639. # repl-backlog-size 1mb
  640. # After a master has no connected replicas for some time, the backlog will be
  641. # freed. The following option configures the amount of seconds that need to
  642. # elapse, starting from the time the last replica disconnected, for the backlog
  643. # buffer to be freed.
  644. #
  645. # Note that replicas never free the backlog for timeout, since they may be
  646. # promoted to masters later, and should be able to correctly "partially
  647. # resynchronize" with other replicas: hence they should always accumulate backlog.
  648. #
  649. # A value of 0 means to never release the backlog.
  650. #
  651. # repl-backlog-ttl 3600
  652. # The replica priority is an integer number published by Redis in the INFO
  653. # output. It is used by Redis Sentinel in order to select a replica to promote
  654. # into a master if the master is no longer working correctly.
  655. #
  656. # A replica with a low priority number is considered better for promotion, so
  657. # for instance if there are three replicas with priority 10, 100, 25 Sentinel
  658. # will pick the one with priority 10, that is the lowest.
  659. #
  660. # However a special priority of 0 marks the replica as not able to perform the
  661. # role of master, so a replica with priority of 0 will never be selected by
  662. # Redis Sentinel for promotion.
  663. #
  664. # By default the priority is 100.
  665. replica-priority 100
  666. # The propagation error behavior controls how Redis will behave when it is
  667. # unable to handle a command being processed in the replication stream from a master
  668. # or processed while reading from an AOF file. Errors that occur during propagation
  669. # are unexpected, and can cause data inconsistency. However, there are edge cases
  670. # in earlier versions of Redis where it was possible for the server to replicate or persist
  671. # commands that would fail on future versions. For this reason the default behavior
  672. # is to ignore such errors and continue processing commands.
  673. #
  674. # If an application wants to ensure there is no data divergence, this configuration
  675. # should be set to 'panic' instead. The value can also be set to 'panic-on-replicas'
  676. # to only panic when a replica encounters an error on the replication stream. One of
  677. # these two panic values will become the default value in the future once there are
  678. # sufficient safety mechanisms in place to prevent false positive crashes.
  679. #
  680. # propagation-error-behavior ignore
  681. # Replica ignore disk write errors controls the behavior of a replica when it is
  682. # unable to persist a write command received from its master to disk. By default,
  683. # this configuration is set to 'no' and will crash the replica in this condition.
  684. # It is not recommended to change this default, however in order to be compatible
  685. # with older versions of Redis this config can be toggled to 'yes' which will just
  686. # log a warning and execute the write command it got from the master.
  687. #
  688. # replica-ignore-disk-write-errors no
  689. # -----------------------------------------------------------------------------
  690. # By default, Redis Sentinel includes all replicas in its reports. A replica
  691. # can be excluded from Redis Sentinel's announcements. An unannounced replica
  692. # will be ignored by the 'sentinel replicas <master>' command and won't be
  693. # exposed to Redis Sentinel's clients.
  694. #
  695. # This option does not change the behavior of replica-priority. Even with
  696. # replica-announced set to 'no', the replica can be promoted to master. To
  697. # prevent this behavior, set replica-priority to 0.
  698. #
  699. # replica-announced yes
  700. # It is possible for a master to stop accepting writes if there are less than
  701. # N replicas connected, having a lag less or equal than M seconds.
  702. #
  703. # The N replicas need to be in "online" state.
  704. #
  705. # The lag in seconds, that must be <= the specified value, is calculated from
  706. # the last ping received from the replica, that is usually sent every second.
  707. #
  708. # This option does not GUARANTEE that N replicas will accept the write, but
  709. # will limit the window of exposure for lost writes in case not enough replicas
  710. # are available, to the specified number of seconds.
  711. #
  712. # For example to require at least 3 replicas with a lag <= 10 seconds use:
  713. #
  714. # min-replicas-to-write 3
  715. # min-replicas-max-lag 10
  716. #
  717. # Setting one or the other to 0 disables the feature.
  718. #
  719. # By default min-replicas-to-write is set to 0 (feature disabled) and
  720. # min-replicas-max-lag is set to 10.
  721. # A Redis master is able to list the address and port of the attached
  722. # replicas in different ways. For example the "INFO replication" section
  723. # offers this information, which is used, among other tools, by
  724. # Redis Sentinel in order to discover replica instances.
  725. # Another place where this info is available is in the output of the
  726. # "ROLE" command of a master.
  727. #
  728. # The listed IP address and port normally reported by a replica is
  729. # obtained in the following way:
  730. #
  731. #   IP: The address is auto detected by checking the peer address
  732. #   of the socket used by the replica to connect with the master.
  733. #
  734. #   Port: The port is communicated by the replica during the replication
  735. #   handshake, and is normally the port that the replica is using to
  736. #   listen for connections.
  737. #
  738. # However when port forwarding or Network Address Translation (NAT) is
  739. # used, the replica may actually be reachable via different IP and port
  740. # pairs. The following two options can be used by a replica in order to
  741. # report to its master a specific set of IP and port, so that both INFO
  742. # and ROLE will report those values.
  743. #
  744. # There is no need to use both the options if you need to override just
  745. # the port or the IP address.
  746. #
  747. # replica-announce-ip 5.5.5.5
  748. # replica-announce-port 1234
  749. ############################### KEYS TRACKING #################################
  750. # Redis implements server assisted support for client side caching of values.
  751. # This is implemented using an invalidation table that remembers, using
  752. # a radix tree indexed by key name, what clients have which keys. In turn
  753. # this is used in order to send invalidation messages to clients. Please
  754. # check this page to understand more about the feature:
  755. #
  756. #   https://redis.io/topics/client-side-caching
  757. #
  758. # When tracking is enabled for a client, all the read only queries are assumed
  759. # to be cached: this will force Redis to store information in the invalidation
  760. # table. When keys are modified, such information is flushed away, and
  761. # invalidation messages are sent to the clients. However if the workload is
  762. # heavily dominated by reads, Redis could use more and more memory in order
  763. # to track the keys fetched by many clients.
  764. #
  765. # For this reason it is possible to configure a maximum fill value for the
  766. # invalidation table. By default it is set to 1M of keys, and once this limit
  767. # is reached, Redis will start to evict keys in the invalidation table
  768. # even if they were not modified, just to reclaim memory: this will in turn
  769. # force the clients to invalidate the cached values. Basically the table
  770. # maximum size is a trade off between the memory you want to spend server
  771. # side to track information about who cached what, and the ability of clients
  772. # to retain cached objects in memory.
  773. #
  774. # If you set the value to 0, it means there are no limits, and Redis will
  775. # retain as many keys as needed in the invalidation table.
  776. # In the "stats" INFO section, you can find information about the number of
  777. # keys in the invalidation table at every given moment.
  778. #
  779. # Note: when key tracking is used in broadcasting mode, no memory is used
  780. # in the server side so this setting is useless.
  781. #
  782. # tracking-table-max-keys 1000000
  783. ################################## SECURITY ###################################
  784. # Warning: since Redis is pretty fast, an outside user can try up to
  785. # 1 million passwords per second against a modern box. This means that you
  786. # should use very strong passwords, otherwise they will be very easy to break.
  787. # Note that because the password is really a shared secret between the client
  788. # and the server, and should not be memorized by any human, the password
  789. # can be easily a long string from /dev/urandom or whatever, so by using a
  790. # long and unguessable password no brute force attack will be possible.
  791. # Redis ACL users are defined in the following format:
  792. #
  793. #   user <username> ... acl rules ...
  794. #
  795. # For example:
  796. #
  797. #   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
  798. #
  799. # The special username "default" is used for new connections. If this user
  800. # has the "nopass" rule, then new connections will be immediately authenticated
  801. # as the "default" user without the need of any password provided via the
  802. # AUTH command. Otherwise if the "default" user is not flagged with "nopass"
  803. # the connections will start in not authenticated state, and will require
  804. # AUTH (or the HELLO command AUTH option) in order to be authenticated and
  805. # start to work.
  806. #
  807. # The ACL rules that describe what a user can do are the following:
  808. #
  809. #  on           Enable the user: it is possible to authenticate as this user.
  810. #  off          Disable the user: it's no longer possible to authenticate
  811. #               with this user, however the already authenticated connections
  812. #               will still work.
  813. #  skip-sanitize-payload    RESTORE dump-payload sanitization is skipped.
  814. #  sanitize-payload         RESTORE dump-payload is sanitized (default).
  815. #  +<command>   Allow the execution of that command.
  816. #               May be used with `|` for allowing subcommands (e.g "+config|get")
  817. #  -<command>   Disallow the execution of that command.
  818. #               May be used with `|` for blocking subcommands (e.g "-config|set")
  819. #  +@<category> Allow the execution of all the commands in such category
  820. #               with valid categories are like @admin, @set, @sortedset, ...
  821. #               and so forth, see the full list in the server.c file where
  822. #               the Redis command table is described and defined.
  823. #               The special category @all means all the commands, both the ones
  824. #               currently present in the server, and the ones that will be loaded
  825. #               in the future via modules.
  826. #  +<command>|first-arg  Allow a specific first argument of an otherwise
  827. #                        disabled command. It is only supported on commands with
  828. #                        no sub-commands, and is not allowed as negative form
  829. #                        like -SELECT|1, only additive starting with "+". This
  830. #                        feature is deprecated and may be removed in the future.
  831. #  allcommands  Alias for +@all. Note that it implies the ability to execute
  832. #               all the future commands loaded via the modules system.
  833. #  nocommands   Alias for -@all.
  834. #  ~<pattern>   Add a pattern of keys that can be mentioned as part of
  835. #               commands. For instance ~* allows all the keys. The pattern
  836. #               is a glob-style pattern like the one of KEYS.
  837. #               It is possible to specify multiple patterns.
  838. # %R~<pattern>  Add key read pattern that specifies which keys can be read
  839. #               from.
  840. # %W~<pattern>  Add key write pattern that specifies which keys can be
  841. #               written to.
  842. #  allkeys      Alias for ~*
  843. #  resetkeys    Flush the list of allowed keys patterns.
  844. #  &<pattern>   Add a glob-style pattern of Pub/Sub channels that can be
  845. #               accessed by the user. It is possible to specify multiple channel
  846. #               patterns.
  847. #  allchannels  Alias for &*
  848. #  resetchannels            Flush the list of allowed channel patterns.
  849. #  ><password>  Add this password to the list of valid password for the user.
  850. #               For example >mypass will add "mypass" to the list.
  851. #               This directive clears the "nopass" flag (see later).
  852. #  <<password>  Remove this password from the list of valid passwords.
  853. #  nopass       All the set passwords of the user are removed, and the user
  854. #               is flagged as requiring no password: it means that every
  855. #               password will work against this user. If this directive is
  856. #               used for the default user, every new connection will be
  857. #               immediately authenticated with the default user without
  858. #               any explicit AUTH command required. Note that the "resetpass"
  859. #               directive will clear this condition.
  860. #  resetpass    Flush the list of allowed passwords. Moreover removes the
  861. #               "nopass" status. After "resetpass" the user has no associated
  862. #               passwords and there is no way to authenticate without adding
  863. #               some password (or setting it as "nopass" later).
  864. #  reset        Performs the following actions: resetpass, resetkeys, resetchannels,
  865. #               allchannels (if acl-pubsub-default is set), off, clearselectors, -@all.
  866. #               The user returns to the same state it has immediately after its creation.
  867. # (<options>)   Create a new selector with the options specified within the
  868. #               parentheses and attach it to the user. Each option should be
  869. #               space separated. The first character must be ( and the last
  870. #               character must be ).
  871. # clearselectors            Remove all of the currently attached selectors.
  872. #                           Note this does not change the "root" user permissions,
  873. #                           which are the permissions directly applied onto the
  874. #                           user (outside the parentheses).
  875. #
  876. # ACL rules can be specified in any order: for instance you can start with
  877. # passwords, then flags, or key patterns. However note that the additive
  878. # and subtractive rules will CHANGE MEANING depending on the ordering.
  879. # For instance see the following example:
  880. #
  881. #   user alice on +@all -DEBUG ~* >somepassword
  882. #
  883. # This will allow "alice" to use all the commands with the exception of the
  884. # DEBUG command, since +@all added all the commands to the set of the commands
  885. # alice can use, and later DEBUG was removed. However if we invert the order
  886. # of two ACL rules the result will be different:
  887. #
  888. #   user alice on -DEBUG +@all ~* >somepassword
  889. #
  890. # Now DEBUG was removed when alice had yet no commands in the set of allowed
  891. # commands, later all the commands are added, so the user will be able to
  892. # execute everything.
  893. #
  894. # Basically ACL rules are processed left-to-right.
  895. #
  896. # The following is a list of command categories and their meanings:
  897. # * keyspace - Writing or reading from keys, databases, or their metadata
  898. #     in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE,
  899. #     KEYS, EXPIRE, TTL, FLUSHALL, etc. Commands that may modify the keyspace,
  900. #     key or metadata will also have `write` category. Commands that only read
  901. #     the keyspace, key or metadata will have the `read` category.
  902. # * read - Reading from keys (values or metadata). Note that commands that don't
  903. #     interact with keys, will not have either `read` or `write`.
  904. # * write - Writing to keys (values or metadata)
  905. # * admin - Administrative commands. Normal applications will never need to use
  906. #     these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc.
  907. # * dangerous - Potentially dangerous (each should be considered with care for
  908. #     various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS,
  909. #     CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc.
  910. # * connection - Commands affecting the connection or other connections.
  911. #     This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc.
  912. # * blocking - Potentially blocking the connection until released by another
  913. #     command.
  914. # * fast - Fast O(1) commands. May loop on the number of arguments, but not the
  915. #     number of elements in the key.
  916. # * slow - All commands that are not Fast.
  917. # * pubsub - PUBLISH / SUBSCRIBE related
  918. # * transaction - WATCH / MULTI / EXEC related commands.
  919. # * scripting - Scripting related.
  920. # * set - Data type: sets related.
  921. # * sortedset - Data type: zsets related.
  922. # * list - Data type: lists related.
  923. # * hash - Data type: hashes related.
  924. # * string - Data type: strings related.
  925. # * bitmap - Data type: bitmaps related.
  926. # * hyperloglog - Data type: hyperloglog related.
  927. # * geo - Data type: geo related.
  928. # * stream - Data type: streams related.
  929. #
  930. # For more information about ACL configuration please refer to
  931. # the Redis web site at https://redis.io/topics/acl
  932. # ACL LOG
  933. #
  934. # The ACL Log tracks failed commands and authentication events associated
  935. # with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
  936. # by ACLs. The ACL Log is stored in memory. You can reclaim memory with
  937. # ACL LOG RESET. Define the maximum entry length of the ACL Log below.
  938. acllog-max-len 128
  939. # Using an external ACL file
  940. #
  941. # Instead of configuring users here in this file, it is possible to use
  942. # a stand-alone file just listing users. The two methods cannot be mixed:
  943. # if you configure users here and at the same time you activate the external
  944. # ACL file, the server will refuse to start.
  945. #
  946. # The format of the external ACL user file is exactly the same as the
  947. # format that is used inside redis.conf to describe users.
  948. #
  949. # aclfile /etc/redis/users.acl
  950. # IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
  951. # layer on top of the new ACL system. The option effect will be just setting
  952. # the password for the default user. Clients will still authenticate using
  953. # AUTH <password> as usually, or more explicitly with AUTH default <password>
  954. # if they follow the new protocol: both will work.
  955. #
  956. # The requirepass is not compatible with aclfile option and the ACL LOAD
  957. # command, these will cause requirepass to be ignored.
  958. #
  959. # NOTE(review): "123456" is a trivially guessable example password — replace it
  959. # with a long random secret (e.g. from /dev/urandom) before any real deployment.
  959. requirepass 123456
  960. # New users are initialized with restrictive permissions by default, via the
  961. # equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
  962. # is possible to manage access to Pub/Sub channels with ACL rules as well. The
  963. # default Pub/Sub channels permission for new users is controlled by the
  964. # acl-pubsub-default configuration directive, which accepts one of these values:
  965. #
  966. # allchannels: grants access to all Pub/Sub channels
  967. # resetchannels: revokes access to all Pub/Sub channels
  968. #
  969. # From Redis 7.0, acl-pubsub-default defaults to 'resetchannels' permission.
  970. #
  971. # acl-pubsub-default resetchannels
  972. # Command renaming (DEPRECATED).
  973. #
  974. # ------------------------------------------------------------------------
  975. # WARNING: avoid using this option if possible. Instead use ACLs to remove
  976. # commands from the default user, and put them only in some admin user you
  977. # create for administrative purposes.
  978. # ------------------------------------------------------------------------
  979. #
  980. # It is possible to change the name of dangerous commands in a shared
  981. # environment. For instance the CONFIG command may be renamed into something
  982. # hard to guess so that it will still be available for internal-use tools
  983. # but not available for general clients.
  984. #
  985. # Example:
  986. #
  987. # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
  988. #
  989. # It is also possible to completely kill a command by renaming it into
  990. # an empty string:
  991. #
  992. # rename-command CONFIG ""
  993. #
  994. # Please note that changing the name of commands that are logged into the
  995. # AOF file or transmitted to replicas may cause problems.
  996. ################################### CLIENTS ####################################
  997. # Set the max number of connected clients at the same time. By default
  998. # this limit is set to 10000 clients, however if the Redis server is not
  999. # able to configure the process file limit to allow for the specified limit
  1000. # the max number of allowed clients is set to the current file limit
  1001. # minus 32 (as Redis reserves a few file descriptors for internal uses).
  1002. #
  1003. # Once the limit is reached Redis will close all the new connections sending
  1004. # an error 'max number of clients reached'.
  1005. #
  1006. # IMPORTANT: When Redis Cluster is used, the max number of connections is also
  1007. # shared with the cluster bus: every node in the cluster will use two
  1008. # connections, one incoming and another outgoing. It is important to size the
  1009. # limit accordingly in case of very large clusters.
  1010. #
  1011. # maxclients 10000
  1012. ############################## MEMORY MANAGEMENT ################################
  1013. # Set a memory usage limit to the specified amount of bytes.
  1014. # When the memory limit is reached Redis will try to remove keys
  1015. # according to the eviction policy selected (see maxmemory-policy).
  1016. #
  1017. # If Redis can't remove keys according to the policy, or if the policy is
  1018. # set to 'noeviction', Redis will start to reply with errors to commands
  1019. # that would use more memory, like SET, LPUSH, and so on, and will continue
  1020. # to reply to read-only commands like GET.
  1021. #
  1022. # This option is usually useful when using Redis as an LRU or LFU cache, or to
  1023. # set a hard memory limit for an instance (using the 'noeviction' policy).
  1024. #
  1025. # WARNING: If you have replicas attached to an instance with maxmemory on,
  1026. # the size of the output buffers needed to feed the replicas are subtracted
  1027. # from the used memory count, so that network problems / resyncs will
  1028. # not trigger a loop where keys are evicted, and in turn the output
  1029. # buffer of replicas is full with DELs of keys evicted triggering the deletion
  1030. # of more keys, and so forth until the database is completely emptied.
  1031. #
  1032. # In short... if you have replicas attached it is suggested that you set a lower
  1033. # limit for maxmemory so that there is some free RAM on the system for replica
  1034. # output buffers (but this is not needed if the policy is 'noeviction').
  1035. #
  1036. # maxmemory <bytes>
  1037. # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
  1038. # is reached. You can select one from the following behaviors:
  1039. #
  1040. # volatile-lru -> Evict using approximated LRU, only keys with an expire set.
  1041. # allkeys-lru -> Evict any key using approximated LRU.
  1042. # volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
  1043. # allkeys-lfu -> Evict any key using approximated LFU.
  1044. # volatile-random -> Remove a random key having an expire set.
  1045. # allkeys-random -> Remove a random key, any key.
  1046. # volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
  1047. # noeviction -> Don't evict anything, just return an error on write operations.
  1048. #
  1049. # LRU means Least Recently Used
  1050. # LFU means Least Frequently Used
  1051. #
  1052. # Both LRU, LFU and volatile-ttl are implemented using approximated
  1053. # randomized algorithms.
  1054. #
  1055. # Note: with any of the above policies, when there are no suitable keys for
  1056. # eviction, Redis will return an error on write operations that require
  1057. # more memory. These are usually commands that create new keys, add data or
  1058. # modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,
  1059. # SORT (due to the STORE argument), and EXEC (if the transaction includes any
  1060. # command that requires memory).
  1061. #
  1062. # The default is:
  1063. #
  1064. # maxmemory-policy noeviction
  1065. # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
  1066. # algorithms (in order to save memory), so you can tune it for speed or
  1067. # accuracy. By default Redis will check five keys and pick the one that was
  1068. # used least recently, you can change the sample size using the following
  1069. # configuration directive.
  1070. #
  1071. # The default of 5 produces good enough results. 10 Approximates very closely
  1072. # true LRU but costs more CPU. 3 is faster but not very accurate.
  1073. #
  1074. # maxmemory-samples 5
  1075. # Eviction processing is designed to function well with the default setting.
  1076. # If there is an unusually large amount of write traffic, this value may need to
  1077. # be increased.  Decreasing this value may reduce latency at the risk of
  1078. # eviction processing effectiveness
  1079. #   0 = minimum latency, 10 = default, 100 = process without regard to latency
  1080. #
  1081. # maxmemory-eviction-tenacity 10
  1082. # Starting from Redis 5, by default a replica will ignore its maxmemory setting
  1083. # (unless it is promoted to master after a failover or manually). It means
  1084. # that the eviction of keys will be just handled by the master, sending the
  1085. # DEL commands to the replica as keys evict in the master side.
  1086. #
  1087. # This behavior ensures that masters and replicas stay consistent, and is usually
  1088. # what you want, however if your replica is writable, or you want the replica
  1089. # to have a different memory setting, and you are sure all the writes performed
  1090. # to the replica are idempotent, then you may change this default (but be sure
  1091. # to understand what you are doing).
  1092. #
  1093. # Note that since the replica by default does not evict, it may end using more
  1094. # memory than the one set via maxmemory (there are certain buffers that may
  1095. # be larger on the replica, or data structures may sometimes take more memory
  1096. # and so forth). So make sure you monitor your replicas and make sure they
  1097. # have enough memory to never hit a real out-of-memory condition before the
  1098. # master hits the configured maxmemory setting.
  1099. #
  1100. # replica-ignore-maxmemory yes
  1101. # Redis reclaims expired keys in two ways: upon access when those keys are
  1102. # found to be expired, and also in background, in what is called the
  1103. # "active expiring". The key space is slowly and interactively scanned
  1104. # looking for expired keys to reclaim, so that it is possible to free memory
  1105. # of keys that are expired and will never be accessed again in a short time.
  1106. #
  1107. # The default effort of the expire cycle will try to avoid having more than
  1108. # ten percent of expired keys still in memory, and will try to avoid consuming
  1109. # more than 25% of total memory and to add latency to the system. However
  1110. # it is possible to increase the expire "effort" that is normally set to
  1111. # "1", to a greater value, up to the value "10". At its maximum value the
  1112. # system will use more CPU, longer cycles (and technically may introduce
  1113. # more latency), and will tolerate less already expired keys still present
  1114. # in the system. It's a tradeoff between memory, CPU and latency.
  1115. #
  1116. # active-expire-effort 1
  1117. ############################# LAZY FREEING ####################################
  1118. # Redis has two primitives to delete keys. One is called DEL and is a blocking
  1119. # deletion of the object. It means that the server stops processing new commands
  1120. # in order to reclaim all the memory associated with an object in a synchronous
  1121. # way. If the key deleted is associated with a small object, the time needed
  1122. # in order to execute the DEL command is very small and comparable to most other
  1123. # O(1) or O(log_N) commands in Redis. However if the key is associated with an
  1124. # aggregated value containing millions of elements, the server can block for
  1125. # a long time (even seconds) in order to complete the operation.
  1126. #
  1127. # For the above reasons Redis also offers non blocking deletion primitives
  1128. # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
  1129. # FLUSHDB commands, in order to reclaim memory in background. Those commands
  1130. # are executed in constant time. Another thread will incrementally free the
  1131. # object in the background as fast as possible.
  1132. #
  1133. # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
  1134. # It's up to the design of the application to understand when it is a good
  1135. # idea to use one or the other. However the Redis server sometimes has to
  1136. # delete keys or flush the whole database as a side effect of other operations.
  1137. # Specifically Redis deletes objects independently of a user call in the
  1138. # following scenarios:
  1139. #
  1140. # 1) On eviction, because of the maxmemory and maxmemory policy configurations,
  1141. #    in order to make room for new data, without going over the specified
  1142. #    memory limit.
  1143. # 2) Because of expire: when a key with an associated time to live (see the
  1144. #    EXPIRE command) must be deleted from memory.
  1145. # 3) Because of a side effect of a command that stores data on a key that may
  1146. #    already exist. For example the RENAME command may delete the old key
  1147. #    content when it is replaced with another one. Similarly SUNIONSTORE
  1148. #    or SORT with STORE option may delete existing keys. The SET command
  1149. #    itself removes any old content of the specified key in order to replace
  1150. #    it with the specified string.
  1151. # 4) During replication, when a replica performs a full resynchronization with
  1152. #    its master, the content of the whole database is removed in order to
  1153. #    load the RDB file just transferred.
  1154. #
  1155. # In all the above cases the default is to delete objects in a blocking way,
  1156. # like if DEL was called. However you can configure each case specifically
  1157. # in order to instead release memory in a non-blocking way like if UNLINK
  1158. # was called, using the following configuration directives.
  1159. lazyfree-lazy-eviction no
  1160. lazyfree-lazy-expire no
  1161. lazyfree-lazy-server-del no
  1162. replica-lazy-flush no
  1163. # It is also possible, for the case when to replace the user code DEL calls
  1164. # with UNLINK calls is not easy, to modify the default behavior of the DEL
  1165. # command to act exactly like UNLINK, using the following configuration
  1166. # directive:
  1167. lazyfree-lazy-user-del no
  1168. # FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous
  1169. # deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
  1170. # commands. When neither flag is passed, this directive will be used to determine
  1171. # if the data should be deleted asynchronously.
  1172. lazyfree-lazy-user-flush no
  1173. ################################ THREADED I/O #################################
  1174. # Redis is mostly single threaded, however there are certain threaded
  1175. # operations such as UNLINK, slow I/O accesses and other things that are
  1176. # performed on side threads.
  1177. #
  1178. # Now it is also possible to handle Redis clients socket reads and writes
  1179. # in different I/O threads. Since especially writing is so slow, normally
  1180. # Redis users use pipelining in order to speed up the Redis performances per
  1181. # core, and spawn multiple instances in order to scale more. Using I/O
  1182. # threads it is possible to easily speedup two times Redis without resorting
  1183. # to pipelining nor sharding of the instance.
  1184. #
  1185. # By default threading is disabled, we suggest enabling it only in machines
  1186. # that have at least 4 or more cores, leaving at least one spare core.
  1187. # Using more than 8 threads is unlikely to help much. We also recommend using
  1188. # threaded I/O only if you actually have performance problems, with Redis
  1189. # instances being able to use a quite big percentage of CPU time, otherwise
  1190. # there is no point in using this feature.
  1191. #
  1192. # So for instance if you have a four cores boxes, try to use 2 or 3 I/O
  1193. # threads, if you have a 8 cores, try to use 6 threads. In order to
  1194. # enable I/O threads use the following configuration directive:
  1195. #
  1196. # io-threads 4
  1197. #
  1198. # Setting io-threads to 1 will just use the main thread as usual.
  1199. # When I/O threads are enabled, we only use threads for writes, that is
  1200. # to thread the write(2) syscall and transfer the client buffers to the
  1201. # socket. However it is also possible to enable threading of reads and
  1202. # protocol parsing using the following configuration directive, by setting
  1203. # it to yes:
  1204. #
  1205. # io-threads-do-reads no
  1206. #
  1207. # Usually threading reads doesn't help much.
  1208. #
  1209. # NOTE 1: This configuration directive cannot be changed at runtime via
  1210. # CONFIG SET. Also, this feature currently does not work when SSL is
  1211. # enabled.
  1212. #
  1213. # NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
  1214. # sure you also run the benchmark itself in threaded mode, using the
  1215. # --threads option to match the number of Redis threads, otherwise you'll not
  1216. # be able to notice the improvements.
  1217. ############################ KERNEL OOM CONTROL ##############################
  1218. # On Linux, it is possible to hint the kernel OOM killer on what processes
  1219. # should be killed first when out of memory.
  1220. #
  1221. # Enabling this feature makes Redis actively control the oom_score_adj value
  1222. # for all its processes, depending on their role. The default scores will
  1223. # attempt to have background child processes killed before all others, and
  1224. # replicas killed before masters.
  1225. #
  1226. # Redis supports these options:
  1227. #
  1228. # no:       Don't make changes to oom-score-adj (default).
  1229. # yes:      Alias to "relative" see below.
  1230. # absolute: Values in oom-score-adj-values are written as is to the kernel.
  1231. # relative: Values are used relative to the initial value of oom_score_adj when
  1232. #           the server starts and are then clamped to a range of -1000 to 1000.
  1233. #           Because typically the initial value is 0, they will often match the
  1234. #           absolute values.
  1235. oom-score-adj no
  1236. # When oom-score-adj is used, this directive controls the specific values used
  1237. # for master, replica and background child processes. Values range -2000 to
  1238. # 2000 (higher means more likely to be killed).
  1239. #
  1240. # Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
  1241. # can freely increase their value, but not decrease it below its initial
  1242. # settings. This means that setting oom-score-adj to "relative" and setting the
  1243. # oom-score-adj-values to positive values will always succeed.
  1244. oom-score-adj-values 0 200 800
  1245. #################### KERNEL transparent hugepage CONTROL ######################
  1246. # Usually the kernel Transparent Huge Pages control is set to "madvise"
  1247. # or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
  1248. # case this config has no effect. On systems in which it is set to "always",
  1249. # redis will attempt to disable it specifically for the redis process in order
  1250. # to avoid latency problems specifically with fork(2) and CoW.
  1251. # If for some reason you prefer to keep it enabled, you can set this config to
  1252. # "no" and the kernel global to "always".
  1253. disable-thp yes
  1254. ############################## APPEND ONLY MODE ###############################
  1255. # By default Redis asynchronously dumps the dataset on disk. This mode is
  1256. # good enough in many applications, but an issue with the Redis process or
  1257. # a power outage may result into a few minutes of writes lost (depending on
  1258. # the configured save points).
  1259. #
  1260. # The Append Only File is an alternative persistence mode that provides
  1261. # much better durability. For instance using the default data fsync policy
  1262. # (see later in the config file) Redis can lose just one second of writes in a
  1263. # dramatic event like a server power outage, or a single write if something
  1264. # wrong with the Redis process itself happens, but the operating system is
  1265. # still running correctly.
  1266. #
  1267. # AOF and RDB persistence can be enabled at the same time without problems.
  1268. # If the AOF is enabled on startup Redis will load the AOF, that is the file
  1269. # with the better durability guarantees.
  1270. #
  1271. # Please check https://redis.io/topics/persistence for more information.
  1272. appendonly no
  1273. # The base name of the append only file.
  1274. #
  1275. # Redis 7 and newer use a set of append-only files to persist the dataset
  1276. # and changes applied to it. There are two basic types of files in use:
  1277. #
  1278. # - Base files, which are a snapshot representing the complete state of the
  1279. #   dataset at the time the file was created. Base files can be either in
  1280. #   the form of RDB (binary serialized) or AOF (textual commands).
  1281. # - Incremental files, which contain additional commands that were applied
  1282. #   to the dataset following the previous file.
  1283. #
  1284. # In addition, manifest files are used to track the files and the order in
  1285. # which they were created and should be applied.
  1286. #
  1287. # Append-only file names are created by Redis following a specific pattern.
  1288. # The file name's prefix is based on the 'appendfilename' configuration
  1289. # parameter, followed by additional information about the sequence and type.
  1290. #
  1291. # For example, if appendfilename is set to appendonly.aof, the following file
  1292. # names could be derived:
  1293. #
  1294. # - appendonly.aof.1.base.rdb as a base file.
  1295. # - appendonly.aof.1.incr.aof, appendonly.aof.2.incr.aof as incremental files.
  1296. # - appendonly.aof.manifest as a manifest file.
  1297. appendfilename "appendonly.aof"
  1298. # For convenience, Redis stores all persistent append-only files in a dedicated
  1299. # directory. The name of the directory is determined by the appenddirname
  1300. # configuration parameter.
  1301. appenddirname "appendonlydir"
  1302. # The fsync() call tells the Operating System to actually write data on disk
  1303. # instead of waiting for more data in the output buffer. Some OS will really flush
  1304. # data on disk, some other OS will just try to do it ASAP.
  1305. #
  1306. # Redis supports three different modes:
  1307. #
  1308. # no: don't fsync, just let the OS flush the data when it wants. Faster.
  1309. # always: fsync after every write to the append only log. Slow, Safest.
  1310. # everysec: fsync only one time every second. Compromise.
  1311. #
  1312. # The default is "everysec", as that's usually the right compromise between
  1313. # speed and data safety. It's up to you to understand if you can relax this to
  1314. # "no" that will let the operating system flush the output buffer when
  1315. # it wants, for better performances (but if you can live with the idea of
  1316. # some data loss consider the default persistence mode that's snapshotting),
  1317. # or on the contrary, use "always" that's very slow but a bit safer than
  1318. # everysec.
  1319. #
  1320. # More details please check the following article:
  1321. # http://antirez.com/post/redis-persistence-demystified.html
  1322. #
  1323. # If unsure, use "everysec".
  1324. # appendfsync always
  1325. appendfsync everysec
  1326. # appendfsync no
  1327. # When the AOF fsync policy is set to always or everysec, and a background
  1328. # saving process (a background save or AOF log background rewriting) is
  1329. # performing a lot of I/O against the disk, in some Linux configurations
  1330. # Redis may block too long on the fsync() call. Note that there is no fix for
  1331. # this currently, as even performing fsync in a different thread will block
  1332. # our synchronous write(2) call.
  1333. #
  1334. # In order to mitigate this problem it's possible to use the following option
  1335. # that will prevent fsync() from being called in the main process while a
  1336. # BGSAVE or BGREWRITEAOF is in progress.
  1337. #
  1338. # This means that while another child is saving, the durability of Redis is
  1339. # the same as "appendfsync no". In practical terms, this means that it is
  1340. # possible to lose up to 30 seconds of log in the worst scenario (with the
  1341. # default Linux settings).
  1342. #
  1343. # If you have latency problems turn this to "yes". Otherwise leave it as
  1344. # "no" that is the safest pick from the point of view of durability.
  1345. no-appendfsync-on-rewrite no
  1346. # Automatic rewrite of the append only file.
  1347. # Redis is able to automatically rewrite the log file implicitly calling
  1348. # BGREWRITEAOF when the AOF log size grows by the specified percentage.
  1349. #
  1350. # This is how it works: Redis remembers the size of the AOF file after the
  1351. # latest rewrite (if no rewrite has happened since the restart, the size of
  1352. # the AOF at startup is used).
  1353. #
  1354. # This base size is compared to the current size. If the current size is
  1355. # bigger than the specified percentage, the rewrite is triggered. Also
  1356. # you need to specify a minimal size for the AOF file to be rewritten, this
  1357. # is useful to avoid rewriting the AOF file even if the percentage increase
  1358. # is reached but it is still pretty small.
  1359. #
  1360. # Specify a percentage of zero in order to disable the automatic AOF
  1361. # rewrite feature.
  1362. auto-aof-rewrite-percentage 100
  1363. auto-aof-rewrite-min-size 64mb
  1364. # An AOF file may be found to be truncated at the end during the Redis
  1365. # startup process, when the AOF data gets loaded back into memory.
  1366. # This may happen when the system where Redis is running
  1367. # crashes, especially when an ext4 filesystem is mounted without the
  1368. # data=ordered option (however this can't happen when Redis itself
  1369. # crashes or aborts but the operating system still works correctly).
  1370. #
  1371. # Redis can either exit with an error when this happens, or load as much
  1372. # data as possible (the default now) and start if the AOF file is found
  1373. # to be truncated at the end. The following option controls this behavior.
  1374. #
  1375. # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
  1376. # the Redis server starts emitting a log to inform the user of the event.
  1377. # Otherwise if the option is set to no, the server aborts with an error
  1378. # and refuses to start. When the option is set to no, the user requires
  1379. # to fix the AOF file using the "redis-check-aof" utility before to restart
  1380. # the server.
  1381. #
  1382. # Note that if the AOF file will be found to be corrupted in the middle
  1383. # the server will still exit with an error. This option only applies when
  1384. # Redis will try to read more data from the AOF file but not enough bytes
  1385. # will be found.
  1386. aof-load-truncated yes
  1387. # Redis can create append-only base files in either RDB or AOF formats. Using
  1388. # the RDB format is always faster and more efficient, and disabling it is only
  1389. # supported for backward compatibility purposes.
  1390. aof-use-rdb-preamble yes
  1391. # Redis supports recording timestamp annotations in the AOF to support restoring
  1392. # the data from a specific point-in-time. However, using this capability changes
  1393. # the AOF format in a way that may not be compatible with existing AOF parsers.
  1394. aof-timestamp-enabled no
  1395. ################################ SHUTDOWN #####################################
  1396. # Maximum time to wait for replicas when shutting down, in seconds.
  1397. #
  1398. # During shut down, a grace period allows any lagging replicas to catch up with
  1399. # the latest replication offset before the master exits. This period can
  1400. # prevent data loss, especially for deployments without configured disk backups.
  1401. #
  1402. # The 'shutdown-timeout' value is the grace period's duration in seconds. It is
  1403. # only applicable when the instance has replicas. To disable the feature, set
  1404. # the value to 0.
  1405. #
  1406. # shutdown-timeout 10
  1407. # When Redis receives a SIGINT or SIGTERM, shutdown is initiated and by default
  1408. # an RDB snapshot is written to disk in a blocking operation if save points are configured.
  1409. # The options used on signaled shutdown can include the following values:
  1410. # default:  Saves RDB snapshot only if save points are configured.
  1411. #           Waits for lagging replicas to catch up.
  1412. # save:     Forces a DB saving operation even if no save points are configured.
  1413. # nosave:   Prevents DB saving operation even if one or more save points are configured.
  1414. # now:      Skips waiting for lagging replicas.
  1415. # force:    Ignores any errors that would normally prevent the server from exiting.
  1416. #
  1417. # Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously.
  1418. # Example: "nosave force now"
  1419. #
  1420. # shutdown-on-sigint default
  1421. # shutdown-on-sigterm default
  1422. ################ NON-DETERMINISTIC LONG BLOCKING COMMANDS #####################
  1423. # Maximum time in milliseconds for EVAL scripts, functions and in some cases
  1424. # modules' commands before Redis can start processing or rejecting other clients.
  1425. #
  1426. # If the maximum execution time is reached Redis will start to reply to most
  1427. # commands with a BUSY error.
  1428. #
  1429. # In this state Redis will only allow a handful of commands to be executed.
  1430. # For instance, SCRIPT KILL, FUNCTION KILL, SHUTDOWN NOSAVE and possibly some
  1431. # module specific 'allow-busy' commands.
  1432. #
  1433. # SCRIPT KILL and FUNCTION KILL will only be able to stop a script that did not
  1434. # yet call any write commands, so SHUTDOWN NOSAVE may be the only way to stop
  1435. # the server in the case a write command was already issued by the script when
  1436. # the user doesn't want to wait for the natural termination of the script.
  1437. #
  1438. # The default is 5 seconds. It is possible to set it to 0 or a negative value
  1439. # to disable this mechanism (uninterrupted execution). Note that in the past
  1440. # this config had a different name, which is now an alias, so both of these do
  1441. # the same:
  1442. # lua-time-limit 5000
  1443. # busy-reply-threshold 5000
  1444. ################################ REDIS CLUSTER  ###############################
  1445. # Normal Redis instances can't be part of a Redis Cluster; only nodes that are
  1446. # started as cluster nodes can. In order to start a Redis instance as a
  1447. # cluster node enable the cluster support uncommenting the following:
  1448. #
  1449. # cluster-enabled yes
  1450. # Every cluster node has a cluster configuration file. This file is not
  1451. # intended to be edited by hand. It is created and updated by Redis nodes.
  1452. # Every Redis Cluster node requires a different cluster configuration file.
  1453. # Make sure that instances running in the same system do not have
  1454. # overlapping cluster configuration file names.
  1455. #
  1456. # cluster-config-file nodes-6379.conf
  1457. # Cluster node timeout is the amount of milliseconds a node must be unreachable
  1458. # for it to be considered in failure state.
  1459. # Most other internal time limits are a multiple of the node timeout.
  1460. #
  1461. # cluster-node-timeout 15000
  1462. # The cluster port is the port that the cluster bus will listen for inbound connections on. When set
  1463. # to the default value, 0, it will be bound to the command port + 10000. Setting this value requires
  1464. # you to specify the cluster bus port when executing cluster meet.
  1465. # cluster-port 0
  1466. # A replica of a failing master will avoid to start a failover if its data
  1467. # looks too old.
  1468. #
  1469. # There is no simple way for a replica to actually have an exact measure of
  1470. # its "data age", so the following two checks are performed:
  1471. #
  1472. # 1) If there are multiple replicas able to failover, they exchange messages
  1473. #    in order to try to give an advantage to the replica with the best
  1474. #    replication offset (more data from the master processed).
  1475. #    Replicas will try to get their rank by offset, and apply to the start
  1476. #    of the failover a delay proportional to their rank.
  1477. #
  1478. # 2) Every single replica computes the time of the last interaction with
  1479. #    its master. This can be the last ping or command received (if the master
  1480. #    is still in the "connected" state), or the time that elapsed since the
  1481. #    disconnection with the master (if the replication link is currently down).
  1482. #    If the last interaction is too old, the replica will not try to failover
  1483. #    at all.
  1484. #
  1485. # The point "2" can be tuned by user. Specifically a replica will not perform
  1486. # the failover if, since the last interaction with the master, the time
  1487. # elapsed is greater than:
  1488. #
  1489. #   (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
  1490. #
  1491. # So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
  1492. # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
  1493. # replica will not try to failover if it was not able to talk with the master
  1494. # for longer than 310 seconds.
  1495. #
  1496. # A large cluster-replica-validity-factor may allow replicas with too old data to failover
  1497. # a master, while a too small value may prevent the cluster from being able to
  1498. # elect a replica at all.
  1499. #
  1500. # For maximum availability, it is possible to set the cluster-replica-validity-factor
  1501. # to a value of 0, which means, that replicas will always try to failover the
  1502. # master regardless of the last time they interacted with the master.
  1503. # (However they'll always try to apply a delay proportional to their
  1504. # offset rank).
  1505. #
  1506. # Zero is the only value able to guarantee that when all the partitions heal
  1507. # the cluster will always be able to continue.
  1508. #
  1509. # cluster-replica-validity-factor 10
  1510. # Cluster replicas are able to migrate to orphaned masters, that are masters
  1511. # that are left without working replicas. This improves the cluster ability
  1512. # to resist to failures as otherwise an orphaned master can't be failed over
  1513. # in case of failure if it has no working replicas.
  1514. #
  1515. # Replicas migrate to orphaned masters only if there are still at least a
  1516. # given number of other working replicas for their old master. This number
  1517. # is the "migration barrier". A migration barrier of 1 means that a replica
  1518. # will migrate only if there is at least 1 other working replica for its master
  1519. # and so forth. It usually reflects the number of replicas you want for every
  1520. # master in your cluster.
  1521. #
  1522. # Default is 1 (replicas migrate only if their masters remain with at least
  1523. # one replica). To disable migration just set it to a very large value or
  1524. # set cluster-allow-replica-migration to 'no'.
  1525. # A value of 0 can be set but is useful only for debugging and dangerous
  1526. # in production.
  1527. #
  1528. # cluster-migration-barrier 1
  1529. # Turning off this option allows to use less automatic cluster configuration.
  1530. # It both disables migration to orphaned masters and migration from masters
  1531. # that became empty.
  1532. #
  1533. # Default is 'yes' (allow automatic migrations).
  1534. #
  1535. # cluster-allow-replica-migration yes
  1536. # By default Redis Cluster nodes stop accepting queries if they detect there
  1537. # is at least a hash slot uncovered (no available node is serving it).
  1538. # This way if the cluster is partially down (for example a range of hash slots
  1539. # are no longer covered) all the cluster becomes, eventually, unavailable.
  1540. # It automatically returns available as soon as all the slots are covered again.
  1541. #
  1542. # However sometimes you want the subset of the cluster which is working,
  1543. # to continue to accept queries for the part of the key space that is still
  1544. # covered. In order to do so, just set the cluster-require-full-coverage
  1545. # option to no.
  1546. #
  1547. # cluster-require-full-coverage yes
  1548. # This option, when set to yes, prevents replicas from trying to failover its
  1549. # master during master failures. However the replica can still perform a
  1550. # manual failover, if forced to do so.
  1551. #
  1552. # This is useful in different scenarios, especially in the case of multiple
  1553. # data center operations, where we want one side to never be promoted if not
  1554. # in the case of a total DC failure.
  1555. #
  1556. # cluster-replica-no-failover no
  1557. # This option, when set to yes, allows nodes to serve read traffic while the
  1558. # cluster is in a down state, as long as it believes it owns the slots.
  1559. #
  1560. # This is useful for two cases.  The first case is for when an application
  1561. # doesn't require consistency of data during node failures or network partitions.
  1562. # One example of this is a cache, where as long as the node has the data it
  1563. # should be able to serve it.
  1564. #
  1565. # The second use case is for configurations that don't meet the recommended
  1566. # three shards but want to enable cluster mode and scale later. A
  1567. # master outage in a 1 or 2 shard configuration causes a read/write outage to the
  1568. # entire cluster without this option set, with it set there is only a write outage.
  1569. # Without a quorum of masters, slot ownership will not change automatically.
  1570. #
  1571. # cluster-allow-reads-when-down no
  1572. # This option, when set to yes, allows nodes to serve pubsub shard traffic while
  1573. # the cluster is in a down state, as long as it believes it owns the slots.
  1574. #
  1575. # This is useful if the application would like to use the pubsub feature even when
  1576. # the cluster global stable state is not OK. If the application wants to make sure only
  1577. # one shard is serving a given channel, this feature should be kept as yes.
  1578. #
  1579. # cluster-allow-pubsubshard-when-down yes
  1580. # Cluster link send buffer limit is the limit on the memory usage of an individual
  1581. # cluster bus link's send buffer in bytes. Cluster links would be freed if they exceed
  1582. # this limit. This is to primarily prevent send buffers from growing unbounded on links
  1583. # toward slow peers (E.g. PubSub messages being piled up).
  1584. # This limit is disabled by default. Enable this limit when 'mem_cluster_links' INFO field
  1585. # and/or 'send-buffer-allocated' entries in the 'CLUSTER LINKS' command output continuously increase.
  1586. # Minimum limit of 1gb is recommended so that cluster link buffer can fit in at least a single
  1587. # PubSub message by default. (client-query-buffer-limit default value is 1gb)
  1588. #
  1589. # cluster-link-sendbuf-limit 0
  1590. # Clusters can configure their announced hostname using this config. This is a common use case for
  1591. # applications that need to use TLS Server Name Indication (SNI) or dealing with DNS based
  1592. # routing. By default this value is only shown as additional metadata in the CLUSTER SLOTS
  1593. # command, but can be changed using 'cluster-preferred-endpoint-type' config. This value is
  1594. # communicated along the clusterbus to all nodes, setting it to an empty string will remove
  1595. # the hostname and also propagate the removal.
  1596. #
  1597. # cluster-announce-hostname ""
  1598. # Clusters can configure an optional nodename to be used in addition to the node ID for
  1599. # debugging and admin information. This name is broadcasted between nodes, so will be used
  1600. # in addition to the node ID when reporting cross node events such as node failures.
  1601. # cluster-announce-human-nodename ""
  1602. # Clusters can advertise how clients should connect to them using either their IP address,
  1603. # a user defined hostname, or by declaring they have no endpoint. Which endpoint is
  1604. # shown as the preferred endpoint is set by using the cluster-preferred-endpoint-type
  1605. # config with values 'ip', 'hostname', or 'unknown-endpoint'. This value controls how
  1606. # the endpoint returned for MOVED/ASKING requests as well as the first field of CLUSTER SLOTS.
  1607. # If the preferred endpoint type is set to hostname, but no announced hostname is set, a '?'
  1608. # will be returned instead.
  1609. #
  1610. # When a cluster advertises itself as having an unknown endpoint, it's indicating that
  1611. # the server doesn't know how clients can reach the cluster. This can happen in certain
  1612. # networking situations where there are multiple possible routes to the node, and the
  1613. # server doesn't know which one the client took. In this case, the server is expecting
  1614. # the client to reach out on the same endpoint it used for making the last request, but use
  1615. # the port provided in the response.
  1616. #
  1617. # cluster-preferred-endpoint-type ip
  1618. # In order to setup your cluster make sure to read the documentation
  1619. # available at https://redis.io web site.
  1620. ########################## CLUSTER DOCKER/NAT support  ########################
  1621. # In certain deployments, Redis Cluster nodes address discovery fails, because
  1622. # addresses are NAT-ted or because ports are forwarded (the typical case is
  1623. # Docker and other containers).
  1624. #
  1625. # In order to make Redis Cluster working in such environments, a static
  1626. # configuration where each node knows its public address is needed. The
  1627. # following four options are used for this scope, and are:
  1628. #
  1629. # * cluster-announce-ip
  1630. # * cluster-announce-port
  1631. # * cluster-announce-tls-port
  1632. # * cluster-announce-bus-port
  1633. #
  1634. # Each instructs the node about its address, client ports (for connections
  1635. # without and with TLS) and cluster message bus port. The information is then
  1636. # published in the header of the bus packets so that other nodes will be able to
  1637. # correctly map the address of the node publishing the information.
  1638. #
  1639. # If tls-cluster is set to yes and cluster-announce-tls-port is omitted or set
  1640. # to zero, then cluster-announce-port refers to the TLS port. Note also that
  1641. # cluster-announce-tls-port has no effect if tls-cluster is set to no.
  1642. #
  1643. # If the above options are not used, the normal Redis Cluster auto-detection
  1644. # will be used instead.
  1645. #
  1646. # Note that when remapped, the bus port may not be at the fixed offset of
  1647. # clients port + 10000, so you can specify any port and bus-port depending
  1648. # on how they get remapped. If the bus-port is not set, a fixed offset of
  1649. # 10000 will be used as usual.
  1650. #
  1651. # Example:
  1652. #
  1653. # cluster-announce-ip 10.1.1.5
  1654. # cluster-announce-tls-port 6379
  1655. # cluster-announce-port 0
  1656. # cluster-announce-bus-port 6380
  1657. ################################## SLOW LOG ###################################
  1658. # The Redis Slow Log is a system to log queries that exceeded a specified
  1659. # execution time. The execution time does not include the I/O operations
  1660. # like talking with the client, sending the reply and so forth,
  1661. # but just the time needed to actually execute the command (this is the only
  1662. # stage of command execution where the thread is blocked and can not serve
  1663. # other requests in the meantime).
  1664. #
  1665. # You can configure the slow log with two parameters: one tells Redis
  1666. # what is the execution time, in microseconds, to exceed in order for the
  1667. # command to get logged, and the other parameter is the length of the
  1668. # slow log. When a new command is logged the oldest one is removed from the
  1669. # queue of logged commands.
  1670. # The following time is expressed in microseconds, so 1000000 is equivalent
  1671. # to one second. Note that a negative number disables the slow log, while
  1672. # a value of zero forces the logging of every command.
  1673. slowlog-log-slower-than 10000
  1674. # There is no limit to this length. Just be aware that it will consume memory.
  1675. # You can reclaim memory used by the slow log with SLOWLOG RESET.
  1676. slowlog-max-len 128
  1677. ################################ LATENCY MONITOR ##############################
  1678. # The Redis latency monitoring subsystem samples different operations
  1679. # at runtime in order to collect data related to possible sources of
  1680. # latency of a Redis instance.
  1681. #
  1682. # Via the LATENCY command this information is available to the user that can
  1683. # print graphs and obtain reports.
  1684. #
  1685. # The system only logs operations that were performed in a time equal or
  1686. # greater than the amount of milliseconds specified via the
  1687. # latency-monitor-threshold configuration directive. When its value is set
  1688. # to zero, the latency monitor is turned off.
  1689. #
  1690. # By default latency monitoring is disabled since it is mostly not needed
  1691. # if you don't have latency issues, and collecting data has a performance
  1692. # impact, that while very small, can be measured under big load. Latency
  1693. # monitoring can easily be enabled at runtime using the command
  1694. # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
  1695. latency-monitor-threshold 0
  1696. ################################ LATENCY TRACKING ##############################
  1697. # The Redis extended latency monitoring tracks the per command latencies and enables
  1698. # exporting the percentile distribution via the INFO latencystats command,
  1699. # and cumulative latency distributions (histograms) via the LATENCY command.
  1700. #
  1701. # By default, the extended latency monitoring is enabled since the overhead
  1702. # of keeping track of the command latency is very small.
  1703. # latency-tracking yes
  1704. # By default the exported latency percentiles via the INFO latencystats command
  1705. # are the p50, p99, and p999.
  1706. # latency-tracking-info-percentiles 50 99 99.9
  1707. ############################# EVENT NOTIFICATION ##############################
  1708. # Redis can notify Pub/Sub clients about events happening in the key space.
  1709. # This feature is documented at https://redis.io/topics/notifications
  1710. #
  1711. # For instance if keyspace events notification is enabled, and a client
  1712. # performs a DEL operation on key "foo" stored in the Database 0, two
  1713. # messages will be published via Pub/Sub:
  1714. #
  1715. # PUBLISH __keyspace@0__:foo del
  1716. # PUBLISH __keyevent@0__:del foo
  1717. #
  1718. # It is possible to select the events that Redis will notify among a set
  1719. # of classes. Every class is identified by a single character:
  1720. #
  1721. #  K     Keyspace events, published with __keyspace@<db>__ prefix.
  1722. #  E     Keyevent events, published with __keyevent@<db>__ prefix.
  1723. #  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
  1724. #  $     String commands
  1725. #  l     List commands
  1726. #  s     Set commands
  1727. #  h     Hash commands
  1728. #  z     Sorted set commands
  1729. #  x     Expired events (events generated every time a key expires)
  1730. #  e     Evicted events (events generated when a key is evicted for maxmemory)
  1731. #  n     New key events (Note: not included in the 'A' class)
  1732. #  t     Stream commands
  1733. #  d     Module key type events
  1734. #  m     Key-miss events (Note: It is not included in the 'A' class)
  1735. #  A     Alias for g$lshzxetd, so that the "AKE" string means all the events
  1736. #        (Except key-miss events which are excluded from 'A' due to their
  1737. #         unique nature).
  1738. #
  1739. #  The "notify-keyspace-events" takes as argument a string that is composed
  1740. #  of zero or multiple characters. The empty string means that notifications
  1741. #  are disabled.
  1742. #
  1743. #  Example: to enable list and generic events, from the point of view of the
  1744. #           event name, use:
  1745. #
  1746. #  notify-keyspace-events Elg
  1747. #
  1748. #  Example 2: to get the stream of the expired keys subscribing to channel
  1749. #             name __keyevent@0__:expired use:
  1750. #
  1751. #  notify-keyspace-events Ex
  1752. #
  1753. #  By default all notifications are disabled because most users don't need
  1754. #  this feature and the feature has some overhead. Note that if you don't
  1755. #  specify at least one of K or E, no events will be delivered.
  1756. notify-keyspace-events ""
  1757. ############################### ADVANCED CONFIG ###############################
  1758. # Hashes are encoded using a memory efficient data structure when they have a
  1759. # small number of entries, and the biggest entry does not exceed a given
  1760. # threshold. These thresholds can be configured using the following directives.
  1761. hash-max-listpack-entries 512
  1762. hash-max-listpack-value 64
  1763. # Lists are also encoded in a special way to save a lot of space.
  1764. # The number of entries allowed per internal list node can be specified
  1765. # as a fixed maximum size or a maximum number of elements.
  1766. # For a fixed maximum size, use -5 through -1, meaning:
  1767. # -5: max size: 64 Kb  <-- not recommended for normal workloads
  1768. # -4: max size: 32 Kb  <-- not recommended
  1769. # -3: max size: 16 Kb  <-- probably not recommended
  1770. # -2: max size: 8 Kb   <-- good
  1771. # -1: max size: 4 Kb   <-- good
  1772. # Positive numbers mean store up to _exactly_ that number of elements
  1773. # per list node.
  1774. # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
  1775. # but if your use case is unique, adjust the settings as necessary.
  1776. list-max-listpack-size -2
  1777. # Lists may also be compressed.
  1778. # Compress depth is the number of quicklist ziplist nodes from *each* side of
  1779. # the list to *exclude* from compression.  The head and tail of the list
  1780. # are always uncompressed for fast push/pop operations.  Settings are:
  1781. # 0: disable all list compression
  1782. # 1: depth 1 means "don't start compressing until after 1 node into the list,
  1783. #    going from either the head or tail"
  1784. #    So: [head]->node->node->...->node->[tail]
  1785. #    [head], [tail] will always be uncompressed; inner nodes will compress.
  1786. # 2: [head]->[next]->node->node->...->node->[prev]->[tail]
  1787. #    2 here means: don't compress head or head->next or tail->prev or tail,
  1788. #    but compress all nodes between them.
  1789. # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
  1790. # etc.
  1791. list-compress-depth 0
  1792. # Sets have a special encoding when a set is composed
  1793. # of just strings that happen to be integers in radix 10 in the range
  1794. # of 64 bit signed integers.
  1795. # The following configuration setting sets the limit in the size of the
  1796. # set in order to use this special memory saving encoding.
  1797. set-max-intset-entries 512
  1798. # Sets containing non-integer values are also encoded using a memory efficient
  1799. # data structure when they have a small number of entries, and the biggest entry
  1800. # does not exceed a given threshold. These thresholds can be configured using
  1801. # the following directives.
  1802. set-max-listpack-entries 128
  1803. set-max-listpack-value 64
  1804. # Similarly to hashes and lists, sorted sets are also specially encoded in
  1805. # order to save a lot of space. This encoding is only used when the length and
  1806. # elements of a sorted set are below the following limits:
  1807. zset-max-listpack-entries 128
  1808. zset-max-listpack-value 64
  1809. # HyperLogLog sparse representation bytes limit. The limit includes the
  1810. # 16 bytes header. When a HyperLogLog using the sparse representation crosses
  1811. # this limit, it is converted into the dense representation.
  1812. #
  1813. # A value greater than 16000 is totally useless, since at that point the
  1814. # dense representation is more memory efficient.
  1815. #
  1816. # The suggested value is ~ 3000 in order to have the benefits of
  1817. # the space efficient encoding without slowing down too much PFADD,
  1818. # which is O(N) with the sparse encoding. The value can be raised to
  1819. # ~ 10000 when CPU is not a concern, but space is, and the data set is
  1820. # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
  1821. hll-sparse-max-bytes 3000
  1822. # Streams macro node max size / items. The stream data structure is a radix
  1823. # tree of big nodes that encode multiple items inside. Using this configuration
  1824. # it is possible to configure how big a single node can be in bytes, and the
  1825. # maximum number of items it may contain before switching to a new node when
  1826. # appending new stream entries. If any of the following settings are set to
  1827. # zero, the limit is ignored, so for instance it is possible to set just a
  1828. # max entries limit by setting max-bytes to 0 and max-entries to the desired
  1829. # value.
  1830. stream-node-max-bytes 4096
  1831. stream-node-max-entries 100
  1832. # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
  1833. # order to help rehashing the main Redis hash table (the one mapping top-level
  1834. # keys to values). The hash table implementation Redis uses (see dict.c)
  1835. # performs a lazy rehashing: the more operation you run into a hash table
  1836. # that is rehashing, the more rehashing "steps" are performed, so if the
  1837. # server is idle the rehashing is never complete and some more memory is used
  1838. # by the hash table.
  1839. #
  1840. # The default is to use this millisecond 10 times every second in order to
  1841. # actively rehash the main dictionaries, freeing memory when possible.
  1842. #
  1843. # If unsure:
  1844. # use "activerehashing no" if you have hard latency requirements and it is
  1845. # not a good thing in your environment that Redis can reply from time to time
  1846. # to queries with 2 milliseconds delay.
  1847. #
  1848. # use "activerehashing yes" if you don't have such hard requirements but
  1849. # want to free memory asap when possible.
  1850. activerehashing yes
  1851. # The client output buffer limits can be used to force disconnection of clients
  1852. # that are not reading data from the server fast enough for some reason (a
  1853. # common reason is that a Pub/Sub client can't consume messages as fast as the
  1854. # publisher can produce them).
  1855. #
  1856. # The limit can be set differently for the three different classes of clients:
  1857. #
  1858. # normal -> normal clients including MONITOR clients
  1859. # replica -> replica clients
  1860. # pubsub -> clients subscribed to at least one pubsub channel or pattern
  1861. #
  1862. # The syntax of every client-output-buffer-limit directive is the following:
  1863. #
  1864. # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
  1865. #
  1866. # A client is immediately disconnected once the hard limit is reached, or if
  1867. # the soft limit is reached and remains reached for the specified number of
  1868. # seconds (continuously).
  1869. # So for instance if the hard limit is 32 megabytes and the soft limit is
  1870. # 16 megabytes / 10 seconds, the client will get disconnected immediately
  1871. # if the size of the output buffers reach 32 megabytes, but will also get
  1872. # disconnected if the client reaches 16 megabytes and continuously overcomes
  1873. # the limit for 10 seconds.
  1874. #
  1875. # By default normal clients are not limited because they don't receive data
  1876. # without asking (in a push way), but just after a request, so only
  1877. # asynchronous clients may create a scenario where data is requested faster
  1878. # than it can read.
  1879. #
  1880. # Instead there is a default limit for pubsub and replica clients, since
  1881. # subscribers and replicas receive data in a push fashion.
  1882. #
  1883. # Note that it doesn't make sense to set the replica clients output buffer
  1884. # limit lower than the repl-backlog-size config (partial sync will succeed
  1885. # and then replica will get disconnected).
  1886. # Such a configuration is ignored (the size of repl-backlog-size will be used).
  1887. # This doesn't have memory consumption implications since the replica client
  1888. # will share the backlog buffers memory.
  1889. #
  1890. # Both the hard or the soft limit can be disabled by setting them to zero.
  1891. client-output-buffer-limit normal 0 0 0
  1892. client-output-buffer-limit replica 256mb 64mb 60
  1893. client-output-buffer-limit pubsub 32mb 8mb 60
  1894. # Client query buffers accumulate new commands. They are limited to a fixed
  1895. # amount by default in order to avoid that a protocol desynchronization (for
  1896. # instance due to a bug in the client) will lead to unbound memory usage in
  1897. # the query buffer. However you can configure it here if you have very special
  1898. # needs, such us huge multi/exec requests or alike.
  1899. #
  1900. # client-query-buffer-limit 1gb
  1901. # In some scenarios client connections can hog up memory leading to OOM
  1902. # errors or data eviction. To avoid this we can cap the accumulated memory
  1903. # used by all client connections (all pubsub and normal clients). Once we
  1904. # reach that limit connections will be dropped by the server freeing up
  1905. # memory. The server will attempt to drop the connections using the most
  1906. # memory first. We call this mechanism "client eviction".
  1907. #
  1908. # Client eviction is configured using the maxmemory-clients setting as follows:
  1909. # 0 - client eviction is disabled (default)
  1910. #
  1911. # A memory value can be used for the client eviction threshold,
  1912. # for example:
  1913. # maxmemory-clients 1g
  1914. #
  1915. # A percentage value (between 1% and 100%) means the client eviction threshold
  1916. # is based on a percentage of the maxmemory setting. For example to set client
  1917. # eviction at 5% of maxmemory:
  1918. # maxmemory-clients 5%
  1919. # In the Redis protocol, bulk requests, that are, elements representing single
  1920. # strings, are normally limited to 512 mb. However you can change this limit
  1921. # here, but must be 1mb or greater
  1922. #
  1923. # proto-max-bulk-len 512mb
  1924. # Redis calls an internal function to perform many background tasks, like
  1925. # closing connections of clients in timeout, purging expired keys that are
  1926. # never requested, and so forth.
  1927. #
  1928. # Not all tasks are performed with the same frequency, but Redis checks for
  1929. # tasks to perform according to the specified "hz" value.
  1930. #
  1931. # By default "hz" is set to 10. Raising the value will use more CPU when
  1932. # Redis is idle, but at the same time will make Redis more responsive when
  1933. # there are many keys expiring at the same time, and timeouts may be
  1934. # handled with more precision.
  1935. #
  1936. # The range is between 1 and 500, however a value over 100 is usually not
  1937. # a good idea. Most users should use the default of 10 and raise this up to
  1938. # 100 only in environments where very low latency is required.
  1939. hz 10
  1940. # Normally it is useful to have an HZ value which is proportional to the
  1941. # number of clients connected. This is useful in order, for instance, to
  1942. # avoid too many clients are processed for each background task invocation
  1943. # in order to avoid latency spikes.
  1944. #
  1945. # Since the default HZ value by default is conservatively set to 10, Redis
  1946. # offers, and enables by default, the ability to use an adaptive HZ value
  1947. # which will temporarily raise when there are many connected clients.
  1948. #
  1949. # When dynamic HZ is enabled, the actual configured HZ will be used
  1950. # as a baseline, but multiples of the configured HZ value will be actually
  1951. # used as needed once more clients are connected. In this way an idle
  1952. # instance will use very little CPU time while a busy instance will be
  1953. # more responsive.
  1954. dynamic-hz yes
  1955. # When a child rewrites the AOF file, if the following option is enabled
  1956. # the file will be fsync-ed every 4 MB of data generated. This is useful
  1957. # in order to commit the file to the disk more incrementally and avoid
  1958. # big latency spikes.
  1959. aof-rewrite-incremental-fsync yes
  1960. # When redis saves RDB file, if the following option is enabled
  1961. # the file will be fsync-ed every 4 MB of data generated. This is useful
  1962. # in order to commit the file to the disk more incrementally and avoid
  1963. # big latency spikes.
  1964. rdb-save-incremental-fsync yes
  1965. # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
  1966. # idea to start with the default settings and only change them after investigating
  1967. # how to improve the performances and how the keys LFU change over time, which
  1968. # is possible to inspect via the OBJECT FREQ command.
  1969. #
  1970. # There are two tunable parameters in the Redis LFU implementation: the
  1971. # counter logarithm factor and the counter decay time. It is important to
  1972. # understand what the two parameters mean before changing them.
  1973. #
  1974. # The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis
  1975. # uses a probabilistic increment with logarithmic behavior. Given the value
  1976. # of the old counter, when a key is accessed, the counter is incremented in
  1977. # this way:
  1978. #
  1979. # 1. A random number R between 0 and 1 is extracted.
  1980. # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
  1981. # 3. The counter is incremented only if R < P.
  1982. #
  1983. # The default lfu-log-factor is 10. This is a table of how the frequency
  1984. # counter changes with a different number of accesses with different
  1985. # logarithmic factors:
  1986. #
  1987. # +--------+------------+------------+------------+------------+------------+
  1988. # | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
  1989. # +--------+------------+------------+------------+------------+------------+
  1990. # | 0      | 104        | 255        | 255        | 255        | 255        |
  1991. # +--------+------------+------------+------------+------------+------------+
  1992. # | 1      | 18         | 49         | 255        | 255        | 255        |
  1993. # +--------+------------+------------+------------+------------+------------+
  1994. # | 10     | 10         | 18         | 142        | 255        | 255        |
  1995. # +--------+------------+------------+------------+------------+------------+
  1996. # | 100    | 8          | 11         | 49         | 143        | 255        |
  1997. # +--------+------------+------------+------------+------------+------------+
  1998. #
  1999. # NOTE: The above table was obtained by running the following commands:
  2000. #
  2001. #   redis-benchmark -n 1000000 incr foo
  2002. #   redis-cli object freq foo
  2003. #
  2004. # NOTE 2: The counter initial value is 5 in order to give new objects a chance
  2005. # to accumulate hits.
  2006. #
  2007. # The counter decay time is the time, in minutes, that must elapse in order
  2008. # for the key counter to be decremented.
  2009. #
  2010. # The default value for the lfu-decay-time is 1. A special value of 0 means we
  2011. # will never decay the counter.
  2012. #
  2013. # lfu-log-factor 10
  2014. # lfu-decay-time 1
  2015. ########################### ACTIVE DEFRAGMENTATION #######################
  2016. #
  2017. # What is active defragmentation?
  2018. # -------------------------------
  2019. #
  2020. # Active (online) defragmentation allows a Redis server to compact the
  2021. # spaces left between small allocations and deallocations of data in memory,
  2022. # thus allowing to reclaim back memory.
  2023. #
  2024. # Fragmentation is a natural process that happens with every allocator (but
  2025. # less so with Jemalloc, fortunately) and certain workloads. Normally a server
  2026. # restart is needed in order to lower the fragmentation, or at least to flush
  2027. # away all the data and create it again. However thanks to this feature
  2028. # implemented by Oran Agra for Redis 4.0 this process can happen at runtime
  2029. # in a "hot" way, while the server is running.
  2030. #
  2031. # Basically when the fragmentation is over a certain level (see the
  2032. # configuration options below) Redis will start to create new copies of the
  2033. # values in contiguous memory regions by exploiting certain specific Jemalloc
  2034. # features (in order to understand if an allocation is causing fragmentation
  2035. # and to allocate it in a better place), and at the same time, will release the
  2036. # old copies of the data. This process, repeated incrementally for all the keys
  2037. # will cause the fragmentation to drop back to normal values.
  2038. #
  2039. # Important things to understand:
  2040. #
  2041. # 1. This feature is disabled by default, and only works if you compiled Redis
  2042. #    to use the copy of Jemalloc we ship with the source code of Redis.
  2043. #    This is the default with Linux builds.
  2044. #
  2045. # 2. You never need to enable this feature if you don't have fragmentation
  2046. #    issues.
  2047. #
  2048. # 3. Once you experience fragmentation, you can enable this feature when
  2049. #    needed with the command "CONFIG SET activedefrag yes".
  2050. #
  2051. # The configuration parameters are able to fine tune the behavior of the
  2052. # defragmentation process. If you are not sure about what they mean it is
  2053. # a good idea to leave the defaults untouched.
  2054. # Active defragmentation is disabled by default
  2055. # activedefrag no
  2056. # Minimum amount of fragmentation waste to start active defrag
  2057. # active-defrag-ignore-bytes 100mb
  2058. # Minimum percentage of fragmentation to start active defrag
  2059. # active-defrag-threshold-lower 10
  2060. # Maximum percentage of fragmentation at which we use maximum effort
  2061. # active-defrag-threshold-upper 100
  2062. # Minimal effort for defrag in CPU percentage, to be used when the lower
  2063. # threshold is reached
  2064. # active-defrag-cycle-min 1
  2065. # Maximal effort for defrag in CPU percentage, to be used when the upper
  2066. # threshold is reached
  2067. # active-defrag-cycle-max 25
  2068. # Maximum number of set/hash/zset/list fields that will be processed from
  2069. # the main dictionary scan
  2070. # active-defrag-max-scan-fields 1000
  2071. # Jemalloc background thread for purging will be enabled by default
  2072. jemalloc-bg-thread yes
  2073. # It is possible to pin different threads and processes of Redis to specific
  2074. # CPUs in your system, in order to maximize the performances of the server.
  2075. # This is useful both in order to pin different Redis threads in different
  2076. # CPUs, but also in order to make sure that multiple Redis instances running
  2077. # in the same host will be pinned to different CPUs.
  2078. #
  2079. # Normally you can do this using the "taskset" command, however it is also
  2080. # possible to this via Redis configuration directly, both in Linux and FreeBSD.
  2081. #
  2082. # You can pin the server/IO threads, bio threads, aof rewrite child process, and
  2083. # the bgsave child process. The syntax to specify the cpu list is the same as
  2084. # the taskset command:
  2085. #
  2086. # Set redis server/io threads to cpu affinity 0,2,4,6:
  2087. # server_cpulist 0-7:2
  2088. #
  2089. # Set bio threads to cpu affinity 1,3:
  2090. # bio_cpulist 1,3
  2091. #
  2092. # Set aof rewrite child process to cpu affinity 8,9,10,11:
  2093. # aof_rewrite_cpulist 8-11
  2094. #
  2095. # Set bgsave child process to cpu affinity 1,10,11
  2096. # bgsave_cpulist 1,10-11
  2097. # In some cases redis will emit warnings and even refuse to start if it detects
  2098. # that the system is in bad state, it is possible to suppress these warnings
  2099. # by setting the following config which takes a space delimited list of warnings
  2100. # to suppress
  2101. #
  2102. # ignore-warnings ARM64-COW-BUG
复制代码
rocketmq配置

   rocketmq之broker.conf配置
  1. # Licensed to the Apache Software Foundation (ASF) under one or more
  2. # contributor license agreements.  See the NOTICE file distributed with
  3. # this work for additional information regarding copyright ownership.
  4. # The ASF licenses this file to You under the Apache License, Version 2.0
  5. # (the "License"); you may not use this file except in compliance with
  6. # the License.  You may obtain a copy of the License at
  7. #
  8. #     http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. #  Unless required by applicable law or agreed to in writing, software
  11. #  distributed under the License is distributed on an "AS IS" BASIS,
  12. #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. #  See the License for the specific language governing permissions and
  14. #  limitations under the License.
  15. # 所属集群名字
  16. brokerClusterName=DefaultCluster
  17. # broker 名字,注意此处不同的配置文件填写的不一样,如果在 broker-a.properties 使用: broker-a,
  18. # 在 broker-b.properties 使用: broker-b
  19. brokerName=broker-a
  20. # 0 表示 Master,> 0 表示 Slave
  21. brokerId=0
  22. # nameServer地址,分号分割
  23. # namesrvAddr=rocketmq-nameserver1:9876;rocketmq-nameserver2:9876
  24. # 启动IP,如果 docker 报 com.alibaba.rocketmq.remoting.exception.RemotingConnectException: connect to <192.168.0.120:10909> failed
  25. # 解决方式1 加上一句 producer.setVipChannelEnabled(false);,解决方式2 brokerIP1 设置宿主机IP,不要使用docker 内部IP 要换做你自己的IP
  26. #brokerIP1=192.168.1.16
  27. # 在发送消息时,自动创建服务器不存在的topic,默认创建的队列数
  28. defaultTopicQueueNums=4
  29. # 是否允许 Broker 自动创建 Topic,建议线下(测试)环境开启(true),线上(生产)环境关闭(false);下一行当前为线下配置 true,上线前务必改为 false
  30. autoCreateTopicEnable=true
  31. # 是否允许 Broker 自动创建订阅组,建议线下开启,线上关闭
  32. autoCreateSubscriptionGroup=true
  33. # Broker 对外服务的监听端口
  34. listenPort=10911
  35. # 删除文件时间点,默认凌晨4点
  36. deleteWhen=04
  37. # 文件保留时间(单位:小时),RocketMQ 默认为 72 小时,此处设置为 120 小时
  38. fileReservedTime=120
  39. # commitLog 每个文件的大小默认1G
  40. mapedFileSizeCommitLog=1073741824
  41. # ConsumeQueue 每个文件默认存 30W 条,根据业务情况调整
  42. mapedFileSizeConsumeQueue=300000
  43. # destroyMapedFileIntervalForcibly=120000
  44. # redeleteHangedFileInterval=120000
  45. # 检测物理文件磁盘空间
  46. diskMaxUsedSpaceRatio=88
  47. # 存储路径
  48. # storePathRootDir=/home/ztztdata/rocketmq-all-4.1.0-incubating/store
  49. # commitLog 存储路径
  50. # storePathCommitLog=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/commitlog
  51. # 消费队列存储
  52. # storePathConsumeQueue=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/consumequeue
  53. # 消息索引存储路径
  54. # storePathIndex=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/index
  55. # checkpoint 文件存储路径
  56. # storeCheckpoint=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/checkpoint
  57. # abort 文件存储路径
  58. # abortFile=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/abort
  59. # 限制的消息大小
  60. maxMessageSize=65536
  61. # flushCommitLogLeastPages=4
  62. # flushConsumeQueueLeastPages=2
  63. # flushCommitLogThoroughInterval=10000
  64. # flushConsumeQueueThoroughInterval=60000
  65. # Broker 的角色
  66. # - ASYNC_MASTER 异步复制Master
  67. # - SYNC_MASTER 同步双写Master
  68. # - SLAVE
  69. brokerRole=ASYNC_MASTER
  70. # 刷盘方式
  71. # - ASYNC_FLUSH 异步刷盘
  72. # - SYNC_FLUSH 同步刷盘
  73. flushDiskType=ASYNC_FLUSH
  74. # 发消息线程池数量
  75. # sendMessageThreadPoolNums=128
  76. # 拉消息线程池数量
  77. # pullMessageThreadPoolNums=128
复制代码
yapi配置

   yapi之config.json配置
  1. {"port":3000,"host":"localhost","adminAccount":"gufanbiao@163.com","adminPassword":"123456","closeRegister":true,"db":{"servername":"mongodb","port":27017,"DATABASE":"yapidb","user":"admin","pass":"admin","authSource":"admin"},"mail":{"enable":false,"auth":{}},"ldapLogin":{"enable":false},"plugins":[]}
复制代码
免责声明:如果侵犯了您的权益,请联系站长,我们会及时删除侵权内容,谢谢合作!更多信息请访问主页:qidao123.com:ToB企服之家,中国第一个企服评测及商务社交产业平台。




欢迎光临 ToB企服应用市场:ToB评测及商务社交产业平台 (https://dis.qidao123.com/) Powered by Discuz! X3.4