docker网络规划
- docker network create kafka-net --subnet 172.20.0.0/16
- docker network ls
复制代码
- zookeeper1(172.20.0.11 2183:2181)
- zookeeper2(172.20.0.12 2184:2181)
- zookeeper3(172.20.0.13 2185:2181)(注:端口映射已按下文 zk-docker-compose.yml 修正,原文写作 2184/2185/2186,与配置文件不一致)
- kafka(172.20.0.14 内部9093:9093,外部9193:9193)
- kafka(172.20.0.15 内部9094:9094,外部9194:9194)
- kafka(172.20.0.16 内部9095:9095,外部9195:9195)
- kafka manager(172.20.0.10 9000:9000)
部署中的配置和授权认证文件制作
准备以下两个文件,它们的位置可以放到任意地方,只需要镜像部署的配置文件中能引用到即可。
- 新建一个zookeeper和kafka共用的授权认证文件:server_jass.conf。按照本教程建议放到/root/kafka/kafka-sasl/server_jass.conf
// Shared JAAS configuration for the zookeeper ensemble and the kafka brokers.
// Referenced via -Djava.security.auth.login.config=.../server_jass.conf.
// NOTE: JAAS login configuration files only support // and /* */ comments;
// the original inline '#' comment was moved here to keep the file parseable.

// Credentials zookeeper clients (including the kafka brokers) present
// when connecting to zookeeper.
Client {
  org.apache.zookeeper.server.auth.DigestLoginModule required
  username="test"
  password="test@QWER";
};

// Accounts the zookeeper server accepts from connecting clients.
// user_<name>="<password>" declares an account; e.g. account "test"
// with password "test@QWER".
Server {
  org.apache.zookeeper.server.auth.DigestLoginModule required
  username="test"
  password="test@QWER"
  user_admin="test@QWER"
  user_test="test@QWER";
};

// SASL/PLAIN accounts the kafka broker accepts from clients.
KafkaServer {
  org.apache.kafka.common.security.plain.PlainLoginModule required
  username="test"
  password="test@QWER"
  user_test="test@QWER";
};

// Credentials kafka command-line clients present to the broker.
KafkaClient {
  org.apache.kafka.common.security.plain.PlainLoginModule required
  username="test"
  password="test@QWER";
};
复制代码
- 新建一个kafka-run-class脚本文件,规避JMX冲突:kafka-run-class.sh。按照本教程建议放到/root/kafka/kafka-run-class.sh
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# At least the class name to launch must be supplied.
if [ $# -lt 1 ]; then
  echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
  exit 1
fi
# CYGWIN == 1 if Cygwin is detected, else 0.
if [[ $(uname -a) =~ "CYGWIN" ]]; then
  CYGWIN=1
else
  CYGWIN=0
fi

# Test jars go on the classpath only when explicitly requested.
if [ -z "$INCLUDE_TEST_JARS" ]; then
  INCLUDE_TEST_JARS=false
fi

# Exclude jars not necessary for running commands.
regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc)$"

# should_include_file FILE
# Returns 0 (include) when FILE does not match $regex, or unconditionally
# when INCLUDE_TEST_JARS=true; returns 1 (exclude) otherwise.
should_include_file() {
  if [ "$INCLUDE_TEST_JARS" = true ]; then
    return 0
  fi
  # grep -E -q replaces the deprecated `egrep` and the indirect
  # [ -z "$(echo ... | egrep ...)" ] emptiness test of the original.
  if printf '%s\n' "$1" | grep -E -q -- "$regex"; then
    return 1
  else
    return 0
  fi
}
# Distribution root, relative to this script's location.
base_dir=$(dirname $0)/..

# Scala version: honour $SCALA_VERSION if set, otherwise read it from
# gradle.properties when present, falling back to a hard-coded default.
if [ -z "$SCALA_VERSION" ]; then
  SCALA_VERSION=2.13.5
  if [[ -f "$base_dir/gradle.properties" ]]; then
    SCALA_VERSION=$(grep "^scalaVersion=" "$base_dir/gradle.properties" | cut -d= -f 2)
  fi
fi

# Binary version is the first two dotted components, e.g. 2.13.5 -> 2.13.
if [ -z "$SCALA_BINARY_VERSION" ]; then
  SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
fi
# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
shopt -s nullglob
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*; do
    CLASSPATH="$CLASSPATH:$dir/*"
  done
fi

for file in "$base_dir"/examples/build/libs/kafka-examples*.jar; do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

# Library locations differ between an in-tree build and an upgrade-test
# installation under /opt.
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  clients_lib_dir=$(dirname $0)/../clients/build/libs
  streams_lib_dir=$(dirname $0)/../streams/build/libs
  streams_dependant_clients_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION}
else
  clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs
  streams_lib_dir=$clients_lib_dir
  streams_dependant_clients_lib_dir=$streams_lib_dir
fi

for file in "$clients_lib_dir"/kafka-clients*.jar; do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for file in "$streams_lib_dir"/kafka-streams*.jar; do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar; do
    if should_include_file "$file"; then
      CLASSPATH="$CLASSPATH":"$file"
    fi
  done
else
  VERSION_NO_DOTS=$(echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g')
  SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS:0:((${#VERSION_NO_DOTS} - 1))} # remove last char, ie, bug-fix number
  for file in "$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar; do
    if should_include_file "$file"; then
      CLASSPATH="$file":"$CLASSPATH"
    fi
  done
  # Ancient 0.10.0/0.10.1 streams builds need their zkclient/zookeeper jars.
  if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH"
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH"
  fi
  if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH"
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH"
  fi
fi

for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar; do
  CLASSPATH="$CLASSPATH":"$file"
done

for file in "$streams_dependant_clients_lib_dir"/*hamcrest*.jar; do
  CLASSPATH="$CLASSPATH":"$file"
done

for file in "$base_dir"/shell/build/libs/kafka-shell*.jar; do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for dir in "$base_dir"/shell/build/dependant-libs-${SCALA_VERSION}*; do
  CLASSPATH="$CLASSPATH:$dir/*"
done

for file in "$base_dir"/tools/build/libs/kafka-tools*.jar; do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*; do
  CLASSPATH="$CLASSPATH:$dir/*"
done

# Kafka Connect modules and their dependant libs.
for cc_pkg in "api" "transforms" "runtime" "file" "mirror" "mirror-client" "json" "tools" "basic-auth-extension"; do
  for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar; do
    if should_include_file "$file"; then
      CLASSPATH="$CLASSPATH":"$file"
    fi
  done
  if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ]; then
    CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
  fi
done

# classpath addition for release
for file in "$base_dir"/libs/*; do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar; do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done
shopt -u nullglob

# Nothing found anywhere: the project has not been built.
if [ -z "$CLASSPATH" ]; then
  echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'"
  exit 1
fi
# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
  KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
fi

# JMX port to use.
# Detect whether we are launching the broker itself (kafka.Kafka). In that
# case the inherited JMX_PORT must NOT be applied again, so CLI tools run
# inside the same container do not clash with the broker's JMX port.
ISKAFKASERVER="false"
if [[ "$*" =~ "kafka.Kafka" ]]; then
  ISKAFKASERVER="true"
fi

# BUG FIX: the original test was `[ -z "$ISKAFKASERVER" ]`, which can never
# succeed because ISKAFKASERVER is always "true" or "false" (never empty),
# so JMX_PORT was silently ignored for every command. Compare against
# "false" explicitly instead.
if [ "$JMX_PORT" ] && [ "$ISKAFKASERVER" = "false" ]; then
  KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
fi
# Log directory to use
if [ -z "$LOG_DIR" ]; then
  LOG_DIR="$base_dir/logs"
fi

# Log4j settings
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
  # Log to console. This is a tool.
  LOG4J_DIR="$base_dir/config/tools-log4j.properties"
  # If Cygwin is detected, LOG4J_DIR is converted to Windows format.
  (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
  KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
else
  # An explicit log4j configuration was supplied: make sure the log
  # directory it may write to exists.
  if [ ! -d "$LOG_DIR" ]; then
    mkdir -p "$LOG_DIR"
  fi
fi

# If Cygwin is detected, LOG_DIR is converted to Windows format.
(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"

# Generic jvm settings you want to add
KAFKA_OPTS="${KAFKA_OPTS:-}"

# Set Debug options if enabled
if [ -n "$KAFKA_DEBUG" ]; then
  # Use default ports
  DEFAULT_JAVA_DEBUG_PORT="5005"
  if [ -z "$JAVA_DEBUG_PORT" ]; then
    JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
  fi
  # Use the defaults if JAVA_DEBUG_OPTS was not set
  DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT"
  if [ -z "$JAVA_DEBUG_OPTS" ]; then
    JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
  fi
  echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
  KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
fi

# Which java to use
if [ -z "$JAVA_HOME" ]; then
  JAVA="java"
else
  JAVA="$JAVA_HOME/bin/java"
fi

# Memory options
KAFKA_HEAP_OPTS="${KAFKA_HEAP_OPTS:--Xmx256M}"

# JVM performance options
# MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true"
fi
# Parse the leading [-daemon] [-name NAME] [-loggc] options; the first
# unrecognised word is the class name, which stays in "$@" with its args.
while [ $# -gt 0 ]; do
  case "$1" in
    -name)
      DAEMON_NAME=$2
      CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
      shift 2
      ;;
    -loggc)
      if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
        GC_LOG_ENABLED="true"
      fi
      shift
      ;;
    -daemon)
      DAEMON_MODE="true"
      shift
      ;;
    *)
      break
      ;;
  esac
done

# GC options
GC_FILE_SUFFIX='-gc.log'
GC_LOG_FILE_NAME=''
if [ "$GC_LOG_ENABLED" = "true" ]; then
  GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX

  # The first segment of the version number, which is '1' for releases before Java 9
  # it then becomes '9', '10', ...
  # Some examples of the first line of `java --version`:
  #   8      -> java version "1.8.0_152"
  #   9.0.4  -> java version "9.0.4"
  #   10     -> java version "10" 2018-03-20
  #   10.0.1 -> java version "10.0.1" 2018-04-17
  # We need to match to the end of the line to prevent sed from printing the characters that do not match
  JAVA_MAJOR_VERSION=$("$JAVA" -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
  if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]]; then
    # Java 9+ unified GC logging syntax.
    KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time,tags:filecount=10,filesize=100M"
  else
    KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
  fi
fi

# Remove a possible colon prefix from the classpath (happens at lines like
# `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank); see bash
# "Substring Removal" parameter expansion.
CLASSPATH=${CLASSPATH#:}

# If Cygwin is detected, classpath is converted to Windows format.
(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")

# Launch mode: -daemon detaches via nohup, otherwise replace this shell.
if [ "$DAEMON_MODE" = "true" ]; then
  nohup "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
else
  exec "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@"
fi
复制代码
镜像部署
- 新建zookeeper镜像文件:zk-docker-compose.yml
services:
  zook1:
    image: zookeeper:latest
    #restart: always  # uncomment to restart the container automatically
    hostname: zook1
    container_name: zook1  # explicit name so it displays meaningfully in rancher
    ports:
      - 2183:2181  # map the container's default zookeeper client port to the host
    volumes:  # host directory on the left, container directory on the right
      - "/Users/konsy/Development/volume/zkcluster/zook1/data:/data"
      - "/Users/konsy/Development/volume/zkcluster/zook1/datalog:/datalog"
      - "/Users/konsy/Development/volume/zkcluster/zook1/logs:/logs"
      - "/root/kafka/kafka-sasl/:/opt/zookeeper/secrets/"  # mount the SASL credential file
    environment:
      ZOO_MY_ID: 1  # zookeeper node id (also used as the kafka broker id)
      ZOO_SERVERS: server.1=zook1:2888:3888;2181 server.2=zook2:2888:3888;2181 server.3=zook3:2888:3888;2181
      ZOO_TLS_QUORUM_CLIENT_AUTH: need
      SERVER_JVMFLAGS: -Djava.security.auth.login.config=/opt/zookeeper/secrets/server_jass.conf  # JAAS credential file location
    networks:
      kafka-net:
        ipv4_address: 172.20.0.11

  zook2:
    image: zookeeper:latest
    #restart: always  # uncomment to restart the container automatically
    hostname: zook2
    container_name: zook2  # explicit name so it displays meaningfully in rancher
    ports:
      - 2184:2181  # map the container's default zookeeper client port to the host
    volumes:
      - "/Users/konsy/Development/volume/zkcluster/zook2/data:/data"
      - "/Users/konsy/Development/volume/zkcluster/zook2/datalog:/datalog"
      - "/Users/konsy/Development/volume/zkcluster/zook2/logs:/logs"
      - "/root/kafka/kafka-sasl/:/opt/zookeeper/secrets/"
    environment:
      ZOO_MY_ID: 2  # zookeeper node id (also used as the kafka broker id)
      ZOO_SERVERS: server.1=zook1:2888:3888;2181 server.2=zook2:2888:3888;2181 server.3=zook3:2888:3888;2181
      ZOO_TLS_QUORUM_CLIENT_AUTH: need
      SERVER_JVMFLAGS: -Djava.security.auth.login.config=/opt/zookeeper/secrets/server_jass.conf
    networks:
      kafka-net:
        ipv4_address: 172.20.0.12

  zook3:
    image: zookeeper:latest
    #restart: always  # uncomment to restart the container automatically
    hostname: zook3
    container_name: zook3  # explicit name so it displays meaningfully in rancher
    ports:
      - 2185:2181  # map the container's default zookeeper client port to the host
    volumes:
      - "/Users/konsy/Development/volume/zkcluster/zook3/data:/data"
      - "/Users/konsy/Development/volume/zkcluster/zook3/datalog:/datalog"
      - "/Users/konsy/Development/volume/zkcluster/zook3/logs:/logs"
      - "/root/kafka/kafka-sasl/:/opt/zookeeper/secrets/"
    environment:
      ZOO_MY_ID: 3  # zookeeper node id (also used as the kafka broker id)
      ZOO_SERVERS: server.1=zook1:2888:3888;2181 server.2=zook2:2888:3888;2181 server.3=zook3:2888:3888;2181
      ZOO_TLS_QUORUM_CLIENT_AUTH: need
      SERVER_JVMFLAGS: -Djava.security.auth.login.config=/opt/zookeeper/secrets/server_jass.conf
    networks:
      kafka-net:
        ipv4_address: 172.20.0.13

networks:
  kafka-net:
    external: true  # created beforehand with `docker network create`
复制代码
- docker compose -p zookeeper -f ./zk-docker-compose.yml up -d
复制代码
- 新建Kafka集群配置文件:kafka-docker-compose.yml(注:原文此处缺少该 compose 文件的具体内容,需自行补充)
- docker compose -f ./kafka-docker-compose.yml up -d
复制代码
- 新建kafka-manager配置文件:kafka-manager-docker-compose.yml
services:
  kafka-manager:
    image: scjtqs/kafka-manager:latest
    restart: always
    hostname: kafka-manager
    container_name: kafka-manager
    ports:
      - 9000:9000
    external_links:  # containers created outside this compose file
      - zook1
      - zook2
      - zook3
      - 172.20.0.14
      - 172.20.0.15
      - 172.20.0.16
    environment:
      ZK_HOSTS: zook1:2181,zook2:2181,zook3:2181
      KAFKA_BROKERS: 172.20.0.14:9093,172.20.0.15:9094,172.20.0.16:9095
      APPLICATION_SECRET: letmein
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      kafka-net:
        # FIX: was 172.20.10.10, which contradicts the address plan at the
        # top of this article (kafka manager is planned at 172.20.0.10, on
        # the same 172.20.0.x range as every other node).
        ipv4_address: 172.20.0.10

networks:
  kafka-net:
    external: true  # created beforehand with `docker network create`
复制代码
- 执行脚本部署kafka-manager至Docker
- docker compose -f ./kafka-manager-docker-compose.yml up -d
复制代码
免责声明:如果侵犯了您的权益,请联系站长,我们会及时删除侵权内容,谢谢合作!更多信息从访问主页:qidao123.com:ToB企服之家,中国第一个企服评测及商务社交产业平台。 |