From 5a2c942656a821d8eaaf7748340829e576b9c2b0 Mon Sep 17 00:00:00 2001
From: openeuler_bot
Date: Wed, 3 Sep 2025 00:03:06 +0000
Subject: [PATCH] 24.03-lts-sp1 update kafka to 4.1.0

---
 Bigdata/kafka/4.1.0/24.03-lts-sp1/Dockerfile  |  34 +++++
 Bigdata/kafka/4.1.0/24.03-lts-sp1/launch      |  68 +++++++++
 .../4.1.0/24.03-lts-sp1/scripts/bash-config   |  23 +++
 .../4.1.0/24.03-lts-sp1/scripts/configure     | 121 +++++++++++++++
 .../24.03-lts-sp1/scripts/configureDefaults   |  28 ++++
 Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/run |  38 +++++
 .../4.1.0/24.03-lts-sp1/server.properties     | 138 ++++++++++++++++++
 Bigdata/kafka/README.md                       |   1 +
 Bigdata/kafka/doc/image-info.yml              |   3 +-
 Bigdata/kafka/meta.yml                        |   4 +-
 10 files changed, 456 insertions(+), 2 deletions(-)
 create mode 100644 Bigdata/kafka/4.1.0/24.03-lts-sp1/Dockerfile
 create mode 100644 Bigdata/kafka/4.1.0/24.03-lts-sp1/launch
 create mode 100644 Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/bash-config
 create mode 100644 Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/configure
 create mode 100644 Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/configureDefaults
 create mode 100644 Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/run
 create mode 100644 Bigdata/kafka/4.1.0/24.03-lts-sp1/server.properties

diff --git a/Bigdata/kafka/4.1.0/24.03-lts-sp1/Dockerfile b/Bigdata/kafka/4.1.0/24.03-lts-sp1/Dockerfile
new file mode 100644
index 00000000..4c6c968a
--- /dev/null
+++ b/Bigdata/kafka/4.1.0/24.03-lts-sp1/Dockerfile
@@ -0,0 +1,34 @@
+ARG BASE=openeuler/openeuler:24.03-lts-sp1
+FROM ${BASE}
+
+ARG TARGETARCH
+ARG VERSION=4.1.0
+
+ARG SCALA_VERSION=2.13
+ARG KAFKA_URL=https://archive.apache.org/dist/kafka/${VERSION}/kafka_${SCALA_VERSION}-${VERSION}.tgz
+
+RUN yum -y install wget hostname java-17-openjdk java-17-openjdk-devel
+RUN mkdir -p /opt/kafka; \
+    wget -O kafka.tgz "${KAFKA_URL}"; \
+    tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \
+    mkdir -p /var/lib/kafka/data /etc/kafka/secrets; \
+    mkdir -p /etc/kafka/docker /usr/logs /mnt/shared/config; \
+    useradd -d /home/appuser -m -s /bin/bash appuser; \
+    chown appuser:appuser -R /usr/logs /opt/kafka /mnt/shared/config; \
+    chown appuser:root -R /var/lib/kafka /etc/kafka/secrets /etc/kafka; \
+    chmod -R ug+w /etc/kafka /var/lib/kafka /etc/kafka/secrets; \
+    cp /opt/kafka/config/log4j2.yaml /etc/kafka/docker/log4j2.yaml; \
+    cp /opt/kafka/config/tools-log4j2.yaml /etc/kafka/docker/tools-log4j2.yaml; \
+    rm kafka.tgz; \
+    yum clean all
+
+COPY --chown=appuser:appuser scripts /etc/kafka/docker
+COPY --chown=appuser:appuser launch /etc/kafka/docker/launch
+COPY --chown=appuser:appuser server.properties /etc/kafka/docker/server.properties
+RUN chmod 755 /etc/kafka/docker/run
+
+EXPOSE 9092
+USER appuser
+
+VOLUME ["/etc/kafka/secrets", "/var/lib/kafka/data", "/mnt/shared/config"]
+CMD ["/etc/kafka/docker/run"]
\ No newline at end of file
diff --git a/Bigdata/kafka/4.1.0/24.03-lts-sp1/launch b/Bigdata/kafka/4.1.0/24.03-lts-sp1/launch
new file mode 100644
index 00000000..900eaab8
--- /dev/null
+++ b/Bigdata/kafka/4.1.0/24.03-lts-sp1/launch
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
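+# Illustrative only: the JMX-related variables consumed below can be supplied
+# at run time, e.g. (host/port values are examples; image name assumed):
+#   docker run -p 9092:9092 -p 9101:9101 \
+#     -e KAFKA_JMX_PORT=9101 -e KAFKA_JMX_HOSTNAME=broker.example.com \
+#     openeuler/kafka:4.1.0-oe2403sp1
+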
+# Override this section from the script to include the com.sun.management.jmxremote.rmi.port property.
+if [ -z "${KAFKA_JMX_OPTS-}" ]; then
+  export KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true \
+    -Dcom.sun.management.jmxremote.authenticate=false \
+    -Dcom.sun.management.jmxremote.ssl=false "
+fi
+
+# The JMX client needs to be able to connect to java.rmi.server.hostname.
+# The default for bridged n/w is the bridged IP, so you will only be able to connect from another docker container.
+# For host n/w, this is the IP that the hostname on the host resolves to.
+
+# If you have more than one n/w configured, hostname -i gives you all the IPs;
+# the default is to pick the first IP (or network).
+export KAFKA_JMX_HOSTNAME=${KAFKA_JMX_HOSTNAME:-$(hostname -i | cut -d" " -f1)}
+
+if [ "${KAFKA_JMX_PORT-}" ]; then
+  # This ensures that the "if" section for JMX_PORT in the kafka launch script does not trigger.
+  export JMX_PORT=$KAFKA_JMX_PORT
+  export KAFKA_JMX_OPTS="${KAFKA_JMX_OPTS-} -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME \
+    -Dcom.sun.management.jmxremote.local.only=false \
+    -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT \
+    -Dcom.sun.management.jmxremote.port=$JMX_PORT"
+fi
+
+# Keep a temporary copy of any user-provided JVM performance opts
+if [ -z "${KAFKA_JVM_PERFORMANCE_OPTS-}" ]; then
+  export TEMP_KAFKA_JVM_PERFORMANCE_OPTS=""
+else
+  export TEMP_KAFKA_JVM_PERFORMANCE_OPTS="$KAFKA_JVM_PERFORMANCE_OPTS"
+fi
+
+# Use the storage CDS archive first, while formatting storage
+export KAFKA_JVM_PERFORMANCE_OPTS="${KAFKA_JVM_PERFORMANCE_OPTS-} -XX:SharedArchiveFile=/opt/kafka/storage.jsa"
+
+echo "===> Using provided cluster id $CLUSTER_ID ..."
+
+# Invoke the docker wrapper to set up property files and format storage
+result=$(/opt/kafka/bin/kafka-run-class.sh kafka.docker.KafkaDockerWrapper setup \
+  --default-configs-dir /etc/kafka/docker \
+  --mounted-configs-dir /mnt/shared/config \
+  --final-configs-dir /opt/kafka/config 2>&1) || \
+  echo "$result" | grep -i "already formatted" || \
+  { echo "$result" && (exit 1); }
+
+# Restore the user-provided opts, dropping the storage CDS flag
+export KAFKA_JVM_PERFORMANCE_OPTS="$TEMP_KAFKA_JVM_PERFORMANCE_OPTS"
+
+# Now use the Kafka CDS archive to start the Kafka server
+export KAFKA_JVM_PERFORMANCE_OPTS="$KAFKA_JVM_PERFORMANCE_OPTS -XX:SharedArchiveFile=/opt/kafka/kafka.jsa"
+
+# Start the Kafka broker
+exec /opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/server.properties
\ No newline at end of file
diff --git a/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/bash-config b/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/bash-config
new file mode 100644
index 00000000..3f0dc450
--- /dev/null
+++ b/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/bash-config
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
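+# Illustrative: a debug run can enable the tracing block below with
+#   docker run -e TRACE=true openeuler/kafka:4.1.0-oe2403sp1
+# (image name assumed).
+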
+set -o nounset \
+    -o errexit
+
+# Trace may expose passwords/credentials by printing them to stdout, so turn it on with care.
+if [ "${TRACE:-}" == "true" ]; then
+  set -o verbose \
+      -o xtrace
+fi
\ No newline at end of file
diff --git a/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/configure b/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/configure
new file mode 100644
index 00000000..9d9961d5
--- /dev/null
+++ b/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/configure
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ensure() {
+  if [[ -z "${!1}" ]]; then
+    echo "$1 environment variable not set"
+    exit 1
+  fi
+}
+
+path() {
+  if [[ $2 == "writable" ]]; then
+    if [[ ! -w "$1" ]]; then
+      echo "$1 file not writable"
+      exit 1
+    fi
+  elif [[ $2 == "existence" ]]; then
+    if [[ ! -e "$1" ]]; then
+      echo "$1 file does not exist"
+      exit 1
+    fi
+  fi
+}
+
+# unset KAFKA_ADVERTISED_LISTENERS from ENV in KRaft mode when running as controller only
+if [[ -n "${KAFKA_PROCESS_ROLES-}" ]]
+then
+  echo "Running in KRaft mode..."
+  ensure CLUSTER_ID
+  if [[ $KAFKA_PROCESS_ROLES == "controller" ]]
+  then
+    if [[ -n "${KAFKA_ADVERTISED_LISTENERS-}" ]]
+    then
+      echo "KAFKA_ADVERTISED_LISTENERS is not supported on a KRaft controller."
+      exit 1
+    else
+      # Unset in case env variable is set with empty value
+      unset KAFKA_ADVERTISED_LISTENERS
+    fi
+  fi
+fi
+
+# By default, LISTENERS is derived from ADVERTISED_LISTENERS by replacing
+# hosts with 0.0.0.0. This is a good default, as it ensures that the broker
+# process listens on all network interfaces.
+if [[ -z "${KAFKA_LISTENERS-}" ]] && ( [[ -z "${KAFKA_PROCESS_ROLES-}" ]] || [[ $KAFKA_PROCESS_ROLES != "controller" ]] ) && [[ -n "${KAFKA_ADVERTISED_LISTENERS-}" ]]
+then
+  export KAFKA_LISTENERS
+  KAFKA_LISTENERS=$(echo "$KAFKA_ADVERTISED_LISTENERS" | sed -e 's|://[^:]*:|://0.0.0.0:|g')
+fi
+
+path /opt/kafka/config/ writable
+
+# Set if ADVERTISED_LISTENERS has SSL:// or SASL_SSL:// endpoints.
+if [[ -n "${KAFKA_ADVERTISED_LISTENERS-}" ]] && [[ $KAFKA_ADVERTISED_LISTENERS == *"SSL://"* ]]
+then
+  echo "SSL is enabled."
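+
+  # Illustrative layout (file names are examples only): the checks below expect
+  # keystore and credential files under /etc/kafka/secrets, typically mounted via
+  #   docker run -v "$PWD/secrets:/etc/kafka/secrets" \
+  #     -e KAFKA_SSL_KEYSTORE_FILENAME=kafka.keystore.jks \
+  #     -e KAFKA_SSL_KEYSTORE_CREDENTIALS=keystore_creds \
+  #     -e KAFKA_SSL_KEY_CREDENTIALS=key_creds ...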
+
+  ensure KAFKA_SSL_KEYSTORE_FILENAME
+  export KAFKA_SSL_KEYSTORE_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEYSTORE_FILENAME"
+  path "$KAFKA_SSL_KEYSTORE_LOCATION" existence
+
+  ensure KAFKA_SSL_KEY_CREDENTIALS
+  KAFKA_SSL_KEY_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEY_CREDENTIALS"
+  path "$KAFKA_SSL_KEY_CREDENTIALS_LOCATION" existence
+  export KAFKA_SSL_KEY_PASSWORD
+  KAFKA_SSL_KEY_PASSWORD=$(cat "$KAFKA_SSL_KEY_CREDENTIALS_LOCATION")
+
+  ensure KAFKA_SSL_KEYSTORE_CREDENTIALS
+  KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEYSTORE_CREDENTIALS"
+  path "$KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION" existence
+  export KAFKA_SSL_KEYSTORE_PASSWORD
+  KAFKA_SSL_KEYSTORE_PASSWORD=$(cat "$KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION")
+
+  if [[ -n "${KAFKA_SSL_CLIENT_AUTH-}" ]] && ( [[ $KAFKA_SSL_CLIENT_AUTH == *"required"* ]] || [[ $KAFKA_SSL_CLIENT_AUTH == *"requested"* ]] )
+  then
+    ensure KAFKA_SSL_TRUSTSTORE_FILENAME
+    export KAFKA_SSL_TRUSTSTORE_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_TRUSTSTORE_FILENAME"
+    path "$KAFKA_SSL_TRUSTSTORE_LOCATION" existence
+
+    ensure KAFKA_SSL_TRUSTSTORE_CREDENTIALS
+    KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_TRUSTSTORE_CREDENTIALS"
+    path "$KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION" existence
+    export KAFKA_SSL_TRUSTSTORE_PASSWORD
+    KAFKA_SSL_TRUSTSTORE_PASSWORD=$(cat "$KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION")
+  fi
+fi
+
+# Set if KAFKA_ADVERTISED_LISTENERS has SASL_PLAINTEXT:// or SASL_SSL:// endpoints.
+if [[ -n "${KAFKA_ADVERTISED_LISTENERS-}" ]] && [[ $KAFKA_ADVERTISED_LISTENERS =~ .*SASL_.*://.* ]]
+then
+  echo "SASL is enabled."
+
+  ensure KAFKA_OPTS
+
+  if [[ ! $KAFKA_OPTS == *"java.security.auth.login.config"* ]]
+  then
+    echo "KAFKA_OPTS should contain 'java.security.auth.login.config' property."
+  fi
+fi
+
+if [[ -n "${KAFKA_JMX_OPTS-}" ]]
+then
+  if [[ ! $KAFKA_JMX_OPTS == *"com.sun.management.jmxremote.rmi.port"* ]]
+  then
+    echo "KAFKA_JMX_OPTS should contain 'com.sun.management.jmxremote.rmi.port' property. It is required for accessing the JMX metrics externally."
+  fi
+fi
\ No newline at end of file
diff --git a/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/configureDefaults b/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/configureDefaults
new file mode 100644
index 00000000..c3c68ec8
--- /dev/null
+++ b/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/configureDefaults
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+declare -A env_defaults
+env_defaults=(
+# Replace CLUSTER_ID with a unique base64 UUID using "bin/kafka-storage.sh random-uuid"
+  ["CLUSTER_ID"]="5L6g3nShT-eMCtK--X86sw"
+)
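+
+# Illustrative: a unique id can be generated on the host and passed in at run
+# time instead of relying on the default above:
+#   docker run -e CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)" <image>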
+
+for key in "${!env_defaults[@]}"; do
+  if [[ -z "${!key:-}" ]]; then
+    echo "${key} not set. Setting it to default value: \"${env_defaults[$key]}\""
+    export "$key"="${env_defaults[$key]}"
+  fi
+done
\ No newline at end of file
diff --git a/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/run b/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/run
new file mode 100644
index 00000000..9b4d43d6
--- /dev/null
+++ b/Bigdata/kafka/4.1.0/24.03-lts-sp1/scripts/run
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+. /etc/kafka/docker/bash-config
+
+# Set environment values if they exist as arguments
+if [ $# -ne 0 ]; then
+  echo "===> Overriding env params with args ..."
+  for var in "$@"
+  do
+    export "$var"
+  done
+fi
+
+echo "===> User"
+id
+
+echo "===> Setting default values of environment variables if not already set."
+. /etc/kafka/docker/configureDefaults
+
+echo "===> Configuring ..."
+. /etc/kafka/docker/configure
+
+echo "===> Launching ... "
+. /etc/kafka/docker/launch
\ No newline at end of file
diff --git a/Bigdata/kafka/4.1.0/24.03-lts-sp1/server.properties b/Bigdata/kafka/4.1.0/24.03-lts-sp1/server.properties
new file mode 100644
index 00000000..d43b20b9
--- /dev/null
+++ b/Bigdata/kafka/4.1.0/24.03-lts-sp1/server.properties
@@ -0,0 +1,138 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################## Why We Need This Separate Config ####################
+
+# While our latest version supports dynamic voter configuration,
+# we will continue using static voter configurations in our Docker Hub images.
+# This decision ensures broader compatibility across different versions and
+# maintains consistent behavior for existing deployments.
+# By retaining the static voter implementation in our Docker images, we can provide
+# a more stable and predictable environment for users across various versions of the application.
+
+############################# Server Basics #############################
+
+# The role of this server. Setting this puts us in KRaft mode
+process.roles=broker,controller
+
+# The node id associated with this instance's roles
+node.id=1
+
+# The connect string for the controller quorum
+controller.quorum.voters=1@localhost:9093
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on.
+# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum.
+# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(),
+# with PLAINTEXT listener name, and port 9092.
+# FORMAT:
+#   listeners = listener_name://host_name:port
+# EXAMPLE:
+#   listeners = PLAINTEXT://your.host.name:9092
+listeners=PLAINTEXT://:9092,CONTROLLER://:9093
+
+# Name of the listener used for communication between brokers.
+inter.broker.listener.name=PLAINTEXT
+
+# Listener name, hostname and port the broker or the controller will advertise to clients.
+# If not set, it uses the value for "listeners".
+advertised.listeners=PLAINTEXT://localhost:9092
+
+# A comma-separated list of the names of the listeners used by the controller.
+# If no explicit mapping is set in `listener.security.protocol.map`, the PLAINTEXT protocol is used by default.
+# This is required if running in KRaft mode.
+controller.listener.names=CONTROLLER
+
+# Maps listener names to security protocols; the default is for them to be the same. See the config documentation for more details.
+listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma-separated list of directories under which to store log files
+log.dirs=/tmp/kraft-combined-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# Increasing this value is recommended for installations with data dirs located in a RAID array.
+num.recovery.threads.per.data.dir=1
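+
+# Note (assumed from the upstream apache/kafka Docker wrapper): when the container
+# is started via the scripts above, properties in this file can typically be
+# overridden with KAFKA_-prefixed environment variables, upper-cased and with
+# dots replaced by underscores. For example:
+#   docker run -e KAFKA_NUM_PARTITIONS=3 -e KAFKA_LOG_RETENTION_HOURS=72 <image>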
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets", "__share_group_state" and "__transaction_state".
+# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
+offsets.topic.replication.factor=1
+share.coordinator.state.topic.replication.factor=1
+share.coordinator.state.topic.min.isr=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
\ No newline at end of file
diff --git a/Bigdata/kafka/README.md b/Bigdata/kafka/README.md
index 6ef27663..cad4c529 100644
--- a/Bigdata/kafka/README.md
+++ b/Bigdata/kafka/README.md
@@ -17,6 +17,7 @@ Learn more on [Kafka website](https://kafka.apache.org/).
 The tag of each Kafka Docker image consists of the Kafka version and the base image version. The details are as follows:
 | Tags | Currently | Architectures|
 |--|--|--|
+|[4.1.0-oe2403sp1](https://gitee.com/openeuler/openeuler-docker-images/blob/master/Bigdata/kafka/4.1.0/24.03-lts-sp1/Dockerfile)| Apache Kafka server 4.1.0 on openEuler 24.03-LTS-SP1 | amd64, arm64 |
 |[3.7.0-oe2203sp3](https://gitee.com/openeuler/openeuler-docker-images/blob/master/Bigdata/kafka/3.7.0/22.03-lts-sp3/Dockerfile)| Apache Kafka server 3.7.0 on openEuler 22.03-LTS-SP3 | amd64, arm64 |
 |[3.8.0-oe2003sp4](https://gitee.com/openeuler/openeuler-docker-images/blob/master/Bigdata/kafka/3.8.0/20.03-lts-sp4/Dockerfile)| Apache Kafka server 3.8.0 on openEuler 20.03-LTS-SP4 | amd64, arm64 |
 |[3.8.0-oe2203sp1](https://gitee.com/openeuler/openeuler-docker-images/blob/master/Bigdata/kafka/3.8.0/22.03-lts-sp1/Dockerfile)| Apache Kafka server 3.8.0 on openEuler 22.03-LTS-SP1 | amd64, arm64 |
diff --git a/Bigdata/kafka/doc/image-info.yml b/Bigdata/kafka/doc/image-info.yml
index 07e7d4f3..2e97b845 100644
--- a/Bigdata/kafka/doc/image-info.yml
+++ b/Bigdata/kafka/doc/image-info.yml
@@ -11,6 +11,7 @@ tags: |
 
   | Tag  | Currently | Architectures |
   |----------|-------------|------------------|
+  |[4.1.0-oe2403sp1](https://gitee.com/openeuler/openeuler-docker-images/blob/master/Bigdata/kafka/4.1.0/24.03-lts-sp1/Dockerfile) | Apache Kafka server 4.1.0 on openEuler 24.03-LTS-SP1 | amd64, arm64 |
   |[3.7.0-oe2203sp3](https://gitee.com/openeuler/openeuler-docker-images/blob/master/Bigdata/kafka/3.7.0/22.03-lts-sp3/Dockerfile)| Apache Kafka server 3.7.0 on openEuler 22.03-LTS-SP3 | amd64, arm64 |
   |[3.8.0-oe2003sp4](https://gitee.com/openeuler/openeuler-docker-images/blob/master/Bigdata/kafka/3.8.0/20.03-lts-sp4/Dockerfile)| Apache Kafka server 3.8.0 on openEuler 20.03-LTS-SP4 | amd64, arm64 |
   |[3.8.0-oe2203sp1](https://gitee.com/openeuler/openeuler-docker-images/blob/master/Bigdata/kafka/3.8.0/22.03-lts-sp1/Dockerfile)| Apache Kafka server 3.8.0 on openEuler 22.03-LTS-SP1 | amd64, arm64 |
@@ -70,4 +71,4 @@ similar_packages:
   - AWS Kinesis: A stream-processing platform in Amazon Web Services for collecting, processing, and analyzing real-time data streams. It provides scalable messaging and data-processing capabilities.
   - Apache Samza: A distributed stream-processing framework for real-time processing and analysis of data streams. It integrates tightly with Apache Kafka and can consume data directly from Kafka for processing.
 dependency:
-  - openjdk
+  - openjdk
\ No newline at end of file
diff --git a/Bigdata/kafka/meta.yml b/Bigdata/kafka/meta.yml
index af2d03a4..033809a5 100644
--- a/Bigdata/kafka/meta.yml
+++ b/Bigdata/kafka/meta.yml
@@ -27,4 +27,6 @@
 3.9.0-oe2403lts:
   path: 3.9.0/24.03-lts/Dockerfile
 4.0.0-oe2403sp1:
-  path: 4.0.0/24.03-lts-sp1/Dockerfile
\ No newline at end of file
+  path: 4.0.0/24.03-lts-sp1/Dockerfile
+4.1.0-oe2403sp1:
+  path: 4.1.0/24.03-lts-sp1/Dockerfile
\ No newline at end of file
-- 
Gitee