正文
安装Kafka
Kafka 使用 Zookeeper 来保存相关配置信息,Kafka 及 Zookeeper 都依赖 Java 运行环境,因此需要先安装 Java。
安装Java
我们可以从 Oracle 官网下载 JDK 安装包,解压安装;也可以用 yum 方式安装。
这里选择yum方式安装。
一、查找 Java 相关的软件包列表:
yum -y list "java*"
(加引号是为了防止当前目录下恰好有以 java 开头的文件时,通配符被 shell 先行展开;下方实际执行记录中未加引号,在目录中没有匹配文件时效果相同。)
[root@HIGDQEEMO63U9NR ~]# yum -y list java*
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* remi-safe: mirrors.tuna.tsinghua.edu.cn
* updates: mirrors.aliyun.com
Available Packages
java-1.6.0-openjdk.x86_64 1:1.6.0.41-1.13.13.1.el7_3 base
java-1.6.0-openjdk-demo.x86_64 1:1.6.0.41-1.13.13.1.el7_3 base
java-1.6.0-openjdk-devel.x86_64 1:1.6.0.41-1.13.13.1.el7_3 base
java-1.6.0-openjdk-javadoc.x86_64 1:1.6.0.41-1.13.13.1.el7_3 base
java-1.6.0-openjdk-src.x86_64 1:1.6.0.41-1.13.13.1.el7_3 base
java-1.7.0-openjdk.x86_64 1:1.7.0.261-2.6.22.2.el7_8 base
java-1.7.0-openjdk-accessibility.x86_64 1:1.7.0.261-2.6.22.2.el7_8 base
java-1.7.0-openjdk-demo.x86_64 1:1.7.0.261-2.6.22.2.el7_8 base
java-1.7.0-openjdk-devel.x86_64 1:1.7.0.261-2.6.22.2.el7_8 base
java-1.7.0-openjdk-headless.x86_64 1:1.7.0.261-2.6.22.2.el7_8 base
java-1.7.0-openjdk-javadoc.noarch 1:1.7.0.261-2.6.22.2.el7_8 base
java-1.7.0-openjdk-src.x86_64 1:1.7.0.261-2.6.22.2.el7_8 base
java-1.8.0-openjdk.i686 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk.x86_64 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-accessibility.i686 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-accessibility.x86_64 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-demo.i686 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-demo.x86_64 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-devel.i686 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-devel.x86_64 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-headless.i686 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-headless.x86_64 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-javadoc.noarch 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-javadoc-zip.noarch 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-src.i686 1:1.8.0.302.b08-0.el7_9 updates
java-1.8.0-openjdk-src.x86_64 1:1.8.0.302.b08-0.el7_9 updates
java-11-openjdk.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-demo.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-demo.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-devel.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-devel.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-headless.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-headless.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-javadoc.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-javadoc.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-javadoc-zip.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-javadoc-zip.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-jmods.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-jmods.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-src.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-src.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-static-libs.i686 1:11.0.12.0.7-0.el7_9 updates
java-11-openjdk-static-libs.x86_64 1:11.0.12.0.7-0.el7_9 updates
java-atk-wrapper.i686 0.30.4-5.el7 base
java-atk-wrapper.x86_64 0.30.4-5.el7 base
java-dirq.noarch 1.8-1.el7 epel
java-dirq-javadoc.noarch 1.8-1.el7 epel
java-latest-openjdk.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-debug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-demo.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-demo-debug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-demo-fastdebug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-devel.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-devel-debug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-devel-fastdebug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-fastdebug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-headless.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-headless-debug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-headless-fastdebug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-javadoc.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-javadoc-zip.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-jmods.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-jmods-debug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-jmods-fastdebug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-src.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-src-debug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-src-fastdebug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-static-libs.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-static-libs-debug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-latest-openjdk-static-libs-fastdebug.x86_64 1:16.0.1.0.9-3.rolling.el7 epel
java-oauth.noarch 20100601-13.el7 epel
java-oauth-javadoc.noarch 20100601-13.el7 epel
java-runtime-decompiler.noarch 5.1-1.el7 epel
java-runtime-decompiler-javadoc.noarch 5.1-1.el7 epel
java_cup.noarch 1:0.11a-16.el7 base
java_cup-javadoc.noarch 1:0.11a-16.el7 base
java_cup-manual.noarch 1:0.11a-16.el7 base
javacc.noarch 5.0-10.el7 base
javacc-demo.noarch 5.0-10.el7 base
javacc-javadoc.noarch 5.0-10.el7 base
javacc-manual.noarch 5.0-10.el7 base
javacc-maven-plugin.noarch 2.6-17.el7 base
javacc-maven-plugin-javadoc.noarch 2.6-17.el7 base
javamail.noarch 1.4.6-8.el7 base
javamail-javadoc.noarch 1.4.6-8.el7 base
javapackages-tools.noarch 3.4.1-11.el7 base
javaparser.noarch 1.0.11-3.el7 epel
javaparser-javadoc.noarch 1.0.11-3.el7 epel
javassist.noarch 3.16.1-10.el7 base
javassist-javadoc.noarch 3.16.1-10.el7 base
javastroke.x86_64 0.5.1-33.el7 epel
javawriter.noarch 2.5.1-4.el7 epel
javawriter-javadoc.noarch 2.5.1-4.el7 epel
[root@HIGDQEEMO63U9NR ~]#
或者:
yum search jdk
[root@HIGDQEEMO63U9NR ~]# yum search jdk
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* remi-safe: mirrors.tuna.tsinghua.edu.cn
* updates: mirrors.aliyun.com
====================================== N/S matched: jdk =======================================
copy-jdk-configs.noarch : JDKs configuration files copier
java-1.6.0-openjdk.x86_64 : OpenJDK Runtime Environment
java-1.6.0-openjdk-demo.x86_64 : OpenJDK Demos
java-1.6.0-openjdk-devel.x86_64 : OpenJDK Development Environment
java-1.6.0-openjdk-javadoc.x86_64 : OpenJDK API Documentation
java-1.6.0-openjdk-src.x86_64 : OpenJDK Source Bundle
java-1.7.0-openjdk.x86_64 : OpenJDK Runtime Environment
java-1.7.0-openjdk-accessibility.x86_64 : OpenJDK accessibility connector
java-1.7.0-openjdk-demo.x86_64 : OpenJDK Demos
java-1.7.0-openjdk-devel.x86_64 : OpenJDK Development Environment
java-1.7.0-openjdk-headless.x86_64 : The OpenJDK runtime environment without audio and video
: support
java-1.7.0-openjdk-javadoc.noarch : OpenJDK API Documentation
java-1.7.0-openjdk-src.x86_64 : OpenJDK Source Bundle
java-1.8.0-openjdk.i686 : OpenJDK Runtime Environment 8
java-1.8.0-openjdk.x86_64 : OpenJDK 8 Runtime Environment
java-1.8.0-openjdk-accessibility.i686 : OpenJDK accessibility connector
java-1.8.0-openjdk-accessibility.x86_64 : OpenJDK accessibility connector
java-1.8.0-openjdk-demo.i686 : OpenJDK Demos 8
java-1.8.0-openjdk-demo.x86_64 : OpenJDK 8 Demos
java-1.8.0-openjdk-devel.i686 : OpenJDK Development Environment 8
java-1.8.0-openjdk-devel.x86_64 : OpenJDK 8 Development Environment
java-1.8.0-openjdk-headless.i686 : OpenJDK Headless Runtime Environment 8
java-1.8.0-openjdk-headless.x86_64 : OpenJDK 8 Headless Runtime Environment
java-1.8.0-openjdk-javadoc.noarch : OpenJDK 8 API documentation
java-1.8.0-openjdk-javadoc-zip.noarch : OpenJDK 8 API documentation compressed in a single
: archive
java-1.8.0-openjdk-src.i686 : OpenJDK Source Bundle 8
java-1.8.0-openjdk-src.x86_64 : OpenJDK 8 Source Bundle
java-11-openjdk.i686 : OpenJDK Runtime Environment 11
java-11-openjdk.x86_64 : OpenJDK 11 Runtime Environment
java-11-openjdk-demo.i686 : OpenJDK Demos 11
java-11-openjdk-demo.x86_64 : OpenJDK 11 Demos
java-11-openjdk-devel.i686 : OpenJDK Development Environment 11
java-11-openjdk-devel.x86_64 : OpenJDK 11 Development Environment
java-11-openjdk-headless.i686 : OpenJDK Headless Runtime Environment 11
java-11-openjdk-headless.x86_64 : OpenJDK 11 Headless Runtime Environment
java-11-openjdk-javadoc.i686 : OpenJDK 11 API documentation
java-11-openjdk-javadoc.x86_64 : OpenJDK 11 API documentation
java-11-openjdk-javadoc-zip.i686 : OpenJDK 11 API documentation compressed in a single archive
java-11-openjdk-javadoc-zip.x86_64 : OpenJDK 11 API documentation compressed in a single
: archive
java-11-openjdk-jmods.i686 : JMods for OpenJDK 11
java-11-openjdk-jmods.x86_64 : JMods for OpenJDK 11
java-11-openjdk-src.i686 : OpenJDK Source Bundle 11
java-11-openjdk-src.x86_64 : OpenJDK 11 Source Bundle
java-11-openjdk-static-libs.i686 : OpenJDK libraries for static linking 11
java-11-openjdk-static-libs.x86_64 : OpenJDK 11 libraries for static linking
java-latest-openjdk.x86_64 : OpenJDK 16 Runtime Environment
java-latest-openjdk-debug.x86_64 : OpenJDK 16 Runtime Environment with full debugging on
java-latest-openjdk-demo.x86_64 : OpenJDK 16 Demos
java-latest-openjdk-demo-debug.x86_64 : OpenJDK 16 Demos with full debugging on
java-latest-openjdk-demo-fastdebug.x86_64 : OpenJDK 16 Demos with minimal debugging on
java-latest-openjdk-devel.x86_64 : OpenJDK 16 Development Environment
java-latest-openjdk-devel-debug.x86_64 : OpenJDK 16 Development Environment with full debugging
: on
java-latest-openjdk-devel-fastdebug.x86_64 : OpenJDK 16 Development Environment with minimal
: debugging on
java-latest-openjdk-fastdebug.x86_64 : OpenJDK 16 Runtime Environment with minimal debugging on
java-latest-openjdk-headless.x86_64 : OpenJDK 16 Headless Runtime Environment
java-latest-openjdk-headless-debug.x86_64 : OpenJDK 16 Runtime Environment with full debugging
: on
java-latest-openjdk-headless-fastdebug.x86_64 : OpenJDK 16 Runtime Environment with minimal
: debugging on
java-latest-openjdk-javadoc.x86_64 : OpenJDK 16 API documentation
java-latest-openjdk-javadoc-zip.x86_64 : OpenJDK 16 API documentation compressed in a single
: archive
java-latest-openjdk-jmods.x86_64 : JMods for OpenJDK 16
java-latest-openjdk-jmods-debug.x86_64 : JMods for OpenJDK 16 with full debugging on
java-latest-openjdk-jmods-fastdebug.x86_64 : JMods for OpenJDK 16 with minimal debugging on
java-latest-openjdk-src.x86_64 : OpenJDK 16 Source Bundle
java-latest-openjdk-src-debug.x86_64 : OpenJDK 16 Source Bundle for packages with debugging on
java-latest-openjdk-src-fastdebug.x86_64 : OpenJDK 16 Source Bundle %{for_fastdebug}
java-latest-openjdk-static-libs.x86_64 : OpenJDK 16 libraries for static linking
java-latest-openjdk-static-libs-debug.x86_64 : OpenJDK 16 libraries for static linking with
: full debugging on
java-latest-openjdk-static-libs-fastdebug.x86_64 : OpenJDK 16 libraries for static linking with
: minimal debugging on
ldapjdk-javadoc.noarch : Javadoc for ldapjdk
openjdk-asmtools-javadoc.noarch : Javadoc for openjdk-asmtools
icedtea-web.x86_64 : Additional Java components for OpenJDK - Java browser plug-in and Web
: Start implementation
ldapjdk.noarch : The Mozilla LDAP Java SDK
openjdk-asmtools.noarch : To develop tools create proper & improper Java '.class' files
openprops.noarch : An improved java.util.Properties from OpenJDK
Name and summary matches only, use "search all" for everything.
[root@HIGDQEEMO63U9NR ~]#
二、安装 JDK
yum install -y java-1.8.0-openjdk.x86_64
注:java-1.8.0-openjdk 主要提供 Java 运行环境;如还需要 javac 等开发工具,可改装上面列表中的 java-1.8.0-openjdk-devel.x86_64(OpenJDK Development Environment)。
[root@HIGDQEEMO63U9NR ~]# yum install -y java-1.8.0-openjdk.x86_64
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* remi-safe: mirrors.tuna.tsinghua.edu.cn
* updates: mirrors.aliyun.com
Resolving Dependencies
--> Running transaction check
---> Package java-1.8.0-openjdk.x86_64 1:1.8.0.302.b08-0.el7_9 will be installed
--> Processing Dependency: java-1.8.0-openjdk-headless(x86-64) = 1:1.8.0.302.b08-0.el7_9 for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: xorg-x11-fonts-Type1 for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libjvm.so(SUNWprivate_1.1)(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libjava.so(SUNWprivate_1.1)(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libasound.so.2(ALSA_0.9.0rc4)(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libasound.so.2(ALSA_0.9)(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libXcomposite(x86-64) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: gtk2(x86-64) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libjvm.so()(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libjava.so()(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libgif.so.4()(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libasound.so.2()(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libXtst.so.6()(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libXrender.so.1()(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libXi.so.6()(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: libXext.so.6()(64bit) for package: 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
--> Running transaction check
---> Package alsa-lib.x86_64 0:1.1.8-1.el7 will be installed
---> Package giflib.x86_64 0:4.1.6-9.el7 will be installed
--> Processing Dependency: libSM.so.6()(64bit) for package: giflib-4.1.6-9.el7.x86_64
--> Processing Dependency: libICE.so.6()(64bit) for package: giflib-4.1.6-9.el7.x86_64
---> Package gtk2.x86_64 0:2.24.31-1.el7 will be installed
--> Processing Dependency: pango >= 1.20.0-1 for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libXrandr >= 1.2.99.4-2 for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: atk >= 1.29.4-2 for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: hicolor-icon-theme for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: gtk-update-icon-cache for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libpangoft2-1.0.so.0()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libpangocairo-1.0.so.0()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libpango-1.0.so.0()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libgdk_pixbuf-2.0.so.0()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libcups.so.2()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libcairo.so.2()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libatk-1.0.so.0()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libXrandr.so.2()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libXinerama.so.1()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libXfixes.so.3()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libXdamage.so.1()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
--> Processing Dependency: libXcursor.so.1()(64bit) for package: gtk2-2.24.31-1.el7.x86_64
---> Package java-1.8.0-openjdk-headless.x86_64 1:1.8.0.302.b08-0.el7_9 will be installed
--> Processing Dependency: tzdata-java >= 2021a for package: 1:java-1.8.0-openjdk-headless-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: copy-jdk-configs >= 3.3 for package: 1:java-1.8.0-openjdk-headless-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: pcsc-lite-libs(x86-64) for package: 1:java-1.8.0-openjdk-headless-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: lksctp-tools(x86-64) for package: 1:java-1.8.0-openjdk-headless-1.8.0.302.b08-0.el7_9.x86_64
--> Processing Dependency: jpackage-utils for package: 1:java-1.8.0-openjdk-headless-1.8.0.302.b08-0.el7_9.x86_64
---> Package libXcomposite.x86_64 0:0.4.4-4.1.el7 will be installed
---> Package libXext.x86_64 0:1.3.3-3.el7 will be installed
---> Package libXi.x86_64 0:1.7.9-1.el7 will be installed
---> Package libXrender.x86_64 0:0.9.10-1.el7 will be installed
---> Package libXtst.x86_64 0:1.2.3-1.el7 will be installed
---> Package xorg-x11-fonts-Type1.noarch 0:7.5-9.el7 will be installed
--> Processing Dependency: ttmkfdir for package: xorg-x11-fonts-Type1-7.5-9.el7.noarch
--> Processing Dependency: ttmkfdir for package: xorg-x11-fonts-Type1-7.5-9.el7.noarch
--> Processing Dependency: mkfontdir for package: xorg-x11-fonts-Type1-7.5-9.el7.noarch
--> Processing Dependency: mkfontdir for package: xorg-x11-fonts-Type1-7.5-9.el7.noarch
--> Running transaction check
---> Package atk.x86_64 0:2.28.1-2.el7 will be installed
---> Package cairo.x86_64 0:1.15.12-4.el7 will be installed
--> Processing Dependency: libpixman-1.so.0()(64bit) for package: cairo-1.15.12-4.el7.x86_64
--> Processing Dependency: libGL.so.1()(64bit) for package: cairo-1.15.12-4.el7.x86_64
--> Processing Dependency: libEGL.so.1()(64bit) for package: cairo-1.15.12-4.el7.x86_64
---> Package copy-jdk-configs.noarch 0:3.3-10.el7_5 will be installed
---> Package cups-libs.x86_64 1:1.6.3-51.el7 will be installed
--> Processing Dependency: libavahi-common.so.3()(64bit) for package: 1:cups-libs-1.6.3-51.el7.x86_64
--> Processing Dependency: libavahi-client.so.3()(64bit) for package: 1:cups-libs-1.6.3-51.el7.x86_64
---> Package gdk-pixbuf2.x86_64 0:2.36.12-3.el7 will be installed
--> Processing Dependency: libjasper.so.1()(64bit) for package: gdk-pixbuf2-2.36.12-3.el7.x86_64
---> Package gtk-update-icon-cache.x86_64 0:3.22.30-6.el7 will be installed
---> Package hicolor-icon-theme.noarch 0:0.12-7.el7 will be installed
---> Package javapackages-tools.noarch 0:3.4.1-11.el7 will be installed
--> Processing Dependency: python-javapackages = 3.4.1-11.el7 for package: javapackages-tools-3.4.1-11.el7.noarch
---> Package libICE.x86_64 0:1.0.9-9.el7 will be installed
---> Package libSM.x86_64 0:1.2.2-2.el7 will be installed
---> Package libXcursor.x86_64 0:1.1.15-1.el7 will be installed
---> Package libXdamage.x86_64 0:1.1.4-4.1.el7 will be installed
---> Package libXfixes.x86_64 0:5.0.3-1.el7 will be installed
---> Package libXinerama.x86_64 0:1.1.3-2.1.el7 will be installed
---> Package libXrandr.x86_64 0:1.5.1-2.el7 will be installed
---> Package lksctp-tools.x86_64 0:1.0.17-2.el7 will be installed
---> Package pango.x86_64 0:1.42.4-4.el7_7 will be installed
--> Processing Dependency: libthai(x86-64) >= 0.1.9 for package: pango-1.42.4-4.el7_7.x86_64
--> Processing Dependency: libXft(x86-64) >= 2.0.0 for package: pango-1.42.4-4.el7_7.x86_64
--> Processing Dependency: libthai.so.0(LIBTHAI_0.1)(64bit) for package: pango-1.42.4-4.el7_7.x86_64
--> Processing Dependency: libthai.so.0()(64bit) for package: pango-1.42.4-4.el7_7.x86_64
--> Processing Dependency: libXft.so.2()(64bit) for package: pango-1.42.4-4.el7_7.x86_64
---> Package pcsc-lite-libs.x86_64 0:1.8.8-8.el7 will be installed
---> Package ttmkfdir.x86_64 0:3.0.9-42.el7 will be installed
---> Package tzdata-java.noarch 0:2021a-1.el7 will be installed
---> Package xorg-x11-font-utils.x86_64 1:7.5-21.el7 will be installed
--> Processing Dependency: libfontenc.so.1()(64bit) for package: 1:xorg-x11-font-utils-7.5-21.el7.x86_64
--> Running transaction check
---> Package avahi-libs.x86_64 0:0.6.31-20.el7 will be installed
---> Package jasper-libs.x86_64 0:1.900.1-33.el7 will be installed
---> Package libXft.x86_64 0:2.3.2-2.el7 will be installed
---> Package libfontenc.x86_64 0:1.1.3-3.el7 will be installed
---> Package libglvnd-egl.x86_64 1:1.0.1-0.8.git5baa1e5.el7 will be installed
--> Processing Dependency: libglvnd(x86-64) = 1:1.0.1-0.8.git5baa1e5.el7 for package: 1:libglvnd-egl-1.0.1-0.8.git5baa1e5.el7.x86_64
--> Processing Dependency: mesa-libEGL(x86-64) >= 13.0.4-1 for package: 1:libglvnd-egl-1.0.1-0.8.git5baa1e5.el7.x86_64
--> Processing Dependency: libGLdispatch.so.0()(64bit) for package: 1:libglvnd-egl-1.0.1-0.8.git5baa1e5.el7.x86_64
---> Package libglvnd-glx.x86_64 1:1.0.1-0.8.git5baa1e5.el7 will be installed
--> Processing Dependency: mesa-libGL(x86-64) >= 13.0.4-1 for package: 1:libglvnd-glx-1.0.1-0.8.git5baa1e5.el7.x86_64
---> Package libthai.x86_64 0:0.1.14-9.el7 will be installed
---> Package pixman.x86_64 0:0.34.0-1.el7 will be installed
---> Package python-javapackages.noarch 0:3.4.1-11.el7 will be installed
--> Processing Dependency: python-lxml for package: python-javapackages-3.4.1-11.el7.noarch
--> Running transaction check
---> Package libglvnd.x86_64 1:1.0.1-0.8.git5baa1e5.el7 will be installed
---> Package mesa-libEGL.x86_64 0:18.3.4-12.el7_9 will be installed
--> Processing Dependency: mesa-libgbm = 18.3.4-12.el7_9 for package: mesa-libEGL-18.3.4-12.el7_9.x86_64
--> Processing Dependency: libxshmfence.so.1()(64bit) for package: mesa-libEGL-18.3.4-12.el7_9.x86_64
--> Processing Dependency: libwayland-server.so.0()(64bit) for package: mesa-libEGL-18.3.4-12.el7_9.x86_64
--> Processing Dependency: libwayland-client.so.0()(64bit) for package: mesa-libEGL-18.3.4-12.el7_9.x86_64
--> Processing Dependency: libglapi.so.0()(64bit) for package: mesa-libEGL-18.3.4-12.el7_9.x86_64
--> Processing Dependency: libgbm.so.1()(64bit) for package: mesa-libEGL-18.3.4-12.el7_9.x86_64
--> Processing Dependency: libdrm.so.2()(64bit) for package: mesa-libEGL-18.3.4-12.el7_9.x86_64
---> Package mesa-libGL.x86_64 0:18.3.4-12.el7_9 will be installed
--> Processing Dependency: libXxf86vm.so.1()(64bit) for package: mesa-libGL-18.3.4-12.el7_9.x86_64
---> Package python-lxml.x86_64 0:3.2.1-4.el7 will be installed
--> Running transaction check
---> Package libXxf86vm.x86_64 0:1.1.4-1.el7 will be installed
---> Package libdrm.x86_64 0:2.4.97-2.el7 will be installed
--> Processing Dependency: libpciaccess.so.0()(64bit) for package: libdrm-2.4.97-2.el7.x86_64
---> Package libwayland-client.x86_64 0:1.15.0-1.el7 will be installed
---> Package libwayland-server.x86_64 0:1.15.0-1.el7 will be installed
---> Package libxshmfence.x86_64 0:1.2-1.el7 will be installed
---> Package mesa-libgbm.x86_64 0:18.3.4-12.el7_9 will be installed
---> Package mesa-libglapi.x86_64 0:18.3.4-12.el7_9 will be installed
--> Running transaction check
---> Package libpciaccess.x86_64 0:0.14-1.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
===============================================================================================
Package Arch Version Repository Size
===============================================================================================
Installing:
java-1.8.0-openjdk x86_64 1:1.8.0.302.b08-0.el7_9 updates 311 k
Installing for dependencies:
alsa-lib x86_64 1.1.8-1.el7 base 425 k
atk x86_64 2.28.1-2.el7 base 263 k
avahi-libs x86_64 0.6.31-20.el7 base 62 k
cairo x86_64 1.15.12-4.el7 base 741 k
copy-jdk-configs noarch 3.3-10.el7_5 base 21 k
cups-libs x86_64 1:1.6.3-51.el7 base 359 k
gdk-pixbuf2 x86_64 2.36.12-3.el7 base 570 k
giflib x86_64 4.1.6-9.el7 base 40 k
gtk-update-icon-cache x86_64 3.22.30-6.el7 updates 27 k
gtk2 x86_64 2.24.31-1.el7 base 3.4 M
hicolor-icon-theme noarch 0.12-7.el7 base 42 k
jasper-libs x86_64 1.900.1-33.el7 base 150 k
java-1.8.0-openjdk-headless x86_64 1:1.8.0.302.b08-0.el7_9 updates 33 M
javapackages-tools noarch 3.4.1-11.el7 base 73 k
libICE x86_64 1.0.9-9.el7 base 66 k
libSM x86_64 1.2.2-2.el7 base 39 k
libXcomposite x86_64 0.4.4-4.1.el7 base 22 k
libXcursor x86_64 1.1.15-1.el7 base 30 k
libXdamage x86_64 1.1.4-4.1.el7 base 20 k
libXext x86_64 1.3.3-3.el7 base 39 k
libXfixes x86_64 5.0.3-1.el7 base 18 k
libXft x86_64 2.3.2-2.el7 base 58 k
libXi x86_64 1.7.9-1.el7 base 40 k
libXinerama x86_64 1.1.3-2.1.el7 base 14 k
libXrandr x86_64 1.5.1-2.el7 base 27 k
libXrender x86_64 0.9.10-1.el7 base 26 k
libXtst x86_64 1.2.3-1.el7 base 20 k
libXxf86vm x86_64 1.1.4-1.el7 base 18 k
libdrm x86_64 2.4.97-2.el7 base 151 k
libfontenc x86_64 1.1.3-3.el7 base 31 k
libglvnd x86_64 1:1.0.1-0.8.git5baa1e5.el7 base 89 k
libglvnd-egl x86_64 1:1.0.1-0.8.git5baa1e5.el7 base 44 k
libglvnd-glx x86_64 1:1.0.1-0.8.git5baa1e5.el7 base 125 k
libpciaccess x86_64 0.14-1.el7 base 26 k
libthai x86_64 0.1.14-9.el7 base 187 k
libwayland-client x86_64 1.15.0-1.el7 base 33 k
libwayland-server x86_64 1.15.0-1.el7 base 39 k
libxshmfence x86_64 1.2-1.el7 base 7.2 k
lksctp-tools x86_64 1.0.17-2.el7 base 88 k
mesa-libEGL x86_64 18.3.4-12.el7_9 updates 110 k
mesa-libGL x86_64 18.3.4-12.el7_9 updates 166 k
mesa-libgbm x86_64 18.3.4-12.el7_9 updates 39 k
mesa-libglapi x86_64 18.3.4-12.el7_9 updates 46 k
pango x86_64 1.42.4-4.el7_7 base 280 k
pcsc-lite-libs x86_64 1.8.8-8.el7 base 34 k
pixman x86_64 0.34.0-1.el7 base 248 k
python-javapackages noarch 3.4.1-11.el7 base 31 k
python-lxml x86_64 3.2.1-4.el7 base 758 k
ttmkfdir x86_64 3.0.9-42.el7 base 48 k
tzdata-java noarch 2021a-1.el7 updates 191 k
xorg-x11-font-utils x86_64 1:7.5-21.el7 base 104 k
xorg-x11-fonts-Type1 noarch 7.5-9.el7 base 521 k
Transaction Summary
===============================================================================================
Install 1 Package (+52 Dependent packages)
Total download size: 43 M
Installed size: 143 M
Downloading packages:
(1/53): alsa-lib-1.1.8-1.el7.x86_64.rpm | 425 kB 00:00:00
(2/53): avahi-libs-0.6.31-20.el7.x86_64.rpm | 62 kB 00:00:00
(3/53): atk-2.28.1-2.el7.x86_64.rpm | 263 kB 00:00:00
(4/53): copy-jdk-configs-3.3-10.el7_5.noarch.rpm | 21 kB 00:00:00
(5/53): cairo-1.15.12-4.el7.x86_64.rpm | 741 kB 00:00:00
(6/53): gdk-pixbuf2-2.36.12-3.el7.x86_64.rpm | 570 kB 00:00:00
(7/53): giflib-4.1.6-9.el7.x86_64.rpm | 40 kB 00:00:00
(8/53): gtk-update-icon-cache-3.22.30-6.el7.x86_64.rpm | 27 kB 00:00:00
(9/53): cups-libs-1.6.3-51.el7.x86_64.rpm | 359 kB 00:00:01
(10/53): hicolor-icon-theme-0.12-7.el7.noarch.rpm | 42 kB 00:00:00
(11/53): jasper-libs-1.900.1-33.el7.x86_64.rpm | 150 kB 00:00:00
(12/53): java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64.rpm | 311 kB 00:00:00
(13/53): javapackages-tools-3.4.1-11.el7.noarch.rpm | 73 kB 00:00:00
(14/53): gtk2-2.24.31-1.el7.x86_64.rpm | 3.4 MB 00:00:02
(15/53): libICE-1.0.9-9.el7.x86_64.rpm | 66 kB 00:00:00
(16/53): libSM-1.2.2-2.el7.x86_64.rpm | 39 kB 00:00:00
(17/53): libXcomposite-0.4.4-4.1.el7.x86_64.rpm | 22 kB 00:00:00
(18/53): libXcursor-1.1.15-1.el7.x86_64.rpm | 30 kB 00:00:00
(19/53): libXdamage-1.1.4-4.1.el7.x86_64.rpm | 20 kB 00:00:00
(20/53): libXext-1.3.3-3.el7.x86_64.rpm | 39 kB 00:00:00
(21/53): libXfixes-5.0.3-1.el7.x86_64.rpm | 18 kB 00:00:00
(22/53): libXft-2.3.2-2.el7.x86_64.rpm | 58 kB 00:00:00
(23/53): libXinerama-1.1.3-2.1.el7.x86_64.rpm | 14 kB 00:00:00
(24/53): libXrandr-1.5.1-2.el7.x86_64.rpm | 27 kB 00:00:00
(25/53): libXi-1.7.9-1.el7.x86_64.rpm | 40 kB 00:00:00
(26/53): libXrender-0.9.10-1.el7.x86_64.rpm | 26 kB 00:00:00
(27/53): libXtst-1.2.3-1.el7.x86_64.rpm | 20 kB 00:00:00
(28/53): libXxf86vm-1.1.4-1.el7.x86_64.rpm | 18 kB 00:00:00
(29/53): libfontenc-1.1.3-3.el7.x86_64.rpm | 31 kB 00:00:00
(30/53): libglvnd-1.0.1-0.8.git5baa1e5.el7.x86_64.rpm | 89 kB 00:00:00
(31/53): libdrm-2.4.97-2.el7.x86_64.rpm | 151 kB 00:00:00
(32/53): libglvnd-egl-1.0.1-0.8.git5baa1e5.el7.x86_64.rpm | 44 kB 00:00:00
(33/53): libglvnd-glx-1.0.1-0.8.git5baa1e5.el7.x86_64.rpm | 125 kB 00:00:00
(34/53): libpciaccess-0.14-1.el7.x86_64.rpm | 26 kB 00:00:00
(35/53): libwayland-client-1.15.0-1.el7.x86_64.rpm | 33 kB 00:00:00
(36/53): libwayland-server-1.15.0-1.el7.x86_64.rpm | 39 kB 00:00:00
(37/53): libthai-0.1.14-9.el7.x86_64.rpm | 187 kB 00:00:00
(38/53): libxshmfence-1.2-1.el7.x86_64.rpm | 7.2 kB 00:00:00
(39/53): lksctp-tools-1.0.17-2.el7.x86_64.rpm | 88 kB 00:00:00
(40/53): mesa-libEGL-18.3.4-12.el7_9.x86_64.rpm | 110 kB 00:00:01
(41/53): mesa-libGL-18.3.4-12.el7_9.x86_64.rpm | 166 kB 00:00:00
(42/53): mesa-libgbm-18.3.4-12.el7_9.x86_64.rpm | 39 kB 00:00:00
(43/53): mesa-libglapi-18.3.4-12.el7_9.x86_64.rpm | 46 kB 00:00:00
(44/53): pango-1.42.4-4.el7_7.x86_64.rpm | 280 kB 00:00:00
(45/53): pixman-0.34.0-1.el7.x86_64.rpm | 248 kB 00:00:00
(46/53): python-javapackages-3.4.1-11.el7.noarch.rpm | 31 kB 00:00:00
(47/53): pcsc-lite-libs-1.8.8-8.el7.x86_64.rpm | 34 kB 00:00:01
(48/53): ttmkfdir-3.0.9-42.el7.x86_64.rpm | 48 kB 00:00:00
(49/53): xorg-x11-font-utils-7.5-21.el7.x86_64.rpm | 104 kB 00:00:00
(50/53): tzdata-java-2021a-1.el7.noarch.rpm | 191 kB 00:00:00
(51/53): python-lxml-3.2.1-4.el7.x86_64.rpm | 758 kB 00:00:00
(52/53): xorg-x11-fonts-Type1-7.5-9.el7.noarch.rpm | 521 kB 00:00:00
(53/53): java-1.8.0-openjdk-headless-1.8.0.302.b08-0.el7_9.x86_64.rpm | 33 MB 00:00:12
-----------------------------------------------------------------------------------------------
Total 2.8 MB/s | 43 MB 00:00:15
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : libXext-1.3.3-3.el7.x86_64 1/53
Installing : libXrender-0.9.10-1.el7.x86_64 2/53
Installing : libXfixes-5.0.3-1.el7.x86_64 3/53
Installing : libXi-1.7.9-1.el7.x86_64 4/53
Installing : mesa-libglapi-18.3.4-12.el7_9.x86_64 5/53
Installing : libXdamage-1.1.4-4.1.el7.x86_64 6/53
Installing : libxshmfence-1.2-1.el7.x86_64 7/53
Installing : 1:libglvnd-1.0.1-0.8.git5baa1e5.el7.x86_64 8/53
Installing : libXcomposite-0.4.4-4.1.el7.x86_64 9/53
Installing : libwayland-server-1.15.0-1.el7.x86_64 10/53
Installing : libICE-1.0.9-9.el7.x86_64 11/53
Installing : libSM-1.2.2-2.el7.x86_64 12/53
Installing : giflib-4.1.6-9.el7.x86_64 13/53
Installing : libXtst-1.2.3-1.el7.x86_64 14/53
Installing : libXcursor-1.1.15-1.el7.x86_64 15/53
Installing : libXft-2.3.2-2.el7.x86_64 16/53
Installing : libXrandr-1.5.1-2.el7.x86_64 17/53
Installing : libXinerama-1.1.3-2.1.el7.x86_64 18/53
Installing : libXxf86vm-1.1.4-1.el7.x86_64 19/53
Installing : pixman-0.34.0-1.el7.x86_64 20/53
Installing : avahi-libs-0.6.31-20.el7.x86_64 21/53
Installing : 1:cups-libs-1.6.3-51.el7.x86_64 22/53
Installing : libfontenc-1.1.3-3.el7.x86_64 23/53
Installing : 1:xorg-x11-font-utils-7.5-21.el7.x86_64 24/53
Installing : atk-2.28.1-2.el7.x86_64 25/53
Installing : libthai-0.1.14-9.el7.x86_64 26/53
Installing : python-lxml-3.2.1-4.el7.x86_64 27/53
Installing : python-javapackages-3.4.1-11.el7.noarch 28/53
Installing : javapackages-tools-3.4.1-11.el7.noarch 29/53
Installing : jasper-libs-1.900.1-33.el7.x86_64 30/53
Installing : gdk-pixbuf2-2.36.12-3.el7.x86_64 31/53
Installing : gtk-update-icon-cache-3.22.30-6.el7.x86_64 32/53
Installing : pcsc-lite-libs-1.8.8-8.el7.x86_64 33/53
Installing : hicolor-icon-theme-0.12-7.el7.noarch 34/53
Installing : lksctp-tools-1.0.17-2.el7.x86_64 35/53
Installing : tzdata-java-2021a-1.el7.noarch 36/53
Installing : copy-jdk-configs-3.3-10.el7_5.noarch 37/53
Installing : 1:java-1.8.0-openjdk-headless-1.8.0.302.b08-0.el7_9.x86_64 38/53
Installing : alsa-lib-1.1.8-1.el7.x86_64 39/53
Installing : ttmkfdir-3.0.9-42.el7.x86_64 40/53
Installing : xorg-x11-fonts-Type1-7.5-9.el7.noarch 41/53
Installing : libwayland-client-1.15.0-1.el7.x86_64 42/53
Installing : libpciaccess-0.14-1.el7.x86_64 43/53
Installing : libdrm-2.4.97-2.el7.x86_64 44/53
Installing : mesa-libGL-18.3.4-12.el7_9.x86_64 45/53
Installing : 1:libglvnd-glx-1.0.1-0.8.git5baa1e5.el7.x86_64 46/53
Installing : mesa-libgbm-18.3.4-12.el7_9.x86_64 47/53
Installing : 1:libglvnd-egl-1.0.1-0.8.git5baa1e5.el7.x86_64 48/53
Installing : mesa-libEGL-18.3.4-12.el7_9.x86_64 49/53
Installing : cairo-1.15.12-4.el7.x86_64 50/53
Installing : pango-1.42.4-4.el7_7.x86_64 51/53
Installing : gtk2-2.24.31-1.el7.x86_64 52/53
Installing : 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64 53/53
Verifying : libXext-1.3.3-3.el7.x86_64 1/53
Verifying : libpciaccess-0.14-1.el7.x86_64 2/53
Verifying : libXi-1.7.9-1.el7.x86_64 3/53
Verifying : libICE-1.0.9-9.el7.x86_64 4/53
Verifying : gdk-pixbuf2-2.36.12-3.el7.x86_64 5/53
Verifying : libXinerama-1.1.3-2.1.el7.x86_64 6/53
Verifying : libXrender-0.9.10-1.el7.x86_64 7/53
Verifying : 1:xorg-x11-font-utils-7.5-21.el7.x86_64 8/53
Verifying : libXxf86vm-1.1.4-1.el7.x86_64 9/53
Verifying : libwayland-server-1.15.0-1.el7.x86_64 10/53
Verifying : libXcursor-1.1.15-1.el7.x86_64 11/53
Verifying : libwayland-client-1.15.0-1.el7.x86_64 12/53
Verifying : gtk2-2.24.31-1.el7.x86_64 13/53
Verifying : ttmkfdir-3.0.9-42.el7.x86_64 14/53
Verifying : 1:cups-libs-1.6.3-51.el7.x86_64 15/53
Verifying : alsa-lib-1.1.8-1.el7.x86_64 16/53
Verifying : giflib-4.1.6-9.el7.x86_64 17/53
Verifying : libXcomposite-0.4.4-4.1.el7.x86_64 18/53
Verifying : mesa-libglapi-18.3.4-12.el7_9.x86_64 19/53
Verifying : copy-jdk-configs-3.3-10.el7_5.noarch 20/53
Verifying : python-javapackages-3.4.1-11.el7.noarch 21/53
Verifying : tzdata-java-2021a-1.el7.noarch 22/53
Verifying : pango-1.42.4-4.el7_7.x86_64 23/53
Verifying : libXtst-1.2.3-1.el7.x86_64 24/53
Verifying : 1:libglvnd-1.0.1-0.8.git5baa1e5.el7.x86_64 25/53
Verifying : libXft-2.3.2-2.el7.x86_64 26/53
Verifying : libdrm-2.4.97-2.el7.x86_64 27/53
Verifying : mesa-libGL-18.3.4-12.el7_9.x86_64 28/53
Verifying : gtk-update-icon-cache-3.22.30-6.el7.x86_64 29/53
Verifying : lksctp-tools-1.0.17-2.el7.x86_64 30/53
Verifying : hicolor-icon-theme-0.12-7.el7.noarch 31/53
Verifying : libXdamage-1.1.4-4.1.el7.x86_64 32/53
Verifying : xorg-x11-fonts-Type1-7.5-9.el7.noarch 33/53
Verifying : 1:libglvnd-glx-1.0.1-0.8.git5baa1e5.el7.x86_64 34/53
Verifying : libXrandr-1.5.1-2.el7.x86_64 35/53
Verifying : pcsc-lite-libs-1.8.8-8.el7.x86_64 36/53
Verifying : javapackages-tools-3.4.1-11.el7.noarch 37/53
Verifying : cairo-1.15.12-4.el7.x86_64 38/53
Verifying : mesa-libgbm-18.3.4-12.el7_9.x86_64 39/53
Verifying : libxshmfence-1.2-1.el7.x86_64 40/53
Verifying : 1:java-1.8.0-openjdk-headless-1.8.0.302.b08-0.el7_9.x86_64 41/53
Verifying : libSM-1.2.2-2.el7.x86_64 42/53
Verifying : jasper-libs-1.900.1-33.el7.x86_64 43/53
Verifying : python-lxml-3.2.1-4.el7.x86_64 44/53
Verifying : mesa-libEGL-18.3.4-12.el7_9.x86_64 45/53
Verifying : libthai-0.1.14-9.el7.x86_64 46/53
Verifying : 1:java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64 47/53
Verifying : libXfixes-5.0.3-1.el7.x86_64 48/53
Verifying : atk-2.28.1-2.el7.x86_64 49/53
Verifying : libfontenc-1.1.3-3.el7.x86_64 50/53
Verifying : avahi-libs-0.6.31-20.el7.x86_64 51/53
Verifying : 1:libglvnd-egl-1.0.1-0.8.git5baa1e5.el7.x86_64 52/53
Verifying : pixman-0.34.0-1.el7.x86_64 53/53
Installed:
java-1.8.0-openjdk.x86_64 1:1.8.0.302.b08-0.el7_9
Dependency Installed:
alsa-lib.x86_64 0:1.1.8-1.el7
atk.x86_64 0:2.28.1-2.el7
avahi-libs.x86_64 0:0.6.31-20.el7
cairo.x86_64 0:1.15.12-4.el7
copy-jdk-configs.noarch 0:3.3-10.el7_5
cups-libs.x86_64 1:1.6.3-51.el7
gdk-pixbuf2.x86_64 0:2.36.12-3.el7
giflib.x86_64 0:4.1.6-9.el7
gtk-update-icon-cache.x86_64 0:3.22.30-6.el7
gtk2.x86_64 0:2.24.31-1.el7
hicolor-icon-theme.noarch 0:0.12-7.el7
jasper-libs.x86_64 0:1.900.1-33.el7
java-1.8.0-openjdk-headless.x86_64 1:1.8.0.302.b08-0.el7_9
javapackages-tools.noarch 0:3.4.1-11.el7
libICE.x86_64 0:1.0.9-9.el7
libSM.x86_64 0:1.2.2-2.el7
libXcomposite.x86_64 0:0.4.4-4.1.el7
libXcursor.x86_64 0:1.1.15-1.el7
libXdamage.x86_64 0:1.1.4-4.1.el7
libXext.x86_64 0:1.3.3-3.el7
libXfixes.x86_64 0:5.0.3-1.el7
libXft.x86_64 0:2.3.2-2.el7
libXi.x86_64 0:1.7.9-1.el7
libXinerama.x86_64 0:1.1.3-2.1.el7
libXrandr.x86_64 0:1.5.1-2.el7
libXrender.x86_64 0:0.9.10-1.el7
libXtst.x86_64 0:1.2.3-1.el7
libXxf86vm.x86_64 0:1.1.4-1.el7
libdrm.x86_64 0:2.4.97-2.el7
libfontenc.x86_64 0:1.1.3-3.el7
libglvnd.x86_64 1:1.0.1-0.8.git5baa1e5.el7
libglvnd-egl.x86_64 1:1.0.1-0.8.git5baa1e5.el7
libglvnd-glx.x86_64 1:1.0.1-0.8.git5baa1e5.el7
libpciaccess.x86_64 0:0.14-1.el7
libthai.x86_64 0:0.1.14-9.el7
libwayland-client.x86_64 0:1.15.0-1.el7
libwayland-server.x86_64 0:1.15.0-1.el7
libxshmfence.x86_64 0:1.2-1.el7
lksctp-tools.x86_64 0:1.0.17-2.el7
mesa-libEGL.x86_64 0:18.3.4-12.el7_9
mesa-libGL.x86_64 0:18.3.4-12.el7_9
mesa-libgbm.x86_64 0:18.3.4-12.el7_9
mesa-libglapi.x86_64 0:18.3.4-12.el7_9
pango.x86_64 0:1.42.4-4.el7_7
pcsc-lite-libs.x86_64 0:1.8.8-8.el7
pixman.x86_64 0:0.34.0-1.el7
python-javapackages.noarch 0:3.4.1-11.el7
python-lxml.x86_64 0:3.2.1-4.el7
ttmkfdir.x86_64 0:3.0.9-42.el7
tzdata-java.noarch 0:2021a-1.el7
xorg-x11-font-utils.x86_64 1:7.5-21.el7
xorg-x11-fonts-Type1.noarch 0:7.5-9.el7
Complete!
[root@HIGDQEEMO63U9NR ~]#
三、完成安装后验证
java -version
[root@HIGDQEEMO63U9NR ~]# java -version
openjdk version "1.8.0_302"
OpenJDK Runtime Environment (build 1.8.0_302-b08)
OpenJDK 64-Bit Server VM (build 25.302-b08, mixed mode)
[root@HIGDQEEMO63U9NR ~]#
四、查看安装路径
[root@HIGDQEEMO63U9NR ~]# ls -l /usr/lib/jvm
total 0
drwxr-xr-x 1 root root 512 Sep 2 14:18 java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
lrwxrwxrwx 1 root root 21 Sep 2 14:18 jre -> /etc/alternatives/jre
lrwxrwxrwx 1 root root 27 Sep 2 14:18 jre-1.8.0 -> /etc/alternatives/jre_1.8.0
lrwxrwxrwx 1 root root 35 Sep 2 14:18 jre-1.8.0-openjdk -> /etc/alternatives/jre_1.8.0_openjdk
lrwxrwxrwx 1 root root 51 Sep 2 14:18 jre-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64 -> java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre
lrwxrwxrwx 1 root root 29 Sep 2 14:18 jre-openjdk -> /etc/alternatives/jre_openjdk
[root@HIGDQEEMO63U9NR ~]#
五、设置 Java 环境变量
vi /etc/profile
在文件最后加入:
#set java environment
JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64
JRE_HOME=$JAVA_HOME/jre
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
export JAVA_HOME JRE_HOME CLASSPATH PATH
(注:JAVA_HOME 应指向 JDK 根目录而不是其下的 jre 子目录,否则与下文 echo $PATH 的结果不一致;export 的变量名应为 CLASSPATH,与上面定义保持一致。另外 dt.jar、tools.jar 仅随 java-1.8.0-openjdk-devel 包提供,只安装运行时包时这两项可省略。)
让修改生效:
source /etc/profile
[root@HIGDQEEMO63U9NR ~]# vi /etc/profile
[root@HIGDQEEMO63U9NR ~]#
[root@HIGDQEEMO63U9NR ~]# source /etc/profile
[root@HIGDQEEMO63U9NR ~]#
[root@HIGDQEEMO63U9NR ~]# echo $PATH
/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/mnt/c/Windows/system32:/mnt/c/Windows:/mnt/c/Windows/System32/Wbem:/mnt/c/Windows/System32/WindowsPowerShell/v1.0/:/mnt/c/Windows/System32/OpenSSH/:/mnt/d/Worksoft/Git/cmd:/mnt/d/Worksoft/Vagrant/bin:/mnt/c/ProgramData/chocolatey/bin:/mnt/c/tools/lxrunoffline:/mnt/d/Worksoft/XShell/:/mnt/c/Users/zamp/AppData/Local/Microsoft/WindowsApps:/mnt/d/Worksoft/laragon/bin/php/php-7.4.19-Win32-vc15-x64:/root/bin:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/bin:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre/bin
[root@HIGDQEEMO63U9NR ~]#
安装Kafka
从 Kafka官网 下载Kafka 安装包,解压安装:
[root@HIGDQEEMO63U9NR ~]# mv /mnt/d/kafka_2.13-2.8.0.tgz /usr/local/src/kafka_2.13-2.8.0.tgz
[root@HIGDQEEMO63U9NR ~]#
[root@HIGDQEEMO63U9NR ~]# cd /usr/local/src/
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# tar zxvf kafka_2.13-2.8.0.tgz
[root@HIGDQEEMO63U9NR src]# ls -l
total 69760
drwxr-xr-x 1 root root 512 Apr 14 22:34 kafka_2.13-2.8.0
-rwxrwxrwx 1 root root 71403603 Sep 2 14:47 kafka_2.13-2.8.0.tgz
[root@HIGDQEEMO63U9NR src]# mv /usr/local/src/kafka_2.13-2.8.0 /usr/local/kafka
[root@HIGDQEEMO63U9NR src]# cd /usr/local/kafka
[root@HIGDQEEMO63U9NR kafka]#
[root@HIGDQEEMO63U9NR kafka]# ls
bin config libs LICENSE licenses NOTICE site-docs
[root@HIGDQEEMO63U9NR kafka]#
功能验证
1、启动 Zookeeper
使用安装包中的脚本启动单节点Zookeeper 实例:
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
[root@HIGDQEEMO63U9NR kafka]# ls -l
total 20
drwxr-xr-x 1 root root 512 Apr 14 22:34 bin
drwxr-xr-x 1 root root 512 Apr 14 22:34 config
drwxr-xr-x 1 root root 512 Sep 2 14:50 libs
-rw-r--r-- 1 root root 14515 Apr 14 22:28 LICENSE
drwxr-xr-x 1 root root 512 Apr 14 22:34 licenses
drwxr-xr-x 1 root root 512 Sep 2 16:22 logs
-rw-r--r-- 1 root root 953 Apr 14 22:28 NOTICE
drwxr-xr-x 1 root root 512 Apr 14 22:34 site-docs
[root@HIGDQEEMO63U9NR kafka]# bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
[root@HIGDQEEMO63U9NR kafka]#
[root@HIGDQEEMO63U9NR kafka]# ls -l /tmp/zookeeper
total 0
drwxr-xr-x 1 root root 512 Sep 2 16:30 version-2
[root@HIGDQEEMO63U9NR kafka]#
2、启动 Kafka 服务
修改 配置文件:
vi config/server.properties
配置对外访问的ip,如果不配置外网无法访问(这里本地测试环境,配置成了本地地址):
listeners=PLAINTEXT://127.0.0.1:9092
advertised.listeners=PLAINTEXT://127.0.0.1:9092
使用kafka-server-start.sh 启动kafka 服务(这个命令是前台运行,看一下就可以了,后面讲后台运行的方法):
bin/kafka-server-start.sh config/server.properties
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-server-start.sh config/server.properties
[2021-09-02 16:30:25,218] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2021-09-02 16:30:25,538] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util)
[2021-09-02 16:30:25,632] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[2021-09-02 16:30:25,638] INFO starting (kafka.server.KafkaServer)
[2021-09-02 16:30:25,639] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer)
[2021-09-02 16:30:25,676] INFO [ZooKeeperClient Kafka server] Initializing a new session to localhost:2181. (kafka.zookeeper.ZooKeeperClient)
[2021-09-02 16:30:25,683] INFO Client environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,684] INFO Client environment:host.name=HIGDQEEMO63U9NR.localdomain (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,684] INFO Client environment:java.version=1.8.0_302 (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,684] INFO Client environment:java.vendor=Red Hat, Inc. (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,685] INFO Client environment:java.home=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,686] INFO Client environment:java.class.path=/usr/local/kafka/bin/../libs/activation-1.1.1.jar:/usr/local/kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/usr/local/kafka/bin/../libs/argparse4j-0.7.0.jar:/usr/local/kafka/bin/../libs/audience-annotations-0.5.0.jar:/usr/local/kafka/bin/../libs/commons-cli-1.4.jar:/usr/local/kafka/bin/../libs/commons-lang3-3.8.1.jar:/usr/local/kafka/bin/../libs/connect-api-2.8.0.jar:/usr/local/kafka/bin/../libs/connect-basic-auth-extension-2.8.0.jar:/usr/local/kafka/bin/../libs/connect-file-2.8.0.jar:/usr/local/kafka/bin/../libs/connect-json-2.8.0.jar:/usr/local/kafka/bin/../libs/connect-mirror-2.8.0.jar:/usr/local/kafka/bin/../libs/connect-mirror-client-2.8.0.jar:/usr/local/kafka/bin/../libs/connect-runtime-2.8.0.jar:/usr/local/kafka/bin/../libs/connect-transforms-2.8.0.jar:/usr/local/kafka/bin/../libs/hk2-api-2.6.1.jar:/usr/local/kafka/bin/../libs/hk2-locator-2.6.1.jar:/usr/local/kafka/bin/../libs/hk2-utils-2.6.1.jar:/usr/local/kafka/bin/../libs/jackson-annotations-2.10.5.jar:/usr/local/kafka/bin/../libs/jackson-core-2.10.5.jar:/usr/local/kafka/bin/../libs/jackson-databind-2.10.5.1.jar:/usr/local/kafka/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/usr/local/kafka/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/usr/local/kafka/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/usr/local/kafka/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/usr/local/kafka/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/usr/local/kafka/bin/../libs/jackson-module-paranamer-2.10.5.jar:/usr/local/kafka/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/usr/local/kafka/bin/../libs/jakarta.activation-api-1.2.1.jar:/usr/local/kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/usr/local/kafka/bin/../libs/jakarta.inject-2.6.1.jar:/usr/local/kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/usr/local/kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/usr/local/kafka/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/usr/local/kafka/bin/../libs
/javassist-3.27.0-GA.jar:/usr/local/kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/usr/local/kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/usr/local/kafka/bin/../libs/jaxb-api-2.3.0.jar:/usr/local/kafka/bin/../libs/jersey-client-2.31.jar:/usr/local/kafka/bin/../libs/jersey-common-2.31.jar:/usr/local/kafka/bin/../libs/jersey-container-servlet-2.31.jar:/usr/local/kafka/bin/../libs/jersey-container-servlet-core-2.31.jar:/usr/local/kafka/bin/../libs/jersey-hk2-2.31.jar:/usr/local/kafka/bin/../libs/jersey-media-jaxb-2.31.jar:/usr/local/kafka/bin/../libs/jersey-server-2.31.jar:/usr/local/kafka/bin/../libs/jetty-client-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-continuation-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-http-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-io-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-security-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-server-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-servlet-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-servlets-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-util-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jetty-util-ajax-9.4.39.v20210325.jar:/usr/local/kafka/bin/../libs/jline-3.12.1.jar:/usr/local/kafka/bin/../libs/jopt-simple-5.0.4.jar:/usr/local/kafka/bin/../libs/kafka_2.13-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka_2.13-2.8.0-sources.jar:/usr/local/kafka/bin/../libs/kafka-clients-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-log4j-appender-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-metadata-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-raft-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-shell-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-streams-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-streams-examples-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-streams-scala_2.13-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-streams-test-utils-2.8.0.jar:/usr/local/kafka/bin/../libs/kafka-tools-2.8.0.jar:/usr/local/kafka/bin
/../libs/log4j-1.2.17.jar:/usr/local/kafka/bin/../libs/lz4-java-1.7.1.jar:/usr/local/kafka/bin/../libs/maven-artifact-3.6.3.jar:/usr/local/kafka/bin/../libs/metrics-core-2.2.0.jar:/usr/local/kafka/bin/../libs/netty-buffer-4.1.62.Final.jar:/usr/local/kafka/bin/../libs/netty-codec-4.1.62.Final.jar:/usr/local/kafka/bin/../libs/netty-common-4.1.62.Final.jar:/usr/local/kafka/bin/../libs/netty-handler-4.1.62.Final.jar:/usr/local/kafka/bin/../libs/netty-resolver-4.1.62.Final.jar:/usr/local/kafka/bin/../libs/netty-transport-4.1.62.Final.jar:/usr/local/kafka/bin/../libs/netty-transport-native-epoll-4.1.62.Final.jar:/usr/local/kafka/bin/../libs/netty-transport-native-unix-common-4.1.62.Final.jar:/usr/local/kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/usr/local/kafka/bin/../libs/paranamer-2.8.jar:/usr/local/kafka/bin/../libs/plexus-utils-3.2.1.jar:/usr/local/kafka/bin/../libs/reflections-0.9.12.jar:/usr/local/kafka/bin/../libs/rocksdbjni-5.18.4.jar:/usr/local/kafka/bin/../libs/scala-collection-compat_2.13-2.3.0.jar:/usr/local/kafka/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/usr/local/kafka/bin/../libs/scala-library-2.13.5.jar:/usr/local/kafka/bin/../libs/scala-logging_2.13-3.9.2.jar:/usr/local/kafka/bin/../libs/scala-reflect-2.13.5.jar:/usr/local/kafka/bin/../libs/slf4j-api-1.7.30.jar:/usr/local/kafka/bin/../libs/slf4j-log4j12-1.7.30.jar:/usr/local/kafka/bin/../libs/snappy-java-1.1.8.1.jar:/usr/local/kafka/bin/../libs/zookeeper-3.5.9.jar:/usr/local/kafka/bin/../libs/zookeeper-jute-3.5.9.jar:/usr/local/kafka/bin/../libs/zstd-jni-1.4.9-1.jar (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,689] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,690] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,690] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,690] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,691] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,691] INFO Client environment:os.version=4.4.0-18362-Microsoft (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,692] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,692] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,692] INFO Client environment:user.dir=/usr/local/kafka (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,693] INFO Client environment:os.memory.free=1013MB (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,693] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,694] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,697] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@38d8f54a (org.apache.zookeeper.ZooKeeper)
[2021-09-02 16:30:25,703] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket)
[2021-09-02 16:30:25,710] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn)
[2021-09-02 16:30:25,713] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2021-09-02 16:30:25,716] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
[2021-09-02 16:30:25,722] INFO Socket connection established, initiating session, client: /127.0.0.1:51414, server: localhost/127.0.0.1:2181 (org.apache.zookeeper.ClientCnxn)
[2021-09-02 16:30:25,772] INFO Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x100010c95430000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
[2021-09-02 16:30:25,777] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2021-09-02 16:30:25,990] INFO [feature-zk-node-event-process-thread]: Starting (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread)
[2021-09-02 16:30:26,006] INFO Feature ZK node at path: /feature does not exist (kafka.server.FinalizedFeatureChangeListener)
[2021-09-02 16:30:26,008] INFO Cleared cache (kafka.server.FinalizedFeatureCache)
[2021-09-02 16:30:26,234] INFO Cluster ID = zdAxMkfKRHCkUyoDyuHXlg (kafka.server.KafkaServer)
[2021-09-02 16:30:26,239] WARN No meta.properties file under dir /tmp/kafka-logs/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-09-02 16:30:26,294] INFO KafkaConfig values:
advertised.host.name = null
advertised.listeners = null
advertised.port = null
alter.config.policy.class.name = null
alter.log.dirs.replication.quota.window.num = 11
alter.log.dirs.replication.quota.window.size.seconds = 1
authorizer.class.name =
auto.create.topics.enable = true
auto.leader.rebalance.enable = true
background.threads = 10
broker.heartbeat.interval.ms = 2000
broker.id = 0
broker.id.generation.enable = true
broker.rack = null
broker.session.timeout.ms = 9000
client.quota.callback.class = null
compression.type = producer
connection.failed.authentication.delay.ms = 100
connections.max.idle.ms = 600000
connections.max.reauth.ms = 0
control.plane.listener.name = null
controlled.shutdown.enable = true
controlled.shutdown.max.retries = 3
controlled.shutdown.retry.backoff.ms = 5000
controller.listener.names = null
controller.quorum.append.linger.ms = 25
controller.quorum.election.backoff.max.ms = 1000
controller.quorum.election.timeout.ms = 1000
controller.quorum.fetch.timeout.ms = 2000
controller.quorum.request.timeout.ms = 2000
controller.quorum.retry.backoff.ms = 20
controller.quorum.voters = []
controller.quota.window.num = 11
controller.quota.window.size.seconds = 1
controller.socket.timeout.ms = 30000
create.topic.policy.class.name = null
default.replication.factor = 1
delegation.token.expiry.check.interval.ms = 3600000
delegation.token.expiry.time.ms = 86400000
delegation.token.master.key = null
delegation.token.max.lifetime.ms = 604800000
delegation.token.secret.key = null
delete.records.purgatory.purge.interval.requests = 1
delete.topic.enable = true
fetch.max.bytes = 57671680
fetch.purgatory.purge.interval.requests = 1000
group.initial.rebalance.delay.ms = 0
group.max.session.timeout.ms = 1800000
group.max.size = 2147483647
group.min.session.timeout.ms = 6000
host.name =
initial.broker.registration.timeout.ms = 60000
inter.broker.listener.name = null
inter.broker.protocol.version = 2.8-IV1
kafka.metrics.polling.interval.secs = 10
kafka.metrics.reporters = []
leader.imbalance.check.interval.seconds = 300
leader.imbalance.per.broker.percentage = 10
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
listeners = null
log.cleaner.backoff.ms = 15000
log.cleaner.dedupe.buffer.size = 134217728
log.cleaner.delete.retention.ms = 86400000
log.cleaner.enable = true
log.cleaner.io.buffer.load.factor = 0.9
log.cleaner.io.buffer.size = 524288
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
log.cleaner.max.compaction.lag.ms = 9223372036854775807
log.cleaner.min.cleanable.ratio = 0.5
log.cleaner.min.compaction.lag.ms = 0
log.cleaner.threads = 1
log.cleanup.policy = [delete]
log.dir = /tmp/kafka-logs
log.dirs = /tmp/kafka-logs
log.flush.interval.messages = 9223372036854775807
log.flush.interval.ms = null
log.flush.offset.checkpoint.interval.ms = 60000
log.flush.scheduler.interval.ms = 9223372036854775807
log.flush.start.offset.checkpoint.interval.ms = 60000
log.index.interval.bytes = 4096
log.index.size.max.bytes = 10485760
log.message.downconversion.enable = true
log.message.format.version = 2.8-IV1
log.message.timestamp.difference.max.ms = 9223372036854775807
log.message.timestamp.type = CreateTime
log.preallocate = false
log.retention.bytes = -1
log.retention.check.interval.ms = 300000
log.retention.hours = 168
log.retention.minutes = null
log.retention.ms = null
log.roll.hours = 168
log.roll.jitter.hours = 0
log.roll.jitter.ms = null
log.roll.ms = null
log.segment.bytes = 1073741824
log.segment.delete.delay.ms = 60000
max.connection.creation.rate = 2147483647
max.connections = 2147483647
max.connections.per.ip = 2147483647
max.connections.per.ip.overrides =
max.incremental.fetch.session.cache.slots = 1000
message.max.bytes = 1048588
metadata.log.dir = null
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
min.insync.replicas = 1
node.id = -1
num.io.threads = 8
num.network.threads = 3
num.partitions = 1
num.recovery.threads.per.data.dir = 1
num.replica.alter.log.dirs.threads = null
num.replica.fetchers = 1
offset.metadata.max.bytes = 4096
offsets.commit.required.acks = -1
offsets.commit.timeout.ms = 5000
offsets.load.buffer.size = 5242880
offsets.retention.check.interval.ms = 600000
offsets.retention.minutes = 10080
offsets.topic.compression.codec = 0
offsets.topic.num.partitions = 50
offsets.topic.replication.factor = 1
offsets.topic.segment.bytes = 104857600
password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
password.encoder.iterations = 4096
password.encoder.key.length = 128
password.encoder.keyfactory.algorithm = null
password.encoder.old.secret = null
password.encoder.secret = null
port = 9092
principal.builder.class = null
process.roles = []
producer.purgatory.purge.interval.requests = 1000
queued.max.request.bytes = -1
queued.max.requests = 500
quota.consumer.default = 9223372036854775807
quota.producer.default = 9223372036854775807
quota.window.num = 11
quota.window.size.seconds = 1
replica.fetch.backoff.ms = 1000
replica.fetch.max.bytes = 1048576
replica.fetch.min.bytes = 1
replica.fetch.response.max.bytes = 10485760
replica.fetch.wait.max.ms = 500
replica.high.watermark.checkpoint.interval.ms = 5000
replica.lag.time.max.ms = 30000
replica.selector.class = null
replica.socket.receive.buffer.bytes = 65536
replica.socket.timeout.ms = 30000
replication.quota.window.num = 11
replication.quota.window.size.seconds = 1
request.timeout.ms = 30000
reserved.broker.max.id = 1000
sasl.client.callback.handler.class = null
sasl.enabled.mechanisms = [GSSAPI]
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.principal.to.local.rules = [DEFAULT]
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism.controller.protocol = GSSAPI
sasl.mechanism.inter.broker.protocol = GSSAPI
sasl.server.callback.handler.class = null
security.inter.broker.protocol = PLAINTEXT
security.providers = null
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
socket.receive.buffer.bytes = 102400
socket.request.max.bytes = 104857600
socket.send.buffer.bytes = 102400
ssl.cipher.suites = []
ssl.client.auth = none
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.principal.mapping.rules = DEFAULT
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
transaction.max.timeout.ms = 900000
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
transaction.state.log.load.buffer.size = 5242880
transaction.state.log.min.isr = 1
transaction.state.log.num.partitions = 50
transaction.state.log.replication.factor = 1
transaction.state.log.segment.bytes = 104857600
transactional.id.expiration.ms = 604800000
unclean.leader.election.enable = false
zookeeper.clientCnxnSocket = null
zookeeper.connect = localhost:2181
zookeeper.connection.timeout.ms = 18000
zookeeper.max.in.flight.requests = 10
zookeeper.session.timeout.ms = 18000
zookeeper.set.acl = false
zookeeper.ssl.cipher.suites = null
zookeeper.ssl.client.enable = false
zookeeper.ssl.crl.enable = false
zookeeper.ssl.enabled.protocols = null
zookeeper.ssl.endpoint.identification.algorithm = HTTPS
zookeeper.ssl.keystore.location = null
zookeeper.ssl.keystore.password = null
zookeeper.ssl.keystore.type = null
zookeeper.ssl.ocsp.enable = false
zookeeper.ssl.protocol = TLSv1.2
zookeeper.ssl.truststore.location = null
zookeeper.ssl.truststore.password = null
zookeeper.ssl.truststore.type = null
zookeeper.sync.time.ms = 2000
(kafka.server.KafkaConfig)
[2021-09-02 16:30:26,383] INFO KafkaConfig values:
advertised.host.name = null
advertised.listeners = null
advertised.port = null
alter.config.policy.class.name = null
alter.log.dirs.replication.quota.window.num = 11
alter.log.dirs.replication.quota.window.size.seconds = 1
authorizer.class.name =
auto.create.topics.enable = true
auto.leader.rebalance.enable = true
background.threads = 10
broker.heartbeat.interval.ms = 2000
broker.id = 0
broker.id.generation.enable = true
broker.rack = null
broker.session.timeout.ms = 9000
client.quota.callback.class = null
compression.type = producer
connection.failed.authentication.delay.ms = 100
connections.max.idle.ms = 600000
connections.max.reauth.ms = 0
control.plane.listener.name = null
controlled.shutdown.enable = true
controlled.shutdown.max.retries = 3
controlled.shutdown.retry.backoff.ms = 5000
controller.listener.names = null
controller.quorum.append.linger.ms = 25
controller.quorum.election.backoff.max.ms = 1000
controller.quorum.election.timeout.ms = 1000
controller.quorum.fetch.timeout.ms = 2000
controller.quorum.request.timeout.ms = 2000
controller.quorum.retry.backoff.ms = 20
controller.quorum.voters = []
controller.quota.window.num = 11
controller.quota.window.size.seconds = 1
controller.socket.timeout.ms = 30000
create.topic.policy.class.name = null
default.replication.factor = 1
delegation.token.expiry.check.interval.ms = 3600000
delegation.token.expiry.time.ms = 86400000
delegation.token.master.key = null
delegation.token.max.lifetime.ms = 604800000
delegation.token.secret.key = null
delete.records.purgatory.purge.interval.requests = 1
delete.topic.enable = true
fetch.max.bytes = 57671680
fetch.purgatory.purge.interval.requests = 1000
group.initial.rebalance.delay.ms = 0
group.max.session.timeout.ms = 1800000
group.max.size = 2147483647
group.min.session.timeout.ms = 6000
host.name =
initial.broker.registration.timeout.ms = 60000
inter.broker.listener.name = null
inter.broker.protocol.version = 2.8-IV1
kafka.metrics.polling.interval.secs = 10
kafka.metrics.reporters = []
leader.imbalance.check.interval.seconds = 300
leader.imbalance.per.broker.percentage = 10
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
listeners = null
log.cleaner.backoff.ms = 15000
log.cleaner.dedupe.buffer.size = 134217728
log.cleaner.delete.retention.ms = 86400000
log.cleaner.enable = true
log.cleaner.io.buffer.load.factor = 0.9
log.cleaner.io.buffer.size = 524288
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
log.cleaner.max.compaction.lag.ms = 9223372036854775807
log.cleaner.min.cleanable.ratio = 0.5
log.cleaner.min.compaction.lag.ms = 0
log.cleaner.threads = 1
log.cleanup.policy = [delete]
log.dir = /tmp/kafka-logs
log.dirs = /tmp/kafka-logs
log.flush.interval.messages = 9223372036854775807
log.flush.interval.ms = null
log.flush.offset.checkpoint.interval.ms = 60000
log.flush.scheduler.interval.ms = 9223372036854775807
log.flush.start.offset.checkpoint.interval.ms = 60000
log.index.interval.bytes = 4096
log.index.size.max.bytes = 10485760
log.message.downconversion.enable = true
log.message.format.version = 2.8-IV1
log.message.timestamp.difference.max.ms = 9223372036854775807
log.message.timestamp.type = CreateTime
log.preallocate = false
log.retention.bytes = -1
log.retention.check.interval.ms = 300000
log.retention.hours = 168
log.retention.minutes = null
log.retention.ms = null
log.roll.hours = 168
log.roll.jitter.hours = 0
log.roll.jitter.ms = null
log.roll.ms = null
log.segment.bytes = 1073741824
log.segment.delete.delay.ms = 60000
max.connection.creation.rate = 2147483647
max.connections = 2147483647
max.connections.per.ip = 2147483647
max.connections.per.ip.overrides =
max.incremental.fetch.session.cache.slots = 1000
message.max.bytes = 1048588
metadata.log.dir = null
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
min.insync.replicas = 1
node.id = -1
num.io.threads = 8
num.network.threads = 3
num.partitions = 1
num.recovery.threads.per.data.dir = 1
num.replica.alter.log.dirs.threads = null
num.replica.fetchers = 1
offset.metadata.max.bytes = 4096
offsets.commit.required.acks = -1
offsets.commit.timeout.ms = 5000
offsets.load.buffer.size = 5242880
offsets.retention.check.interval.ms = 600000
offsets.retention.minutes = 10080
offsets.topic.compression.codec = 0
offsets.topic.num.partitions = 50
offsets.topic.replication.factor = 1
offsets.topic.segment.bytes = 104857600
password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
password.encoder.iterations = 4096
password.encoder.key.length = 128
password.encoder.keyfactory.algorithm = null
password.encoder.old.secret = null
password.encoder.secret = null
port = 9092
principal.builder.class = null
process.roles = []
producer.purgatory.purge.interval.requests = 1000
queued.max.request.bytes = -1
queued.max.requests = 500
quota.consumer.default = 9223372036854775807
quota.producer.default = 9223372036854775807
quota.window.num = 11
quota.window.size.seconds = 1
replica.fetch.backoff.ms = 1000
replica.fetch.max.bytes = 1048576
replica.fetch.min.bytes = 1
replica.fetch.response.max.bytes = 10485760
replica.fetch.wait.max.ms = 500
replica.high.watermark.checkpoint.interval.ms = 5000
replica.lag.time.max.ms = 30000
replica.selector.class = null
replica.socket.receive.buffer.bytes = 65536
replica.socket.timeout.ms = 30000
replication.quota.window.num = 11
replication.quota.window.size.seconds = 1
request.timeout.ms = 30000
reserved.broker.max.id = 1000
sasl.client.callback.handler.class = null
sasl.enabled.mechanisms = [GSSAPI]
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.principal.to.local.rules = [DEFAULT]
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism.controller.protocol = GSSAPI
sasl.mechanism.inter.broker.protocol = GSSAPI
sasl.server.callback.handler.class = null
security.inter.broker.protocol = PLAINTEXT
security.providers = null
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
socket.receive.buffer.bytes = 102400
socket.request.max.bytes = 104857600
socket.send.buffer.bytes = 102400
ssl.cipher.suites = []
ssl.client.auth = none
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.principal.mapping.rules = DEFAULT
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
transaction.max.timeout.ms = 900000
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
transaction.state.log.load.buffer.size = 5242880
transaction.state.log.min.isr = 1
transaction.state.log.num.partitions = 50
transaction.state.log.replication.factor = 1
transaction.state.log.segment.bytes = 104857600
transactional.id.expiration.ms = 604800000
unclean.leader.election.enable = false
zookeeper.clientCnxnSocket = null
zookeeper.connect = localhost:2181
zookeeper.connection.timeout.ms = 18000
zookeeper.max.in.flight.requests = 10
zookeeper.session.timeout.ms = 18000
zookeeper.set.acl = false
zookeeper.ssl.cipher.suites = null
zookeeper.ssl.client.enable = false
zookeeper.ssl.crl.enable = false
zookeeper.ssl.enabled.protocols = null
zookeeper.ssl.endpoint.identification.algorithm = HTTPS
zookeeper.ssl.keystore.location = null
zookeeper.ssl.keystore.password = null
zookeeper.ssl.keystore.type = null
zookeeper.ssl.ocsp.enable = false
zookeeper.ssl.protocol = TLSv1.2
zookeeper.ssl.truststore.location = null
zookeeper.ssl.truststore.password = null
zookeeper.ssl.truststore.type = null
zookeeper.sync.time.ms = 2000
(kafka.server.KafkaConfig)
[2021-09-02 16:30:26,506] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 16:30:26,507] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 16:30:26,509] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 16:30:26,511] INFO [ThrottledChannelReaper-ControllerMutation]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 16:30:26,532] INFO Log directory /tmp/kafka-logs not found, creating it. (kafka.log.LogManager)
[2021-09-02 16:30:26,554] INFO Loading logs from log dirs ArraySeq(/tmp/kafka-logs) (kafka.log.LogManager)
[2021-09-02 16:30:26,562] INFO Attempting recovery for all logs in /tmp/kafka-logs since no clean shutdown file was found (kafka.log.LogManager)
[2021-09-02 16:30:26,568] INFO Loaded 0 logs in 8ms. (kafka.log.LogManager)
[2021-09-02 16:30:26,569] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2021-09-02 16:30:26,573] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2021-09-02 16:30:27,135] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas)
[2021-09-02 16:30:27,143] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2021-09-02 16:30:27,188] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer)
[2021-09-02 16:30:27,228] INFO [broker-0-to-controller-send-thread]: Starting (kafka.server.BrokerToControllerRequestThread)
[2021-09-02 16:30:27,248] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 16:30:27,249] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 16:30:27,250] INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 16:30:27,250] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 16:30:27,267] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2021-09-02 16:30:27,301] INFO Creating /brokers/ids/0 (is it secure? false) (kafka.zk.KafkaZkClient)
[2021-09-02 16:30:27,328] INFO Stat of the created znode at /brokers/ids/0 is: 25,25,1630571427317,1630571427317,1,0,0,72058747593359360,238,0,25
(kafka.zk.KafkaZkClient)
[2021-09-02 16:30:27,331] INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT://HIGDQEEMO63U9NR.localdomain:9092, czxid (broker epoch): 25 (kafka.zk.KafkaZkClient)
[2021-09-02 16:30:27,421] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 16:30:27,431] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 16:30:27,432] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 16:30:27,438] INFO Successfully created /controller_epoch with initial epoch 0 (kafka.zk.KafkaZkClient)
[2021-09-02 16:30:27,449] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2021-09-02 16:30:27,456] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2021-09-02 16:30:27,463] INFO Feature ZK node created at path: /feature (kafka.server.FinalizedFeatureChangeListener)
[2021-09-02 16:30:27,504] INFO [ProducerId Manager 0]: Acquired new producerId block (brokerId:0,blockStartProducerId:0,blockEndProducerId:999) by writing to Zk with path version 1 (kafka.coordinator.transaction.ProducerIdManager)
[2021-09-02 16:30:27,507] INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-09-02 16:30:27,513] INFO [Transaction Marker Channel Manager 0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2021-09-02 16:30:27,513] INFO [TransactionCoordinator id=0] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-09-02 16:30:27,520] INFO Updated cache from existing <empty> to latest FinalizedFeaturesAndEpoch(features=Features{}, epoch=0). (kafka.server.FinalizedFeatureCache)
[2021-09-02 16:30:27,556] INFO [ExpirationReaper-0-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 16:30:27,587] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2021-09-02 16:30:27,602] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Starting socket server acceptors and processors (kafka.network.SocketServer)
[2021-09-02 16:30:27,614] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Started data-plane acceptor and processor(s) for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer)
[2021-09-02 16:30:27,615] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Started socket server acceptors and processors (kafka.network.SocketServer)
[2021-09-02 16:30:27,624] INFO Kafka version: 2.8.0 (org.apache.kafka.common.utils.AppInfoParser)
[2021-09-02 16:30:27,624] INFO Kafka commitId: ebb1d6e21cc92130 (org.apache.kafka.common.utils.AppInfoParser)
[2021-09-02 16:30:27,626] INFO Kafka startTimeMs: 1630571427617 (org.apache.kafka.common.utils.AppInfoParser)
[2021-09-02 16:30:27,629] INFO [KafkaServer id=0] started (kafka.server.KafkaServer)
[2021-09-02 16:30:27,747] INFO [broker-0-to-controller-send-thread]: Recorded new controller, from now on will use broker HIGDQEEMO63U9NR.localdomain:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread)
发现页面会停在这,我们需要后台运行这个程序:
bin/kafka-server-start.sh config/server.properties 1>/dev/null 2>&1 &
输出进程id,和运行的打印信息:
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-server-start.sh config/server.properties 1>/dev/null 2>&1 &
[2] 3176
[root@HIGDQEEMO63U9NR kafka]#
[root@HIGDQEEMO63U9NR kafka]# ps aux
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.0 0.0 8892 180 ? Ssl 11:36 0:00 /init
root 6 0.0 0.0 8896 128 tty1 Ss 11:36 0:00 /init
root 7 0.0 0.0 115792 1956 tty1 S 11:36 0:01 -bash
root 2312 0.2 1.0 5840936 88464 tty1 Sl 16:30 0:01 /usr/lib/jvm/java-1.8.0-openjdk-1.8
root 2355 0.8 4.4 6831508 367208 tty1 Tl 16:30 0:07 /usr/lib/jvm/java-1.8.0-openjdk-1.8
root 3176 3.5 4.4 6831508 368536 tty1 Sl 16:41 0:06 /usr/lib/jvm/java-1.8.0-openjdk-1.8
root 3603 0.0 0.0 119064 2080 tty1 R 16:44 0:00 ps aux
[root@HIGDQEEMO63U9NR kafka]#
3、创建topic
使用kafka-topics.sh 创建单分区单副本的topic test:
bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic test --replication-factor 1 --partitions 1
查看topic:
bin/kafka-topics.sh --zookeeper localhost:2181 --list
查看partition:
bin/kafka-topics.sh --zookeeper localhost:2181 --describe --topic test
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
Created topic test.
[root@HIGDQEEMO63U9NR kafka]#
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-topics.sh --list --zookeeper localhost:2181
test
[root@HIGDQEEMO63U9NR kafka]#
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-topics.sh --zookeeper localhost:2181 --describe --topic test
Topic: test TopicId: 3lmx996XQOqSodxFDSDXEA PartitionCount: 1 ReplicationFactor: 1 Configs:
Topic: test Partition: 0 Leader: 0 Replicas: 0 Isr: 0
[root@HIGDQEEMO63U9NR kafka]#
4、生产消息
使用kafka-console-producer.sh 发送消息:
bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic test
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic test
>1
>2
>3
>4
>^Z
[22]+ Stopped bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic test
[root@HIGDQEEMO63U9NR kafka]#
5、消费消息
使用kafka-console-consumer.sh 接收消息并在终端打印:
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
1
2
3
4
^Z
[24]+ Stopped bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
[root@HIGDQEEMO63U9NR kafka]#
如果输入错误,报错会显示具体参数:
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
zookeeper is not a recognized option
Option Description
------ -----------
--bootstrap-server <String: server to REQUIRED: The server(s) to connect to.
connect to>
--consumer-property <String: A mechanism to pass user-defined
consumer_prop> properties in the form key=value to
the consumer.
--consumer.config <String: config file> Consumer config properties file. Note
that [consumer-property] takes
precedence over this config.
--enable-systest-events Log lifecycle events of the consumer
in addition to logging consumed
messages. (This is specific for
system tests.)
--formatter <String: class> The name of a class to use for
formatting kafka messages for
display. (default: kafka.tools.
DefaultMessageFormatter)
--from-beginning If the consumer does not already have
an established offset to consume
from, start with the earliest
message present in the log rather
than the latest message.
--group <String: consumer group id> The consumer group id of the consumer.
--help Print usage information.
--isolation-level <String> Set to read_committed in order to
filter out transactional messages
which are not committed. Set to
read_uncommitted to read all
messages. (default: read_uncommitted)
--key-deserializer <String:
deserializer for key>
--max-messages <Integer: num_messages> The maximum number of messages to
consume before exiting. If not set,
consumption is continual.
--offset <String: consume offset> The offset id to consume from (a non-
negative number), or 'earliest'
which means from beginning, or
'latest' which means from end
(default: latest)
--partition <Integer: partition> The partition to consume from.
Consumption starts from the end of
the partition unless '--offset' is
specified.
--property <String: prop> The properties to initialize the
message formatter. Default
properties include:
print.timestamp=true|false
print.key=true|false
print.offset=true|false
print.partition=true|false
print.headers=true|false
print.value=true|false
key.separator=<key.separator>
line.separator=<line.separator>
headers.separator=<line.separator>
null.literal=<null.literal>
key.deserializer=<key.deserializer>
value.deserializer=<value.
deserializer>
header.deserializer=<header.
deserializer>
Users can also pass in customized
properties for their formatter; more
specifically, users can pass in
properties keyed with 'key.
deserializer.', 'value.
deserializer.' and 'headers.
deserializer.' prefixes to configure
their deserializers.
--skip-message-on-error If there is an error when processing a
message, skip it instead of halt.
--timeout-ms <Integer: timeout_ms> If specified, exit if no message is
available for consumption for the
specified interval.
--topic <String: topic> The topic id to consume on.
--value-deserializer <String:
deserializer for values>
--version Display Kafka version.
--whitelist <String: whitelist> Regular expression specifying
whitelist of topics to include for
consumption.
[root@HIGDQEEMO63U9NR kafka]#
其他
看一下配置文件内容:
cat config/server.properties
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
#listeners=PLAINTEXT://:9092
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
#advertised.listeners=PLAINTEXT://your.host.name:9092
# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/tmp/kafka-logs
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=18000
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
集群配置
修改配置文件,主要修改三个地方,并新增一行 broker.list 配置:
broker.id=1 # 保证三个broker的id不一样
listeners=PLAINTEXT://:9001 # 保证broker监听端口号不一样
log.dirs=/tmp/kafka-logs1 # 保证broker的数据文件不是一样的
broker.list=localhost:9001,localhost:9002,localhost:9003 # 添加的
停止服务
停止Kafka服务:
bin/kafka-server-stop.sh
停止ZooKeeper服务:
bin/zookeeper-server-stop.sh
[root@HIGDQEEMO63U9NR kafka]# ps aux
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.0 0.0 8892 172 ? Ssl 11:36 0:00 /init
root 6 0.0 0.0 8896 124 tty1 Ss 11:36 0:00 /init
root 7 0.0 0.0 115792 1916 tty1 S 11:36 0:01 -bash
root 2312 0.0 0.8 5840936 66376 tty1 Sl 16:30 0:03 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 2355 0.1 0.6 6831508 50436 tty1 Tl 16:30 0:07 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 3176 0.6 3.4 6831508 282796 tty1 Sl 16:41 0:24 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 4328 0.0 0.4 4316104 37944 tty1 Tl 16:54 0:01 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 4684 0.0 0.3 4316104 26864 tty1 Tl 16:55 0:01 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 5040 0.0 0.4 4316104 33668 tty1 Tl 16:55 0:01 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 5760 0.0 0.3 4316104 28988 tty1 Tl 17:03 0:01 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 6116 0.0 0.9 4316104 74660 tty1 Tl 17:03 0:01 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 6474 0.0 0.9 4316104 74444 tty1 Tl 17:06 0:01 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 6830 0.1 0.9 4316104 76520 tty1 Tl 17:08 0:02 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 7545 0.3 1.4 4251588 119088 tty1 Tl 17:29 0:02 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 7900 0.3 1.3 4316104 113180 tty1 Tl 17:30 0:02 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 8256 0.3 1.4 4251588 118552 tty1 Tl 17:31 0:02 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 8975 0.9 1.3 4316104 113760 tty1 Tl 17:38 0:02 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86
root 9332 2.0 0.0 119064 2076 tty1 R 17:42 0:00 ps aux
[root@HIGDQEEMO63U9NR kafka]#
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-server-stop.sh
[root@HIGDQEEMO63U9NR kafka]# [2021-09-02 17:42:23,051] INFO Terminating process due to signal SIGTERM (org.apache.kafka.common.utils.LoggingSignalHandler)
[2021-09-02 17:42:23,056] INFO [KafkaServer id=0] shutting down (kafka.server.KafkaServer)
[2021-09-02 17:42:23,066] INFO [KafkaServer id=0] Starting controlled shutdown (kafka.server.KafkaServer)
[2021-09-02 17:42:23,093] WARN [KafkaServer id=0] Error during controlled shutdown, possibly because leader movement took longer than the configured controller.socket.timeout.ms and/or request.timeout.ms: Connection to 0 was disconnected before the response was read (kafka.server.KafkaServer)
[2021-09-02 17:42:28,097] WARN [KafkaServer id=0] Retrying controlled shutdown after the previous attempt failed... (kafka.server.KafkaServer)
[2021-09-02 17:42:28,117] WARN [KafkaServer id=0] Error during controlled shutdown, possibly because leader movement took longer than the configured controller.socket.timeout.ms and/or request.timeout.ms: Connection to 0 was disconnected before the response was read (kafka.server.KafkaServer)
[2021-09-02 17:42:33,122] WARN [KafkaServer id=0] Retrying controlled shutdown after the previous attempt failed... (kafka.server.KafkaServer)
[2021-09-02 17:42:33,148] WARN [KafkaServer id=0] Error during controlled shutdown, possibly because leader movement took longer than the configured controller.socket.timeout.ms and/or request.timeout.ms: Connection to 0 was disconnected before the response was read (kafka.server.KafkaServer)
[2021-09-02 17:42:38,154] WARN [KafkaServer id=0] Retrying controlled shutdown after the previous attempt failed... (kafka.server.KafkaServer)
[2021-09-02 17:42:38,186] WARN [KafkaServer id=0] Proceeding to do an unclean shutdown as all the controlled shutdown attempts failed (kafka.server.KafkaServer)
[2021-09-02 17:42:38,191] INFO [/config/changes-event-process-thread]: Shutting down (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2021-09-02 17:42:38,196] INFO [/config/changes-event-process-thread]: Stopped (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2021-09-02 17:42:38,196] INFO [/config/changes-event-process-thread]: Shutdown completed (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2021-09-02 17:42:38,206] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Stopping socket server request processors (kafka.network.SocketServer)
[2021-09-02 17:42:38,231] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Stopped socket server request processors (kafka.network.SocketServer)
[2021-09-02 17:42:38,235] INFO [data-plane Kafka Request Handler on Broker 0], shutting down (kafka.server.KafkaRequestHandlerPool)
[2021-09-02 17:42:38,243] INFO [data-plane Kafka Request Handler on Broker 0], shut down completely (kafka.server.KafkaRequestHandlerPool)
[2021-09-02 17:42:38,250] INFO [ExpirationReaper-0-AlterAcls]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,293] INFO [ExpirationReaper-0-AlterAcls]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,293] INFO [ExpirationReaper-0-AlterAcls]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,296] INFO [KafkaApi-0] Shutdown complete. (kafka.server.KafkaApis)
[2021-09-02 17:42:38,298] INFO [ExpirationReaper-0-topic]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,412] INFO [ExpirationReaper-0-topic]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,412] INFO [ExpirationReaper-0-topic]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,420] INFO [TransactionCoordinator id=0] Shutting down. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-09-02 17:42:38,423] INFO [ProducerId Manager 0]: Shutdown complete: last producerId assigned 1000 (kafka.coordinator.transaction.ProducerIdManager)
[2021-09-02 17:42:38,426] INFO [Transaction State Manager 0]: Shutdown complete (kafka.coordinator.transaction.TransactionStateManager)
[2021-09-02 17:42:38,428] INFO [Transaction Marker Channel Manager 0]: Shutting down (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2021-09-02 17:42:38,430] INFO [Transaction Marker Channel Manager 0]: Stopped (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2021-09-02 17:42:38,430] INFO [Transaction Marker Channel Manager 0]: Shutdown completed (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2021-09-02 17:42:38,442] INFO [TransactionCoordinator id=0] Shutdown complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-09-02 17:42:38,445] INFO [GroupCoordinator 0]: Shutting down. (kafka.coordinator.group.GroupCoordinator)
[2021-09-02 17:42:38,447] INFO [ExpirationReaper-0-Heartbeat]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,485] INFO [ExpirationReaper-0-Heartbeat]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,485] INFO [ExpirationReaper-0-Heartbeat]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,487] INFO [ExpirationReaper-0-Rebalance]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,677] INFO [ExpirationReaper-0-Rebalance]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,677] INFO [ExpirationReaper-0-Rebalance]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,680] INFO [GroupCoordinator 0]: Shutdown complete. (kafka.coordinator.group.GroupCoordinator)
[2021-09-02 17:42:38,681] INFO [ReplicaManager broker=0] Shutting down (kafka.server.ReplicaManager)
[2021-09-02 17:42:38,681] INFO [LogDirFailureHandler]: Shutting down (kafka.server.ReplicaManager$LogDirFailureHandler)
[2021-09-02 17:42:38,682] INFO [LogDirFailureHandler]: Stopped (kafka.server.ReplicaManager$LogDirFailureHandler)
[2021-09-02 17:42:38,682] INFO [LogDirFailureHandler]: Shutdown completed (kafka.server.ReplicaManager$LogDirFailureHandler)
[2021-09-02 17:42:38,683] INFO [ReplicaFetcherManager on broker 0] shutting down (kafka.server.ReplicaFetcherManager)
[2021-09-02 17:42:38,685] INFO [ReplicaFetcherManager on broker 0] shutdown completed (kafka.server.ReplicaFetcherManager)
[2021-09-02 17:42:38,686] INFO [ReplicaAlterLogDirsManager on broker 0] shutting down (kafka.server.ReplicaAlterLogDirsManager)
[2021-09-02 17:42:38,687] INFO [ReplicaAlterLogDirsManager on broker 0] shutdown completed (kafka.server.ReplicaAlterLogDirsManager)
[2021-09-02 17:42:38,689] INFO [ExpirationReaper-0-Fetch]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,775] INFO [ExpirationReaper-0-Fetch]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,775] INFO [ExpirationReaper-0-Fetch]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,778] INFO [ExpirationReaper-0-Produce]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,811] INFO [ExpirationReaper-0-Produce]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,811] INFO [ExpirationReaper-0-Produce]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:38,814] INFO [ExpirationReaper-0-DeleteRecords]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:39,014] INFO [ExpirationReaper-0-DeleteRecords]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:39,014] INFO [ExpirationReaper-0-DeleteRecords]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:39,021] INFO [ExpirationReaper-0-ElectLeader]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:39,135] INFO [ExpirationReaper-0-ElectLeader]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:39,135] INFO [ExpirationReaper-0-ElectLeader]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-09-02 17:42:39,170] INFO [ReplicaManager broker=0] Shut down completely (kafka.server.ReplicaManager)
[2021-09-02 17:42:39,172] INFO [broker-0-to-controller-send-thread]: Shutting down (kafka.server.BrokerToControllerRequestThread)
[2021-09-02 17:42:39,175] INFO [broker-0-to-controller-send-thread]: Stopped (kafka.server.BrokerToControllerRequestThread)
[2021-09-02 17:42:39,175] INFO [broker-0-to-controller-send-thread]: Shutdown completed (kafka.server.BrokerToControllerRequestThread)
[2021-09-02 17:42:39,188] INFO Broker to controller channel manager for alterIsrChannel shutdown (kafka.server.BrokerToControllerChannelManagerImpl)
[2021-09-02 17:42:39,191] INFO Shutting down. (kafka.log.LogManager)
[2021-09-02 17:42:39,212] INFO Shutdown complete. (kafka.log.LogManager)
[2021-09-02 17:42:39,224] INFO [feature-zk-node-event-process-thread]: Shutting down (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread)
[2021-09-02 17:42:39,225] INFO [feature-zk-node-event-process-thread]: Stopped (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread)
[2021-09-02 17:42:39,225] INFO [feature-zk-node-event-process-thread]: Shutdown completed (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread)
[2021-09-02 17:42:39,228] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient)
[2021-09-02 17:42:39,348] INFO EventThread shut down for session: 0x100010c95430001 (org.apache.zookeeper.ClientCnxn)
[2021-09-02 17:42:39,348] INFO Session: 0x100010c95430001 closed (org.apache.zookeeper.ZooKeeper)
[2021-09-02 17:42:39,350] INFO [ZooKeeperClient Kafka server] Closed. (kafka.zookeeper.ZooKeeperClient)
[2021-09-02 17:42:39,351] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:40,269] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:40,269] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:40,271] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:41,262] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:41,262] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:41,269] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:42,262] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:42,262] INFO [ThrottledChannelReaper-Request]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:42,269] INFO [ThrottledChannelReaper-ControllerMutation]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:43,225] INFO [ThrottledChannelReaper-ControllerMutation]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:43,225] INFO [ThrottledChannelReaper-ControllerMutation]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-09-02 17:42:43,230] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Shutting down socket server (kafka.network.SocketServer)
[2021-09-02 17:42:43,280] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Shutdown completed (kafka.network.SocketServer)
[2021-09-02 17:42:43,282] INFO Metrics scheduler closed (org.apache.kafka.common.metrics.Metrics)
[2021-09-02 17:42:43,283] INFO Closing reporter org.apache.kafka.common.metrics.JmxReporter (org.apache.kafka.common.metrics.Metrics)
[2021-09-02 17:42:43,285] INFO Metrics reporters closed (org.apache.kafka.common.metrics.Metrics)
[2021-09-02 17:42:43,288] INFO Broker and topic stats closed (kafka.server.BrokerTopicStats)
[2021-09-02 17:42:43,290] INFO App info kafka.server for 0 unregistered (org.apache.kafka.common.utils.AppInfoParser)
[2021-09-02 17:42:43,290] INFO [KafkaServer id=0] shut down completed (kafka.server.KafkaServer)
[2] Exit 143 bin/kafka-server-start.sh config/server.properties
[root@HIGDQEEMO63U9NR kafka]#
[root@HIGDQEEMO63U9NR kafka]# bin/zookeeper-server-stop.sh
[root@HIGDQEEMO63U9NR kafka]#
安装ES
Elasticsearch 依赖Java运行环境,需要先安装JDK,上面安装Kafka时安装过了,这里不再重复安装。
安装Elasticsearch
从 Elasticsearch官网 下载 Elasticsearch软件包。
[root@HIGDQEEMO63U9NR ~]# pwd
/root
[root@HIGDQEEMO63U9NR ~]#
[root@HIGDQEEMO63U9NR ~]# mv /mnt/d/elasticsearch-7.14.1-linux-x86_64.tar.gz /usr/local/src/elasticsearch-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR ~]#
[root@HIGDQEEMO63U9NR ~]# cd /usr/local/src
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# tar -zxvf elasticsearch-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]# ls -l
total 406400
drwxr-xr-x 1 root root 512 Aug 26 17:04 elasticsearch-7.14.1
-rwxrwxrwx 1 root root 344049417 Sep 3 10:48 elasticsearch-7.14.1-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 71403603 Sep 2 14:47 kafka_2.13-2.8.0.tgz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# mv /usr/local/src/elasticsearch-7.14.1 /usr/local/elasticsearch
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# ls -l
total 406400
-rwxrwxrwx 1 root root 344049417 Sep 3 10:48 elasticsearch-7.14.1-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 71403603 Sep 2 14:47 kafka_2.13-2.8.0.tgz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# cd /usr/local/elasticsearch/
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# ls -l
total 612
drwxr-xr-x 1 root root 512 Aug 26 17:05 bin
drwxr-xr-x 1 root root 512 Sep 3 11:14 config
drwxr-xr-x 1 root root 512 Aug 26 17:05 jdk
drwxr-xr-x 1 root root 512 Aug 26 17:04 lib
-rw-r--r-- 1 root root 3860 Aug 26 16:58 LICENSE.txt
drwxr-xr-x 1 root root 512 Aug 26 17:02 logs
drwxr-xr-x 1 root root 512 Aug 26 17:05 modules
-rw-r--r-- 1 root root 615722 Aug 26 17:02 NOTICE.txt
drwxr-xr-x 1 root root 512 Aug 26 17:02 plugins
-rw-r--r-- 1 root root 2710 Aug 26 16:57 README.asciidoc
[root@HIGDQEEMO63U9NR elasticsearch]#
安装完成。
账户准备
在ES5.0版本后不支持与logstash和kibana2.x版本的混用,且安全级别的提升,使得ES在后续的版本中不允许使用root用户启动, 因此我们需要创建另外独立账户专供ES使用。并且需要在root权限下将该特定环境准备好。
确认现存组和用户:
[root@HIGDQEEMO63U9NR elasticsearch]# cat /etc/group
root:x:0:
bin:x:1:
daemon:x:2:
sys:x:3:
adm:x:4:
tty:x:5:
disk:x:6:
lp:x:7:
mem:x:8:
kmem:x:9:
wheel:x:10:
cdrom:x:11:
mail:x:12:postfix
man:x:15:
dialout:x:18:
floppy:x:19:
games:x:20:
tape:x:33:
video:x:39:
ftp:x:50:
lock:x:54:
audio:x:63:
nobody:x:99:
users:x:100:
utmp:x:22:
utempter:x:35:
input:x:999:
systemd-journal:x:190:
systemd-network:x:192:
dbus:x:81:
polkitd:x:998:
rpc:x:32:
ssh_keys:x:997:
cgred:x:996:
rpcuser:x:29:
nfsnobody:x:65534:
sshd:x:74:
postdrop:x:90:
postfix:x:89:
chrony:x:995:
nginx:x:994:
apache:x:48:
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# cat /etc/passwd
root:x:0:0:root:/root:/bin/bash
bin:x:1:1:bin:/bin:/sbin/nologin
daemon:x:2:2:daemon:/sbin:/sbin/nologin
adm:x:3:4:adm:/var/adm:/sbin/nologin
lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin
sync:x:5:0:sync:/sbin:/bin/sync
shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown
halt:x:7:0:halt:/sbin:/sbin/halt
mail:x:8:12:mail:/var/spool/mail:/sbin/nologin
operator:x:11:0:operator:/root:/sbin/nologin
games:x:12:100:games:/usr/games:/sbin/nologin
ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin
nobody:x:99:99:Nobody:/:/sbin/nologin
systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin
dbus:x:81:81:System message bus:/:/sbin/nologin
polkitd:x:999:998:User for polkitd:/:/sbin/nologin
rpc:x:32:32:Rpcbind Daemon:/var/lib/rpcbind:/sbin/nologin
rpcuser:x:29:29:RPC Service User:/var/lib/nfs:/sbin/nologin
nfsnobody:x:65534:65534:Anonymous NFS User:/var/lib/nfs:/sbin/nologin
sshd:x:74:74:Privilege-separated SSH:/var/empty/sshd:/sbin/nologin
postfix:x:89:89::/var/spool/postfix:/sbin/nologin
chrony:x:998:995::/var/lib/chrony:/sbin/nologin
nginx:x:997:994:nginx user:/var/cache/nginx:/sbin/nologin
apache:x:48:48:Apache:/usr/share/httpd:/sbin/nologin
[root@HIGDQEEMO63U9NR elasticsearch]#
新增组和用户,uid 与 gid 在同一集群的各节点中必须保持一致:
[root@HIGDQEEMO63U9NR elasticsearch]# groupadd -g 1001 elasticsearch
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# useradd -r elasticsearch -u 993 -g elasticsearch
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# passwd elasticsearch
Changing password for user elasticsearch.
New password:
BAD PASSWORD: The password contains the user name in some form
Retype new password:
passwd: all authentication tokens updated successfully.
[root@HIGDQEEMO63U9NR elasticsearch]#
准备文件夹:
[root@HIGDQEEMO63U9NR elasticsearch]# mkdir -p /data/es-data
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# mkdir -p /var/log/elasticsearch
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# chown -R elasticsearch.elasticsearch /usr/local/elasticsearch/
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# chown -R elasticsearch.elasticsearch /data/es-data
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# chown -R elasticsearch.elasticsearch /var/log/elasticsearch
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]# ls -l
total 612
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:05 bin
drwxr-xr-x 1 elasticsearch elasticsearch 512 Sep 3 11:14 config
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:05 jdk
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:04 lib
-rw-r--r-- 1 elasticsearch elasticsearch 3860 Aug 26 16:58 LICENSE.txt
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:02 logs
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:05 modules
-rw-r--r-- 1 elasticsearch elasticsearch 615722 Aug 26 17:02 NOTICE.txt
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:02 plugins
-rw-r--r-- 1 elasticsearch elasticsearch 2710 Aug 26 16:57 README.asciidoc
[root@HIGDQEEMO63U9NR elasticsearch]#
[root@HIGDQEEMO63U9NR elasticsearch]#
修改配置文件
查看配置文件:
cat config/elasticsearch.yml
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
#node.name: node-1
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
#path.data: /path/to/data
#
# Path to log files:
#
#path.logs: /path/to/logs
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# By default Elasticsearch is only accessible on localhost. Set a different
# address here to expose this node on the network:
#
#network.host: 192.168.0.1
#
# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#
#http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
#cluster.initial_master_nodes: ["node-1", "node-2"]
#
# For more information, consult the discovery and cluster formation module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
修改配置文件:
vi config/elasticsearch.yml
下面几个参数改一下:
cluster.name: my-application
node.name: node-1
path.data: /data/es-data
path.logs: /var/log/elasticsearch
network.host: 0.0.0.0
http.port: 9200
cluster.initial_master_nodes: ["node-1"]
启动ES
[root@HIGDQEEMO63U9NR elasticsearch]# su elasticsearch
bash-4.2$
bash-4.2$ pwd
/usr/local/elasticsearch
bash-4.2$
bash-4.2$ ls -l
total 612
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:05 bin
drwxr-xr-x 1 elasticsearch elasticsearch 512 Sep 3 11:55 config
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:05 jdk
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:04 lib
-rw-r--r-- 1 elasticsearch elasticsearch 3860 Aug 26 16:58 LICENSE.txt
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:02 logs
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:05 modules
-rw-r--r-- 1 elasticsearch elasticsearch 615722 Aug 26 17:02 NOTICE.txt
drwxr-xr-x 1 elasticsearch elasticsearch 512 Aug 26 17:02 plugins
-rw-r--r-- 1 elasticsearch elasticsearch 2710 Aug 26 16:57 README.asciidoc
bash-4.2$
bash-4.2$ ./bin/elasticsearch -d
warning: usage of JAVA_HOME is deprecated, use ES_JAVA_HOME
Future versions of Elasticsearch will require Java 11; your Java version from [/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre] does not meet this requirement. Consider switching to a distribution of Elasticsearch with a bundled JDK. If you are already using a distribution with a bundled JDK, ensure the JAVA_HOME environment variable is not set.
warning: usage of JAVA_HOME is deprecated, use ES_JAVA_HOME
Future versions of Elasticsearch will require Java 11; your Java version from [/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre] does not meet this requirement. Consider switching to a distribution of Elasticsearch with a bundled JDK. If you are already using a distribution with a bundled JDK, ensure the JAVA_HOME environment variable is not set.
bash-4.2$
bash-4.2$ ERROR: [1] bootstrap checks failed. You must address the points described in the following [1] lines before starting Elasticsearch.
bootstrap check failure [1] of [1]: system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk
ERROR: Elasticsearch did not exit normally - check the logs at /var/log/elasticsearch/my-application.log
bash-4.2$
报错了。
修改环境配置文件 /etc/profile,新增两行:
ES_JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre
export ES_JAVA_HOME
修改 ES 配置文件 config/elasticsearch.yml,新增两行:
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
再次启动ES成功。
打开浏览器,访问 http://127.0.0.1:9200 ,返回:
{
"name" : "node-1",
"cluster_name" : "my-application",
"cluster_uuid" : "QSDPIoSwQ8C7RILpVkU3cQ",
"version" : {
"number" : "7.14.1",
"build_flavor" : "default",
"build_type" : "tar",
"build_hash" : "66b55ebfa59c92c15db3f69a335d500018b3331e",
"build_date" : "2021-08-26T09:01:05.390870785Z",
"build_snapshot" : false,
"lucene_version" : "8.9.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
或者命令行操作:
bash-4.2$ curl 127.0.0.1:9200
{
"name" : "node-1",
"cluster_name" : "my-application",
"cluster_uuid" : "QSDPIoSwQ8C7RILpVkU3cQ",
"version" : {
"number" : "7.14.1",
"build_flavor" : "default",
"build_type" : "tar",
"build_hash" : "66b55ebfa59c92c15db3f69a335d500018b3331e",
"build_date" : "2021-08-26T09:01:05.390870785Z",
"build_snapshot" : false,
"lucene_version" : "8.9.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
bash-4.2$
bash-4.2$ curl 127.0.0.1:9200/_cat/health?v
epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1630653511 07:18:31 my-application green 1 1 1 1 0 0 0 0 - 100.0%
bash-4.2$
bash-4.2$ curl 127.0.0.1:9200/_cat/nodes?v
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
10.8.17.174 21 90 12 0.52 0.58 0.59 cdfhilmrstw * node-1
bash-4.2$
安装Kibana
在 官网 下载 kibana 软件包。 如果觉得新版不好用,可以下载 历史版本 。
解压时间比较长,耐心等待完成:
[root@HIGDQEEMO63U9NR ~]# mv /mnt/d/kibana-7.14.1-linux-x86_64.tar.gz /usr/local/src/kibana-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR ~]#
[root@HIGDQEEMO63U9NR ~]# cd /usr/local/src
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# tar -zxvf kibana-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# ls -l
total 685952
-rwxrwxrwx 1 root root 344049417 Sep 3 10:48 elasticsearch-7.14.1-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 71403603 Sep 2 14:47 kafka_2.13-2.8.0.tgz
drwxr-xr-x 1 root root 512 Sep 3 15:40 kibana-7.14.1-linux-x86_64
-rwxrwxrwx 1 root root 285786508 Sep 3 15:27 kibana-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# mv /usr/local/src/kibana-7.14.1-linux-x86_64 /usr/local/kibana
[root@HIGDQEEMO63U9NR src]# cd /usr/local/kibana
配置准备
kibana不允许使用root用户启动,需要准备用户:
[root@HIGDQEEMO63U9NR kibana]# groupadd -g 1002 kibana
[root@HIGDQEEMO63U9NR kibana]#
[root@HIGDQEEMO63U9NR kibana]# useradd -r kibana -u 992 -g kibana
[root@HIGDQEEMO63U9NR kibana]#
[root@HIGDQEEMO63U9NR kibana]# passwd kibana
Changing password for user kibana.
New password:
BAD PASSWORD: The password is shorter than 8 characters
Retype new password:
passwd: all authentication tokens updated successfully.
[root@HIGDQEEMO63U9NR kibana]# chown -R kibana.kibana /usr/local/kibana
查看配置文件:
cat config/kibana.yml
# Kibana is served by a back end server. This setting specifies the port to use.
#server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
#server.host: "localhost"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
#server.basePath: ""
# Specifies whether Kibana should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# This setting was effectively always `false` before Kibana 6.3 and will
# default to `true` starting in Kibana 7.0.
#server.rewriteBasePath: false
# Specifies the public URL at which Kibana is available for end users. If
# `server.basePath` is configured this URL should end with the same basePath.
#server.publicBaseUrl: ""
# The maximum payload size in bytes for incoming server requests.
#server.maxPayload: 1048576
# The Kibana server's name. This is used for display purposes.
#server.name: "your-hostname"
# The URLs of the Elasticsearch instances to use for all your queries.
#elasticsearch.hosts: ["http://localhost:9200"]
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
#kibana.index: ".kibana"
# The default application to load.
#kibana.defaultAppId: "home"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "kibana_system"
#elasticsearch.password: "pass"
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files are used to verify the identity of Kibana to Elasticsearch and are required when
# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 30000
# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
#elasticsearch.logQueries: false
# Specifies the path where Kibana creates the process ID file.
#pid.file: /run/kibana/kibana.pid
# Enables you to specify a file where Kibana stores log output.
#logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
#logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000
# Specifies locale to be used for all localizable strings, dates and number formats.
# Supported languages are the following: English - en , by default , Chinese - zh-CN .
#i18n.locale: "en"
编辑配置文件:
vi config/kibana.yml
server.port: 5601
server.host: "0.0.0.0"
server.name: "your-hostname"
elasticsearch.hosts: ["http://localhost:9200"]
kibana.index: ".kibana"
i18n.locale: "zh-CN"
启动
切换用户,执行:
su kibana
./bin/kibana 1>/dev/null 2>&1 &
打开浏览器,访问 127.0.0.1:5601 。页面的具体操作可参考官方入门文档: https://www.elastic.co/guide/en/kibana/current/get-started.html
创建文档
我们调用ES服务创建文档:
curl -H "Content-Type: application/json" -XPUT 'localhost:9200/index_test/test_type/1' -d '{"name": "zhangsan","age":"12"}'
[root@HIGDQEEMO63U9NR kafka]# curl -H "Content-Type: application/json" -XPUT 'localhost:9200/index_test/test_type/6' -d '{"name": "lisi","age":"15"}'
{"_index":"index_test","_type":"test_type","_id":"6","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"_seq_no":11,"_primary_term":1}
[root@HIGDQEEMO63U9NR kafka]#
用kibana看一下效果:
{
"_index": "index_test",
"_type": "test_type",
"_id": "1",
"_score": 1,
"_source": {
"name": "zhangsan",
"age": "12"
},
"fields": {
"age.keyword": [
"12"
],
"name": [
"zhangsan"
],
"name.keyword": [
"zhangsan"
],
"age": [
"12"
]
}
}
停止
查找 kibana 进程号,然后停止(建议先用 kill 发送 TERM 信号让进程正常退出,无效时再用 kill -9 强制结束):
ps aux
kill -9 1212
kibana-5.6.16安装
上面7.14版本比较难用,换成 5.6.16 版本:
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# mv /mnt/d/kibana-5.6.16-linux-x86_64.tar.gz /usr/local/src/kibana-5.6.16-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# ls -l
total 739008
-rwxrwxrwx 1 root root 344049417 Sep 3 10:48 elasticsearch-7.14.1-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 71403603 Sep 2 14:47 kafka_2.13-2.8.0.tgz
-rwxrwxrwx 1 root root 53468981 Sep 3 17:00 kibana-5.6.16-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 285786508 Sep 3 15:27 kibana-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# tar -zxf kibana-5.6.16-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]# ls -l
total 739008
-rwxrwxrwx 1 root root 344049417 Sep 3 10:48 elasticsearch-7.14.1-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 71403603 Sep 2 14:47 kafka_2.13-2.8.0.tgz
drwxrwxr-x 1 1000 1000 512 Mar 13 2019 kibana-5.6.16-linux-x86_64
-rwxrwxrwx 1 root root 53468981 Sep 3 17:00 kibana-5.6.16-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 285786508 Sep 3 15:27 kibana-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# mv kibana-5.6.16-linux-x86_64 /usr/local/kibana-5.6.16
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# cd /usr/local/kibana-5.6.16/
[root@HIGDQEEMO63U9NR kibana-5.6.16]#
[root@HIGDQEEMO63U9NR kibana-5.6.16]# chown -R kibana.kibana /usr/local/kibana-5.6.16
[root@HIGDQEEMO63U9NR kibana-5.6.16]#
[root@HIGDQEEMO63U9NR kibana-5.6.16]# ls -l
total 921
drwxr-xr-x 1 kibana kibana 512 Mar 13 2019 bin
drwxrwxr-x 1 kibana kibana 512 Mar 13 2019 config
drwxrwxr-x 1 kibana kibana 512 Mar 13 2019 data
-rw-rw-r-- 1 kibana kibana 562 Mar 13 2019 LICENSE.txt
drwxrwxr-x 1 kibana kibana 512 Mar 13 2019 node
drwxrwxr-x 1 kibana kibana 512 Mar 13 2019 node_modules
-rw-rw-r-- 1 kibana kibana 929825 Mar 13 2019 NOTICE.txt
drwxrwxr-x 1 kibana kibana 512 Mar 13 2019 optimize
-rw-rw-r-- 1 kibana kibana 722 Mar 13 2019 package.json
drwxrwxr-x 1 kibana kibana 512 Mar 13 2019 plugins
-rw-rw-r-- 1 kibana kibana 3993 Mar 13 2019 README.txt
drwxr-xr-x 1 kibana kibana 512 Mar 13 2019 src
drwxrwxr-x 1 kibana kibana 512 Mar 13 2019 ui_framework
drwxr-xr-x 1 kibana kibana 512 Mar 13 2019 webpackShims
[root@HIGDQEEMO63U9NR kibana-5.6.16]#
[root@HIGDQEEMO63U9NR kibana-5.6.16]# vi config/kibana.yml
[root@HIGDQEEMO63U9NR kibana-5.6.16]#
[root@HIGDQEEMO63U9NR kibana-5.6.16]# su kibana
bash-4.2$
bash-4.2$ ./bin/kibana 1>/dev/null 2>&1 &
[1] 17353
bash-4.2$
访问页面,报错了:
Elasticsearch plugin is red
This version of Kibana requires Elasticsearch v5.6.16 on all nodes. I found the following incompatible nodes in your cluster: v7.14.1 @ 10.8.17.174:9200 (10.8.17.174)
Elasticsearch、Kibana、Logstash 配合使用时,各组件版本需要相互兼容。具体对应关系可查看官方的版本兼容表 Compatibility with Elasticsearch 。
安装Logstash
在 官网 下载 Logstash 软件包。
安装:
[root@HIGDQEEMO63U9NR ~]#
[root@HIGDQEEMO63U9NR ~]# mv /mnt/d/logstash-7.14.1-linux-x86_64.tar.gz /usr/local/src/logstash-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR ~]#
[root@HIGDQEEMO63U9NR ~]# cd /usr/local/src
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# ls -l
total 1094272
-rwxrwxrwx 1 root root 344049417 Sep 3 10:48 elasticsearch-7.14.1-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 71403603 Sep 2 14:47 kafka_2.13-2.8.0.tgz
-rwxrwxrwx 1 root root 53468981 Sep 3 17:00 kibana-5.6.16-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 285786508 Sep 3 15:27 kibana-7.14.1-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 363072947 Sep 7 09:45 logstash-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# tar -zxf logstash-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# ls -l
total 1094272
-rwxrwxrwx 1 root root 344049417 Sep 3 10:48 elasticsearch-7.14.1-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 71403603 Sep 2 14:47 kafka_2.13-2.8.0.tgz
-rwxrwxrwx 1 root root 53468981 Sep 3 17:00 kibana-5.6.16-linux-x86_64.tar.gz
-rwxrwxrwx 1 root root 285786508 Sep 3 15:27 kibana-7.14.1-linux-x86_64.tar.gz
drwxr-xr-x 1 root root 512 Sep 7 09:49 logstash-7.14.1
-rwxrwxrwx 1 root root 363072947 Sep 7 09:45 logstash-7.14.1-linux-x86_64.tar.gz
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# mv /usr/local/src/logstash-7.14.1 /usr/local/logstash
[root@HIGDQEEMO63U9NR src]#
[root@HIGDQEEMO63U9NR src]# cd /usr/local/logstash
[root@HIGDQEEMO63U9NR logstash]#
[root@HIGDQEEMO63U9NR logstash]# pwd
/usr/local/logstash
[root@HIGDQEEMO63U9NR logstash]# ls -l
total 468
drwxr-xr-x 1 root root 512 Sep 7 09:48 bin
drwxr-xr-x 1 root root 512 Sep 7 09:48 config
-rw-r--r-- 1 root wheel 2276 Aug 26 15:57 CONTRIBUTORS
drwxr-xr-x 1 root wheel 512 Aug 26 15:57 data
-rw-r--r-- 1 root wheel 4053 Aug 26 15:57 Gemfile
-rw-r--r-- 1 root wheel 25009 Aug 26 15:57 Gemfile.lock
drwxr-xr-x 1 root root 512 Sep 7 09:49 jdk
drwxr-xr-x 1 root root 512 Sep 7 09:48 lib
-rw-r--r-- 1 root wheel 13675 Aug 26 15:57 LICENSE.txt
drwxr-xr-x 1 root root 512 Sep 7 09:48 logstash-core
drwxr-xr-x 1 root root 512 Sep 7 09:48 logstash-core-plugin-api
drwxr-xr-x 1 root root 512 Sep 7 09:48 modules
-rw-r--r-- 1 root wheel 424030 Aug 26 15:57 NOTICE.TXT
drwxr-xr-x 1 root root 512 Sep 7 09:48 tools
drwxr-xr-x 1 root root 512 Sep 7 09:49 vendor
drwxr-xr-x 1 root root 512 Sep 7 09:49 x-pack
[root@HIGDQEEMO63U9NR logstash]#
准备kafka配置:
[root@HIGDQEEMO63U9NR logstash]# ls -l ./config/
total 32
-rw-r--r-- 1 root wheel 2034 Aug 26 15:57 jvm.options
-rw-r--r-- 1 root wheel 7437 Aug 26 15:57 log4j2.properties
-rw-r--r-- 1 root wheel 342 Aug 26 15:57 logstash-sample.conf
-rw-r--r-- 1 root wheel 11196 Aug 26 15:57 logstash.yml
-rw-r--r-- 1 root wheel 3693 Aug 26 15:57 pipelines.yml
-rw-r--r-- 1 root wheel 1696 Aug 26 15:57 startup.options
[root@HIGDQEEMO63U9NR logstash]#
[root@HIGDQEEMO63U9NR logstash]# cat ./config/logstash-sample.conf
# Sample Logstash configuration for creating a simple
# Beats -> Logstash -> Elasticsearch pipeline.
input {
beats {
port => 5044
}
}
output {
elasticsearch {
hosts => ["http://localhost:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
#user => "elastic"
#password => "changeme"
}
}
[root@HIGDQEEMO63U9NR logstash]# vi config/kafka-logstash.conf
写入:
input {
kafka {
bootstrap_servers => "localhost:9092"
topics => ["test"]
codec => "json"
}
}
output {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
index => "index_test-%{+YYYY.MM.dd}"
}
}
首次执行,我们可以看一下输出(别执行,这个是前台运行,会持续在终端输出,后台运行方法看下面):
./bin/logstash -f ./config/kafka-logstash.conf &
[root@HIGDQEEMO63U9NR logstash]# ls -l ./config/
total 32
-rw-r--r-- 1 root wheel 2034 Aug 26 15:57 jvm.options
-rw-r--r-- 1 root root 250 Sep 7 10:11 kafka-logstash.conf
-rw-r--r-- 1 root wheel 7437 Aug 26 15:57 log4j2.properties
-rw-r--r-- 1 root wheel 342 Aug 26 15:57 logstash-sample.conf
-rw-r--r-- 1 root wheel 11196 Aug 26 15:57 logstash.yml
-rw-r--r-- 1 root wheel 3693 Aug 26 15:57 pipelines.yml
-rw-r--r-- 1 root wheel 1696 Aug 26 15:57 startup.options
[root@HIGDQEEMO63U9NR logstash]#
[root@HIGDQEEMO63U9NR logstash]#
[root@HIGDQEEMO63U9NR logstash]# ./bin/logstash -f ./config/kafka-logstash.conf &
[27] 18129
[root@HIGDQEEMO63U9NR logstash]# Using JAVA_HOME defined java: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre
WARNING, using JAVA_HOME while Logstash distribution comes with a bundled JDK
/usr/local/logstash/vendor/bundle/jruby/2.5.0/gems/bundler-1.17.3/lib/bundler/rubygems_integration.rb:200: warning: constant Gem::ConfigMap is deprecated
Sending Logstash logs to /usr/local/logstash/logs which is now configured via log4j2.properties
[2021-09-07T10:29:03,866][INFO ][logstash.runner ] Log4j configuration path used is: /usr/local/logstash/config/log4j2.properties
[2021-09-07T10:29:03,883][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"7.14.1", "jruby.version"=>"jruby 9.2.19.0 (2.5.8) 2021-06-15 55810c552b OpenJDK 64-Bit Server VM 25.302-b08 on 1.8.0_302-b08 +indy +jit [linux-x86_64]"}
[2021-09-07T10:29:04,380][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2021-09-07T10:29:06,314][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2021-09-07T10:29:06,989][INFO ][org.reflections.Reflections] Reflections took 89 ms to scan 1 urls, producing 120 keys and 417 values
[2021-09-07T10:29:08,619][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["http://127.0.0.1:9200"]}
[2021-09-07T10:29:08,990][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://127.0.0.1:9200/]}}
[2021-09-07T10:29:09,174][WARN ][logstash.outputs.elasticsearch][main] Restored connection to ES instance {:url=>"http://127.0.0.1:9200/"}
[2021-09-07T10:29:09,304][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch version determined (7.14.1) {:es_version=>7}
[2021-09-07T10:29:09,318][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2021-09-07T10:29:09,518][INFO ][logstash.outputs.elasticsearch][main] Using a default mapping template {:es_version=>7, :ecs_compatibility=>:disabled}
[2021-09-07T10:29:09,571][INFO ][logstash.javapipeline ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>8, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>1000, "pipeline.sources"=>["/usr/local/logstash/config/kafka-logstash.conf"], :thread=>"#<Thread:0x7ef081b run>"}
[2021-09-07T10:29:09,617][INFO ][logstash.outputs.elasticsearch][main] Installing Elasticsearch template {:name=>"logstash"}
[2021-09-07T10:29:10,622][INFO ][logstash.javapipeline ][main] Pipeline Java execution initialization time {"seconds"=>1.05}
[2021-09-07T10:29:10,657][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
[2021-09-07T10:29:10,753][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2021-09-07T10:29:10,806][INFO ][org.apache.kafka.clients.consumer.ConsumerConfig][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] ConsumerConfig values:
allow.auto.create.topics = true
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [localhost:9092]
check.crcs = true
client.dns.lookup = default
client.id = logstash-0
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = true
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = logstash
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
isolation.level = read_uncommitted
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 300000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 50
reconnect.backoff.ms = 50
request.timeout.ms = 40000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
session.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
[2021-09-07T10:29:10,938][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] Kafka version: 2.5.1
[2021-09-07T10:29:10,938][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] Kafka commitId: 0efa8fb0f4c73d92
[2021-09-07T10:29:10,939][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] Kafka startTimeMs: 1630981750936
[2021-09-07T10:29:10,949][INFO ][org.apache.kafka.clients.consumer.KafkaConsumer][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Subscribed to topic(s): test
[2021-09-07T10:29:11,517][INFO ][org.apache.kafka.clients.Metadata][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Cluster ID: zdAxMkfKRHCkUyoDyuHXlg
[2021-09-07T10:29:11,556][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Discovered group coordinator 127.0.0.1:9092 (id: 2147483647 rack: null)
[2021-09-07T10:29:11,562][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2021-09-07T10:29:11,599][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Join group failed with org.apache.kafka.common.errors.MemberIdRequiredException: The group member needs to have a valid member id before actually entering a consumer group
[2021-09-07T10:29:11,600][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2021-09-07T10:29:11,634][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Finished assignment for group at generation 1: {logstash-0-309b885c-a3af-4d52-99bd-26aff0d95764=Assignment(partitions=[test-0])}
[2021-09-07T10:29:11,684][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Successfully joined group with generation 1
[2021-09-07T10:29:11,694][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Adding newly assigned partitions: test-0
[2021-09-07T10:29:11,715][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Found no committed offset for partition test-0
[2021-09-07T10:29:11,792][INFO ][org.apache.kafka.clients.consumer.internals.SubscriptionState][main][b944b964041f8a36a35ee609b7e124c01e2dc8708d6ff36871ef6a22036d94a2] [Consumer clientId=logstash-0, groupId=logstash] Resetting offset for partition test-0 to offset 6.
[root@HIGDQEEMO63U9NR logstash]#
以后启动时可以将标准输出和错误输出重定向到 /dev/null,在后台静默运行:
./bin/logstash -f ./config/kafka-logstash.conf 1>/dev/null 2>&1 &
向Kafka发送一条信息:
[root@HIGDQEEMO63U9NR logstash]# cd ../kafka/
[root@HIGDQEEMO63U9NR kafka]#
[root@HIGDQEEMO63U9NR kafka]# bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic test
>
>{"name":"zhangsan","age":"12"}
>
用Kibana看一下效果:
{
"_index": "index_test-2021.09.07",
"_type": "_doc",
"_id": "CvMkvnsBn3_jzcdThRCA",
"_score": 1,
"_source": {
"age": "12",
"@timestamp": "2021-09-07T02:44:28.762Z",
"name": "zhangsan",
"@version": "1"
},
"fields": {
"@timestamp": [
"2021-09-07T02:44:28.762Z"
],
"age.keyword": [
"12"
],
"name": [
"zhangsan"
],
"@version": [
"1"
],
"name.keyword": [
"zhangsan"
],
"@version.keyword": [
"1"
],
"age": [
"12"
]
}
}
shell脚本实现
下面是另外一种处理方式:用 shell 脚本来启动 Logstash。
解压,修改配置文件:
tar -zxvf logstash-7.12.1-linux-x86_64.tar.gz
cd logstash-7.12.1/
cp config/logstash-sample.conf config/logstash.conf
vim startup.sh
编辑如下内容,保存退出
#!/bin/bash
nohup ./bin/logstash -f config/logstash.conf &
chmod +x startup.sh
执行 vim config/logstash.conf ,添加配置如下:
input {
beats {
port => 5044
}
tcp {
mode => "server"
host => "0.0.0.0" # 允许任意主机发送日志
type => "elk1" # 设定type以区分每个输入源
port => 4567
codec => json_lines # 数据格式
}
}
output {
if [type] == "elk1" {
elasticsearch {
action => "index" # 输出时创建映射
hosts => "192.168.163.132:9200" # ElasticSearch 的地址和端口
index => "elk1-%{+YYYY.MM.dd}" # 指定索引名
codec => "json"
}
}
}
启动 logstash
./startup.sh
查看日志
tail -100f nohup.out
参考资料
在CentOS 7上安装Kafka https://blog.csdn.net/wisgood/article/details/51497329
linux安装java步骤 https://www.cnblogs.com/wjup/p/11041274.html
Kafka消息中间件介绍与安装 https://blog.csdn.net/qq_31279701/article/details/112692707
kafka 相关的命令操作 以及配置server.properties https://blog.csdn.net/weixin_42047790/article/details/106048665
linux下安装kafka https://www.cnblogs.com/marshhu/p/12072446.html
Kafka史上最详细原理总结上 https://www.jianshu.com/p/734cf729d77b
Elasticsearch 节点安装 https://ibaiyang.github.io/blog/elasticsearch/2021/03/10/Elasticsearch-节点安装.html
CentOS 7下安装部署Elasticsearch7.3.1 https://blog.csdn.net/zerokissingthefire/article/details/100104607
Elasticsearch入门,这一篇就够了 https://www.cnblogs.com/sunsky303/p/9438737.html
搭建ELK分布式日志解决方案 + Springboot + logback 输出日志到 ELK https://www.jianshu.com/p/fc74c082545c