diff --git a/.github/workflows/ci_build.yml b/.github/workflows/ci_build.yml new file mode 100644 index 000000000..073d46522 --- /dev/null +++ b/.github/workflows/ci_build.yml @@ -0,0 +1,43 @@ +name: KnowStreaming Build + +on: + push: + branches: [ "master", "ve_3.x", "ve_demo_3.x" ] + pull_request: + branches: [ "master", "ve_3.x", "ve_demo_3.x" ] + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Set up JDK 11 + uses: actions/setup-java@v3 + with: + java-version: '11' + distribution: 'temurin' + cache: maven + + - name: Setup Node + uses: actions/setup-node@v1 + with: + node-version: '12.22.12' + + - name: Build With Maven + run: mvn -Prelease-package -Dmaven.test.skip=true clean install -U + + - name: Get KnowStreaming Version + if: ${{ success() }} + run: | + version=`mvn -Dexec.executable='echo' -Dexec.args='${project.version}' --non-recursive exec:exec -q` + echo "VERSION=${version}" >> $GITHUB_ENV + + - name: Upload Binary Package + if: ${{ success() }} + uses: actions/upload-artifact@v3 + with: + name: KnowStreaming-${{ env.VERSION }}.tar.gz + path: km-dist/target/KnowStreaming-${{ env.VERSION }}.tar.gz diff --git a/README.md b/README.md index 8f5262686..d95a05874 100644 --- a/README.md +++ b/README.md @@ -90,6 +90,7 @@ - [单机部署手册](docs/install_guide/单机部署手册.md) - [版本升级手册](docs/install_guide/版本升级手册.md) - [本地源码启动手册](docs/dev_guide/本地源码启动手册.md) +- [页面无数据排查手册](docs/dev_guide/页面无数据排查手册.md) **`产品相关手册`** @@ -155,3 +156,4 @@ PS: 提问请尽量把问题一次性描述清楚,并告知环境信息情况 ## Star History [![Star History Chart](https://api.star-history.com/svg?repos=didi/KnowStreaming&type=Date)](https://star-history.com/#didi/KnowStreaming&Date) + diff --git "a/docs/contribute_guide/\350\264\241\347\214\256\346\214\207\345\215\227.md" "b/docs/contribute_guide/\350\264\241\347\214\256\346\214\207\345\215\227.md" index 37cf89bc1..8ea7c5516 100644 --- "a/docs/contribute_guide/\350\264\241\347\214\256\346\214\207\345\215\227.md" +++ "b/docs/contribute_guide/\350\264\241\347\214\256\346\214\207\345\215\227.md" @@ -47,14 +47,13 @@ **1、`Header` 规范** -`Header` 格式为 `[Type]Message(#IssueID)`, 主要有三部分组成,分别是`Type`、`Message`、`IssueID`, +`Header` 格式为 `[Type]Message`,主要由两部分组成,分别是`Type`、`Message`: - `Type`:说明这个提交是哪一个类型的,比如有 Bugfix、Feature、Optimize等; - `Message`:说明提交的信息,比如修复xx问题; -- `IssueID`:该提交,关联的Issue的编号; -实际例子:[`[Bugfix]修复新接入的集群,Controller-Host不显示的问题(#927)`](https://github.com/didi/KnowStreaming/pull/933/commits) +实际例子:[`[Bugfix]修复新接入的集群,Controller-Host不显示的问题`](https://github.com/didi/KnowStreaming/pull/933/commits) @@ -67,7 +66,7 @@ **3、实际例子** ``` -[Optimize]优化 MySQL & ES 测试容器的初始化(#906) +[Optimize]优化 MySQL & ES 测试容器的初始化 主要的变更 1、knowstreaming/knowstreaming-manager 容器; @@ -138,7 +137,7 @@ 1. 切换到主分支:`git checkout github_master`; 2. 主分支拉最新代码:`git pull`; 3. 基于主分支拉新分支:`git checkout -b fix_928`; -4. 提交代码,安装commit的规范进行提交,例如:`git commit -m "[Optimize]优化xxx问题(#928)"`; +4. 提交代码,按照commit的规范进行提交,例如:`git commit -m "[Optimize]优化xxx问题"`; 5. 提交到自己远端仓库:`git push --set-upstream origin fix_928`; 6. `GitHub` 页面发起 `Pull Request` 请求,管理员合入主仓库。这部分详细见下一节; @@ -162,6 +161,8 @@ ### 4.1、如何将多个 Commit-Log 合并为一个?
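+下面是一个使用 `git rebase -i` 合并 commit 的假设示例(示例假设要把最近 3 个 commit 合并为 1 个,`HEAD~3` 请按实际数量调整):
+
+```bash
+# 对最近 3 个 commit 发起交互式 rebase
+git rebase -i HEAD~3
+# 在弹出的编辑器中:保留第一行的 pick,将其余行的 pick 改为 squash(或 s)
+# 保存退出后,按提示编辑合并后的 commit message
+
+# 确认合并结果
+git log --oneline -3
+
+# 若这些 commit 此前已推送到自己 fork 的远端分支,还需要强制推送覆盖
+git push -f
+```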
-可以使用 `git rebase -i` 命令进行解决。 +也可以不将多个 commit 合并为一个;如果需要合并,可以使用 `git rebase -i` 命令,具体操作见上面的示例。 + + + diff --git a/docs/dev_guide/assets/connect_jmx_failed/check_jmx_opened.jpg b/docs/dev_guide/assets/connect_jmx_failed/check_jmx_opened.jpg deleted file mode 100644 index 1890983c9..000000000 Binary files a/docs/dev_guide/assets/connect_jmx_failed/check_jmx_opened.jpg and /dev/null differ diff --git "a/docs/dev_guide/\346\216\245\345\205\245ZK\345\270\246\350\256\244\350\257\201Kafka\351\233\206\347\276\244.md" "b/docs/dev_guide/\346\216\245\345\205\245ZK\345\270\246\350\256\244\350\257\201Kafka\351\233\206\347\276\244.md" new file mode 100644 index 000000000..294a4742f --- /dev/null +++ "b/docs/dev_guide/\346\216\245\345\205\245ZK\345\270\246\350\256\244\350\257\201Kafka\351\233\206\347\276\244.md" @@ -0,0 +1,180 @@ + +![Logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png) + +--- + +# 接入 ZK 带认证的 Kafka 集群 + +- [接入 ZK 带认证的 Kafka 集群](#接入-zk-带认证的-kafka-集群) + - [1、简要说明](#1简要说明) + - [2、支持 Digest-MD5 认证](#2支持-digest-md5-认证) + - [3、支持 Kerberos 认证](#3支持-kerberos-认证) + + + +## 1、简要说明 + +- 1、当前 KnowStreaming 暂无页面可以直接配置 ZK 的认证信息,但是 KnowStreaming 的后端预留了 MySQL 的字段用于存储 ZK 的认证信息,用户可通过将认证信息存储至该字段,从而实现接入 ZK 带认证的 Kafka 集群。 +  + +- 2、该字段位于 MySQL 库 ks_km_physical_cluster 表中的 zk_properties 字段,该字段的格式是: +```json +{ + "openSecure": false, # 是否开启认证,开启时配置为true + "sessionTimeoutUnitMs": 15000, # session超时时间 + "requestTimeoutUnitMs": 5000, # request超时时间 + "otherProps": { # 其他配置,认证信息主要配置在该位置 + "zookeeper.sasl.clientconfig": "kafkaClusterZK1" # 例子 + } +} +``` + +- 3、实际生效的代码位置 +```java +// 代码位置:https://github.com/didi/KnowStreaming/blob/master/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminZKClient.java + +kafkaZkClient = KafkaZkClient.apply( + clusterPhy.getZookeeper(), + zkConfig.getOpenSecure(), // 是否开启认证,开启时配置为true + zkConfig.getSessionTimeoutUnitMs(), // session超时时间 + zkConfig.getRequestTimeoutUnitMs(), // request超时时间 + 5, + Time.SYSTEM, + "KS-ZK-ClusterPhyId-" + clusterPhyId, + "KS-ZK-SessionExpireListener-clusterPhyId-" + clusterPhyId, + Option.apply("KS-ZK-ClusterPhyId-" + clusterPhyId), + Option.apply(this.getZKConfig(clusterPhyId, zkConfig.getOtherProps())) // 其他配置,认证信息主要配置在该位置 +); +``` + +- 4、SQL例子 +```sql +update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK1" } }' where id=集群1的ID; +``` + + +- 5、zk_properties 字段不能覆盖所有的场景,所以实际使用过程中还可能需要在此基础之上,进行其他的调整。比如,`Digest-MD5 认证` 和 `Kerberos 认证` 都还需要修改启动脚本等。后续看能否通过修改 ZK 客户端的源码,使得 ZK 认证的相关配置能和 Kafka 认证的配置一样方便。 + + +--- + + +## 2、支持 Digest-MD5 认证 + +1. 假设你有两个 Kafka 集群, 对应两个 ZK 集群; +2. 两个 ZK 集群的认证信息如下所示 + +```bash +# ZK1集群的认证信息,这里的 kafkaClusterZK1 可以是随意的名称,只需要和后续数据库的配置对应上即可。 +kafkaClusterZK1 { + org.apache.zookeeper.server.auth.DigestLoginModule required + username="zk1" + password="zk1-passwd"; +}; + +# ZK2集群的认证信息,这里的 kafkaClusterZK2 可以是随意的名称,只需要和后续数据库的配置对应上即可。 +kafkaClusterZK2 { + org.apache.zookeeper.server.auth.DigestLoginModule required + username="zk2" + password="zk2-passwd"; +}; +``` + +3. 将这两个ZK集群的认证信息存储到 `/xxx/zk_client_jaas.conf` 文件中,文件中的内容如下所示: + +```bash +kafkaClusterZK1 { + org.apache.zookeeper.server.auth.DigestLoginModule required + username="zk1" + password="zk1-passwd"; +}; + +kafkaClusterZK2 { + org.apache.zookeeper.server.auth.DigestLoginModule required + username="zk2" + password="zk2-passwd"; +}; + +``` + +4.
修改 KnowStreaming 的启动脚本 + +```bash +# `KnowStreaming/bin/startup.sh` 中的 47 行的 JAVA_OPT 中追加如下设置 + +-Djava.security.auth.login.config=/xxx/zk_client_jaas.conf +``` + +5. 修改 KnowStreaming 的表数据 + +```sql +# 这里的 kafkaClusterZK1 要和 /xxx/zk_client_jaas.conf 中的对应上 +update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK1" } }' where id=集群1的ID; + +update ks_km_physical_cluster set zk_properties='{ "openSecure": true, "otherProps": { "zookeeper.sasl.clientconfig": "kafkaClusterZK2" } }' where id=集群2的ID; +``` + +6. 重启 KnowStreaming + + +--- + + +## 3、支持 Kerberos 认证 + +**第一步:查看用户在ZK的ACL** + +假设我们使用的用户是 `kafka` 这个用户。 + +- 1、查看 server.properties 的配置的 zookeeper.connect 的地址; +- 2、使用 `zkCli.sh -serve zookeeper.connect的地址` 登录到ZK页面; +- 3、ZK页面上,执行命令 `getAcl /kafka` 查看 `kafka` 用户的权限; + +此时,我们可以看到如下信息: +![watch_user_acl.png](assets/support_kerberos_zk/watch_user_acl.png) + +`kafka` 用户需要的权限是 `cdrwa`。如果用户没有 `cdrwa` 权限的话,需要创建用户并授权,授权命令为:`setAcl` + + +**第二步:创建Kerberos的keytab并修改 KnowStreaming 主机** + +- 1、在 Kerberos 的域中创建 `kafka/_HOST` 的 `keytab`,并导出。例如:`kafka/dbs-kafka-test-8-53`; +- 2、导出 keytab 后上传到安装 KS 的机器的 `/etc/keytab` 下; +- 3、在 KS 机器上,执行 `kinit -kt zookeepe.keytab kafka/dbs-kafka-test-8-53` 看是否能进行 `Kerberos` 登录; +- 4、可以登录后,配置 `/opt/zookeeper.jaas` 文件,例子如下: +```bash +Client { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + storeKey=false + serviceName="zookeeper" + keyTab="/etc/keytab/zookeeper.keytab" + principal="kafka/dbs-kafka-test-8-53@XXX.XXX.XXX"; +}; +``` +- 5、需要配置 `KDC-Server` 对 `KnowStreaming` 的机器开通防火墙,并在KS的机器 `/etc/host/` 配置 `kdc-server` 的 `hostname`。并将 `krb5.conf` 导入到 `/etc` 下; + + +**第三步:修改 KnowStreaming 的配置** + +- 1、修改数据库,开启ZK的认证 +```sql +update ks_km_physical_cluster set zk_properties='{ "openSecure": true }' where id=集群1的ID; +``` + +- 2、在 `KnowStreaming/bin/startup.sh` 中的47行的JAVA_OPT中追加如下设置 +```bash +-Dsun.security.krb5.debug=true -Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/opt/zookeeper.jaas +``` + +- 3、重启KS集群后再 start.out 中看到如下信息,则证明Kerberos配置成功; + +![success_1.png](assets/support_kerberos_zk/success_1.png) + +![success_2.png](assets/support_kerberos_zk/success_2.png) + + +**第四步:补充说明** + +- 1、多Kafka集群如果用的是一样的Kerberos域的话,只需在每个`ZK`中给`kafka`用户配置`crdwa`权限即可,这样集群初始化的时候`zkclient`是都可以认证; +- 2、多个Kerberos域暂时未适配; \ No newline at end of file diff --git "a/docs/dev_guide/\346\224\257\346\214\201Kerberos\350\256\244\350\257\201\347\232\204ZK.md" "b/docs/dev_guide/\346\224\257\346\214\201Kerberos\350\256\244\350\257\201\347\232\204ZK.md" deleted file mode 100644 index 116643ba6..000000000 --- "a/docs/dev_guide/\346\224\257\346\214\201Kerberos\350\256\244\350\257\201\347\232\204ZK.md" +++ /dev/null @@ -1,69 +0,0 @@ - -## 支持Kerberos认证的ZK - - -### 1、修改 KnowStreaming 代码 - -代码位置:`src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminZKClient.java` - -将 `createZKClient` 的 `135行 的 false 改为 true -![need_modify_code.png](assets/support_kerberos_zk/need_modify_code.png) - - -修改完后重新进行打包编译,打包编译见:[打包编译](https://github.com/didi/KnowStreaming/blob/master/docs/install_guide/%E6%BA%90%E7%A0%81%E7%BC%96%E8%AF%91%E6%89%93%E5%8C%85%E6%89%8B%E5%86%8C.md -) - - - -### 2、查看用户在ZK的ACL - -假设我们使用的用户是 `kafka` 这个用户。 - -- 1、查看 server.properties 的配置的 zookeeper.connect 的地址; -- 2、使用 `zkCli.sh -serve zookeeper.connect的地址` 登录到ZK页面; -- 3、ZK页面上,执行命令 `getAcl /kafka` 查看 `kafka` 用户的权限; - -此时,我们可以看到如下信息: -![watch_user_acl.png](assets/support_kerberos_zk/watch_user_acl.png) - -`kafka` 用户需要的权限是 
`cdrwa`。如果用户没有 `cdrwa` 权限的话,需要创建用户并授权,授权命令为:`setAcl` - - -### 3、创建Kerberos的keytab并修改 KnowStreaming 主机 - -- 1、在 Kerberos 的域中创建 `kafka/_HOST` 的 `keytab`,并导出。例如:`kafka/dbs-kafka-test-8-53`; -- 2、导出 keytab 后上传到安装 KS 的机器的 `/etc/keytab` 下; -- 3、在 KS 机器上,执行 `kinit -kt zookeepe.keytab kafka/dbs-kafka-test-8-53` 看是否能进行 `Kerberos` 登录; -- 4、可以登录后,配置 `/opt/zookeeper.jaas` 文件,例子如下: -```sql -Client { - com.sun.security.auth.module.Krb5LoginModule required - useKeyTab=true - storeKey=false - serviceName="zookeeper" - keyTab="/etc/keytab/zookeeper.keytab" - principal="kafka/dbs-kafka-test-8-53@XXX.XXX.XXX"; -}; -``` -- 5、需要配置 `KDC-Server` 对 `KnowStreaming` 的机器开通防火墙,并在KS的机器 `/etc/host/` 配置 `kdc-server` 的 `hostname`。并将 `krb5.conf` 导入到 `/etc` 下; - - -### 4、修改 KnowStreaming 的配置 - -- 1、在 `/usr/local/KnowStreaming/KnowStreaming/bin/startup.sh` 中的47行的JAVA_OPT中追加如下设置 -```bash --Dsun.security.krb5.debug=true -Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/opt/zookeeper.jaas -``` - -- 2、重启KS集群后再 start.out 中看到如下信息,则证明Kerberos配置成功; - -![success_1.png](assets/support_kerberos_zk/success_1.png) - -![success_2.png](assets/support_kerberos_zk/success_2.png) - - -### 5、补充说明 - -- 1、多Kafka集群如果用的是一样的Kerberos域的话,只需在每个`ZK`中给`kafka`用户配置`crdwa`权限即可,这样集群初始化的时候`zkclient`是都可以认证; -- 2、当前需要修改代码重新打包才可以支持,后续考虑通过页面支持Kerberos认证的ZK接入; -- 3、多个Kerberos域暂时未适配; \ No newline at end of file diff --git "a/docs/dev_guide/\346\227\240\346\225\260\346\215\256\346\216\222\346\237\245\346\226\207\346\241\243.md" "b/docs/dev_guide/\346\227\240\346\225\260\346\215\256\346\216\222\346\237\245\346\226\207\346\241\243.md" deleted file mode 100644 index fd7886bc8..000000000 --- "a/docs/dev_guide/\346\227\240\346\225\260\346\215\256\346\216\222\346\237\245\346\226\207\346\241\243.md" +++ /dev/null @@ -1,285 +0,0 @@ -## 1、集群接入错误 - -### 1.1、异常现象 - -如下图所示,集群非空时,大概率为地址配置错误导致。 - - - - - -### 1.2、解决方案 - -接入集群时,依据提示的错误,进行相应的解决。例如: - - - -### 1.3、正常情况 - -接入集群时,页面信息都自动正常出现,没有提示错误。 - - - -## 2、JMX连接失败(需使用3.0.1及以上版本) - -### 2.1异常现象 - -Broker列表的JMX Port列出现红色感叹号,则该Broker的JMX连接异常。 - - - - - -#### 2.1.1、原因一:JMX未开启 - -##### 2.1.1.1、异常现象 - -broker列表的JMX Port值为-1,对应Broker的JMX未开启。 - - - -##### 2.1.1.2、解决方案 - -开启JMX,开启流程如下: - -1、修改kafka的bin目录下面的:`kafka-server-start.sh`文件 - -``` -# 在这个下面增加JMX端口的配置 -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" - export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999 -fi -``` - - - -2、修改kafka的bin目录下面对的:`kafka-run-class.sh`文件 - -``` -# JMX settings -if [ -z "$KAFKA_JMX_OPTS" ]; then - KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false - -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=${当前机器的IP}" -fi - -# JMX port to use -if [ $JMX_PORT ]; then - KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT - Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" -fi -``` - - - -3、重启Kafka-Broker。 - - - -#### 2.1.2、原因二:JMX配置错误 - -##### 2.1.2.1、异常现象 - -错误日志: - -``` -# 错误一: 错误提示的是真实的IP,这样的话基本就是JMX配置的有问题了。 -2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999. java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is: - -# 错误二:错误提示的是127.0.0.1这个IP,这个是机器的hostname配置的可能有问题。 -2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999. 
java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is: -``` - - - -##### 2.1.2.2、解决方案 - -开启JMX,开启流程如下: - -1、修改kafka的bin目录下面的:`kafka-server-start.sh`文件 - -``` -# 在这个下面增加JMX端口的配置 -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" - export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999 -fi -``` - - - -2、修改kafka的bin目录下面对的:`kafka-run-class.sh`文件 - -``` -# JMX settings -if [ -z "$KAFKA_JMX_OPTS" ]; then - KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false - -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=${当前机器的IP}" -fi - -# JMX port to use -if [ $JMX_PORT ]; then - KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT - Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" -fi -``` - - - -3、重启Kafka-Broker。 - - - -#### 2.1.3、原因三:JMX开启SSL - -##### 2.1.3.1、解决方案 - - - -#### 2.1.4、原因四:连接了错误IP - -##### 2.1.4.1、异常现象 - -Broker 配置了内外网,而JMX在配置时,可能配置了内网IP或者外网IP,此时`KnowStreaming` 需要连接到特定网络的IP才可以进行访问。 - - 比如:Broker在ZK的存储结构如下所示,我们期望连接到 `endpoints` 中标记为 `INTERNAL` 的地址,但是 `KnowStreaming` 却连接了 `EXTERNAL` 的地址。 - -```json -{ - "listener_security_protocol_map": { - "EXTERNAL": "SASL_PLAINTEXT", - "INTERNAL": "SASL_PLAINTEXT" - }, - "endpoints": [ - "EXTERNAL://192.168.0.1:7092", - "INTERNAL://192.168.0.2:7093" - ], - "jmx_port": 8099, - "host": "192.168.0.1", - "timestamp": "1627289710439", - "port": -1, - "version": 4 -} -``` - -##### 2.1.4.2、解决方案 - -可以手动往`ks_km_physical_cluster`表的`jmx_properties`字段增加一个`useWhichEndpoint`字段,从而控制 `KnowStreaming` 连接到特定的JMX IP及PORT。 - -`jmx_properties`格式: - -```json -{ - "maxConn": 100, // KM对单台Broker的最大JMX连接数 - "username": "xxxxx", //用户名,可以不填写 - "password": "xxxx", // 密码,可以不填写 - "openSSL": true, //开启SSL, true表示开启ssl, false表示关闭 - "useWhichEndpoint": "EXTERNAL" //指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址 -} -``` - - - -SQL例子: - -```sql -UPDATE ks_km_physical_cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false , "useWhichEndpoint": "xxx"}' where id={xxx}; -``` - -### 2.2、正常情况 - -修改完成后,如果看到 JMX PORT这一列全部为绿色,则表示JMX已正常。 - - - - - -## 3、Elasticsearch问题 - -注意:mac系统在执行curl指令时,可能报zsh错误。可参考以下操作。 - -``` -1 进入.zshrc 文件 vim ~/.zshrc -2.在.zshrc中加入 setopt no_nomatch -3.更新配置 source ~/.zshrc -``` - -### 3.1、原因一:缺少索引 - -#### 3.1.1、异常现象 - -报错信息 - -``` -com.didiglobal.logi.elasticsearch.client.model.exception.ESIndexNotFoundException: method [GET], host[http://127.0.0.1:9200], URI [/ks_kafka_broker_metric_2022-10-21,ks_kafka_broker_metric_2022-10-22/_search], status line [HTTP/1.1 404 Not Found] -``` - -curl http://{ES的IP地址}:{ES的端口号}/_cat/indices/ks_kafka* 查看KS索引列表,发现没有索引。 - -#### 3.1.2、解决方案 - -执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来创建索引及模版。 - - - -### 3.2、原因二:索引模板错误 - -#### 3.2.1、异常现象 - -多集群列表有数据,集群详情页图标无数据。查询KS索引模板列表,发现不存在。 - -``` -curl {ES的IP地址}:{ES的端口号}/_cat/templates/ks_kafka*?v&h=name -``` - -正常KS模板如下图所示。 - - - - - -#### 3.2.2、解决方案 - -删除KS索引模板和索引 - -``` -curl -XDELETE {ES的IP地址}:{ES的端口号}/ks_kafka* -curl -XDELETE {ES的IP地址}:{ES的端口号}/_template/ks_kafka* -``` - -执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来创建索引及模版。 - - -### 3.3、原因三:集群Shard满 - -#### 3.3.1、异常现象 - -报错信息 - -``` -com.didiglobal.logi.elasticsearch.client.model.exception.ESIndexNotFoundException: method [GET], host[http://127.0.0.1:9200], URI [/ks_kafka_broker_metric_2022-10-21,ks_kafka_broker_metric_2022-10-22/_search], status line [HTTP/1.1 
404 Not Found] -``` - -尝试手动创建索引失败。 - -``` -#创建ks_kafka_cluster_metric_test索引的指令 -curl -s -XPUT http://{ES的IP地址}:{ES的端口号}/ks_kafka_cluster_metric_test -``` - -#### 3.3.2、解决方案 - -ES索引的默认分片数量为1000,达到数量以后,索引创建失败。 - -+ 扩大ES索引数量上限,执行指令 - -``` -curl -XPUT -H"content-type:application/json" http://{ES的IP地址}:{ES的端口号}/_cluster/settings -d ' -{ - "persistent": { - "cluster": { - "max_shards_per_node":{索引上限,默认为1000} - } - } -}' -``` - -执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来补全索引。 diff --git "a/docs/dev_guide/\350\247\243\345\206\263\350\277\236\346\216\245JMX\345\244\261\350\264\245.md" "b/docs/dev_guide/\350\247\243\345\206\263\350\277\236\346\216\245JMX\345\244\261\350\264\245.md" index 03271837b..c6f73fb92 100644 --- "a/docs/dev_guide/\350\247\243\345\206\263\350\277\236\346\216\245JMX\345\244\261\350\264\245.md" +++ "b/docs/dev_guide/\350\247\243\345\206\263\350\277\236\346\216\245JMX\345\244\261\350\264\245.md" @@ -2,125 +2,275 @@ ![Logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png) -## JMX-连接失败问题解决 -集群正常接入`KnowStreaming`之后,即可以看到集群的Broker列表,此时如果查看不了Topic的实时流量,或者是Broker的实时流量信息时,那么大概率就是`JMX`连接的问题了。 +## 2、解决连接 JMX 失败 -下面我们按照步骤来一步一步的检查。 +- [2、解决连接 JMX 失败](#2解决连接-jmx-失败) + - [2.1、正异常现象](#21正异常现象) + - [2.2、异因一:JMX未开启](#22异因一jmx未开启) + - [2.2.1、异常现象](#221异常现象) + - [2.2.2、解决方案](#222解决方案) + - [2.3、异原二:JMX配置错误](#23异原二jmx配置错误) + - [2.3.1、异常现象](#231异常现象) + - [2.3.2、解决方案](#232解决方案) + - [2.4、异因三:JMX开启SSL](#24异因三jmx开启ssl) + - [2.4.1、异常现象](#241异常现象) + - [2.4.2、解决方案](#242解决方案) + - [2.5、异因四:连接了错误IP](#25异因四连接了错误ip) + - [2.5.1、异常现象](#251异常现象) + - [2.5.2、解决方案](#252解决方案) + - [2.6、异因五:连接了错误端口](#26异因五连接了错误端口) + - [2.6.1、异常现象](#261异常现象) + - [2.6.2、解决方案](#262解决方案) -### 1、问题说明 -**类型一:JMX配置未开启** +背景:Kafka 通过 JMX 服务进行运行指标的暴露,因此 `KnowStreaming` 会主动连接 Kafka 的 JMX 服务进行指标采集。如果我们发现页面缺少指标,那么可能原因之一是 Kafka 的 JMX 端口配置的有问题导致指标获取失败,进而页面没有数据。 -未开启时,直接到`2、解决方法`查看如何开启即可。 -![check_jmx_opened](http://img-ys011.didistatic.com/static/dc2img/do1_dRX6UHE2IUSHqsN95DGb) +### 2.1、正异常现象 +**1、异常现象** -**类型二:配置错误** +Broker 列表的 JMX PORT 列出现红色感叹号,则表示 JMX 连接存在异常。 -`JMX`端口已经开启的情况下,有的时候开启的配置不正确,此时也会导致出现连接失败的问题。这里大概列举几种原因: + -- `JMX`配置错误:见`2、解决方法`。 -- 存在防火墙或者网络限制:网络通的另外一台机器`telnet`试一下看是否可以连接上。 -- 需要进行用户名及密码的认证:见`3、解决方法 —— 认证的JMX`。 +**2、正常现象** -错误日志例子: +Broker 列表的 JMX PORT 列出现绿色,则表示 JMX 连接正常。 + + + + +--- + + + + + + +### 2.2、异因一:JMX未开启 + +#### 2.2.1、异常现象 + +broker列表的JMX Port值为-1,对应Broker的JMX未开启。 + + + +#### 2.2.2、解决方案 + +开启JMX,开启流程如下: + +1、修改kafka的bin目录下面的:`kafka-server-start.sh`文件 + +```bash +# 在这个下面增加JMX端口的配置 +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" + export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999 +fi ``` -# 错误一: 错误提示的是真实的IP,这样的话基本就是JMX配置的有问题了。 -2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999. -java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is: -# 错误二:错误提示的是127.0.0.1这个IP,这个是机器的hostname配置的可能有问题。 -2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999. 
-java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is: +2、修改kafka的bin目录下面对的:`kafka-run-class.sh`文件 + +```bash +# JMX settings +if [ -z "$KAFKA_JMX_OPTS" ]; then + KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=当前机器的IP" +fi + +# JMX port to use +if [ $JMX_PORT ]; then + KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" +fi ``` -**类型三:连接特定IP** -Broker 配置了内外网,而JMX在配置时,可能配置了内网IP或者外网IP,此时 `KnowStreaming` 需要连接到特定网络的IP才可以进行访问。 -比如: +3、重启Kafka-Broker。 -Broker在ZK的存储结构如下所示,我们期望连接到 `endpoints` 中标记为 `INTERNAL` 的地址,但是 `KnowStreaming` 却连接了 `EXTERNAL` 的地址,此时可以看 `4、解决方法 —— JMX连接特定网络` 进行解决。 -```json - { - "listener_security_protocol_map": {"EXTERNAL":"SASL_PLAINTEXT","INTERNAL":"SASL_PLAINTEXT"}, - "endpoints": ["EXTERNAL://192.168.0.1:7092","INTERNAL://192.168.0.2:7093"], - "jmx_port": 8099, - "host": "192.168.0.1", - "timestamp": "1627289710439", - "port": -1, - "version": 4 - } -``` +--- + + + + + + + +### 2.3、异原二:JMX配置错误 + +#### 2.3.1、异常现象 -### 2、解决方法 +错误日志: -这里仅介绍一下比较通用的解决方式,如若有更好的方式,欢迎大家指导告知一下。 +```log +# 错误一: 错误提示的是真实的IP,这样的话基本就是JMX配置的有问题了。 +2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999. java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is: -修改`kafka-server-start.sh`文件: +# 错误二:错误提示的是127.0.0.1这个IP,这个是机器的hostname配置的可能有问题。 +2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999. java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is: ``` + +#### 2.3.2、解决方案 + +开启JMX,开启流程如下: + +1、修改kafka的bin目录下面的:`kafka-server-start.sh`文件 + +```bash # 在这个下面增加JMX端口的配置 if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" - export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999 + export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" + export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999 fi ``` -  +2、修改kafka的bin目录下面对的:`kafka-run-class.sh`文件 -修改`kafka-run-class.sh`文件 -``` -# JMX settings +```bash +# JMX settings if [ -z "$KAFKA_JMX_OPTS" ]; then - KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=${当前机器的IP}" -fi + KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=当前机器的IP" +fi -# JMX port to use +# JMX port to use if [ $JMX_PORT ]; then - KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" + KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" fi ``` +3、重启Kafka-Broker。 + + +--- + + + + + + + +### 2.4、异因三:JMX开启SSL + +#### 2.4.1、异常现象 + +```log +# 连接JMX的日志中,出现SSL认证失败的相关日志。TODO:欢迎补充具体日志案例。 +``` + +#### 2.4.2、解决方案 + + -### 3、解决方法 —— 认证的JMX -如果您是直接看的这个部分,建议先看一下上一节:`2、解决方法`以确保`JMX`的配置没有问题了。 +--- -在`JMX`的配置等都没有问题的情况下,如果是因为认证的原因导致连接不了的,可以在集群接入界面配置你的`JMX`认证信息。 - +### 2.5、异因四:连接了错误IP +#### 2.5.1、异常现象 +Broker 配置了内外网,而JMX在配置时,可能配置了内网IP或者外网IP,此时`KnowStreaming` 需要连接到特定网络的IP才可以进行访问。 -### 4、解决方法 —— JMX连接特定网络 + 比如:Broker在ZK的存储结构如下所示,我们期望连接到 `endpoints` 中标记为 `INTERNAL` 的地址,但是 `KnowStreaming` 
却连接了 `EXTERNAL` 的地址。 + +```json +{ + "listener_security_protocol_map": { + "EXTERNAL": "SASL_PLAINTEXT", + "INTERNAL": "SASL_PLAINTEXT" + }, + "endpoints": [ + "EXTERNAL://192.168.0.1:7092", + "INTERNAL://192.168.0.2:7093" + ], + "jmx_port": 8099, + "host": "192.168.0.1", + "timestamp": "1627289710439", + "port": -1, + "version": 4 +} +``` + +#### 2.5.2、解决方案 可以手动往`ks_km_physical_cluster`表的`jmx_properties`字段增加一个`useWhichEndpoint`字段,从而控制 `KnowStreaming` 连接到特定的JMX IP及PORT。 `jmx_properties`格式: + ```json { - "maxConn": 100, # KM对单台Broker的最大JMX连接数 - "username": "xxxxx", # 用户名,可以不填写 - "password": "xxxx", # 密码,可以不填写 - "openSSL": true, # 开启SSL, true表示开启ssl, false表示关闭 - "useWhichEndpoint": "EXTERNAL" #指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址 + "maxConn": 100, // KM对单台Broker的最大JMX连接数 + "username": "xxxxx", //用户名,可以不填写 + "password": "xxxx", // 密码,可以不填写 + "openSSL": true, //开启SSL, true表示开启ssl, false表示关闭 + "useWhichEndpoint": "EXTERNAL" //指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址 } ``` -  + SQL例子: + ```sql UPDATE ks_km_physical_cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false , "useWhichEndpoint": "xxx"}' where id={xxx}; ``` -注意: -+ 目前此功能只支持采用 `ZK` 做分布式协调的kafka集群。 +--- + + + + + + +### 2.6、异因五:连接了错误端口 + +3.3.0 以上版本,或者是 master 分支最新代码,才具备该能力。 + +#### 2.6.1、异常现象 + +在 AWS 或者是容器上的 Kafka-Broker,使用同一个IP,但是外部服务想要去连接 JMX 端口时,需要进行映射。因此 KnowStreaming 如果直接连接 ZK 上获取到的 JMX 端口,会连接失败,因此需要具备连接端口可配置的能力。 + +TODO:补充具体的日志。 + + +#### 2.6.2、解决方案 + +可以手动往`ks_km_physical_cluster`表的`jmx_properties`字段增加一个`specifiedJmxPortList`字段,从而控制 `KnowStreaming` 连接到特定的JMX PORT。 + +`jmx_properties`格式: +```json +{ + "jmxPort": 2445, // 最低优先级使用的jmx端口 + "maxConn": 100, // KM对单台Broker的最大JMX连接数 + "username": "xxxxx", //用户名,可以不填写 + "password": "xxxx", // 密码,可以不填写 + "openSSL": true, //开启SSL, true表示开启ssl, false表示关闭 + "useWhichEndpoint": "EXTERNAL", //指定要连接的网络名称,填写EXTERNAL就是连接endpoints里面的EXTERNAL地址 + "specifiedJmxPortList": [ // 配置最高优先使用的jmx端口 + { + "serverId": "1", // kafka-broker的brokerId, 注意这个是字符串类型,字符串类型的原因是要兼容connect的jmx端口的连接 + "jmxPort": 1234 // 该 broker 所连接的jmx端口 + }, + { + "serverId": "2", + "jmxPort": 1234 + }, + ] +} +``` + + + +SQL例子: + +```sql +UPDATE ks_km_physical_cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false , "specifiedJmxPortList": [{"serverId": "1", "jmxPort": 1234}] }' where id={xxx}; +``` + - \ No newline at end of file +--- diff --git "a/docs/dev_guide/\351\241\265\351\235\242\346\227\240\346\225\260\346\215\256\346\216\222\346\237\245\346\211\213\345\206\214.md" "b/docs/dev_guide/\351\241\265\351\235\242\346\227\240\346\225\260\346\215\256\346\216\222\346\237\245\346\211\213\345\206\214.md" new file mode 100644 index 000000000..6fdd136f4 --- /dev/null +++ "b/docs/dev_guide/\351\241\265\351\235\242\346\227\240\346\225\260\346\215\256\346\216\222\346\237\245\346\211\213\345\206\214.md" @@ -0,0 +1,183 @@ +![Logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png) + +# 页面无数据排查手册 + +- [页面无数据排查手册](#页面无数据排查手册) + - [1、集群接入错误](#1集群接入错误) + - [1.1、异常现象](#11异常现象) + - [1.2、解决方案](#12解决方案) + - [1.3、正常情况](#13正常情况) + - [2、JMX连接失败](#2jmx连接失败) + - [3、ElasticSearch问题](#3elasticsearch问题) + - [3.1、异因一:缺少索引](#31异因一缺少索引) + - [3.1.1、异常现象](#311异常现象) + - [3.1.2、解决方案](#312解决方案) + - [3.2、异因二:索引模板错误](#32异因二索引模板错误) + - [3.2.1、异常现象](#321异常现象) + - [3.2.2、解决方案](#322解决方案) + - [3.3、异因三:集群Shard满](#33异因三集群shard满) + - [3.3.1、异常现象](#331异常现象) + - [3.3.2、解决方案](#332解决方案) + + +--- + +## 1、集群接入错误 + 
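+背景:接入集群时需要填写正确的集群地址等信息。下面是一个假设的连通性自检示例(IP、端口请替换为实际值),可在接入前先行验证地址是否可达:
+
+```bash
+# 验证 bootstrap-servers 地址可达(使用 Kafka 自带脚本)
+kafka-broker-api-versions.sh --bootstrap-server 192.168.0.1:9092
+
+# 验证 ZK 地址可达(ruok 四字命令需在 ZK 的白名单中,见 FAQ 的 zk 监控无数据问题一节)
+echo ruok | nc 192.168.0.1 2181
+```
+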
+### 1.1、异常现象 + +如下图所示,集群非空时,大概率为地址配置错误导致。 + + + + + +### 1.2、解决方案 + +接入集群时,依据提示的错误,进行相应的解决。例如: + + + +### 1.3、正常情况 + +接入集群时,页面信息都自动正常出现,没有提示错误。 + + + + + + + +--- + +## 2、JMX连接失败 + +背景:Kafka 通过 JMX 服务进行运行指标的暴露,因此 `KnowStreaming` 会主动连接 Kafka 的 JMX 服务进行指标采集。如果我们发现页面缺少指标,那么可能原因之一是 Kafka 的 JMX 端口配置的有问题导致指标获取失败,进而页面没有数据。 + + +具体见同目录下的文档:[解决连接JMX失败](./%E8%A7%A3%E5%86%B3%E8%BF%9E%E6%8E%A5JMX%E5%A4%B1%E8%B4%A5.md) + + +--- + + + + + + + + + +## 3、ElasticSearch问题 + +**背景:** +`KnowStreaming` 将从 Kafka 中采集到的指标存储到 ES 中,如果 ES 存在问题,则也可能会导致页面出现无数据的情况。 + +**日志:** +`KnowStreaming` 读写 ES 相关日志,在 `logs/es/es.log` 中! + + +**注意:** +mac系统在执行curl指令时,可能报zsh错误。可参考以下操作。 + +```bash +1 进入.zshrc 文件 vim ~/.zshrc +2.在.zshrc中加入 setopt no_nomatch +3.更新配置 source ~/.zshrc +``` + +--- + +### 3.1、异因一:缺少索引 + +#### 3.1.1、异常现象 + +报错信息 + +```log +# 日志位置 logs/es/es.log +com.didiglobal.logi.elasticsearch.client.model.exception.ESIndexNotFoundException: method [GET], host[http://127.0.0.1:9200], URI [/ks_kafka_broker_metric_2022-10-21,ks_kafka_broker_metric_2022-10-22/_search], status line [HTTP/1.1 404 Not Found] +``` + + +`curl http://{ES的IP地址}:{ES的端口号}/_cat/indices/ks_kafka*` 查看KS索引列表,发现没有索引。 + +#### 3.1.2、解决方案 + +执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来创建索引及模版。 + + +--- + + +### 3.2、异因二:索引模板错误 + +#### 3.2.1、异常现象 + +多集群列表有数据,集群详情页图标无数据。查询KS索引模板列表,发现不存在。 + +```bash +curl {ES的IP地址}:{ES的端口号}/_cat/templates/ks_kafka*?v&h=name +``` + +正常KS模板如下图所示。 + + + + + +#### 3.2.2、解决方案 + +删除KS索引模板和索引 + +```bash +curl -XDELETE {ES的IP地址}:{ES的端口号}/ks_kafka* +curl -XDELETE {ES的IP地址}:{ES的端口号}/_template/ks_kafka* +``` + +执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来创建索引及模版。 + + +--- + + +### 3.3、异因三:集群Shard满 + +#### 3.3.1、异常现象 + +报错信息 + +```log +# 日志位置 logs/es/es.log + +{"error":{"root_cause":[{"type":"validation_exception","reason":"Validation Failed: 1: this action would add [4] total shards, but this cluster currently has [1000]/[1000] maximum shards open;"}],"type":"validation_exception","reason":"Validation Failed: 1: this action would add [4] total shards, but this cluster currently has [1000]/[1000] maximum shards open;"},"status":400} +``` + +尝试手动创建索引失败。 + +```bash +#创建ks_kafka_cluster_metric_test索引的指令 +curl -s -XPUT http://{ES的IP地址}:{ES的端口号}/ks_kafka_cluster_metric_test +``` + + +#### 3.3.2、解决方案 + +ES索引的默认分片数量为1000,达到数量以后,索引创建失败。 + ++ 扩大ES索引数量上限,执行指令 + +``` +curl -XPUT -H"content-type:application/json" http://{ES的IP地址}:{ES的端口号}/_cluster/settings -d ' +{ + "persistent": { + "cluster": { + "max_shards_per_node":{索引上限,默认为1000, 测试时可以将其调整为10000} + } + } +}' +``` + +执行 [ES索引及模版初始化](https://github.com/didi/KnowStreaming/blob/master/bin/init_es_template.sh) 脚本,来补全索引。 + + diff --git "a/docs/install_guide/\347\211\210\346\234\254\345\215\207\347\272\247\346\211\213\345\206\214.md" "b/docs/install_guide/\347\211\210\346\234\254\345\215\207\347\272\247\346\211\213\345\206\214.md" index 061c080d0..675a9dabd 100644 --- "a/docs/install_guide/\347\211\210\346\234\254\345\215\207\347\272\247\346\211\213\345\206\214.md" +++ "b/docs/install_guide/\347\211\210\346\234\254\345\215\207\347\272\247\346\211\213\345\206\214.md" @@ -6,6 +6,54 @@ ### 升级至 `master` 版本 +**配置变更** + +```yaml +# 新增的配置 +request: # 请求相关的配置 + api-call: # api调用 + timeout-unit-ms: 8000 # 超时时间,默认8000毫秒 +``` + +**SQL 变更** +```sql +-- 多集群管理权限2023-06-27新增 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES 
('2026', 'Connector-新增', '1593', '1', '2', 'Connector-新增', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2028', 'Connector-编辑', '1593', '1', '2', 'Connector-编辑', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2030', 'Connector-删除', '1593', '1', '2', 'Connector-删除', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2032', 'Connector-重启', '1593', '1', '2', 'Connector-重启', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2034', 'Connector-暂停&恢复', '1593', '1', '2', 'Connector-暂停&恢复', '0', 'know-streaming'); + +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2026', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2028', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2030', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2032', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2034', '0', 'know-streaming'); + + +-- 多集群管理权限2023-06-29新增 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2036', 'Security-ACL新增', '1593', '1', '2', 'Security-ACL新增', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2038', 'Security-ACL删除', '1593', '1', '2', 'Security-ACL删除', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2040', 'Security-User新增', '1593', '1', '2', 'Security-User新增', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2042', 'Security-User删除', '1593', '1', '2', 'Security-User删除', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2044', 'Security-User修改密码', '1593', '1', '2', 'Security-User修改密码', '0', 'know-streaming'); + +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2036', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2038', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2040', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2042', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` 
(`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2044', '0', 'know-streaming'); + + +-- 多集群管理权限2023-07-06新增 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2046', 'Group-删除', '1593', '1', '2', 'Group-删除', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2048', 'GroupOffset-Topic纬度删除', '1593', '1', '2', 'GroupOffset-Topic纬度删除', '0', 'know-streaming'); +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2050', 'GroupOffset-Partition纬度删除', '1593', '1', '2', 'GroupOffset-Partition纬度删除', '0', 'know-streaming'); + +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2046', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2048', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2050', '0', 'know-streaming'); +``` ### 升级至 `3.3.0` 版本 diff --git a/docs/user_guide/faq.md b/docs/user_guide/faq.md index 1656ec371..b66523ff4 100644 --- a/docs/user_guide/faq.md +++ b/docs/user_guide/faq.md @@ -1,13 +1,35 @@ + +![Logo](https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png) + # FAQ -## 8.1、支持哪些 Kafka 版本? +- [FAQ](#faq) + - [1、支持哪些 Kafka 版本?](#1支持哪些-kafka-版本) + - [2、2.x 版本和 3.0 版本有什么差异?](#22x-版本和-30-版本有什么差异) + - [3、页面流量信息等无数据?](#3页面流量信息等无数据) + - [8.4、`Jmx`连接失败如何解决?](#84jmx连接失败如何解决) + - [5、有没有 API 文档?](#5有没有-api-文档) + - [6、删除 Topic 成功后,为何过段时间又出现了?](#6删除-topic-成功后为何过段时间又出现了) + - [7、如何在不登录的情况下,调用接口?](#7如何在不登录的情况下调用接口) + - [8、Specified key was too long; max key length is 767 bytes](#8specified-key-was-too-long-max-key-length-is-767-bytes) + - [9、出现 ESIndexNotFoundEXception 报错](#9出现-esindexnotfoundexception-报错) + - [10、km-console 打包构建失败](#10km-console-打包构建失败) + - [11、在 `km-console` 目录下执行 `npm run start` 时看不到应用构建和热加载过程?如何启动单个应用?](#11在-km-console-目录下执行-npm-run-start-时看不到应用构建和热加载过程如何启动单个应用) + - [12、权限识别失败问题](#12权限识别失败问题) + - [13、接入开启kerberos认证的kafka集群](#13接入开启kerberos认证的kafka集群) + - [14、对接Ldap的配置](#14对接ldap的配置) + - [15、测试时使用Testcontainers的说明](#15测试时使用testcontainers的说明) + - [16、JMX连接失败怎么办](#16jmx连接失败怎么办) + - [17、zk监控无数据问题](#17zk监控无数据问题) + +## 1、支持哪些 Kafka 版本? - 支持 0.10+ 的 Kafka 版本; - 支持 ZK 及 Raft 运行模式的 Kafka 版本;   -## 8.1、2.x 版本和 3.0 版本有什么差异? +## 2、2.x 版本和 3.0 版本有什么差异? **全新设计理念** @@ -23,7 +45,7 @@   -## 8.3、页面流量信息等无数据? +## 3、页面流量信息等无数据? - 1、`Broker JMX`未正确开启 @@ -41,7 +63,7 @@   -## 8.5、有没有 API 文档? +## 5、有没有 API 文档? `KnowStreaming` 采用 Swagger 进行 API 说明,在启动 KnowStreaming 服务之后,就可以从下面地址看到。 Swagger-API 地址: [http://IP:PORT/swagger-ui.html#/](http://IP:PORT/swagger-ui.html#/)   -## 8.6、删除 Topic 成功后,为何过段时间又出现了? +## 6、删除 Topic 成功后,为何过段时间又出现了? **原因说明:** @@ -74,7 +96,7 @@ for (int i= 0; i < 100000; ++i) {   -## 8.7、如何在不登录的情况下,调用接口? +## 7、如何在不登录的情况下,调用接口?
步骤一:接口调用时,在 header 中,增加如下信息: @@ -109,7 +131,7 @@ SECURITY.TRICK_USERS 但是还有一点需要注意,绕过的用户仅能调用他有权限的接口,比如一个普通用户,那么他就只能调用普通的接口,不能去调用运维人员的接口。 -## 8.8、Specified key was too long; max key length is 767 bytes +## 8、Specified key was too long; max key length is 767 bytes **原因:** 不同版本的 InoDB 引擎,参数‘innodb_large_prefix’默认值不同,即在 5.6 默认值为 OFF,5.7 默认值为 ON。 @@ -121,13 +143,13 @@ SECURITY.TRICK_USERS - 将字符集改为 latin1(一个字符=一个字节)。 - 开启‘innodb_large_prefix’,修改默认行格式‘innodb_file_format’为 Barracuda,并设置 row_format=dynamic。 -## 8.9、出现 ESIndexNotFoundEXception 报错 +## 9、出现 ESIndexNotFoundEXception 报错 **原因 :**没有创建 ES 索引模版 **解决方案:**执行 init_es_template.sh 脚本,创建 ES 索引模版即可。 -## 8.10、km-console 打包构建失败 +## 10、km-console 打包构建失败 首先,**请确保您正在使用最新版本**,版本列表见 [Tags](https://github.com/didi/KnowStreaming/tags)。如果不是最新版本,请升级后再尝试有无问题。 @@ -161,14 +183,14 @@ Node 版本: v12.22.12 错误截图: ``` -## 8.11、在 `km-console` 目录下执行 `npm run start` 时看不到应用构建和热加载过程?如何启动单个应用? +## 11、在 `km-console` 目录下执行 `npm run start` 时看不到应用构建和热加载过程?如何启动单个应用? 需要到具体的应用中执行 `npm run start`,例如 `cd packages/layout-clusters-fe` 后,执行 `npm run start`。 应用启动后需要到基座应用中查看(需要启动基座应用,即 layout-clusters-fe)。 -## 8.12、权限识别失败问题 +## 12、权限识别失败问题 1、使用admin账号登陆KnowStreaming时,点击系统管理-用户管理-角色管理-新增角色,查看页面是否正常。 @@ -184,7 +206,7 @@ Node 版本: v12.22.12 + 解决方案:清空数据库数据,将数据库字符集调整为utf8,最后重新执行[dml-logi.sql](https://github.com/didi/KnowStreaming/blob/master/km-dist/init/sql/dml-logi.sql)脚本导入数据即可。 -## 8.13、接入开启kerberos认证的kafka集群 +## 13、接入开启kerberos认证的kafka集群 1. 部署KnowStreaming的机器上安装krb客户端; 2. 替换/etc/krb5.conf配置文件; @@ -200,7 +222,7 @@ Node 版本: v12.22.12 ``` -## 8.14、对接Ldap的配置 +## 14、对接Ldap的配置 ```yaml # 需要在application.yml中增加如下配置。相关配置的信息,按实际情况进行调整 @@ -223,6 +245,36 @@ spring: login-extend-bean-name: ksLdapLoginService # 表示使用ldap的service ``` -## 8.15、测试时使用Testcontainers的说明 +## 15、测试时使用Testcontainers的说明 + 1. 需要docker运行环境 [Testcontainers运行环境说明](https://www.testcontainers.org/supported_docker_environment/) -2. 如果本机没有docker,可以使用[远程访问docker](https://docs.docker.com/config/daemon/remote-access/) [Testcontainers配置说明](https://www.testcontainers.org/features/configuration/#customizing-docker-host-detection) \ No newline at end of file +2. 如果本机没有docker,可以使用[远程访问docker](https://docs.docker.com/config/daemon/remote-access/) [Testcontainers配置说明](https://www.testcontainers.org/features/configuration/#customizing-docker-host-detection) + + +## 16、JMX连接失败怎么办 + +详细见:[解决连接JMX失败](../dev_guide/%E8%A7%A3%E5%86%B3%E8%BF%9E%E6%8E%A5JMX%E5%A4%B1%E8%B4%A5.md) + + +## 17、zk监控无数据问题 + +**现象:** +zookeeper集群正常,但Ks上zk页面所有监控指标无数据,`KnowStreaming` log_error.log日志提示 + +```vim +[MetricCollect-Shard-0-8-thread-1] ERROR class=c.x.k.s.k.c.s.h.c.z.HealthCheckZookeeperService||method=checkWatchCount||param=ZookeeperParam(zkAddressList=[Tuple{v1=192.168.xxx.xx, v2=2181}, Tuple{v1=192.168.xxx.xx, v2=2181}, Tuple{v1=192.168.xxx.xx, v2=2181}], zkConfig=null)||config=HealthAmountRatioConfig(amount=100000, ratio=0.8)||result=Result{message='mntr is not executed because it is not in the whitelist. +', code=8031, data=null}||errMsg=get metrics failed, may be collect failed or zk mntr command not in whitelist. 
+2023-04-23 14:39:07.234 [MetricCollect-Shard-0-8-thread-1] ERROR class=c.x.k.s.k.c.s.h.checker.AbstractHeal +``` + + +原因就很明确了。需要开放zk的四字命令,在`zoo.cfg`配置文件中添加 +``` +4lw.commands.whitelist=mntr,stat,ruok,envi,srvr,envi,cons,conf,wchs,wchp +``` + + +建议至少开放上述几个四字命令,当然,您也可以全部开放 +``` +4lw.commands.whitelist=* +``` diff --git a/km-biz/pom.xml b/km-biz/pom.xml index d9a228ab3..da4192c83 100644 --- a/km-biz/pom.xml +++ b/km-biz/pom.xml @@ -5,13 +5,13 @@ 4.0.0 com.xiaojukeji.kafka km-biz - ${km.revision} + ${revision} jar km com.xiaojukeji.kafka - ${km.revision} + ${revision} diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/cluster/impl/ClusterBrokersManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/cluster/impl/ClusterBrokersManagerImpl.java index ab5d6a6d6..c77724dd8 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/cluster/impl/ClusterBrokersManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/cluster/impl/ClusterBrokersManagerImpl.java @@ -202,7 +202,7 @@ private List convert2ClusterBrokersOverviewVOList(Clus //补充非zk模式的JMXPort信息 if (!clusterPhy.getRunState().equals(ClusterRunStateEnum.RUN_ZK.getRunState())) { JmxConfig jmxConfig = ConvertUtil.str2ObjByJson(clusterPhy.getJmxProperties(), JmxConfig.class); - voList.forEach(elem -> elem.setJmxPort(jmxConfig.getJmxPort() == null ? -1 : jmxConfig.getJmxPort())); + voList.forEach(elem -> elem.setJmxPort(jmxConfig.getFinallyJmxPort(String.valueOf(elem.getBrokerId())))); } return voList; diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/cluster/impl/ClusterZookeepersManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/cluster/impl/ClusterZookeepersManagerImpl.java index aca302693..8aed18ddf 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/cluster/impl/ClusterZookeepersManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/cluster/impl/ClusterZookeepersManagerImpl.java @@ -62,7 +62,8 @@ public Result getClusterPhyZookeepersState(Long cluste vo.setTotalObserverCount(0); vo.setAliveServerCount(0); for (ZookeeperInfo info: infoList) { - if (info.getRole().equals(ZKRoleEnum.LEADER.getRole())) { + if (info.getRole().equals(ZKRoleEnum.LEADER.getRole()) || info.getRole().equals(ZKRoleEnum.STANDALONE.getRole())) { + // leader 或者 standalone vo.setLeaderNode(info.getHost()); } diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/connector/impl/ConnectorManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/connector/impl/ConnectorManagerImpl.java index 5800b26f0..191afc6bb 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/connector/impl/ConnectorManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/connector/impl/ConnectorManagerImpl.java @@ -49,9 +49,9 @@ public Result updateConnectorConfig(Long connectClusterId, String connecto @Override public Result createConnector(ConnectorCreateDTO dto, String operator) { - dto.getConfigs().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName()); + dto.getSuitableConfig().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName()); - Result createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getConfigs(), operator); + Result createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), 
dto.getSuitableConfig(), operator); if (createResult.failed()) { return Result.buildFromIgnoreData(createResult); } @@ -67,9 +67,9 @@ public Result createConnector(ConnectorCreateDTO dto, String operator) { @Override public Result createConnector(ConnectorCreateDTO dto, String heartbeatName, String checkpointName, String operator) { - dto.getConfigs().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName()); + dto.getSuitableConfig().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName()); - Result createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getConfigs(), operator); + Result createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator); if (createResult.failed()) { return Result.buildFromIgnoreData(createResult); } diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/mm2/impl/MirrorMakerManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/mm2/impl/MirrorMakerManagerImpl.java index 803daa266..de10b0f00 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/mm2/impl/MirrorMakerManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/mm2/impl/MirrorMakerManagerImpl.java @@ -48,6 +48,7 @@ import org.springframework.stereotype.Service; import java.util.*; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; import java.util.stream.Collectors; @@ -131,17 +132,17 @@ public Result createMirrorMaker(MirrorMakerCreateDTO dto, String operator) } else if (checkpointResult.failed() && checkpointResult.failed()) { return Result.buildFromRSAndMsg( ResultStatus.KAFKA_CONNECTOR_OPERATE_FAILED, - String.format("创建 checkpoint & heartbeat 失败.\n失败信息分别为:%s\n\n%s", checkpointResult.getMessage(), heartbeatResult.getMessage()) + String.format("创建 checkpoint & heartbeat 失败.%n失败信息分别为:%s%n%n%s", checkpointResult.getMessage(), heartbeatResult.getMessage()) ); } else if (checkpointResult.failed()) { return Result.buildFromRSAndMsg( ResultStatus.KAFKA_CONNECTOR_OPERATE_FAILED, - String.format("创建 checkpoint 失败.\n失败信息分别为:%s", checkpointResult.getMessage()) + String.format("创建 checkpoint 失败.%n失败信息分别为:%s", checkpointResult.getMessage()) ); } else{ return Result.buildFromRSAndMsg( ResultStatus.KAFKA_CONNECTOR_OPERATE_FAILED, - String.format("创建 heartbeat 失败.\n失败信息分别为:%s", heartbeatResult.getMessage()) + String.format("创建 heartbeat 失败.%n失败信息分别为:%s", heartbeatResult.getMessage()) ); } } @@ -193,7 +194,7 @@ public Result modifyMirrorMakerConfig(MirrorMakerCreateDTO dto, String ope return rv; } - return connectorService.updateConnectorConfig(dto.getConnectClusterId(), dto.getConnectorName(), dto.getConfigs(), operator); + return connectorService.updateConnectorConfig(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator); } @Override @@ -425,7 +426,7 @@ public Result> getMM2Configs(Long connectClusterId, String conn public Result> validateConnectors(MirrorMakerCreateDTO dto) { List voList = new ArrayList<>(); - Result infoResult = pluginService.validateConfig(dto.getConnectClusterId(), dto.getConfigs()); + Result infoResult = pluginService.validateConfig(dto.getConnectClusterId(), dto.getSuitableConfig()); if (infoResult.failed()) { return Result.buildFromIgnoreData(infoResult); } @@ -479,11 +480,11 @@ public Result checkCreateMirrorMakerParamAndUnifyData(MirrorMakerCreateDTO return 
Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(connectCluster.getKafkaClusterPhyId())); } - if (!dto.getConfigs().containsKey(CONNECTOR_CLASS_FILED_NAME)) { + if (!dto.getSuitableConfig().containsKey(CONNECTOR_CLASS_FILED_NAME)) { return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "SourceConnector缺少connector.class"); } - if (!MIRROR_MAKER_SOURCE_CONNECTOR_TYPE.equals(dto.getConfigs().getProperty(CONNECTOR_CLASS_FILED_NAME))) { + if (!MIRROR_MAKER_SOURCE_CONNECTOR_TYPE.equals(dto.getSuitableConfig().getProperty(CONNECTOR_CLASS_FILED_NAME))) { return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "SourceConnector的connector.class类型错误"); } @@ -588,16 +589,14 @@ public static List supplyData2ClusterMirrorMakerOv } } - voList.forEach(elem -> { - elem.setMetricLines(metricLineMap.get(elem.getConnectClusterId() + "#" + elem.getConnectorName())); - }); + voList.forEach(elem -> elem.setMetricLines(metricLineMap.get(elem.getConnectClusterId() + "#" + elem.getConnectorName()))); return voList; } private List completeClusterInfo(List mirrorMakerVOList) { - Map connectorInfoMap = new HashMap<>(); + Map connectorInfoMap = new ConcurrentHashMap<>(); for (ClusterMirrorMakerOverviewVO mirrorMakerVO : mirrorMakerVOList) { ApiCallThreadPoolService.runnableTask(String.format("method=completeClusterInfo||connectClusterId=%d||connectorName=%s||getMirrorMakerInfo", mirrorMakerVO.getConnectClusterId(), mirrorMakerVO.getConnectorName()), @@ -607,12 +606,10 @@ private List completeClusterInfo(List newMirrorMakerVOList = new ArrayList<>(); for (ClusterMirrorMakerOverviewVO mirrorMakerVO : mirrorMakerVOList) { diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/GroupManager.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/GroupManager.java index 5a6d3ac66..ea6465a38 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/GroupManager.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/GroupManager.java @@ -1,6 +1,7 @@ package com.xiaojukeji.know.streaming.km.biz.group; import com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterGroupSummaryDTO; +import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetDeleteDTO; import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO; import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO; import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO; @@ -39,5 +40,9 @@ PaginationResult pagingGroupTopicConsumedMetrics(Lon Result resetGroupOffsets(GroupOffsetResetDTO dto, String operator) throws Exception; + Result deleteGroupOffsets(GroupOffsetDeleteDTO dto, String operator) throws Exception; + + @Deprecated List getGroupTopicOverviewVOList(Long clusterPhyId, List groupMemberPOList); + List getGroupTopicOverviewVOList(Long clusterPhyId, List groupMemberPOList, Integer timeoutUnitMs); } diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java index 17216793d..753768dfc 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java @@ -4,6 +4,7 @@ import com.didiglobal.logi.log.LogFactory; import com.xiaojukeji.know.streaming.km.biz.group.GroupManager; import 
com.xiaojukeji.know.streaming.km.common.bean.dto.cluster.ClusterGroupSummaryDTO; +import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetDeleteDTO; import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO; import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationBaseDTO; import com.xiaojukeji.know.streaming.km.common.bean.dto.pagination.PaginationSortDTO; @@ -17,6 +18,9 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.KSMemberDescription; import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.GroupMetrics; import com.xiaojukeji.know.streaming.km.common.bean.entity.offset.KSOffsetSpec; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicPartitionParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic; @@ -32,6 +36,7 @@ import com.xiaojukeji.know.streaming.km.common.enums.AggTypeEnum; import com.xiaojukeji.know.streaming.km.common.enums.OffsetTypeEnum; import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum; +import com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum; import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum; import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException; import com.xiaojukeji.know.streaming.km.common.exception.NotExistException; @@ -40,11 +45,14 @@ import com.xiaojukeji.know.streaming.km.common.utils.PaginationUtil; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService; +import com.xiaojukeji.know.streaming.km.core.service.config.KSConfigUtils; import com.xiaojukeji.know.streaming.km.core.service.group.GroupMetricService; import com.xiaojukeji.know.streaming.km.core.service.group.GroupService; +import com.xiaojukeji.know.streaming.km.core.service.group.OpGroupService; import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService; import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.GroupMetricVersionItems; +import com.xiaojukeji.know.streaming.km.core.utils.ApiCallThreadPoolService; import com.xiaojukeji.know.streaming.km.persistence.es.dao.GroupMetricESDAO; import org.apache.kafka.common.ConsumerGroupState; import org.apache.kafka.common.TopicPartition; @@ -52,13 +60,14 @@ import org.springframework.stereotype.Component; import java.util.*; +import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import static com.xiaojukeji.know.streaming.km.common.enums.group.GroupTypeEnum.CONNECT_CLUSTER_PROTOCOL_TYPE; @Component public class GroupManagerImpl implements GroupManager { - private static final ILog log = LogFactory.getLog(GroupManagerImpl.class); + private static final ILog LOGGER = LogFactory.getLog(GroupManagerImpl.class); @Autowired private TopicService topicService; @@ -66,6 +75,9 @@ public class GroupManagerImpl implements GroupManager { @Autowired private GroupService groupService; + @Autowired + private OpGroupService opGroupService; + @Autowired private PartitionService 
partitionService; @@ -78,6 +90,9 @@ public class GroupManagerImpl implements GroupManager { @Autowired private ClusterPhyService clusterPhyService; + @Autowired + private KSConfigUtils ksConfigUtils; + @Override public PaginationResult pagingGroupMembers(Long clusterPhyId, String topicName, @@ -85,19 +100,27 @@ public PaginationResult pagingGroupMembers(Long clusterPhy String searchTopicKeyword, String searchGroupKeyword, PaginationBaseDTO dto) { + long startTimeUnitMs = System.currentTimeMillis(); + PaginationResult paginationResult = groupService.pagingGroupMembers(clusterPhyId, topicName, groupName, searchTopicKeyword, searchGroupKeyword, dto); if (!paginationResult.hasData()) { return PaginationResult.buildSuc(new ArrayList<>(), paginationResult); } - List groupTopicVOList = this.getGroupTopicOverviewVOList(clusterPhyId, paginationResult.getData().getBizData()); + List groupTopicVOList = this.getGroupTopicOverviewVOList( + clusterPhyId, + paginationResult.getData().getBizData(), + ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTimeUnitMs) // 超时时间 + ); return PaginationResult.buildSuc(groupTopicVOList, paginationResult); } @Override public PaginationResult pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto) { + long startTimeUnitMs = System.currentTimeMillis(); + Group group = groupService.getGroupFromDB(clusterPhyId, groupName); //没有topicMember则直接返回 @@ -113,7 +136,14 @@ public PaginationResult pagingGroupTopicMembers(Long clust List groupMemberPOList = paginationResult.getData().getBizData().stream().map(elem -> new GroupMemberPO(clusterPhyId, elem.getTopicName(), groupName, group.getState().getState(), elem.getMemberCount())).collect(Collectors.toList()); - return PaginationResult.buildSuc(this.getGroupTopicOverviewVOList(clusterPhyId, groupMemberPOList), paginationResult); + return PaginationResult.buildSuc( + this.getGroupTopicOverviewVOList( + clusterPhyId, + groupMemberPOList, + ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTimeUnitMs) // 超时时间 + ), + paginationResult + ); } @Override @@ -121,7 +151,7 @@ public PaginationResult pagingClusterGroupsOverview(Long cluste List groupList = groupService.listClusterGroups(clusterPhyId); // 类型转化 - List voList = groupList.stream().map(elem -> GroupConverter.convert2GroupOverviewVO(elem)).collect(Collectors.toList()); + List voList = groupList.stream().map(GroupConverter::convert2GroupOverviewVO).collect(Collectors.toList()); // 搜索groupName voList = PaginationUtil.pageByFuzzyFilter(voList, dto.getSearchGroupName(), Arrays.asList("name")); @@ -168,9 +198,10 @@ public PaginationResult pagingGroupTopicConsumedMetr // 转换存储格式 Map tpMemberMap = new HashMap<>(); - //如果不是connect集群 + // 如果不是connect集群 if (!groupDescription.protocolType().equals(CONNECT_CLUSTER_PROTOCOL_TYPE)) { for (KSMemberDescription description : groupDescription.members()) { + // 如果是 Consumer 的 Description ,则 Assignment 的类型为 KSMemberConsumerAssignment 的 KSMemberConsumerAssignment assignment = (KSMemberConsumerAssignment) description.assignment(); for (TopicPartition tp : assignment.topicPartitions()) { tpMemberMap.put(tp, description); @@ -245,6 +276,52 @@ public Result resetGroupOffsets(GroupOffsetResetDTO dto, String operator) return groupService.resetGroupOffsets(dto.getClusterId(), dto.getGroupName(), offsetMapResult.getData(), operator); } + @Override + public Result deleteGroupOffsets(GroupOffsetDeleteDTO dto, String operator) throws Exception { + ClusterPhy clusterPhy = 
+ ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(dto.getClusterPhyId()); + if (clusterPhy == null) { + return Result.buildFromRSAndMsg(ResultStatus.CLUSTER_NOT_EXIST, MsgConstant.getClusterPhyNotExist(dto.getClusterPhyId())); + } + + + // delete by group dimension + if (ValidateUtils.isBlank(dto.getGroupName())) { + return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "groupName must not be blank"); + } + if (DeleteGroupTypeEnum.GROUP.getCode().equals(dto.getDeleteType())) { + return opGroupService.deleteGroupOffset( + new DeleteGroupParam(dto.getClusterPhyId(), dto.getGroupName(), DeleteGroupTypeEnum.GROUP), + operator + ); + } + + + // delete by topic dimension + if (ValidateUtils.isBlank(dto.getTopicName())) { + return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "topicName must not be blank"); + } + if (DeleteGroupTypeEnum.GROUP_TOPIC.getCode().equals(dto.getDeleteType())) { + return opGroupService.deleteGroupTopicOffset( + new DeleteGroupTopicParam(dto.getClusterPhyId(), dto.getGroupName(), DeleteGroupTypeEnum.GROUP, dto.getTopicName()), + operator + ); + } + + + // delete by partition dimension + if (ValidateUtils.isNullOrLessThanZero(dto.getPartitionId())) { + return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "partitionId must not be null or less than 0"); + } + if (DeleteGroupTypeEnum.GROUP_TOPIC_PARTITION.getCode().equals(dto.getDeleteType())) { + return opGroupService.deleteGroupTopicPartitionOffset( + new DeleteGroupTopicPartitionParam(dto.getClusterPhyId(), dto.getGroupName(), DeleteGroupTypeEnum.GROUP, dto.getTopicName(), dto.getPartitionId()), + operator + ); + } + + return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "illegal deleteType value"); + } + @Override public List getGroupTopicOverviewVOList(Long clusterPhyId, List groupMemberPOList) { // fetch metrics @@ -256,11 +333,54 @@ public List getGroupTopicOverviewVOList(Long clusterPhyId, ); if (metricsListResult.failed()) { // if the query fails, log the error but still return the data already available - log.error("method=completeMetricData||clusterPhyId={}||result={}||errMsg=search es failed", clusterPhyId, metricsListResult); + LOGGER.error("method=completeMetricData||clusterPhyId={}||result={}||errMsg=search es failed", clusterPhyId, metricsListResult); } return this.convert2GroupTopicOverviewVOList(groupMemberPOList, metricsListResult.getData()); } + @Override + public List getGroupTopicOverviewVOList(Long clusterPhyId, List poList, Integer timeoutUnitMs) { + Set requestedGroupSet = new HashSet<>(); + + // fetch metrics + Map> groupTopicLagMap = new ConcurrentHashMap<>(); + poList.forEach(elem -> { + if (requestedGroupSet.contains(elem.getGroupName())) { + // this group has already been handled + return; + } + + requestedGroupSet.add(elem.getGroupName()); + ApiCallThreadPoolService.runnableTask( + String.format("clusterPhyId=%d||groupName=%s||msg=getGroupTopicLag", clusterPhyId, elem.getGroupName()), + timeoutUnitMs, + () -> { + Result> listResult = groupMetricService.collectGroupMetricsFromKafka(clusterPhyId, elem.getGroupName(), GroupMetricVersionItems.GROUP_METRIC_LAG); + if (listResult == null || !listResult.hasData()) { + return; + } + + Map lagMetricMap = new HashMap<>(); + listResult.getData().forEach(item -> { + Float newLag = item.getMetric(GroupMetricVersionItems.GROUP_METRIC_LAG); + if (newLag == null) { + return; + } + + Float oldLag = lagMetricMap.getOrDefault(item.getTopic(), newLag); + lagMetricMap.put(item.getTopic(), Math.max(oldLag, newLag)); + }); + + groupTopicLagMap.put(elem.getGroupName(), lagMetricMap); + } + ); + }); + + ApiCallThreadPoolService.waitResult(); + + return this.convert2GroupTopicOverviewVOList(poList, groupTopicLagMap); + } +
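+ // Note on the timeout-aware overload above: it submits one ApiCallThreadPoolService task per distinct group (requestedGroupSet deduplicates repeated rows), keeps only the maximum lag per topic, then waits for all tasks. Assuming the thread-pool service enforces timeoutUnitMs per task, a slow group degrades to a missing maxLag value instead of blocking the whole request.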
/**************************************************** private method ****************************************************/ @@ -314,13 +434,22 @@ private List convert2GroupTopicOverviewVOList(List(); } - // > - Map> metricsMap = new HashMap<>(); + // > + Map> metricsMap = new HashMap<>(); metricsList.stream().forEach(elem -> { + Float metricValue = elem.getMetrics().get(GroupMetricVersionItems.GROUP_METRIC_LAG); + if (metricValue == null) { + return; + } + metricsMap.putIfAbsent(elem.getGroup(), new HashMap<>()); - metricsMap.get(elem.getGroup()).put(elem.getTopic(), elem); + metricsMap.get(elem.getGroup()).put(elem.getTopic(), metricValue); }); + return this.convert2GroupTopicOverviewVOList(poList, metricsMap); + } + + private List convert2GroupTopicOverviewVOList(List poList, Map> metricsMap) { List voList = new ArrayList<>(); for (GroupMemberPO po: poList) { GroupTopicOverviewVO vo = ConvertUtil.obj2Obj(po, GroupTopicOverviewVO.class); @@ -328,9 +457,9 @@ private List convert2GroupTopicOverviewVOList(List()).get(po.getTopicName()); - if (metrics != null) { - vo.setMaxLag(ConvertUtil.Float2Long(metrics.getMetrics().get(GroupMetricVersionItems.GROUP_METRIC_LAG))); + Float metricValue = metricsMap.getOrDefault(po.getGroupName(), new HashMap<>()).get(po.getTopicName()); + if (metricValue != null) { + vo.setMaxLag(ConvertUtil.Float2Long(metricValue)); } voList.add(vo); diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/OpTopicManager.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/OpTopicManager.java index 5c3fa742a..90b7e241f 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/OpTopicManager.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/OpTopicManager.java @@ -19,4 +19,9 @@ public interface OpTopicManager { * Expand partitions */ Result expandTopic(TopicExpansionDTO dto, String operator); + + /** + * Truncate a Topic + */ + Result truncateTopic(Long clusterPhyId, String topicName, String operator); } diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/OpTopicManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/OpTopicManagerImpl.java index 5d27ed746..22d204ea5 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/OpTopicManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/OpTopicManagerImpl.java @@ -10,10 +10,12 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicTruncateParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.partition.Partition; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus; import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic; +import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant; import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils; import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil; @@ -156,6 +158,16 @@ public Result expandTopic(TopicExpansionDTO dto, String operator) { return rv; }
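+ // Truncation below passes TOPIC_TRUNCATE_DEFAULT_OFFSET (-1) to opTopicService; presumably this maps onto Kafka's DeleteRecords API, where an offset of -1 means "delete up to the current high watermark", i.e. all existing records are removed.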
+ @Override + public Result truncateTopic(Long clusterPhyId, String topicName, String operator) { + // truncate the topic + Result rv = opTopicService.truncateTopic(new TopicTruncateParam(clusterPhyId, topicName, KafkaConstant.TOPIC_TRUNCATE_DEFAULT_OFFSET), operator); + if (rv.failed()) { + return rv; + } + + return Result.buildSuc(); + } /**************************************************** private method ****************************************************/ diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/TopicStateManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/TopicStateManagerImpl.java index cd970528b..d973ece98 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/TopicStateManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/TopicStateManagerImpl.java @@ -28,6 +28,7 @@ import com.xiaojukeji.know.streaming.km.common.constant.Constant; import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant; import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; +import com.xiaojukeji.know.streaming.km.common.constant.PaginationConstant; import com.xiaojukeji.know.streaming.km.common.converter.TopicVOConverter; import com.xiaojukeji.know.streaming.km.common.enums.OffsetTypeEnum; import com.xiaojukeji.know.streaming.km.common.enums.SortTypeEnum; @@ -38,6 +39,7 @@ import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService; import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService; +import com.xiaojukeji.know.streaming.km.core.service.config.KSConfigUtils; import com.xiaojukeji.know.streaming.km.core.service.group.GroupService; import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionMetricService; import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService; @@ -45,8 +47,7 @@ import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService; import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems; -import org.apache.commons.lang3.ObjectUtils; -import org.apache.commons.lang3.StringUtils; +import com.xiaojukeji.know.streaming.km.core.utils.ApiCallThreadPoolService; import org.apache.kafka.clients.consumer.*; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.TopicConfig; @@ -60,7 +61,7 @@ @Component public class TopicStateManagerImpl implements TopicStateManager { - private static final ILog log = LogFactory.getLog(TopicStateManagerImpl.class); + private static final ILog LOGGER = LogFactory.getLog(TopicStateManagerImpl.class); @Autowired private TopicService topicService; @@ -89,6 +90,9 @@ public class TopicStateManagerImpl implements TopicStateManager { @Autowired private GroupManager groupManager; + @Autowired + private KSConfigUtils ksConfigUtils; + @Override public TopicBrokerAllVO getTopicBrokerAll(Long clusterPhyId, String topicName, String searchBrokerHost) throws NotExistException { Topic topic = topicService.getTopic(clusterPhyId, topicName); @@ -101,7 +105,7 @@ public TopicBrokerAllVO getTopicBrokerAll(Long clusterPhyId, String topicName, S TopicBrokerAllVO allVO = new TopicBrokerAllVO(); allVO.setTotal(topic.getBrokerIdSet().size()); - allVO.setLive((int)brokerMap.values().stream().filter(elem -> elem.alive()).count()); +
allVO.setLive((int)brokerMap.values().stream().filter(Broker::alive).count()); allVO.setDead(allVO.getTotal() - allVO.getLive()); allVO.setPartitionCount(topic.getPartitionNum()); @@ -153,97 +157,28 @@ public Result> getTopicMessages(Long clusterPhyId, String to return Result.buildFromIgnoreData(endOffsetsMapResult); } - List voList = new ArrayList<>(); - - KafkaConsumer kafkaConsumer = null; - try { - // create the kafka consumer - kafkaConsumer = new KafkaConsumer<>(this.generateClientProperties(clusterPhy, dto.getMaxRecords())); - - List partitionList = new ArrayList<>(); - long maxMessage = 0; - for (Map.Entry entry : endOffsetsMapResult.getData().entrySet()) { - long begin = beginOffsetsMapResult.getData().get(entry.getKey()); - long end = entry.getValue(); - if (begin == end){ - continue; - } - maxMessage += end - begin; - partitionList.add(entry.getKey()); - } - maxMessage = Math.min(maxMessage, dto.getMaxRecords()); - kafkaConsumer.assign(partitionList); - - Map partitionOffsetAndTimestampMap = new HashMap<>(); - // get each partition's offset for the specified time (when querying messages from a given start time) - if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) { - Map timestampsToSearch = new HashMap<>(); - partitionList.forEach(topicPartition -> { - timestampsToSearch.put(topicPartition, dto.getStartTimestampUnitMs()); - }); - partitionOffsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch); - } - - for (TopicPartition partition : partitionList) { - if (OffsetTypeEnum.EARLIEST.getResetType() == dto.getFilterOffsetReset()) { - // reset to earliest - kafkaConsumer.seek(partition, beginOffsetsMapResult.getData().get(partition)); - } else if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) { - // reset to the specified time - kafkaConsumer.seek(partition, partitionOffsetAndTimestampMap.get(partition).offset()); - } else if (OffsetTypeEnum.PRECISE_OFFSET.getResetType() == dto.getFilterOffsetReset()) { - // reset to the specified offset - - } else { - // default: reset to latest - kafkaConsumer.seek(partition, Math.max(beginOffsetsMapResult.getData().get(partition), endOffsetsMapResult.getData().get(partition) - dto.getMaxRecords())); - } - } - - // KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS is subtracted here because each poll takes time; without the subtraction a poll could finish past the requested deadline - while (System.currentTimeMillis() - startTime <= dto.getPullTimeoutUnitMs() && voList.size() < maxMessage) { - ConsumerRecords consumerRecords = kafkaConsumer.poll(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS)); - for (ConsumerRecord consumerRecord : consumerRecords) { - if (this.checkIfIgnore(consumerRecord, dto.getFilterKey(), dto.getFilterValue())) { - continue; - } - - voList.add(TopicVOConverter.convert2TopicRecordVO(topicName, consumerRecord)); - if (voList.size() >= dto.getMaxRecords()) { - break; - } - } - - // return on timeout - if (System.currentTimeMillis() - startTime + KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS > dto.getPullTimeoutUnitMs() - || voList.size() > dto.getMaxRecords()) { - break; - } - } - - // sort - if (ObjectUtils.isNotEmpty(voList)) { - // sort by time in descending order by default - if (StringUtils.isBlank(dto.getSortType())) { - dto.setSortType(SortTypeEnum.DESC.getSortType()); - } - PaginationUtil.pageBySort(voList, dto.getSortField(), dto.getSortType()); - } + // data collection + List voList = this.getTopicMessages(clusterPhy, topicName, beginOffsetsMapResult.getData(), endOffsetsMapResult.getData(), startTime, dto);
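+ // The collected records are sorted below and then trimmed to maxRecords before being returned.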
- return Result.buildSuc(voList.subList(0, Math.min(dto.getMaxRecords(), voList.size()))); - } catch (Exception e) { - log.error("method=getTopicMessages||clusterPhyId={}||topicName={}||param={}||errMsg=exception", clusterPhyId, topicName, dto, e); + // sort + if (ValidateUtils.isBlank(dto.getSortType())) { + // sort by time in descending order by default + dto.setSortType(SortTypeEnum.DESC.getSortType()); + } + if (ValidateUtils.isBlank(dto.getSortField())) { + // sort by the timestampUnitMs field by default + dto.setSortField(PaginationConstant.TOPIC_RECORDS_TIME_SORTED_FIELD); + } - throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED); - } finally { - if (kafkaConsumer != null) { - try { - kafkaConsumer.close(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS)); - } catch (Exception e) { - // ignore - } - } + if (PaginationConstant.TOPIC_RECORDS_TIME_SORTED_FIELD.equals(dto.getSortField())) { + // if sorting by time, the secondary sort key is offset + PaginationUtil.pageBySort(voList, dto.getSortField(), dto.getSortType(), PaginationConstant.TOPIC_RECORDS_OFFSET_SORTED_FIELD, dto.getSortType()); + } else { + // if sorting by a non-time field, the secondary sort key is time + PaginationUtil.pageBySort(voList, dto.getSortField(), dto.getSortType(), PaginationConstant.TOPIC_RECORDS_TIME_SORTED_FIELD, dto.getSortType()); } + + return Result.buildSuc(voList.subList(0, Math.min(dto.getMaxRecords(), voList.size()))); } @Override @@ -298,26 +233,37 @@ public Result getTopicState(Long clusterPhyId, String topicName) { @Override public Result> getTopicPartitions(Long clusterPhyId, String topicName, List metricsNames) { + long startTime = System.currentTimeMillis(); + List partitionList = partitionService.listPartitionByTopic(clusterPhyId, topicName); if (ValidateUtils.isEmptyList(partitionList)) { return Result.buildSuc(); } - Result> metricsResult = partitionMetricService.collectPartitionsMetricsFromKafka(clusterPhyId, topicName, metricsNames); - if (metricsResult.failed()) { - // only log the error, do not return a failure directly - log.error( - "method=getTopicPartitions||clusterPhyId={}||topicName={}||result={}||msg=get metrics from es failed", - clusterPhyId, topicName, metricsResult - ); - } - - // convert to a map Map metricsMap = new HashMap<>(); - if (metricsResult.hasData()) { - for (PartitionMetrics metrics: metricsResult.getData()) { - metricsMap.put(metrics.getPartitionId(), metrics); - } + ApiCallThreadPoolService.runnableTask( + String.format("clusterPhyId=%d||topicName=%s||method=getTopicPartitions", clusterPhyId, topicName), + ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTime), + () -> { + Result> metricsResult = partitionMetricService.collectPartitionsMetricsFromKafka(clusterPhyId, topicName, metricsNames); + if (metricsResult.failed()) { + // only log the error, do not return a failure directly + LOGGER.error( + "method=getTopicPartitions||clusterPhyId={}||topicName={}||result={}||msg=get metrics from kafka failed", + clusterPhyId, topicName, metricsResult + ); + } + + for (PartitionMetrics metrics: metricsResult.getData()) { + metricsMap.put(metrics.getPartitionId(), metrics); + } + } + ); + boolean finished = ApiCallThreadPoolService.waitResultAndReturnFinished(1); + + if (!finished && metricsMap.isEmpty()) { + // not finished -> log it + LOGGER.error("method=getTopicPartitions||clusterPhyId={}||topicName={}||msg=get metrics from kafka failed", clusterPhyId, topicName); } List voList = new ArrayList<>(); @@ -336,7 +282,7 @@ public Result getTopicBrokersPartitionsSummary( // broker statistics vo.setBrokerCount(brokerMap.size()); - vo.setLiveBrokerCount((int)brokerMap.values().stream().filter(elem -> elem.alive()).count()); + vo.setLiveBrokerCount((int)brokerMap.values().stream().filter(Broker::alive).count()); vo.setDeadBrokerCount(vo.getBrokerCount()
- vo.getLiveBrokerCount()); // partition statistics @@ -360,13 +306,19 @@ public Result getTopicBrokersPartitionsSummary( @Override public PaginationResult pagingTopicGroupsOverview(Long clusterPhyId, String topicName, String searchGroupName, PaginationBaseDTO dto) { + long startTimeUnitMs = System.currentTimeMillis(); + PaginationResult paginationResult = groupService.pagingGroupMembers(clusterPhyId, topicName, "", "", searchGroupName, dto); if (!paginationResult.hasData()) { return PaginationResult.buildSuc(new ArrayList<>(), paginationResult); } - List groupTopicVOList = groupManager.getGroupTopicOverviewVOList(clusterPhyId, paginationResult.getData().getBizData()); + List groupTopicVOList = groupManager.getGroupTopicOverviewVOList( + clusterPhyId, + paginationResult.getData().getBizData(), + ksConfigUtils.getApiCallLeftTimeUnitMs(System.currentTimeMillis() - startTimeUnitMs) // timeout + ); return PaginationResult.buildSuc(groupTopicVOList, paginationResult); } @@ -386,11 +338,8 @@ private boolean checkIfIgnore(ConsumerRecord consumerRecord, Str // ignore return true; } - if (filterValue != null && consumerRecord.value() != null && !consumerRecord.value().contains(filterValue)) { - return true; - } - return false; + return (filterValue != null && consumerRecord.value() != null && !consumerRecord.value().contains(filterValue)); } private TopicBrokerSingleVO getTopicBrokerSingle(Long clusterPhyId, @@ -450,4 +399,90 @@ private Properties generateClientProperties(ClusterPhy clusterPhy, Integer maxPo props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Math.max(2, Math.min(5, maxPollRecords))); return props; } + + private List getTopicMessages(ClusterPhy clusterPhy, + String topicName, + Map beginOffsetsMap, + Map endOffsetsMap, + long startTime, + TopicRecordDTO dto) throws AdminOperateException { + List voList = new ArrayList<>(); + + try (KafkaConsumer kafkaConsumer = new KafkaConsumer<>(this.generateClientProperties(clusterPhy, dto.getMaxRecords()))) { + // seek to the specified position + long maxMessage = this.assignAndSeekToSpecifiedOffset(kafkaConsumer, beginOffsetsMap, endOffsetsMap, dto); + + // KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS is subtracted here because each poll takes time; without the subtraction a poll could finish past the requested deadline + while (System.currentTimeMillis() - startTime <= dto.getPullTimeoutUnitMs() && voList.size() < maxMessage) { + ConsumerRecords consumerRecords = kafkaConsumer.poll(Duration.ofMillis(KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS)); + for (ConsumerRecord consumerRecord : consumerRecords) { + if (this.checkIfIgnore(consumerRecord, dto.getFilterKey(), dto.getFilterValue())) { + continue; + } + + voList.add(TopicVOConverter.convert2TopicRecordVO(topicName, consumerRecord)); + if (voList.size() >= dto.getMaxRecords()) { + break; + } + } + + // return on timeout + if (System.currentTimeMillis() - startTime + KafkaConstant.POLL_ONCE_TIMEOUT_UNIT_MS > dto.getPullTimeoutUnitMs() + || voList.size() > dto.getMaxRecords()) { + break; + } + } + + return voList; + } catch (Exception e) { + LOGGER.error("method=getTopicMessages||clusterPhyId={}||topicName={}||param={}||errMsg=exception", clusterPhy.getId(), topicName, dto, e); + + throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED); + } + } + + private long assignAndSeekToSpecifiedOffset(KafkaConsumer kafkaConsumer, + Map beginOffsetsMap, + Map endOffsetsMap, + TopicRecordDTO dto) { + List partitionList = new ArrayList<>(); + long maxMessage = 0;
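+ // Partitions with no data (begin == end) are skipped below; the remainder are summed so the fetch loop is bounded by what is actually available, capped at maxRecords.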
+ for (Map.Entry entry : endOffsetsMap.entrySet()) { + long begin = beginOffsetsMap.get(entry.getKey()); + long end = entry.getValue(); + if (begin == end){ + continue; + } + maxMessage += end - begin; + partitionList.add(entry.getKey()); + } + maxMessage = Math.min(maxMessage, dto.getMaxRecords()); + kafkaConsumer.assign(partitionList); + + Map partitionOffsetAndTimestampMap = new HashMap<>(); + // get each partition's offset for the specified time (when querying messages from a given start time) + if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) { + Map timestampsToSearch = new HashMap<>(); + partitionList.forEach(topicPartition -> timestampsToSearch.put(topicPartition, dto.getStartTimestampUnitMs())); + partitionOffsetAndTimestampMap = kafkaConsumer.offsetsForTimes(timestampsToSearch); + } + + for (TopicPartition partition : partitionList) { + if (OffsetTypeEnum.EARLIEST.getResetType() == dto.getFilterOffsetReset()) { + // reset to earliest + kafkaConsumer.seek(partition, beginOffsetsMap.get(partition)); + } else if (OffsetTypeEnum.PRECISE_TIMESTAMP.getResetType() == dto.getFilterOffsetReset()) { + // reset to the specified time + kafkaConsumer.seek(partition, partitionOffsetAndTimestampMap.get(partition).offset()); + } else if (OffsetTypeEnum.PRECISE_OFFSET.getResetType() == dto.getFilterOffsetReset()) { + // reset to the specified offset + + } else { + // default: reset to latest + kafkaConsumer.seek(partition, Math.max(beginOffsetsMap.get(partition), endOffsetsMap.get(partition) - dto.getMaxRecords())); + } + } + + return maxMessage; + } } diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/version/impl/VersionControlManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/version/impl/VersionControlManagerImpl.java index 740974d70..a8b7da8f4 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/version/impl/VersionControlManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/version/impl/VersionControlManagerImpl.java @@ -35,6 +35,8 @@ import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.GroupMetricVersionItems.*; import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems.*; import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.MirrorMakerMetricVersionItems.*; +import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectClusterMetricVersionItems.*; +import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.connect.ConnectorMetricVersionItems.*; import static com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.ZookeeperMetricVersionItems.*; @Service @@ -123,6 +125,42 @@ public void init(){ defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_RECORD_COUNT, true)); defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_RECORD_RATE, true)); defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_MIRROR_MAKER.getCode(), MIRROR_MAKER_METRIC_REPLICATION_LATENCY_MS_MAX, true)); + + // Connect Cluster + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_CONNECTOR_COUNT, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_TASK_COUNT, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_ATTEMPTS_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_PERCENTAGE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(),
CONNECT_CLUSTER_METRIC_CONNECTOR_STARTUP_FAILURE_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_TASK_STARTUP_ATTEMPTS_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_PERCENTAGE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_TASK_STARTUP_FAILURE_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CLUSTER.getCode(), CONNECT_CLUSTER_METRIC_COLLECT_COST_TIME, true)); + + + // Connect Connector + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_HEALTH_STATE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_HEALTH_CHECK_PASSED, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_HEALTH_CHECK_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_COLLECT_COST_TIME, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_CONNECTOR_TOTAL_TASK_COUNT, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_CONNECTOR_RUNNING_TASK_COUNT, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_CONNECTOR_FAILED_TASK_COUNT, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_ACTIVE_COUNT, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_POLL_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_WRITE_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_ACTIVE_COUNT, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_READ_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_SEND_TOTAL, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_FAILURES, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_DEADLETTERQUEUE_PRODUCE_REQUESTS, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_TOTAL_ERRORS_LOGGED, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_POLL_RATE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SOURCE_RECORD_WRITE_RATE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_READ_RATE, true)); + defaultMetrics.add(new UserMetricConfig(METRIC_CONNECT_CONNECTOR.getCode(), CONNECTOR_METRIC_SINK_RECORD_SEND_RATE, true)); + + } @Autowired diff --git a/km-collector/pom.xml b/km-collector/pom.xml index e1fc023d4..af1d489b4 100644 --- a/km-collector/pom.xml +++ b/km-collector/pom.xml @@ -5,13 +5,13 @@ 4.0.0 com.xiaojukeji.kafka km-collector - ${km.revision} + ${revision} jar km com.xiaojukeji.kafka - ${km.revision} + ${revision} diff --git a/km-common/pom.xml b/km-common/pom.xml 
index 63c94a483..17e5619a9 100644 --- a/km-common/pom.xml +++ b/km-common/pom.xml @@ -5,13 +5,13 @@ 4.0.0 com.xiaojukeji.kafka km-common - ${km.revision} + ${revision} jar km com.xiaojukeji.kafka - ${km.revision} + ${revision} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/connect/connector/ConnectorCreateDTO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/connect/connector/ConnectorCreateDTO.java index 46639f0e4..038e617f4 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/connect/connector/ConnectorCreateDTO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/connect/connector/ConnectorCreateDTO.java @@ -1,12 +1,12 @@ package com.xiaojukeji.know.streaming.km.common.bean.dto.connect.connector; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.ClusterConnectorDTO; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; import lombok.Data; import lombok.NoArgsConstructor; -import javax.validation.constraints.NotNull; import java.util.Properties; /** * @date 2022-10-17 */ @Data +@JsonIgnoreProperties(ignoreUnknown = true) @NoArgsConstructor @ApiModel(description = "Create Connector") public class ConnectorCreateDTO extends ClusterConnectorDTO { - @NotNull(message = "configs must not be null") - @ApiModelProperty(value = "Configuration", example = "") + @Deprecated + @ApiModelProperty(value = "Configuration; prefer the config field, this field will be removed in version 3.5.0", example = "") protected Properties configs; - public ConnectorCreateDTO(Long connectClusterId, String connectorName, Properties configs) { + @ApiModelProperty(value = "Configuration", example = "") + protected Properties config; + + public ConnectorCreateDTO(Long connectClusterId, String connectorName, Properties config) { super(connectClusterId, connectorName); - this.configs = configs; + this.config = config; + } + + public Properties getSuitableConfig() {
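+ // Prefer the new config field and fall back to the deprecated configs, so requests from older clients keep working.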
+ return config != null ? config : configs; + } } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/connect/mm2/MirrorMakerCreateDTO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/connect/mm2/MirrorMakerCreateDTO.java index fa9867ecb..c2a60dacc 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/connect/mm2/MirrorMakerCreateDTO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/connect/mm2/MirrorMakerCreateDTO.java @@ -40,7 +40,7 @@ public void unifyData(Long sourceKafkaClusterId, String sourceBootstrapServers, targetKafkaProps = new Properties(); } - this.unifyData(this.configs, sourceKafkaClusterId, sourceBootstrapServers, sourceKafkaProps, targetKafkaClusterId, targetBootstrapServers, targetKafkaProps); + this.unifyData(this.getSuitableConfig(), sourceKafkaClusterId, sourceBootstrapServers, sourceKafkaProps, targetKafkaClusterId, targetBootstrapServers, targetKafkaProps); if (heartbeatConnectorConfigs != null) { this.unifyData(this.heartbeatConnectorConfigs, sourceKafkaClusterId, sourceBootstrapServers, sourceKafkaProps, targetKafkaClusterId, targetBootstrapServers, targetKafkaProps); diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/group/GroupOffsetDeleteDTO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/group/GroupOffsetDeleteDTO.java new file mode 100644 index 000000000..03cb61c62 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/dto/group/GroupOffsetDeleteDTO.java @@ -0,0 +1,40 @@ +package com.xiaojukeji.know.streaming.km.common.bean.dto.group; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.xiaojukeji.know.streaming.km.common.bean.dto.BaseDTO; +import io.swagger.annotations.ApiModelProperty; +import lombok.Data; + +import javax.validation.constraints.Min; +import javax.validation.constraints.NotBlank; +import javax.validation.constraints.NotNull; + +/** + * Delete offsets + * @author zengqiao + * @date 19/4/8 + */ +@Data +@JsonIgnoreProperties(ignoreUnknown = true) +public class GroupOffsetDeleteDTO extends BaseDTO { + @Min(value = 0, message = "clusterPhyId must not be null or less than 0") + @ApiModelProperty(value = "Cluster ID", example = "6") + private Long clusterPhyId; + + @NotBlank(message = "groupName must not be blank") + @ApiModelProperty(value = "Consumer group name", example = "g-know-streaming") + private String groupName; + + @ApiModelProperty(value = "Topic name; required when deleting by topic dimension", example = "know-streaming") + protected String topicName; + + @ApiModelProperty(value = "Partition ID; required when deleting by partition dimension") + private Integer partitionId; + + /** + * @see com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum + */ + @NotNull(message = "deleteType must not be null") + @ApiModelProperty(value = "Delete type", example = "0: group dimension, 1: topic dimension, 2: partition dimension") + private Integer deleteType; +} \ No newline at end of file diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java index 752aade03..35fa1f5a9 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/broker/Broker.java @@ -4,6 +4,8 @@ import com.alibaba.fastjson.TypeReference; import com.xiaojukeji.know.streaming.km.common.bean.entity.common.IpPortData; import
com.xiaojukeji.know.streaming.km.common.bean.po.broker.BrokerPO; +import com.xiaojukeji.know.streaming.km.common.constant.Constant; +import com.xiaojukeji.know.streaming.km.common.enums.jmx.JmxEnum; import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; import lombok.AllArgsConstructor; import lombok.Data; @@ -71,10 +73,10 @@ public static Broker buildFrom(Long clusterPhyId, Node node, Long startTimestamp metadata.setBrokerId(node.id()); metadata.setHost(node.host()); metadata.setPort(node.port()); - metadata.setJmxPort(-1); + metadata.setJmxPort(JmxEnum.UNKNOWN.getPort()); metadata.setStartTimestamp(startTimestamp); metadata.setRack(node.rack()); - metadata.setStatus(1); + metadata.setStatus(Constant.ALIVE); return metadata; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxAuthConfig.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxAuthConfig.java new file mode 100644 index 000000000..02fec6b4e --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxAuthConfig.java @@ -0,0 +1,29 @@ +package com.xiaojukeji.know.streaming.km.common.bean.entity.config; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; +import lombok.Data; + +import java.io.Serializable; + +/** + * @author zengqiao + * @date 23/05/19 + */ +@Data +@ApiModel(description = "JMX configuration") +public class JmxAuthConfig implements Serializable { + @ApiModelProperty(value="Maximum connections", example = "100") + protected Integer maxConn; + + @ApiModelProperty(value="Whether SSL is enabled; if enabled, username and token must be non-empty", example = "false") + protected Boolean openSSL; + + @ApiModelProperty(value="Username when SSL is enabled", example = "Ks-Km") + protected String username; + + @ApiModelProperty(value="Token when SSL is enabled", example = "KsKmCCY19") + protected String token; +} + + diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxConfig.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxConfig.java index 87607c1f9..7620e960e 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxConfig.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/config/JmxConfig.java @@ -1,10 +1,12 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.config; +import com.xiaojukeji.know.streaming.km.common.bean.entity.jmx.ServerIdJmxPort; +import com.xiaojukeji.know.streaming.km.common.enums.jmx.JmxEnum; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; import lombok.Data; -import java.io.Serializable; +import java.util.List; /** * @author zengqiao */ @Data @ApiModel(description = "JMX configuration") -public class JmxConfig implements Serializable { - @ApiModelProperty(value="JMX port", example = "8099") +public class JmxConfig extends JmxAuthConfig { + @ApiModelProperty(value="JMX port; the lowest-priority port", example = "8099") private Integer jmxPort; - @ApiModelProperty(value="Maximum connections", example = "100") - private Integer maxConn; + @ApiModelProperty(value="Which endpoint network to use", example = "EXTERNAL") + private String useWhichEndpoint; - @ApiModelProperty(value="Whether SSL is enabled; if enabled, username and token must be non-empty", example = "false") - private Boolean openSSL; + @ApiModelProperty(value="Per-server JMX port overrides; the highest-priority ports", example = "") + private List specifiedJmxPortList;
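+ // Port precedence (see getFinallyJmxPort below): a per-server entry in specifiedJmxPortList wins, then the port reported by cluster metadata, then this jmxPort; NOT_OPEN / UNKNOWN metadata values fall through accordingly.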
- @ApiModelProperty(value="Username when SSL is enabled", example = "Ks-Km") - private String username; + /** + * Select the final JMX port + * @param serverId server ID + * @param metadataJmxPort the JMX port KS obtained from metadata + */ + public Integer getFinallyJmxPort(String serverId, Integer metadataJmxPort) { + if (specifiedJmxPortList == null || specifiedJmxPortList.isEmpty()) { + // with no per-server override, prefer the metadata JMX port if present, otherwise use the configured jmxPort + return this.selectJmxPort(jmxPort, metadataJmxPort); + } - @ApiModelProperty(value="Token when SSL is enabled", example = "KsKmCCY19") - private String token; + // a per-server override is configured + for (ServerIdJmxPort serverIdJmxPort: specifiedJmxPortList) { + if (serverId.equals(serverIdJmxPort.getServerId()) && serverIdJmxPort.getJmxPort() != null) { + // this server has a specific JMX port configured, so use it + return serverIdJmxPort.getJmxPort(); + } + } - @ApiModelProperty(value="Which endpoint network to use", example = "EXTERNAL") - private String useWhichEndpoint; + return this.selectJmxPort(jmxPort, metadataJmxPort); + } + + /** + * Select the final JMX port + * @param serverId serverId + */ + public Integer getFinallyJmxPort(String serverId) { + return this.getFinallyJmxPort(serverId, null); + } + + /** + * Select a JMX port + * @param feJmxPort the JMX port configured on the front-end page + * @param metadataJmxPort the JMX port KS obtained from metadata + */ + private Integer selectJmxPort(Integer feJmxPort, Integer metadataJmxPort) { + if (metadataJmxPort == null) { + return feJmxPort != null ? feJmxPort : JmxEnum.NOT_OPEN.getPort(); + } + + if (JmxEnum.NOT_OPEN.getPort().equals(metadataJmxPort)) { + // if metadata says JMX is not open, return NOT_OPEN directly + return JmxEnum.NOT_OPEN.getPort(); + } + + if (JmxEnum.UNKNOWN.getPort().equals(metadataJmxPort)) { + // if metadata says the port is unknown, return feJmxPort, or NOT_OPEN if absent + return feJmxPort != null ? feJmxPort : JmxEnum.NOT_OPEN.getPort(); + } + + // otherwise, return metadataJmxPort + return metadataJmxPort; + } } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/connect/ConnectCluster.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/connect/ConnectCluster.java index a4c67bbc3..43a6ce217 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/connect/ConnectCluster.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/connect/ConnectCluster.java @@ -1,6 +1,7 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.connect; import com.xiaojukeji.know.streaming.km.common.bean.entity.EntityIdInterface; +import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import lombok.Data; import java.io.Serializable; @@ -54,6 +55,22 @@ public class ConnectCluster implements Serializable, Comparable, */ private String clusterUrl; + public String getSuitableRequestUrl() { + // prefer the URL entered by the user + String suitableRequestUrl = this.clusterUrl; + if (ValidateUtils.isBlank(suitableRequestUrl)) { + // if the user left it blank, use the URL from metadata + suitableRequestUrl = this.memberLeaderUrl; + } + + // strip the trailing slash from the URL + if (suitableRequestUrl.length() > 0 && suitableRequestUrl.charAt(suitableRequestUrl.length() - 1) == '/') { + return suitableRequestUrl.substring(0, suitableRequestUrl.length() - 1); + } + + return suitableRequestUrl; + } + @Override public int compareTo(ConnectCluster connectCluster) { return this.id.compareTo(connectCluster.getId()); diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/jmx/ServerIdJmxPort.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/jmx/ServerIdJmxPort.java new file mode 100644 index 000000000..df27cb87d --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/jmx/ServerIdJmxPort.java @@ -0,0 +1,25 @@ +package
com.xiaojukeji.know.streaming.km.common.bean.entity.jmx; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.io.Serializable; + +/** + * @author didi + */ +@Data +@NoArgsConstructor +@AllArgsConstructor +public class ServerIdJmxPort implements Serializable { + /** + * serverID + */ + private String serverId; + + /** + * JMX port + */ + private Integer jmxPort; +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/BaseMetrics.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/BaseMetrics.java index c6ce63512..890a539da 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/BaseMetrics.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/BaseMetrics.java @@ -27,6 +27,10 @@ public abstract class BaseMetrics implements Serializable { protected Map metrics = new ConcurrentHashMap<>(); public void putMetric(String key, Float value){ + if (value == null || key == null) { + return; + } + metrics.put(key, value); } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupParam.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupParam.java new file mode 100644 index 000000000..3c9360a50 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupParam.java @@ -0,0 +1,16 @@ +package com.xiaojukeji.know.streaming.km.common.bean.entity.param.group; + +import com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class DeleteGroupParam extends GroupParam { + protected DeleteGroupTypeEnum deleteGroupTypeEnum; + + public DeleteGroupParam(Long clusterPhyId, String groupName, DeleteGroupTypeEnum deleteGroupTypeEnum) { + super(clusterPhyId, groupName); + this.deleteGroupTypeEnum = deleteGroupTypeEnum; + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupTopicParam.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupTopicParam.java new file mode 100644 index 000000000..c72fd97c9 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupTopicParam.java @@ -0,0 +1,16 @@ +package com.xiaojukeji.know.streaming.km.common.bean.entity.param.group; + +import com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class DeleteGroupTopicParam extends DeleteGroupParam { + protected String topicName; + + public DeleteGroupTopicParam(Long clusterPhyId, String groupName, DeleteGroupTypeEnum deleteGroupTypeEnum, String topicName) { + super(clusterPhyId, groupName, deleteGroupTypeEnum); + this.topicName = topicName; + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupTopicPartitionParam.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupTopicPartitionParam.java new file mode 100644 index 000000000..e2f049cb5 --- /dev/null +++
b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/DeleteGroupTopicPartitionParam.java @@ -0,0 +1,16 @@ +package com.xiaojukeji.know.streaming.km.common.bean.entity.param.group; + +import com.xiaojukeji.know.streaming.km.common.enums.group.DeleteGroupTypeEnum; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class DeleteGroupTopicPartitionParam extends DeleteGroupTopicParam { + protected Integer partitionId; + + public DeleteGroupTopicPartitionParam(Long clusterPhyId, String groupName, DeleteGroupTypeEnum deleteGroupTypeEnum, String topicName, Integer partitionId) { + super(clusterPhyId, groupName, deleteGroupTypeEnum, topicName); + this.partitionId = partitionId; + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/GroupParam.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/GroupParam.java index d7bf15f84..4f7552d93 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/GroupParam.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/group/GroupParam.java @@ -1,13 +1,11 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.param.group; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam; -import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; @Data @NoArgsConstructor -@AllArgsConstructor public class GroupParam extends ClusterPhyParam { protected String groupName; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/topic/TopicTruncateParam.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/topic/TopicTruncateParam.java new file mode 100644 index 000000000..8186b3e5f --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/param/topic/TopicTruncateParam.java @@ -0,0 +1,29 @@ +package com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic; + +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.cluster.ClusterPhyParam; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@AllArgsConstructor +public class TopicTruncateParam extends ClusterPhyParam { + protected String topicName; + protected long offset; + + public TopicTruncateParam(Long clusterPhyId, String topicName, long offset) { + super(clusterPhyId); + this.topicName = topicName; + this.offset = offset; + } + + @Override + public String toString() { + return "TopicTruncateParam{" + + "clusterPhyId=" + clusterPhyId + + ", topicName='" + topicName + '\'' + + ", offset='" + offset + '\'' + + '}'; + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/MonitorCmdData.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/MonitorCmdData.java index 7e2a10f41..3c862cece 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/MonitorCmdData.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/MonitorCmdData.java @@ -25,15 +25,15 @@ public class MonitorCmdData extends BaseFourLetterWordCmdData { private Float zkAvgLatency; private Float zkMaxLatency; private Float zkMinLatency
- private Long zkPacketsReceived; - private Long zkPacketsSent; - private Long zkNumAliveConnections; - private Long zkOutstandingRequests; + private Float zkPacketsReceived; + private Float zkPacketsSent; + private Float zkNumAliveConnections; + private Float zkOutstandingRequests; private String zkServerState; - private Long zkZnodeCount; - private Long zkWatchCount; - private Long zkEphemeralsCount; - private Long zkApproximateDataSize; - private Long zkOpenFileDescriptorCount; - private Long zkMaxFileDescriptorCount; + private Float zkZnodeCount; + private Float zkWatchCount; + private Float zkEphemeralsCount; + private Float zkApproximateDataSize; + private Float zkOpenFileDescriptorCount; + private Float zkMaxFileDescriptorCount; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/ServerCmdData.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/ServerCmdData.java index 0bd9e0a48..350885d33 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/ServerCmdData.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/ServerCmdData.java @@ -20,11 +20,11 @@ public class ServerCmdData extends BaseFourLetterWordCmdData { private Float zkAvgLatency; private Float zkMaxLatency; private Float zkMinLatency; - private Long zkPacketsReceived; - private Long zkPacketsSent; - private Long zkNumAliveConnections; - private Long zkOutstandingRequests; + private Float zkPacketsReceived; + private Float zkPacketsSent; + private Float zkNumAliveConnections; + private Float zkOutstandingRequests; private String zkServerState; - private Long zkZnodeCount; + private Float zkZnodeCount; private Long zkZxid; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/parser/MonitorCmdDataParser.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/parser/MonitorCmdDataParser.java index 8c3e6958b..888f0beab 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/parser/MonitorCmdDataParser.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/parser/MonitorCmdDataParser.java @@ -51,7 +51,7 @@ public MonitorCmdData parseAndInitData(Long clusterPhyId, String host, int port, } MonitorCmdData monitorCmdData = new MonitorCmdData(); - dataMap.entrySet().stream().forEach(elem -> { + dataMap.entrySet().forEach(elem -> { try { switch (elem.getKey()) { case "zk_version": @@ -67,37 +67,37 @@ public MonitorCmdData parseAndInitData(Long clusterPhyId, String host, int port, monitorCmdData.setZkMinLatency(ConvertUtil.string2Float(elem.getValue())); break; case "zk_packets_received": - monitorCmdData.setZkPacketsReceived(Long.valueOf(elem.getValue())); + monitorCmdData.setZkPacketsReceived(ConvertUtil.string2Float(elem.getValue())); break; case "zk_packets_sent": - monitorCmdData.setZkPacketsSent(Long.valueOf(elem.getValue())); + monitorCmdData.setZkPacketsSent(ConvertUtil.string2Float(elem.getValue())); break; case "zk_num_alive_connections": - monitorCmdData.setZkNumAliveConnections(Long.valueOf(elem.getValue())); + monitorCmdData.setZkNumAliveConnections(ConvertUtil.string2Float(elem.getValue())); break; case "zk_outstanding_requests": - 
monitorCmdData.setZkOutstandingRequests(Long.valueOf(elem.getValue())); + monitorCmdData.setZkOutstandingRequests(ConvertUtil.string2Float(elem.getValue())); break; case "zk_server_state": monitorCmdData.setZkServerState(elem.getValue()); break; case "zk_znode_count": - monitorCmdData.setZkZnodeCount(Long.valueOf(elem.getValue())); + monitorCmdData.setZkZnodeCount(ConvertUtil.string2Float(elem.getValue())); break; case "zk_watch_count": - monitorCmdData.setZkWatchCount(Long.valueOf(elem.getValue())); + monitorCmdData.setZkWatchCount(ConvertUtil.string2Float(elem.getValue())); break; case "zk_ephemerals_count": - monitorCmdData.setZkEphemeralsCount(Long.valueOf(elem.getValue())); + monitorCmdData.setZkEphemeralsCount(ConvertUtil.string2Float(elem.getValue())); break; case "zk_approximate_data_size": - monitorCmdData.setZkApproximateDataSize(Long.valueOf(elem.getValue())); + monitorCmdData.setZkApproximateDataSize(ConvertUtil.string2Float(elem.getValue())); break; case "zk_open_file_descriptor_count": - monitorCmdData.setZkOpenFileDescriptorCount(Long.valueOf(elem.getValue())); + monitorCmdData.setZkOpenFileDescriptorCount(ConvertUtil.string2Float(elem.getValue())); break; case "zk_max_file_descriptor_count": - monitorCmdData.setZkMaxFileDescriptorCount(Long.valueOf(elem.getValue())); + monitorCmdData.setZkMaxFileDescriptorCount(ConvertUtil.string2Float(elem.getValue())); break; case "Proposal sizes last/min/max": case "zk_fsync_threshold_exceed_count": diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/parser/ServerCmdDataParser.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/parser/ServerCmdDataParser.java index e16fbdb0d..0355272d8 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/parser/ServerCmdDataParser.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/zookeeper/fourletterword/parser/ServerCmdDataParser.java @@ -46,7 +46,7 @@ public ServerCmdData parseAndInitData(Long clusterPhyId, String host, int port, } ServerCmdData serverCmdData = new ServerCmdData(); - dataMap.entrySet().stream().forEach(elem -> { + dataMap.entrySet().forEach(elem -> { try { switch (elem.getKey()) { case "Zookeeper version": @@ -59,22 +59,22 @@ public ServerCmdData parseAndInitData(Long clusterPhyId, String host, int port, serverCmdData.setZkMaxLatency(ConvertUtil.string2Float(data[2])); break; case "Received": - serverCmdData.setZkPacketsReceived(Long.valueOf(elem.getValue())); + serverCmdData.setZkPacketsReceived(ConvertUtil.string2Float(elem.getValue())); break; case "Sent": - serverCmdData.setZkPacketsSent(Long.valueOf(elem.getValue())); + serverCmdData.setZkPacketsSent(ConvertUtil.string2Float(elem.getValue())); break; case "Connections": - serverCmdData.setZkNumAliveConnections(Long.valueOf(elem.getValue())); + serverCmdData.setZkNumAliveConnections(ConvertUtil.string2Float(elem.getValue())); break; case "Outstanding": - serverCmdData.setZkOutstandingRequests(Long.valueOf(elem.getValue())); + serverCmdData.setZkOutstandingRequests(ConvertUtil.string2Float(elem.getValue())); break; case "Mode": serverCmdData.setZkServerState(elem.getValue()); break; case "Node count": - serverCmdData.setZkZnodeCount(Long.valueOf(elem.getValue())); + serverCmdData.setZkZnodeCount(ConvertUtil.string2Float(elem.getValue())); break; case "Zxid": 
serverCmdData.setZkZxid(Long.parseUnsignedLong(elem.getValue().trim().substring(2), 16)); diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/connect/ConnectClusterPO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/connect/ConnectClusterPO.java index f0a364e61..9175a6c10 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/connect/ConnectClusterPO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/connect/ConnectClusterPO.java @@ -29,7 +29,7 @@ public class ConnectClusterPO extends BasePO { private Integer state; /** - * Cluster URL + * Cluster URL entered by the user */ private String clusterUrl; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/group/GroupMemberPO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/group/GroupMemberPO.java index 7992ac17b..432f061c8 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/group/GroupMemberPO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/group/GroupMemberPO.java @@ -7,6 +7,7 @@ import lombok.NoArgsConstructor; import java.util.Date; +import java.util.Objects; @Data @NoArgsConstructor @@ -37,4 +38,16 @@ public GroupMemberPO(Long clusterPhyId, String topicName, String groupName, Stri this.memberCount = memberCount; this.updateTime = updateTime; } + + public boolean equal2GroupMemberPO(GroupMemberPO that) { + if (that == null) { + return false; + } + + return Objects.equals(clusterPhyId, that.clusterPhyId) + && Objects.equals(topicName, that.topicName) + && Objects.equals(groupName, that.groupName) + && Objects.equals(state, that.state) + && Objects.equals(memberCount, that.memberCount); + } } \ No newline at end of file diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/group/GroupPO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/group/GroupPO.java index 49ac5bf30..53b925d40 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/group/GroupPO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/po/group/GroupPO.java @@ -9,6 +9,8 @@ import lombok.Data; import lombok.NoArgsConstructor; +import java.util.Objects; + @Data @NoArgsConstructor @@ -58,4 +60,18 @@ public class GroupPO extends BasePO { */ private int coordinatorId; + public boolean equal2GroupPO(GroupPO groupPO) { + if (groupPO == null) { + return false; + } + + return coordinatorId == groupPO.coordinatorId + && Objects.equals(clusterPhyId, groupPO.clusterPhyId) + && Objects.equals(type, groupPO.type) + && Objects.equals(name, groupPO.name) + && Objects.equals(state, groupPO.state) + && Objects.equals(memberCount, groupPO.memberCount) + && Objects.equals(topicMembers, groupPO.topicMembers) + && Objects.equals(partitionAssignor, groupPO.partitionAssignor); + } } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/cluster/mm2/MirrorMakerBaseStateVO.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/cluster/mm2/MirrorMakerBaseStateVO.java index 04aed0356..33ae15daf 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/cluster/mm2/MirrorMakerBaseStateVO.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/vo/cluster/mm2/MirrorMakerBaseStateVO.java @@ -13,7 +13,6 @@ @Data @ApiModel(description = "Cluster MM2 state information") public class
MirrorMakerBaseStateVO extends BaseVO {
-
     @ApiModelProperty(value = "worker数", example = "1")
     private Integer workerCount;
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/KafkaConstant.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/KafkaConstant.java
index 465f6f8ac..a3f959bf3 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/KafkaConstant.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/KafkaConstant.java
@@ -49,6 +49,8 @@ public class KafkaConstant {

     public static final Map KAFKA_ALL_CONFIG_DEF_MAP = new ConcurrentHashMap<>();

+    public static final Integer TOPICK_TRUNCATE_DEFAULT_OFFSET = -1;
+
     static {
         try {
             KAFKA_ALL_CONFIG_DEF_MAP.putAll(CollectionConverters.asJava(LogConfig$.MODULE$.configKeys()));
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/PaginationConstant.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/PaginationConstant.java
index 9b8def80c..9d3b83558 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/PaginationConstant.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/constant/PaginationConstant.java
@@ -27,5 +27,8 @@ private PaginationConstant() {
     /**
      * groupTopic列表的默认排序规则
      */
-    public static final String DEFAULT_GROUP_TOPIC_SORTED_FIELD = "topicName";
+    public static final String DEFAULT_GROUP_TOPIC_SORTED_FIELD = "topicName";
+
+    public static final String TOPIC_RECORDS_TIME_SORTED_FIELD = "timestampUnitMs";
+    public static final String TOPIC_RECORDS_OFFSET_SORTED_FIELD = "offset";
 }
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/GroupConverter.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/GroupConverter.java
index 131bd2432..c203b3dfe 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/GroupConverter.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/GroupConverter.java
@@ -10,6 +10,7 @@
 import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;

 import java.util.ArrayList;
+import java.util.Date;
 import java.util.stream.Collectors;

 /**
@@ -57,6 +58,7 @@ public static GroupPO convert2GroupPO(Group group) {
         po.setTopicMembers(ConvertUtil.obj2Json(group.getTopicMembers()));
         po.setType(group.getType().getCode());
         po.setState(group.getState().getState());
+        po.setUpdateTime(new Date());

         return po;
     }
 }
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/group/DeleteGroupTypeEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/group/DeleteGroupTypeEnum.java
new file mode 100644
index 000000000..ef99344cb
--- /dev/null
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/group/DeleteGroupTypeEnum.java
@@ -0,0 +1,28 @@
+package com.xiaojukeji.know.streaming.km.common.enums.group;
+
+import lombok.Getter;
+
+
+/**
+ * @author wyb
+ * @date 2022/10/11
+ */
+@Getter
+public enum DeleteGroupTypeEnum {
+    UNKNOWN(-1, "Unknown"),
+
+    GROUP(0, "Group维度"),
+
+    GROUP_TOPIC(1, "GroupTopic维度"),
+
+    GROUP_TOPIC_PARTITION(2, "GroupTopicPartition维度");
+
+    private final Integer code;
+
+    private final String msg;
+
+    DeleteGroupTypeEnum(Integer code, String msg) {
+        this.code = code;
+        this.msg = msg;
+    }
+}
diff --git
a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/jmx/JmxEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/jmx/JmxEnum.java new file mode 100644 index 000000000..314402e84 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/jmx/JmxEnum.java @@ -0,0 +1,20 @@ +package com.xiaojukeji.know.streaming.km.common.enums.jmx; + +import lombok.Getter; + +@Getter +public enum JmxEnum { + NOT_OPEN(-1, "未开启JMX端口"), + + UNKNOWN(-2, "JMX端口未知"), + + ; + + private final Integer port; + private final String message; + + JmxEnum(Integer port, String message) { + this.port = port; + this.message = message; + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/operaterecord/OperationEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/operaterecord/OperationEnum.java index 302cb38b0..da25bc14f 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/operaterecord/OperationEnum.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/operaterecord/OperationEnum.java @@ -32,6 +32,8 @@ public enum OperationEnum { RESTART(11, "重启"), + TRUNCATE(12, "清空"), + ; OperationEnum(int code, String desc) { diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionItemTypeEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionItemTypeEnum.java index 7bcf3234b..d11c3bfa2 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionItemTypeEnum.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionItemTypeEnum.java @@ -41,6 +41,8 @@ public enum VersionItemTypeEnum { SERVICE_OP_REASSIGNMENT(330, "service_reassign_operation"), + SERVICE_OP_GROUP(340, "service_group_operation"), + SERVICE_OP_CONNECT_CLUSTER(400, "service_connect_cluster_operation"), SERVICE_OP_CONNECT_CONNECTOR(401, "service_connect_connector_operation"), SERVICE_OP_CONNECT_PLUGIN(402, "service_connect_plugin_operation"), diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/zookeeper/ZKRoleEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/zookeeper/ZKRoleEnum.java index fd379dc84..420ccc36b 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/zookeeper/ZKRoleEnum.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/zookeeper/ZKRoleEnum.java @@ -10,6 +10,8 @@ public enum ZKRoleEnum { OBSERVER("observer"), + STANDALONE("standalone"), + UNKNOWN("unknown"), ; diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/jmx/JmxConnectorWrap.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/jmx/JmxConnectorWrap.java index d9cfb0821..366fe9478 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/jmx/JmxConnectorWrap.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/jmx/JmxConnectorWrap.java @@ -1,6 +1,8 @@ package com.xiaojukeji.know.streaming.km.common.jmx; +import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxAuthConfig; import com.xiaojukeji.know.streaming.km.common.bean.entity.config.JmxConfig; +import com.xiaojukeji.know.streaming.km.common.enums.jmx.JmxEnum; import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils; import 
com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import org.slf4j.Logger; @@ -33,26 +35,26 @@ public class JmxConnectorWrap { private final Long brokerStartupTime; - private final String host; + private final String jmxHost; - private final Integer port; + private final Integer jmxPort; private JMXConnector jmxConnector; private final AtomicInteger atomicInteger; - private JmxConfig jmxConfig; + private JmxAuthConfig jmxConfig; - public JmxConnectorWrap(String clientLogIdent, Long brokerStartupTime, String host, Integer port, JmxConfig jmxConfig) { - this.clientLogIdent=clientLogIdent; - this.brokerStartupTime = brokerStartupTime; - this.host = host; + public JmxConnectorWrap(String clientLogIdent, Long brokerStartupTime, String jmxHost, Integer jmxPort, JmxAuthConfig jmxConfig) { + LOGGER.info( + "method=JmxConnectorWrap||clientLogIdent={}||brokerStartupTime={}||jmxHost={}||jmxPort={}||jmxConfig={}||msg=start construct JmxWrap.", + clientLogIdent, brokerStartupTime, jmxHost, jmxPort, jmxConfig + ); - if (port == null || port == -1 && jmxConfig.getJmxPort() != null) { - this.port = jmxConfig.getJmxPort(); - } else { - this.port = port; - } + this.clientLogIdent = clientLogIdent; + this.brokerStartupTime = brokerStartupTime; + this.jmxHost = jmxHost; + this.jmxPort = (jmxPort == null? JmxEnum.UNKNOWN.getPort() : jmxPort); this.jmxConfig = jmxConfig; if (ValidateUtils.isNull(this.jmxConfig)) { @@ -61,6 +63,7 @@ public JmxConnectorWrap(String clientLogIdent, Long brokerStartupTime, String ho if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getMaxConn())) { this.jmxConfig.setMaxConn(1000); } + this.atomicInteger = new AtomicInteger(this.jmxConfig.getMaxConn()); } @@ -68,7 +71,7 @@ public boolean checkJmxConnectionAndInitIfNeed() { if (jmxConnector != null) { return true; } - if (port == null || port == -1) { + if (jmxPort == null || jmxPort == -1) { return false; } return createJmxConnector(); @@ -91,7 +94,10 @@ public synchronized void close() { jmxConnector = null; } catch (IOException e) { - LOGGER.warn("close JmxConnector exception, clientLogIdent:{} host:{} port:{}.", clientLogIdent, host, port, e); + LOGGER.error( + "method=close||clientLogIdent={}||jmxHost={}||jmxPort={}||msg=close jmx JmxConnector exception.", + clientLogIdent, jmxHost, jmxPort, e + ); } } @@ -159,7 +165,12 @@ private synchronized boolean createJmxConnector() { if (jmxConnector != null) { return true; } - String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port); + LOGGER.info( + "method=createJmxConnector||clientLogIdent={}||brokerStartupTime={}||jmxHost={}||jmxPort={}||jmxConfig={}||msg=start create jmx connector.", + clientLogIdent, brokerStartupTime, jmxHost, jmxPort, jmxConfig + ); + + String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", jmxHost, jmxPort); try { Map environment = new HashMap(); if (!ValidateUtils.isBlank(this.jmxConfig.getUsername()) && !ValidateUtils.isBlank(this.jmxConfig.getToken())) { @@ -174,12 +185,21 @@ private synchronized boolean createJmxConnector() { } jmxConnector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), environment); - LOGGER.info("JMX connect success, clientLogIdent:{} host:{} port:{}.", clientLogIdent, host, port); + LOGGER.info( + "method=createJmxConnector||clientLogIdent={}||jmxHost={}||jmxPort={}||msg=jmx connect success.", + clientLogIdent, jmxHost, jmxPort + ); return true; } catch (MalformedURLException e) { - LOGGER.error("JMX url exception, clientLogIdent:{} host:{} port:{} jmxUrl:{}", 
clientLogIdent, host, port, jmxUrl, e); + LOGGER.error( + "method=createJmxConnector||clientLogIdent={}||jmxHost={}||jmxPort={}||jmxUrl={}||msg=jmx url exception.", + clientLogIdent, jmxHost, jmxPort, jmxUrl, e + ); } catch (Exception e) { - LOGGER.error("JMX connect exception, clientLogIdent:{} host:{} port:{}.", clientLogIdent, host, port, e); + LOGGER.error( + "method=createJmxConnector||clientLogIdent={}||jmxHost={}||jmxPort={}||msg=jmx connect exception.", + clientLogIdent, jmxHost, jmxPort, e + ); } return false; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/utils/kafka/KSPartialKafkaAdminClient.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/utils/kafka/KSPartialKafkaAdminClient.java index f985fb01c..8f53d9988 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/utils/kafka/KSPartialKafkaAdminClient.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/utils/kafka/KSPartialKafkaAdminClient.java @@ -78,6 +78,8 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.runtime.distributed.ConnectProtocol; +import org.apache.kafka.connect.runtime.distributed.ExtendedWorkerState; +import org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol; import org.slf4j.Logger; import java.net.InetSocketAddress; @@ -1338,21 +1340,11 @@ void handleResponse(AbstractResponse abstractResponse) { if (groupMember.memberAssignment().length > 0) { final Assignment assignment = ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(groupMember.memberAssignment())); memberBaseAssignment = new KSMemberConsumerAssignment(new HashSet<>(assignment.partitions())); + } else { + memberBaseAssignment = new KSMemberConsumerAssignment(new HashSet<>()); } } else { - ConnectProtocol.Assignment assignment = null; - if (groupMember.memberAssignment().length > 0) { - assignment = ConnectProtocol. - deserializeAssignment(ByteBuffer.wrap(groupMember.memberAssignment())); - } - - ConnectProtocol.WorkerState workerState = null; - if (groupMember.memberMetadata().length > 0) { - workerState = ConnectProtocol. - deserializeMetadata(ByteBuffer.wrap(groupMember.memberMetadata())); - } - - memberBaseAssignment = new KSMemberConnectAssignment(assignment, workerState); + memberBaseAssignment = deserializeConnectGroupDataCompatibility(groupMember); } memberDescriptions.add(new KSMemberDescription( @@ -1381,6 +1373,36 @@ void handleFailure(Throwable throwable) { }; } + private KSMemberBaseAssignment deserializeConnectGroupDataCompatibility(DescribedGroupMember groupMember) { + try { + // 高版本的反序列化方式 + ExtendedWorkerState workerState = null; + if (groupMember.memberMetadata().length > 0) { + workerState = IncrementalCooperativeConnectProtocol. + deserializeMetadata(ByteBuffer.wrap(groupMember.memberMetadata())); + + return new KSMemberConnectAssignment(workerState.assignment(), workerState); + } + } catch (Exception e) { + // ignore + } + + // 低版本的反序列化方式 + ConnectProtocol.Assignment assignment = null; + if (groupMember.memberAssignment().length > 0) { + assignment = ConnectProtocol. + deserializeAssignment(ByteBuffer.wrap(groupMember.memberAssignment())); + } + + ConnectProtocol.WorkerState workerState = null; + if (groupMember.memberMetadata().length > 0) { + workerState = ConnectProtocol. 
+ deserializeMetadata(ByteBuffer.wrap(groupMember.memberMetadata())); + } + + return new KSMemberConnectAssignment(assignment, workerState); + } + private Set validAclOperations(final int authorizedOperations) { if (authorizedOperations == MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED) { diff --git a/km-console/packages/layout-clusters-fe/src/api/index.ts b/km-console/packages/layout-clusters-fe/src/api/index.ts index 1c6bc77ea..7dadd9ec8 100755 --- a/km-console/packages/layout-clusters-fe/src/api/index.ts +++ b/km-console/packages/layout-clusters-fe/src/api/index.ts @@ -94,6 +94,7 @@ const api = { getTopicGroupPartitionsHistory: (clusterPhyId: number, groupName: string) => getApi(`/clusters/${clusterPhyId}/groups/${groupName}/partitions`), resetGroupOffset: () => getApi('/group-offsets'), + getGroupOverview: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/groups-overview`), // topics列表 getTopicsList: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/topics-overview`), diff --git a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ResetOffsetDrawer.tsx b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ResetOffsetDrawer.tsx index b03f68327..75cc390a0 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ResetOffsetDrawer.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ResetOffsetDrawer.tsx @@ -19,18 +19,19 @@ const CustomSelectResetTime = (props: { value?: string; onChange?: (val: Number }} onChange={(e) => { setTimeSetMode(e.target.value); - if (e.target.value === 'newest') { - onChange('newest'); + if (e.target.value === 'newest' || e.target.value === 'oldest') { + onChange(e.target.value); } }} value={timeSetMode} > 最新Offset + 最旧Offset 自定义 {timeSetMode === 'custom' && ( { @@ -88,7 +89,7 @@ export default (props: any) => { topicName: record.topicName, }; if (formData.resetType === 'assignedTime') { - resetParams.resetType = formData.timestamp === 'newest' ? 0 : 2; + resetParams.resetType = formData.timestamp === 'newest' ? 0 : formData.timestamp === 'oldest' ? 1 : 2; if (resetParams.resetType === 2) { resetParams.timestamp = formData.timestamp; } diff --git a/km-console/packages/layout-clusters-fe/src/pages/Consumers/ResetOffsetDrawer.tsx b/km-console/packages/layout-clusters-fe/src/pages/Consumers/ResetOffsetDrawer.tsx index bba5059e2..7a2b02e7c 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Consumers/ResetOffsetDrawer.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Consumers/ResetOffsetDrawer.tsx @@ -22,18 +22,19 @@ const CustomSelectResetTime = (props: { value?: string; onChange?: (val: number }} onChange={(e) => { setTimeSetMode(e.target.value); - if (e.target.value === 'newest') { - onChange('newest'); + if (e.target.value === 'newest' || e.target.value === 'oldest') { + onChange(e.target.value); } }} value={timeSetMode} > 最新Offset + 最旧Offset 自定义 {timeSetMode === 'custom' && ( { @@ -91,7 +92,7 @@ export default (props: any) => { topicName: record.topicName, }; if (formData.resetType === 'assignedTime') { - resetParams.resetType = formData.timestamp === 'newest' ? 0 : 2; + resetParams.resetType = formData.timestamp === 'newest' ? 0 : formData.timestamp === 'oldest' ? 
1 : 2; if (resetParams.resetType === 2) { resetParams.timestamp = formData.timestamp; } diff --git a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/EditDrawer.tsx b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/EditDrawer.tsx index 8afcdccc7..a325a9bce 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/EditDrawer.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/EditDrawer.tsx @@ -85,6 +85,7 @@ const AddDrawer = forwardRef((_, ref) => { return; }); const [topicMetaData, setTopicMetaData] = React.useState([]); + const [groupMetaData, setGroupMetaData] = React.useState([]); // 获取 Topic 元信息 const getTopicMetaData = (newValue: any) => { @@ -102,6 +103,21 @@ const AddDrawer = forwardRef((_, ref) => { }); }; + // 获取 Group 元信息 + const getGroupMetaData = () => { + Utils.request(api.getGroupOverview(+clusterId), { + method: 'GET', + }).then((res: any) => { + const groups = res?.bizData.map((item: any) => { + return { + label: item.name, + value: item.name, + }; + }); + setGroupMetaData(groups); + }); + }; + // 获取 kafkaUser 列表 const getKafkaUserList = () => { Utils.request(api.getKafkaUsers(clusterId), { @@ -209,6 +225,7 @@ const AddDrawer = forwardRef((_, ref) => { useEffect(() => { getKafkaUserList(); getTopicMetaData(''); + getGroupMetaData(); }, []); return ( @@ -321,7 +338,7 @@ const AddDrawer = forwardRef((_, ref) => { } return false; }} - options={topicMetaData} + options={type === 'topic' ? topicMetaData : groupMetaData} placeholder={`请输入 ${type}Name`} /> diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ResetOffsetDrawer.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ResetOffsetDrawer.tsx index c079393b0..ed948e8c0 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ResetOffsetDrawer.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ResetOffsetDrawer.tsx @@ -19,18 +19,19 @@ const CustomSelectResetTime = (props: { value?: string; onChange?: (val: Number }} onChange={(e) => { setTimeSetMode(e.target.value); - if (e.target.value === 'newest') { - onChange('newest'); + if (e.target.value === 'newest' || e.target.value === 'oldest') { + onChange(e.target.value); } }} value={timeSetMode} > 最新Offset + 最旧Offset 自定义 {timeSetMode === 'custom' && ( { @@ -88,7 +89,7 @@ export default (props: any) => { topicName: record.topicName, }; if (formData.resetType === 'assignedTime') { - resetParams.resetType = formData.timestamp === 'newest' ? 0 : 2; + resetParams.resetType = formData.timestamp === 'newest' ? 0 : formData.timestamp === 'oldest' ? 
1 : 2; if (resetParams.resetType === 2) { resetParams.timestamp = formData.timestamp; } diff --git a/km-console/pom.xml b/km-console/pom.xml index 2863160ae..76201e592 100644 --- a/km-console/pom.xml +++ b/km-console/pom.xml @@ -4,13 +4,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 km-console - ${km.revision} + ${revision} jar km com.xiaojukeji.kafka - ${km.revision} + ${revision} diff --git a/km-core/pom.xml b/km-core/pom.xml index 896d54d6e..05158736b 100644 --- a/km-core/pom.xml +++ b/km-core/pom.xml @@ -5,13 +5,13 @@ 4.0.0 com.xiaojukeji.kafka km-core - ${km.revision} + ${revision} jar km com.xiaojukeji.kafka - ${km.revision} + ${revision} diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java index 97dc00c83..e34c035c5 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/broker/impl/BrokerServiceImpl.java @@ -360,7 +360,7 @@ private Result> getBrokersFromAdminClient(ClusterPhy clusterPhy) { private Broker getStartTimeAndBuildBroker(Long clusterPhyId, Node newNode, JmxConfig jmxConfig) { try { - Long startTime = jmxDAO.getServerStartTime(clusterPhyId, newNode.host(), jmxConfig.getJmxPort(), jmxConfig); + Long startTime = jmxDAO.getServerStartTime(clusterPhyId, newNode.host(), jmxConfig.getFinallyJmxPort(String.valueOf(newNode.id())), jmxConfig); return Broker.buildFrom(clusterPhyId, newNode, startTime); } catch (Exception e) { diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/ConfigUtils.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/KSConfigUtils.java similarity index 55% rename from km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/ConfigUtils.java rename to km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/KSConfigUtils.java index 3cc6cc472..db77720b4 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/ConfigUtils.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/KSConfigUtils.java @@ -11,10 +11,17 @@ */ @Getter @Service -public class ConfigUtils { - private ConfigUtils() { +public class KSConfigUtils { + private KSConfigUtils() { } @Value("${cluster-balance.ignored-topics.time-second:300}") private Integer clusterBalanceIgnoredTopicsTimeSecond; + + @Value(value = "${request.api-call.timeout-unit-ms:8000}") + private Integer apiCallTimeoutUnitMs; + + public Integer getApiCallLeftTimeUnitMs(Long costedUnitMs) { + return Math.max(1000, (int)(apiCallTimeoutUnitMs - costedUnitMs)); + } } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterServiceImpl.java index 030b78ad1..86879662d 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterServiceImpl.java @@ -40,12 +40,6 @@ public class ConnectClusterServiceImpl implements ConnectClusterService { @Override public Long 
replaceAndReturnIdInDB(ConnectClusterMetadata metadata) { - //url去斜杠 - String clusterUrl = metadata.getMemberLeaderUrl(); - if (clusterUrl.charAt(clusterUrl.length() - 1) == '/') { - clusterUrl = clusterUrl.substring(0, clusterUrl.length() - 1); - } - ConnectClusterPO oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName()); if (oldPO == null) { oldPO = new ConnectClusterPO(); @@ -54,7 +48,7 @@ public Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata) { oldPO.setName(metadata.getGroupName()); oldPO.setState(metadata.getState().getCode()); oldPO.setMemberLeaderUrl(metadata.getMemberLeaderUrl()); - oldPO.setClusterUrl(clusterUrl); + oldPO.setClusterUrl(""); oldPO.setVersion(KafkaConstant.DEFAULT_CONNECT_VERSION); connectClusterDAO.insert(oldPO); @@ -69,11 +63,11 @@ public Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata) { if (ValidateUtils.isBlank(oldPO.getVersion())) { oldPO.setVersion(KafkaConstant.DEFAULT_CONNECT_VERSION); } - if (!ValidateUtils.isBlank(clusterUrl)) { - oldPO.setClusterUrl(clusterUrl); + if (ValidateUtils.isNull(oldPO.getClusterUrl())) { + oldPO.setClusterUrl(""); } - connectClusterDAO.updateById(oldPO); + connectClusterDAO.updateById(oldPO); return oldPO.getId(); } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorServiceImpl.java index c042276df..133355a84 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorServiceImpl.java @@ -87,7 +87,7 @@ public Result createConnector(Long connectClusterId, String con props.put("config", configs); ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent( - connectCluster.getClusterUrl() + CREATE_CONNECTOR_URI, + connectCluster.getSuitableRequestUrl() + CREATE_CONNECTOR_URI, props, ConnectorInfo.class ); @@ -127,7 +127,7 @@ public Result> listConnectorsFromCluster(Long connectClusterId) { } List nameList = restTool.getArrayObjectWithJsonContent( - connectCluster.getClusterUrl() + LIST_CONNECTORS_URI, + connectCluster.getSuitableRequestUrl() + LIST_CONNECTORS_URI, new HashMap<>(), String.class ); @@ -224,7 +224,7 @@ public Result resumeConnector(Long connectClusterId, String connectorName, } restTool.putJsonForObject( - connectCluster.getClusterUrl() + String.format(RESUME_CONNECTOR_URI, connectorName), + connectCluster.getSuitableRequestUrl() + String.format(RESUME_CONNECTOR_URI, connectorName), new HashMap<>(), String.class ); @@ -259,7 +259,7 @@ public Result restartConnector(Long connectClusterId, String connectorName } restTool.postObjectWithJsonContent( - connectCluster.getClusterUrl() + String.format(RESTART_CONNECTOR_URI, connectorName), + connectCluster.getSuitableRequestUrl() + String.format(RESTART_CONNECTOR_URI, connectorName), new HashMap<>(), String.class ); @@ -294,7 +294,7 @@ public Result stopConnector(Long connectClusterId, String connectorName, S } restTool.putJsonForObject( - connectCluster.getClusterUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName), + connectCluster.getSuitableRequestUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName), new HashMap<>(), String.class ); @@ -329,7 +329,7 @@ public Result deleteConnector(Long connectClusterId, String connectorName, } 
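// Every REST call in this service now goes through connectCluster.getSuitableRequestUrl()
// instead of getClusterUrl(): clusterUrl is kept in the PO purely as the user-supplied
// address ("用户填写的集群地址") and is no longer derived from the member leader URL (see the
// ConnectClusterServiceImpl change above). A hedged sketch of what such an accessor on
// ConnectCluster could look like (assumed shape, not necessarily the shipped implementation):
//     public String getSuitableRequestUrl() {
//         // prefer the address the user configured; fall back to the leader URL
//         // reported by the Connect group when nothing was configured
//         return ValidateUtils.isBlank(clusterUrl) ? memberLeaderUrl : clusterUrl;
//     }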
restTool.deleteWithParamsAndHeader( - connectCluster.getClusterUrl() + String.format(DELETE_CONNECTOR_URI, connectorName), + connectCluster.getSuitableRequestUrl() + String.format(DELETE_CONNECTOR_URI, connectorName), new HashMap<>(), new HashMap<>(), String.class @@ -365,7 +365,7 @@ public Result updateConnectorConfig(Long connectClusterId, String connecto } ConnectorInfo connectorInfo = restTool.putJsonForObject( - connectCluster.getClusterUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName), + connectCluster.getSuitableRequestUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName), configs, org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo.class ); @@ -532,7 +532,7 @@ private int deleteConnectorInDB(Long connectClusterId, String connectorName) { private Result getConnectorInfoFromCluster(ConnectCluster connectCluster, String connectorName) { try { ConnectorInfo connectorInfo = restTool.getForObject( - connectCluster.getClusterUrl() + GET_CONNECTOR_INFO_PREFIX_URI + "/" + connectorName, + connectCluster.getSuitableRequestUrl() + GET_CONNECTOR_INFO_PREFIX_URI + "/" + connectorName, new HashMap<>(), ConnectorInfo.class ); @@ -558,7 +558,7 @@ private Result getConnectorInfoFromCluster(ConnectCluster conne private Result> getConnectorTopicsFromCluster(ConnectCluster connectCluster, String connectorName) { try { Properties properties = restTool.getForObject( - connectCluster.getClusterUrl() + String.format(GET_CONNECTOR_TOPICS_URI, connectorName), + connectCluster.getSuitableRequestUrl() + String.format(GET_CONNECTOR_TOPICS_URI, connectorName), new HashMap<>(), Properties.class ); @@ -578,7 +578,7 @@ private Result> getConnectorTopicsFromCluster(ConnectCluster connec private Result getConnectorStateInfoFromCluster(ConnectCluster connectCluster, String connectorName) { try { KSConnectorStateInfo connectorStateInfo = restTool.getForObject( - connectCluster.getClusterUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName), + connectCluster.getSuitableRequestUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName), new HashMap<>(), KSConnectorStateInfo.class ); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/plugin/impl/PluginServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/plugin/impl/PluginServiceImpl.java index fa6f13942..8ef4d3917 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/plugin/impl/PluginServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/plugin/impl/PluginServiceImpl.java @@ -66,7 +66,7 @@ public Result validateConfig(Long connectClusterId, Properti // 通过参数检查接口,获取插件配置 ConfigInfos configInfos = restTool.putJsonForObject( - connectCluster.getClusterUrl() + String.format(GET_PLUGIN_CONFIG_DESC_URI, props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME)), + connectCluster.getSuitableRequestUrl() + String.format(GET_PLUGIN_CONFIG_DESC_URI, props.getProperty(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME)), props, ConfigInfos.class ); @@ -94,7 +94,7 @@ public Result> listPluginsFromCluster(Long connectClust // 通过参数检查接口,获取插件配置 List pluginList = restTool.getArrayObjectWithJsonContent( - connectCluster.getClusterUrl() + GET_ALL_PLUGINS_URI, + connectCluster.getSuitableRequestUrl() + GET_ALL_PLUGINS_URI, new HashMap<>(), ConnectPluginBasic.class ); diff --git 
a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/worker/impl/WorkerConnectorServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/worker/impl/WorkerConnectorServiceImpl.java index 99fb9ba21..eb2c80fc9 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/worker/impl/WorkerConnectorServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/worker/impl/WorkerConnectorServiceImpl.java @@ -105,7 +105,7 @@ public Result actionTask(TaskActionDTO dto) { return Result.buildFailure(ResultStatus.NOT_EXIST); } - String url = String.format(RESTART_TASK_URI, connectCluster.getClusterUrl(), dto.getConnectorName(), dto.getTaskId()); + String url = String.format(RESTART_TASK_URI, connectCluster.getSuitableRequestUrl(), dto.getConnectorName(), dto.getTaskId()); try { restTool.postObjectWithJsonContent(url, null, String.class); } catch (Exception e) { diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/worker/impl/WorkerServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/worker/impl/WorkerServiceImpl.java index c52998f18..bb5dacc6a 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/worker/impl/WorkerServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/worker/impl/WorkerServiceImpl.java @@ -7,8 +7,8 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectWorker; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult; import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectWorkerPO; -import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO; import com.xiaojukeji.know.streaming.km.common.bean.vo.cluster.connector.ClusterWorkerOverviewVO; +import com.xiaojukeji.know.streaming.km.common.enums.jmx.JmxEnum; import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService; @@ -50,6 +50,11 @@ public void batchReplaceInDB(Long connectClusterId, List workerLi connectWorkerDAO.insert(newPO); } else { newPO.setId(oldPO.getId()); + if (JmxEnum.UNKNOWN.getPort().equals(newPO.getJmxPort())) { + // 如果所获取的jmx端口未知,则不更新jmx端口 + newPO.setJmxPort(oldPO.getJmxPort()); + } + connectWorkerDAO.updateById(newPO); } } catch (DuplicateKeyException dke) { diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/GroupService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/GroupService.java index 47317c804..c2cb71800 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/GroupService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/GroupService.java @@ -12,9 +12,9 @@ import com.xiaojukeji.know.streaming.km.common.exception.NotExistException; import org.apache.kafka.common.TopicPartition; -import java.util.Date; import java.util.List; import java.util.Map; +import java.util.Set; public interface GroupService { /** @@ -35,10 +35,11 @@ public interface GroupService { /** * 批量更新DB + * @param clusterPhyId 集群ID + * @param newGroupList 新的group列表 + * @param getFailedGroupSet 元信息获取失败的group列表 */ - void batchReplaceGroupsAndMembers(Long clusterPhyId, List newGroupList, long 
updateTime); - - int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime); + void batchReplaceGroupsAndMembers(Long clusterPhyId, List newGroupList, Set getFailedGroupSet); /** * DB-Group相关接口 diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/OpGroupService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/OpGroupService.java new file mode 100644 index 000000000..dbd6bae5d --- /dev/null +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/OpGroupService.java @@ -0,0 +1,15 @@ +package com.xiaojukeji.know.streaming.km.core.service.group; + +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicPartitionParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; + +public interface OpGroupService { + /** + * 删除Offset + */ + Result deleteGroupOffset(DeleteGroupParam param, String operator); + Result deleteGroupTopicOffset(DeleteGroupTopicParam param, String operator); + Result deleteGroupTopicPartitionOffset(DeleteGroupTopicPartitionParam param, String operator); +} diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupMetricServiceImpl.java index c9d65468c..1303c2aef 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupMetricServiceImpl.java @@ -39,7 +39,7 @@ */ @Service("groupMetricService") public class GroupMetricServiceImpl extends BaseMetricService implements GroupMetricService { - private static final ILog LOGGER = LogFactory.getLog( GroupMetricServiceImpl.class); + private static final ILog LOGGER = LogFactory.getLog(GroupMetricServiceImpl.class); public static final String GROUP_METHOD_GET_JUST_FRO_TEST = "getMetricJustForTest"; public static final String GROUP_METHOD_GET_HEALTH_SCORE = "getMetricHealthScore"; @@ -54,7 +54,7 @@ protected List listMetricPOFields(){ @Override protected void initRegisterVCHandler(){ registerVCHandler( GROUP_METHOD_GET_JUST_FRO_TEST, this::getMetricJustForTest); - registerVCHandler( GROUP_METHOD_GET_LAG_RELEVANT_FROM_ADMIN_CLIENT, this::getLagRelevantFromAdminClient ); + registerVCHandler( GROUP_METHOD_GET_LAG_RELEVANT_FROM_ADMIN_CLIENT, this::getLagRelevantFromAdminClient); registerVCHandler( GROUP_METHOD_GET_HEALTH_SCORE, this::getMetricHealthScore); registerVCHandler( GROUP_METHOD_GET_STATE, this::getGroupState); } @@ -129,8 +129,14 @@ public Result> collectGroupMetricsFromKafka(Long clusterId, S @Override public Result> listGroupMetricsFromES(Long clusterId, MetricGroupPartitionDTO dto) { Table> retTable = groupMetricESDAO.listGroupMetrics( - clusterId, dto.getGroup(), dto.getGroupTopics(), dto.getMetricsNames(), - dto.getAggType(), dto.getStartTime(), dto.getEndTime()); + clusterId, + dto.getGroup(), + dto.getGroupTopics(), + dto.getMetricsNames(), + dto.getAggType(), + dto.getStartTime(), + dto.getEndTime() + ); List multiLinesVOS = metricMap2VO(clusterId, retTable.rowMap()); return Result.buildSuc(multiLinesVOS); @@ -140,7 +146,11 @@ public Result> listGroupMetricsFromES(Long clusterId, M public Result> 
listLatestMetricsAggByGroupTopicFromES(Long clusterPhyId, List groupTopicList, List metricNames, AggTypeEnum aggType) { List groupMetricPOS = groupMetricESDAO.listLatestMetricsAggByGroupTopic( - clusterPhyId, groupTopicList, metricNames, aggType); + clusterPhyId, + groupTopicList, + metricNames, + aggType + ); return Result.buildSuc( ConvertUtil.list2List(groupMetricPOS, GroupMetrics.class)); } @@ -149,7 +159,11 @@ public Result> listLatestMetricsAggByGroupTopicFromES(Long cl public Result> listPartitionLatestMetricsFromES(Long clusterPhyId, String groupName, String topicName, List metricNames) { List groupMetricPOS = groupMetricESDAO.listPartitionLatestMetrics( - clusterPhyId, groupName, topicName, metricNames); + clusterPhyId, + groupName, + topicName, + metricNames + ); return Result.buildSuc( ConvertUtil.list2List(groupMetricPOS, GroupMetrics.class)); } @@ -158,9 +172,7 @@ public Result> listPartitionLatestMetricsFromES(Long clusterP public Result countMetricValueOccurrencesFromES(Long clusterPhyId, String groupName, SearchTerm term, Long startTime, Long endTime) { setQueryMetricFlag(term); - int count = groupMetricESDAO.countMetricValue(clusterPhyId, groupName, - term, startTime, endTime); - + int count = groupMetricESDAO.countMetricValue(clusterPhyId, groupName, term, startTime, endTime); if(count < 0){ return Result.buildFail(); } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java index 15dc21081..21511a96b 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java @@ -49,7 +49,7 @@ @Service public class GroupServiceImpl extends BaseKafkaVersionControlService implements GroupService { - private static final ILog log = LogFactory.getLog(GroupServiceImpl.class); + private static final ILog LOGGER = LogFactory.getLog(GroupServiceImpl.class); @Autowired private GroupDAO groupDAO; @@ -92,7 +92,7 @@ public List listGroupsFromKafka(ClusterPhy clusterPhy) throws AdminOpera return groupNameList; } catch (Exception e) { - log.error("method=listGroupsFromKafka||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e); + LOGGER.error("method=listGroupsFromKafka||clusterPhyId={}||errMsg=exception!", clusterPhy.getId(), e); throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED); } finally { @@ -142,7 +142,8 @@ public Group getGroupFromKafka(ClusterPhy clusterPhy, String groupName) throws N member.setMemberCount(member.getMemberCount() + 1); } } - group.setTopicMembers(memberMap.values().stream().collect(Collectors.toList())); + + group.setTopicMembers(new ArrayList<>(memberMap.values())); return group; } @@ -161,7 +162,7 @@ public Map getGroupOffsetFromKafka(Long clusterPhyId, Stri return offsetMap; } catch (Exception e) { - log.error("method=getGroupOffset||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e); + LOGGER.error("method=getGroupOffset||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e); throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED); } @@ -187,7 +188,7 @@ public KSGroupDescription getGroupDescriptionFromKafka(ClusterPhy clusterPhy, St return describeGroupsResult.all().get().get(groupName); } catch(Exception e){ - 
log.error("method=getGroupDescription||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhy.getId(), groupName, e); + LOGGER.error("method=getGroupDescription||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhy.getId(), groupName, e); throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED); } finally { @@ -202,12 +203,12 @@ public KSGroupDescription getGroupDescriptionFromKafka(ClusterPhy clusterPhy, St } @Override - public void batchReplaceGroupsAndMembers(Long clusterPhyId, List newGroupList, long updateTime) { + public void batchReplaceGroupsAndMembers(Long clusterPhyId, List newGroupList, Set getFailedGroupSet) { // 更新Group信息 - this.batchReplaceGroups(clusterPhyId, newGroupList, updateTime); + this.batchReplaceGroups(clusterPhyId, newGroupList, getFailedGroupSet); // 更新Group-Topic信息 - this.batchReplaceGroupMembers(clusterPhyId, newGroupList, updateTime); + this.batchReplaceGroupMembers(clusterPhyId, newGroupList, getFailedGroupSet); } @Override @@ -283,21 +284,6 @@ public List listClusterGroups(Long clusterPhyId) { return groupDAO.selectList(lambdaQueryWrapper).stream().map(elem -> GroupConverter.convert2Group(elem)).collect(Collectors.toList()); } - @Override - public int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime) { - // 删除过期Group信息 - LambdaQueryWrapper groupPOLambdaQueryWrapper = new LambdaQueryWrapper<>(); - groupPOLambdaQueryWrapper.eq(GroupPO::getClusterPhyId, clusterPhyId); - groupPOLambdaQueryWrapper.le(GroupPO::getUpdateTime, beforeTime); - groupDAO.delete(groupPOLambdaQueryWrapper); - - // 删除过期GroupMember信息 - LambdaQueryWrapper queryWrapper = new LambdaQueryWrapper<>(); - queryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId); - queryWrapper.le(GroupMemberPO::getUpdateTime, beforeTime); - return groupMemberDAO.delete(queryWrapper); - } - @Override public List getGroupsFromDB(Long clusterPhyId) { LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); @@ -368,7 +354,7 @@ public Result resetGroupOffsets(Long clusterPhyId, return Result.buildSuc(); } catch(Exception e){ - log.error("method=resetGroupOffsets||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e); + LOGGER.error("method=resetGroupOffsets||clusterPhyId={}|groupName={}||errMsg=exception!", clusterPhyId, groupName, e); throw new AdminOperateException(e.getMessage(), e, ResultStatus.KAFKA_OPERATE_FAILED); } @@ -378,62 +364,96 @@ public Result resetGroupOffsets(Long clusterPhyId, /**************************************************** private method ****************************************************/ - private void batchReplaceGroupMembers(Long clusterPhyId, List newGroupList, long updateTime) { - if (ValidateUtils.isEmptyList(newGroupList)) { - return; - } - - List dbPOList = this.listClusterGroupsMemberPO(clusterPhyId); - Map dbPOMap = dbPOList.stream().collect(Collectors.toMap(elem -> elem.getGroupName() + elem.getTopicName(), Function.identity())); + private void batchReplaceGroupMembers(Long clusterPhyId, List newGroupList, Set getFailedGroupSet) { + // DB 中的数据 + Map dbPOMap = this.listClusterGroupsMemberPO(clusterPhyId) + .stream() + .collect(Collectors.toMap(elem -> elem.getGroupName() + elem.getTopicName(), Function.identity())); + // 进行数据的更新 for (Group group: newGroupList) { for (GroupTopicMember member : group.getTopicMembers()) { try { - GroupMemberPO newPO = new GroupMemberPO(clusterPhyId, member.getTopicName(), group.getName(), group.getState().getState(), member.getMemberCount(), new 
Date(updateTime));
+                    GroupMemberPO newPO = new GroupMemberPO(clusterPhyId, member.getTopicName(), group.getName(), group.getState().getState(), member.getMemberCount(), new Date());

                     GroupMemberPO dbPO = dbPOMap.remove(newPO.getGroupName() + newPO.getTopicName());
-                    if (dbPO != null) {
+                    if (dbPO == null) {
+                        // 数据不存在则直接写入
+                        groupMemberDAO.insert(newPO);
+                    } else if (!dbPO.equal2GroupMemberPO(newPO)) {
+                        // 数据发生了变化则进行更新
                         newPO.setId(dbPO.getId());
                         groupMemberDAO.updateById(newPO);
-                        continue;
                     }
-
-                    groupMemberDAO.insert(newPO);
                 } catch (Exception e) {
-                    log.error(
+                    LOGGER.error(
                             "method=batchReplaceGroupMembers||clusterPhyId={}||groupName={}||topicName={}||errMsg=exception",
                             clusterPhyId, group.getName(), member.getTopicName(), e
                     );
                 }
             }
         }
-    }

-    private void batchReplaceGroups(Long clusterPhyId, List newGroupList, long updateTime) {
-        if (ValidateUtils.isEmptyList(newGroupList)) {
-            return;
-        }
+        // 删除剩余不存在的
+        dbPOMap.values().forEach(elem -> {
+            try {
+                if (getFailedGroupSet.contains(elem.getGroupName())) {
+                    // 该group信息获取失败,所以忽略对该数据的删除
+                    return;
+                }
+
+                groupMemberDAO.deleteById(elem.getId());
+            } catch (Exception e) {
+                LOGGER.error(
+                        "method=batchReplaceGroupMembers||clusterPhyId={}||groupName={}||topicName={}||msg=delete expired group data in db failed||errMsg=exception",
+                        clusterPhyId, elem.getGroupName(), elem.getTopicName(), e
+                );
+            }
+        });
+    }

-        List dbGroupList = this.listClusterGroupsPO(clusterPhyId);
-        Map dbGroupMap = dbGroupList.stream().collect(Collectors.toMap(elem -> elem.getName(), Function.identity()));
+    private void batchReplaceGroups(Long clusterPhyId, List newGroupList, Set getFailedGroupSet) {
+        // 获取 DB 中的数据
+        Map dbGroupMap = this.listClusterGroupsPO(clusterPhyId)
+                .stream()
+                .collect(Collectors.toMap(elem -> elem.getName(), Function.identity()));

+        // 进行数据的更新
         for (Group newGroup: newGroupList) {
             try {
-                GroupPO newPO = GroupConverter.convert2GroupPO(newGroup);
-                newPO.setUpdateTime(new Date(updateTime));
-
                 GroupPO dbPO = dbGroupMap.remove(newGroup.getName());
-                if (dbPO != null) {
+                if (dbPO == null) {
+                    // 一条新的数据,则直接insert
+                    groupDAO.insert(GroupConverter.convert2GroupPO(newGroup));
+                    continue;
+                }
+
+                GroupPO newPO = GroupConverter.convert2GroupPO(newGroup);
+                if (!newPO.equal2GroupPO(dbPO)) {
+                    // 如果不相等,则直接更新
                     newPO.setId(dbPO.getId());
                     groupDAO.updateById(newPO);
-                    continue;
                 }
-
-                groupDAO.insert(newPO);
+                // 其他情况,则不需要进行任何操作
             } catch (Exception e) {
-                log.error("method=batchGroupReplace||clusterPhyId={}||groupName={}||errMsg=exception", clusterPhyId, newGroup.getName(), e);
+                LOGGER.error("method=batchReplaceGroups||clusterPhyId={}||groupName={}||errMsg=exception", clusterPhyId, newGroup.getName(), e);
             }
         }
+
+        // 删除剩余不存在的
+        dbGroupMap.values().forEach(elem -> {
+            try {
+                if (getFailedGroupSet.contains(elem.getName())) {
+                    // 该group信息获取失败,所以忽略对该数据的删除
+                    return;
+                }
+
+                groupDAO.deleteById(elem.getId());
+            } catch (Exception e) {
+                LOGGER.error("method=batchReplaceGroups||clusterPhyId={}||groupName={}||msg=delete expired group data in db failed||errMsg=exception", clusterPhyId, elem.getName(), e);
+            }
+        });
     }

     private List listClusterGroupsPO(Long clusterPhyId) {
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/OpGroupServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/OpGroupServiceImpl.java
new file mode 100644
index 000000000..e82c36a9c
--- /dev/null
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/OpGroupServiceImpl.java
@@ -0,0 +1,272 @@
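// The new OpGroupServiceImpl below registers its delete handlers with version gates: the
// group-level handler starts at V_2_0_0 (the AdminClient#deleteConsumerGroups era), while
// topic- and partition-level offset deletion starts at V_2_4_0, matching
// AdminClient#deleteConsumerGroupOffsets introduced by KIP-496 in Kafka 2.4. Illustrative
// only: how a caller is expected to drive this (the DeleteGroupTopicParam constructor shape
// is an assumption, it is not shown in this diff):
//     DeleteGroupTopicParam param = new DeleteGroupTopicParam(clusterPhyId, groupName, topicName);
//     Result rv = opGroupService.deleteGroupTopicOffset(param, operator);
//     if (rv.failed()) { /* surface rv to the caller; nothing was deleted in Kafka */ }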
+package com.xiaojukeji.know.streaming.km.core.service.group.impl; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.didiglobal.logi.security.common.dto.oplog.OplogDTO; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.VersionItemParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.group.DeleteGroupTopicPartitionParam; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus; +import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupMemberPO; +import com.xiaojukeji.know.streaming.km.common.bean.po.group.GroupPO; +import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant; +import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum; +import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum; +import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum; +import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException; +import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import com.xiaojukeji.know.streaming.km.core.service.group.OpGroupService; +import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService; +import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService; +import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; +import com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupDAO; +import com.xiaojukeji.know.streaming.km.persistence.mysql.group.GroupMemberDAO; +import org.apache.kafka.clients.admin.*; +import org.apache.kafka.common.TopicPartition; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import javax.annotation.PostConstruct; +import java.util.*; +import java.util.stream.Collectors; + +import static com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus.VC_HANDLE_NOT_EXIST; +import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.*; +import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_GROUP; + +/** + * @author didi + */ +@Service +public class OpGroupServiceImpl extends BaseKafkaVersionControlService implements OpGroupService { + private static final ILog LOGGER = LogFactory.getLog(OpGroupServiceImpl.class); + + private static final String DELETE_GROUP_OFFSET = "deleteGroupOffset"; + private static final String DELETE_GROUP_TOPIC_OFFSET = "deleteGroupTopicOffset"; + private static final String DELETE_GROUP_TP_OFFSET = "deleteGroupTopicPartitionOffset"; + + @Autowired + private GroupDAO groupDAO; + + @Autowired + private GroupMemberDAO groupMemberDAO; + + @Autowired + private OpLogWrapService opLogWrapService; + + @Autowired + private KafkaAdminClient kafkaAdminClient; + + @Override + protected VersionItemTypeEnum getVersionItemType() { + return SERVICE_OP_GROUP; + } + + @PostConstruct + private void init() { + registerVCHandler(DELETE_GROUP_OFFSET, V_2_0_0, V_MAX, "deleteGroupOffsetByClient", this::deleteGroupOffsetByClient); + registerVCHandler(DELETE_GROUP_TOPIC_OFFSET, V_2_4_0, V_MAX, 
"deleteGroupTopicOffsetByClient", this::deleteGroupTopicOffsetByClient); + registerVCHandler(DELETE_GROUP_TP_OFFSET, V_2_4_0, V_MAX, "deleteGroupTopicPartitionOffsetByClient", this::deleteGroupTopicPartitionOffsetByClient); + } + + @Override + public Result deleteGroupOffset(DeleteGroupParam param, String operator) { + // 日志记录 + LOGGER.info("method=deleteGroupOffset||param={}||operator={}||msg=delete group offset", ConvertUtil.obj2Json(param), operator); + + try { + Result rv = (Result) doVCHandler(param.getClusterPhyId(), DELETE_GROUP_OFFSET, param); + if (rv == null || rv.failed()) { + return rv; + } + + // 记录操作 + OplogDTO oplogDTO = new OplogDTO(operator, + OperationEnum.DELETE.getDesc(), + ModuleEnum.KAFKA_GROUP.getDesc(), + String.format("集群ID:[%d] Group名称:[%s]", param.getClusterPhyId(), param.getGroupName()), + String.format("删除Offset:[%s]", ConvertUtil.obj2Json(param)) + ); + opLogWrapService.saveOplogAndIgnoreException(oplogDTO); + + // 清理Group数据 + this.deleteGroupInDB(param.getClusterPhyId(), param.getGroupName()); + this.deleteGroupMemberInDB(param.getClusterPhyId(), param.getGroupName()); + + return rv; + } catch (VCHandlerNotExistException e) { + return Result.buildFailure(VC_HANDLE_NOT_EXIST); + } + } + + @Override + public Result deleteGroupTopicOffset(DeleteGroupTopicParam param, String operator) { + // 日志记录 + LOGGER.info("method=deleteGroupTopicOffset||param={}||operator={}||msg=delete group topic offset", ConvertUtil.obj2Json(param), operator); + + try { + Result rv = (Result) doVCHandler(param.getClusterPhyId(), DELETE_GROUP_TOPIC_OFFSET, param); + if (rv == null || rv.failed()) { + return rv; + } + + // 清理数据库中的数据 + // 记录操作 + OplogDTO oplogDTO = new OplogDTO(operator, + OperationEnum.DELETE.getDesc(), + ModuleEnum.KAFKA_GROUP.getDesc(), + String.format("集群ID:[%d] Group名称:[%s] Topic名称:[%s]", param.getClusterPhyId(), param.getGroupName(), param.getTopicName()), + String.format("删除Offset:[%s]", ConvertUtil.obj2Json(param)) + ); + opLogWrapService.saveOplogAndIgnoreException(oplogDTO); + + // 清理group + topic 数据 + this.deleteGroupMemberInDB(param.getClusterPhyId(), param.getGroupName(), param.getTopicName()); + + return rv; + } catch (VCHandlerNotExistException e) { + return Result.buildFailure(VC_HANDLE_NOT_EXIST); + } + } + + @Override + public Result deleteGroupTopicPartitionOffset(DeleteGroupTopicPartitionParam param, String operator) { + // 日志记录 + LOGGER.info("method=deleteGroupTopicPartitionOffset||param={}||operator={}||msg=delete group topic partition offset", ConvertUtil.obj2Json(param), operator); + + try { + Result rv = (Result) doVCHandler(param.getClusterPhyId(), DELETE_GROUP_TP_OFFSET, param); + if (rv == null || rv.failed()) { + return rv; + } + + // 记录操作 + OplogDTO oplogDTO = new OplogDTO(operator, + OperationEnum.DELETE.getDesc(), + ModuleEnum.KAFKA_GROUP.getDesc(), + String.format("集群ID:[%d] Group名称:[%s] Topic名称:[%s] PartitionID:[%d]", param.getClusterPhyId(), param.getGroupName(), param.getTopicName(), param.getPartitionId()), + String.format("删除Offset:[%s]", ConvertUtil.obj2Json(param)) + ); + opLogWrapService.saveOplogAndIgnoreException(oplogDTO); + + return rv; + } catch (VCHandlerNotExistException e) { + return Result.buildFailure(VC_HANDLE_NOT_EXIST); + } + } + + /**************************************************** private method ****************************************************/ + + private Result deleteGroupOffsetByClient(VersionItemParam itemParam) { + DeleteGroupParam param = (DeleteGroupParam) itemParam; + try { + AdminClient adminClient = 
kafkaAdminClient.getClient(param.getClusterPhyId()); + + DeleteConsumerGroupsResult deleteConsumerGroupsResult = adminClient.deleteConsumerGroups( + Collections.singletonList(param.getGroupName()), + new DeleteConsumerGroupsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS) + ); + + deleteConsumerGroupsResult.all().get(); + } catch (Exception e) { + LOGGER.error( + "method=deleteGroupOffsetByClient||clusterPhyId={}||groupName={}||errMsg=delete group failed||msg=exception!", + param.getClusterPhyId(), param.getGroupName(), e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage()); + } + + return Result.buildSuc(); + } + + private Result deleteGroupTopicOffsetByClient(VersionItemParam itemParam) { + DeleteGroupTopicParam param = (DeleteGroupTopicParam) itemParam; + try { + AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId()); + + DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList( + param.getTopicName()), + new DescribeTopicsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS) + ); + + List tpList = describeTopicsResult + .all() + .get() + .get(param.getTopicName()) + .partitions() + .stream() + .map(elem -> new TopicPartition(param.getTopicName(), elem.partition())) + .collect(Collectors.toList()); + + DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsetsResult = adminClient.deleteConsumerGroupOffsets( + param.getGroupName(), + new HashSet<>(tpList), + new DeleteConsumerGroupOffsetsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS) + ); + + deleteConsumerGroupOffsetsResult.all().get(); + } catch (Exception e) { + LOGGER.error( + "method=deleteGroupTopicOffsetByClient||clusterPhyId={}||groupName={}||topicName={}||errMsg=delete group failed||msg=exception!", + param.getClusterPhyId(), param.getGroupName(), param.getTopicName(), e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage()); + } + + return Result.buildSuc(); + } + + private Result deleteGroupTopicPartitionOffsetByClient(VersionItemParam itemParam) { + DeleteGroupTopicPartitionParam param = (DeleteGroupTopicPartitionParam) itemParam; + try { + AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId()); + + DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsetsResult = adminClient.deleteConsumerGroupOffsets( + param.getGroupName(), + new HashSet<>(Arrays.asList(new TopicPartition(param.getTopicName(), param.getPartitionId()))), + new DeleteConsumerGroupOffsetsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS) + ); + + deleteConsumerGroupOffsetsResult.all().get(); + } catch (Exception e) { + LOGGER.error( + "method=deleteGroupTopicPartitionOffsetByClient||clusterPhyId={}||groupName={}||topicName={}||partitionId={}||errMsg=delete group failed||msg=exception!", + param.getClusterPhyId(), param.getGroupName(), param.getTopicName(), param.getPartitionId(), e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage()); + } + + return Result.buildSuc(); + } + + private int deleteGroupInDB(Long clusterPhyId, String groupName) { + LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); + lambdaQueryWrapper.eq(GroupPO::getClusterPhyId, clusterPhyId); + lambdaQueryWrapper.eq(GroupPO::getName, groupName); + + return groupDAO.delete(lambdaQueryWrapper); + } + + private int deleteGroupMemberInDB(Long clusterPhyId, String groupName) { + 
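// These DB helpers use the MyBatis-Plus conditional-delete pattern: the wrapper built below
// compiles to roughly
//     DELETE FROM <group_member table> WHERE cluster_phy_id = ? AND group_name = ?
// (table name per the GroupMemberPO mapping), so an offset deletion that succeeded against
// Kafka is immediately reflected in the console's own metadata store instead of waiting for
// the next metadata sync.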
+        LambdaQueryWrapper<GroupMemberPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
+        lambdaQueryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId);
+        lambdaQueryWrapper.eq(GroupMemberPO::getGroupName, groupName);
+
+        return groupMemberDAO.delete(lambdaQueryWrapper);
+    }
+
+    private int deleteGroupMemberInDB(Long clusterPhyId, String groupName, String topicName) {
+        LambdaQueryWrapper<GroupMemberPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
+        lambdaQueryWrapper.eq(GroupMemberPO::getClusterPhyId, clusterPhyId);
+        lambdaQueryWrapper.eq(GroupMemberPO::getGroupName, groupName);
+        lambdaQueryWrapper.eq(GroupMemberPO::getTopicName, topicName);
+
+        return groupMemberDAO.delete(lambdaQueryWrapper);
+    }
+}
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/checker/zookeeper/HealthCheckZookeeperService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/checker/zookeeper/HealthCheckZookeeperService.java
index f18f3172c..933310dcb 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/checker/zookeeper/HealthCheckZookeeperService.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/checker/zookeeper/HealthCheckZookeeperService.java
@@ -102,6 +102,10 @@ private HealthCheckResult checkBrainSplit(Tuple
         long value = infoList.stream().filter(elem -> ZKRoleEnum.LEADER.getRole().equals(elem.getRole())).count();
+        if (value == 0) {
+            // When ZK runs in standalone mode, the leader role is reported as "standalone"
+            value = infoList.stream().filter(elem -> ZKRoleEnum.STANDALONE.getRole().equals(elem.getRole())).count();
+        }
 
         checkResult.setPassed(value == 1 ? Constant.YES : Constant.NO);
         return checkResult;
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/OpTopicService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/OpTopicService.java
index 3b529f817..1f656a6e1 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/OpTopicService.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/OpTopicService.java
@@ -3,6 +3,7 @@
 import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicTruncateParam;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
@@ -21,4 +22,9 @@ public interface OpTopicService {
      * Expand partitions
      */
     Result expandTopic(TopicPartitionExpandParam expandParam, String operator);
+
+    /**
+     * Truncate the messages of a topic
+     */
+    Result truncateTopic(TopicTruncateParam param, String operator);
 }
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/OpTopicServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/OpTopicServiceImpl.java
index 466f7a2fa..bb3e553d6 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/OpTopicServiceImpl.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/topic/impl/OpTopicServiceImpl.java
@@ -8,6 +8,7 @@
 import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicTruncateParam;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
 import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
@@ -33,6 +34,7 @@
 import kafka.zk.KafkaZkClient;
 import org.apache.kafka.clients.admin.*;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.TopicPartitionInfo;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;
 import scala.Option;
@@ -57,6 +59,7 @@ public class OpTopicServiceImpl extends BaseKafkaVersionControlService implement
     private static final String TOPIC_CREATE = "createTopic";
     private static final String TOPIC_DELETE = "deleteTopic";
     private static final String TOPIC_EXPAND = "expandTopic";
+    private static final String TOPIC_TRUNCATE = "truncateTopic";
 
     @Autowired
     private TopicService topicService;
@@ -92,6 +95,8 @@ private void init() {
         registerVCHandler(TOPIC_EXPAND, V_0_10_0_0, V_0_11_0_3, "expandTopicByZKClient", this::expandTopicByZKClient);
         registerVCHandler(TOPIC_EXPAND, V_0_11_0_3, V_MAX, "expandTopicByKafkaClient", this::expandTopicByKafkaClient);
+
+        registerVCHandler(TOPIC_TRUNCATE, V_0_11_0_0, V_MAX, "truncateTopicByKafkaClient", this::truncateTopicByKafkaClient);
     }
 
     @Override
@@ -203,9 +208,58 @@ public Result expandTopic(TopicPartitionExpandParam expandParam, String op
         return rv;
     }
 
+    @Override
+    public Result truncateTopic(TopicTruncateParam param, String operator) {
+        try {
+            // Truncate the topic data
+            Result rv = (Result) doVCHandler(param.getClusterPhyId(), TOPIC_TRUNCATE, param);
+
+            if (rv == null || rv.failed()) {
+                return rv;
+            }
+
+            // Record the operation in the oplog
+            OplogDTO oplogDTO = new OplogDTO(operator,
+                    OperationEnum.TRUNCATE.getDesc(),
+                    ModuleEnum.KAFKA_TOPIC.getDesc(),
+                    MsgConstant.getTopicBizStr(param.getClusterPhyId(), param.getTopicName()),
+                    String.format("清空Topic:[%s]", param.toString()));
+            opLogWrapService.saveOplogAndIgnoreException(oplogDTO);
+            return rv;
+        } catch (VCHandlerNotExistException e) {
+            return Result.buildFailure(VC_HANDLE_NOT_EXIST);
+        }
+    }
 
     /**************************************************** private method ****************************************************/
 
+    private Result truncateTopicByKafkaClient(VersionItemParam itemParam) {
+        TopicTruncateParam param = (TopicTruncateParam) itemParam;
+        try {
+            AdminClient adminClient = kafkaAdminClient.getClient(param.getClusterPhyId());
+            // Fetch the topic's partition info
+            DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Arrays.asList(param.getTopicName()), new DescribeTopicsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS));
+            Map<String, TopicDescription> descriptionMap = describeTopicsResult.all().get();
+
+            Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
+            RecordsToDelete recordsToDeleteOffset = RecordsToDelete.beforeOffset(param.getOffset());
+
+            descriptionMap.forEach((topicName, topicDescription) -> {
+                for (TopicPartitionInfo topicPartition : topicDescription.partitions()) {
+                    recordsToDelete.put(new TopicPartition(topicName, topicPartition.partition()), recordsToDeleteOffset);
+                }
+            });
+
+            DeleteRecordsResult deleteRecordsResult = adminClient.deleteRecords(recordsToDelete, new DeleteRecordsOptions().timeoutMs(KafkaConstant.ADMIN_CLIENT_REQUEST_TIME_OUT_UNIT_MS));
+            deleteRecordsResult.all().get();
+        } catch (Exception e) {
+            log.error("truncate topic by kafka-client failed,clusterPhyId:{} topicName:{} offset:{}", param.getClusterPhyId(), param.getTopicName(), param.getOffset(), e);
+
+            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
+        }
+
+        return Result.buildSuc();
+    }
 
     private Result deleteByKafkaClient(VersionItemParam itemParam) {
         TopicParam param = (TopicParam) itemParam;
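On the handler above: `RecordsToDelete.beforeOffset(offset)` asks the brokers to advance each partition's log start offset, so records below it become unreadable and are eventually reclaimed; the handler applies one caller-supplied offset to every partition. If a caller instead wants to wipe each partition completely, one option is to resolve each partition's end offset first. This is a sketch rather than the project's code, and it additionally assumes a client/broker pair that supports `AdminClient#listOffsets` (Kafka 2.5+):

```java
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;

import java.util.*;

public class TruncateToEndSketch {
    // Truncates every partition of the given topic up to its current end offset.
    public static void truncateAll(AdminClient admin, String topic) throws Exception {
        // 1. Ask for the latest offset of every partition of the topic.
        Map<TopicPartition, OffsetSpec> latest = new HashMap<>();
        for (TopicPartitionInfo p : admin.describeTopics(Collections.singletonList(topic))
                .all().get().get(topic).partitions()) {
            latest.put(new TopicPartition(topic, p.partition()), OffsetSpec.latest());
        }

        // 2. Delete everything below each partition's end offset.
        Map<TopicPartition, RecordsToDelete> toDelete = new HashMap<>();
        admin.listOffsets(latest).all().get()
                .forEach((tp, info) -> toDelete.put(tp, RecordsToDelete.beforeOffset(info.offset())));

        admin.deleteRecords(toDelete).all().get(); // log start offset advances to the end offset
    }
}
```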
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/fe/FrontEndControlVersionItems.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/fe/FrontEndControlVersionItems.java
index 3f4c0a687..18cdb44e1 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/fe/FrontEndControlVersionItems.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/fe/FrontEndControlVersionItems.java
@@ -36,7 +36,15 @@ public class FrontEndControlVersionItems extends BaseMetricVersionMetric {
     private static final String FE_HA_CREATE_MIRROR_TOPIC = "FEHaCreateMirrorTopic";
     private static final String FE_HA_DELETE_MIRROR_TOPIC = "FEHaDeleteMirrorTopic";
 
-    public FrontEndControlVersionItems(){}
+    private static final String FE_TRUNCATE_TOPIC = "FETruncateTopic";
+
+    private static final String FE_DELETE_GROUP_OFFSET = "FEDeleteGroupOffset";
+    private static final String FE_DELETE_GROUP_TOPIC_OFFSET = "FEDeleteGroupTopicOffset";
+    private static final String FE_DELETE_GROUP_TOPIC_PARTITION_OFFSET = "FEDeleteGroupTopicPartitionOffset";
+
+    public FrontEndControlVersionItems() {
+        // ignore
+    }
 
     @Override
     public int versionItemType() {
@@ -89,6 +97,17 @@ public List init(){
         itemList.add(buildItem().minVersion(VersionEnum.V_2_5_0_D_300).maxVersion(VersionEnum.V_2_5_0_D_MAX)
                 .name(FE_HA_DELETE_MIRROR_TOPIC).desc("HA-取消Topic复制"));
 
+        // Truncate topic
+        itemList.add(buildItem().minVersion(VersionEnum.V_0_11_0_0).maxVersion(VersionEnum.V_MAX)
+                .name(FE_TRUNCATE_TOPIC).desc("清空Topic"));
+
+        // Delete offsets
+        itemList.add(buildItem().minVersion(VersionEnum.V_2_0_0).maxVersion(VersionEnum.V_MAX)
+                .name(FE_DELETE_GROUP_OFFSET).desc("删除GroupOffset"));
+        itemList.add(buildItem().minVersion(VersionEnum.V_2_4_0).maxVersion(VersionEnum.V_MAX)
+                .name(FE_DELETE_GROUP_TOPIC_OFFSET).desc("删除GroupTopicOffset"));
+        itemList.add(buildItem().minVersion(VersionEnum.V_2_4_0).maxVersion(VersionEnum.V_MAX)
+                .name(FE_DELETE_GROUP_TOPIC_PARTITION_OFFSET).desc("删除GroupTopicPartitionOffset"));
         return itemList;
     }
 }
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/impl/ZookeeperMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/impl/ZookeeperMetricServiceImpl.java
index bd41f43bc..07844c6b1 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/impl/ZookeeperMetricServiceImpl.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/impl/ZookeeperMetricServiceImpl.java
@@ -161,7 +161,7 @@ public Result<List<MetricLineVO>> listMetricsFromES(Long clusterPhyId, MetricDTO
         // Format conversion
         List<MetricLineVO> voList = new ArrayList<>();
-        pointVOMap.entrySet().stream().forEach(entry ->
+        pointVOMap.entrySet().forEach(entry ->
                 voList.add(new MetricLineVO(String.valueOf(clusterPhyId), entry.getKey(), entry.getValue()))
         );
         return Result.buildSuc(voList);
@@ -208,11 +208,11 @@ private Result getMetricFromServerCmd(VersionItemParam metricP
         metrics.putMetric(ZOOKEEPER_METRIC_AVG_REQUEST_LATENCY, cmdData.getZkAvgLatency());
         metrics.putMetric(ZOOKEEPER_METRIC_MIN_REQUEST_LATENCY, cmdData.getZkMinLatency());
         metrics.putMetric(ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, cmdData.getZkMaxLatency());
-        metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_RECEIVED, cmdData.getZkPacketsReceived().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_SENT, cmdData.getZkPacketsSent().floatValue());
+        metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests());
+        metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections());
+        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_RECEIVED, cmdData.getZkPacketsReceived());
+        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_SENT, cmdData.getZkPacketsSent());
 
         return Result.buildSuc(metrics);
     }
@@ -257,16 +257,16 @@ private Result getMetricFromMonitorCmd(VersionItemParam metric
         metrics.putMetric(ZOOKEEPER_METRIC_AVG_REQUEST_LATENCY, cmdData.getZkAvgLatency());
         metrics.putMetric(ZOOKEEPER_METRIC_MIN_REQUEST_LATENCY, cmdData.getZkMinLatency());
         metrics.putMetric(ZOOKEEPER_METRIC_MAX_REQUEST_LATENCY, cmdData.getZkMaxLatency());
-        metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_WATCH_COUNT, cmdData.getZkWatchCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_RECEIVED, cmdData.getZkPacketsReceived().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_SENT, cmdData.getZkPacketsSent().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_EPHEMERALS_COUNT, cmdData.getZkEphemeralsCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_APPROXIMATE_DATA_SIZE, cmdData.getZkApproximateDataSize().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_OPEN_FILE_DESCRIPTOR_COUNT, cmdData.getZkOpenFileDescriptorCount().floatValue());
-        metrics.putMetric(ZOOKEEPER_METRIC_MAX_FILE_DESCRIPTOR_COUNT, cmdData.getZkMaxFileDescriptorCount().floatValue());
+        metrics.putMetric(ZOOKEEPER_METRIC_OUTSTANDING_REQUESTS, cmdData.getZkOutstandingRequests());
+        metrics.putMetric(ZOOKEEPER_METRIC_NODE_COUNT, cmdData.getZkZnodeCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_WATCH_COUNT, cmdData.getZkWatchCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_NUM_ALIVE_CONNECTIONS, cmdData.getZkNumAliveConnections());
+        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_RECEIVED, cmdData.getZkPacketsReceived());
+        metrics.putMetric(ZOOKEEPER_METRIC_PACKETS_SENT, cmdData.getZkPacketsSent());
+        metrics.putMetric(ZOOKEEPER_METRIC_EPHEMERALS_COUNT, cmdData.getZkEphemeralsCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_APPROXIMATE_DATA_SIZE, cmdData.getZkApproximateDataSize());
+        metrics.putMetric(ZOOKEEPER_METRIC_OPEN_FILE_DESCRIPTOR_COUNT, cmdData.getZkOpenFileDescriptorCount());
+        metrics.putMetric(ZOOKEEPER_METRIC_MAX_FILE_DESCRIPTOR_COUNT, cmdData.getZkMaxFileDescriptorCount());
 
         return Result.buildSuc(metrics);
     }
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/utils/ApiCallThreadPoolService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/utils/ApiCallThreadPoolService.java
index e66b4aa5c..671616577 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/utils/ApiCallThreadPoolService.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/utils/ApiCallThreadPoolService.java
@@ -6,6 +6,7 @@
 import org.springframework.stereotype.Service;
 
 import javax.annotation.PostConstruct;
+import java.util.List;
 import java.util.concurrent.Callable;
 
 /**
@@ -21,7 +22,7 @@ public class ApiCallThreadPoolService {
     @Value(value = "${thread-pool.api.queue-size:500}")
     private Integer queueSize;
 
-    private static FutureWaitUtil apiFutureUtil;
+    private static FutureWaitUtil apiFutureUtil;
 
     @PostConstruct
     private void init() {
@@ -33,11 +34,21 @@ private void init() {
         );
     }
 
-    public static void runnableTask(String taskName, Integer timeoutUnisMs, Callable callable) {
+    public static void runnableTask(String taskName, Integer timeoutUnisMs, Callable callable) {
         apiFutureUtil.runnableTask(taskName, timeoutUnisMs, callable);
     }
 
-    public static void waitResult(Integer stepWaitTimeUnitMs) {
-        apiFutureUtil.waitResult(stepWaitTimeUnitMs);
+    public static void runnableTask(String taskName, Integer timeoutUnisMs, Runnable runnable) {
+        apiFutureUtil.runnableTask(taskName, timeoutUnisMs, runnable);
+    }
+
+    public static void waitResult() {
+        apiFutureUtil.waitResult(0);
+    }
+
+    public static boolean waitResultAndReturnFinished(int taskNum) {
+        List resultList = apiFutureUtil.waitResult(0);
+
+        return resultList != null && resultList.size() == taskNum;
+    }
 }
\ No newline at end of file
diff --git a/km-dist/pom.xml b/km-dist/pom.xml
index d74390059..d5ed33ab9 100644
--- a/km-dist/pom.xml
+++ b/km-dist/pom.xml
@@ -4,13 +4,13 @@
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
     <artifactId>km-dist</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>pom</packaging>
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
     </parent>
diff --git a/km-enterprise/km-ha/pom.xml b/km-enterprise/km-ha/pom.xml
index 88a2e1e05..3285386a1 100644
--- a/km-enterprise/km-ha/pom.xml
+++ b/km-enterprise/km-ha/pom.xml
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.xiaojukeji.kafka</groupId>
     <artifactId>km-ha</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
diff --git a/km-enterprise/km-rebalance/pom.xml b/km-enterprise/km-rebalance/pom.xml
index d960e3628..fcc1167f4 100644
--- a/km-enterprise/km-rebalance/pom.xml
+++ b/km-enterprise/km-rebalance/pom.xml
@@ -5,7 +5,7 @@
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/job/ClusterBalanceJobHandler.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/job/ClusterBalanceJobHandler.java
index 5892f0f1f..6cbecad0a 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/job/ClusterBalanceJobHandler.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/job/ClusterBalanceJobHandler.java
@@ -6,6 +6,7 @@
 import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.BrokerSpec;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
+import com.xiaojukeji.know.streaming.km.core.service.config.KSConfigUtils;
 import com.xiaojukeji.know.streaming.km.rebalance.common.bean.entity.job.ClusterBalanceReassignDetail;
 import com.xiaojukeji.know.streaming.km.rebalance.common.bean.entity.job.detail.ClusterBalanceDetailDataGroupByTopic;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.job.Job;
@@ -31,7 +32,6 @@
 import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
 import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerSpecService;
 import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
-import com.xiaojukeji.know.streaming.km.core.service.config.ConfigUtils;
 import com.xiaojukeji.know.streaming.km.core.service.job.JobHandler;
 import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
 import com.xiaojukeji.know.streaming.km.persistence.mysql.job.JobDAO;
@@ -91,7 +91,7 @@ public class ClusterBalanceJobHandler implements JobHandler {
     private TopicService topicService;
 
     @Autowired
-    private ConfigUtils configUtils;
+    private KSConfigUtils ksConfigUtils;
 
     @Override
     public JobTypeEnum type() {
@@ -118,7 +118,7 @@ public Result submit(Job job, String operator) {
         }
 
         // Build the balance plan
-        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(dto.getClusterId(), configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
+        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(dto.getClusterId(), ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
         BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(dto, brokers, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames);
         try {
             ExecutionRebalance executionRebalance = new ExecutionRebalance();
@@ -203,7 +203,7 @@ public Result modify(Job job, String operator) {
         List<Broker> brokers = brokerService.listAllBrokersFromDB(clusterPhy.getId());
         Map brokerSpecMap = brokerSpecService.getBrokerSpecMap(clusterPhy.getId());
 
-        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(job.getClusterId(), configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
+        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(job.getClusterId(), ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
         JobClusterBalanceContent dto = ConvertUtil.str2ObjByJson(job.getJobData(), JobClusterBalanceContent.class);
         BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(dto, brokers, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames);
         ExecutionRebalance executionRebalance = new ExecutionRebalance();
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceJobServiceImpl.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceJobServiceImpl.java
index 75e22075b..addde520a 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceJobServiceImpl.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceJobServiceImpl.java
@@ -22,6 +22,7 @@
 import com.xiaojukeji.know.streaming.km.common.constant.Constant;
 import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
 import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant;
+import com.xiaojukeji.know.streaming.km.core.service.config.KSConfigUtils;
 import com.xiaojukeji.know.streaming.km.rebalance.common.bean.entity.ClusterBalanceInterval;
 import com.xiaojukeji.know.streaming.km.rebalance.common.bean.entity.job.detail.ClusterBalancePlanDetail;
 import com.xiaojukeji.know.streaming.km.rebalance.common.bean.po.ClusterBalanceJobConfigPO;
@@ -37,8 +38,6 @@
 import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
 import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerSpecService;
 import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
-import com.xiaojukeji.know.streaming.km.core.service.config.ConfigUtils;
-import com.xiaojukeji.know.streaming.km.core.service.partition.OpPartitionService;
 import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignService;
 import com.xiaojukeji.know.streaming.km.core.service.reassign.ReassignStrategyService;
 import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
@@ -96,7 +95,7 @@ public class ClusterBalanceJobServiceImpl implements ClusterBalanceJobService {
     private TopicService topicService;
 
     @Autowired
-    private ConfigUtils configUtils;
+    private KSConfigUtils ksConfigUtils;
 
     @Autowired
     private ReassignService reassignService;
@@ -104,9 +103,6 @@ public class ClusterBalanceJobServiceImpl implements ClusterBalanceJobService {
     @Autowired
     private ReassignStrategyService reassignStrategyService;
 
-    @Autowired
-    private OpPartitionService opPartitionService;
-
     @Override
     public Result deleteByJobId(Long jobId, String operator) {
         if (jobId == null) {
@@ -304,7 +300,7 @@ public Result verifyClusterBalanceAndUpdateStatue(Long jobId) {
         Map<Integer, Broker> brokerMap = brokerService.listAllBrokersFromDB(clusterBalanceJobPO.getClusterId()).stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
 
         // Update the balance job status
-        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhy.getId(), configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
+        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhy.getId(), ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
         Map brokerBalanceStateMap = ExecutionRebalance
                 .getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(clusterBalanceJobPO, brokerMap, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames));
         List<ClusterBalancePlanDetail> oldDetails = ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBrokerBalanceDetail(), ClusterBalancePlanDetail.class);
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceServiceImpl.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceServiceImpl.java
index d5ccf3984..22c2f200a 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceServiceImpl.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceServiceImpl.java
@@ -11,7 +11,7 @@
 import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService;
 import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerSpecService;
 import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
-import com.xiaojukeji.know.streaming.km.core.service.config.ConfigUtils;
+import com.xiaojukeji.know.streaming.km.core.service.config.KSConfigUtils;
 import com.xiaojukeji.know.streaming.km.core.service.job.JobService;
 import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
 import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.BrokerMetricVersionItems;
@@ -96,7 +96,7 @@ public class ClusterBalanceServiceImpl implements ClusterBalanceService {
     private TopicService topicService;
 
     @Autowired
-    private ConfigUtils configUtils;
+    private KSConfigUtils ksConfigUtils;
 
     private final Cache balanceStateCache = Caffeine.newBuilder()
             .expireAfterWrite(150, TimeUnit.SECONDS)
@@ -134,7 +134,7 @@ public Result state(Long clusterPhyId) {
         } catch (ParseException e) {
             logger.error("method=state||clusterId:{}||errMsg=exception", clusterPhyId, e);
         }
-        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
+        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
         clusterBalanceStateVO.setEnable(configPOResult.getData().getStatus() == 1);
 
         Map resourceDoubleMap;
@@ -190,7 +190,7 @@ public PaginationResult overview(Long clusterPhyId, Cl
         Map brokerBalanceStateMap = new HashMap<>();
         if (configPOResult.hasData()) {
             try {
-                List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
+                List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
                 brokerBalanceStateMap = ExecutionRebalance
                         .getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames));
             } catch (Exception e) {
@@ -268,7 +268,7 @@ public Result getItemState(Long clusterPhyId) {
         // Topic info
         List<String> recentTopicNameList = topicService.listRecentUpdateTopicNamesFromDB(
                 clusterPhyId,
-                configUtils.getClusterBalanceIgnoredTopicsTimeSecond()
+                ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond()
         );
 
         ClusterBalanceItemState balanceState = new ClusterBalanceItemState();
@@ -382,7 +382,7 @@ public Result preview(Long clusterPhyId, ClusterBalancePre
         // Build the balance plan
         Map<Integer, Broker> brokerMap = allBrokers.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
-        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, configUtils.getClusterBalanceIgnoredTopicsTimeSecond());
+        List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
         BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(clusterBalancePreviewDTO, brokerMap, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames);
         ExecutionRebalance executionRebalance = new ExecutionRebalance();
         try {
diff --git a/km-enterprise/km-testing/pom.xml b/km-enterprise/km-testing/pom.xml
index ebafbbc9c..40006270d 100644
--- a/km-enterprise/km-testing/pom.xml
+++ b/km-enterprise/km-testing/pom.xml
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
    <groupId>com.xiaojukeji.kafka</groupId>
     <artifactId>km-testing</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
diff --git a/km-extends/km-account/pom.xml b/km-extends/km-account/pom.xml
index d8f631b45..234e5dd30 100644
--- a/km-extends/km-account/pom.xml
+++ b/km-extends/km-account/pom.xml
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.xiaojukeji.kafka</groupId>
     <artifactId>km-account</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
diff --git a/km-extends/km-monitor/pom.xml b/km-extends/km-monitor/pom.xml
index 8db003d70..74f0a3c28 100644
--- a/km-extends/km-monitor/pom.xml
+++ b/km-extends/km-monitor/pom.xml
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.xiaojukeji.kafka</groupId>
     <artifactId>km-monitor</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
diff --git a/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/component/AbstractMonitorSinkService.java b/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/component/AbstractMonitorSinkService.java
index f35c5ec6e..bbb316475 100644
--- a/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/component/AbstractMonitorSinkService.java
+++ b/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/component/AbstractMonitorSinkService.java
@@ -123,6 +123,7 @@ private List partitionMetric2SinkPoint(List p
             tagsMap.put(CLUSTER_ID.getName(), p.getClusterPhyId());
             tagsMap.put(BROKER_ID.getName(), p.getBrokerId());
             tagsMap.put(PARTITION_ID.getName(), p.getPartitionId());
+            tagsMap.put(TOPIC.getName(), p.getTopic());
 
             pointList.addAll(genSinkPoint("Partition", p.getMetrics(), p.getTimestamp(), tagsMap));
         }
diff --git a/km-persistence/pom.xml b/km-persistence/pom.xml
index e1b0ff5a5..eeb57bcbb 100644
--- a/km-persistence/pom.xml
+++ b/km-persistence/pom.xml
@@ -4,13 +4,13 @@
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
     <artifactId>km-persistence</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
     </parent>
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/connect/ConnectJMXClient.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/connect/ConnectJMXClient.java
index 727ad7f6e..ea7e38c5c 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/connect/ConnectJMXClient.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/connect/ConnectJMXClient.java
@@ -78,31 +78,34 @@ private JmxConnectorWrap createJmxConnectorWrap(ConnectCluster connectCluster, S
             return jmxConnectorWrap;
         }
 
-        log.debug("method=createJmxConnectorWrap||connectClusterId={}||workerId={}||msg=create JmxConnectorWrap starting", connectCluster.getId(), workerId);
+        log.info("method=createJmxConnectorWrap||connectClusterId={}||workerId={}||msg=create JmxConnectorWrap starting", connectCluster.getId(), workerId);
 
         JmxConfig jmxConfig = ConvertUtil.str2ObjByJson(connectCluster.getJmxProperties(), JmxConfig.class);
         if (jmxConfig == null) {
             jmxConfig = new JmxConfig();
         }
-
         jmxConnectorWrap = new JmxConnectorWrap(
-                "connectClusterId: " + connectCluster.getId() + " workerId: " + workerId,
+                String.format("clusterPhyId=%s,workerId=%s", connectCluster.getId(), workerId),
                 null,
                 connectWorker.getHost(),
-                connectWorker.getJmxPort() != null ? connectWorker.getJmxPort() : jmxConfig.getJmxPort(),
+                jmxConfig.getFinallyJmxPort(workerId, connectWorker.getJmxPort()),
                 jmxConfig
         );
 
         Map workerMap = JMX_MAP.getOrDefault(connectCluster.getId(), new ConcurrentHashMap<>());
         workerMap.put(workerId, jmxConnectorWrap);
         JMX_MAP.put(connectCluster.getId(), workerMap);
+
+        log.info("method=createJmxConnectorWrap||clusterPhyId={}||workerId={}||msg=create JmxConnectorWrap success", connectCluster.getId(), workerId);
+
         return jmxConnectorWrap;
     } catch (Exception e) {
-        log.debug("method=createJmxConnectorWrap||connectClusterId={}||workerId={}||msg=create JmxConnectorWrap failed||errMsg=exception||", connectCluster.getId(), workerId, e);
+        log.error("method=createJmxConnectorWrap||connectClusterId={}||workerId={}||msg=create JmxConnectorWrap failed||errMsg=exception||", connectCluster.getId(), workerId, e);
     } finally {
         modifyClientMapLock.unlock();
     }
+
     return null;
 }
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BaseMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BaseMetricESDAO.java
index 48651dd75..77d7bbdf4 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BaseMetricESDAO.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/BaseMetricESDAO.java
@@ -24,6 +24,7 @@
 import org.springframework.util.CollectionUtils;
 
 import java.util.*;
+import java.util.stream.IntStream;
 
 import static com.xiaojukeji.know.streaming.km.common.constant.ESConstant.*;
 
@@ -68,13 +69,11 @@ public void checkCurrentDayIndexExist(){
             String indexTemplate = templateLoaderUtil.getContextByFileName(indexName);
             esOpClient.createIndexTemplateIfNotExist(indexName, indexTemplate);
 
-            // Check that the indices for the last 7 days exist
-            for(int i = 0; i < INDEX_DAYS; i++){
-                String realIndex = IndexNameUtils.genDailyIndexName(indexName, i);
-                if(esOpClient.indexExist(realIndex)){continue;}
-
-                esOpClient.createIndex(realIndex);
-            }
+            int retainDays = indexExpireDays > INDEX_DAYS ? INDEX_DAYS : indexExpireDays;
+            // Check that the indices for the last retainDays days exist
+            IntStream.range(0, retainDays).mapToObj(i -> IndexNameUtils.genDailyIndexName(indexName, i))
+                    .filter(realIndex -> !esOpClient.indexExist(realIndex))
+                    .forEach(realIndex -> esOpClient.createIndex(realIndex));
         } catch (Exception e) {
             LOGGER.error("method=checkCurrentDayIndexExist||errMsg=exception!", e);
         }
@@ -94,8 +93,7 @@ public void delExpireIndex(){
                     indexExpireDays, indexList.subList(indexExpireDays, size));
         }
 
-        indexList.subList(indexExpireDays, size).stream().forEach(
-                s -> esOpClient.delIndexByName(s));
+        indexList.subList(indexExpireDays, size).forEach(s -> esOpClient.delIndexByName(s));
     }
 }
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaJMXClient.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaJMXClient.java
index 1ace6742e..8c8ca9cd1 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaJMXClient.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaJMXClient.java
@@ -151,7 +151,7 @@ private JmxConnectorWrap createJmxConnectorWrap(ClusterPhy clusterPhy, Integer b
             return jmxMap;
         }
 
-        log.debug("method=createJmxConnectorWrap||clusterPhyId={}||brokerId={}||msg=create JmxConnectorWrap starting", clusterPhy.getId(), brokerId);
+        log.info("method=createJmxConnectorWrap||clusterPhyId={}||brokerId={}||msg=create JmxConnectorWrap starting", clusterPhy.getId(), brokerId);
 
         JmxConfig jmxConfig = ConvertUtil.str2ObjByJson(clusterPhy.getJmxProperties(), JmxConfig.class);
         if (jmxConfig == null) {
@@ -159,10 +159,10 @@ private JmxConnectorWrap createJmxConnectorWrap(ClusterPhy clusterPhy, Integer b
         }
 
         JmxConnectorWrap jmxConnectorWrap = new JmxConnectorWrap(
-                "clusterPhyId: " + clusterPhy.getId() + " brokerId: " + brokerId,
+                String.format("clusterPhyId=%s,brokerId=%d", clusterPhy.getId(), brokerId),
                 broker.getStartTimestamp(),
-                jmxConfig != null ? broker.getJmxHost(jmxConfig.getUseWhichEndpoint()) : broker.getHost(),
-                broker.getJmxPort() != null ? broker.getJmxPort() : jmxConfig.getJmxPort(),
+                broker.getJmxHost(jmxConfig.getUseWhichEndpoint()),
+                jmxConfig.getFinallyJmxPort(String.valueOf(brokerId), broker.getJmxPort()),
                 jmxConfig
         );
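Both JMX clients now delegate port resolution to `JmxConfig#getFinallyJmxPort(...)` instead of inlining a null-check. The patch does not show that method, so the following is only an assumed shape — a per-instance override taking precedence over the port the broker/worker registered, with the config-wide default as the last resort. All field names here are hypothetical:

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of a port-resolution helper; the override map and field names are assumptions.
public class JmxConfigSketch {
    private Integer jmxPort;                                    // cluster-wide default, may be null
    private Map<String, Integer> jmxPortMap = new HashMap<>();  // hypothetical per-broker/worker overrides

    public Integer getFinallyJmxPort(String instanceId, Integer registeredPort) {
        Integer overridePort = jmxPortMap.get(instanceId);
        if (overridePort != null) {
            return overridePort;                                // explicit override wins
        }
        if (registeredPort != null && registeredPort > 0) {
            return registeredPort;                              // port the instance registered itself
        }
        return jmxPort;                                         // fall back to the configured default
    }
}
```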
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/impl/KafkaZKDAOImpl.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/impl/KafkaZKDAOImpl.java
index 82cb8130b..b56c53d6f 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/impl/KafkaZKDAOImpl.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/service/impl/KafkaZKDAOImpl.java
@@ -7,6 +7,7 @@
 import com.xiaojukeji.know.streaming.km.common.bean.entity.kafkacontroller.KafkaController;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic;
+import com.xiaojukeji.know.streaming.km.common.constant.Constant;
 import com.xiaojukeji.know.streaming.km.common.enums.topic.TopicTypeEnum;
 import com.xiaojukeji.know.streaming.km.common.exception.AdminOperateException;
 import com.xiaojukeji.know.streaming.km.common.exception.NotExistException;
@@ -78,7 +79,7 @@ public Broker getBrokerMetadata(Long clusterPhyId, Integer brokerId) throws NotE
     try {
         BrokerMetadata metadata = this.getData(kafkaZkClient.currentZooKeeper(), BrokerIdZNode.path(brokerId), false, BrokerMetadata.class);
-        BrokerMetadata.parseAndUpdateBrokerMetadata(metadata);
+
         return this.convert2Broker(clusterPhyId, brokerId, metadata);
     } catch (KeeperException ke) {
         logger.error("method=getBrokerMetadata||clusterPhyId={}||brokerId={}||errMsg=exception", clusterPhyId, brokerId, ke);
@@ -279,7 +280,7 @@ private Broker convert2Broker(Long clusterPhyId, Integer brokerId, BrokerMetadat
         metadata.setJmxPort(brokerMetadata.getJmxPort());
         metadata.setStartTimestamp(brokerMetadata.getTimestamp());
         metadata.setRack(brokerMetadata.getRack());
-        metadata.setStatus(1);
+        metadata.setStatus(Constant.ALIVE);
         metadata.setEndpointMap(brokerMetadata.getEndpointMap());
         return metadata;
     }
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/BrokerMetadata.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/BrokerMetadata.java
index 3b252c5f4..a944d4b96 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/BrokerMetadata.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/zookeeper/znode/brokers/BrokerMetadata.java
@@ -1,12 +1,11 @@
 package com.xiaojukeji.know.streaming.km.persistence.kafka.zookeeper.znode.brokers;
 
-import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.alibaba.fastjson.JSON;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.common.IpPortData;
 import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant;
 import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
-import lombok.Data;
 
 import java.io.Serializable;
 import java.util.HashMap;
@@ -51,7 +50,6 @@
  * }
  *
  */
-@Data
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class BrokerMetadata implements Serializable {
     private static final long serialVersionUID = 3918113492423375809L;
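The hunk below replaces the Lombok `@Data` accessors with explicit getters/setters and defers endpoint parsing to a lazy, `synchronized` `parseBrokerMetadata()` that re-checks `endpointMap`. One caveat: the readers (`getHost()`, `getPort()`, `getEndpointMap()`) test the field without synchronization, the classic double-checked-locking shape. Because `endpointMap` is assigned only after the temporary map is fully populated this is mostly benign in practice, but publication would be strictly safe under the Java memory model if the field were declared `volatile` — a possible one-line hardening (the field type is inferred from the parsing code, so treat it as an assumption):

```java
// Possible hardening (not in the patch): volatile ensures a thread that sees a
// non-null endpointMap also sees the fully populated map built in parseBrokerMetadata().
private volatile Map<String, IpPortData> endpointMap;
```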
@@ -74,34 +72,92 @@ public class BrokerMetadata implements Serializable {
 
     private String rack;
 
-    @JsonIgnore
-    public String getExternalHost() {
-        if (!endpointMap.containsKey(KafkaConstant.EXTERNAL_KEY)) {
-            // Fall back to host if there is no external endpoint
-            return host;
+    public List<String> getEndpoints() {
+        return endpoints;
+    }
+
+    public void setEndpoints(List<String> endpoints) {
+        this.endpoints = endpoints;
+    }
+
+    public Map<String, IpPortData> getEndpointMap() {
+        if (endpointMap == null) {
+            this.parseBrokerMetadata();
         }
-        return endpointMap.get(KafkaConstant.EXTERNAL_KEY).getIp();
+        return endpointMap;
+    }
+
+    public String getHost() {
+        if (endpointMap == null) {
+            this.parseBrokerMetadata();
+        }
+
+        return host;
+    }
+
+    public void setHost(String host) {
+        this.host = host;
     }
 
-    @JsonIgnore
-    public String getInternalHost() {
-        if (!endpointMap.containsKey(KafkaConstant.INTERNAL_KEY)) {
-            // Fall back to host if there is no internal endpoint
-            return host;
+    public Integer getPort() {
+        if (endpointMap == null) {
+            this.parseBrokerMetadata();
         }
-        return endpointMap.get(KafkaConstant.INTERNAL_KEY).getIp();
+
+        return port;
     }
 
-    public static void parseAndUpdateBrokerMetadata(BrokerMetadata brokerMetadata) {
-        brokerMetadata.setEndpointMap(new HashMap<>());
+    public void setPort(Integer port) {
+        this.port = port;
+    }
+
+    public Integer getJmxPort() {
+        return jmxPort;
+    }
+
+    public void setJmxPort(Integer jmxPort) {
+        this.jmxPort = jmxPort;
+    }
+
+    public Integer getVersion() {
+        return version;
+    }
 
-        if (brokerMetadata.getEndpoints().isEmpty()) {
+    public void setVersion(Integer version) {
+        this.version = version;
+    }
+
+    public Long getTimestamp() {
+        return timestamp;
+    }
+
+    public void setTimestamp(Long timestamp) {
+        this.timestamp = timestamp;
+    }
+
+    public String getRack() {
+        return rack;
+    }
+
+    public void setRack(String rack) {
+        this.rack = rack;
+    }
+
+    private synchronized void parseBrokerMetadata() {
+        if (this.endpointMap != null) {
+            return;
+        }
+
+        if (this.endpoints == null || this.endpoints.isEmpty()) {
+            this.endpointMap = new HashMap<>(0);
             return;
         }
 
+        Map<String, IpPortData> tempEndpointMap = new HashMap<>();
+
         // example EXTERNAL://10.179.162.202:7092
-        for (String endpoint: brokerMetadata.getEndpoints()) {
+        for (String endpoint: this.endpoints) {
             int idx1 = endpoint.indexOf("://");
             int idx2 = endpoint.lastIndexOf(":");
             if (idx1 == -1 || idx2 == -1 || idx1 == idx2) {
@@ -111,19 +167,37 @@ public static void parseAndUpdateBrokerMetadata(BrokerMetadata brokerMetadata) {
             String brokerHost = endpoint.substring(idx1 + "://".length(), idx2);
             String brokerPort = endpoint.substring(idx2 + 1);
 
-            brokerMetadata.getEndpointMap().put(endpoint.substring(0, idx1), new IpPortData(brokerHost, brokerPort));
+            tempEndpointMap.put(endpoint.substring(0, idx1), new IpPortData(brokerHost, brokerPort));
 
             if (KafkaConstant.INTERNAL_KEY.equals(endpoint.substring(0, idx1))) {
                 // Prefer the internal address for display
-                brokerMetadata.setHost(brokerHost);
-                brokerMetadata.setPort(ConvertUtil.string2Integer(brokerPort));
+                this.host = brokerHost;
+                this.port = ConvertUtil.string2Integer(brokerPort);
             }
 
-            if (null == brokerMetadata.getHost()) {
-                brokerMetadata.setHost(brokerHost);
-                brokerMetadata.setPort(ConvertUtil.string2Integer(brokerPort));
+            if (null == this.host) {
+                this.host = brokerHost;
+                this.port = ConvertUtil.string2Integer(brokerPort);
             }
         }
+
+        this.endpointMap = tempEndpointMap;
+    }
+
+    public static void main(String[] args) {
+        String str = "{\t\n" +
+                "\t\"listener_security_protocol_map\":{\"EXTERNAL\":\"SASL_PLAINTEXT\",\"INTERNAL\":\"SASL_PLAINTEXT\"},\n" +
+                "\t\"endpoints\":[\"EXTERNAL://10.179.162.202:7092\",\"INTERNAL://10.179.162.202:7093\"],\n" +
+                "\t\"jmx_port\":8099,\n" +
+                "\t\"host\":null,\n" +
+                "\t\"timestamp\":\"1627289710439\",\n" +
+                "\t\"port\":-1,\n" +
+                "\t\"version\":4\n" +
+                "}";
+
+        BrokerMetadata bm = JSON.parseObject(str, BrokerMetadata.class);
+        System.out.println(bm.getHost());
+        System.out.println(JSON.toJSON(bm));
+    }
 }
diff --git a/km-persistence/src/main/resources/sql/dml-logi.sql b/km-persistence/src/main/resources/sql/dml-logi.sql
index 0ad09b4bf..2beff22e1 100644
--- a/km-persistence/src/main/resources/sql/dml-logi.sql
+++ b/km-persistence/src/main/resources/sql/dml-logi.sql
@@ -119,3 +119,41 @@ INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_del
 INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2020', '0', 'know-streaming');
 INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2022', '0', 'know-streaming');
 INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2024', '0', 'know-streaming');
+
+
+-- Multi-cluster management permissions, added 2023-06-27
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2026', 'Connector-新增', '1593', '1', '2', 'Connector-新增', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2028', 'Connector-编辑', '1593', '1', '2', 'Connector-编辑', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2030', 'Connector-删除', '1593', '1', '2', 'Connector-删除', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2032', 'Connector-重启', '1593', '1', '2', 'Connector-重启', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2034', 'Connector-暂停&恢复', '1593', '1', '2', 'Connector-暂停&恢复', '0', 'know-streaming');
+
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2026', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2028', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2030', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2032', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2034', '0', 'know-streaming');
+
+
+-- Multi-cluster management permissions, added 2023-06-29
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2036', 'Security-ACL新增', '1593', '1', '2', 'Security-ACL新增', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2038', 'Security-ACL删除', '1593', '1', '2', 'Security-ACL删除', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2040', 'Security-User新增', '1593', '1', '2', 'Security-User新增', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2042', 'Security-User删除', '1593', '1', '2', 'Security-User删除', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2044', 'Security-User修改密码', '1593', '1', '2', 'Security-User修改密码', '0', 'know-streaming');
+
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2036', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2038', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2040', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2042', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2044', '0', 'know-streaming');
+
+
+-- Multi-cluster management permissions, added 2023-07-06
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2046', 'Group-删除', '1593', '1', '2', 'Group-删除', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2048', 'GroupOffset-Topic纬度删除', '1593', '1', '2', 'GroupOffset-Topic纬度删除', '0', 'know-streaming');
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2050', 'GroupOffset-Partition纬度删除', '1593', '1', '2', 'GroupOffset-Partition纬度删除', '0', 'know-streaming');
+
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2046', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2048', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2050', '0', 'know-streaming');
diff --git a/km-rest/pom.xml b/km-rest/pom.xml
index 3ce68d86b..512cf24fd 100644
--- a/km-rest/pom.xml
+++ b/km-rest/pom.xml
@@ -4,13 +4,13 @@
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
     <artifactId>km-rest</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
     </parent>
diff --git a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/connect/KafkaConnectorController.java b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/connect/KafkaConnectorController.java
index d60314bbb..b03ca7cce 100644
--- a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/connect/KafkaConnectorController.java
+++ b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/connect/KafkaConnectorController.java
@@ -14,6 +14,7 @@
 import com.xiaojukeji.know.streaming.km.common.constant.Constant;
 import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectActionEnum;
 import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
+import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
 import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
 import io.swagger.annotations.Api;
@@ -44,6 +45,10 @@ public class KafkaConnectorController {
     @PostMapping(value = "connectors")
     @ResponseBody
     public Result createConnector(@Validated @RequestBody ConnectorCreateDTO dto) {
+        if (ValidateUtils.isNull(dto.getSuitableConfig())) {
+            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "config字段不能为空");
+        }
+
         return connectorManager.createConnector(dto, HttpRequestUtil.getOperator());
     }
 
@@ -73,14 +78,27 @@ public Result operateConnectors(@Validated @RequestBody ConnectorActionDTO
     @PutMapping(value ="connectors-config")
     @ResponseBody
     public Result modifyConnectors(@Validated @RequestBody ConnectorCreateDTO dto) {
-        return connectorManager.updateConnectorConfig(dto.getConnectClusterId(), dto.getConnectorName(), dto.getConfigs(), HttpRequestUtil.getOperator());
+        if (ValidateUtils.isNull(dto.getSuitableConfig())) {
+            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "config字段不能为空");
+        }
+
+        return connectorManager.updateConnectorConfig(
+                dto.getConnectClusterId(),
+                dto.getConnectorName(),
+                dto.getSuitableConfig(),
+                HttpRequestUtil.getOperator()
+        );
     }
 
     @ApiOperation(value = "校验Connector配置", notes = "")
     @PutMapping(value ="connectors-config/validate")
     @ResponseBody
     public Result validateConnectors(@Validated @RequestBody ConnectorCreateDTO dto) {
-        Result infoResult = pluginService.validateConfig(dto.getConnectClusterId(), dto.getConfigs());
+        if (ValidateUtils.isNull(dto.getSuitableConfig())) {
+            return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "config字段不能为空");
+        }
+
+        Result infoResult = pluginService.validateConfig(dto.getConnectClusterId(), dto.getSuitableConfig());
         if (infoResult.failed()) {
             return Result.buildFromIgnoreData(infoResult);
         }
diff --git a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/group/GroupController.java b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/group/GroupController.java
index 55e7e778c..9233be887 100644
--- a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/group/GroupController.java
+++ b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/group/GroupController.java
@@ -2,6 +2,7 @@
 import com.didiglobal.logi.security.util.HttpRequestUtil;
 import com.xiaojukeji.know.streaming.km.biz.group.GroupManager;
+import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetDeleteDTO;
 import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupOffsetResetDTO;
 import com.xiaojukeji.know.streaming.km.common.bean.dto.group.GroupTopicConsumedDTO;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.PaginationResult;
@@ -32,13 +33,20 @@ public class GroupController {
     @Autowired
     private GroupService groupService;
 
-    @ApiOperation(value = "重置组消费偏移", notes = "")
+    @ApiOperation(value = "重置消费偏移", notes = "")
     @PutMapping(value = "group-offsets")
     @ResponseBody
     public Result resetGroupOffsets(@Validated @RequestBody GroupOffsetResetDTO dto) throws Exception {
         return groupManager.resetGroupOffsets(dto, HttpRequestUtil.getOperator());
     }
 
+    @ApiOperation(value = "删除消费偏移", notes = "")
+    @DeleteMapping(value = "group-offsets")
+    @ResponseBody
+    public Result deleteGroupOffsets(@Validated @RequestBody GroupOffsetDeleteDTO dto) throws Exception {
+        return groupManager.deleteGroupOffsets(dto, HttpRequestUtil.getOperator());
+    }
+
     @ApiOperation(value = "Group-Topic指标信息", notes = "")
     @PostMapping(value = "clusters/{clusterId}/topics/{topicName}/groups/{groupName}/metric")
     @ResponseBody
diff --git a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/topic/TopicController.java b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/topic/TopicController.java
index a2022b8a5..9eaee2df4 100644
--- a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/topic/TopicController.java
+++ b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/topic/TopicController.java
@@ -61,6 +61,13 @@ public Result expandTopics(@Validated @RequestBody TopicExpansionDTO dto)
         return opTopicManager.expandTopic(dto, HttpRequestUtil.getOperator());
     }
 
+    @ApiOperation(value = "Topic数据清空", notes = "")
+    @PostMapping(value = "topics/truncate-topic")
+    @ResponseBody
+    public Result truncateTopic(@Validated @RequestBody ClusterTopicDTO dto) {
+        return opTopicManager.truncateTopic(dto.getClusterId(), dto.getTopicName(), HttpRequestUtil.getOperator());
+    }
+
     @ApiOperation(value = "Topic元信息", notes = "带是否存在信息")
     @GetMapping(value = "clusters/{clusterPhyId}/topics/{topicName}/metadata-combine-exist")
     @ResponseBody
diff --git a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/topic/TopicStateController.java b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/topic/TopicStateController.java
index b03715370..c1b79c902 100644
--- a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/topic/TopicStateController.java
+++ b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/topic/TopicStateController.java
@@ -74,7 +74,7 @@ public Result getTopicBrokers(@PathVariable Long clusterPhyId,
     @GetMapping(value = "clusters/{clusterPhyId}/topics/{topicName}/brokers-partitions-summary")
     @ResponseBody
     public Result getTopicBrokersPartitionsSummary(@PathVariable Long clusterPhyId,
-                                                   @PathVariable String topicName) throws Exception {
+                                                   @PathVariable String topicName) {
         return topicStateManager.getTopicBrokersPartitionsSummary(clusterPhyId, topicName);
     }
 
@@ -83,7 +83,7 @@ public Result getTopicBrokersPartitionsSummary(
     @ResponseBody
     public Result getTopicPartitions(@PathVariable Long clusterPhyId,
                                      @PathVariable String topicName,
-                                     @RequestBody List<String> metricsNames) throws Exception {
+                                     @RequestBody List<String> metricsNames) {
         return topicStateManager.getTopicPartitions(clusterPhyId, topicName, metricsNames);
     }
 
diff --git a/km-rest/src/main/resources/application.yml b/km-rest/src/main/resources/application.yml
index ee7193f07..5dfb3303e 100644
--- a/km-rest/src/main/resources/application.yml
+++ b/km-rest/src/main/resources/application.yml
@@ -95,12 +95,14 @@ es:
   index:
     expire: 15 # Index retention in days; 15 means indices older than 15 days are deleted by KS
-
 # Cluster auto-balance related configuration
 cluster-balance:
   ignored-topics:
     time-second: 300
 
+request: # Request-related configuration
+  api-call: # API calls
+    timeout-unit-ms: 8000 # Timeout, defaults to 8000 ms
 
 # Prometheus metrics export related configuration
 management:
diff --git a/km-task/pom.xml b/km-task/pom.xml
index d07b37b3e..a88442273 100644
--- a/km-task/pom.xml
+++ b/km-task/pom.xml
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.xiaojukeji.kafka</groupId>
     <artifactId>km-task</artifactId>
-    <version>${km.revision}</version>
+    <version>${revision}</version>
     <packaging>jar</packaging>
     <parent>
         <artifactId>km</artifactId>
         <groupId>com.xiaojukeji.kafka</groupId>
-        <version>${km.revision}</version>
+        <version>${revision}</version>
     </parent>
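The new `request.api-call.timeout-unit-ms` key above follows the same relaxed-binding style as the existing `thread-pool.api.*` settings. Its consumer is not part of this patch, so the binding sketched here is illustrative only (the holder class and field names are hypothetical):

```java
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

// Illustrative consumer of the new config key; this class is not in the patch.
@Component
public class ApiCallTimeoutHolder {
    // Falls back to 8000 ms when the key is absent, matching the YAML default.
    @Value("${request.api-call.timeout-unit-ms:8000}")
    private Integer timeoutUnitMs;

    public Integer getTimeoutUnitMs() {
        return timeoutUnitMs;
    }
}
```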
diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncConnectClusterAndWorkerTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncConnectClusterAndWorkerTask.java
index 76fb2b992..646bf6c0e 100644
--- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncConnectClusterAndWorkerTask.java
+++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncConnectClusterAndWorkerTask.java
@@ -18,8 +18,8 @@
 import com.xiaojukeji.know.streaming.km.common.constant.Constant;
 import com.xiaojukeji.know.streaming.km.common.enums.group.GroupStateEnum;
 import com.xiaojukeji.know.streaming.km.common.enums.group.GroupTypeEnum;
+import com.xiaojukeji.know.streaming.km.common.enums.jmx.JmxEnum;
 import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
-import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService;
 import com.xiaojukeji.know.streaming.km.core.service.group.GroupService;
 import com.xiaojukeji.know.streaming.km.persistence.connect.cache.LoadedConnectClusterCache;
@@ -46,9 +46,6 @@ public class SyncConnectClusterAndWorkerTask extends AbstractAsyncMetadataDispat
     @Autowired
     private WorkerService workerService;
 
-    @Autowired
-    private WorkerConnectorService workerConnectorService;
-
     @Autowired
     private ConnectClusterService connectClusterService;
 
@@ -59,7 +56,6 @@ public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnit
         // Fetch the connect clusters
         List<Group> groupList = groupService.listClusterGroups(clusterPhy.getId()).stream().filter(elem->elem.getType()==GroupTypeEnum.CONNECT_CLUSTER).collect(Collectors.toList());
         for (Group group: groupList) {
-
             try {
                 KSGroupDescription ksGroupDescription = groupService.getGroupDescriptionFromKafka(clusterPhy, group.getName());
                 if (!ksGroupDescription.protocolType().equals(CONNECT_CLUSTER_PROTOCOL_TYPE)) {
@@ -104,7 +100,7 @@ private Result handleWorkerMetadata(Long connectClusterId, KSGroupDescript
                         connectClusterId,
                         memberDescription.consumerId(),
                         memberDescription.host().substring(1),
-                        Constant.INVALID_CODE,
+                        JmxEnum.UNKNOWN.getPort(),
                         assignment.getWorkerState().url(),
                         assignment.getAssignment().leaderUrl(),
                         memberDescription.consumerId().equals(assignment.getAssignment().leader()) ? Constant.YES : Constant.NO
Constant.YES : Constant.NO @@ -115,7 +111,7 @@ private Result handleWorkerMetadata(Long connectClusterId, KSGroupDescript connectClusterId, memberDescription.consumerId(), memberDescription.host().substring(1), - Constant.INVALID_CODE, + JmxEnum.UNKNOWN.getPort(), "", "", Constant.NO diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncKafkaGroupTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncKafkaGroupTask.java index 521e1f846..9358993e3 100644 --- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncKafkaGroupTask.java +++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncKafkaGroupTask.java @@ -36,7 +36,7 @@ public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnit // 获取集群的Group列表 List groupNameList = groupService.listGroupsFromKafka(clusterPhy); - TaskResult allSuccess = TaskResult.SUCCESS; + Set getFailedGroupSet = new HashSet<>(); // 获取Group详细信息 List groupList = new ArrayList<>(); @@ -44,13 +44,16 @@ public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnit try { Group group = groupService.getGroupFromKafka(clusterPhy, groupName); if (group == null) { + // 获取到为空的 group 信息,直接忽略不要 continue; } groupList.add(group); } catch (Exception e) { log.error("method=processClusterTask||clusterPhyId={}||groupName={}||errMsg=exception", clusterPhy.getId(), groupName, e); - allSuccess = TaskResult.FAIL; + + // 记录获取失败的 group 信息 + getFailedGroupSet.add(groupName); } } @@ -58,17 +61,9 @@ public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnit this.filterTopicIfTopicNotExist(clusterPhy.getId(), groupList); // 更新DB中的Group信息 - groupService.batchReplaceGroupsAndMembers(clusterPhy.getId(), groupList, triggerTimeUnitMs); - - // 如果存在错误,则直接返回 - if (!TaskResult.SUCCESS.equals(allSuccess)) { - return allSuccess; - } - - // 删除历史的Group - groupService.deleteByUpdateTimeBeforeInDB(clusterPhy.getId(), new Date(triggerTimeUnitMs - 5 * 60 * 1000)); + groupService.batchReplaceGroupsAndMembers(clusterPhy.getId(), groupList, getFailedGroupSet); - return allSuccess; + return getFailedGroupSet.isEmpty()? TaskResult.SUCCESS: TaskResult.FAIL; } private void filterTopicIfTopicNotExist(Long clusterPhyId, List groupList) { diff --git a/pom.xml b/pom.xml index e2f062a33..f97c9a2c4 100644 --- a/pom.xml +++ b/pom.xml @@ -6,7 +6,7 @@ com.xiaojukeji.kafka km pom - ${km.revision} + ${revision} org.springframework.boot @@ -15,7 +15,7 @@ - enterprise-3.3.0 + enterprise-3.3.0 8 8 @@ -38,6 +38,7 @@ 2.3.7.RELEASE 5.3.19 9.0.41 + 2.13.5 1.2.83 @@ -330,6 +331,14 @@ import + + + + net.logstash.logback + logstash-logback-encoder + 7.1.1 + + \ No newline at end of file