From 8385d7ea0042da6e1fbfd90d490b217372602954 Mon Sep 17 00:00:00 2001 From: "xu.guo" <570736711@qq.com> Date: Thu, 28 Sep 2023 10:28:04 +0800 Subject: [PATCH 1/4] Improve host-level service startup --- .../api/controller/HostInstallController.java | 2 +- .../common/model/WorkerServiceMessage.java | 6 +++- .../ClusterServiceRoleInstanceMapper.java | 2 ++ .../ClusterServiceRoleInstanceMapper.xml | 6 ++++ .../api/master/WorkerStartActor.java | 36 ++++++++++++++----- .../ClusterServiceRoleInstanceService.java | 2 ++ .../api/service/InstallService.java | 2 +- ...ClusterServiceRoleInstanceServiceImpl.java | 5 +++ .../api/service/impl/InstallServiceImpl.java | 5 ++- datasophon-ui/src/pages/hostManage/index.vue | 15 ++++++-- 10 files changed, 66 insertions(+), 15 deletions(-) diff --git a/datasophon-api/src/main/java/com/datasophon/api/controller/HostInstallController.java b/datasophon-api/src/main/java/com/datasophon/api/controller/HostInstallController.java index f5b82557..ba2aeef5 100644 --- a/datasophon-api/src/main/java/com/datasophon/api/controller/HostInstallController.java +++ b/datasophon-api/src/main/java/com/datasophon/api/controller/HostInstallController.java @@ -132,7 +132,7 @@ public Result generateHostAgentCommand( } /** - * 启动 主机上服务启动 + * 启动/停止 主机上服务启动 * @param clusterHostIds * @param commandType * @return diff --git a/datasophon-common/src/main/java/com/datasophon/common/model/WorkerServiceMessage.java b/datasophon-common/src/main/java/com/datasophon/common/model/WorkerServiceMessage.java index 09243b5a..04e9406d 100644 --- a/datasophon-common/src/main/java/com/datasophon/common/model/WorkerServiceMessage.java +++ b/datasophon-common/src/main/java/com/datasophon/common/model/WorkerServiceMessage.java @@ -17,6 +17,7 @@ package com.datasophon.common.model; +import com.datasophon.common.enums.CommandType; import lombok.Data; import java.io.Serializable; @@ -44,11 +45,14 @@ public class WorkerServiceMessage implements Serializable { */ private String ip; + 
private CommandType commandType; + public WorkerServiceMessage() { } - public WorkerServiceMessage(String hostname, Integer clusterId) { + public WorkerServiceMessage(String hostname, Integer clusterId,CommandType commandType) { this.hostname = hostname; this.clusterId = clusterId; + this.commandType = commandType; } } diff --git a/datasophon-infrastructure/src/main/java/com/datasophon/dao/mapper/ClusterServiceRoleInstanceMapper.java b/datasophon-infrastructure/src/main/java/com/datasophon/dao/mapper/ClusterServiceRoleInstanceMapper.java index d3de76f1..1adfe5f1 100644 --- a/datasophon-infrastructure/src/main/java/com/datasophon/dao/mapper/ClusterServiceRoleInstanceMapper.java +++ b/datasophon-infrastructure/src/main/java/com/datasophon/dao/mapper/ClusterServiceRoleInstanceMapper.java @@ -35,4 +35,6 @@ public interface ClusterServiceRoleInstanceMapper extends BaseMapper { void updateToNeedRestart(@Param("roleGroupId") Integer roleGroupId); + + void updateToNeedRestartByHost(@Param("hostName") String hostName); } diff --git a/datasophon-infrastructure/src/main/resources/mapper/ClusterServiceRoleInstanceMapper.xml b/datasophon-infrastructure/src/main/resources/mapper/ClusterServiceRoleInstanceMapper.xml index 72effecb..614c7fde 100644 --- a/datasophon-infrastructure/src/main/resources/mapper/ClusterServiceRoleInstanceMapper.xml +++ b/datasophon-infrastructure/src/main/resources/mapper/ClusterServiceRoleInstanceMapper.xml @@ -26,4 +26,10 @@ where role_group_id = #{roleGroupId} + + update t_ddh_cluster_service_role_instance + set need_restart = 2 + where hostname = #{hostName} + + \ No newline at end of file diff --git a/datasophon-service/src/main/java/com/datasophon/api/master/WorkerStartActor.java b/datasophon-service/src/main/java/com/datasophon/api/master/WorkerStartActor.java index 302bc4a6..1a11a8e4 100644 --- a/datasophon-service/src/main/java/com/datasophon/api/master/WorkerStartActor.java +++ 
b/datasophon-service/src/main/java/com/datasophon/api/master/WorkerStartActor.java @@ -47,6 +47,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.datasophon.dao.enums.ServiceRoleState; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -108,28 +109,45 @@ public void onReceive(Object message) throws Throwable { prometheusActor.tell(prometheusConfigCommand, getSelf()); // tell to worker what need to start - autoStartServiceNeeded(msg.getHostname(), cluster.getId()); + autoAddServiceOperatorNeeded(msg.getHostname(), cluster.getId(), CommandType.START_SERVICE,false); } else if(message instanceof WorkerServiceMessage) { - // 启动节点上安装的服务 WorkerServiceMessage msg = (WorkerServiceMessage) message; - // tell to worker what need to start - autoStartServiceNeeded(msg.getHostname(), msg.getClusterId()); + // tell to worker what need to start/stop + autoAddServiceOperatorNeeded(msg.getHostname(), msg.getClusterId(), msg.getCommandType(),true); } } /** - * Automatically start services that need to be started + * Automatically start/stop services that need to be started * * @param clusterId */ - private void autoStartServiceNeeded(String hostname, Integer clusterId) { + private void autoAddServiceOperatorNeeded(String hostname, Integer clusterId,CommandType commandType, + boolean needRestart) { ClusterServiceRoleInstanceService roleInstanceService = SpringTool.getApplicationContext().getBean(ClusterServiceRoleInstanceService.class); ClusterServiceCommandService serviceCommandService = SpringTool.getApplicationContext().getBean(ClusterServiceCommandService.class); - List serviceRoleList = - roleInstanceService.listStoppedServiceRoleListByHostnameAndClusterId(hostname, clusterId); + List serviceRoleList = null; + // 启动服务 + if (CommandType.START_SERVICE.equals(commandType)) { + serviceRoleList = roleInstanceService + .listStoppedServiceRoleListByHostnameAndClusterId(hostname, clusterId); + // 重启时重刷服务配置以支持磁盘故障等问题 + if(needRestart){ + 
roleInstanceService.updateToNeedRestartByHost(hostname); + } + } + + // 停止运行状态的服务 + if(commandType.STOP_SERVICE.equals(commandType)){ + serviceRoleList = roleInstanceService + .getServiceRoleListByHostnameAndClusterId(hostname, clusterId).stream() + .filter(roleInstance -> (!ServiceRoleState.STOP.equals(roleInstance.getServiceRoleState()) && + !ServiceRoleState.DECOMMISSIONED.equals(roleInstance.getServiceRoleState()))).collect(toList()); + } + if (CollectionUtils.isEmpty(serviceRoleList)) { logger.info("No services need to start at host {}.", hostname); return; @@ -141,7 +159,7 @@ private void autoStartServiceNeeded(String hostname, Integer clusterId) { ClusterServiceRoleInstanceEntity::getServiceId, mapping(i -> String.valueOf(i.getId()), toList()))); Result result = - serviceCommandService.generateServiceRoleCommands(clusterId, CommandType.START_SERVICE, serviceRoleMap); + serviceCommandService.generateServiceRoleCommands(clusterId, commandType, serviceRoleMap); if (result.getCode() == 200) { logger.info("Auto-start services successful"); } else { diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/ClusterServiceRoleInstanceService.java b/datasophon-service/src/main/java/com/datasophon/api/service/ClusterServiceRoleInstanceService.java index 68e20492..d8335ded 100644 --- a/datasophon-service/src/main/java/com/datasophon/api/service/ClusterServiceRoleInstanceService.java +++ b/datasophon-service/src/main/java/com/datasophon/api/service/ClusterServiceRoleInstanceService.java @@ -65,6 +65,8 @@ List getServiceRoleInstanceListByClusterIdAndR void updateToNeedRestart(Integer roleGroupId); + void updateToNeedRestartByHost(String hostName); + List getObsoleteService(Integer id); List getStoppedRoleInstanceOnHost(Integer clusterId, String hostname, diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/InstallService.java b/datasophon-service/src/main/java/com/datasophon/api/service/InstallService.java index 9a996c09..2b65bf29 
100644 --- a/datasophon-service/src/main/java/com/datasophon/api/service/InstallService.java +++ b/datasophon-service/src/main/java/com/datasophon/api/service/InstallService.java @@ -44,7 +44,7 @@ Result analysisHostList(Integer clusterId, String hosts, String sshUser, Integer /** - * 启动 主机上安装的服务启动 + * 启动/停止 主机上安装的服务启动 * @param clusterHostIds * @param commandType * @return diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/impl/ClusterServiceRoleInstanceServiceImpl.java b/datasophon-service/src/main/java/com/datasophon/api/service/impl/ClusterServiceRoleInstanceServiceImpl.java index cc81382f..13d1345a 100644 --- a/datasophon-service/src/main/java/com/datasophon/api/service/impl/ClusterServiceRoleInstanceServiceImpl.java +++ b/datasophon-service/src/main/java/com/datasophon/api/service/impl/ClusterServiceRoleInstanceServiceImpl.java @@ -315,6 +315,11 @@ public void updateToNeedRestart(Integer roleGroupId) { roleInstanceMapper.updateToNeedRestart(roleGroupId); } + @Override + public void updateToNeedRestartByHost(String hostName) { + roleInstanceMapper.updateToNeedRestartByHost(hostName); + } + @Override public List getObsoleteService(Integer serviceInstanceId) { return this.lambdaQuery() diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/impl/InstallServiceImpl.java b/datasophon-service/src/main/java/com/datasophon/api/service/impl/InstallServiceImpl.java index 09d60a8a..74e515cc 100644 --- a/datasophon-service/src/main/java/com/datasophon/api/service/impl/InstallServiceImpl.java +++ b/datasophon-service/src/main/java/com/datasophon/api/service/impl/InstallServiceImpl.java @@ -41,6 +41,7 @@ import com.datasophon.common.cache.CacheUtils; import com.datasophon.common.command.DispatcherHostAgentCommand; import com.datasophon.common.command.HostCheckCommand; +import com.datasophon.common.enums.CommandType; import com.datasophon.common.enums.InstallState; import com.datasophon.common.model.CheckResult; import 
com.datasophon.common.model.HostInfo; @@ -451,9 +452,11 @@ public Result generateHostServiceCommand(String clusterHostIds, String commandTy String[] clusterHostIdArray = clusterHostIds.split(Constants.COMMA); List clusterHostList = hostService.getHostListByIds(Arrays.asList(clusterHostIdArray)); Result result = null; + + CommandType serviceCommandType = "start".equalsIgnoreCase(commandType) ? CommandType.START_SERVICE : CommandType.STOP_SERVICE; for (ClusterHostDO clusterHostDO : clusterHostList) { WorkerServiceMessage serviceMessage = new WorkerServiceMessage( - clusterHostDO.getHostname(), clusterHostDO.getClusterId()); + clusterHostDO.getHostname(), clusterHostDO.getClusterId(), serviceCommandType); try { ActorRef actor = ActorUtils.getLocalActor(WorkerStartActor.class, "workerStartActor"); diff --git a/datasophon-ui/src/pages/hostManage/index.vue b/datasophon-ui/src/pages/hostManage/index.vue index f0daa8d8..800c8e03 100644 --- a/datasophon-ui/src/pages/hostManage/index.vue +++ b/datasophon-ui/src/pages/hostManage/index.vue @@ -43,6 +43,7 @@ 启动主机服务 + 停止主机服务 启动主机Worker 停止主机Worker 重新安装Worker @@ -482,6 +483,10 @@ export default { this.doConfirm("启动该主机服务", this.handStartService); return false; } + if(key.key === "handStopService") { + this.doConfirm("停止该主机服务", this.handStopCommand); + return false; + } if(key.key === "handStartHost") { // 启动主机 Worker this.doConfirm("启动该主机 Worker", this.handStartHost) @@ -539,13 +544,19 @@ export default { }) }, handStartService() { + this.handCommand('start') + }, + handStopCommand() { + this.handCommand('stop') + }, + handCommand(op){ let params = { clusterHostIds: this.hostnames.join(","), - commandType: "start", + commandType: op, }; this.$axiosPost(global.API.generateHostServiceCommand, params).then((resp) => { if (resp.code === 200) { - this.$message.success("启动 Worker 服务成功"); + this.$message.success(op == 'start' ? 
"启动 Worker 服务成功" : "停止 Worker 服务成功"); } else { this.$message.error(resp.msg); } From a81e46fe03fd0a202dcf8c92d730fcad8977e940 Mon Sep 17 00:00:00 2001 From: "xu.guo" <570736711@qq.com> Date: Mon, 23 Oct 2023 17:05:03 +0800 Subject: [PATCH 2/4] Integrating Apache Kyuubi service. --- .../meta/DDP-1.2.0/HDFS/service_ddl.json | 36 ++ .../meta/DDP-1.2.0/KYUUBI/service_ddl.json | 354 ++++++++++++++++++ .../java/com/datasophon/api/enums/Status.java | 3 +- .../impl/ServiceInstallServiceImpl.java | 3 + .../strategy/KyuubiServerHandlerStrategy.java | 89 +++++ .../strategy/ServiceRoleStrategyContext.java | 139 +++---- .../worker/WorkerApplicationServer.java | 1 + .../handler/ConfigureServiceHandler.java | 11 + .../strategy/KyuubiServerHandlerStrategy.java | 45 +++ .../strategy/ServiceRoleStrategyContext.java | 124 +++--- .../main/resources/script/datasophon-env.sh | 1 + .../main/resources/templates/kyuubi-env.ftl | 75 ++++ 12 files changed, 750 insertions(+), 131 deletions(-) create mode 100644 datasophon-api/src/main/resources/meta/DDP-1.2.0/KYUUBI/service_ddl.json create mode 100644 datasophon-service/src/main/java/com/datasophon/api/strategy/KyuubiServerHandlerStrategy.java create mode 100644 datasophon-worker/src/main/java/com/datasophon/worker/strategy/KyuubiServerHandlerStrategy.java create mode 100644 datasophon-worker/src/main/resources/templates/kyuubi-env.ftl diff --git a/datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/service_ddl.json index 7867bbff..758d88f2 100644 --- a/datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/service_ddl.json +++ b/datasophon-api/src/main/resources/meta/DDP-1.2.0/HDFS/service_ddl.json @@ -193,6 +193,9 @@ "hadoop.proxyuser.hive.hosts", "hadoop.proxyuser.hive.groups", "hadoop.proxyuser.hive.users", + "hadoop.proxyuser.kyuubi.hosts", + "hadoop.proxyuser.kyuubi.groups", + "hadoop.proxyuser.kyuubi.users", "hadoop.http.staticuser.user", "ha.zookeeper.quorum", 
"hadoop.tmp.dir", @@ -344,6 +347,39 @@ "hidden": false, "defaultValue": "*" }, + { + "name": "hadoop.proxyuser.kyuubi.hosts", + "label": "允许通过代理访问的主机节点", + "description": "配置kyuubi允许通过代理访问的主机节点", + "required": true, + "type": "input", + "value": "", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "*" + }, + { + "name": "hadoop.proxyuser.kyuubi.groups", + "label": "允许通过代理用户所属组", + "description": "配置kyuubi允许通过代理用户所属组", + "required": true, + "type": "input", + "value": "", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "*" + }, + { + "name": "hadoop.proxyuser.kyuubi.users", + "label": "允许通过代理的用户", + "description": "配置kyuubi允许通过代理的用户", + "required": true, + "type": "input", + "value": "", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "*" + }, { "name": "dfs.replication", "label": "BLOCK副本数", diff --git a/datasophon-api/src/main/resources/meta/DDP-1.2.0/KYUUBI/service_ddl.json b/datasophon-api/src/main/resources/meta/DDP-1.2.0/KYUUBI/service_ddl.json new file mode 100644 index 00000000..3009cde5 --- /dev/null +++ b/datasophon-api/src/main/resources/meta/DDP-1.2.0/KYUUBI/service_ddl.json @@ -0,0 +1,354 @@ +{ + "name": "KYUUBI", + "label": "Kyuubi", + "description": "统一多租户JDBC网关", + "version": "1.7.3", + "sortNum": 30, + "dependencies":[], + "packageName": "kyuubi-1.7.3.tar.gz", + "decompressPackageName": "kyuubi-1.7.3", + "roles": [ + { + "name": "KyuubiServer", + "label": "KyuubiServer", + "roleType": "master", + "runAs": { + "user": "kyuubi", + "group": "hadoop" + }, + "cardinality": "1+", + "logFile": "logs/kyuubi-server-${host}.out", + "startRunner": { + "timeout": "60", + "program": "bin/kyuubi", + "args": [ + "start" + ] + }, + "stopRunner": { + "timeout": "600", + "program": "bin/kyuubi", + "args": [ + "stop" + ] + }, + "statusRunner": { + "timeout": "60", + "program": "bin/kyuubi", + "args": [ + "status" + ] + }, + "restartRunner": { + "timeout": "60", + "program": "bin/kyuubi", + "args": [ + 
"restart" + ] + } + }, + { + "name": "KyuubiClient", + "label": "KyuubiClient", + "roleType": "client", + "cardinality": "1+", + "logFile": "", + "runAs": { + "user": "kyuubi", + "group": "hadoop" + } + } + ], + "configWriter": { + "generators": [ + { + "filename": "kyuubi-defaults.conf", + "configFormat": "properties2", + "outputDirectory": "conf", + "includeParams": [ + "kyuubi.ha.zookeeper.namespace", + "kyuubi.ha.zookeeper.quorum", + "kyuubi.session.idle.timeout", + "kyuubi.session.engine.idle.timeout", + "kyuubi.session.engine.initialize.timeout", + "spark.master", + "kyuubi.metrics.reporters", + "kyuubi.metrics.prometheus.port", + "kyuubi.session.engine.spark.showProgress", + "kyuubi.metrics.enabled", + "enableKerberos", + "kyuubi.kinit.principal", + "kyuubi.kinit.keytab", + "kyuubi.authentication", + "custom.kyuubi-defaults.conf" + ] + }, + { + "filename": "kyuubi-env.sh", + "configFormat": "custom", + "outputDirectory": "conf", + "templateName": "kyuubi-env.ftl", + "includeParams": [ + "javaHome", + "sparkHome", + "hadoopConfDir", + "kyuubiServerHeapSize", + "kyuubiClientHeapSize", + "custom.kyuubi-env.sh" + ] + } + ] + }, + "parameters": [ + { + "name": "kyuubi.ha.zookeeper.quorum", + "label": "zookeeper服务信息", + "description": "zookeeper服务信息", + "required": true, + "type": "input", + "value": "", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "" + }, + { + "name": "kyuubi.ha.zookeeper.namespace", + "label": "zookeeper目录", + "description": "zookeeper目录", + "required": true, + "type": "input", + "value": "", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "" + }, + { + "name": "kyuubi.session.idle.timeout", + "label": "会话超时时间", + "description": "会话超时时间", + "required": true, + "type": "input", + "value": "", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "PT6H" + }, + { + "name": "kyuubi.session.engine.idle.timeout", + "label": "引擎超时时间", + "description": "引擎超时时间", + "required": true, + "type": 
"input", + "value": "", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "PT30M" + }, + { + "name": "spark.master", + "label": "配置spark为onYarn模式", + "description": "配置spark为onYarn模式", + "required": true, + "type": "input", + "value": "yarn", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "yarn" + }, + { + "name": "kyuubi.metrics.reporters", + "label": "监控输出格式", + "description": "监控输出格式", + "required": true, + "type": "input", + "value": "PROMETHEUS", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "PROMETHEUS" + }, + { + "name": "kyuubi.metrics.prometheus.port", + "label": "监控服务端口", + "description": "监控服务端口", + "required": true, + "type": "input", + "value": "10019", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "10019" + }, + { + "name": "kyuubi.session.engine.initialize.timeout", + "label": "引擎启动超时时间", + "description": "引擎启动超时时间", + "required": true, + "type": "input", + "value": "", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "PT3M" + }, + { + "name": "kyuubi.session.engine.spark.showProgress", + "label": "spark任务进度显示", + "description": "spark任务进度显示", + "required": true, + "type": "switch", + "value": false, + "configurableInWizard": true, + "hidden": false, + "defaultValue": false + }, + { + "name": "kyuubi.metrics.enabled", + "label": "服务监控指标", + "description": "服务监控指标", + "required": true, + "type": "switch", + "value": true, + "configurableInWizard": false, + "hidden": false, + "defaultValue": true + }, + { + "name": "javaHome", + "label": "java安装路径", + "description": "java安装路径", + "configType": "map", + "required": true, + "type": "input", + "value": "/usr/local/jdk1.8.0_333", + "configurableInWizard": false, + "hidden": false, + "defaultValue": "/usr/local/jdk1.8.0_333" + }, + { + "name": "sparkHome", + "label": "spark安装目录", + "description": "spark安装目录", + "configType": "map", + "required": true, + "type": "input", + "value": 
"/opt/datasophon/spark-3.1.3/", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "/opt/datasophon/spark-3.1.3/" + }, + { + "name": "hadoopConfDir", + "label": "hadoop配置目录", + "description": "hadoop配置目录", + "configType": "map", + "required": true, + "type": "input", + "value": "/opt/datasophon/hadoop-3.3.3/etc/hadoop", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "/opt/datasophon/hadoop-3.3.3/etc/hadoop" + }, + { + "name": "kyuubiServerHeapSize", + "label": "KyuubiServerjvm内存", + "description": "KyuubiServerjvm内存", + "configType": "map", + "required": true, + "minValue": 0, + "maxValue": 32, + "type": "slider", + "value": "", + "unit": "GB", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "4" + }, + { + "name": "kyuubiClientHeapSize", + "label": "KyuubiClientjvm内存", + "description": "KyuubiClientjvm内存", + "configType": "map", + "required": true, + "minValue": 0, + "maxValue": 32, + "type": "slider", + "value": "", + "unit": "GB", + "configurableInWizard": true, + "hidden": false, + "defaultValue": "2" + }, + { + "name": "enableKerberos", + "label": "开启Kerberos认证", + "description": "开启Kerberos认证", + "required": false, + "type": "switch", + "value": false, + "configurableInWizard": true, + "hidden": false, + "defaultValue": false + }, + { + "name": "kyuubi.authentication", + "label": "Kyuubi服务认证方式", + "description": "", + "configWithKerberos": true, + "required": false, + "configType": "kb", + "type": "input", + "value": "KERBEROS", + "configurableInWizard": true, + "hidden": true, + "defaultValue": "KERBEROS" + }, + { + "name": "kyuubi.kinit.principal", + "label": "Kyuubi服务的Kerberos主体", + "description": "", + "configWithKerberos": true, + "required": false, + "configType": "kb", + "type": "input", + "value": "kyuubi/${host}@${realm}", + "configurableInWizard": true, + "hidden": true, + "defaultValue": "kyuubi/${host}@${realm}" + }, + { + "name": "kyuubi.kinit.keytab", + "label": 
"Kyuubi服务的Kerberos密钥文件路径", + "description": "", + "configWithKerberos": true, + "required": false, + "configType": "kb", + "type": "input", + "value": "/etc/security/keytab/kyuubi.service.keytab", + "configurableInWizard": true, + "hidden": true, + "defaultValue": "/etc/security/keytab/kyuubi.service.keytab" + }, + { + "name": "custom.kyuubi-env.sh", + "label": "自定义配置kyuubi-env.sh", + "description": "自定义配置", + "configType": "custom", + "required": false, + "type": "multipleWithKey", + "value": [], + "configurableInWizard": true, + "hidden": false, + "defaultValue": "" + }, + { + "name": "custom.kyuubi-defaults.conf", + "label": "自定义配置kyuubi-defaults.conf", + "description": "自定义配置", + "configType": "custom", + "required": false, + "type": "multipleWithKey", + "value": [], + "configurableInWizard": true, + "hidden": false, + "defaultValue": "" + } + ] +} \ No newline at end of file diff --git a/datasophon-service/src/main/java/com/datasophon/api/enums/Status.java b/datasophon-service/src/main/java/com/datasophon/api/enums/Status.java index 6d5254ca..baab38ab 100644 --- a/datasophon-service/src/main/java/com/datasophon/api/enums/Status.java +++ b/datasophon-service/src/main/java/com/datasophon/api/enums/Status.java @@ -86,7 +86,8 @@ public enum Status { "All instances of the same service on the same machine need to be within the same role group", "同一个服务在同一台机器上的所有实例需要在同一个角色组内"), ODD_NUMBER_ARE_REQUIRED_FOR_DORISFE(10040, "The Number of DorisFE must be an odd number.", "DorisFE个数必须是奇数"), - NO_SERVICE_ROLE_SELECTED(10041, "No service role selected", "未选择需要安装的服务实例"); + NO_SERVICE_ROLE_SELECTED(10041, "No service role selected", "未选择需要安装的服务实例"), + TWO_KYUUBISERVERS_NEED_TO_BE_DEPLOYED(10042, "two kyuubiServer deployments are required", "KyuubiServer需要两个节点"),; private final int code; diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/impl/ServiceInstallServiceImpl.java 
b/datasophon-service/src/main/java/com/datasophon/api/service/impl/ServiceInstallServiceImpl.java index 5e031171..5b00bb93 100644 --- a/datasophon-service/src/main/java/com/datasophon/api/service/impl/ServiceInstallServiceImpl.java +++ b/datasophon-service/src/main/java/com/datasophon/api/service/impl/ServiceInstallServiceImpl.java @@ -706,6 +706,9 @@ private void serviceValidation(ServiceRoleHostMapping serviceRoleHostMapping) { if ("DorisFE".equals(serviceRole) && (hosts.size() & 1) == 0) { throw new ServiceException(Status.ODD_NUMBER_ARE_REQUIRED_FOR_DORISFE.getMsg()); } + if ("KyuubiServer".equals(serviceRole) && hosts.size() !=2) { + throw new ServiceException(Status.TWO_KYUUBISERVERS_NEED_TO_BE_DEPLOYED.getMsg()); + } } private List listServiceConfigByServiceInstance( diff --git a/datasophon-service/src/main/java/com/datasophon/api/strategy/KyuubiServerHandlerStrategy.java b/datasophon-service/src/main/java/com/datasophon/api/strategy/KyuubiServerHandlerStrategy.java new file mode 100644 index 00000000..9486152e --- /dev/null +++ b/datasophon-service/src/main/java/com/datasophon/api/strategy/KyuubiServerHandlerStrategy.java @@ -0,0 +1,89 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.datasophon.api.strategy; + +import com.datasophon.api.load.GlobalVariables; +import com.datasophon.api.load.ServiceConfigMap; +import com.datasophon.api.utils.ProcessUtils; +import com.datasophon.common.Constants; +import com.datasophon.common.model.ServiceConfig; +import com.datasophon.common.model.ServiceRoleInfo; +import com.datasophon.dao.entity.ClusterInfoEntity; +import com.datasophon.dao.entity.ClusterServiceRoleInstanceEntity; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public class KyuubiServerHandlerStrategy extends ServiceHandlerAbstract implements ServiceRoleStrategy{ + + private static final Logger logger = LoggerFactory.getLogger(KyuubiServerHandlerStrategy.class); + private static final String ENABLE_KERBEROS = "enableKerberos"; + + @Override + public void handler(Integer clusterId, List hosts) { + + } + + @Override + public void handlerConfig(Integer clusterId, List list) { + + Map globalVariables = GlobalVariables.get(clusterId); + boolean enableKerberos = false; + Map map = ProcessUtils.translateToMap(list); + ClusterInfoEntity clusterInfo = ProcessUtils.getClusterInfo(clusterId); + // todo: 判断kerberos的逻辑应该抽取到公共方法中 + for (ServiceConfig config : list) { + if (ENABLE_KERBEROS.equals(config.getName())) { + enableKerberos = + isEnableKerberos( + clusterId, globalVariables, enableKerberos, config, "KYUUBI"); + } + } + String key = clusterInfo.getClusterFrame() + Constants.UNDERLINE + "KYUUBI" + Constants.CONFIG; + List configs = ServiceConfigMap.get(key); + ArrayList kbConfigs = new ArrayList<>(); + if (enableKerberos) { + addConfigWithKerberos(globalVariables, map, configs, kbConfigs); + } else { + removeConfigWithKerberos(list, map, configs); + } + list.addAll(kbConfigs); + } + + @Override + public void getConfig(Integer clusterId, List list) { + + } + + @Override + public void 
handlerServiceRoleInfo(ServiceRoleInfo serviceRoleInfo, String hostname) { + + } + + @Override + public void handlerServiceRoleCheck(ClusterServiceRoleInstanceEntity roleInstanceEntity, + Map map) { + } + +} diff --git a/datasophon-service/src/main/java/com/datasophon/api/strategy/ServiceRoleStrategyContext.java b/datasophon-service/src/main/java/com/datasophon/api/strategy/ServiceRoleStrategyContext.java index eba9235e..4c6d15e8 100644 --- a/datasophon-service/src/main/java/com/datasophon/api/strategy/ServiceRoleStrategyContext.java +++ b/datasophon-service/src/main/java/com/datasophon/api/strategy/ServiceRoleStrategyContext.java @@ -1,69 +1,70 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.datasophon.api.strategy; - -import org.apache.commons.lang.StringUtils; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class ServiceRoleStrategyContext { - - private static final Map map = new ConcurrentHashMap<>(); - - static { - map.put("NameNode", new NameNodeHandlerStrategy()); - map.put("ResourceManager", new RMHandlerStrategy()); - map.put("HiveMetaStore", new HiveMetaStroreHandlerStrategy()); - map.put("HiveServer2", new HiveServer2HandlerStrategy()); - map.put("Grafana", new GrafanaHandlerStrategy()); - map.put("ZkServer", new ZkServerHandlerStrategy()); - map.put("HistoryServer", new HistoryServerHandlerStrategy()); - map.put("TimelineServer", new TSHandlerStrategy()); - map.put("TrinoCoordinator", new TrinoHandlerStrategy()); - map.put("JournalNode", new JournalNodeHandlerStrategy()); - map.put("ZKFC", new ZKFCHandlerStrategy()); - map.put("SRFE", new FEHandlerStartegy()); - map.put("DorisFE", new FEHandlerStartegy()); - map.put("DorisFEObserver", new FEObserverHandlerStartegy()); - map.put("SRBE", new BEHandlerStartegy()); - map.put("DorisBE", new BEHandlerStartegy()); - map.put("Krb5Kdc", new Krb5KdcHandlerStrategy()); - map.put("KAdmin", new KAdminHandlerStrategy()); - map.put("RangerAdmin", new RangerAdminHandlerStrategy()); - map.put("ElasticSearch", new ElasticSearchHandlerStrategy()); - map.put("Prometheus", new PrometheusHandlerStrategy()); - map.put("AlertManager", new AlertManagerHandlerStrategy()); - - map.put("RANGER", new RangerAdminHandlerStrategy()); - map.put("ZOOKEEPER", new ZkServerHandlerStrategy()); - map.put("YARN", new RMHandlerStrategy()); - map.put("HDFS", new NameNodeHandlerStrategy()); - map.put("HIVE", new HiveServer2HandlerStrategy()); - map.put("KAFKA", new KafkaHandlerStrategy()); - map.put("HBASE", new HBaseHandlerStrategy()); - map.put("FLINK", new FlinkHandlerStrategy()); - } - - public static ServiceRoleStrategy getServiceRoleHandler(String type) { - if 
(StringUtils.isBlank(type)) { - return null; - } - return map.get(type); - } -} +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.datasophon.api.strategy; + +import org.apache.commons.lang.StringUtils; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class ServiceRoleStrategyContext { + + private static final Map map = new ConcurrentHashMap<>(); + + static { + map.put("NameNode", new NameNodeHandlerStrategy()); + map.put("ResourceManager", new RMHandlerStrategy()); + map.put("HiveMetaStore", new HiveMetaStroreHandlerStrategy()); + map.put("HiveServer2", new HiveServer2HandlerStrategy()); + map.put("Grafana", new GrafanaHandlerStrategy()); + map.put("ZkServer", new ZkServerHandlerStrategy()); + map.put("HistoryServer", new HistoryServerHandlerStrategy()); + map.put("TimelineServer", new TSHandlerStrategy()); + map.put("TrinoCoordinator", new TrinoHandlerStrategy()); + map.put("JournalNode", new JournalNodeHandlerStrategy()); + map.put("ZKFC", new ZKFCHandlerStrategy()); + map.put("SRFE", new FEHandlerStartegy()); + map.put("DorisFE", new FEHandlerStartegy()); + map.put("DorisFEObserver", new FEObserverHandlerStartegy()); + map.put("SRBE", new BEHandlerStartegy()); + 
map.put("DorisBE", new BEHandlerStartegy()); + map.put("Krb5Kdc", new Krb5KdcHandlerStrategy()); + map.put("KAdmin", new KAdminHandlerStrategy()); + map.put("RangerAdmin", new RangerAdminHandlerStrategy()); + map.put("ElasticSearch", new ElasticSearchHandlerStrategy()); + map.put("Prometheus", new PrometheusHandlerStrategy()); + map.put("AlertManager", new AlertManagerHandlerStrategy()); + + map.put("RANGER", new RangerAdminHandlerStrategy()); + map.put("ZOOKEEPER", new ZkServerHandlerStrategy()); + map.put("YARN", new RMHandlerStrategy()); + map.put("HDFS", new NameNodeHandlerStrategy()); + map.put("HIVE", new HiveServer2HandlerStrategy()); + map.put("KAFKA", new KafkaHandlerStrategy()); + map.put("HBASE", new HBaseHandlerStrategy()); + map.put("FLINK", new FlinkHandlerStrategy()); + map.put("KYUUBI",new KyuubiServerHandlerStrategy()); + } + + public static ServiceRoleStrategy getServiceRoleHandler(String type) { + if (StringUtils.isBlank(type)) { + return null; + } + return map.get(type); + } +} diff --git a/datasophon-worker/src/main/java/com/datasophon/worker/WorkerApplicationServer.java b/datasophon-worker/src/main/java/com/datasophon/worker/WorkerApplicationServer.java index 479f73c9..61e8e559 100644 --- a/datasophon-worker/src/main/java/com/datasophon/worker/WorkerApplicationServer.java +++ b/datasophon-worker/src/main/java/com/datasophon/worker/WorkerApplicationServer.java @@ -108,6 +108,7 @@ private static void initUserMap(Map userMap) { userMap.put("hive", HADOOP); userMap.put("mapred", HADOOP); userMap.put("hbase", HADOOP); + userMap.put("kyuubi",HADOOP); userMap.put("elastic", "elastic"); } diff --git a/datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java b/datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java index 93d4728b..9dd588eb 100644 --- a/datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java +++ 
b/datasophon-worker/src/main/java/com/datasophon/worker/handler/ConfigureServiceHandler.java @@ -135,6 +135,17 @@ public ExecResult configure(Map> cofigFileMap, config.setName("priority_networks"); } + if("KyuubiServer".equals(serviceRoleName) && "sparkHome".equals(config.getName())){ + // add hive-site.xml link in kerberos module + final String targetPath = Constants.INSTALL_PATH + File.separator + decompressPackageName+"/conf/hive-site.xml"; + if(!FileUtil.exist(targetPath)){ + logger.info("Add hive-site.xml link"); + ExecResult result = ShellUtils.exceShell("ln -s "+config.getValue()+"/conf/hive-site.xml "+targetPath); + if(!result.getExecResult()){ + logger.warn("Add hive-site.xml link failed,msg: "+result.getExecErrOut()); + } + } + } } if (Objects.nonNull(myid) && StringUtils.isNotBlank(dataDir)) { diff --git a/datasophon-worker/src/main/java/com/datasophon/worker/strategy/KyuubiServerHandlerStrategy.java b/datasophon-worker/src/main/java/com/datasophon/worker/strategy/KyuubiServerHandlerStrategy.java new file mode 100644 index 00000000..cba3d4b0 --- /dev/null +++ b/datasophon-worker/src/main/java/com/datasophon/worker/strategy/KyuubiServerHandlerStrategy.java @@ -0,0 +1,45 @@ +package com.datasophon.worker.strategy; + +import cn.hutool.core.io.FileUtil; +import com.datasophon.common.Constants; +import com.datasophon.common.cache.CacheUtils; +import com.datasophon.common.command.ServiceRoleOperateCommand; +import com.datasophon.common.utils.ExecResult; +import com.datasophon.worker.handler.ServiceHandler; +import com.datasophon.worker.utils.KerberosUtils; +import java.sql.SQLException; + +/** + * @author thomasgx + * @date 2023年10月20日 19:02 + */ +public class KyuubiServerHandlerStrategy extends AbstractHandlerStrategy implements + ServiceRoleStrategy { + + private static final String KEYTAB_NAME = "kyuubi.service.keytab"; + private static final String KEYTAB_PATH = "/etc/security/keytab/" + KEYTAB_NAME; + + public KyuubiServerHandlerStrategy(String 
serviceName, String serviceRoleName) { + super(serviceName, serviceRoleName); + } + + @Override + public ExecResult handler(ServiceRoleOperateCommand command) + throws SQLException, ClassNotFoundException { + ExecResult startResult; + if (command.getEnableKerberos()) { + logger.info("start to get kyuubi keytab file"); + String hostname = CacheUtils.getString(Constants.HOSTNAME); + KerberosUtils.createKeytabDir(); + if (!FileUtil.exist(KEYTAB_PATH)) { + KerberosUtils.downloadKeytabFromMaster("kyuubi/" + hostname, KEYTAB_NAME); + } + } + + ServiceHandler serviceHandler = new ServiceHandler(command.getServiceName(), + command.getServiceRoleName()); + startResult = serviceHandler.start(command.getStartRunner(), command.getStatusRunner(), + command.getDecompressPackageName(), command.getRunAs()); + return startResult; + } +} diff --git a/datasophon-worker/src/main/java/com/datasophon/worker/strategy/ServiceRoleStrategyContext.java b/datasophon-worker/src/main/java/com/datasophon/worker/strategy/ServiceRoleStrategyContext.java index 5665e05d..a5a21911 100644 --- a/datasophon-worker/src/main/java/com/datasophon/worker/strategy/ServiceRoleStrategyContext.java +++ b/datasophon-worker/src/main/java/com/datasophon/worker/strategy/ServiceRoleStrategyContext.java @@ -1,61 +1,63 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datasophon.worker.strategy; - -import org.apache.commons.lang.StringUtils; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class ServiceRoleStrategyContext { - - private static final Map map = new ConcurrentHashMap<>(); - - static { - map.put("NameNode", new NameNodeHandlerStrategy("HDFS", "NameNode")); - map.put("ZKFC", new ZKFCHandlerStrategy("HDFS", "ZKFC")); - map.put("JournalNode", new JournalNodeHandlerStrategy("HDFS", "JournalNode")); - map.put("DataNode", new DataNodeHandlerStrategy("HDFS", "DataNode")); - map.put("ResourceManager", new ResourceManagerHandlerStrategy("YARN", "ResourceManager")); - map.put("NodeManager", new NodeManagerHandlerStrategy("YARN", "NodeManager")); - map.put("RangerAdmin", new RangerAdminHandlerStrategy("RANGER", "RangerAdmin")); - map.put("HiveServer2", new HiveServer2HandlerStrategy("HIVE", "HiveServer2")); - map.put("HbaseMaster", new HbaseHandlerStrategy("HBASE", "HbaseMaster")); - map.put("RegionServer", new HbaseHandlerStrategy("HBASE", "RegionServer")); - map.put("Krb5Kdc", new Krb5KdcHandlerStrategy("KERBEROS", "Krb5Kdc")); - map.put("KAdmin", new KAdminHandlerStrategy("KERBEROS", "KAdmin")); - map.put("SRFE", new FEHandlerStrategy("STARROCKS", "SRFE")); - map.put("DorisFE", new FEHandlerStrategy("DORIS", "DorisFE")); - map.put("DorisFEObserver", new FEObserverHandlerStrategy("DORIS", "DorisFEObserver")); - map.put("ZkServer", new ZkServerHandlerStrategy("ZOOKEEPER", "ZkServer")); - map.put("KafkaBroker", new KafkaHandlerStrategy("KAFKA", "KafkaBroker")); - map.put("SRBE", new BEHandlerStrategy("STARROCKS", "SRBE")); - map.put("DorisBE", new BEHandlerStrategy("DORIS", "DorisBE")); - map.put("HistoryServer", new HistoryServerHandlerStrategy("YARN", "HistoryServer")); - - // TEZ Server service - map.put("TezServer", new TezServerHandlerStrategy("TEZ", "TezServer")); 
- } - - public static ServiceRoleStrategy getServiceRoleHandler(String type) { - if (StringUtils.isBlank(type)) { - return null; - } - return map.get(type); - } -} +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.datasophon.worker.strategy; + +import org.apache.commons.lang.StringUtils; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class ServiceRoleStrategyContext { + + private static final Map map = new ConcurrentHashMap<>(); + + static { + map.put("NameNode", new NameNodeHandlerStrategy("HDFS", "NameNode")); + map.put("ZKFC", new ZKFCHandlerStrategy("HDFS", "ZKFC")); + map.put("JournalNode", new JournalNodeHandlerStrategy("HDFS", "JournalNode")); + map.put("DataNode", new DataNodeHandlerStrategy("HDFS", "DataNode")); + map.put("ResourceManager", new ResourceManagerHandlerStrategy("YARN", "ResourceManager")); + map.put("NodeManager", new NodeManagerHandlerStrategy("YARN", "NodeManager")); + map.put("RangerAdmin", new RangerAdminHandlerStrategy("RANGER", "RangerAdmin")); + map.put("HiveServer2", new HiveServer2HandlerStrategy("HIVE", "HiveServer2")); + map.put("HbaseMaster", new HbaseHandlerStrategy("HBASE", "HbaseMaster")); + map.put("RegionServer", new 
HbaseHandlerStrategy("HBASE", "RegionServer")); + map.put("Krb5Kdc", new Krb5KdcHandlerStrategy("KERBEROS", "Krb5Kdc")); + map.put("KAdmin", new KAdminHandlerStrategy("KERBEROS", "KAdmin")); + map.put("SRFE", new FEHandlerStrategy("STARROCKS", "SRFE")); + map.put("DorisFE", new FEHandlerStrategy("DORIS", "DorisFE")); + map.put("DorisFEObserver", new FEObserverHandlerStrategy("DORIS", "DorisFEObserver")); + map.put("ZkServer", new ZkServerHandlerStrategy("ZOOKEEPER", "ZkServer")); + map.put("KafkaBroker", new KafkaHandlerStrategy("KAFKA", "KafkaBroker")); + map.put("SRBE", new BEHandlerStrategy("STARROCKS", "SRBE")); + map.put("DorisBE", new BEHandlerStrategy("DORIS", "DorisBE")); + map.put("HistoryServer", new HistoryServerHandlerStrategy("YARN", "HistoryServer")); + + // TEZ Server service + map.put("TezServer", new TezServerHandlerStrategy("TEZ", "TezServer")); + //kyuubi + map.put("KyuubiServer", new KyuubiServerHandlerStrategy("KYUUBI", "KyuubiServer")); + } + + public static ServiceRoleStrategy getServiceRoleHandler(String type) { + if (StringUtils.isBlank(type)) { + return null; + } + return map.get(type); + } +} diff --git a/datasophon-worker/src/main/resources/script/datasophon-env.sh b/datasophon-worker/src/main/resources/script/datasophon-env.sh index 2f3f0a88..f957c15e 100644 --- a/datasophon-worker/src/main/resources/script/datasophon-env.sh +++ b/datasophon-worker/src/main/resources/script/datasophon-env.sh @@ -2,6 +2,7 @@ export JAVA_HOME=/usr/local/jdk1.8.0_333 CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar export JAVA_HOME CLASSPATH +export KYUUBI_HOME=/opt/datasophon/kyuubi-1.7.3 export SPARK_HOME=/opt/datasophon/spark-3.1.3 export PYSPARK_ALLOW_INSECURE_GATEWAY=1 export HIVE_HOME=/opt/datasophon/hive-3.1.0 diff --git a/datasophon-worker/src/main/resources/templates/kyuubi-env.ftl b/datasophon-worker/src/main/resources/templates/kyuubi-env.ftl new file mode 100644 index 00000000..670a9e1e --- /dev/null +++ 
b/datasophon-worker/src/main/resources/templates/kyuubi-env.ftl @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# - JAVA_HOME Java runtime to use. By default use "java" from PATH. +# +# +# - KYUUBI_CONF_DIR Directory containing the Kyuubi configurations to use. +# (Default: $KYUUBI_HOME/conf) +# - KYUUBI_LOG_DIR Directory for Kyuubi server-side logs. +# (Default: $KYUUBI_HOME/logs) +# - KYUUBI_PID_DIR Directory stores the Kyuubi instance pid file. +# (Default: $KYUUBI_HOME/pid) +# - KYUUBI_MAX_LOG_FILES Maximum number of Kyuubi server logs can rotate to. +# (Default: 5) +# - KYUUBI_JAVA_OPTS JVM options for the Kyuubi server itself in the form "-Dx=y". +# (Default: none). +# - KYUUBI_CTL_JAVA_OPTS JVM options for the Kyuubi ctl itself in the form "-Dx=y". +# (Default: none). +# - KYUUBI_BEELINE_OPTS JVM options for the Kyuubi BeeLine in the form "-Dx=Y". +# (Default: none) +# - KYUUBI_NICENESS The scheduling priority for Kyuubi server. +# (Default: 0) +# - KYUUBI_WORK_DIR_ROOT Root directory for launching sql engine applications. +# (Default: $KYUUBI_HOME/work) +# - HADOOP_CONF_DIR Directory containing the Hadoop / YARN configuration to use. 
+# - YARN_CONF_DIR Directory containing the YARN configuration to use. +# +# - SPARK_HOME Spark distribution which you would like to use in Kyuubi. +# - SPARK_CONF_DIR Optional directory where the Spark configuration lives. +# (Default: $SPARK_HOME/conf) +# - FLINK_HOME Flink distribution which you would like to use in Kyuubi. +# - FLINK_CONF_DIR Optional directory where the Flink configuration lives. +# (Default: $FLINK_HOME/conf) +# - FLINK_HADOOP_CLASSPATH Required Hadoop jars when you use the Kyuubi Flink engine. +# - HIVE_HOME Hive distribution which you would like to use in Kyuubi. +# - HIVE_CONF_DIR Optional directory where the Hive configuration lives. +# (Default: $HIVE_HOME/conf) +# - HIVE_HADOOP_CLASSPATH Required Hadoop jars when you use the Kyuubi Hive engine. +# + +# set server jvm +export KYUUBI_JAVA_OPTS="-Xmx${kyuubiServerHeapSize}g -XX:+UnlockDiagnosticVMOptions -XX:ParGCCardsPerStrideChunk=4096 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -XX:MaxDirectMemorySize=1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./logs -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -Xloggc:./logs/kyuubi-server-gc-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=5M -XX:NewRatio=3 -XX:MetaspaceSize=512m" + +# set client jvm +export KYUUBI_BEELINE_OPTS="-Xmx${kyuubiClientHeapSize}g -XX:+UnlockDiagnosticVMOptions -XX:ParGCCardsPerStrideChunk=4096 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark" +#jdk +export JAVA_HOME=${javaHome} +#spark engine +export SPARK_HOME=${sparkHome} + +#hadoop config +export HADOOP_CONF_DIR=${hadoopConfDir} +export 
YARN_CONF_DIR=${hadoopConfDir} + +# customer env +<#list itemList as item> + export ${item.name}=${item.value} + + + From b6906287cbe90b7ababef9c138314c3b7343d504 Mon Sep 17 00:00:00 2001 From: "xu.guo" <570736711@qq.com> Date: Tue, 24 Oct 2023 09:30:04 +0800 Subject: [PATCH 3/4] Add ApacheKyuubi doc --- docs/zh/ApacheKyuubi.md | 68 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 docs/zh/ApacheKyuubi.md diff --git a/docs/zh/ApacheKyuubi.md b/docs/zh/ApacheKyuubi.md new file mode 100644 index 00000000..85bfe373 --- /dev/null +++ b/docs/zh/ApacheKyuubi.md @@ -0,0 +1,68 @@ +# ApacheKyuubi + +## 概述 +` + ApacheKyuubi是一个分布式多租户网关,支持Spark,Flink,Hive等计算引擎, +依赖Kyuubi我们可以更方便的对数据湖组件进行集成. +` +## 连接器说明 +` +目前默认对spark做了集成,如果需要对其他引擎或者数据湖做集成可以参考:https://kyuubi.readthedocs.io/en/v1.7.3/connector/index.html +` +## 服务认证 +` + ApacheKyuubi对于认证支持多种方式,默认对Kerberos做了集成,只需要在安装时打开相关选项即可,如果 +需要集成其他认证模式可以参考:https://kyuubi.readthedocs.io/en/v1.7.3/security/index.html +` +## 权限集成 +` +在使用Spark引擎时我们可以借助ApacheKyuubi提供的RangerAuth插件使用现有的hive权限策略实现统一的权限 +管理,目前在集成时没有对这部分做集成(集成的方式是SparkExtension需要改动Spark的相关配置),需要使用权限 +可以参考:https://kyuubi.readthedocs.io/en/v1.7.3/security/authorization/spark/index.html +` +## 简单使用说明 + +### 这里以Spark引擎为示例: + +#### HA连接: +``` +beeline -u 'jdbc:hive2://zkhost:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=kyuubi_ns;principal=kyuubi/_HOST@HADOOP.COM' +``` +#### 指定服务器IP连接 +``` +beeline -u 'jdbc:hive2://serverhost:10009/;principal=kyuubi/_HOST@HADOOP.COM' -n userName +``` +##### 连接说明 +``` +我们在集成时默认是以HA的方式集成的,ApacheKyuubi高可用需要借助Zookeeper,因此这里的zkhost:2181是指我们的zk集群信息,serviceDiscoveryMode说明 +使用zk做服务发现,zooKeeperNamespace是zk的path信息,principal是在开启了Kerberos认证时需要指定的用户身份信息(注意这里的票据信息是固定的即Server端配置的信息, +_HOST是通配) +``` + +#### Sql查询 +``` +#查询方面与Hive beeline 没有区别两者等价,语法方面可以参考对应的Spark版本,如下执行show tables语句(这里删除打印的其他日志) +0: jdbc:hive2://192.168.163.127:2181/> show tables; ++-----------+------------+--------------+ +| database | tableName | isTemporary |
++-----------+------------+--------------+ +| default | my_table3 | false | +| default | my_table4 | false | ++-----------+------------+--------------+ +2 rows selected (3.875 seconds) + +#select查询 +0: jdbc:hive2://192.168.163.127:2181/> select 1 as col1; ++-------+ +| col1 | ++-------+ +| 1 | ++-------+ +``` + +## 其他 +``` +这里只列举简单的说明与基本使用方式,ApacheKyuubi的功能远不止于此,更详细的操作请参考官方文档:https://kyuubi.readthedocs.io/en/v1.7.3 +``` + + From 3dafc4e8c41c0f962c7d04a8a0f36e2951c86936 Mon Sep 17 00:00:00 2001 From: "xu.guo" <570736711@qq.com> Date: Tue, 24 Oct 2023 13:57:51 +0800 Subject: [PATCH 4/4] The rack information displayed in the host management shows the actual name instead of the rack --- .../api/service/host/impl/ClusterHostServiceImpl.java | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/datasophon-service/src/main/java/com/datasophon/api/service/host/impl/ClusterHostServiceImpl.java b/datasophon-service/src/main/java/com/datasophon/api/service/host/impl/ClusterHostServiceImpl.java index c5398fa6..5a3d0e2b 100644 --- a/datasophon-service/src/main/java/com/datasophon/api/service/host/impl/ClusterHostServiceImpl.java +++ b/datasophon-service/src/main/java/com/datasophon/api/service/host/impl/ClusterHostServiceImpl.java @@ -26,6 +26,7 @@ import com.datasophon.api.master.ActorUtils; import com.datasophon.api.master.PrometheusActor; import com.datasophon.api.master.RackActor; +import com.datasophon.api.service.ClusterRackService; import com.datasophon.api.service.host.ClusterHostService; import com.datasophon.api.service.ClusterInfoService; import com.datasophon.api.service.ClusterServiceRoleInstanceService; @@ -39,6 +40,7 @@ import com.datasophon.common.utils.Result; import com.datasophon.dao.entity.ClusterHostDO; import com.datasophon.dao.entity.ClusterInfoEntity; +import com.datasophon.dao.entity.ClusterRack; import com.datasophon.dao.entity.ClusterServiceRoleInstanceEntity; import com.datasophon.domain.host.enums.HostState; import
com.datasophon.dao.enums.RoleType; @@ -74,6 +76,9 @@ public class ClusterHostServiceImpl extends ServiceImpl rackMap = clusterRackService.queryClusterRack(clusterId).stream() + .collect(Collectors.toMap(obj->obj.getId()+"", ClusterRack::getRack)); for (ClusterHostDO clusterHostDO : list) { QueryHostListPageDTO queryHostListPageDTO = new QueryHostListPageDTO(); BeanUtils.copyProperties(clusterHostDO,queryHostListPageDTO); @@ -104,6 +113,7 @@ public Result listByPage(Integer clusterId, String hostname, String ip, String c .eq(Constants.HOSTNAME, clusterHostDO.getHostname())); queryHostListPageDTO.setServiceRoleNum(serviceRoleNum); queryHostListPageDTO.setHostState(clusterHostDO.getHostState().getValue()); + queryHostListPageDTO.setRack(rackMap.getOrDefault(queryHostListPageDTO.getRack(),"/default-rack")); hostListPageDTOS.add(queryHostListPageDTO); } int count = this.count(new QueryWrapper().eq(Constants.CLUSTER_ID, clusterId)