diff --git a/docs/en/seatunnel-engine/deployment.md b/docs/en/seatunnel-engine/deployment.md
index be38ac2db52..c07cd45d6b1 100644
--- a/docs/en/seatunnel-engine/deployment.md
+++ b/docs/en/seatunnel-engine/deployment.md
@@ -179,6 +179,7 @@ map:
type: hdfs
namespace: /tmp/seatunnel/imap
clusterName: seatunnel-cluster
+ storage.type: hdfs
fs.defaultFS: hdfs://localhost:9000
```
@@ -195,9 +196,32 @@ map:
type: hdfs
namespace: /tmp/seatunnel/imap
clusterName: seatunnel-cluster
+ storage.type: hdfs
fs.defaultFS: file:///
```
+If you use OSS, you can configure it like this:
+
+```yaml
+map:
+ engine*:
+ map-store:
+ enabled: true
+ initial-mode: EAGER
+ factory-class-name: org.apache.seatunnel.engine.server.persistence.FileMapStoreFactory
+ properties:
+ type: hdfs
+ namespace: /tmp/seatunnel/imap
+ clusterName: seatunnel-cluster
+ storage.type: oss
+ block.size: block size(bytes)
+ oss.bucket: oss://bucket name/
+ fs.oss.accessKeyId: OSS access key id
+ fs.oss.accessKeySecret: OSS access key secret
+ fs.oss.endpoint: OSS endpoint
+ fs.oss.credentials.provider: org.apache.hadoop.fs.aliyun.oss.AliyunCredentialsProvider
+```
+
## 6. Config SeaTunnel Engine Client
All SeaTunnel Engine Client config in `hazelcast-client.yaml`.
diff --git a/pom.xml b/pom.xml
index 84b16a88ded..ba473688193 100644
--- a/pom.xml
+++ b/pom.xml
@@ -140,6 +140,12 @@
3.10.0
4.2.0
true
+
+
+ 3.0.0
+ 2.4.7
+ 3.1.4
+ 4.1.60.Final
@@ -446,8 +452,40 @@
provided
-
+
+
+ org.apache.hadoop
+ hadoop-aliyun
+ ${hadoop-aliyun.version}
+ provided
+
+
+ net.minidev
+ json-smart
+
+
+
+
+
+ net.minidev
+ json-smart
+ ${json-smart.version}
+
+
+
+ org.apache.hadoop
+ hadoop-aws
+ ${hadoop-aws.version}
+ provided
+
+
+ io.netty
+ netty-buffer
+ ${netty-buffer.version}
+
+
+
diff --git a/seatunnel-dist/pom.xml b/seatunnel-dist/pom.xml
index de61efa9bf1..af963803fed 100644
--- a/seatunnel-dist/pom.xml
+++ b/seatunnel-dist/pom.xml
@@ -97,6 +97,14 @@
5.13.9
17.20.00.12
2.1.0.9
+
+
+ 3.0.0
+ 2.4.7
+ 3.1.4
+ 1.11.271
+ 4.1.89.Final
+
@@ -571,19 +579,28 @@
+
+ io.netty
+ netty-buffer
+ ${netty-buffer.version}
+ provided
+
+
org.apache.hadoop
- hadoop-aws
- 3.1.4
+ hadoop-aliyun
+ ${hadoop-aliyun.version}
provided
+
- com.amazonaws
- aws-java-sdk-bundle
- 1.11.271
+ org.apache.hadoop
+ hadoop-aws
+ ${hadoop-aws.version}
provided
+
org.apache.seatunnel
seatunnel-hadoop3-3.1.4-uber
diff --git a/seatunnel-dist/release-docs/LICENSE b/seatunnel-dist/release-docs/LICENSE
index 30f463ab7a0..7895b730525 100644
--- a/seatunnel-dist/release-docs/LICENSE
+++ b/seatunnel-dist/release-docs/LICENSE
@@ -273,7 +273,8 @@ The text of each license is the standard Apache 2.0 license.
(Apache-2.0) failureaccess (com.google.guava:failureaccess:1.0 https://mvnrepository.com/artifact/com.google.guava/failureaccess/1.0)
(Apache-2.0) j2objc-annotations (com.google.j2objc:j2objc-annotations:1.1 https://mvnrepository.com/artifact/com.google.j2objc/j2objc-annotations/1.1)
(Apache-2.0) listenablefuture (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava https://mvnrepository.com/artifact/com.google.guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava)
-
+        (Apache-2.0) accessors-smart (net.minidev:accessors-smart:2.4.7 - https://mvnrepository.com/artifact/net.minidev/accessors-smart)
+ (Apache-2.0) json-smart (net.minidev:json-smart:2.4.7 - https://mvnrepository.com/artifact/net.minidev/json-smart)
========================================================================
MOZILLA PUBLIC LICENSE License
@@ -292,7 +293,7 @@ The text of each license is also included at licenses/LICENSE-[project].txt.
(New BSD license) Protocol Buffer Java API (com.google.protobuf:protobuf-java:2.5.0 - http://code.google.com/p/protobuf)
(BSD 3-Clause) Scala Library (org.scala-lang:scala-library:2.11.12 - http://www.scala-lang.org/)
-
+       (BSD 3-Clause) ASM (org.ow2.asm:asm:9.1 - https://mvnrepository.com/artifact/org.ow2.asm/asm/)
========================================================================
CDDL License
========================================================================
diff --git a/seatunnel-dist/release-docs/licenses/LICENSE-asm.txt b/seatunnel-dist/release-docs/licenses/LICENSE-asm.txt
new file mode 100644
index 00000000000..631ee53c53d
--- /dev/null
+++ b/seatunnel-dist/release-docs/licenses/LICENSE-asm.txt
@@ -0,0 +1,27 @@
+ASM: a very small and fast Java bytecode manipulation framework
+Copyright (c) 2000-2011 INRIA, France Telecom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml b/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml
index c6f48949ab5..08d3d759a1a 100644
--- a/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml
+++ b/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml
@@ -170,6 +170,14 @@
org.apache.hadoop:hadoop-aws:jar
com.amazonaws:aws-java-sdk-bundle:jar
org.apache.seatunnel:seatunnel-hadoop3-3.1.4-uber:jar:*:optional
+
+ org.apache.hadoop:hadoop-aliyun:jar
+ com.aliyun.oss:aliyun-sdk-oss:jar
+ org.jdom:jdom:jar
+
+
+ io.netty:netty-buffer:jar
+ io.netty:netty-common:jar
${artifact.file.name}
/lib
diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/pom.xml b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/pom.xml
index 4bc151db9d1..a1315565349 100644
--- a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/pom.xml
+++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/pom.xml
@@ -29,6 +29,8 @@
2.4
+ 3.0.0
+ 4.1.89.Final
@@ -69,6 +71,27 @@
+
+
+ org.apache.hadoop
+ hadoop-aliyun
+ ${hadoop-aliyun.version}
+ test
+
+
+
+ com.aliyun.oss
+ aliyun-sdk-oss
+ 2.8.3
+ test
+
+
+
+ io.netty
+ netty-buffer
+ ${netty-buffer.version}
+ test
+
diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/ClusterFaultToleranceIT.java b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/ClusterFaultToleranceIT.java
index e22d417ef92..f7571968e8f 100644
--- a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/ClusterFaultToleranceIT.java
+++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/ClusterFaultToleranceIT.java
@@ -935,4 +935,223 @@ public void testStreamJobRestoreInAllNodeDown()
}
}
}
+
+ @SuppressWarnings("checkstyle:RegexpSingleline")
+ @Test
+ @Disabled
+ public void testStreamJobRestoreFromOssInAllNodeDown()
+ throws ExecutionException, InterruptedException {
+ String OSS_BUCKET_NAME = "oss://your bucket name/";
+ String OSS_ENDPOINT = "your oss endpoint";
+ String OSS_ACCESS_KEY_ID = "oss accessKey id";
+ String OSS_ACCESS_KEY_SECRET = "oss accessKey secret";
+
+ String testCaseName = "testStreamJobRestoreFromOssInAllNodeDown";
+ String testClusterName =
+ "ClusterFaultToleranceIT_testStreamJobRestoreFromOssInAllNodeDown_"
+ + System.currentTimeMillis();
+ int testRowNumber = 1000;
+ int testParallelism = 6;
+ HazelcastInstanceImpl node1 = null;
+ HazelcastInstanceImpl node2 = null;
+ SeaTunnelClient engineClient = null;
+
+ try {
+ String yaml =
+ "hazelcast:\n"
+ + " cluster-name: seatunnel\n"
+ + " network:\n"
+ + " rest-api:\n"
+ + " enabled: true\n"
+ + " endpoint-groups:\n"
+ + " CLUSTER_WRITE:\n"
+ + " enabled: true\n"
+ + " join:\n"
+ + " tcp-ip:\n"
+ + " enabled: true\n"
+ + " member-list:\n"
+ + " - localhost\n"
+ + " port:\n"
+ + " auto-increment: true\n"
+ + " port-count: 100\n"
+ + " port: 5801\n"
+ + " map:\n"
+ + " engine*:\n"
+ + " map-store:\n"
+ + " enabled: true\n"
+ + " initial-mode: EAGER\n"
+ + " factory-class-name: org.apache.seatunnel.engine.server.persistence.FileMapStoreFactory\n"
+ + " properties:\n"
+ + " type: hdfs\n"
+ + " namespace: /seatunnel-test/imap\n"
+ + " storage.type: oss\n"
+ + " clusterName: "
+ + testClusterName
+ + "\n"
+ + " oss.bucket: "
+ + OSS_BUCKET_NAME
+ + "\n"
+ + " fs.oss.accessKeyId: "
+ + OSS_ACCESS_KEY_ID
+ + "\n"
+ + " fs.oss.accessKeySecret: "
+ + OSS_ACCESS_KEY_SECRET
+ + "\n"
+ + " fs.oss.endpoint: "
+ + OSS_ENDPOINT
+ + "\n"
+ + " fs.oss.credentials.provider: org.apache.hadoop.fs.aliyun.oss.AliyunCredentialsProvider\n"
+ + " properties:\n"
+ + " hazelcast.invocation.max.retry.count: 200\n"
+ + " hazelcast.tcp.join.port.try.count: 30\n"
+ + " hazelcast.invocation.retry.pause.millis: 2000\n"
+ + " hazelcast.slow.operation.detector.stacktrace.logging.enabled: true\n"
+ + " hazelcast.logging.type: log4j2\n"
+ + " hazelcast.operation.generic.thread.count: 200\n";
+
+ Config hazelcastConfig = Config.loadFromString(yaml);
+ hazelcastConfig.setClusterName(TestUtils.getClusterName(testClusterName));
+ SeaTunnelConfig seaTunnelConfig = ConfigProvider.locateAndGetSeaTunnelConfig();
+ seaTunnelConfig.setHazelcastConfig(hazelcastConfig);
+ node1 = SeaTunnelServerStarter.createHazelcastInstance(seaTunnelConfig);
+
+ node2 = SeaTunnelServerStarter.createHazelcastInstance(seaTunnelConfig);
+
+ // waiting all node added to cluster
+ HazelcastInstanceImpl finalNode = node1;
+ Awaitility.await()
+ .atMost(10000, TimeUnit.MILLISECONDS)
+ .untilAsserted(
+ () ->
+ Assertions.assertEquals(
+ 2, finalNode.getCluster().getMembers().size()));
+
+ Common.setDeployMode(DeployMode.CLIENT);
+ ImmutablePair testResources =
+ createTestResources(
+ testCaseName, JobMode.STREAMING, testRowNumber, testParallelism);
+ JobConfig jobConfig = new JobConfig();
+ jobConfig.setName(testCaseName);
+
+ ClientConfig clientConfig = ConfigProvider.locateAndGetClientConfig();
+ clientConfig.setClusterName(TestUtils.getClusterName(testClusterName));
+ engineClient = new SeaTunnelClient(clientConfig);
+ JobExecutionEnvironment jobExecutionEnv =
+ engineClient.createExecutionContext(testResources.getRight(), jobConfig);
+ ClientJobProxy clientJobProxy = jobExecutionEnv.execute();
+ Long jobId = clientJobProxy.getJobId();
+
+ ClientJobProxy finalClientJobProxy = clientJobProxy;
+ Awaitility.await()
+ .atMost(600000, TimeUnit.MILLISECONDS)
+ .untilAsserted(
+ () -> {
+ // Wait some tasks commit finished, and we can get rows from the
+ // sink target dir
+ Thread.sleep(2000);
+ System.out.println(
+ "\n================================="
+ + FileUtils.getFileLineNumberFromDir(
+ testResources.getLeft())
+ + "=================================\n");
+ Assertions.assertTrue(
+ JobStatus.RUNNING.equals(finalClientJobProxy.getJobStatus())
+ && FileUtils.getFileLineNumberFromDir(
+ testResources.getLeft())
+ > 1);
+ });
+
+ Thread.sleep(5000);
+ // shutdown all node
+ node1.shutdown();
+ node2.shutdown();
+
+ log.info(
+ "==========================================All node is done========================================");
+ Thread.sleep(10000);
+
+ node1 = SeaTunnelServerStarter.createHazelcastInstance(seaTunnelConfig);
+
+ node2 = SeaTunnelServerStarter.createHazelcastInstance(seaTunnelConfig);
+
+ log.info(
+ "==========================================All node is start, begin check node size ========================================");
+ // waiting all node added to cluster
+ HazelcastInstanceImpl restoreFinalNode = node1;
+ Awaitility.await()
+ .atMost(60000, TimeUnit.MILLISECONDS)
+ .untilAsserted(
+ () ->
+ Assertions.assertEquals(
+ 2, restoreFinalNode.getCluster().getMembers().size()));
+
+ log.info(
+ "==========================================All node is running========================================");
+ engineClient = new SeaTunnelClient(clientConfig);
+ ClientJobProxy newClientJobProxy = engineClient.createJobClient().getJobProxy(jobId);
+ CompletableFuture waitForJobCompleteFuture =
+ CompletableFuture.supplyAsync(newClientJobProxy::waitForJobComplete);
+
+ Thread.sleep(10000);
+
+ Awaitility.await()
+ .atMost(100000, TimeUnit.MILLISECONDS)
+ .untilAsserted(
+ () -> {
+ // Wait job write all rows in file
+ Thread.sleep(2000);
+ System.out.println(
+ "\n================================="
+ + FileUtils.getFileLineNumberFromDir(
+ testResources.getLeft())
+ + "=================================\n");
+ JobStatus jobStatus = null;
+ try {
+ jobStatus = newClientJobProxy.getJobStatus();
+ } catch (Exception e) {
+ log.error(ExceptionUtils.getMessage(e));
+ }
+
+ Assertions.assertTrue(
+ JobStatus.RUNNING.equals(jobStatus)
+ && testRowNumber * testParallelism
+ == FileUtils.getFileLineNumberFromDir(
+ testResources.getLeft()));
+ });
+
+ // sleep 10s and expect the job don't write more rows.
+ Thread.sleep(10000);
+ log.info(
+ "==========================================Cancel Job========================================");
+ newClientJobProxy.cancelJob();
+
+ Awaitility.await()
+ .atMost(600000, TimeUnit.MILLISECONDS)
+ .untilAsserted(
+ () ->
+ Assertions.assertTrue(
+ waitForJobCompleteFuture.isDone()
+ && JobStatus.CANCELED.equals(
+ waitForJobCompleteFuture.get())));
+ // prove that the task was restarted
+ Long fileLineNumberFromDir =
+ FileUtils.getFileLineNumberFromDir(testResources.getLeft());
+ Assertions.assertEquals(testRowNumber * testParallelism, fileLineNumberFromDir);
+
+ } finally {
+ log.info(
+ "==========================================Clean test resource ========================================");
+ if (engineClient != null) {
+ engineClient.shutdown();
+ }
+
+ if (node1 != null) {
+ node1.shutdown();
+ }
+
+ if (node2 != null) {
+ node2.shutdown();
+ }
+ }
+ }
}
diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java
index 86f17a7bbb7..0b95baded64 100644
--- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java
+++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java
@@ -47,6 +47,7 @@
import org.apache.commons.collections4.CollectionUtils;
import com.google.common.collect.Lists;
+import com.hazelcast.instance.impl.NodeState;
import com.hazelcast.internal.metrics.DynamicMetricsProvider;
import com.hazelcast.internal.metrics.MetricDescriptor;
import com.hazelcast.internal.metrics.MetricsCollectionContext;
@@ -127,6 +128,8 @@ public class TaskExecutionService implements DynamicMetricsProvider {
private final ScheduledExecutorService scheduledExecutorService;
+ private CountDownLatch waitClusterStarted;
+
public TaskExecutionService(NodeEngineImpl nodeEngine, HazelcastProperties properties) {
seaTunnelConfig = ConfigProvider.locateAndGetSeaTunnelConfig();
this.hzInstanceName = nodeEngine.getHazelcastInstance().getName();
@@ -455,6 +458,15 @@ private synchronized void updateMetricsContextInImap() {
contextMap.putAll(finishedExecutionContexts);
contextMap.putAll(executionContexts);
try {
+ if (!nodeEngine.getNode().getState().equals(NodeState.ACTIVE)) {
+ logger.warning(
+ String.format(
+ "The Node is not ready yet, Node state %s,looking forward to the next "
+ + "scheduling",
+ nodeEngine.getNode().getState()));
+ return;
+ }
+
IMap map =
nodeEngine.getHazelcastInstance().getMap(Constant.IMAP_RUNNING_JOB_METRICS);
contextMap.forEach(
diff --git a/seatunnel-engine/seatunnel-engine-storage/imap-storage-plugins/imap-storage-file/pom.xml b/seatunnel-engine/seatunnel-engine-storage/imap-storage-plugins/imap-storage-file/pom.xml
index c4fd403ae11..c39ddda99c8 100644
--- a/seatunnel-engine/seatunnel-engine-storage/imap-storage-plugins/imap-storage-file/pom.xml
+++ b/seatunnel-engine/seatunnel-engine-storage/imap-storage-plugins/imap-storage-file/pom.xml
@@ -36,6 +36,7 @@
serializer-protobuf
${project.version}
+
org.apache.seatunnel
seatunnel-hadoop3-3.1.4-uber
@@ -62,6 +63,28 @@
org.awaitility
awaitility
+
+
+ org.apache.hadoop
+ hadoop-aliyun
+
+
+
+ net.minidev
+ json-smart
+
+
+
+ org.apache.hadoop
+ hadoop-aws
+
+
+
+ io.netty
+ netty-buffer
+ provided
+
+
diff --git a/seatunnel-engine/seatunnel-engine-storage/imap-storage-plugins/imap-storage-file/src/main/java/org/apache/seatunnel/engine/imap/storage/file/IMapFileStorage.java b/seatunnel-engine/seatunnel-engine-storage/imap-storage-plugins/imap-storage-file/src/main/java/org/apache/seatunnel/engine/imap/storage/file/IMapFileStorage.java
index c1e5ef18fc4..915981e476d 100644
--- a/seatunnel-engine/seatunnel-engine-storage/imap-storage-plugins/imap-storage-file/src/main/java/org/apache/seatunnel/engine/imap/storage/file/IMapFileStorage.java
+++ b/seatunnel-engine/seatunnel-engine-storage/imap-storage-plugins/imap-storage-file/src/main/java/org/apache/seatunnel/engine/imap/storage/file/IMapFileStorage.java
@@ -25,6 +25,8 @@
import org.apache.seatunnel.engine.imap.storage.file.bean.IMapFileData;
import org.apache.seatunnel.engine.imap.storage.file.common.FileConstants;
import org.apache.seatunnel.engine.imap.storage.file.common.WALReader;
+import org.apache.seatunnel.engine.imap.storage.file.config.AbstractConfiguration;
+import org.apache.seatunnel.engine.imap.storage.file.config.FileConfiguration;
import org.apache.seatunnel.engine.imap.storage.file.disruptor.WALDisruptor;
import org.apache.seatunnel.engine.imap.storage.file.disruptor.WALEventType;
import org.apache.seatunnel.engine.imap.storage.file.future.RequestFuture;
@@ -52,7 +54,6 @@
import static org.apache.seatunnel.engine.imap.storage.file.common.FileConstants.DEFAULT_IMAP_NAMESPACE;
import static org.apache.seatunnel.engine.imap.storage.file.common.FileConstants.FileInitProperties.BUSINESS_KEY;
import static org.apache.seatunnel.engine.imap.storage.file.common.FileConstants.FileInitProperties.CLUSTER_NAME;
-import static org.apache.seatunnel.engine.imap.storage.file.common.FileConstants.FileInitProperties.HDFS_CONFIG_KEY;
import static org.apache.seatunnel.engine.imap.storage.file.common.FileConstants.FileInitProperties.NAMESPACE_KEY;
import static org.apache.seatunnel.engine.imap.storage.file.common.FileConstants.FileInitProperties.WRITE_DATA_TIMEOUT_MILLISECONDS_KEY;
@@ -68,6 +69,8 @@
@Slf4j
public class IMapFileStorage implements IMapStorage {
+ private static final String STORAGE_TYPE_KEY = "storage.type";
+
public FileSystem fs;
public String namespace;
@@ -105,6 +108,8 @@ public class IMapFileStorage implements IMapStorage {
private Configuration conf;
+ private FileConfiguration fileConfiguration;
+
/**
* @param configuration configuration
* @see FileConstants.FileInitProperties
@@ -112,7 +117,16 @@ public class IMapFileStorage implements IMapStorage {
@Override
public void initialize(Map configuration) {
checkInitStorageProperties(configuration);
- Configuration hadoopConf = (Configuration) configuration.get(HDFS_CONFIG_KEY);
+
+ String storageType =
+ String.valueOf(
+ configuration.getOrDefault(
+ STORAGE_TYPE_KEY, FileConfiguration.HDFS.toString()));
+ this.fileConfiguration = FileConfiguration.valueOf(storageType.toUpperCase());
+ // build configuration
+ AbstractConfiguration fileConfiguration = this.fileConfiguration.getConfiguration();
+
+ Configuration hadoopConf = fileConfiguration.buildConfiguration(configuration);
this.conf = hadoopConf;
this.namespace = (String) configuration.getOrDefault(NAMESPACE_KEY, DEFAULT_IMAP_NAMESPACE);
this.businessName = (String) configuration.get(BUSINESS_KEY);
@@ -141,7 +155,10 @@ public void initialize(Map configuration) {
this.serializer = new ProtoStuffSerializer();
this.walDisruptor =
new WALDisruptor(
- fs, businessRootPath + region + DEFAULT_IMAP_FILE_PATH_SPLIT, serializer);
+ fs,
+ FileConfiguration.valueOf(storageType.toUpperCase()),
+ businessRootPath + region + DEFAULT_IMAP_FILE_PATH_SPLIT,
+ serializer);
}
@Override
@@ -211,7 +228,7 @@ public Set