Skip to content

Commit

Permalink
Replacing Jedis With Lettuce in ingestion and serving (#485)
Browse files Browse the repository at this point in the history
* Replacing Jedis With Lettuce in ingestion and serving

* Removing extra lines

* Abstracting Redis connection based on store

* Check the connection before connecting as lettuce does the retry automatically

* Running spotless

* Throw Exception if the job store config is null

* Handle No enum constant RuntimeException
  • Loading branch information
lavkesh authored and khorshuheng committed Mar 16, 2020
1 parent bc22fda commit 517e4c6
Show file tree
Hide file tree
Showing 20 changed files with 561 additions and 208 deletions.
4 changes: 2 additions & 2 deletions ingestion/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -216,8 +216,8 @@
</dependency>

<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<groupId>io.lettuce</groupId>
<artifactId>lettuce-core</artifactId>
</dependency>

<dependency>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@
import feast.core.FeatureSetProto.FeatureSet;
import feast.core.StoreProto.Store;
import feast.core.StoreProto.Store.BigQueryConfig;
import feast.core.StoreProto.Store.RedisConfig;
import feast.core.StoreProto.Store.StoreType;
import feast.ingestion.options.ImportOptions;
import feast.ingestion.utils.ResourceUtil;
Expand Down Expand Up @@ -88,13 +87,12 @@ public PDone expand(PCollection<FeatureRow> input) {

switch (storeType) {
case REDIS:
RedisConfig redisConfig = getStore().getRedisConfig();
PCollection<FailedElement> redisWriteResult =
input
.apply(
"FeatureRowToRedisMutation",
ParDo.of(new FeatureRowToRedisMutationDoFn(getFeatureSets())))
.apply("WriteRedisMutationToRedis", RedisCustomIO.write(redisConfig));
.apply("WriteRedisMutationToRedis", RedisCustomIO.write(getStore()));
if (options.getDeadLetterTableSpec() != null) {
redisWriteResult.apply(
WriteFailedElementToBigQuery.newBuilder()
Expand Down
14 changes: 8 additions & 6 deletions ingestion/src/main/java/feast/ingestion/utils/StoreUtil.java
Original file line number Diff line number Diff line change
Expand Up @@ -43,14 +43,15 @@
import feast.core.StoreProto.Store.RedisConfig;
import feast.core.StoreProto.Store.StoreType;
import feast.types.ValueProto.ValueType.Enum;
import io.lettuce.core.RedisClient;
import io.lettuce.core.RedisConnectionException;
import io.lettuce.core.RedisURI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.exceptions.JedisConnectionException;

// TODO: Create partitioned table by default

Expand Down Expand Up @@ -239,15 +240,16 @@ public static void setupBigQuery(
* @param redisConfig Please refer to feast.core.Store proto
*/
/**
 * Verifies that a Redis server is reachable at the host/port in the store config, failing fast
 * with a descriptive error instead of relying on Lettuce's automatic reconnect loop to surface a
 * misconfiguration later.
 *
 * @param redisConfig Please refer to feast.core.Store proto
 * @throws RuntimeException if a connection cannot be established
 */
public static void checkRedisConnection(RedisConfig redisConfig) {
  RedisClient redisClient =
      RedisClient.create(RedisURI.create(redisConfig.getHost(), redisConfig.getPort()));
  try {
    // connect() blocks until the handshake completes and throws on failure.
    redisClient.connect();
  } catch (RedisConnectionException e) {
    throw new RuntimeException(
        String.format(
            "Failed to connect to Redis at host: '%s' port: '%d'. Please check that your Redis is running and accessible from Feast.",
            redisConfig.getHost(), redisConfig.getPort()),
        e); // keep the cause so the underlying network error is not lost
  } finally {
    // Release client resources on both the success and the failure path.
    redisClient.shutdown();
  }
}
}
2 changes: 1 addition & 1 deletion ingestion/src/main/java/feast/retry/Retriable.java
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
package feast.retry;

public interface Retriable {
void execute();
void execute() throws Exception;

Boolean isExceptionRetriable(Exception e);

Expand Down
127 changes: 62 additions & 65 deletions ingestion/src/main/java/feast/store/serving/redis/RedisCustomIO.java
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,13 @@

import feast.core.StoreProto;
import feast.ingestion.values.FailedElement;
import feast.retry.BackOffExecutor;
import feast.retry.Retriable;
import io.lettuce.core.RedisConnectionException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.apache.avro.reflect.Nullable;
import org.apache.beam.sdk.coders.AvroCoder;
import org.apache.beam.sdk.coders.DefaultCoder;
Expand All @@ -32,14 +34,9 @@
import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
import org.apache.beam.sdk.values.PCollection;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.joda.time.Duration;
import org.joda.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;
import redis.clients.jedis.exceptions.JedisConnectionException;

public class RedisCustomIO {

Expand All @@ -50,8 +47,8 @@ public class RedisCustomIO {

private RedisCustomIO() {}

public static Write write(StoreProto.Store.RedisConfig redisConfig) {
return new Write(redisConfig);
public static Write write(StoreProto.Store store) {
return new Write(store);
}

public enum Method {
Expand Down Expand Up @@ -168,8 +165,8 @@ public static class Write

private WriteDoFn dofn;

private Write(StoreProto.Store.RedisConfig redisConfig) {
this.dofn = new WriteDoFn(redisConfig);
private Write(StoreProto.Store store) {
this.dofn = new WriteDoFn(store);
}

public Write withBatchSize(int batchSize) {
Expand All @@ -189,23 +186,14 @@ public PCollection<FailedElement> expand(PCollection<RedisMutation> input) {

public static class WriteDoFn extends DoFn<RedisMutation, FailedElement> {

private final String host;
private final int port;
private final BackOffExecutor backOffExecutor;
private final List<RedisMutation> mutations = new ArrayList<>();

private Jedis jedis;
private Pipeline pipeline;
private int batchSize = DEFAULT_BATCH_SIZE;
private int timeout = DEFAULT_TIMEOUT;
private RedisIngestionClient redisIngestionClient;

WriteDoFn(StoreProto.Store.RedisConfig redisConfig) {
this.host = redisConfig.getHost();
this.port = redisConfig.getPort();
long backoffMs =
redisConfig.getInitialBackoffMs() > 0 ? redisConfig.getInitialBackoffMs() : 1;
this.backOffExecutor =
new BackOffExecutor(redisConfig.getMaxRetries(), Duration.millis(backoffMs));
WriteDoFn(StoreProto.Store store) {
if (store.getType() == StoreProto.Store.StoreType.REDIS)
this.redisIngestionClient = new RedisStandaloneIngestionClient(store.getRedisConfig());
}

public WriteDoFn withBatchSize(int batchSize) {
Expand All @@ -224,55 +212,58 @@ public WriteDoFn withTimeout(int timeout) {

@Setup
public void setup() {
jedis = new Jedis(host, port, timeout);
this.redisIngestionClient.setup();
}

@StartBundle
public void startBundle() {
try {
redisIngestionClient.connect();
} catch (RedisConnectionException e) {
log.error("Connection to redis cannot be established ", e);
}
mutations.clear();
pipeline = jedis.pipelined();
}

private void executeBatch() throws Exception {
backOffExecutor.execute(
new Retriable() {
@Override
public void execute() {
mutations.forEach(
mutation -> {
writeRecord(mutation);
if (mutation.getExpiryMillis() != null && mutation.getExpiryMillis() > 0) {
pipeline.pexpire(mutation.getKey(), mutation.getExpiryMillis());
}
});
pipeline.sync();
mutations.clear();
}

@Override
public Boolean isExceptionRetriable(Exception e) {
return e instanceof JedisConnectionException;
}

@Override
public void cleanUpAfterFailure() {
try {
pipeline.close();
} catch (IOException e) {
log.error(String.format("Error while closing pipeline: %s", e.getMessage()));
}
jedis = new Jedis(host, port, timeout);
pipeline = jedis.pipelined();
}
});
this.redisIngestionClient
.getBackOffExecutor()
.execute(
new Retriable() {
@Override
public void execute() throws ExecutionException, InterruptedException {
if (!redisIngestionClient.isConnected()) {
redisIngestionClient.connect();
}
mutations.forEach(
mutation -> {
writeRecord(mutation);
if (mutation.getExpiryMillis() != null
&& mutation.getExpiryMillis() > 0) {
redisIngestionClient.pexpire(
mutation.getKey(), mutation.getExpiryMillis());
}
});
redisIngestionClient.sync();
mutations.clear();
}

@Override
public Boolean isExceptionRetriable(Exception e) {
return e instanceof RedisConnectionException;
}

@Override
public void cleanUpAfterFailure() {}
});
}

private FailedElement toFailedElement(
RedisMutation mutation, Exception exception, String jobName) {
return FailedElement.newBuilder()
.setJobName(jobName)
.setTransformName("RedisCustomIO")
.setPayload(mutation.getValue().toString())
.setPayload(Arrays.toString(mutation.getValue()))
.setErrorMessage(exception.getMessage())
.setStackTrace(ExceptionUtils.getStackTrace(exception))
.build();
Expand All @@ -297,20 +288,26 @@ public void processElement(ProcessContext context) {
}
}

private Response<?> writeRecord(RedisMutation mutation) {
private void writeRecord(RedisMutation mutation) {
switch (mutation.getMethod()) {
case APPEND:
return pipeline.append(mutation.getKey(), mutation.getValue());
redisIngestionClient.append(mutation.getKey(), mutation.getValue());
return;
case SET:
return pipeline.set(mutation.getKey(), mutation.getValue());
redisIngestionClient.set(mutation.getKey(), mutation.getValue());
return;
case LPUSH:
return pipeline.lpush(mutation.getKey(), mutation.getValue());
redisIngestionClient.lpush(mutation.getKey(), mutation.getValue());
return;
case RPUSH:
return pipeline.rpush(mutation.getKey(), mutation.getValue());
redisIngestionClient.rpush(mutation.getKey(), mutation.getValue());
return;
case SADD:
return pipeline.sadd(mutation.getKey(), mutation.getValue());
redisIngestionClient.sadd(mutation.getKey(), mutation.getValue());
return;
case ZADD:
return pipeline.zadd(mutation.getKey(), mutation.getScore(), mutation.getValue());
redisIngestionClient.zadd(mutation.getKey(), mutation.getScore(), mutation.getValue());
return;
default:
throw new UnsupportedOperationException(
String.format("Not implemented writing records for %s", mutation.getMethod()));
Expand All @@ -337,7 +334,7 @@ public void finishBundle(FinishBundleContext context)

@Teardown
public void teardown() {
jedis.close();
redisIngestionClient.shutdown();
}
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright 2018-2020 The Feast Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package feast.store.serving.redis;

import feast.retry.BackOffExecutor;
import java.io.Serializable;

/**
 * Abstraction over a Redis connection used by the ingestion write path, so the writer does not
 * depend on one concrete client; the implementation is chosen from the store config (e.g. a
 * standalone Lettuce client).
 *
 * <p>Extends {@link Serializable} because instances are held by a Beam DoFn.
 */
public interface RedisIngestionClient extends Serializable {

/** One-time client initialization; invoked from the DoFn's {@code @Setup} hook. */
void setup();

/** Returns the executor used to retry failed batches with backoff. */
BackOffExecutor getBackOffExecutor();

/** Releases the underlying client and its resources; invoked from {@code @Teardown}. */
void shutdown();

/**
 * Establishes the connection to Redis. Callers handle {@code RedisConnectionException} when the
 * server is unreachable.
 */
void connect();

/** Returns true if the client currently holds a live connection. */
boolean isConnected();

/** Flushes any buffered (batched) commands to the server after a batch of mutations. */
void sync();

/** Sets a time-to-live on {@code key} in milliseconds (Redis PEXPIRE). */
void pexpire(byte[] key, Long expiryMillis);

/** Appends {@code value} to the string stored at {@code key} (Redis APPEND). */
void append(byte[] key, byte[] value);

/** Stores {@code value} at {@code key} (Redis SET). */
void set(byte[] key, byte[] value);

/** Pushes {@code value} onto the head of the list at {@code key} (Redis LPUSH). */
void lpush(byte[] key, byte[] value);

/** Pushes {@code value} onto the tail of the list at {@code key} (Redis RPUSH). */
void rpush(byte[] key, byte[] value);

/** Adds {@code value} to the set at {@code key} (Redis SADD). */
void sadd(byte[] key, byte[] value);

/** Adds {@code value} with the given {@code score} to the sorted set at {@code key} (Redis ZADD). */
void zadd(byte[] key, Long score, byte[] value);
}
Loading

0 comments on commit 517e4c6

Please sign in to comment.