Skip to content

Commit

Permalink
Fail Spotless formatting check before tests execute (feast-dev#487)
Browse files Browse the repository at this point in the history
* Fail formatting check before tests execute

By default, the spotless Maven plugin binds its check goal to the verify
phase (late in the lifecycle, after integration tests). Because we
currently only run `mvn test` in CI, the build doesn't proceed as far as
verify, so formatting violations are not caught by CI.

This binds the check to an earlier phase, in between test-compile and
test, so that it will fail before `mvn test` but not disrupt your dev
workflow of compiling main and test sources as you work. This strikes a
good compromise, failing fast on code standards without being _too_
nagging.

For the complete lifecycle reference, see:
https://maven.apache.org/guides/introduction/introduction-to-the-lifecycle.html

* Apply spotless formatting
  • Loading branch information
ches authored Feb 24, 2020
1 parent aec7979 commit 6363540
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 14 deletions.
1 change: 0 additions & 1 deletion core/src/main/java/feast/core/util/PipelineUtil.java
Original file line number Diff line number Diff line change
Expand Up @@ -72,5 +72,4 @@ private static List<String> getClasspathFiles() {
.map(entry -> new File(entry).getPath())
.collect(Collectors.toList());
}

}
15 changes: 7 additions & 8 deletions ingestion/src/test/java/feast/ingestion/ImportJobTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,12 @@
import feast.core.StoreProto.Store.Subscription;
import feast.ingestion.options.BZip2Compressor;
import feast.ingestion.options.ImportOptions;
import feast.ingestion.options.OptionByteConverter;
import feast.storage.RedisProto.RedisKey;
import feast.test.TestUtil;
import feast.test.TestUtil.LocalKafka;
import feast.test.TestUtil.LocalRedis;
import feast.types.FeatureRowProto.FeatureRow;
import feast.types.ValueProto.ValueType.Enum;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
Expand All @@ -51,7 +49,6 @@
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.PipelineResult.State;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.joda.time.Duration;
import org.junit.AfterClass;
Expand Down Expand Up @@ -166,11 +163,13 @@ public void runPipeline_ShouldWriteToRedisCorrectlyGivenValidSpecAndFeatureRow()
.build();

ImportOptions options = PipelineOptionsFactory.create().as(ImportOptions.class);
BZip2Compressor<FeatureSetSpec> compressor = new BZip2Compressor<>(option -> {
JsonFormat.Printer printer =
JsonFormat.printer().omittingInsignificantWhitespace().printingEnumsAsInts();
return printer.print(option).getBytes();
});
BZip2Compressor<FeatureSetSpec> compressor =
new BZip2Compressor<>(
option -> {
JsonFormat.Printer printer =
JsonFormat.printer().omittingInsignificantWhitespace().printingEnumsAsInts();
return printer.print(option).getBytes();
});
options.setFeatureSetJson(compressor.compress(spec));
options.setStoreJson(Collections.singletonList(JsonFormat.printer().print(redis)));
options.setProject("");
Expand Down
10 changes: 10 additions & 0 deletions pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -384,6 +384,16 @@
<removeUnusedImports />
</java>
</configuration>
<executions>
<!-- Move check to fail faster, but after compilation. Default is verify phase -->
<execution>
<id>spotless-check</id>
<phase>process-test-classes</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -195,11 +195,7 @@ private Map<String, String> getFeatureToFeatureSetMapping(
HashMap<String, String> mapping = new HashMap<>();

featureSets.values().stream()
.collect(
groupingBy(
featureSet ->
Pair.of(
featureSet.getProject(), featureSet.getName())))
.collect(groupingBy(featureSet -> Pair.of(featureSet.getProject(), featureSet.getName())))
.forEach(
(group, groupedFeatureSets) -> {
groupedFeatureSets =
Expand Down

0 comments on commit 6363540

Please sign in to comment.