diff --git a/README.md b/README.md
index 03508be869..1d579bf03f 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ SnappyData is a **distributed in-memory data store for real-time operational ana

 ## Download binary distribution
 You can download the latest version of SnappyData here:
-* SnappyData Preview 0.2 download link [(tar.gz)](https://github.com/SnappyDataInc/snappydata/releases/download/v0.2-preview/snappydata-0.2.0-PREVIEW.tar.gz) [(zip)](https://github.com/SnappyDataInc/snappydata/releases/download/v0.2-preview/snappydata-0.2.0-PREVIEW.zip)
+* SnappyData Preview 0.2 download link [(tar.gz)](https://github.com/SnappyDataInc/snappydata/releases/download/v0.2-preview/snappydata-0.2.1-PREVIEW-bin.tar.gz) [(zip)](https://github.com/SnappyDataInc/snappydata/releases/download/v0.2-preview/snappydata-0.2.1-PREVIEW-bin.zip)

 SnappyData has been tested on Linux and Mac OSX. If not already installed, you will need to download [Java 8](http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html).

@@ -32,12 +32,12 @@ SnappyData artifacts are hosted in Maven Central. You can add a Maven dependency
 ```
 groupId: io.snappydata
 artifactId: snappy-tools_2.10
-version: 0.2.0-PREVIEW
+version: 0.2.1-PREVIEW
 ```

 If you are using sbt, add this line to your build.sbt for core snappy artifacts:

-`libraryDependencies += "io.snappydata" % "snappy-core_2.10" % "0.2.0-PREVIEW"`
+`libraryDependencies += "io.snappydata" % "snappy-core_2.10" % "0.2.1-PREVIEW"`

 Check out more specific SnappyData artifacts here: http://mvnrepository.com/artifact/io.snappydata
diff --git a/build.gradle b/build.gradle
index c632c4c501..4299396efa 100644
--- a/build.gradle
+++ b/build.gradle
@@ -50,7 +50,7 @@ allprojects {
   apply plugin: 'eclipse'

   group = 'io.snappydata'
-  version = '0.2.0-PREVIEW'
+  version = '0.2.1-PREVIEW'

   // apply compiler options
   compileJava.options.encoding = 'UTF-8'
@@ -72,7 +72,7 @@ allprojects {
     slf4jVersion = '1.7.12'
     junitVersion = '4.11'
     hadoopVersion = '2.4.1'
-    gemfireXDVersion = '2.0-BETA'
+    gemfireXDVersion = '1.5.0-BETA'
     buildFlags = ''
     createdBy = System.getProperty("user.name")
   }
@@ -212,7 +212,6 @@ subprojects {
   }

   tasks.withType(Test).each { test -> test.configure {
-    onlyIf { !Boolean.getBoolean('skip.tests') }

     environment 'SNAPPY_HOME': snappyProductDir,
         'SNAPPY_DIST_CLASSPATH': "${sourceSets.test.runtimeClasspath.asPath}"
@@ -450,12 +449,16 @@ task product {
       rename { filename -> archiveName }
     }
     // copy datanucleus jars specifically since they don't work as part of fat jar
+    // copy hbase jar as required for GFXD HDFS support (needs to be explicitly added to SPARK_DIST_CLASSPATH)
     // copy bin, sbin, data etc from spark
     if (new File(rootDir, "snappy-spark/build.gradle").exists()) {
       copy {
         from project(":snappy-spark:snappy-spark-hive_${scalaBinaryVersion}").configurations.runtime.filter {
           it.getName().contains('datanucleus')
         }
+        from project(":snappy-store:gemfire-core").configurations.provided.filter {
+          it.getName().contains('hbase')
+        }
         into "${snappyProductDir}/lib"
       }
       copy {
@@ -568,9 +571,14 @@ distributions {
     }
   }
 }
-distTar.dependsOn product
-distZip.dependsOn product
-
+distTar {
+  dependsOn product
+  classifier 'bin'
+}
+distZip {
+  dependsOn product
+  classifier 'bin'
+}

 def copyTestsCommonResources(def bdir) {
   def outdir = "${bdir}/resources/test"
@@ -733,7 +741,7 @@ task checkAll {
     dependsOn project(':snappy-spark').getTasksByName('check', true).collect { it.path }
   }
   if (project.hasProperty('store')) {
-    dependsOn ':snappy-store:checkAll'
+    dependsOn ':snappy-store:check'
   }
   mustRunAfter buildAll
 }
diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md
index 1bde755fc7..51fb0deb37 100644
--- a/docs/GettingStarted.md
+++ b/docs/GettingStarted.md
@@ -44,7 +44,7 @@ SnappyData is a **distributed in-memory data store for real-time operational ana

 ## Download binary distribution
 You can download the latest version of SnappyData here:
-* SnappyData Preview 0.2 download link [(tar.gz)](https://github.com/SnappyDataInc/snappydata/releases/download/v0.2-preview/snappydata-0.2.0-PREVIEW.tar.gz) [(zip)](https://github.com/SnappyDataInc/snappydata/releases/download/v0.2-preview/snappydata-0.2.0-PREVIEW.zip)
+* SnappyData Preview 0.2 download link [(tar.gz)](https://github.com/SnappyDataInc/snappydata/releases/download/v0.2-preview/snappydata-0.2.1-PREVIEW-bin.tar.gz) [(zip)](https://github.com/SnappyDataInc/snappydata/releases/download/v0.2-preview/snappydata-0.2.1-PREVIEW-bin.zip)

 SnappyData has been tested on Linux and Mac OSX. If not already installed, you will need to download [Java 8](http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html).

@@ -59,7 +59,7 @@ SnappyData artifacts are hosted in Maven Central. You can add a Maven dependency
 ```
 groupId: io.snappydata
 artifactId: snappy-tools_2.10
-version: 0.2.0-PREVIEW
+version: 0.2.1-PREVIEW
 ```

 ## Working with SnappyData Source Code
@@ -495,7 +495,7 @@ Submit `CreateAndLoadAirlineDataJob` over the REST API to create row and column

 ```bash
 # Submit a job to Lead node on port 8090
-$ ./bin/snappy-job.sh submit --lead localhost:8090 --app-name airlineApp --class io.snappydata.examples.CreateAndLoadAirlineDataJob --app-jar ./lib/quickstart-0.2.0-PREVIEW.jar
+$ ./bin/snappy-job.sh submit --lead localhost:8090 --app-name airlineApp --class io.snappydata.examples.CreateAndLoadAirlineDataJob --app-jar ./lib/quickstart-0.2.1-PREVIEW.jar
 {"status": "STARTED",
   "result": {
     "jobId": "321e5136-4a18-4c4f-b8ab-f3c8f04f0b48",
@@ -540,7 +540,7 @@ snappyContext.update(rowTableName, filterExpr, newColumnValues, updateColumns)

 ```bash
 # Submit AirlineDataJob to SnappyData's Lead node on port 8090
-$ ./bin/snappy-job.sh submit --lead localhost:8090 --app-name airlineApp --class io.snappydata.examples.AirlineDataJob --app-jar ./lib/quickstart-0.2.0-PREVIEW.jar
+$ ./bin/snappy-job.sh submit --lead localhost:8090 --app-name airlineApp --class io.snappydata.examples.AirlineDataJob --app-jar ./lib/quickstart-0.2.1-PREVIEW.jar
 { "status": "STARTED",
   "result": {
     "jobId": "1b0d2e50-42da-4fdd-9ea2-69e29ab92de2",
@@ -656,7 +656,7 @@ Submit the `TwitterPopularTagsJob` that declares a stream table, creates and pop

 ```bash
 # Submit the TwitterPopularTagsJob to SnappyData's Lead node on port 8090
-$ ./bin/snappy-job.sh submit --lead localhost:8090 --app-name TwitterPopularTagsJob --class io.snappydata.examples.TwitterPopularTagsJob --app-jar ./lib/quickstart-0.2.0-PREVIEW.jar --stream
+$ ./bin/snappy-job.sh submit --lead localhost:8090 --app-name TwitterPopularTagsJob --class io.snappydata.examples.TwitterPopularTagsJob --app-jar ./lib/quickstart-0.2.1-PREVIEW.jar --stream

 # Run the following utility in another terminal to simulate a twitter stream by copying tweets in the folder on which file stream table is listening.
 $ quickstart/scripts/simulateTwitterStream
@@ -671,7 +671,7 @@ $ export APP_PROPS="consumerKey=,consumerSecret=,ac

 # submit the TwitterPopularTagsJob Lead node on port 8090 that declares a stream table, creates and populates a topk -structure, registers CQ on it and stores the result in a snappy store table
 # This job runs streaming for two minutes.
-$ ./bin/snappy-job.sh submit --lead localhost:8090 --app-name TwitterPopularTagsJob --class io.snappydata.examples.TwitterPopularTagsJob --app-jar ./lib/quickstart-0.2.0-PREVIEW.jar --stream
+$ ./bin/snappy-job.sh submit --lead localhost:8090 --app-name TwitterPopularTagsJob --class io.snappydata.examples.TwitterPopularTagsJob --app-jar ./lib/quickstart-0.2.1-PREVIEW.jar --stream
 ```

 The output of the job can be found in `TwitterPopularTagsJob_timestamp.out` in the lead directory which by default is `SNAPPY_HOME/work/localhost-lead-*/`.
@@ -699,7 +699,7 @@ scala> val airlineDF = sqlContext.table("airline").show
 # Start the Spark standalone cluster.
 $ sbin/start-all.sh
 # Submit AirlineDataSparkApp to Spark Cluster with snappydata's locator host port.
-$ bin/spark-submit --class io.snappydata.examples.AirlineDataSparkApp --master spark://masterhost:7077 --conf snappydata.store.locators=localhost:10334 --conf spark.ui.port=4041 $SNAPPY_HOME/lib/quickstart-0.2.0-PREVIEW.jar
+$ bin/spark-submit --class io.snappydata.examples.AirlineDataSparkApp --master spark://masterhost:7077 --conf snappydata.store.locators=localhost:10334 --conf spark.ui.port=4041 $SNAPPY_HOME/lib/quickstart-0.2.1-PREVIEW.jar
 # The results can be seen on the command line.
 ```
diff --git a/docs/connectingToCluster.md b/docs/connectingToCluster.md
index 7ff9583941..1ab1209b12 100644
--- a/docs/connectingToCluster.md
+++ b/docs/connectingToCluster.md
@@ -8,7 +8,7 @@ The SnappyData SQL Shell (_snappy-shell_) provides a simple command line interfa
 // from the SnappyData base directory
 $ cd quickstart/scripts
 $ ../../bin/snappy-shell
-Version 2.0-BETA
+Version 1.5.0-BETA
 snappy>

 //Connect to the cluster as a client
@@ -53,7 +53,7 @@ Any spark application can also use the SnappyData as store and spark as computat
 # Start the Spark standalone cluster from SnappyData base directory
 $ sbin/start-all.sh
 # Submit AirlineDataSparkApp to Spark Cluster with snappydata's locator host port.
-$ bin/spark-submit --class io.snappydata.examples.AirlineDataSparkApp --master spark://masterhost:7077 --conf snappydata.store.locators=locatorhost:port --conf spark.ui.port=4041 $SNAPPY_HOME/lib/quickstart-0.2.0-PREVIEW.jar
+$ bin/spark-submit --class io.snappydata.examples.AirlineDataSparkApp --master spark://masterhost:7077 --conf snappydata.store.locators=locatorhost:port --conf spark.ui.port=4041 $SNAPPY_HOME/lib/quickstart-0.2.1-PREVIEW.jar
 # The results can be seen on the command line.
 ```
diff --git a/docs/jobs.md b/docs/jobs.md
index 014146089f..de70318c74 100644
--- a/docs/jobs.md
+++ b/docs/jobs.md
@@ -120,14 +120,14 @@ SnappySQLJob trait extends the SparkJobBase trait. It provides users the singlet

 #### Submitting jobs

-Following command submits [CreateAndLoadAirlineDataJob](https://github.com/SnappyDataInc/snappydata/blob/master/snappy-examples/src/main/scala/io/snappydata/examples/CreateAndLoadAirlineDataJob.scala) from the [snappy-examples](https://github.com/SnappyDataInc/snappydata/tree/master/snappy-examples/src/main/scala/io/snappydata/examples) directory. This job creates dataframes from parquet files, loads the data from dataframe into column tables and row tables and creates sample table on column table in its runJob method. The program is compiled into a jar file (quickstart-0.2.0-PREVIEW.jar) and submitted to jobs server as shown below.
+Following command submits [CreateAndLoadAirlineDataJob](https://github.com/SnappyDataInc/snappydata/blob/master/snappy-examples/src/main/scala/io/snappydata/examples/CreateAndLoadAirlineDataJob.scala) from the [snappy-examples](https://github.com/SnappyDataInc/snappydata/tree/master/snappy-examples/src/main/scala/io/snappydata/examples) directory. This job creates dataframes from parquet files, loads the data from dataframe into column tables and row tables and creates sample table on column table in its runJob method. The program is compiled into a jar file (quickstart-0.2.1-PREVIEW.jar) and submitted to jobs server as shown below.

 ```
 $ bin/snappy-job.sh submit \
     --lead hostNameOfLead:8090 \
     --app-name airlineApp \
     --class io.snappydata.examples.CreateAndLoadAirlineDataJob \
-    --app-jar $SNAPPY_HOME/lib/quickstart-0.2.0-PREVIEW.jar
+    --app-jar $SNAPPY_HOME/lib/quickstart-0.2.1-PREVIEW.jar
 ```

 The utility snappy-job.sh submits the job and returns a JSON that has a jobId of this job.
@@ -169,7 +169,7 @@ $ bin/snappy-job.sh submit \
     --lead hostNameOfLead:8090 \
     --app-name airlineApp \
     --class io.snappydata.examples.AirlineDataJob \
-    --app-jar $SNAPPY_HOME/lib/quickstart-0.2.0-PREVIEW.jar
+    --app-jar $SNAPPY_HOME/lib/quickstart-0.2.1-PREVIEW.jar
 ```

 The status of this job can be queried in the same manner as shown above. The result of the this job will return a file path that has the query results.
@@ -183,6 +183,6 @@ $ bin/snappy-job.sh submit \
     --lead hostNameOfLead:8090 \
     --app-name airlineApp \
     --class io.snappydata.examples.TwitterPopularTagsJob \
-    --app-jar $SNAPPY_HOME/lib/quickstart-0.2.0-PREVIEW.jar \
+    --app-jar $SNAPPY_HOME/lib/quickstart-0.2.1-PREVIEW.jar \
     --stream
 ```
diff --git a/snappy-dunits/build.gradle b/snappy-dunits/build.gradle
index bdfc000a84..44c302257e 100644
--- a/snappy-dunits/build.gradle
+++ b/snappy-dunits/build.gradle
@@ -46,7 +46,7 @@ testClasses.doLast {
 test {
   dependsOn ':cleanDUnit'
   dependsOn ':product'
-  maxParallelForks = Math.max((int)Math.sqrt(Runtime.getRuntime().availableProcessors() + 1), 2)
+  maxParallelForks = 1

   minHeapSize '128m'
   maxHeapSize '1g'
diff --git a/snappy-store b/snappy-store
index 994f79e64f..8185705a75 160000
--- a/snappy-store
+++ b/snappy-store
@@ -1 +1 @@
-Subproject commit 994f79e64f5f8f66394eff478ebc042f046c923e
+Subproject commit 8185705a7571603f804d86713ab3d83f407ad785