diff --git a/.github/workflows/upload-binaries.yml b/.github/workflows/upload-binaries.yml
new file mode 100644
index 0000000..c825f88
--- /dev/null
+++ b/.github/workflows/upload-binaries.yml
@@ -0,0 +1,34 @@
+name: Build and release JAR files
+
+on:
+ release:
+ types:
+ - published
+
+jobs:
+ upload-jar:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write # release changes require contents write
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v4
+
+ - uses: actions/setup-java@v4
+ with:
+ java-version: "8"
+ distribution: temurin
+
+ - name: Generate JARs
+ run: mvn clean package -DskipTests
+
+ - name: Set project version env variable
+ run: |
+ echo "PROJECT_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout)" >> $GITHUB_ENV
+
+ - name: Upload JAR to release
+ env:
+ GH_TOKEN: ${{ github.token }}
+ run: |
+ gh release upload ${{ github.event.release.tag_name }} target/spark-${{ env.PROJECT_VERSION }}.jar
diff --git a/README.md b/README.md
index a207189..645ae95 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@ For use with Java and Scala projects, the package can be found [here](https://ce
io.qdrant
spark
- 2.0
+ 2.0.1
```
@@ -43,7 +43,7 @@ from pyspark.sql import SparkSession
spark = SparkSession.builder.config(
"spark.jars",
- "spark-2.0.jar", # specify the downloaded JAR file
+ "spark-2.0.1.jar", # specify the downloaded JAR file
)
.master("local[*]")
.appName("qdrant")
@@ -75,7 +75,7 @@ You can use the `qdrant-spark` connector as a library in Databricks to ingest da
- Go to the `Libraries` section in your cluster dashboard.
- Select `Install New` to open the library installation modal.
-- Search for `io.qdrant:spark:2.0` in the Maven packages and click `Install`.
+- Search for `io.qdrant:spark:2.0.1` in the Maven packages and click `Install`.