Fixes #280 #281
base: master
Changes from all commits: ce74dc1, b46e2b1, 1c87d8d, 1baf541, 7b4be54, 473d70f
File: .travis.yml
@@ -1,9 +1,7 @@
sudo: false
language: scala
scala:
- 2.11.8
jdk:
- oraclejdk8
- 2.12.8
cache:
  directories:
  - '$HOME/.ivy2/cache'
File: KubernetesClient.scala
@@ -1,11 +1,15 @@
package skuber.api.client

import akka.stream.scaladsl.{Sink, Source}
import akka.http.scaladsl.Http
import akka.util.ByteString
import play.api.libs.json.{Writes,Format}
import play.api.libs.json.{Format, Writes}
import skuber.{DeleteOptions, HasStatusSubresource, LabelSelector, ListOptions, ListResource, ObjectResource, Pod, ResourceDefinition, Scale}
import skuber.api.patch.Patch
import skuber.api.watch.WatchSource
import skuber.batch.Job

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{Future, Promise}

/**
@@ -88,6 +92,15 @@ trait KubernetesClient {
*/
def deleteWithOptions[O <: ObjectResource](name: String, options: DeleteOptions)(implicit rd: ResourceDefinition[O], lc: LoggingContext): Future[Unit]

/**
* Monitor a resource existence until no longer available
* @param name the name of the resource to monitor its existence
* @param monitorRepeatDelay delay for repeating the monitoring as long as the resource is available by name
* @tparam O the specific object resource type e.g. Pod, Deployment
* @return A future that will be set to success when Kubernetes confirm the resource is no longer available by name, otherwise failure
*/
def monitorResourceUntilUnavailable[O <: ObjectResource](name: String, monitorRepeatDelay: FiniteDuration)(implicit fmt: Format[O], rd: ResourceDefinition[O]): Future[Unit]
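A minimal usage sketch of the new method, assuming an already-configured `k8s` client and the standard skuber Pod format imports (none of this code is part of the PR): delete a Pod, then poll until the API server no longer returns it by name.

```scala
// Sketch only: assumes a configured `k8s` KubernetesClient is in scope.
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import skuber._
import skuber.json.format._
import skuber.api.client.KubernetesClient

def deleteAndAwaitGone(k8s: KubernetesClient, name: String)(implicit ec: ExecutionContext): Future[Unit] =
  for {
    // Deletion returns once the API server accepts the request, not when the resource is gone...
    _ <- k8s.delete[Pod](name)
    // ...so poll every 2 seconds until a GET by name no longer finds the Pod.
    _ <- k8s.monitorResourceUntilUnavailable[Pod](name, 2.seconds)
  } yield ()
```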

/**
* Delete all resources of specified type in current namespace
* @tparam L list resource type of resources to delete e.g. PodList, DeploymentList

@@ -216,10 +229,12 @@ trait KubernetesClient {
* Watch a specific object resource continuously. This returns a source that will continue to produce
* events on any updates to the object even if the server times out, by transparently restarting the watch as needed.
* @param obj the object resource to watch
* @param pool reuse a skuber pool for querying the server if any or create a new one
* @tparam O the type of the resource e.g Pod
* @return A future containing an Akka streams Source of WatchEvents that will be emitted
* @return A future containing an Akka streams Source of WatchEvents that will be emitted where the materialized
* value is a pair of the skuber pool used and the underlying Akka host connection pool used, if any.
*/
def watchContinuously[O <: ObjectResource](obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _]
def watchContinuously[O <: ObjectResource](obj: O, pool: Option[Pool[WatchSource.Start[O]]])(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], (Pool[WatchSource.Start[O]], Option[Http.HostConnectionPool])]
Review comment: I'm reluctant to modify the existing …

Review comment: The current API completely hides the fact that there's a pool involved and therefore prevents doing anything with it:
If you're thinking of refactoring this API, it would be good to keep the above use cases in mind; the distinction between sequential & parallel invocations of long-duration operations is very important for us. I don't know how to support these use cases without changing the API in some incompatible way. Please advise how you want to proceed. I'm OK in deferring action until you refactor the API as you mentioned.
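A sketch of the sequential-reuse use case raised in these comments, under assumed names and imports (not code from this PR): the first watch materializes the skuber pool, which is then passed to a second watch so that the client does not build a new one.

```scala
// Sketch only: illustrative names; assumes skuber's default LoggingContext and Pod format are in scope.
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink}
import skuber._
import skuber.json.format._
import skuber.api.client.{EventType, KubernetesClient}

def watchSequentially(k8s: KubernetesClient, first: Pod, second: Pod)(implicit system: ActorSystem): Unit = {
  import system.dispatcher
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // First watch: no pool supplied, so the client builds one; the materialized value exposes it.
  val ((skuberPool, hostPool), firstDone) =
    k8s.watchContinuously(first, pool = None)
      .takeWhile(_._type != EventType.DELETED, inclusive = true) // stop once the first Pod is deleted
      .toMat(Sink.foreach(ev => println(s"first: ${ev._type} ${ev._object.name}")))(Keep.both)
      .run()

  firstDone.foreach { _ =>
    // Second (sequential) watch: reuse the pool materialized by the first watch.
    k8s.watchContinuously(second, pool = Some(skuberPool))
      .runWith(Sink.foreach(ev => println(s"second: ${ev._type} ${ev._object.name}")))
    // When no further watches are needed, the Akka pool can be released: hostPool.foreach(_.shutdown())
  }
}
```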

/**
* Watch a specific object resource continuously. This returns a source that will continue to produce

@@ -232,11 +247,13 @@ trait KubernetesClient {
* applicable type (e.g. PodList, DeploymentList) and then supplies that to this method to receive any future updates. If no resource version is specified,
* a single ADDED event will be produced for an already existing object followed by events for any future changes.
* @param bufSize optional buffer size for received object updates, normally the default is more than enough
* @param pool reuse a skuber pool for querying the server if any or create a new one
* @tparam O the type of the resource
* @return A future containing an Akka streams Source of WatchEvents that will be emitted
* @return A future containing an Akka streams Source of WatchEvents that will be emitted where the materialized
* value is a pair of the skuber pool used and the underlying Akka host connection pool used, if any.
*/
def watchContinuously[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _]
def watchContinuously[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000, pool: Option[Pool[WatchSource.Start[O]]] = None)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], (Pool[WatchSource.Start[O]], Option[Http.HostConnectionPool])]

/**
* Watch all object resources of a specified type continuously. This returns a source that will continue to produce

@@ -248,24 +265,29 @@ trait KubernetesClient {
* applicable type (e.g. PodList, DeploymentList) and then supplies that to this method to receive any future updates. If no resource version is specified,
* a single ADDED event will be produced for an already existing object followed by events for any future changes.
* @param bufSize optional buffer size for received object updates, normally the default is more than enough
* @param pool reuse a skuber pool for querying the server if any or create a new one
* @tparam O the type pf the resource
* @return A future containing an Akka streams Source of WatchEvents that will be emitted
* @return A future containing an Akka streams Source of WatchEvents that will be emitted where the materialized
* value is a pair of the skuber pool used and the underlying Akka host connection pool used, if any.
*/
def watchAllContinuously[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _]
def watchAllContinuously[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000, pool: Option[Pool[WatchSource.Start[O]]] = None)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], (Pool[WatchSource.Start[O]], Option[Http.HostConnectionPool])]

/**
* Watch all object resources of a specified type continuously, passing the specified options to the API server with the watch request.
* This returns a source that will continue to produce events even if the server times out, by transparently restarting the watch as needed.
*
* @param options a set of list options to pass to the server. See https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ListOptions
* for the meaning of the options. Note that the `watch` flag in the options will be ignored / overridden by the client, which
* ensures a watch is always requested on the server.
* @param bufsize optional buffer size for received object updates, normally the default is more than enough
* @param pool reuse a skuber pool for querying the server if any or create a new one
* @tparam O the resource type to watch
* @return A future containing an Akka streams Source of WatchEvents that will be emitted
* @return A future containing an Akka streams Source of WatchEvents that will be emitted where the materialized
* value is a pair of the skuber pool used and the underlying Akka host connection pool used, if any.
*/
def watchWithOptions[O <: ObjectResource](options: ListOptions, bufsize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _]
def watchWithOptions[O <: ObjectResource](options: ListOptions, bufsize: Int = 10000, pool: Option[Pool[WatchSource.Start[O]]] = None)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], (Pool[WatchSource.Start[O]], Option[Http.HostConnectionPool])]
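A short sketch of how the extended `watchWithOptions` signature might be called, assuming a `k8s` client, the standard Pod format import, and an illustrative `app=my-app` label selector (not code from this PR):

```scala
// Sketch only: the label selector value and buffer size are illustrative.
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink}
import skuber._
import skuber.json.format._
import skuber.api.client.KubernetesClient

def watchLabelledPods(k8s: KubernetesClient)(implicit system: ActorSystem) = {
  implicit val mat: ActorMaterializer = ActorMaterializer()
  val options = ListOptions(
    labelSelector = Some(LabelSelector(LabelSelector.IsEqualRequirement("app", "my-app"))),
    timeoutSeconds = Some(300L) // server-side watch timeout; the source restarts transparently
  )
  // pool = None lets the client build its own long-polling pool; the materialized pair exposes it for reuse.
  k8s.watchWithOptions[Pod](options, bufsize = 10000, pool = None)
    .toMat(Sink.foreach(ev => println(s"${ev._type}: ${ev._object.name}")))(Keep.both)
    .run()
}
```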

/**
* Get the scale subresource of the named object resource

@@ -350,6 +372,33 @@ trait KubernetesClient {
tty: Boolean = false,
maybeClose: Option[Promise[Unit]] = None)(implicit lc: LoggingContext): Future[Unit]

/**
* Execute a job, monitoring the progress of its pod until completion and monitor its deletion until complete
* @param job the Kubernetes job to execute
* @param labelSelector the label selector for monitoring the job's pod status
* @param podProgress the predicate for monitoring the pod status while satisfied before deleting the job
* @param podCompletion a callback invoked at the completion of the job's pod (successful or not),
* after which the job will be deleted if and only if the podCompletion result is true
* @param watchContinuouslyRequestTimeout the delay for continuously monitoring the pod progress
* @param deletionMonitorRepeatDelay the delay for continuously monitoring the job deletion
* @param pool a skuber pool to reuse, if any, or to create otherwise
* @param bufSize optional buffer size for received object updates, normally the default is more than enough
* @return A future consisting of a triple of the following:
* - the skuber pool suitable for subsequently executing other jobs on the same server
* - the akka host connection pool that can be shutdown when no further jobs need to be executed on the same server
* - the last pod status received when the pod progress predicate became unsatisfied
*/
def executeJobAndWaitUntilDeleted(
Review comment: In general I try to keep the API generic in regards to resource types, which reflects the actual Kubernetes REST API design largely (exception of some specific methods on pods such as …

Review comment: ok
job: Job,
labelSelector: LabelSelector,
podProgress: WatchEvent[Pod] => Boolean,
podCompletion: WatchEvent[Pod] => Future[Boolean],
watchContinuouslyRequestTimeout: Duration,
deletionMonitorRepeatDelay: FiniteDuration,
pool: Option[Pool[WatchSource.Start[Pod]]],
bufSize: Int = 10000)(implicit jfmt: Format[Job], pfmt: Format[Pod], jrd: ResourceDefinition[Job], prd: ResourceDefinition[Pod]):
Future[(Pool[WatchSource.Start[Pod]], Option[Http.HostConnectionPool], WatchEvent[Pod])]
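A hedged call sketch for the proposed method, assuming the standard skuber Pod and Job format imports and the `job-name` label that Kubernetes adds to a Job's pods; the names, timeouts, and completion policy are illustrative, not taken from this PR.

```scala
// Sketch only: assumes the `job-name` label that Kubernetes sets on a Job's pods,
// and the standard skuber JSON format imports for Pod and Job.
import scala.concurrent.Future
import scala.concurrent.duration._
import skuber._
import skuber.batch.Job
import skuber.json.format._
import skuber.json.batch.format._
import skuber.api.client.{KubernetesClient, WatchEvent}

def runJobAndCleanUp(k8s: KubernetesClient, job: Job) = {
  val selector = LabelSelector(LabelSelector.IsEqualRequirement("job-name", job.name))
  // Keep watching while the pod has not yet reached a terminal phase.
  def stillRunning(ev: WatchEvent[Pod]): Boolean =
    !ev._object.status.flatMap(_.phase).exists(p => p == Pod.Phase.Succeeded || p == Pod.Phase.Failed)

  k8s.executeJobAndWaitUntilDeleted(
    job = job,
    labelSelector = selector,
    podProgress = stillRunning,
    podCompletion = _ => Future.successful(true), // always delete the Job once its pod finishes
    watchContinuouslyRequestTimeout = 300.seconds,
    deletionMonitorRepeatDelay = 2.seconds,
    pool = None
  ) // yields (skuber pool, optional Akka host connection pool, last Pod event)
}
```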

/**
* Return list of API versions supported by the server
* @param lc

File: KubernetesClientImpl.scala

@@ -7,12 +7,13 @@ import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.http.scaladsl.{ConnectionContext, Http, HttpsConnectionContext}
import akka.pattern.after
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.util.ByteString
import com.typesafe.config.{Config, ConfigFactory}
import javax.net.ssl.SSLContext
import play.api.libs.json.{Format, Writes, Reads}
import play.api.libs.json.{Format, Reads, Writes}
import skuber._
import skuber.api.client.exec.PodExecImpl
import skuber.api.client.{K8SException => _, _}

@@ -22,6 +23,7 @@ import skuber.json.PlayJsonSupportForAkkaHttp._
import skuber.json.format.apiobj.statusReads
import skuber.json.format.{apiVersionsFormat, deleteOptionsFmt, namespaceListFmt}
import skuber.api.patch._
import skuber.batch.Job

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}

@@ -403,6 +405,17 @@ class KubernetesClientImpl private[client] (
} yield ()
}

override def monitorResourceUntilUnavailable[O <: ObjectResource](name: String, monitorRepeatDelay: FiniteDuration)(
Review comment: why not use …
implicit fmt: Format[O], rd: ResourceDefinition[O]): Future[Unit] =
getOption[O](name).flatMap {
case None =>
Future.successful(())
case Some(_) =>
after(monitorRepeatDelay, actorSystem.scheduler)(
monitorResourceUntilUnavailable[O](name, monitorRepeatDelay)
)
}

override def deleteAll[L <: ListResource[_]]()(
implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] =
{
@@ -481,30 +494,30 @@ class KubernetesClientImpl private[client] (
Watch.eventsOnKind[O](this, sinceResourceVersion, bufSize)
}

override def watchContinuously[O <: ObjectResource](obj: O)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] =
override def watchContinuously[O <: ObjectResource](obj: O, pool: Option[Pool[WatchSource.Start[O]]])(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], (Pool[WatchSource.Start[O]], Option[Http.HostConnectionPool])] =
{
watchContinuously(obj.name)
watchContinuously(obj.name, pool = pool)
}

override def watchContinuously[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] =
override def watchContinuously[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000, pool: Option[Pool[WatchSource.Start[O]]] = None)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], (Pool[WatchSource.Start[O]], Option[Http.HostConnectionPool])] =
{
val options=ListOptions(resourceVersion = sinceResourceVersion, timeoutSeconds = Some(watchContinuouslyRequestTimeout.toSeconds) )
WatchSource(this, buildLongPollingPool(), Some(name), options, bufSize)
WatchSource(this, pool.getOrElse(buildLongPollingPool()), Some(name), options, bufSize)
}

override def watchAllContinuously[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] =
override def watchAllContinuously[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000, pool: Option[Pool[WatchSource.Start[O]]] = None)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], (Pool[WatchSource.Start[O]], Option[Http.HostConnectionPool])] =
{
val options=ListOptions(resourceVersion = sinceResourceVersion, timeoutSeconds = Some(watchContinuouslyRequestTimeout.toSeconds))
WatchSource(this, buildLongPollingPool(), None, options, bufSize)
WatchSource(this, pool.getOrElse(buildLongPollingPool()), None, options, bufSize)
}

override def watchWithOptions[O <: skuber.ObjectResource](options: ListOptions, bufsize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] =
override def watchWithOptions[O <: skuber.ObjectResource](options: ListOptions, bufsize: Int = 10000, pool: Option[Pool[WatchSource.Start[O]]] = None)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], (Pool[WatchSource.Start[O]], Option[Http.HostConnectionPool])] =
{
WatchSource(this, buildLongPollingPool(), None, options, bufsize)
WatchSource(this, pool.getOrElse(buildLongPollingPool()), None, options, bufsize)
}

private def buildLongPollingPool[O <: ObjectResource]() = {
@@ -622,6 +635,45 @@ class KubernetesClientImpl private[client] (
PodExecImpl.exec(this, podName, command, maybeContainerName, maybeStdin, maybeStdout, maybeStderr, tty, maybeClose)
}

override def executeJobAndWaitUntilDeleted(
job: Job,
labelSelector: LabelSelector,
podProgress: WatchEvent[Pod] => Boolean,
podCompletion: WatchEvent[Pod] => Future[Boolean],
watchContinuouslyRequestTimeout: Duration,
deletionMonitorRepeatDelay: FiniteDuration,
pool: Option[Pool[WatchSource.Start[Pod]]],
bufSize: Int = 10000)(implicit jfmt: Format[Job], pfmt: Format[Pod], jrd: ResourceDefinition[Job], prd: ResourceDefinition[Pod])
: Future[(Pool[WatchSource.Start[Pod]], Option[Http.HostConnectionPool], WatchEvent[Pod])] =
for {
j <- create(job)
(p, hcp, lastPodEvent) <- {
watchWithOptions[Pod](
options = ListOptions(
labelSelector = Some(labelSelector),
timeoutSeconds = Some(watchContinuouslyRequestTimeout.toSeconds)
),
bufsize = bufSize,
pool = pool
)
.takeWhile(podProgress, inclusive = true)
.toMat(Sink.last)(Keep.both)
.run() match {
case ((pool: Pool[WatchSource.Start[Pod]],
hostConnectionPool: Option[Http.HostConnectionPool]),
f: Future[WatchEvent[Pod]]) =>
f.map { ev =>
(pool, hostConnectionPool, ev)
}
}
}
delete <- podCompletion(lastPodEvent)
_ <- if (delete)
deleteWithOptions[Job](name = j.metadata.name, options = DeleteOptions(propagationPolicy = Some(DeletePropagation.Foreground)))
.flatMap(_ => monitorResourceUntilUnavailable[Job](j.metadata.name, deletionMonitorRepeatDelay))
else Future.successful(())
} yield (p, hcp, lastPodEvent)
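A small follow-up sketch (an assumption about intended usage, not part of the PR) showing how a caller might consume the returned triple and release the underlying Akka host connection pool once no further jobs will run against the server; the skuber pool type is kept as a type parameter so the sketch does not depend on the exact `Pool` alias.

```scala
// Sketch only: `SkuberPool` abstracts over the Pool[WatchSource.Start[Pod]] type used above.
import scala.concurrent.{ExecutionContext, Future}
import akka.http.scaladsl.Http
import skuber.Pod
import skuber.api.client.WatchEvent

def releaseWhenDone[SkuberPool](
    resultF: Future[(SkuberPool, Option[Http.HostConnectionPool], WatchEvent[Pod])])(
    implicit ec: ExecutionContext): Unit =
  resultF.foreach { case (skuberPool, maybeHostPool, lastPodEvent) =>
    println(s"final pod event: ${lastPodEvent._type} ${lastPodEvent._object.name}")
    // The skuber pool could be reused for another job against the same server; otherwise
    // release the underlying Akka HTTP host connection pool.
    maybeHostPool.foreach(_.shutdown())
  }
```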

override def close: Unit =
{
isClosed = true
Review comment: Would it be possible to submit the various build updates as a separate PR, unless those updates are required by the other changes? (I find small, cohesive PRs generally are best)

Review comment: The .travis.yml file is currently broken; see https://travis-ci.community/t/install-of-oracle-jdk-8-failing/3038. Do you want to fix the .travis.yml file as part of a separate issue? If so, this PR will fail until the travis ci issue is resolved.