diff --git a/operators/pkg/controller/elasticsearch/driver/default.go b/operators/pkg/controller/elasticsearch/driver/default.go
index eebaeb54a19..a363ff9c61a 100644
--- a/operators/pkg/controller/elasticsearch/driver/default.go
+++ b/operators/pkg/controller/elasticsearch/driver/default.go
@@ -5,7 +5,6 @@
 package driver
 
 import (
-	"context"
 	"crypto/x509"
 	"fmt"
 
@@ -17,7 +16,6 @@ import (
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/elasticsearch/migration"
 	esvolume "github.com/elastic/cloud-on-k8s/operators/pkg/controller/elasticsearch/volume"
-	"github.com/elastic/cloud-on-k8s/operators/pkg/utils/stringsutil"
 
 	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common"
@@ -304,31 +302,6 @@ func (d *defaultDriver) Reconcile(
 	//		results.WithResult(defaultRequeue)
 	//	}
 	//}
-	//
-	//// List the orphaned PVCs before the Pods are created.
-	//// If there are some orphaned PVCs they will be adopted and remove sequentially from the list when Pods are created.
-	//orphanedPVCs, err := pvc.FindOrphanedVolumeClaims(d.Client, es)
-	//if err != nil {
-	//	return results.WithError(err)
-	//}
-	//
-	//for _, change := range performableChanges.ToCreate {
-	//	d.PodsExpectations.ExpectCreation(namespacedName)
-	//	if err := createElasticsearchPod(
-	//		d.Client,
-	//		d.Scheme,
-	//		es,
-	//		reconcileState,
-	//		change.Pod,
-	//		change.PodSpecCtx,
-	//		orphanedPVCs,
-	//	); err != nil {
-	//		// pod was not created, cancel our expectation by marking it observed
-	//		d.PodsExpectations.CreationObserved(namespacedName)
-	//		return results.WithError(err)
-	//	}
-	//}
 
 	// passed this point, any pods resource listing should check expectations first
 	if !esReachable {
 		// We cannot manipulate ES allocation exclude settings if the ES cluster
@@ -353,107 +326,12 @@ func (d *defaultDriver) Reconcile(
 	//		return results.WithResult(defaultRequeue).WithError(err)
 	//	}
 	//}
-	//
-	//if !changes.HasChanges() {
-	//	// Current state matches expected state
-	//	reconcileState.UpdateElasticsearchOperational(*resourcesState, observedState)
-	//	return results
-	//}
-	//
-	//// Start migrating data away from all pods to be deleted
-	//leavingNodeNames := pod.PodListToNames(performableChanges.ToDelete.Pods())
-	//if err = migration.MigrateData(esClient, leavingNodeNames); err != nil {
-	//	return results.WithError(errors.Wrap(err, "error during migrate data"))
-	//}
-	//
-	//// Shrink clusters by deleting deprecated pods
-	//if err = d.attemptPodsDeletion(
-	//	performableChanges,
-	//	reconcileState,
-	//	resourcesState,
-	//	observedState,
-	//	results,
-	//	esClient,
-	//	es,
-	//); err != nil {
-	//	return results.WithError(err)
-	//}
-	//// past this point, any pods resource listing should check expectations first
-	//
-	//if changes.HasChanges() && !performableChanges.HasChanges() {
-	//	// if there are changes we'd like to perform, but none that were performable, we try again later
-	//	results.WithResult(defaultRequeue)
-	//}
 
 	reconcileState.UpdateElasticsearchState(*resourcesState, observedState)
 
 	return results
 }
 
-//
-//// attemptPodsDeletion deletes a list of pods after checking there is no migrating data for each of them
-//func (d *defaultDriver) attemptPodsDeletion(
-//	changes *mutation.PerformableChanges,
-//	reconcileState *reconcile.State,
-//	resourcesState *reconcile.ResourcesState,
-//	observedState observer.State,
-//	results *reconciler.Results,
-//	esClient esclient.Client,
-//	elasticsearch v1alpha1.Elasticsearch,
-//) error {
-//	newState := make([]corev1.Pod, len(resourcesState.CurrentPods))
-//	copy(newState, resourcesState.CurrentPods.Pods())
-//	for _, pod := range changes.ToDelete.Pods() {
-//		newState = removePodFromList(newState, pod)
-//		preDelete := func() error {
-//			if d.zen1SettingsUpdater != nil {
-//				requeue, err := d.zen1SettingsUpdater(
-//					elasticsearch,
-//					d.Client,
-//					esClient,
-//					newState,
-//					changes,
-//					reconcileState)
-//
-//				if err != nil {
-//					return err
-//				}
-//
-//				if requeue {
-//					results.WithResult(defaultRequeue)
-//				}
-//			}
-//			return nil
-//		}
-//
-//		// do not delete a pod or expect a deletion if a data migration is in progress
-//		isMigratingData := migration.IsMigratingData(observedState, pod, changes.ToDelete.Pods())
-//		if isMigratingData {
-//			log.Info("Skipping deletion because of migrating data", "pod", pod.Name)
-//			reconcileState.UpdateElasticsearchMigrating(*resourcesState, observedState)
-//			results.WithResult(defaultRequeue)
-//			continue
-//		}
-//
-//		namespacedName := k8s.ExtractNamespacedName(&elasticsearch)
-//		d.PodsExpectations.ExpectDeletion(namespacedName)
-//		result, err := deleteElasticsearchPod(
-//			d.Client,
-//			reconcileState,
-//			*resourcesState,
-//			pod,
-//			preDelete,
-//		)
-//		if err != nil {
-//			// pod was not deleted, cancel our expectation by marking it observed
-//			d.PodsExpectations.DeletionObserved(namespacedName)
-//			return err
-//		}
-//		results.WithResult(result)
-//	}
-//	return nil
-//}
-
 func (d *defaultDriver) reconcileNodeSpecs(
 	es v1alpha1.Elasticsearch,
 	esReachable bool,
@@ -564,16 +442,6 @@ func (d *defaultDriver) reconcileNodeSpecs(
 	return results
 }
 
-func NodesInTheCluster(esClient esclient.Client, expectedNodes []string) (bool, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), esclient.DefaultReqTimeout)
-	defer cancel()
-	clusterNodes, err := esClient.GetNodes(ctx)
-	if err != nil {
-		return false, err
-	}
-	return stringsutil.StringsInSlice(expectedNodes, clusterNodes.Names()), nil
-}
-
 func (d *defaultDriver) scaleStatefulSetDown(
 	statefulSet *appsv1.StatefulSet,
 	targetReplicas int32,
@@ -634,7 +502,9 @@ func (d *defaultDriver) scaleStatefulSetDown(
 		}
 	}
 
-	return nil
+	// TODO: clear allocation excludes
+
+	return results
 }
 
 // newElasticsearchClient creates a new Elasticsearch HTTP client for this cluster using the provided user
diff --git a/operators/pkg/controller/elasticsearch/driver/esstate.go b/operators/pkg/controller/elasticsearch/driver/esstate.go
new file mode 100644
index 00000000000..0b45dcc5133
--- /dev/null
+++ b/operators/pkg/controller/elasticsearch/driver/esstate.go
@@ -0,0 +1,129 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package driver
+
+import (
+	"context"
+	"sync"
+
+	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1"
+	esclient "github.com/elastic/cloud-on-k8s/operators/pkg/controller/elasticsearch/client"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/utils/stringsutil"
+)
+
+type ESState interface {
+	NodesInCluster(nodeNames []string) (bool, error)
+	ShardAllocationsEnabled() (bool, error)
+	GreenHealth() (bool, error)
+}
+
+type LazyESState struct {
+	esClient esclient.Client
+	*lazyNodes
+	*lazyShardsAllocationEnabled
+	*lazyGreenHealth
+}
+
+func NewLazyESState(esClient esclient.Client) ESState {
+	return &LazyESState{
+		esClient:                    esClient,
+		lazyNodes:                   &lazyNodes{esClient: esClient},
+		lazyShardsAllocationEnabled: &lazyShardsAllocationEnabled{esClient: esClient},
+		lazyGreenHealth:             &lazyGreenHealth{esClient: esClient},
+	}
+}
+
+func initOnce(once *sync.Once, f func() error) error {
+	var err error
+	once.Do(func() {
+		err = f()
+	})
+	return err
+}
+
+// -- Nodes
+
+type lazyNodes struct {
+	once     sync.Once
+	esClient esclient.Client
+	nodes    []string
+}
+
+func (n *lazyNodes) initialize() error {
+	ctx, cancel := context.WithTimeout(context.Background(), esclient.DefaultReqTimeout)
+	defer cancel()
+	nodes, err := n.esClient.GetNodes(ctx)
+	if err != nil {
+		return err
+	}
+	n.nodes = nodes.Names()
+	return nil
+}
+
+func (n *lazyNodes) nodeInCluster(nodeName string) (bool, error) {
+	if err := initOnce(&n.once, n.initialize); err != nil {
+		return false, err
+	}
+	return stringsutil.StringInSlice(nodeName, n.nodes), nil
+}
+
+func (n *lazyNodes) NodesInCluster(nodeNames []string) (bool, error) {
+	if err := initOnce(&n.once, n.initialize); err != nil {
+		return false, err
+	}
+	return stringsutil.StringsInSlice(nodeNames, n.nodes), nil
+}
+
+// -- Shards allocation enabled
+
+type lazyShardsAllocationEnabled struct {
+	once     sync.Once
+	esClient esclient.Client
+	enabled  bool
+}
+
+func (s *lazyShardsAllocationEnabled) initialize() error {
+	ctx, cancel := context.WithTimeout(context.Background(), esclient.DefaultReqTimeout)
+	defer cancel()
+	allocationSettings, err := s.esClient.GetClusterRoutingAllocation(ctx)
+	if err != nil {
+		return err
+	}
+	s.enabled = allocationSettings.Transient.IsShardsAllocationEnabled()
+	return nil
+}
+
+func (s *lazyShardsAllocationEnabled) ShardAllocationsEnabled() (bool, error) {
+	if err := initOnce(&s.once, s.initialize); err != nil {
+		return false, err
+	}
+	return s.enabled, nil
+}
+
+// -- Green health
+
+type lazyGreenHealth struct {
+	once        sync.Once
+	esClient    esclient.Client
+	greenHealth bool
+}
+
+func (h *lazyGreenHealth) initialize() error {
+	ctx, cancel := context.WithTimeout(context.Background(), esclient.DefaultReqTimeout)
+	defer cancel()
+	health, err := h.esClient.GetClusterHealth(ctx)
+	if err != nil {
+		return err
+	}
+	h.greenHealth = health.Status == string(v1alpha1.ElasticsearchGreenHealth)
+	return nil
+}
+
+func (h *lazyGreenHealth) GreenHealth() (bool, error) {
+	if err := initOnce(&h.once, h.initialize); err != nil {
+		return false, err
+	}
+	return h.greenHealth, nil
+}
diff --git a/operators/pkg/controller/elasticsearch/driver/upgrade.go b/operators/pkg/controller/elasticsearch/driver/upgrade.go
index f56a97c1d81..a8e5820af5b 100644
--- a/operators/pkg/controller/elasticsearch/driver/upgrade.go
+++ b/operators/pkg/controller/elasticsearch/driver/upgrade.go
@@ -25,19 +25,25 @@ func (d *defaultDriver) handleRollingUpgrades(
 ) *reconciler.Results {
 	results := &reconciler.Results{}
 
-	res := d.doRollingUpgrade(statefulSets, esClient)
+	// We need an up-to-date ES state, but avoid requesting information we may not need.
+	esState := NewLazyESState(esClient)
+
+	// Maybe upgrade some of the nodes.
+	res := d.doRollingUpgrade(es, statefulSets, esClient, esState)
 	results.WithResults(res)
 
 	// Maybe re-enable shards allocation if upgraded nodes are back into the cluster.
-	res = d.MaybeEnableShardsAllocation(es, esClient, statefulSets)
+	res = d.MaybeEnableShardsAllocation(es, esClient, esState, statefulSets)
 	results.WithResults(res)
 
 	return results
 }
 
 func (d *defaultDriver) doRollingUpgrade(
+	es v1alpha1.Elasticsearch,
 	statefulSets sset.StatefulSetList,
 	esClient esclient.Client,
+	esState ESState,
 ) *reconciler.Results {
 	results := &reconciler.Results{}
 
@@ -49,6 +55,11 @@ func (d *defaultDriver) doRollingUpgrade(
 		return results.WithResult(defaultRequeue)
 	}
 
+	if !statefulSets.RevisionUpdateScheduled() {
+		// nothing to upgrade
+		return results
+	}
+
 	// TODO: deal with multiple restarts at once, taking the changeBudget into account.
 	// We'd need to stop checking cluster health and do something smarter, since cluster health green check
 	// should be done **in between** restarts to make sense, which is pretty hard to do since we don't
@@ -56,37 +67,43 @@
 	// Instead of green health, we could look at shards status, taking into account nodes
 	// we scheduled for a restart (maybe not restarted yet).
 
-	// TODO: don't upgrade more than 1 master concurrently (ok for now since we upgrade 1 node at a time)
+	// TODO: don't upgrade more than 1 master concurrently (ok for now since we upgrade 1 node at a time anyway)
 	maxConcurrentUpgrades := 1
 	scheduledUpgrades := 0
 
-	for i, actual := range statefulSets {
-		if actual.Status.UpdateRevision == "" || actual.Status.UpdateRevision == actual.Status.CurrentRevision {
-			// No upgrade to perform for this sset.
-			// We could decide to restore partition to the number of replicas here, but
-			// that's useless since any sset spec update already does this.
-			continue
-		}
+	for i, statefulSet := range statefulSets {
 		// Inspect each pod, starting from the highest ordinal, and decrement the partition to allow
 		// pod upgrades to go through, controlled by the StatefulSet controller.
-		for partition := sset.GetUpdatePartition(actual) - 1; partition >= 0; partition-- {
-			if partition >= sset.Replicas(actual) {
-				continue // to pod with smaller ordinal
+		for partition := sset.GetUpdatePartition(statefulSet); partition >= 0; partition-- {
+			if scheduledUpgrades >= maxConcurrentUpgrades {
+				return results.WithResult(defaultRequeue)
+			}
+			if partition >= sset.Replicas(statefulSet) {
+				continue
 			}
 
 			// Do we need to upgrade that pod?
-			podRef := types.NamespacedName{Namespace: actual.Namespace, Name: sset.PodName(actual.Name, partition)}
-			upgradeRequired, err := podRequiresUpgrade(d.Client, podRef, actual.Status.UpdateRevision)
+			podName := sset.PodName(statefulSet.Name, partition)
+			podRef := types.NamespacedName{Namespace: statefulSet.Namespace, Name: podName}
+			alreadyUpgraded, err := podUpgradeDone(d.Client, esState, podRef, statefulSet.Status.UpdateRevision)
 			if err != nil {
 				return results.WithError(err)
 			}
-			if !upgradeRequired {
-				continue // to pod with smaller ordinal
+			if alreadyUpgraded {
+				continue
 			}
 
-			// Is the cluster ready for a node upgrade?
-			clusterReady, err := clusterReadyForNodeRestart(esClient)
+			// An upgrade is required for that pod.
+			scheduledUpgrades++
+
+			// Is the pod upgrade already scheduled?
+			if partition == sset.GetUpdatePartition(statefulSet) {
+				continue
+			}
+
+			// Is the cluster ready for the node upgrade?
+			clusterReady, err := clusterReadyForNodeRestart(es, esState)
 			if err != nil {
 				return results.WithError(err)
 			}
@@ -95,82 +112,93 @@
 				return results.WithResult(defaultRequeue)
 			}
 
-			log.Info("Preparing cluster for node restart", "namespace", actual.Namespace, "node", podRef.Name)
-			if err := prepareClusterForNodeRestart(esClient); err != nil {
-				return results.WithError(err)
-			}
-
-			// TODO: zen1, zen2
-
-			// Node can be removed, update the StatefulSet rollingUpdate.Partition ordinal.
-			partitionCopy := partition
-			log.Info("Updating rollingUpdate.Partition",
-				"namespace", actual.Namespace,
-				"name", actual.Name,
-				"from", actual.Spec.UpdateStrategy.RollingUpdate.Partition,
-				"to", partitionCopy,
-			)
-			statefulSets[i].Spec.UpdateStrategy.RollingUpdate = &appsv1.RollingUpdateStatefulSetStrategy{
-				Partition: &partitionCopy,
-			}
-			if err := d.Client.Update(&statefulSets[i]); err != nil {
+			// Upgrade the pod.
+			if err := d.upgradeStatefulSetPartition(es, &statefulSets[i], esClient, esState, partition); err != nil {
 				return results.WithError(err)
 			}
-
-			// Register the updated sset generation to deal with out-of-date sset cache.
-			d.Expectations.ExpectGeneration(statefulSets[i].ObjectMeta)
-
-			// Don't restart all pods at once: return if we reached our budget.
 			scheduledUpgrades++
-			if scheduledUpgrades >= maxConcurrentUpgrades {
-				return results.WithResult(defaultRequeue)
-			}
 		}
 	}
 	return results
 }
 
-func prepareClusterForNodeRestart(esClient esclient.Client) error {
+func (d *defaultDriver) upgradeStatefulSetPartition(
+	es v1alpha1.Elasticsearch,
+	statefulSet *appsv1.StatefulSet,
+	esClient esclient.Client,
+	esState ESState,
+	newPartition int32,
+) error {
+	log.Info("Preparing cluster for node restart", "namespace", es.Namespace, "name", es.Name)
+	if err := prepareClusterForNodeRestart(esClient, esState); err != nil {
+		return err
+	}
+
+	// TODO: zen1, zen2
+
+	// Node can be removed, update the StatefulSet rollingUpdate.Partition ordinal.
+	log.Info("Updating rollingUpdate.Partition",
+		"namespace", statefulSet.Namespace,
+		"name", statefulSet.Name,
+		"from", statefulSet.Spec.UpdateStrategy.RollingUpdate.Partition,
+		"to", &newPartition,
+	)
+	statefulSet.Spec.UpdateStrategy.RollingUpdate = &appsv1.RollingUpdateStatefulSetStrategy{
+		Partition: &newPartition,
+	}
+	if err := d.Client.Update(statefulSet); err != nil {
+		return err
+	}
+
+	// Register the updated sset generation to deal with out-of-date sset cache.
+	d.Expectations.ExpectGeneration(statefulSet.ObjectMeta)
+
+	return nil
+}
+
+func prepareClusterForNodeRestart(esClient esclient.Client, esState ESState) error {
 	// Disable shard allocations to avoid shards moving around while the node is temporarily down
-	// TODO: maybe already disabled, in which case we could skip the call.
-	if err := disableShardsAllocation(esClient); err != nil {
+	shardsAllocationEnabled, err := esState.ShardAllocationsEnabled()
+	if err != nil {
 		return err
 	}
+	if shardsAllocationEnabled {
+		if err := disableShardsAllocation(esClient); err != nil {
+			return err
+		}
+	}
+
+	// Request a sync flush to optimize indices recovery when the node restarts.
if err := doSyncFlush(esClient); err != nil { return err } + // TODO: halt ML jobs on that node return nil } // clusterReadyForNodeRestart returns true if the ES cluster allows a node to be restarted // with minimized downtime and no unexpected data loss. -func clusterReadyForNodeRestart(esClient esclient.Client) (bool, error) { - // TODO: don't do that request at every single reconciliation - // the problem is relying on observed ES state gives out-of-date information - // it's probably fine if we want to restart one node. But if we want to restart - // more than one, chances are the cluster is not green anymore for the second node - // after the first node is restarted, but still appears green in the cache. - ctx, cancel := context.WithTimeout(context.Background(), esclient.DefaultReqTimeout) - defer cancel() +func clusterReadyForNodeRestart(es v1alpha1.Elasticsearch, esState ESState) (bool, error) { // Check the cluster health: only allow node restart if health is green. // This would cause downtime if some shards have 0 replicas, but we consider that's on the user. // TODO: we could technically still restart a node if the cluster is yellow, // as long as there are other copies of the shards in-sync on other nodes - health, err := esClient.GetClusterHealth(ctx) + // TODO: the fact we rely on a cached health here would prevent more than 1 restart + // in a single reconciliation + green, err := esState.GreenHealth() if err != nil { return false, err } - if health.Status != string(v1alpha1.ElasticsearchGreenHealth) { - log.Info("Skipping node rolling upgrade since cluster is not green", "health", health.Status) + if !green { + log.Info("Skipping node rolling upgrade since cluster is not green", "namespace", es.Namespace, "name", es.Name) return false, nil } return true, nil } -// podRequiresUpgrade inspects the given pod and returns true if it should be upgraded. -func podRequiresUpgrade(c k8s.Client, podRef types.NamespacedName, expectedRevision string) (bool, error) { +// podUpgradeDone inspects the given pod and returns true if it was successfully upgraded. +func podUpgradeDone(c k8s.Client, esState ESState, podRef types.NamespacedName, expectedRevision string) (bool, error) { if expectedRevision == "" { // no upgrade scheduled for the sset return false, nil @@ -182,14 +210,27 @@ func podRequiresUpgrade(c k8s.Client, podRef types.NamespacedName, expectedRevis return false, err } if errors.IsNotFound(err) || !pod.DeletionTimestamp.IsZero() { - // pod already terminating + // pod is terminating return false, nil } if sset.PodRevision(pod) != expectedRevision { // pod revision does not match the sset upgrade revision - return true, nil + return false, nil + } + // is the pod ready? + if !k8s.IsPodReady(pod) { + return false, nil + } + // has the node joined the cluster yet? 
+	inCluster, err := esState.NodesInCluster([]string{podRef.Name})
+	if err != nil {
+		return false, err
+	}
+	if !inCluster {
+		log.V(1).Info("Node has not joined the cluster yet", "namespace", podRef.Namespace, "name", podRef.Name)
+		return false, err
 	}
-	return false, nil
+	return true, nil
 }
 
 func disableShardsAllocation(esClient esclient.Client) error {
@@ -204,11 +245,13 @@ func doSyncFlush(esClient esclient.Client) error {
 	return esClient.SyncedFlush(ctx)
 }
 
-func (d *defaultDriver) MaybeEnableShardsAllocation(es v1alpha1.Elasticsearch, esClient esclient.Client, statefulSets sset.StatefulSetList) *reconciler.Results {
+func (d *defaultDriver) MaybeEnableShardsAllocation(
+	es v1alpha1.Elasticsearch,
+	esClient esclient.Client,
+	esState ESState,
+	statefulSets sset.StatefulSetList,
+) *reconciler.Results {
 	results := &reconciler.Results{}
-
-	// TODO: no need to enable again if already enabled (get it from observers?)
-
 	// Since we rely on sset rollingUpdate.Partition, requeue in case our cache hasn't seen a sset update yet.
 	// Otherwise we could re-enable shards allocation while a pod was just scheduled for termination,
 	// with the partition in the sset cache being outdated.
@@ -216,7 +259,15 @@
 		return results.WithResult(defaultRequeue)
 	}
 
-	// Make sure all nodes scheduled for upgrade have been upgraded.
+	alreadyEnabled, err := esState.ShardAllocationsEnabled()
+	if err != nil {
+		return results.WithError(err)
+	}
+	if alreadyEnabled {
+		return results
+	}
+
+	// Make sure all pods scheduled for upgrade have been upgraded.
 	scheduledUpgradesDone, err := sset.ScheduledUpgradesDone(d.Client, statefulSets)
 	if err != nil {
 		return results.WithError(err)
@@ -231,13 +282,13 @@ func (d *defaultDriver) MaybeEnableShardsAllocation(es v1alpha1.Elasticsearch, e
 	}
 
 	// Make sure all nodes scheduled for upgrade are back into the cluster.
-	nodesInCluster, err := NodesInTheCluster(esClient, sset.PodNamesForStatefulSetList(statefulSets))
+	nodesInCluster, err := esState.NodesInCluster(statefulSets.PodNames())
 	if err != nil {
 		return results.WithError(err)
 	}
 	if !nodesInCluster {
 		log.V(1).Info(
-			"Rolling upgrade not over yet, somes nodes are not part of the cluster yet, keeping shard allocations disabled",
+			"Some upgraded nodes are not back in the cluster yet, keeping shard allocations disabled",
 			"namespace", es.Namespace,
 			"name", es.Name,
 		)
diff --git a/operators/pkg/controller/elasticsearch/sset/list.go b/operators/pkg/controller/elasticsearch/sset/list.go
index 887a67133f5..60e5279c876 100644
--- a/operators/pkg/controller/elasticsearch/sset/list.go
+++ b/operators/pkg/controller/elasticsearch/sset/list.go
@@ -42,6 +42,25 @@ func (l StatefulSetList) ObjectMetas() []metav1.ObjectMeta {
 	return objs
 }
 
+// RevisionUpdateScheduled returns true if at least one revision update is scheduled.
+func (l StatefulSetList) RevisionUpdateScheduled() bool {
+	for _, s := range l {
+		if s.Status.UpdateRevision != "" && s.Status.UpdateRevision != s.Status.CurrentRevision {
+			return true
+		}
+	}
+	return false
+}
+
+// PodNames returns the names of the pods for all StatefulSets in the list.
+func (l StatefulSetList) PodNames() []string {
+	var names []string
+	for _, s := range l {
+		names = append(names, PodNames(s)...)
+	}
+	return names
+}
+
 // GetUpdatePartition returns the updateStrategy.Partition index, or falls back to the number of replicas if not set.
 func GetUpdatePartition(statefulSet appsv1.StatefulSet) int32 {
 	if statefulSet.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
diff --git a/operators/pkg/controller/elasticsearch/sset/pod.go b/operators/pkg/controller/elasticsearch/sset/pod.go
index c2b8ef7e4cd..bd6ec6fe648 100644
--- a/operators/pkg/controller/elasticsearch/sset/pod.go
+++ b/operators/pkg/controller/elasticsearch/sset/pod.go
@@ -27,14 +27,6 @@ func PodNames(sset appsv1.StatefulSet) []string {
 	return names
 }
 
-func PodNamesForStatefulSetList(list StatefulSetList) []string {
-	var names []string
-	for _, s := range list {
-		names = append(names, PodNames(s)...)
-	}
-	return names
-}
-
 func PodRevision(pod corev1.Pod) string {
 	return pod.Labels[appsv1.StatefulSetRevisionLabel]
 }
@@ -51,7 +43,7 @@ func ScheduledUpgradesDone(c k8s.Client, statefulSets StatefulSetList) (bool, er
 		partition := GetUpdatePartition(s)
 		for i := Replicas(s) - 1; i >= partition; i-- {
 			var pod corev1.Pod
-			err := c.Get(types.NamespacedName{Namespace: s.Name, Name: PodName(s.Name, i)}, &pod)
+			err := c.Get(types.NamespacedName{Namespace: s.Namespace, Name: PodName(s.Name, i)}, &pod)
 			if errors.IsNotFound(err) {
 				// pod probably being terminated
 				return false, nil
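
Background on the caching approach introduced in esstate.go: each piece of cluster state is fetched lazily, at most once per reconciliation, through a small sync.Once helper, so repeated checks within one reconciliation reuse the first answer. The standalone sketch below shows the same pattern in isolation; the names (lazyHealth, Green, fetch) are illustrative only and not part of this change, and it also illustrates the caveat that an initialization error is only returned to the caller that triggered the first fetch.

package main

import (
	"fmt"
	"sync"
)

// initOnce runs f at most once; only the first caller sees f's error,
// later callers get nil plus whatever state f managed to populate.
func initOnce(once *sync.Once, f func() error) error {
	var err error
	once.Do(func() {
		err = f()
	})
	return err
}

// lazyHealth caches the result of one expensive lookup (standing in for a cluster health call).
type lazyHealth struct {
	once   sync.Once
	fetch  func() (string, error)
	status string
}

func (l *lazyHealth) Green() (bool, error) {
	if err := initOnce(&l.once, func() error {
		s, err := l.fetch()
		if err != nil {
			return err
		}
		l.status = s
		return nil
	}); err != nil {
		return false, err
	}
	return l.status == "green", nil
}

func main() {
	calls := 0
	h := lazyHealth{fetch: func() (string, error) {
		calls++ // stands in for one HTTP round-trip to Elasticsearch
		return "green", nil
	}}
	a, _ := h.Green()
	b, _ := h.Green()
	fmt.Println(a, b, calls) // true true 1: the fetch ran only once
}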