diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go index b1257139c265..3c0731243608 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go @@ -27,6 +27,7 @@ import ( "os/user" "path" "path/filepath" + "sort" "strconv" "strings" "time" @@ -49,6 +50,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/labels" @@ -56,6 +58,7 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime/serializer/json" utilflag "k8s.io/kubernetes/pkg/util/flag" + "k8s.io/kubernetes/pkg/watch" ) const ( @@ -477,7 +480,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { return nil, errors.New("provided options object is not a PodLogOptions") } selector := labels.SelectorFromSet(t.Spec.Selector) - pod, numPods, err := GetFirstPod(c, t.Namespace, selector) + sortBy := func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) } + pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } @@ -496,7 +500,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } - pod, numPods, err := GetFirstPod(c, t.Namespace, selector) + sortBy := func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) } + pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } @@ -653,21 +658,24 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { switch t := object.(type) { case *api.ReplicationController: selector := labels.SelectorFromSet(t.Spec.Selector) - pod, _, err := GetFirstPod(client, t.Namespace, selector) + sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *extensions.Deployment: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } - pod, _, err := GetFirstPod(client, t.Namespace, selector) + sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *extensions.Job: selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } - pod, _, err := GetFirstPod(client, t.Namespace, selector) + sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) return pod, err case *api.Pod: return t, nil @@ -685,21 +693,49 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { } } -// GetFirstPod returns the first pod of an object from its namespace and selector and the number of matching pods -func GetFirstPod(client *client.Client, namespace string, 
selector labels.Selector) (*api.Pod, int, error) { - var pods *api.PodList - for pods == nil || len(pods.Items) == 0 { - var err error - options := api.ListOptions{LabelSelector: selector} - if pods, err = client.Pods(namespace).List(options); err != nil { - return nil, 0, err - } - if len(pods.Items) == 0 { - time.Sleep(2 * time.Second) +// GetFirstPod returns a pod matching the namespace and label selector +// and the number of all pods that match the label selector. +func GetFirstPod(client client.Interface, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*api.Pod) sort.Interface) (*api.Pod, int, error) { + options := api.ListOptions{LabelSelector: selector} + + podList, err := client.Pods(namespace).List(options) + if err != nil { + return nil, 0, err + } + pods := []*api.Pod{} + for i := range podList.Items { + pod := podList.Items[i] + pods = append(pods, &pod) + } + if len(pods) > 0 { + sort.Sort(sortBy(pods)) + return pods[0], len(podList.Items), nil + } + + // Watch for ready pods. + options.ResourceVersion = podList.ResourceVersion + w, err := client.Pods(namespace).Watch(options) + if err != nil { + return nil, len(podList.Items), err + } + defer w.Stop() + + condition := func(event watch.Event) (bool, error) { + pod, ok := event.Object.(*api.Pod) + if !ok { + return false, fmt.Errorf("%#v is not a pod event", event) } + return api.IsPodReady(pod), nil + } + event, err := watch.Until(timeout, w, condition) + if err != nil { + return nil, len(podList.Items), err + } + pod, ok := event.Object.(*api.Pod) + if !ok { + return nil, len(podList.Items), fmt.Errorf("%#v is not a pod event", event) } - pod := &pods.Items[0] - return pod, len(pods.Items), nil + return pod, len(podList.Items), nil } // Command will stringify and return all environment arguments ie. 
a command run by a client diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_test.go index 459b34d6cd8c..ff3484f3494a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_test.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_test.go @@ -19,14 +19,17 @@ package util import ( "bytes" "encoding/json" + "fmt" "io/ioutil" "net/http" "os" "os/user" "path" + "reflect" "sort" "strings" "testing" + "time" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" @@ -36,9 +39,13 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/flag" + "k8s.io/kubernetes/pkg/watch" ) func TestNewFactoryDefaultFlagBindings(t *testing.T) { @@ -394,3 +401,199 @@ func TestSubstitueUser(t *testing.T) { } } } + +func newPodList(count, isUnready, isUnhealthy int, labels map[string]string) *api.PodList { + pods := []api.Pod{} + for i := 0; i < count; i++ { + newPod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("pod-%d", i+1), + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, i, 0, time.UTC), + Labels: labels, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + } + pods = append(pods, newPod) + } + if isUnready > -1 && isUnready < count { + pods[isUnready].Status.Conditions[0].Status = api.ConditionFalse + } + if isUnhealthy > -1 && isUnhealthy < count { + pods[isUnhealthy].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 5}} + } + return &api.PodList{ + Items: pods, + } +} + +func TestGetFirstPod(t *testing.T) { + labelSet := map[string]string{"test": "selector"} + tests := []struct { + name string + + podList *api.PodList + watching []watch.Event + sortBy func([]*api.Pod) sort.Interface + + expected *api.Pod + expectedNum int + expectedErr bool + }{ + { + name: "kubectl logs - two ready pods", + podList: newPodList(2, -1, -1, labelSet), + sortBy: func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-2", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + }, + expectedNum: 2, + }, + { + name: "kubectl logs - one unhealthy, one healthy", + podList: newPodList(2, -1, 1, labelSet), + sortBy: func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-2", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + ContainerStatuses: []api.ContainerStatus{{RestartCount: 5}}, + }, + }, + expectedNum: 2, + }, 
+ { + name: "kubectl attach - two ready pods", + podList: newPodList(2, -1, -1, labelSet), + sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + }, + expectedNum: 2, + }, + { + name: "kubectl attach - wait for ready pod", + podList: newPodList(1, 1, -1, labelSet), + watching: []watch.Event{ + { + Type: watch.Modified, + Object: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + }, + }, + }, + sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + }, + expectedNum: 1, + }, + } + + for i := range tests { + test := tests[i] + client := &testclient.Fake{} + client.PrependReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + return true, test.podList, nil + }) + if len(test.watching) > 0 { + watcher := watch.NewFake() + for _, event := range test.watching { + switch event.Type { + case watch.Added: + go watcher.Add(event.Object) + case watch.Modified: + go watcher.Modify(event.Object) + } + } + client.PrependWatchReactor("pods", testclient.DefaultWatchReactor(watcher, nil)) + } + selector := labels.Set(labelSet).AsSelector() + + pod, numPods, err := GetFirstPod(client, api.NamespaceDefault, selector, 1*time.Minute, test.sortBy) + if !test.expectedErr && err != nil { + t.Errorf("%s: unexpected error: %v", test.name, err) + continue + } + if test.expectedErr && err == nil { + t.Errorf("%s: expected an error", test.name) + continue + } + if test.expectedNum != numPods { + t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods) + continue + } + if !reflect.DeepEqual(test.expected, pod) { + t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/watch/watch.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/watch/watch.go index 93c34d936b5d..81eb092d29e0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/watch/watch.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/watch/watch.go @@ -18,8 +18,10 @@ package watch import ( "sync" + "time" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/wait" ) // Interface can be implemented by anything that knows how to watch and report changes. 
@@ -127,3 +129,62 @@ func (f *FakeWatcher) Error(errValue runtime.Object) { func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { f.result <- Event{action, obj} } + +// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition cannot be checked and should terminate. In general, it is better to define +// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed +// from false to true). +type ConditionFunc func(event Event) (bool, error) + +// Until reads items from the watch until each provided condition succeeds, and then returns the last watch +// encountered. The first condition that returns an error terminates the watch (and the event is also returned). +// If no event has been received, the returned event will be nil. +// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition. +func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc) (*Event, error) { + ch := watcher.ResultChan() + defer watcher.Stop() + var after <-chan time.Time + if timeout > 0 { + after = time.After(timeout) + } else { + ch := make(chan time.Time) + close(ch) + after = ch + } + var lastEvent *Event + for _, condition := range conditions { + // check the next condition against the previous event and short circuit waiting for the next watch + if lastEvent != nil { + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + break + } + } + ConditionSucceeded: + for { + select { + case event, ok := <-ch: + if !ok { + return lastEvent, wait.ErrWaitTimeout + } + lastEvent = &event + + // TODO: check for watch expired error and retry watch from latest point? + done, err := condition(event) + if err != nil { + return lastEvent, err + } + if done { + break ConditionSucceeded + } + + case <-after: + return lastEvent, wait.ErrWaitTimeout + } + } + } + return lastEvent, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/test/e2e/kubectl.go b/Godeps/_workspace/src/k8s.io/kubernetes/test/e2e/kubectl.go index dad906a84072..2993178589c4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/test/e2e/kubectl.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/test/e2e/kubectl.go @@ -32,6 +32,7 @@ import ( "path" "path/filepath" "regexp" + "sort" "strconv" "strings" "time" @@ -42,6 +43,7 @@ import ( apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/labels" @@ -483,7 +485,8 @@ var _ = KubeDescribe("Kubectl client", func() { withStdinData("abcd1234\n"). 
execOrDie() Expect(runOutput).ToNot(ContainSubstring("stdin closed")) - runTestPod, _, err := util.GetFirstPod(c, ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"})) + f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + runTestPod, _, err := util.GetFirstPod(c, ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, f) if err != nil { os.Exit(1) } diff --git a/docs/generated/oc_by_example_content.adoc b/docs/generated/oc_by_example_content.adoc index be933d7fabd9..f3cb418af74a 100644 --- a/docs/generated/oc_by_example_content.adoc +++ b/docs/generated/oc_by_example_content.adoc @@ -1608,6 +1608,12 @@ Start a shell session in a pod # Run the command 'cat /etc/resolv.conf' inside pod 'foo' $ oc rsh foo cat /etc/resolv.conf + + # See the configuration of your internal registry + $ oc rsh dc/docker-registry cat config.yml + + # Open a shell session on the container named 'index' inside a pod of your job + $ oc rsh -c index job/scheduled ---- ==== diff --git a/pkg/cmd/cli/cmd/rsh.go b/pkg/cmd/cli/cmd/rsh.go index e7aeb0dc575c..b8b091037de0 100644 --- a/pkg/cmd/cli/cmd/rsh.go +++ b/pkg/cmd/cli/cmd/rsh.go @@ -3,6 +3,7 @@ package cmd import ( "fmt" "io" + "time" "github.com/spf13/cobra" @@ -19,11 +20,13 @@ const ( rshLong = ` Open a remote shell session to a container -This command will attempt to start a shell session in the specified pod. It will default to the -first container if none is specified, and will attempt to use '/bin/bash' as the default shell. -You may pass an optional command after the pod name, which will be executed instead of a login -shell. A TTY will be automatically allocated if standard input is interactive - use -t and -T -to override. +This command will attempt to start a shell session in a pod for the specified resource. +It works with pods, deployment configs, jobs, daemon sets, and replication controllers. +Any of the aforementioned resources (apart from pods) will be resolved to a ready pod. +It will default to the first container if none is specified, and will attempt to use +'/bin/bash' as the default shell. You may pass an optional command after the resource name, +which will be executed instead of a login shell. A TTY will be automatically allocated +if standard input is interactive - use -t and -T to override. 
Note, some containers may not include a shell - use '%[1]s exec' if you need to run commands directly.` @@ -33,7 +36,13 @@ directly.` $ %[1]s foo # Run the command 'cat /etc/resolv.conf' inside pod 'foo' - $ %[1]s foo cat /etc/resolv.conf` + $ %[1]s foo cat /etc/resolv.conf + + # See the configuration of your internal registry + $ %[1]s dc/docker-registry cat config.yml + + # Open a shell session on the container named 'index' inside a pod of your job + $ %[1]s -c index job/scheduled` ) // RshOptions declare the arguments accepted by the Rsh command @@ -96,7 +105,7 @@ func (o *RshOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []s if len(args) < 1 { return kcmdutil.UsageError(cmd, "rsh requires a single Pod to connect to") } - o.PodName = args[0] + resource := args[0] args = args[1:] if len(args) > 0 { o.Command = args @@ -122,7 +131,9 @@ func (o *RshOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []s } o.Client = client - return nil + // TODO: Consider making the timeout configurable + o.PodName, err = f.PodForResource(resource, 10*time.Second) + return err } // Validate ensures that RshOptions are valid diff --git a/pkg/cmd/util/clientcmd/factory.go b/pkg/cmd/util/clientcmd/factory.go index 0f84326ad33b..93d1705e4991 100644 --- a/pkg/cmd/util/clientcmd/factory.go +++ b/pkg/cmd/util/clientcmd/factory.go @@ -18,14 +18,16 @@ import ( "github.com/spf13/pflag" "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/restclient" kclient "k8s.io/kubernetes/pkg/client/unversioned" kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" kclientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" @@ -41,6 +43,7 @@ import ( buildutil "github.com/openshift/origin/pkg/build/util" "github.com/openshift/origin/pkg/client" "github.com/openshift/origin/pkg/cmd/cli/describe" + "github.com/openshift/origin/pkg/cmd/util" deployapi "github.com/openshift/origin/pkg/deploy/api" deploygen "github.com/openshift/origin/pkg/deploy/generator" deployreaper "github.com/openshift/origin/pkg/deploy/reaper" @@ -379,45 +382,16 @@ func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory { } kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) { - oc, kc, err := w.Clients() - if err != nil { - return nil, err - } switch t := object.(type) { case *deployapi.DeploymentConfig: - var err error - var pods *api.PodList - for pods == nil || len(pods.Items) == 0 { - if t.Status.LatestVersion == 0 { - time.Sleep(2 * time.Second) - } - if t, err = oc.DeploymentConfigs(t.Namespace).Get(t.Name); err != nil { - return nil, err - } - latestDeploymentName := deployutil.LatestDeploymentNameForConfig(t) - deployment, err := kc.ReplicationControllers(t.Namespace).Get(latestDeploymentName) - if err != nil { - if kerrors.IsNotFound(err) { - continue - } - return nil, err - } - pods, err = kc.Pods(deployment.Namespace).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Selector)}) - if err != nil { - return nil, err - } - if len(pods.Items) == 0 { - time.Sleep(2 * time.Second) - } - 
} - var oldestPod *api.Pod - for i := range pods.Items { - pod := &pods.Items[i] - if oldestPod == nil || pod.CreationTimestamp.Before(oldestPod.CreationTimestamp) { - oldestPod = pod - } + _, kc, err := w.Clients() + if err != nil { + return nil, err } - return oldestPod, nil + selector := labels.SelectorFromSet(t.Spec.Selector) + f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + pod, _, err := cmdutil.GetFirstPod(kc, t.Namespace, selector, 1*time.Minute, f) + return pod, err default: return kAttachablePodForObjectFunc(object) } @@ -566,6 +540,90 @@ func (w *Factory) ApproximatePodTemplateForObject(object runtime.Object) (*api.P } } +func (f *Factory) PodForResource(resource string, timeout time.Duration) (string, error) { + sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + namespace, _, err := f.DefaultNamespace() + if err != nil { + return "", err + } + oc, kc, err := f.Clients() + if err != nil { + return "", err + } + mapper, _ := f.Object(false) + resourceType, name, err := util.ResolveResource(api.Resource("pods"), resource, mapper) + if err != nil { + return "", err + } + + switch resourceType { + case api.Resource("pods"): + return name, nil + case api.Resource("replicationcontrollers"): + rc, err := kc.ReplicationControllers(namespace).Get(name) + if err != nil { + return "", err + } + selector := labels.SelectorFromSet(rc.Spec.Selector) + pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy) + if err != nil { + return "", err + } + return pod.Name, nil + case deployapi.Resource("deploymentconfigs"): + dc, err := oc.DeploymentConfigs(namespace).Get(name) + if err != nil { + return "", err + } + selector := labels.SelectorFromSet(dc.Spec.Selector) + pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy) + if err != nil { + return "", err + } + return pod.Name, nil + case extensions.Resource("daemonsets"): + ds, err := kc.Extensions().DaemonSets(namespace).Get(name) + if err != nil { + return "", err + } + selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return "", err + } + pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy) + if err != nil { + return "", err + } + return pod.Name, nil + case extensions.Resource("jobs"): + job, err := kc.Extensions().Jobs(namespace).Get(name) + if err != nil { + return "", err + } + return podNameForJob(job, kc, timeout, sortBy) + case batch.Resource("jobs"): + job, err := kc.Batch().Jobs(namespace).Get(name) + if err != nil { + return "", err + } + return podNameForJob(job, kc, timeout, sortBy) + default: + return "", fmt.Errorf("remote shell for %s is not supported", resourceType) + } +} + +func podNameForJob(job *extensions.Job, kc *kclient.Client, timeout time.Duration, sortBy func(pods []*api.Pod) sort.Interface) (string, error) { + selector, err := unversioned.LabelSelectorAsSelector(job.Spec.Selector) + if err != nil { + return "", err + } + pod, _, err := cmdutil.GetFirstPod(kc, job.Namespace, selector, timeout, sortBy) + if err != nil { + return "", err + } + return pod.Name, nil +} + // Clients returns an OpenShift and Kubernetes client. 
func (f *Factory) Clients() (*client.Client, *kclient.Client, error) { kClient, err := f.Client() diff --git a/test/end-to-end/core.sh b/test/end-to-end/core.sh index 688d943a6cec..2d757d9d3df0 100755 --- a/test/end-to-end/core.sh +++ b/test/end-to-end/core.sh @@ -90,7 +90,10 @@ echo "[INFO] Waiting for Docker registry pod to start" wait_for_registry # check to make sure that logs for rc works -oc logs rc/docker-registry-1 > /dev/null +os::cmd::expect_success "oc logs rc/docker-registry-1 > /dev/null" +# check that we can get a remote shell to a dc or rc +os::cmd::expect_success_and_text "oc rsh dc/docker-registry cat config.yml" "5000" +os::cmd::expect_success_and_text "oc rsh rc/docker-registry-1 cat config.yml" "5000" # services can end up on any IP. Make sure we get the IP we need for the docker registry DOCKER_REGISTRY=$(oc get --output-version=v1beta3 --template="{{ .spec.portalIP }}:{{ with index .spec.ports 0 }}{{ .port }}{{ end }}" service docker-registry) @@ -265,7 +268,7 @@ frontend_pod=$(oc get pod -l deploymentconfig=frontend --template='{{(index .ite # when running as a restricted pod the registry will run with a pre-allocated # user in the neighborhood of 1000000+. Look for a substring of the pre-allocated uid range os::cmd::expect_success_and_text "oc exec -p ${frontend_pod} id" '1000' -os::cmd::expect_success_and_text "oc rsh ${frontend_pod} id -u" '1000' +os::cmd::expect_success_and_text "oc rsh pod/${frontend_pod} id -u" '1000' os::cmd::expect_success_and_text "oc rsh -T ${frontend_pod} id -u" '1000' # Test retrieving application logs from dc oc logs dc/frontend | grep "Connecting to production database"
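For reference, a minimal sketch of how the new watch.Until primitive is driven. It uses the FakeWatcher from pkg/watch in place of a real client.Pods(ns).Watch(options) stream, mirroring the wiring in TestGetFirstPod above; the pod and the condition are illustrative only, not part of the patch:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/watch"
)

func main() {
	// A FakeWatcher stands in for the result of client.Pods(ns).Watch(options).
	w := watch.NewFake()
	go func() {
		// First event: the pod exists but is not ready yet, so Until keeps waiting.
		w.Modify(&api.Pod{
			ObjectMeta: api.ObjectMeta{Name: "pod-1", Namespace: api.NamespaceDefault},
			Status:     api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionFalse}}},
		})
		// Second event: the pod turns ready and satisfies the condition.
		w.Modify(&api.Pod{
			ObjectMeta: api.ObjectMeta{Name: "pod-1", Namespace: api.NamespaceDefault},
			Status:     api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}},
		})
	}()

	// Level-driven condition: is the pod ready right now?
	isReady := func(event watch.Event) (bool, error) {
		pod, ok := event.Object.(*api.Pod)
		if !ok {
			return false, fmt.Errorf("unexpected object type: %T", event.Object)
		}
		return api.IsPodReady(pod), nil
	}

	event, err := watch.Until(30*time.Second, w, isReady)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("ready pod:", event.Object.(*api.Pod).Name)
}

GetFirstPod takes this path only when the initial list returns no pods: it starts a watch at the list's resource version and hands watch.Until an api.IsPodReady condition bounded by the caller's timeout.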