diff --git a/cmd/observe/io_reader_observer.go b/cmd/observe/io_reader_observer.go new file mode 100644 index 000000000..cbbb39bdb --- /dev/null +++ b/cmd/observe/io_reader_observer.go @@ -0,0 +1,106 @@ +// Copyright 2021 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package observe + +import ( + "bufio" + "context" + "io" + + "github.com/cilium/cilium/api/v1/observer" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/cilium/pkg/hubble/filters" + "github.com/cilium/hubble/pkg/logger" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" +) + +// ioReaderObserver implements ObserverClient interface. It reads flows +// in jsonpb format from an io.Reader. +type ioReaderObserver struct { + scanner *bufio.Scanner +} + +func newIOReaderObserver(reader io.Reader) *ioReaderObserver { + return &ioReaderObserver{ + scanner: bufio.NewScanner(reader), + } +} + +func (o *ioReaderObserver) GetFlows(_ context.Context, in *observer.GetFlowsRequest, _ ...grpc.CallOption) (observer.Observer_GetFlowsClient, error) { + return newIOReaderClient(o.scanner, in) +} + +func (o *ioReaderObserver) GetNodes(_ context.Context, _ *observer.GetNodesRequest, _ ...grpc.CallOption) (*observer.GetNodesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "GetNodes not implemented") +} + +func (o *ioReaderObserver) ServerStatus(_ context.Context, _ *observer.ServerStatusRequest, _ ...grpc.CallOption) (*observer.ServerStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "ServerStatus not implemented") +} + +// ioReaderClient implements Observer_GetFlowsClient. 
+type ioReaderClient struct { + scanner *bufio.Scanner + request *observer.GetFlowsRequest + allow filters.FilterFuncs + deny filters.FilterFuncs + grpc.ClientStream +} + +func newIOReaderClient(scanner *bufio.Scanner, request *observer.GetFlowsRequest) (*ioReaderClient, error) { + allow, err := filters.BuildFilterList(context.Background(), request.Whitelist, filters.DefaultFilters) + if err != nil { + return nil, err + } + deny, err := filters.BuildFilterList(context.Background(), request.Blacklist, filters.DefaultFilters) + if err != nil { + return nil, err + } + return &ioReaderClient{ + scanner: scanner, + request: request, + allow: allow, + deny: deny, + }, nil +} + +func (c *ioReaderClient) Recv() (*observer.GetFlowsResponse, error) { + for c.scanner.Scan() { + line := c.scanner.Text() + var res observer.GetFlowsResponse + err := protojson.Unmarshal(c.scanner.Bytes(), &res) + if err != nil { + logger.Logger.WithError(err).WithField("line", line).Warn("Failed to unmarshal json to flow") + continue + } + if c.request.Since != nil && c.request.Since.AsTime().After(res.Time.AsTime()) { + continue + } + if c.request.Until != nil && c.request.Until.AsTime().Before(res.Time.AsTime()) { + continue + } + if !filters.Apply(c.allow, c.deny, &v1.Event{Timestamp: res.Time, Event: res.GetFlow()}) { + continue + } + return &res, nil + } + if err := c.scanner.Err(); err != nil { + return nil, err + } + return nil, io.EOF +} diff --git a/cmd/observe/io_reader_observer_test.go b/cmd/observe/io_reader_observer_test.go new file mode 100644 index 000000000..5cfb6701b --- /dev/null +++ b/cmd/observe/io_reader_observer_test.go @@ -0,0 +1,123 @@ +// Copyright 2021 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
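Recv above expects every input line to be a single GetFlowsResponse message encoded with protojson, which is what `hubble observe -o jsonpb` emits. A minimal standalone sketch of that per-line decoding step (the flow value and timestamp below are made-up sample data):

```go
package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/observer"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	// One line of `hubble observe -o jsonpb` output (shortened, hypothetical values).
	line := `{"flow":{"verdict":"FORWARDED"},"time":"2021-03-01T10:00:00Z"}`

	var res observer.GetFlowsResponse
	if err := protojson.Unmarshal([]byte(line), &res); err != nil {
		// Recv logs and skips lines that fail to decode instead of aborting.
		fmt.Println("skipping malformed line:", err)
		return
	}
	fmt.Println(res.GetFlow().GetVerdict(), res.GetTime().AsTime())
}
```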
+ +package observe + +import ( + "context" + "io" + "strings" + "testing" + + "github.com/cilium/cilium/api/v1/flow" + "github.com/cilium/cilium/api/v1/observer" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func Test_getFlowsBasic(t *testing.T) { + flows := []*observer.GetFlowsResponse{{}, {}, {}} + var flowStrings []string + for _, f := range flows { + b, err := f.MarshalJSON() + assert.NoError(t, err) + flowStrings = append(flowStrings, string(b)) + } + server := newIOReaderObserver(strings.NewReader(strings.Join(flowStrings, "\n") + "\n")) + req := observer.GetFlowsRequest{} + client, err := server.GetFlows(context.Background(), &req) + assert.NoError(t, err) + for i := 0; i < len(flows); i++ { + _, err = client.Recv() + assert.NoError(t, err) + } + _, err = client.Recv() + assert.Equal(t, io.EOF, err) +} + +func Test_getFlowsTimeRange(t *testing.T) { + flows := []*observer.GetFlowsResponse{ + { + ResponseTypes: &observer.GetFlowsResponse_Flow{Flow: &flow.Flow{Verdict: flow.Verdict_FORWARDED}}, + Time: ×tamppb.Timestamp{Seconds: 0}, + }, + { + ResponseTypes: &observer.GetFlowsResponse_Flow{Flow: &flow.Flow{Verdict: flow.Verdict_DROPPED}}, + Time: ×tamppb.Timestamp{Seconds: 100}, + }, + { + ResponseTypes: &observer.GetFlowsResponse_Flow{Flow: &flow.Flow{Verdict: flow.Verdict_ERROR}}, + Time: ×tamppb.Timestamp{Seconds: 200}, + }, + } + var flowStrings []string + for _, f := range flows { + b, err := f.MarshalJSON() + assert.NoError(t, err) + flowStrings = append(flowStrings, string(b)) + } + server := newIOReaderObserver(strings.NewReader(strings.Join(flowStrings, "\n") + "\n")) + req := observer.GetFlowsRequest{ + Since: ×tamppb.Timestamp{Seconds: 50}, + Until: ×tamppb.Timestamp{Seconds: 150}, + } + client, err := server.GetFlows(context.Background(), &req) + assert.NoError(t, err) + res, err := client.Recv() + assert.NoError(t, err) + assert.Equal(t, flows[1], res) + _, err = client.Recv() + assert.Equal(t, io.EOF, err) +} + +func Test_getFlowsFilter(t *testing.T) { + flows := []*observer.GetFlowsResponse{ + { + ResponseTypes: &observer.GetFlowsResponse_Flow{Flow: &flow.Flow{Verdict: flow.Verdict_FORWARDED}}, + Time: ×tamppb.Timestamp{Seconds: 0}, + }, + { + ResponseTypes: &observer.GetFlowsResponse_Flow{Flow: &flow.Flow{Verdict: flow.Verdict_DROPPED}}, + Time: ×tamppb.Timestamp{Seconds: 100}, + }, + { + ResponseTypes: &observer.GetFlowsResponse_Flow{Flow: &flow.Flow{Verdict: flow.Verdict_ERROR}}, + Time: ×tamppb.Timestamp{Seconds: 200}, + }, + } + var flowStrings []string + for _, f := range flows { + b, err := f.MarshalJSON() + assert.NoError(t, err) + flowStrings = append(flowStrings, string(b)) + } + server := newIOReaderObserver(strings.NewReader(strings.Join(flowStrings, "\n") + "\n")) + req := observer.GetFlowsRequest{ + Whitelist: []*flow.FlowFilter{ + { + Verdict: []flow.Verdict{flow.Verdict_FORWARDED, flow.Verdict_ERROR}, + }, + }, + } + client, err := server.GetFlows(context.Background(), &req) + assert.NoError(t, err) + res, err := client.Recv() + assert.NoError(t, err) + assert.Equal(t, flows[0], res) + res, err = client.Recv() + assert.NoError(t, err) + assert.Equal(t, flows[2], res) + _, err = client.Recv() + assert.Equal(t, io.EOF, err) +} diff --git a/cmd/observe/observe.go b/cmd/observe/observe.go index 2b1a726cf..f916f0543 100644 --- a/cmd/observe/observe.go +++ b/cmd/observe/observe.go @@ -110,6 +110,20 @@ func New(vp *viper.Viper) *cobra.Command { func newObserveCmd(vp *viper.Viper, ofilter *observeFilter) 
*cobra.Command { observeCmd := &cobra.Command{ + Example: `* Piping flows to hubble observe + + Save the output of the 'hubble observe -o jsonpb' command to a file, and pipe it to + the observe command later for offline processing. For example: + + hubble observe -o jsonpb --last 1000 > flows.json + + Then, + + cat flows.json | hubble observe + + Note that the observe command ignores --follow, --last, and server flags when it + reads flows from stdin. The observe command processes and outputs flows in the same + order they are read from stdin, without sorting them by timestamp.`, Use: "observe", Short: "Observe flows of a Hubble server", Long: `Observe provides visibility into flow information on the network and @@ -121,19 +135,32 @@ more.`, if err := handleArgs(ofilter, debug); err != nil { return err } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - hubbleConn, err := conn.New(ctx, vp.GetString(config.KeyServer), vp.GetDuration(config.KeyTimeout)) + req, err := getRequest(ofilter) if err != nil { return err } - defer hubbleConn.Close() - req, err := getRequest(ofilter) + + var client observer.ObserverClient + fi, err := os.Stdin.Stat() if err != nil { return err } + if fi.Mode()&os.ModeNamedPipe != 0 { + // read flows from stdin + client = newIOReaderObserver(os.Stdin) + } else { + // read flows from a hubble server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hubbleConn, err := conn.New(ctx, vp.GetString(config.KeyServer), vp.GetDuration(config.KeyTimeout)) + if err != nil { + return err + } + defer hubbleConn.Close() + client = observer.NewObserverClient(hubbleConn) + } + logger.Logger.WithField("request", req).Debug("Sending GetFlows request") - client := observer.NewObserverClient(hubbleConn) if err := getFlows(client, req); err != nil { msg := err.Error() // extract custom error message from failed grpc call diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/cluster.go b/vendor/github.com/cilium/cilium/pkg/defaults/cluster.go new file mode 100644 index 000000000..3fa326f16 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/defaults/cluster.go @@ -0,0 +1,20 @@ +// Copyright 2018 Authors of Cilium +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaults + +const ( + // ClusterName is the default cluster name + ClusterName = "default" +) diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go new file mode 100644 index 000000000..89487edfd --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go @@ -0,0 +1,425 @@ +// Copyright 2016-2020 Authors of Cilium +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaults + +import ( + "time" +) + +const ( + // AgentHealthPort is the default value for option.AgentHealthPort + AgentHealthPort = 9876 + + // GopsPortAgent is the default value for option.GopsPort in the agent + GopsPortAgent = 9890 + + // GopsPortOperator is the default value for option.GopsPort in the operator + GopsPortOperator = 9891 + + // GopsPortApiserver is the default value for option.GopsPort in the apiserver + GopsPortApiserver = 9892 + + // IPv6ClusterAllocCIDR is the default value for option.IPv6ClusterAllocCIDR + IPv6ClusterAllocCIDR = IPv6ClusterAllocCIDRBase + "/64" + + // IPv6ClusterAllocCIDRBase is the default base for IPv6ClusterAllocCIDR + IPv6ClusterAllocCIDRBase = "f00d::" + + // RuntimePath is the default path to the runtime directory + RuntimePath = "/var/run/cilium" + + // RuntimePathRights are the default access rights of the RuntimePath directory + RuntimePathRights = 0775 + + // StateDirRights are the default access rights of the state directory + StateDirRights = 0770 + + //StateDir is the default path for the state directory relative to RuntimePath + StateDir = "state" + + // TemplatesDir is the default path for the compiled template objects relative to StateDir + TemplatesDir = "templates" + + // TemplatePath is the default path for a symlink to a template relative to StateDir/ + TemplatePath = "template.o" + + // BpfDir is the default path for template files relative to LibDir + BpfDir = "bpf" + + // LibraryPath is the default path to the cilium libraries directory + LibraryPath = "/var/lib/cilium" + + // SockPath is the path to the UNIX domain socket exposing the API to clients locally + SockPath = RuntimePath + "/cilium.sock" + + // SockPathEnv is the environment variable to overwrite SockPath + SockPathEnv = "CILIUM_SOCK" + + // HubbleSockPath is the path to the UNIX domain socket exposing the Hubble + // API to clients locally. + HubbleSockPath = RuntimePath + "/hubble.sock" + + // HubbleSockPathEnv is the environment variable to overwrite + // HubbleSockPath. + HubbleSockPathEnv = "HUBBLE_SOCK" + + // MonitorSockPath1_2 is the path to the UNIX domain socket used to + // distribute BPF and agent events to listeners. + // This is the 1.2 protocol version. + MonitorSockPath1_2 = RuntimePath + "/monitor1_2.sock" + + // PidFilePath is the path to the pid file for the agent. + PidFilePath = RuntimePath + "/cilium.pid" + + // EnableHostIPRestore controls whether the host IP should be restored + // from previous state automatically + EnableHostIPRestore = true + + // DefaultMapRoot is the default path where BPFFS should be mounted + DefaultMapRoot = "/sys/fs/bpf" + + // DefaultCgroupRoot is the default path where cilium cgroup2 should be mounted + DefaultCgroupRoot = "/var/run/cilium/cgroupv2" + + // SockopsEnable controsl whether sockmap should be used + SockopsEnable = false + + // DefaultMapRootFallback is the path which is used when /sys/fs/bpf has + // a mount, but with the other filesystem than BPFFS. + DefaultMapRootFallback = "/run/cilium/bpffs" + + // DefaultMapPrefix is the default prefix for all BPF maps. 
+ DefaultMapPrefix = "tc/globals" + + // DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain + // for each FQDN selector in endpoint's restored DNS rules. + DNSMaxIPsPerRestoredRule = 1000 + + // ToFQDNsMinTTL is the default lower bound for TTLs used with ToFQDNs rules. + // This is used in DaemonConfig.Populate + ToFQDNsMinTTL = 3600 // 1 hour in seconds + + // ToFQDNsMaxIPsPerHost defines the maximum number of IPs to maintain + // for each FQDN name in an endpoint's FQDN cache + ToFQDNsMaxIPsPerHost = 50 + + // ToFQDNsMaxDeferredConnectionDeletes Maximum number of IPs to retain for + // expired DNS lookups with still-active connections + ToFQDNsMaxDeferredConnectionDeletes = 10000 + + // ToFQDNsIdleConnectionGracePeriod Time during which idle but + // previously active connections with expired DNS lookups are + // still considered alive + ToFQDNsIdleConnectionGracePeriod = 0 * time.Second + + // ToFQDNsPreCache is a path to a file with DNS cache data to insert into the + // global cache on startup. + // The file is not re-read after agent start. + ToFQDNsPreCache = "" + + // ToFQDNsEnableDNSCompression allows the DNS proxy to compress responses to + // endpoints that are larger than 512 Bytes or the EDNS0 option, if present. + ToFQDNsEnableDNSCompression = true + + // IdentityChangeGracePeriod is the default value for + // option.IdentityChangeGracePeriod + IdentityChangeGracePeriod = 5 * time.Second + + // ExecTimeout is a timeout for executing commands. + ExecTimeout = 300 * time.Second + + // StatusCollectorInterval is the interval between a probe invocations + StatusCollectorInterval = 5 * time.Second + + // StatusCollectorWarningThreshold is the duration after which a probe + // is declared as stale + StatusCollectorWarningThreshold = 15 * time.Second + + // StatusCollectorFailureThreshold is the duration after which a probe + // is considered failed + StatusCollectorFailureThreshold = 1 * time.Minute + + // EnableIPv4 is the default value for IPv4 enablement + EnableIPv4 = true + + // EnableIPv6 is the default value for IPv6 enablement + EnableIPv6 = true + + // EnableIPv6NDP is the default value for IPv6 NDP support enablement + EnableIPv6NDP = false + + // EnableL7Proxy is the default value for L7 proxy enablement + EnableL7Proxy = true + + // EnableHostLegacyRouting is the default value for using the old routing path via stack. + EnableHostLegacyRouting = false + + // EnableExternalIPs is the default value for k8s service with externalIPs feature. + EnableExternalIPs = true + + // K8sEnableEndpointSlice is the default value for k8s EndpointSlice feature. + K8sEnableEndpointSlice = true + + // PreAllocateMaps is the default value for BPF map preallocation + PreAllocateMaps = true + + // EnableIPSec is the default value for IPSec enablement + EnableIPSec = false + + // EncryptNode enables encrypting traffic from host networking applications + // which are not part of Cilium manged pods. 
+ EncryptNode = false + + // MonitorQueueSizePerCPU is the default value for the monitor queue + // size per CPU + MonitorQueueSizePerCPU = 1024 + + // MonitorQueueSizePerCPUMaximum is the maximum value for the monitor + // queue size when derived from the number of CPUs + MonitorQueueSizePerCPUMaximum = 16384 + + // NodeInitTimeout is the time the agent is waiting until giving up to + // initialize the local node with the kvstore + NodeInitTimeout = 15 * time.Minute + + // ClientConnectTimeout is the time the cilium-agent client is + // (optionally) waiting before returning an error. + ClientConnectTimeout = 30 * time.Second + + // DatapathMode is the default value for the datapath mode. + DatapathMode = "veth" + + // EnableBPFTProxy is the default value for EnableBPFTProxy + EnableBPFTProxy = false + + // EnableXTSocketFallback is the default value for EnableXTSocketFallback + EnableXTSocketFallback = true + + // EnableLocalNodeRoute default value for EnableLocalNodeRoute + EnableLocalNodeRoute = true + + // EnableAutoDirectRouting is the default value for EnableAutoDirectRouting + EnableAutoDirectRouting = false + + // EnableHealthChecking is the default value for EnableHealthChecking + EnableHealthChecking = true + + // EnableEndpointHealthChecking is the default value for + // EnableEndpointHealthChecking + EnableEndpointHealthChecking = true + + // EnableHealthCheckNodePort is the default value for + // EnableHealthCheckNodePort + EnableHealthCheckNodePort = true + + // AlignCheckerName is the BPF object name for the alignchecker. + AlignCheckerName = "bpf_alignchecker.o" + + // KVstorePeriodicSync is the default kvstore periodic sync interval + KVstorePeriodicSync = 5 * time.Minute + + // KVstoreConnectivityTimeout is the timeout when performing kvstore operations + KVstoreConnectivityTimeout = 2 * time.Minute + + // KVStoreStaleLockTimeout is the timeout for when a lock is held for + // a kvstore path for too long. + KVStoreStaleLockTimeout = 30 * time.Second + + // IPAllocationTimeout is the timeout when allocating CIDRs + IPAllocationTimeout = 2 * time.Minute + + // PolicyQueueSize is the default queue size for policy-related events. + PolicyQueueSize = 100 + + // KVstoreQPS is default rate limit for kv store operations + KVstoreQPS = 20 + + // EndpointQueueSize is the default queue size for an endpoint. + EndpointQueueSize = 25 + + // SelectiveRegeneration specifies whether regeneration of endpoints will be + // invoked only for endpoints which are selected by policy changes. + SelectiveRegeneration = true + + // K8sSyncTimeout specifies the standard time to allow for synchronizing + // local caches with Kubernetes state before exiting. + K8sSyncTimeout = 3 * time.Minute + + // K8sWatcherEndpointSelector specifies the k8s endpoints that Cilium + // should watch for. 
+ K8sWatcherEndpointSelector = "metadata.name!=kube-scheduler,metadata.name!=kube-controller-manager,metadata.name!=etcd-operator,metadata.name!=gcp-controller-manager" + + // ConntrackGCMaxLRUInterval is the maximum conntrack GC interval when using LRU maps + ConntrackGCMaxLRUInterval = 12 * time.Hour + + // ConntrackGCMaxInterval is the maximum conntrack GC interval for non-LRU maps + ConntrackGCMaxInterval = 30 * time.Minute + + // ConntrackGCMinInterval is the minimum conntrack GC interval + ConntrackGCMinInterval = 10 * time.Second + + // ConntrackGCStartingInterval is the default starting interval for + // connection tracking garbage collection + ConntrackGCStartingInterval = 5 * time.Minute + + // K8sEventHandover enables use of the kvstore to optimize Kubernetes + // event handling by listening for k8s events in the operator and + // mirroring it into the kvstore for reduced overhead in large + // clusters. + K8sEventHandover = false + + // LoopbackIPv4 is the default address for service loopback + LoopbackIPv4 = "169.254.42.1" + + // EndpointInterfaceNamePrefix is the default prefix name of the + // interface names shared by all endpoints + EndpointInterfaceNamePrefix = "lxc+" + + // ForceLocalPolicyEvalAtSource is the default value for + // option.ForceLocalPolicyEvalAtSource. It is enabled by default to + // provide backwards compatibility, it can be disabled via an option + ForceLocalPolicyEvalAtSource = true + + // EnableEndpointRoutes is the value for option.EnableEndpointRoutes. + // It is disabled by default for backwards compatibility. + EnableEndpointRoutes = false + + // AnnotateK8sNode is the default value for option.AnnotateK8sNode. It is + // enabled by default to annotate kubernetes node and can be disabled using + // the provided option. + AnnotateK8sNode = true + + // MonitorBufferPages is the default number of pages to use for the + // ring buffer interacting with the kernel + MonitorBufferPages = 64 + + // NodeDeleteDelay is the delay before an unreliable node delete is + // handled. During this delay, the node can re-appear and the delete + // event is ignored. + NodeDeleteDelay = 30 * time.Second + + // KVstoreLeaseTTL is the time-to-live of the kvstore lease. + KVstoreLeaseTTL = 15 * time.Minute + + // KVstoreKeepAliveIntervalFactor is the factor to calculate the interval + // from KVstoreLeaseTTL in which KVstore lease is being renewed. + KVstoreKeepAliveIntervalFactor = 3 + + // LockLeaseTTL is the time-to-live of the lease dedicated for locks of Kvstore. + LockLeaseTTL = 25 * time.Second + + // KVstoreLeaseMaxTTL is the upper bound for KVStore lease TTL value. + // It is calculated as Min(int64 positive max, etcd MaxLeaseTTL, consul MaxLeaseTTL) + KVstoreLeaseMaxTTL = 86400 * time.Second + + // IPAMPreAllocation is the default value for + // CiliumNode.Spec.IPAM.PreAllocate if no value is set + IPAMPreAllocation = 8 + + // ENIFirstInterfaceIndex is the default value for + // CiliumNode.Spec.ENI.FirstInterfaceIndex if no value is set. 
+ ENIFirstInterfaceIndex = 0 + + // ParallelAllocWorkers is the default max number of parallel workers doing allocation in the operator + ParallelAllocWorkers = 50 + + // IPAMAPIBurst is the default burst value when rate limiting access to external APIs + IPAMAPIBurst = 4 + + // IPAMAPIQPSLimit is the default QPS limit when rate limiting access to external APIs + IPAMAPIQPSLimit = 20.0 + + // AutoCreateCiliumNodeResource enables automatic creation of a + // CiliumNode resource for the local node + AutoCreateCiliumNodeResource = true + + // PolicyTriggerInterval is default amount of time between triggers of + // policy updates are invoked. + PolicyTriggerInterval = 1 * time.Second + + // K8sClientQPSLimit is the default qps for the k8s client. It is set to 0 because the the k8s client + // has its own default. + K8sClientQPSLimit float32 = 0.0 + + // K8sClientBurst is the default burst for the k8s client. It is set to 0 because the the k8s client + // has its own default. + K8sClientBurst = 0 + + // K8sServiceCacheSize is the default value for option.K8sServiceCacheSize + // which denotes the value of Cilium's K8s service cache size. + K8sServiceCacheSize = 128 + + // AllowICMPFragNeeded is the default value for option.AllowICMPFragNeeded flag. + // It is enabled by default and directs that the ICMP Fragmentation needed type + // packets are allowed to enable TCP Path MTU. + AllowICMPFragNeeded = true + + // RestoreV4Addr is used as match for cilium_host v4 address + RestoreV4Addr = "cilium.v4.internal.raw " + + // RestoreV6Addr is used as match for cilium_host v6 (router) address + RestoreV6Addr = "cilium.v6.internal.raw " + + // EnableWellKnownIdentities is enabled by default as this is the + // original behavior. New default Helm templates will disable this. + EnableWellKnownIdentities = true + + // CertsDirectory is the default directory used to find certificates + // specified in the L7 policies. + CertsDirectory = RuntimePath + "/certs" + + // EnableRemoteNodeIdentity is the default value for option.EnableRemoteNodeIdentity + EnableRemoteNodeIdentity = false + + // IPAMExpiration is the timeout after which an IP subject to expiratio + // is being released again if no endpoint is being created in time. + IPAMExpiration = 10 * time.Minute + + // EnableIPv4FragmentsTracking enables IPv4 fragments tracking for + // L4-based lookups + EnableIPv4FragmentsTracking = true + + // FragmentsMapEntries is the default number of entries allowed in an + // the map used to track datagram fragments. + FragmentsMapEntries = 8192 + + // K8sEnableAPIDiscovery defines whether Kuberntes API groups and + // resources should be probed using the discovery API + K8sEnableAPIDiscovery = false + + // EnableIdentityMark enables setting identity in mark field of packet + // for local traffic + EnableIdentityMark = true + + // K8sEnableLeasesFallbackDiscovery enables k8s to fallback to API probing to check + // for the support of Leases in Kubernetes when there is an error in discovering + // API groups using Discovery API. + K8sEnableLeasesFallbackDiscovery = false + + // KubeProxyReplacementHealthzBindAddr is the default kubeproxyReplacement healthz server bind addr + KubeProxyReplacementHealthzBindAddr = "" + + // EnableBPFBypassFIBLookup instructs Cilium to enable the FIB lookup bypass optimization for nodeport reverse NAT handling. + EnableBPFBypassFIBLookup = true + + // InstallNoConntrackRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic. 
+ InstallNoConntrackIptRules = false + + // WireguardSubnetV4 is a default wireguard tunnel subnet + WireguardSubnetV4 = "172.16.43.0/24" + + // WireguardSubnetV6 is a default wireguard tunnel subnet + WireguardSubnetV6 = "fdc9:281f:04d7:9ee9::1/64" +) diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/node.go b/vendor/github.com/cilium/cilium/pkg/defaults/node.go new file mode 100644 index 000000000..adf3e100f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/defaults/node.go @@ -0,0 +1,59 @@ +// Copyright 2016-2017 Authors of Cilium +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaults + +import ( + "net" +) + +const ( + // DefaultIPv4Prefix is the prefix for all the IPv4 addresses. + // %d is substituted with the last byte of first global IPv4 address + // configured on the system. + DefaultIPv4Prefix = "10.%d.0.1" + + // DefaultIPv4PrefixLen is the length used to allocate container IPv4 addresses from. + DefaultIPv4PrefixLen = 16 + + // DefaultNAT46Prefix is the IPv6 prefix to represent NATed IPv4 addresses. + DefaultNAT46Prefix = "0:0:0:0:0:FFFF::/96" + + // HostDevice is the name of the device that connects the cilium IP + // space with the host's networking model + HostDevice = "cilium_host" + // SecondHostDevice is the name of the second interface of the host veth pair. + SecondHostDevice = "cilium_net" +) + +var ( + // Default addressing schema + // + // node: beef:beef:beef:beef:::/96 + // lxc: beef:beef:beef:beef::::/128 + + // ContainerIPv6Mask is the IPv6 prefix length for address assigned to + // container. The default is L3 only and thus /128. + ContainerIPv6Mask = net.CIDRMask(128, 128) + + // ContainerIPv4Mask is the IPv4 prefix length for address assigned to + // container. The default is L3 only and thus /32. + ContainerIPv4Mask = net.CIDRMask(32, 32) + + // IPv6DefaultRoute is the default IPv6 route. + IPv6DefaultRoute = net.IPNet{IP: net.IPv6zero, Mask: net.CIDRMask(0, 128)} + + // IPv4DefaultRoute is the default IPv4 route. + IPv4DefaultRoute = net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)} +) diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/event_type.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/event_type.go new file mode 100644 index 000000000..60b6a1d2a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/event_type.go @@ -0,0 +1,88 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filters + +import ( + "context" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + monitorAPI "github.com/cilium/cilium/pkg/monitor/api" +) + +func filterByEventType(types []*flowpb.EventTypeFilter) FilterFunc { + return func(ev *v1.Event) bool { + switch ev.Event.(type) { + case *flowpb.Flow: + event := ev.GetFlow().GetEventType() + if event == nil { + return false + } + for _, typeFilter := range types { + if t := typeFilter.GetType(); t != 0 && event.Type != t { + continue + } + if typeFilter.GetMatchSubType() && typeFilter.GetSubType() != event.SubType { + continue + } + return true + } + case *flowpb.AgentEvent: + for _, typeFilter := range types { + if t := typeFilter.GetType(); t != 0 && t != monitorAPI.MessageTypeAgent { + continue + } + agentEventType := int32(ev.GetAgentEvent().GetType()) + if typeFilter.GetMatchSubType() && typeFilter.GetSubType() != agentEventType { + continue + } + return true + } + case *flowpb.DebugEvent: + for _, typeFilter := range types { + if t := typeFilter.GetType(); t != 0 && t != monitorAPI.MessageTypeDebug { + continue + } + debugEventType := int32(ev.GetDebugEvent().GetType()) + if typeFilter.GetMatchSubType() && typeFilter.GetSubType() != debugEventType { + continue + } + return true + } + case *flowpb.LostEvent: + // Currently there's no way in the Hubble CLI and API to filter lost events, + // thus always include them. They are very uncommon and only occur on + // overloaded systems, in which case a user would anyway want to get them. + return true + } + + return false + } +} + +// EventTypeFilter implements filtering based on event type +type EventTypeFilter struct{} + +// OnBuildFilter builds an event type filter +func (e *EventTypeFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + types := ff.GetEventType() + if len(types) > 0 { + fs = append(fs, filterByEventType(types)) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/filters.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/filters.go new file mode 100644 index 000000000..47933ab2e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/filters.go @@ -0,0 +1,153 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "context" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +// FilterFunc is the function will be used to filter the given data. +// Should return true if the filter is hit, false otherwise. +type FilterFunc func(ev *v1.Event) bool + +// FilterFuncs is a combination of multiple filters, typically applied together. +type FilterFuncs []FilterFunc + +// Apply filters the flow with the given white- and blacklist. Returns true +// if the flow should be included in the result. 
+func Apply(whitelist, blacklist FilterFuncs, ev *v1.Event) bool { + return whitelist.MatchOne(ev) && blacklist.MatchNone(ev) +} + +// MatchAll returns true if all the filters match the provided data, i.e. AND. +func (fs FilterFuncs) MatchAll(ev *v1.Event) bool { + for _, f := range fs { + if !f(ev) { + return false + } + } + return true +} + +// MatchOne returns true if at least one of the filters match the provided data or +// if no filters are specified, i.e. OR. +func (fs FilterFuncs) MatchOne(ev *v1.Event) bool { + if len(fs) == 0 { + return true + } + + for _, f := range fs { + if f(ev) { + return true + } + } + return false +} + +// MatchNone returns true if none of the filters match the provided data or +// if no filters are specified, i.e. NOR +func (fs FilterFuncs) MatchNone(ev *v1.Event) bool { + if len(fs) == 0 { + return true + } + + for _, f := range fs { + if f(ev) { + return false + } + } + return true +} + +// OnBuildFilter is invoked while building a flow filter +type OnBuildFilter interface { + OnBuildFilter(context.Context, *flowpb.FlowFilter) ([]FilterFunc, error) +} + +// OnBuildFilterFunc implements OnBuildFilter for a single function +type OnBuildFilterFunc func(context.Context, *flowpb.FlowFilter) ([]FilterFunc, error) + +// OnBuildFilter is invoked while building a flow filter +func (f OnBuildFilterFunc) OnBuildFilter(ctx context.Context, flow *flowpb.FlowFilter) ([]FilterFunc, error) { + return f(ctx, flow) +} + +// BuildFilter builds a filter based on a FlowFilter. It returns: +// - the FilterFunc to be used to filter packets based on the requested +// FlowFilter; +// - an error in case something went wrong. +func BuildFilter(ctx context.Context, ff *flowpb.FlowFilter, auxFilters []OnBuildFilter) (FilterFuncs, error) { + var fs []FilterFunc + + for _, f := range auxFilters { + fl, err := f.OnBuildFilter(ctx, ff) + if err != nil { + return nil, err + } + if fl != nil { + fs = append(fs, fl...) + } + } + + return fs, nil +} + +// BuildFilterList constructs a list of filter functions representing the list +// of FlowFilter. It returns: +// - the FilterFunc to be used to filter packets based on the requested +// FlowFilter; +// - an error in case something went wrong. 
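In other words, an event is kept when at least one whitelist filter matches and no blacklist filter matches, and an empty list on either side never rejects anything. A small self-contained sketch of this behaviour, building a verdict-only whitelist the same way the new ioReaderClient does (the verdict values are arbitrary sample data):

```go
package main

import (
	"context"
	"fmt"

	flowpb "github.com/cilium/cilium/api/v1/flow"
	v1 "github.com/cilium/cilium/pkg/hubble/api/v1"
	"github.com/cilium/cilium/pkg/hubble/filters"
)

func main() {
	// Whitelist: keep only dropped flows. No blacklist.
	allow, err := filters.BuildFilterList(context.Background(),
		[]*flowpb.FlowFilter{{Verdict: []flowpb.Verdict{flowpb.Verdict_DROPPED}}},
		filters.DefaultFilters)
	if err != nil {
		panic(err)
	}

	dropped := &v1.Event{Event: &flowpb.Flow{Verdict: flowpb.Verdict_DROPPED}}
	forwarded := &v1.Event{Event: &flowpb.Flow{Verdict: flowpb.Verdict_FORWARDED}}

	fmt.Println(filters.Apply(allow, nil, dropped))   // true: a whitelist filter matches, empty blacklist never rejects
	fmt.Println(filters.Apply(allow, nil, forwarded)) // false: no whitelist filter matches
}
```

Recv applies the request's Since/Until bounds before these filters, so time-range trimming and flow filtering stay independent.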
+func BuildFilterList(ctx context.Context, ff []*flowpb.FlowFilter, auxFilters []OnBuildFilter) (FilterFuncs, error) { + filterList := make([]FilterFunc, 0, len(ff)) + + for _, flowFilter := range ff { + // Build filter matching on all requirements of the FlowFilter + tf, err := BuildFilter(ctx, flowFilter, auxFilters) + if err != nil { + return nil, err + } + + // All filters representing a FlowFilter must match + filterFunc := func(ev *v1.Event) bool { + return tf.MatchAll(ev) + } + + filterList = append(filterList, filterFunc) + } + + return filterList, nil +} + +// DefaultFilters is the list of default filters +var DefaultFilters = []OnBuildFilter{ + &EventTypeFilter{}, + &VerdictFilter{}, + &ReplyFilter{}, + &IdentityFilter{}, + &ProtocolFilter{}, + &IPFilter{}, + &PodFilter{}, + &ServiceFilter{}, + &FQDNFilter{}, + &LabelsFilter{}, + &PortFilter{}, + &HTTPFilter{}, + &TCPFilter{}, + &NodeNameFilter{}, + &IPVersionFilter{}, +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/fqdn.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/fqdn.go new file mode 100644 index 000000000..374fc0ced --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/fqdn.go @@ -0,0 +1,114 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "context" + "fmt" + "regexp" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func sourceFQDN(ev *v1.Event) []string { + return ev.GetFlow().GetSourceNames() +} + +func destinationFQDN(ev *v1.Event) []string { + return ev.GetFlow().GetDestinationNames() +} + +func filterByFQDNs(fqdnPatterns []string, getFQDNs func(*v1.Event) []string) (FilterFunc, error) { + fqdnRegexp, err := compileFQDNPattern(fqdnPatterns) + if err != nil { + return nil, err + } + + return func(ev *v1.Event) bool { + names := getFQDNs(ev) + if len(names) == 0 { + return false + } + + for _, name := range names { + if fqdnRegexp.MatchString(name) { + return true + } + } + + return false + }, nil +} + +// filterByDNSQueries returns a FilterFunc that filters a flow by L7.DNS.query field. +// The filter function returns true if and only if the DNS query field matches any of +// the regular expressions. 
+func filterByDNSQueries(queryPatterns []string) (FilterFunc, error) { + queries := make([]*regexp.Regexp, 0, len(queryPatterns)) + for _, pattern := range queryPatterns { + query, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("failed to compile regexp: %v", err) + } + queries = append(queries, query) + } + return func(ev *v1.Event) bool { + dns := ev.GetFlow().GetL7().GetDns() + if dns == nil { + return false + } + for _, query := range queries { + if query.MatchString(dns.Query) { + return true + } + } + return false + }, nil +} + +// FQDNFilter implements filtering based on FQDN information +type FQDNFilter struct{} + +// OnBuildFilter builds a FQDN filter +func (f *FQDNFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetSourceFqdn() != nil { + ff, err := filterByFQDNs(ff.GetSourceFqdn(), sourceFQDN) + if err != nil { + return nil, err + } + fs = append(fs, ff) + } + + if ff.GetDestinationFqdn() != nil { + ff, err := filterByFQDNs(ff.GetDestinationFqdn(), destinationFQDN) + if err != nil { + return nil, err + } + fs = append(fs, ff) + } + + if ff.GetDnsQuery() != nil { + dnsFilters, err := filterByDNSQueries(ff.GetDnsQuery()) + if err != nil { + return nil, fmt.Errorf("invalid DNS query filter: %v", err) + } + fs = append(fs, dnsFilters) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/http.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/http.go new file mode 100644 index 000000000..07aadaca9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/http.go @@ -0,0 +1,185 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filters + +import ( + "context" + "errors" + "fmt" + "net/url" + "regexp" + "strings" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/cilium/pkg/monitor/api" +) + +func httpMatchCompatibleEventFilter(types []*flowpb.EventTypeFilter) bool { + if len(types) == 0 { + return true + } + + for _, t := range types { + if t.GetType() == api.MessageTypeAccessLog { + return true + } + } + + return false +} + +var ( + httpStatusCodeFull = regexp.MustCompile(`[1-5][0-9]{2}`) + httpStatusCodePrefix = regexp.MustCompile(`^([1-5][0-9]?\+)$`) +) + +func filterByHTTPStatusCode(statusCodePrefixes []string) (FilterFunc, error) { + var full, prefix []string + for _, s := range statusCodePrefixes { + switch { + case httpStatusCodeFull.MatchString(s): + full = append(full, s) + case httpStatusCodePrefix.MatchString(s): + prefix = append(prefix, strings.TrimSuffix(s, "+")) + default: + return nil, fmt.Errorf("invalid status code prefix: %q", s) + } + } + + return func(ev *v1.Event) bool { + http := ev.GetFlow().GetL7().GetHttp() + // Not an HTTP response record + if http == nil || http.Code == 0 { + return false + } + + // Check for both full matches or prefix matches + httpStatusCode := fmt.Sprintf("%03d", http.Code) + for _, f := range full { + if httpStatusCode == f { + return true + } + } + for _, p := range prefix { + if strings.HasPrefix(httpStatusCode, p) { + return true + } + } + + return false + }, nil +} + +func filterByHTTPMethods(methods []string) (FilterFunc, error) { + return func(ev *v1.Event) bool { + http := ev.GetFlow().GetL7().GetHttp() + + if http == nil || http.Method == "" { + // Not an HTTP or method is missing + return false + } + + for _, method := range methods { + if strings.EqualFold(http.Method, method) { + return true + } + } + + return false + }, nil +} + +func filterByHTTPPaths(pathRegexpStrs []string) (FilterFunc, error) { + pathRegexps := make([]*regexp.Regexp, 0, len(pathRegexpStrs)) + for _, pathRegexpStr := range pathRegexpStrs { + pathRegexp, err := regexp.Compile(pathRegexpStr) + if err != nil { + return nil, fmt.Errorf("%s: %v", pathRegexpStr, err) + } + pathRegexps = append(pathRegexps, pathRegexp) + } + + return func(ev *v1.Event) bool { + http := ev.GetFlow().GetL7().GetHttp() + + if http == nil || http.Url == "" { + return false + } + + uri, err := url.ParseRequestURI(http.Url) + if err != nil { + // Silently drop all invalid URIs as there is nothing else we can + // do. 
+ return false + } + for _, pathRegexp := range pathRegexps { + if pathRegexp.MatchString(uri.Path) { + return true + } + } + + return false + }, nil +} + +// HTTPFilter implements filtering based on HTTP metadata +type HTTPFilter struct{} + +// OnBuildFilter builds a HTTP filter +func (h *HTTPFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetHttpStatusCode() != nil { + if !httpMatchCompatibleEventFilter(ff.GetEventType()) { + return nil, errors.New("filtering by http status code requires " + + "the event type filter to only match 'l7' events") + } + + hsf, err := filterByHTTPStatusCode(ff.GetHttpStatusCode()) + if err != nil { + return nil, fmt.Errorf("invalid http status code filter: %v", err) + } + fs = append(fs, hsf) + } + + if ff.GetHttpMethod() != nil { + if !httpMatchCompatibleEventFilter(ff.GetEventType()) { + return nil, errors.New("filtering by http method requires " + + "the event type filter to only match 'l7' events") + } + + methodf, err := filterByHTTPMethods(ff.GetHttpMethod()) + if err != nil { + return nil, fmt.Errorf("invalid http method filter: %v", err) + } + fs = append(fs, methodf) + } + + if ff.GetHttpPath() != nil { + if !httpMatchCompatibleEventFilter(ff.GetEventType()) { + return nil, errors.New("filtering by http path requires " + + "the event type filter to only match 'l7' events") + } + + pathf, err := filterByHTTPPaths(ff.GetHttpPath()) + if err != nil { + return nil, fmt.Errorf("invalid http path filter: %v", err) + } + fs = append(fs, pathf) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/identity.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/identity.go new file mode 100644 index 000000000..16700a971 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/identity.go @@ -0,0 +1,61 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
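The two status-code regular expressions in filterByHTTPStatusCode accept either an exact three-digit code such as "404" or a prefix such as "5+"/"50+"; any other value makes the filter build fail with an error. A quick standalone check of those same patterns against a few sample inputs:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	full := regexp.MustCompile(`[1-5][0-9]{2}`)       // exact codes, e.g. "404"
	prefix := regexp.MustCompile(`^([1-5][0-9]?\+)$`) // prefixes, e.g. "5+" or "50+"

	for _, s := range []string{"404", "5+", "50+", "99", "600"} {
		fmt.Printf("%-4s full=%-5t prefix=%t\n", s, full.MatchString(s), prefix.MatchString(s))
	}
	// "404" is a full match, "5+" and "50+" are prefix matches; "99" and "600"
	// match neither, so filterByHTTPStatusCode would reject them with an error.
}
```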
+ +package filters + +import ( + "context" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func sourceEndpoint(ev *v1.Event) *flowpb.Endpoint { + return ev.GetFlow().GetSource() +} + +func destinationEndpoint(ev *v1.Event) *flowpb.Endpoint { + return ev.GetFlow().GetDestination() +} + +func filterByIdentity(identities []uint32, getEndpoint func(*v1.Event) *flowpb.Endpoint) FilterFunc { + return func(ev *v1.Event) bool { + if endpoint := getEndpoint(ev); endpoint != nil { + for _, i := range identities { + if i == endpoint.Identity { + return true + } + } + } + return false + } +} + +// IdentityFilter implements filtering based on security identity +type IdentityFilter struct{} + +// OnBuildFilter builds a security identity filter +func (i *IdentityFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetSourceIdentity() != nil { + fs = append(fs, filterByIdentity(ff.GetSourceIdentity(), sourceEndpoint)) + } + + if ff.GetDestinationIdentity() != nil { + fs = append(fs, filterByIdentity(ff.GetDestinationIdentity(), destinationEndpoint)) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/ip.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/ip.go new file mode 100644 index 000000000..fd5eda509 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/ip.go @@ -0,0 +1,139 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "context" + "fmt" + "net" + "strings" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func sourceIP(ev *v1.Event) string { + return ev.GetFlow().GetIP().GetSource() +} + +func destinationIP(ev *v1.Event) string { + return ev.GetFlow().GetIP().GetDestination() +} + +func filterByIPs(ips []string, getIP func(*v1.Event) string) (FilterFunc, error) { + // IP filter can either be an exact match (e.g. "1.1.1.1") or a CIDR range + // (e.g. "1.1.1.0/24"). Put them into 2 separate lists here. 
+ var addresses []string + var cidrs []*net.IPNet + for _, ip := range ips { + if strings.Contains(ip, "/") { + _, ipnet, err := net.ParseCIDR(ip) + if err != nil { + return nil, fmt.Errorf("invalid CIDR in filter: %q", ip) + } + cidrs = append(cidrs, ipnet) + } else { + if net.ParseIP(ip) == nil { + return nil, fmt.Errorf("invalid IP address in filter: %q", ip) + } + addresses = append(addresses, ip) + } + } + + return func(ev *v1.Event) bool { + eventIP := getIP(ev) + if eventIP == "" { + return false + } + + for _, ip := range addresses { + if ip == eventIP { + return true + } + } + + if len(cidrs) > 0 { + parsedIP := net.ParseIP(eventIP) + for _, cidr := range cidrs { + if cidr.Contains(parsedIP) { + return true + } + } + } + + return false + }, nil +} + +// IPFilter implements IP addressing filtering for the source and destination +// address +type IPFilter struct{} + +// OnBuildFilter builds an IP address filter +func (f *IPFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetSourceIp() != nil { + ipf, err := filterByIPs(ff.GetSourceIp(), sourceIP) + if err != nil { + return nil, err + } + fs = append(fs, ipf) + } + + if ff.GetDestinationIp() != nil { + ipf, err := filterByIPs(ff.GetDestinationIp(), destinationIP) + if err != nil { + return nil, err + } + fs = append(fs, ipf) + } + + return fs, nil +} + +func filterByIPVersion(ipver []flowpb.IPVersion) (FilterFunc, error) { + return func(ev *v1.Event) bool { + flow := ev.GetFlow() + if flow == nil { + return false + } + ver := flow.GetIP().GetIpVersion() + for _, v := range ipver { + if v == ver { + return true + } + } + return false + }, nil +} + +// IPVersionFilter implements IP version based filtering +type IPVersionFilter struct{} + +// OnBuildFilter builds an IP version filter +func (f *IPVersionFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetIpVersion() != nil { + pf, err := filterByIPVersion(ff.GetIpVersion()) + if err != nil { + return nil, err + } + fs = append(fs, pf) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/k8s.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/k8s.go new file mode 100644 index 000000000..696ba71ef --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/k8s.go @@ -0,0 +1,124 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filters + +import ( + "context" + "fmt" + "strings" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/cilium/pkg/hubble/k8s" +) + +func sourcePod(ev *v1.Event) (ns, pod string) { + ep := ev.GetFlow().GetSource() + return ep.GetNamespace(), ep.GetPodName() +} + +func destinationPod(ev *v1.Event) (ns, pod string) { + ep := ev.GetFlow().GetDestination() + return ep.GetNamespace(), ep.GetPodName() +} + +func sourceService(ev *v1.Event) (ns, svc string) { + s := ev.GetFlow().GetSourceService() + return s.GetNamespace(), s.GetName() +} + +func destinationService(ev *v1.Event) (ns, svc string) { + s := ev.GetFlow().GetDestinationService() + return s.GetNamespace(), s.GetName() +} + +func filterByNamespacedName(names []string, getName func(*v1.Event) (ns, name string)) (FilterFunc, error) { + type nameFilter struct{ ns, prefix string } + nameFilters := make([]nameFilter, 0, len(names)) + for _, name := range names { + ns, prefix := k8s.ParseNamespaceName(name) + if ns == "" && prefix == "" { + return nil, fmt.Errorf("invalid filter, must be [namespace/][], got %q", name) + } + nameFilters = append(nameFilters, nameFilter{ns, prefix}) + } + + return func(ev *v1.Event) bool { + eventNs, eventName := getName(ev) + if eventNs == "" && eventName == "" { + return false + } + + for _, f := range nameFilters { + if (f.prefix == "" || strings.HasPrefix(eventName, f.prefix)) && f.ns == eventNs { + return true + } + } + + return false + }, nil +} + +// PodFilter implements filtering based on Kubernetes pod names +type PodFilter struct{} + +// OnBuildFilter builds a Kubernetes pod name filter +func (p *PodFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetSourcePod() != nil { + pf, err := filterByNamespacedName(ff.GetSourcePod(), sourcePod) + if err != nil { + return nil, err + } + fs = append(fs, pf) + } + + if ff.GetDestinationPod() != nil { + pf, err := filterByNamespacedName(ff.GetDestinationPod(), destinationPod) + if err != nil { + return nil, err + } + fs = append(fs, pf) + } + + return fs, nil +} + +// ServiceFilter implements filtering based on Kubernetes service names +type ServiceFilter struct{} + +// OnBuildFilter builds a Kubernetes service name filter +func (s *ServiceFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetSourceService() != nil { + ssf, err := filterByNamespacedName(ff.GetSourceService(), sourceService) + if err != nil { + return nil, fmt.Errorf("invalid source service filter: %v", err) + } + fs = append(fs, ssf) + } + + if ff.GetDestinationService() != nil { + dsf, err := filterByNamespacedName(ff.GetDestinationService(), destinationService) + if err != nil { + return nil, fmt.Errorf("invalid destination service filter: %v", err) + } + fs = append(fs, dsf) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/labels.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/labels.go new file mode 100644 index 000000000..fc5abf402 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/labels.go @@ -0,0 +1,105 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
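filterByNamespacedName treats each entry as a namespace plus an optional name prefix: the namespace must match exactly, the name only has to share the prefix, and an empty prefix matches every name in that namespace. A self-contained sketch of what that means for a source-pod whitelist (the pod and namespace names are made up):

```go
package main

import (
	"context"
	"fmt"

	flowpb "github.com/cilium/cilium/api/v1/flow"
	v1 "github.com/cilium/cilium/pkg/hubble/api/v1"
	"github.com/cilium/cilium/pkg/hubble/filters"
)

func main() {
	// "kube-system/" keeps every pod in kube-system, "default/nginx" keeps
	// pods in default whose name starts with "nginx".
	allow, err := filters.BuildFilterList(context.Background(),
		[]*flowpb.FlowFilter{{SourcePod: []string{"kube-system/", "default/nginx"}}},
		filters.DefaultFilters)
	if err != nil {
		panic(err)
	}

	ev := func(ns, pod string) *v1.Event {
		return &v1.Event{Event: &flowpb.Flow{Source: &flowpb.Endpoint{Namespace: ns, PodName: pod}}}
	}

	fmt.Println(filters.Apply(allow, nil, ev("kube-system", "coredns-12345"))) // true
	fmt.Println(filters.Apply(allow, nil, ev("default", "nginx-6b7f8")))       // true
	fmt.Println(filters.Apply(allow, nil, ev("default", "redis-0")))           // false
}
```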
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "context" + "fmt" + "regexp" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + k8sLabels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" + ciliumLabels "github.com/cilium/cilium/pkg/labels" +) + +func sourceLabels(ev *v1.Event) k8sLabels.Labels { + labels := ev.GetFlow().GetSource().GetLabels() + return ciliumLabels.ParseLabelArrayFromArray(labels) +} + +func destinationLabels(ev *v1.Event) k8sLabels.Labels { + labels := ev.GetFlow().GetDestination().GetLabels() + return ciliumLabels.ParseLabelArrayFromArray(labels) +} + +var ( + labelSelectorWithColon = regexp.MustCompile(`([^,]\s*[a-z0-9-]+):([a-z0-9-]+)`) +) + +func parseSelector(selector string) (k8sLabels.Selector, error) { + // ciliumLabels.LabelArray extends the k8sLabels.Selector logic with + // support for Cilium source prefixes such as "k8s:foo" or "any:bar". + // It does this by treating the string before the first dot as the source + // prefix, i.e. `k8s.foo` is treated like `k8s:foo`. This translation is + // needed because k8sLabels.Selector does not support colons in label names. + // + // We do not want to expose this implementation detail to the user, + // therefore we translate any user-specified source prefixes by + // replacing colon-based source prefixes in labels with dot-based prefixes, + // i.e. "k8s:foo in (bar, baz)" becomes "k8s.foo in (bar, baz)". + + translated := labelSelectorWithColon.ReplaceAllString(selector, "${1}.${2}") + return k8sLabels.Parse(translated) +} + +// FilterByLabelSelectors returns a FilterFunc. The FilterFunc returns true if and only if any of the +// specified selectors select the event. The caller specifies how to extract labels from the event. 
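Concretely, the rewrite performed by parseSelector only touches the source prefix, so a selector written the way labels appear on flows becomes valid input for k8sLabels.Parse. A tiny illustration of just that substitution (the selector string is an arbitrary example):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	labelSelectorWithColon := regexp.MustCompile(`([^,]\s*[a-z0-9-]+):([a-z0-9-]+)`)

	selector := "k8s:app in (frontend, backend), reserved:host"
	fmt.Println(labelSelectorWithColon.ReplaceAllString(selector, "${1}.${2}"))
	// prints: k8s.app in (frontend, backend), reserved.host
}
```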
+func FilterByLabelSelectors(labelSelectors []string, getLabels func(*v1.Event) k8sLabels.Labels) (FilterFunc, error) { + selectors := make([]k8sLabels.Selector, 0, len(labelSelectors)) + for _, selector := range labelSelectors { + s, err := parseSelector(selector) + if err != nil { + return nil, err + } + selectors = append(selectors, s) + } + + return func(ev *v1.Event) bool { + labels := getLabels(ev) + for _, selector := range selectors { + if selector.Matches(labels) { + return true + } + } + return false + }, nil +} + +// LabelsFilter implements filtering based on labels +type LabelsFilter struct{} + +// OnBuildFilter builds a labels filter +func (l *LabelsFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetSourceLabel() != nil { + slf, err := FilterByLabelSelectors(ff.GetSourceLabel(), sourceLabels) + if err != nil { + return nil, fmt.Errorf("invalid source label filter: %v", err) + } + fs = append(fs, slf) + } + + if ff.GetDestinationLabel() != nil { + dlf, err := FilterByLabelSelectors(ff.GetDestinationLabel(), destinationLabels) + if err != nil { + return nil, fmt.Errorf("invalid destination label filter: %v", err) + } + fs = append(fs, dlf) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/nodename.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/nodename.go new file mode 100644 index 000000000..522a8eaec --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/nodename.go @@ -0,0 +1,63 @@ +// Copyright 2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "context" + "strings" + + flowpb "github.com/cilium/cilium/api/v1/flow" + ciliumDefaults "github.com/cilium/cilium/pkg/defaults" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +// filterByNodeNames returns a function that filters flow events based on the +// node name. +func filterByNodeNames(nodeNames []string) (FilterFunc, error) { + nodeNameRegexp, err := compileNodeNamePattern(nodeNames) + if err != nil { + return nil, err + } + + return func(ev *v1.Event) bool { + nodeName := ev.GetFlow().GetNodeName() + if nodeName == "" { + return false + } + // ensure that the node name always includes a cluster name + if strings.IndexByte(nodeName, '/') == -1 { + nodeName = ciliumDefaults.ClusterName + "/" + nodeName + } + return nodeNameRegexp.MatchString(nodeName) + }, nil +} + +// A NodeNameFilter filters on node name. +type NodeNameFilter struct{} + +// OnBuildFilter builds a node name filter. 
+func (n *NodeNameFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetNodeName() != nil { + nodeNameF, err := filterByNodeNames(ff.GetNodeName()) + if err != nil { + return nil, err + } + fs = append(fs, nodeNameF) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/patterns.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/patterns.go new file mode 100644 index 000000000..986c017a7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/patterns.go @@ -0,0 +1,146 @@ +// Copyright 2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +var ( + // fqdnRegexpStr matches an FQDN, inluding underscores. + // FIXME this should not match components that begin or end with hyphens, e.g. -foo- + fqdnRegexpStr = `(?:[-0-9_a-z]+(?:\.[-0-9_a-z]+)*)` + _ = regexp.MustCompile(fqdnRegexpStr) // compile regexp to ensure that it is valid + + errEmptyPattern = errors.New("empty pattern") + errMultipleTrailingDotsInPattern = errors.New("multiple trailing dots in pattern") + errTooManySlashesInPattern = errors.New("too many slashes in pattern") +) + +// canonicalizeFQDNPattern canonicalizes fqdnPattern by trimming space, trimming +// up to one trailing dot, and converting it to lowercase. +func canonicalizeFQDNPattern(fqdnPattern string) string { + fqdnPattern = strings.TrimSpace(fqdnPattern) + fqdnPattern = strings.TrimSuffix(fqdnPattern, ".") + fqdnPattern = strings.ToLower(fqdnPattern) + return fqdnPattern +} + +// appendFQDNPatternRegexp appends the regular expression equivalent to +// fqdnPattern to sb. +func appendFQDNPatternRegexp(sb *strings.Builder, fqdnPattern string) error { + fqdnPattern = canonicalizeFQDNPattern(fqdnPattern) + switch { + case fqdnPattern == "": + return errEmptyPattern + case strings.HasSuffix(fqdnPattern, "."): + return errMultipleTrailingDotsInPattern + } + for _, r := range fqdnPattern { + switch { + case r == '.': + sb.WriteString(`\.`) + case r == '*': + sb.WriteString(`[-.0-9a-z]*`) + case r == '-': + fallthrough + case '0' <= r && r <= '9': + fallthrough + case r == '_': + fallthrough + case 'a' <= r && r <= 'z': + sb.WriteRune(r) + default: + return fmt.Errorf("%q: invalid rune in pattern", r) + } + } + return nil +} + +// appendNodeNamePatternRegexp appends the regular expression equivalent to +// nodeNamePattern to sb. The returned regular expression matches node names +// that include a cluster name. +// +// Node name patterns consist of a cluster pattern element and a node pattern +// element separated by a forward slash. Each element is an FQDN pattern, with +// an empty pattern matching everything. If there is no forward slash then the +// pattern is treated as a node pattern and matches all clusters. 
+func appendNodeNamePatternRegexp(sb *strings.Builder, nodeNamePattern string) error { + if nodeNamePattern == "" { + return errEmptyPattern + } + clusterPattern := "" + nodePattern := "" + elems := strings.Split(nodeNamePattern, "/") + switch len(elems) { + case 1: + nodePattern = elems[0] + case 2: + clusterPattern = elems[0] + nodePattern = elems[1] + default: + return errTooManySlashesInPattern + } + + if clusterPattern == "" { + sb.WriteString(fqdnRegexpStr) + } else if err := appendFQDNPatternRegexp(sb, clusterPattern); err != nil { + return err + } + sb.WriteByte('/') + if nodePattern == "" { + sb.WriteString(fqdnRegexpStr) + } else if err := appendFQDNPatternRegexp(sb, nodePattern); err != nil { + return err + } + return nil +} + +// compileFQDNPattern returns a regular expression equivalent to the FQDN +// patterns in fqdnPatterns. +func compileFQDNPattern(fqdnPatterns []string) (*regexp.Regexp, error) { + var sb strings.Builder + sb.WriteString(`\A(?:`) + for i, fqdnPattern := range fqdnPatterns { + if i > 0 { + sb.WriteByte('|') + } + if err := appendFQDNPatternRegexp(&sb, fqdnPattern); err != nil { + return nil, err + } + } + sb.WriteString(`)\z`) + return regexp.Compile(sb.String()) +} + +// compileNodeNamePattern returns a regular expression equivalent to the node +// name patterns in nodeNamePatterns. +func compileNodeNamePattern(nodeNamePatterns []string) (*regexp.Regexp, error) { + sb := strings.Builder{} + sb.WriteString(`\A(?:`) + for i, nodeNamePattern := range nodeNamePatterns { + if i > 0 { + sb.WriteByte('|') + } + if err := appendNodeNamePatternRegexp(&sb, nodeNamePattern); err != nil { + return nil, err + } + } + sb.WriteString(`)\z`) + return regexp.Compile(sb.String()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/port.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/port.go new file mode 100644 index 000000000..829a20219 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/port.go @@ -0,0 +1,94 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
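compileNodeNamePattern above folds all node-name patterns into one anchored regular expression, so a flow is kept as soon as its cluster-qualified node name matches any single pattern; an omitted cluster or node element falls back to the generic FQDN pattern. A rough sketch of the resulting matcher, again written as if it sat in the filters package (the helper is unexported); the pattern strings are illustrative:

```go
package filters

import "fmt"

func exampleNodeNamePatterns() error {
	// "runtime*" has no "/", so it matches nodes named "runtime..." in any cluster;
	// "cluster-1/" names a cluster only, so it matches every node in cluster-1.
	re, err := compileNodeNamePattern([]string{"runtime*", "cluster-1/"})
	if err != nil {
		return err
	}
	fmt.Println(re.MatchString("default/runtime1"))   // true: node pattern matches, any cluster
	fmt.Println(re.MatchString("cluster-1/worker-0")) // true: cluster pattern matches, any node
	fmt.Println(re.MatchString("cluster-2/worker-0")) // false: neither pattern matches
	return nil
}
```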
+ +package filters + +import ( + "context" + "fmt" + "strconv" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func sourcePort(ev *v1.Event) (port uint16, ok bool) { + l4 := ev.GetFlow().GetL4() + if tcp := l4.GetTCP(); tcp != nil { + return uint16(tcp.SourcePort), true + } + if udp := l4.GetUDP(); udp != nil { + return uint16(udp.SourcePort), true + } + return 0, false +} + +func destinationPort(ev *v1.Event) (port uint16, ok bool) { + l4 := ev.GetFlow().GetL4() + if tcp := l4.GetTCP(); tcp != nil { + return uint16(tcp.DestinationPort), true + } + if udp := l4.GetUDP(); udp != nil { + return uint16(udp.DestinationPort), true + } + return 0, false +} + +func filterByPort(portStrs []string, getPort func(*v1.Event) (port uint16, ok bool)) (FilterFunc, error) { + ports := make([]uint16, 0, len(portStrs)) + for _, p := range portStrs { + port, err := strconv.ParseUint(p, 10, 16) + if err != nil { + return nil, fmt.Errorf("invalid port %q: %s", p, err) + } + ports = append(ports, uint16(port)) + } + + return func(ev *v1.Event) bool { + if port, ok := getPort(ev); ok { + for _, p := range ports { + if p == port { + return true + } + } + } + return false + }, nil +} + +// PortFilter implements filtering based on L4 port numbers +type PortFilter struct{} + +// OnBuildFilter builds a L4 port filter +func (p *PortFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetSourcePort() != nil { + spf, err := filterByPort(ff.GetSourcePort(), sourcePort) + if err != nil { + return nil, fmt.Errorf("invalid source port filter: %v", err) + } + fs = append(fs, spf) + } + + if ff.GetDestinationPort() != nil { + dpf, err := filterByPort(ff.GetDestinationPort(), destinationPort) + if err != nil { + return nil, fmt.Errorf("invalid destination port filter: %v", err) + } + fs = append(fs, dpf) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/protocol.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/protocol.go new file mode 100644 index 000000000..b90c93b8c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/protocol.go @@ -0,0 +1,105 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
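filterByPort parses every requested port string once, up front, and the returned FilterFunc then compares the parsed values against whichever TCP or UDP port the flow's L4 layer carries. A minimal sketch under the same in-package assumption; the flow literal assumes the generated oneof wrapper names such as flowpb.Layer4_UDP:

```go
package filters

import (
	flowpb "github.com/cilium/cilium/api/v1/flow"
	v1 "github.com/cilium/cilium/pkg/hubble/api/v1"
)

func examplePortFilter() (bool, error) {
	ff, err := filterByPort([]string{"53", "443"}, destinationPort)
	if err != nil {
		return false, err // e.g. a non-numeric or out-of-range port string
	}
	ev := &v1.Event{Event: &flowpb.Flow{
		L4: &flowpb.Layer4{
			Protocol: &flowpb.Layer4_UDP{UDP: &flowpb.UDP{SourcePort: 40000, DestinationPort: 53}},
		},
	}}
	// true: the UDP destination port 53 is in the allowed set {53, 443}.
	return ff(ev), nil
}
```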
+ +package filters + +import ( + "context" + "fmt" + "strings" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func filterByProtocol(protocols []string) (FilterFunc, error) { + var l4Protocols, l7Protocols []string + for _, p := range protocols { + proto := strings.ToLower(p) + switch proto { + case "icmp", "icmpv4", "icmpv6", "tcp", "udp": + l4Protocols = append(l4Protocols, proto) + case "dns", "http", "kafka": + l7Protocols = append(l7Protocols, proto) + default: + return nil, fmt.Errorf("unknown protocol: %q", p) + } + } + + return func(ev *v1.Event) bool { + l4 := ev.GetFlow().GetL4() + for _, proto := range l4Protocols { + switch proto { + case "icmp": + if l4.GetICMPv4() != nil || l4.GetICMPv6() != nil { + return true + } + case "icmpv4": + if l4.GetICMPv4() != nil { + return true + } + case "icmpv6": + if l4.GetICMPv6() != nil { + return true + } + case "tcp": + if l4.GetTCP() != nil { + return true + } + case "udp": + if l4.GetUDP() != nil { + return true + } + } + } + + l7 := ev.GetFlow().GetL7() + for _, proto := range l7Protocols { + switch proto { + case "dns": + if l7.GetDns() != nil { + return true + } + case "http": + if l7.GetHttp() != nil { + return true + } + case "kafka": + if l7.GetKafka() != nil { + return true + } + } + } + + return false + }, nil +} + +// ProtocolFilter implements filtering based on L4 protocol +type ProtocolFilter struct{} + +// OnBuildFilter builds a L4 protocol filter +func (p *ProtocolFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetProtocol() != nil { + pf, err := filterByProtocol(ff.GetProtocol()) + if err != nil { + return nil, fmt.Errorf("invalid protocol filter: %v", err) + } + fs = append(fs, pf) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/reply.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/reply.go new file mode 100644 index 000000000..778b83213 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/reply.go @@ -0,0 +1,63 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "context" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func filterByReplyField(replyParams []bool) FilterFunc { + return func(ev *v1.Event) bool { + if len(replyParams) == 0 { + return true + } + switch f := ev.Event.(type) { + case *flowpb.Flow: + // FIXME: For dropped flows, we handle `is_reply=unknown` as + // `is_reply=false`. This is for compatibility with older clients + // (such as Hubble UI) which assume this filter applies to the + // deprecated `reply` field, where dropped flows always have + // `reply=false`. 
+ if f.GetIsReply() == nil && f.GetVerdict() != flowpb.Verdict_DROPPED { + return false + } + + reply := f.GetIsReply().GetValue() + for _, replyParam := range replyParams { + if reply == replyParam { + return true + } + } + } + return false + } +} + +// ReplyFilter implements filtering for reply flows +type ReplyFilter struct{} + +// OnBuildFilter builds a reply filter +func (r *ReplyFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetReply() != nil { + fs = append(fs, filterByReplyField(ff.GetReply())) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/tcp.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/tcp.go new file mode 100644 index 000000000..ab905acfd --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/tcp.go @@ -0,0 +1,70 @@ +// Copyright 2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "context" + "fmt" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func filterByTCPFlags(flags []*flowpb.TCPFlags) (FilterFunc, error) { + return func(ev *v1.Event) bool { + l4tcp := ev.GetFlow().GetL4().GetTCP() + if l4tcp == nil { + return false + } + flowFlags := l4tcp.GetFlags() + // check if the TCP event has any of the flags mentioned in flowfilter + // example: if TCP event has flags SYN and ACK set and if the flowfilter + // only has SYN, then this event should be accepted by the filter. + for _, f := range flags { + switch { + case f.FIN && !flowFlags.FIN, + f.SYN && !flowFlags.SYN, + f.RST && !flowFlags.RST, + f.PSH && !flowFlags.PSH, + f.ACK && !flowFlags.ACK, + f.URG && !flowFlags.URG, + f.ECE && !flowFlags.ECE, + f.CWR && !flowFlags.CWR, + f.NS && !flowFlags.NS: + continue + } + return true + } + return false + }, nil +} + +// TCPFilter implements filtering based on TCP protocol header +type TCPFilter struct{} + +// OnBuildFilter builds a TCP protocol based filter +func (p *TCPFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetTcpFlags() != nil { + pf, err := filterByTCPFlags(ff.GetTcpFlags()) + if err != nil { + return nil, fmt.Errorf("invalid tcp flags filter: %w", err) + } + fs = append(fs, pf) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/verdict.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/verdict.go new file mode 100644 index 000000000..83e0875be --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/verdict.go @@ -0,0 +1,52 @@ +// Copyright 2019-2020 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
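filterByTCPFlags above implements an "any of the requested flag sets" match: a requested TCPFlags value matches when every flag it sets is also set on the flow, and additional flags on the flow are ignored, so filtering on SYN accepts both SYN and SYN+ACK segments. A short sketch, again written as if it sat in the filters package and assuming the generated Layer4_TCP wrapper; the flow data is illustrative:

```go
package filters

import (
	flowpb "github.com/cilium/cilium/api/v1/flow"
	v1 "github.com/cilium/cilium/pkg/hubble/api/v1"
)

func exampleTCPFlagsFilter() (bool, error) {
	// Keep flows that have at least SYN set.
	ff, err := filterByTCPFlags([]*flowpb.TCPFlags{{SYN: true}})
	if err != nil {
		return false, err
	}
	ev := &v1.Event{Event: &flowpb.Flow{
		L4: &flowpb.Layer4{
			Protocol: &flowpb.Layer4_TCP{TCP: &flowpb.TCP{
				Flags: &flowpb.TCPFlags{SYN: true, ACK: true},
			}},
		},
	}}
	// true: SYN is set on the flow; the extra ACK does not matter.
	return ff(ev), nil
}
```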
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filters + +import ( + "context" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func filterByVerdicts(vs []flowpb.Verdict) FilterFunc { + return func(ev *v1.Event) bool { + flow := ev.GetFlow() + if flow == nil { + return false + } + for _, verdict := range vs { + if verdict == flow.GetVerdict() { + return true + } + } + + return false + } +} + +// VerdictFilter implements filtering based on forwarding verdict +type VerdictFilter struct{} + +// OnBuildFilter builds a forwarding verdict filter +func (v *VerdictFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + + if ff.GetVerdict() != nil { + fs = append(fs, filterByVerdicts(ff.GetVerdict())) + } + + return fs, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/k8s/utils.go b/vendor/github.com/cilium/cilium/pkg/hubble/k8s/utils.go new file mode 100644 index 000000000..e60fa5842 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/k8s/utils.go @@ -0,0 +1,34 @@ +// Copyright 2019 Authors of Hubble +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "strings" +) + +// ParseNamespaceName returns the object's namespace and name. If namespace is +// not specified, the namespace "default" is returned. +func ParseNamespaceName(namespaceName string) (string, string) { + nsName := strings.Split(namespaceName, "/") + ns := nsName[0] + switch { + case len(nsName) > 1: + return ns, nsName[1] + case ns == "": + return "", "" + default: + return "default", ns + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/doc.go new file mode 100644 index 000000000..054427662 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/doc.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Kubernetes Authors. +// Copyright 2020 Authors of Cilium +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
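ParseNamespaceName is what gives the pod and service filters their namespace defaulting: a bare name is assumed to live in the "default" namespace, a string containing a slash is split into namespace and name (either side may be empty), and an empty input yields two empty strings. A small illustration of the three cases; the values are arbitrary:

```go
package k8s

import "fmt"

func parseNamespaceNameDemo() {
	// Output is namespace first, then name.
	fmt.Println(ParseNamespaceName("kube-system/coredns")) // "kube-system" "coredns"
	fmt.Println(ParseNamespaceName("coredns"))             // "default" "coredns"
	fmt.Println(ParseNamespaceName(""))                    // "" ""
}
```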
+ +// +deepequal-gen=package + +// Package labels implements a simple label system, parsing and matching +// selectors with sets of labels. +package labels diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/labels.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/labels.go new file mode 100644 index 000000000..4d1929986 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/labels.go @@ -0,0 +1,140 @@ +// Copyright 2014 The Kubernetes Authors. +// Copyright 2020-2021 Authors of Cilium +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "sort" + "strings" +) + +// Labels allows you to present labels independently from their storage. +type Labels interface { + // Has returns whether the provided label exists. + Has(label string) (exists bool) + + // Get returns the value for the provided label. + Get(label string) (value string) +} + +// Set is a map of label:value. It implements Labels. +type Set map[string]string + +// String returns all labels listed as a human readable string. +// Conveniently, exactly the format that ParseSelector takes. +func (ls Set) String() string { + selector := make([]string, 0, len(ls)) + for key, value := range ls { + selector = append(selector, key+"="+value) + } + // Sort for determinism. + sort.StringSlice(selector).Sort() + return strings.Join(selector, ",") +} + +// Has returns whether the provided label exists in the map. +func (ls Set) Has(label string) bool { + _, exists := ls[label] + return exists +} + +// Get returns the value in the map for the provided label. +func (ls Set) Get(label string) string { + return ls[label] +} + +// AsSelector converts labels into a selectors. It does not +// perform any validation, which means the server will reject +// the request if the Set contains invalid values. +func (ls Set) AsSelector() Selector { + return SelectorFromSet(ls) +} + +// AsValidatedSelector converts labels into a selectors. +// The Set is validated client-side, which allows to catch errors early. +func (ls Set) AsValidatedSelector() (Selector, error) { + return ValidatedSelectorFromSet(ls) +} + +// AsSelectorPreValidated converts labels into a selector, but +// assumes that labels are already validated and thus doesn't +// perform any validation. +// According to our measurements this is significantly faster +// in codepaths that matter at high scale. 
+func (ls Set) AsSelectorPreValidated() Selector { + return SelectorFromValidatedSet(ls) +} + +// FormatLabels converts label map into plain string +func FormatLabels(labelMap map[string]string) string { + l := Set(labelMap).String() + if l == "" { + l = "" + } + return l +} + +// Conflicts takes 2 maps and returns true if there a key match between +// the maps but the value doesn't match, and returns false in other cases +func Conflicts(labels1, labels2 Set) bool { + small := labels1 + big := labels2 + if len(labels2) < len(labels1) { + small = labels2 + big = labels1 + } + + for k, v := range small { + if val, match := big[k]; match { + if val != v { + return true + } + } + } + + return false +} + +// Merge combines given maps, and does not check for any conflicts +// between the maps. In case of conflicts, second map (labels2) wins +func Merge(labels1, labels2 Set) Set { + mergedMap := Set{} + + for k, v := range labels1 { + mergedMap[k] = v + } + for k, v := range labels2 { + mergedMap[k] = v + } + return mergedMap +} + +// Equals returns true if the given maps are equal +func Equals(labels1, labels2 Set) bool { + if len(labels1) != len(labels2) { + return false + } + + for k, v := range labels1 { + value, ok := labels2[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go new file mode 100644 index 000000000..8ea317870 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go @@ -0,0 +1,935 @@ +// Copyright 2014 The Kubernetes Authors. +// Copyright 2020-2021 Authors of Cilium +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" +) + +var ( + validRequirementOperators = []string{ + string(selection.In), string(selection.NotIn), + string(selection.Equals), string(selection.DoubleEquals), string(selection.NotEquals), + string(selection.Exists), string(selection.DoesNotExist), + string(selection.GreaterThan), string(selection.LessThan), + } +) + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Selector represents a label selector. +type Selector interface { + // Matches returns true if this selector matches the given set of labels. + Matches(Labels) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // String returns a human readable string that represents this selector. + String() string + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector + + // Requirements converts this interface into Requirements to expose + // more detailed selection information. 
+ // If there are querying parameters, it will return converted requirements and selectable=true. + // If this selector doesn't want to select anything, it will return selectable=false. + Requirements() (requirements Requirements, selectable bool) + + // Make a deep copy of the selector. + DeepCopySelector() Selector + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific label to be set, and if so returns the value it + // requires. + RequiresExactMatch(label string) (value string, found bool) +} + +// Everything returns a selector that matches all labels. +func Everything() Selector { + return internalSelector{} +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { + return "", false +} + +// Nothing returns a selector that matches no labels +func Nothing() Selector { + return nothingSelector{} +} + +// NewSelector returns a nil selector +func NewSelector() Selector { + return internalSelector(nil) +} + +type internalSelector []Requirement + +func (s internalSelector) DeepCopy() internalSelector { + if s == nil { + return nil + } + result := make([]Requirement, len(s)) + for i := range s { + s[i].DeepCopyInto(&result[i]) + } + return result +} + +func (s internalSelector) DeepCopySelector() Selector { + return s.DeepCopy() +} + +// ByKey sorts requirements by key to obtain deterministic parser +type ByKey []Requirement + +func (a ByKey) Len() int { return len(a) } + +func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } + +// Requirement contains values, a key, and an operator that relates the key and values. +// The zero value of Requirement is invalid. +// Requirement implements both set based match and exact match +// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +// +k8s:deepcopy-gen=true +type Requirement struct { + key string + operator selection.Operator + // In huge majority of cases we have at most one value here. + // It is generally faster to operate on a single-element slice + // than on a single-element map, so we have a slice here. + strValues []string +} + +// NewRequirement is the constructor for a Requirement. +// If any of these rules is violated, an error is returned: +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. +// (2) If the operator is In or NotIn, the values set must be non-empty. +// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// (4) If the operator is Exists or DoesNotExist, the value set must be empty. +// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// (6) The key is invalid due to its length, or sequence +// of characters. See validateLabelKey for more details. +// +// The empty string is a valid value in the input values set. 
+// Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList +func NewRequirement(key string, op selection.Operator, vals []string, opts ...field.PathOption) (*Requirement, error) { + var allErrs field.ErrorList + path := field.ToPath(opts...) + if err := validateLabelKey(key, path.Child("key")); err != nil { + allErrs = append(allErrs, err) + } + + valuePath := path.Child("values") + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'in', 'notin' operators, values set can't be empty")) + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "exact-match compatibility requires one single value")) + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "values set must be empty for exists and does not exist")) + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'Gt', 'Lt' operators, exactly one value is required")) + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + allErrs = append(allErrs, field.Invalid(valuePath.Index(i), vals[i], "for 'Gt', 'Lt' operators, the value must be an integer")) + } + } + default: + allErrs = append(allErrs, field.NotSupported(path.Child("operator"), op, validRequirementOperators)) + } + + for i := range vals { + if err := validateLabelValue(key, vals[i], valuePath.Index(i)); err != nil { + allErrs = append(allErrs, err) + } + } + return &Requirement{key: key, operator: op, strValues: vals}, allErrs.ToAggregate() +} + +func (r *Requirement) hasValue(value string) bool { + for i := range r.strValues { + if r.strValues[i] == value { + return true + } + } + return false +} + +// Matches returns true if the Requirement matches the input Labels. +// There is a match in the following cases: +// (1) The operator is Exists and Labels has the Requirement's key. +// (2) The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// (3) The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// (4) The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. +func (r *Requirement) Matches(ls Labels) bool { + switch r.operator { + case selection.In, selection.Equals, selection.DoubleEquals: + if !ls.Has(r.key) { + return false + } + return r.hasValue(ls.Get(r.key)) + case selection.NotIn, selection.NotEquals: + if !ls.Has(r.key) { + return true + } + return !r.hasValue(ls.Get(r.key)) + case selection.Exists: + return ls.Has(r.key) + case selection.DoesNotExist: + return !ls.Has(r.key) + case selection.GreaterThan, selection.LessThan: + if !ls.Has(r.key) { + return false + } + lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) + if err != nil { + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + return false + } + + // There should be only one strValue in r.strValues, and can be converted to an integer. 
+ if len(r.strValues) != 1 { + klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + return false + } + + var rValue int64 + for i := range r.strValues { + rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) + if err != nil { + klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + return false + } + } + return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) + default: + return false + } +} + +// Key returns requirement key +func (r *Requirement) Key() string { + return r.key +} + +// Operator returns requirement operator +func (r *Requirement) Operator() selection.Operator { + return r.operator +} + +// Values returns requirement values +func (r *Requirement) Values() sets.String { + ret := sets.String{} + for i := range r.strValues { + ret.Insert(r.strValues[i]) + } + return ret +} + +// Empty returns true if the internalSelector doesn't restrict selection space +func (s internalSelector) Empty() bool { + if s == nil { + return true + } + return len(s) == 0 +} + +// String returns a human-readable string that represents this +// Requirement. If called on an invalid Requirement, an error is +// returned. See NewRequirement for creating a valid Requirement. +func (r *Requirement) String() string { + var sb strings.Builder + sb.Grow( + // length of r.key + len(r.key) + + // length of 'r.operator' + 2 spaces for the worst case ('in' and 'notin') + len(r.operator) + 2 + + // length of 'r.strValues' slice times. Heuristically 5 chars per word + +5*len(r.strValues)) + if r.operator == selection.DoesNotExist { + sb.WriteString("!") + } + sb.WriteString(r.key) + + switch r.operator { + case selection.Equals: + sb.WriteString("=") + case selection.DoubleEquals: + sb.WriteString("==") + case selection.NotEquals: + sb.WriteString("!=") + case selection.In: + sb.WriteString(" in ") + case selection.NotIn: + sb.WriteString(" notin ") + case selection.GreaterThan: + sb.WriteString(">") + case selection.LessThan: + sb.WriteString("<") + case selection.Exists, selection.DoesNotExist: + return sb.String() + } + + switch r.operator { + case selection.In, selection.NotIn: + sb.WriteString("(") + } + if len(r.strValues) == 1 { + sb.WriteString(r.strValues[0]) + } else { // only > 1 since == 0 prohibited by NewRequirement + // normalizes value order on output, without mutating the in-memory selector representation + // also avoids normalization when it is not required, and ensures we do not mutate shared data + sb.WriteString(strings.Join(safeSort(r.strValues), ",")) + } + + switch r.operator { + case selection.In, selection.NotIn: + sb.WriteString(")") + } + return sb.String() +} + +// safeSort sorts input strings without modification +func safeSort(in []string) []string { + if sort.StringsAreSorted(in) { + return in + } + out := make([]string, len(in)) + copy(out, in) + sort.Strings(out) + return out +} + +// Add adds requirements to the selector. It copies the current selector returning a new one +func (s internalSelector) Add(reqs ...Requirement) Selector { + var ret internalSelector + for ix := range s { + ret = append(ret, s[ix]) + } + for _, r := range reqs { + ret = append(ret, r) + } + sort.Sort(ByKey(ret)) + return ret +} + +// Matches for a internalSelector returns true if all +// its Requirements match the input Labels. 
If any +// Requirement does not match, false is returned. +func (s internalSelector) Matches(l Labels) bool { + for ix := range s { + if matches := s[ix].Matches(l); !matches { + return false + } + } + return true +} + +func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true } + +// String returns a comma-separated string of all +// the internalSelector Requirements' human-readable strings. +func (s internalSelector) String() string { + var reqs []string + for ix := range s { + reqs = append(reqs, s[ix].String()) + } + return strings.Join(reqs, ",") +} + +// RequiresExactMatch introspects whether a given selector requires a single specific field +// to be set, and if so returns the value it requires. +func (s internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range s { + if s[ix].key == label { + switch s[ix].operator { + case selection.Equals, selection.DoubleEquals, selection.In: + if len(s[ix].strValues) == 1 { + return s[ix].strValues[0], true + } + } + return "", false + } + } + return "", false +} + +// Token represents constant definition for lexer token +type Token int + +const ( + // ErrorToken represents scan error + ErrorToken Token = iota + // EndOfStringToken represents end of string + EndOfStringToken + // ClosedParToken represents close parenthesis + ClosedParToken + // CommaToken represents the comma + CommaToken + // DoesNotExistToken represents logic not + DoesNotExistToken + // DoubleEqualsToken represents double equals + DoubleEqualsToken + // EqualsToken represents equal + EqualsToken + // GreaterThanToken represents greater than + GreaterThanToken + // IdentifierToken represents identifier, e.g. keys and values + IdentifierToken + // InToken represents in + InToken + // LessThanToken represents less than + LessThanToken + // NotEqualsToken represents not equal + NotEqualsToken + // NotInToken represents not in + NotInToken + // OpenParToken represents open parenthesis + OpenParToken +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]Token{ + ")": ClosedParToken, + ",": CommaToken, + "!": DoesNotExistToken, + "==": DoubleEqualsToken, + "=": EqualsToken, + ">": GreaterThanToken, + "in": InToken, + "<": LessThanToken, + "!=": NotEqualsToken, + "notin": NotInToken, + "(": OpenParToken, +} + +// ScannedItem contains the Token and the literal produced by the lexer. +type ScannedItem struct { + tok Token + literal string +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detects if the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', '!', '(', ')', ',', '>', '<': + return true + } + return false +} + +// Lexer represents the Lexer struct for label selector. 
+// It contains necessary informationt to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read returns the character currently lexed +// increment the position and check the buffer overflow +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token? + return val, s + } + return IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans string starting with special symbol. +// special symbol identify non literal operators. "!=", "==", "=" +func (l *Lexer) scanSpecialSymbol() (Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters +// returning the first non blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and the literal +// literal is meaningfull only for IdentifierToken token +func (l *Lexer) Lex() (tok Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIDOrKeyword() + } +} + +// Parser data structure contains the label selector parser data structure +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// ParserContext represents context during parsing: +// some literal for example 'in' and 'notin' can be +// recognized as operator for example 'x in (a)' but +// it can be recognized as value for example 'value in (in)' +type ParserContext int + +const ( + // KeyAndOperator represents key and operator + KeyAndOperator ParserContext = iota + // Values represents values + Values +) + +// lookahead func returns the current token and string. No increment of current position +func (p *Parser) lookahead(context ParserContext) (Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// consume returns current token and string. 
Increments the position +func (p *Parser) consume(context ParserContext) (Token, string) { + p.position++ + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// scan runs through the input string and stores the ScannedItem in an array +// Parser can now lookahead and consume the tokens +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == EndOfStringToken { + break + } + } +} + +// parse runs the left recursive descending algorithm +// on input string. It returns a list of Requirement objects. +func (p *Parser) parse() (internalSelector, error) { + p.scan() // init scannedItems + + var requirements internalSelector + for { + tok, lit := p.lookahead(Values) + switch tok { + case IdentifierToken, DoesNotExistToken: + r, err := p.parseRequirement() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + requirements = append(requirements, *r) + t, l := p.consume(Values) + switch t { + case EndOfStringToken: + return requirements, nil + case CommaToken: + t2, l2 := p.lookahead(Values) + if t2 != IdentifierToken && t2 != DoesNotExistToken { + return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) + } + case EndOfStringToken: + return requirements, nil + default: + return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) + } + } +} + +func (p *Parser) parseRequirement() (*Requirement, error) { + key, operator, err := p.parseKeyAndInferOperator() + if err != nil { + return nil, err + } + if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked + return NewRequirement(key, operator, []string{}) + } + operator, err = p.parseOperator() + if err != nil { + return nil, err + } + var values sets.String + switch operator { + case selection.In, selection.NotIn: + values, err = p.parseValues() + case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: + values, err = p.parseExactValue() + } + if err != nil { + return nil, err + } + return NewRequirement(key, operator, values.List()) + +} + +// parseKeyAndInferOperator parses literals. 
+// in case of no operator '!, in, notin, ==, =, !=' are found +// the 'exists' operator is inferred +func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { + var operator selection.Operator + tok, literal := p.consume(Values) + if tok == DoesNotExistToken { + operator = selection.DoesNotExist + tok, literal = p.consume(Values) + } + if tok != IdentifierToken { + err := fmt.Errorf("found '%s', expected: identifier", literal) + return "", "", err + } + if err := validateLabelKey(literal, nil); err != nil { + return "", "", err + } + if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { + if operator != selection.DoesNotExist { + operator = selection.Exists + } + } + return literal, operator, nil +} + +// parseOperator returns operator and eventually matchType +// matchType can be exact +func (p *Parser) parseOperator() (op selection.Operator, err error) { + tok, lit := p.consume(KeyAndOperator) + switch tok { + // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator + case InToken: + op = selection.In + case EqualsToken: + op = selection.Equals + case DoubleEqualsToken: + op = selection.DoubleEquals + case GreaterThanToken: + op = selection.GreaterThan + case LessThanToken: + op = selection.LessThan + case NotInToken: + op = selection.NotIn + case NotEqualsToken: + op = selection.NotEquals + default: + return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) + } + return op, nil +} + +// parseValues parses the values for set based matching (x,y,z) +func (p *Parser) parseValues() (sets.String, error) { + tok, lit := p.consume(Values) + if tok != OpenParToken { + return nil, fmt.Errorf("found '%s' expected: '('", lit) + } + tok, lit = p.lookahead(Values) + switch tok { + case IdentifierToken, CommaToken: + s, err := p.parseIdentifiersList() // handles general cases + if err != nil { + return s, err + } + if tok, _ = p.consume(Values); tok != ClosedParToken { + return nil, fmt.Errorf("found '%s', expected: ')'", lit) + } + return s, nil + case ClosedParToken: // handles "()" + p.consume(Values) + return sets.NewString(""), nil + default: + return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) + } +} + +// parseIdentifiersList parses a (possibly empty) list of +// of comma separated (possibly empty) identifiers +func (p *Parser) parseIdentifiersList() (sets.String, error) { + s := sets.NewString() + for { + tok, lit := p.consume(Values) + switch tok { + case IdentifierToken: + s.Insert(lit) + tok2, lit2 := p.lookahead(Values) + switch tok2 { + case CommaToken: + continue + case ClosedParToken: + return s, nil + default: + return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2) + } + case CommaToken: // handled here since we can have "(," + if s.Len() == 0 { + s.Insert("") // to handle (, + } + tok2, _ := p.lookahead(Values) + if tok2 == ClosedParToken { + s.Insert("") // to handle ,) Double "" removed by StringSet + return s, nil + } + if tok2 == CommaToken { + p.consume(Values) + s.Insert("") // to handle ,, Double "" removed by StringSet + } + default: // it can be operator + return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit) + } + } +} + +// parseExactValue parses the only value for exact match style +func (p *Parser) parseExactValue() (sets.String, error) { + s := sets.NewString() + tok, _ := p.lookahead(Values) + if tok == EndOfStringToken || tok == CommaToken { + s.Insert("") + return s, nil + } + tok, lit := p.consume(Values) + if 
tok == IdentifierToken { + s.Insert(lit) + return s, nil + } + return nil, fmt.Errorf("found '%s', expected: identifier", lit) +} + +// Parse takes a string representing a selector and returns a selector +// object, or an error. This parsing function differs from ParseSelector +// as they parse different selectors with different syntaxes. +// The input will cause an error if it does not follow this form: +// +// ::= | "," +// ::= [!] KEY [ | ] +// ::= "" | +// ::= | +// ::= "notin" +// ::= "in" +// ::= "(" ")" +// ::= VALUE | VALUE "," +// ::= ["="|"=="|"!="] VALUE +// +// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters. +// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters. +// Delimiter is white space: (' ', '\t') +// Example of valid syntax: +// "x in (foo,,baz),y,z notin ()" +// +// Note: +// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// (2) Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// (3) The empty string is a valid VALUE +// (4) A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// (5) A requirement with just !KEY requires that the KEY not exist. +// +func Parse(selector string) (Selector, error) { + parsedSelector, err := parse(selector) + if err == nil { + return parsedSelector, nil + } + return nil, err +} + +// parse parses the string representation of the selector and returns the internalSelector struct. +// The callers of this method can then decide how to return the internalSelector struct to their +// callers. This function has two callers now, one returns a Selector interface and the other +// returns a list of requirements. +func parse(selector string) (internalSelector, error) { + p := &Parser{l: &Lexer{s: selector, pos: 0}} + items, err := p.parse() + if err != nil { + return nil, err + } + sort.Sort(ByKey(items)) // sort to grant determistic parsing + return internalSelector(items), err +} + +func validateLabelKey(k string, path *field.Path) *field.Error { + if errs := validation.IsQualifiedName(k); len(errs) != 0 { + return field.Invalid(path, k, strings.Join(errs, "; ")) + } + return nil +} + +func validateLabelValue(k, v string, path *field.Path) *field.Error { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return field.Invalid(path.Key(k), v, strings.Join(errs, "; ")) + } + return nil +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil and empty Sets are considered equivalent to Everything(). +// It does not perform any validation, which means the server will reject +// the request if the Set contains invalid values. +func SelectorFromSet(ls Set) Selector { + return SelectorFromValidatedSet(ls) +} + +// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set. A +// nil and empty Sets are considered equivalent to Everything(). +// The Set is validated client-side, which allows to catch errors early. 
+func ValidatedSelectorFromSet(ls Set) (Selector, error) { + if ls == nil || len(ls) == 0 { + return internalSelector{}, nil + } + requirements := make([]Requirement, 0, len(ls)) + for label, value := range ls { + r, err := NewRequirement(label, selection.Equals, []string{value}) + if err != nil { + return nil, err + } + requirements = append(requirements, *r) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return internalSelector(requirements), nil +} + +// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. +// A nil and empty Sets are considered equivalent to Everything(). +// It assumes that Set is already validated and doesn't do any validation. +func SelectorFromValidatedSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + requirements := make([]Requirement, 0, len(ls)) + for label, value := range ls { + requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return internalSelector(requirements) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepcopy.go new file mode 100644 index 000000000..54e25c66f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepcopy.go @@ -0,0 +1,40 @@ +// +build !ignore_autogenerated + +// Copyright 2017-2021 Authors of Cilium +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package labels + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Requirement) DeepCopyInto(out *Requirement) { + *out = *in + if in.strValues != nil { + in, out := &in.strValues, &out.strValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement. +func (in *Requirement) DeepCopy() *Requirement { + if in == nil { + return nil + } + out := new(Requirement) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepequal.go new file mode 100644 index 000000000..53ecb7e5e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepequal.go @@ -0,0 +1,189 @@ +// +build !ignore_autogenerated + +// Copyright 2017-2021 Authors of Cilium +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
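The selector implementation above gives the label filters standard Kubernetes selector semantics (=, ==, !=, in, notin, exists, !key, and numeric Gt/Lt) over a flat Set of labels; the Hubble-side parseSelector shown earlier in this diff only rewrites source prefixes such as "k8s:app" into "k8s.app" before handing the string to Parse, because label keys cannot contain colons. A minimal, self-contained sketch of the round trip from selector string to match decision, using only the exported Parse, Set, and Matches shown above; the label names and values are illustrative:

```go
package main

import (
	"fmt"

	k8sLabels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
)

func main() {
	sel, err := k8sLabels.Parse("k8s.app in (coredns, kube-dns), !k8s.io.cilium.reserved")
	if err != nil {
		panic(err)
	}
	podLabels := k8sLabels.Set{"k8s.app": "coredns", "k8s.ns": "kube-system"}
	// true: "k8s.app" is in the requested value set and the negated key is absent.
	fmt.Println(sel.Matches(podLabels))
}
```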
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by main. DO NOT EDIT. + +package labels + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *ByKey) DeepEqual(other *ByKey) bool { + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Lexer) DeepEqual(other *Lexer) bool { + if other == nil { + return false + } + + if in.s != other.s { + return false + } + if in.pos != other.pos { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Parser) DeepEqual(other *Parser) bool { + if other == nil { + return false + } + + if (in.l == nil) != (other.l == nil) { + return false + } else if in.l != nil { + if !in.l.DeepEqual(other.l) { + return false + } + } + + if ((in.scannedItems != nil) && (other.scannedItems != nil)) || ((in.scannedItems == nil) != (other.scannedItems == nil)) { + in, other := &in.scannedItems, &other.scannedItems + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if in.position != other.position { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Requirement) DeepEqual(other *Requirement) bool { + if other == nil { + return false + } + + if in.key != other.key { + return false + } + if in.operator != other.operator { + return false + } + if ((in.strValues != nil) && (other.strValues != nil)) || ((in.strValues == nil) != (other.strValues == nil)) { + in, other := &in.strValues, &other.strValues + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Requirements) DeepEqual(other *Requirements) bool { + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
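A brief sketch of how the generated DeepCopy and DeepEqual helpers behave for Requirement. It assumes NewRequirement (called by ValidatedSelectorFromSet above) and the vendored selection package, which appears later in this diff, keep their upstream signatures.

package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
	"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection"
)

func main() {
	r1, err := labels.NewRequirement("app", selection.In, []string{"hubble", "cilium"})
	if err != nil {
		panic(err)
	}

	// DeepCopy returns an independent copy, including its own strValues slice.
	r2 := r1.DeepCopy()
	fmt.Println(r1.DeepEqual(r2)) // true: key, operator and values all match

	// A requirement with a different operator and no values compares unequal.
	r3, err := labels.NewRequirement("app", selection.Exists, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(r1.DeepEqual(r3)) // false
}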
+func (in *ScannedItem) DeepEqual(other *ScannedItem) bool { + if other == nil { + return false + } + + if in.tok != other.tok { + return false + } + if in.literal != other.literal { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Set) DeepEqual(other *Set) bool { + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection/operator.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection/operator.go new file mode 100644 index 000000000..298f798c4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection/operator.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selection + +// Operator represents a key/field's relationship to value(s). +// See labels.Requirement and fields.Requirement for more details. +type Operator string + +const ( + DoesNotExist Operator = "!" + Equals Operator = "=" + DoubleEquals Operator = "==" + In Operator = "in" + NotEquals Operator = "!=" + NotIn Operator = "notin" + Exists Operator = "exists" + GreaterThan Operator = "gt" + LessThan Operator = "lt" +) diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go new file mode 100644 index 000000000..5d4d6250a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors implements various utility functions and types around errors. 
+package errors // import "k8s.io/apimachinery/pkg/util/errors" diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go new file mode 100644 index 000000000..1f5a04fd4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -0,0 +1,249 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// MessageCountMap contains occurrence for each error message. +type MessageCountMap map[string]int + +// Aggregate represents an object that contains multiple errors, but does not +// necessarily have singular semantic meaning. +// The aggregate can be used with `errors.Is()` to check for the occurrence of +// a specific error type. +// Errors.As() is not supported, because the caller presumably cares about a +// specific error of potentially multiple that match the given type. +type Aggregate interface { + error + Errors() []error + Is(error) bool +} + +// NewAggregate converts a slice of errors into an Aggregate interface, which +// is itself an implementation of the error interface. If the slice is empty, +// this returns nil. +// It will check if any of the element of input error list is nil, to avoid +// nil pointer panic when call Error(). +func NewAggregate(errlist []error) Aggregate { + if len(errlist) == 0 { + return nil + } + // In case of input error list contains nil + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregate []error + +// Error is part of the error interface. +func (agg aggregate) Error() string { + if len(agg) == 0 { + // This should never happen, really. + return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + seenerrs := sets.NewString() + result := "" + agg.visit(func(err error) bool { + msg := err.Error() + if seenerrs.Has(msg) { + return false + } + seenerrs.Insert(msg) + if len(seenerrs) > 1 { + result += ", " + } + result += msg + return false + }) + if len(seenerrs) == 1 { + return result + } + return "[" + result + "]" +} + +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + if match := err.visit(f); match { + return match + } + case Aggregate: + for _, nestedErr := range err.Errors() { + if match := f(nestedErr); match { + return match + } + } + default: + if match := f(err); match { + return match + } + } + } + + return false +} + +// Errors is part of the Aggregate interface. 
+func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Matcher is used to match errors. Returns true if the error matches. +type Matcher func(error) bool + +// FilterOut removes all errors that match any of the matchers from the input +// error. If the input is a singular error, only that error is tested. If the +// input implements the Aggregate interface, the list of errors will be +// processed recursively. +// +// This can be used, for example, to remove known-OK errors (such as io.EOF or +// os.PathNotFound) from a list of errors. +func FilterOut(err error, fns ...Matcher) error { + if err == nil { + return nil + } + if agg, ok := err.(Aggregate); ok { + return NewAggregate(filterErrors(agg.Errors(), fns...)) + } + if !matchesError(err, fns...) { + return err + } + return nil +} + +// matchesError returns true if any Matcher returns true +func matchesError(err error, fns ...Matcher) bool { + for _, fn := range fns { + if fn(err) { + return true + } + } + return false +} + +// filterErrors returns any errors (or nested errors, if the list contains +// nested Errors) for which all fns return false. If no errors +// remain a nil list is returned. The resulting slice will have all +// nested slices flattened as a side effect. +func filterErrors(list []error, fns ...Matcher) []error { + result := []error{} + for _, err := range list { + r := FilterOut(err, fns...) + if r != nil { + result = append(result, r) + } + } + return result +} + +// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary +// nesting, and flattens them all into a single Aggregate, recursively. +func Flatten(agg Aggregate) Aggregate { + result := []error{} + if agg == nil { + return nil + } + for _, err := range agg.Errors() { + if a, ok := err.(Aggregate); ok { + r := Flatten(a) + if r != nil { + result = append(result, r.Errors()...) + } + } else { + if err != nil { + result = append(result, err) + } + } + } + return NewAggregate(result) +} + +// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate +func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { + if m == nil { + return nil + } + result := make([]error, 0, len(m)) + for errStr, count := range m { + var countStr string + if count > 1 { + countStr = fmt.Sprintf(" (repeated %v times)", count) + } + result = append(result, fmt.Errorf("%v%v", errStr, countStr)) + } + return NewAggregate(result) +} + +// Reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate. +func Reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + +// AggregateGoroutines runs the provided functions in parallel, stuffing all +// non-nil errors into the returned Aggregate. +// Returns nil if all the functions complete successfully. 
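The Aggregate helpers above compose as follows; a minimal usage sketch (the error values and matcher are made up for illustration).

package main

import (
	"errors"
	"fmt"
	"io"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	inner := utilerrors.NewAggregate([]error{io.EOF, errors.New("disk full")})
	outer := utilerrors.NewAggregate([]error{inner, errors.New("timeout")})

	// Flatten collapses nested aggregates into a single flat list.
	flat := utilerrors.Flatten(outer)
	fmt.Println(len(flat.Errors())) // 3

	// FilterOut drops errors matched by any Matcher, e.g. the known-OK io.EOF.
	remaining := utilerrors.FilterOut(flat, func(err error) bool {
		return errors.Is(err, io.EOF)
	})
	fmt.Println(remaining) // [disk full, timeout]

	// Reduce unwraps a single-element aggregate back to the bare error.
	fmt.Println(utilerrors.Reduce(utilerrors.NewAggregate([]error{errors.New("only one")})))
}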
+func AggregateGoroutines(funcs ...func() error) Aggregate { + errChan := make(chan error, len(funcs)) + for _, f := range funcs { + go func(f func() error) { errChan <- f() }(f) + } + errs := make([]error, 0) + for i := 0; i < cap(errChan); i++ { + if err := <-errChan; err != nil { + errs = append(errs, err) + } + } + return NewAggregate(errs) +} + +// ErrPreconditionViolated is returned when the precondition is violated +var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go new file mode 100644 index 000000000..9bfa85d43 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +type Byte map[byte]Empty + +// NewByte creates a Byte from a list of values. +func NewByte(items ...byte) Byte { + ss := Byte{} + ss.Insert(items...) + return ss +} + +// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func ByteKeySet(theMap interface{}) Byte { + v := reflect.ValueOf(theMap) + ret := Byte{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(byte)) + } + return ret +} + +// Insert adds items to the set. +func (s Byte) Insert(items ...byte) Byte { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Byte) Delete(items ...byte) Byte { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Byte) Has(item byte) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Byte) HasAll(items ...byte) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Byte) HasAny(items ...byte) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Byte) Difference(s2 Byte) Byte { + result := NewByte() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Byte) Union(s2 Byte) Byte { + result := NewByte() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Byte) Intersection(s2 Byte) Byte { + var walk, other Byte + result := NewByte() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Byte) IsSuperset(s2 Byte) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Byte) Equal(s2 Byte) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfByte []byte + +func (s sortableSliceOfByte) Len() int { return len(s) } +func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } +func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted byte slice. +func (s Byte) List() []byte { + res := make(sortableSliceOfByte, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []byte(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Byte) UnsortedList() []byte { + res := make([]byte, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Byte) PopAny() (byte, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue byte + return zeroValue, false +} + +// Len returns the size of the set. +func (s Byte) Len() int { + return len(s) +} + +func lessByte(lhs, rhs byte) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go new file mode 100644 index 000000000..b152a0bf0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// Package sets has auto-generated set types. +package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go new file mode 100644 index 000000000..e11e622c5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. 
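All of the generated set types in this package (Byte here, and the Int, Int32, Int64 and String variants below) expose the same surface, so a single usage sketch covers them.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	a := sets.NewByte('a', 'b', 'c')
	b := sets.NewByte('b', 'c', 'd')

	fmt.Println(a.Has('a'))               // true
	fmt.Println(a.Difference(b).List())   // [97] (only 'a')
	fmt.Println(a.Union(b).Len())         // 4
	fmt.Println(a.Intersection(b).List()) // [98 99] ('b' and 'c')
	fmt.Println(a.IsSuperset(sets.NewByte('a', 'b'))) // true

	// ByteKeySet lifts the keys of a map[byte]T into a set via reflection.
	keys := sets.ByteKeySet(map[byte]string{'x': "", 'y': ""})
	fmt.Println(keys.List()) // [120 121]
}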
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go new file mode 100644 index 000000000..88bd70967 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +type Int map[int]Empty + +// NewInt creates a Int from a list of values. +func NewInt(items ...int) Int { + ss := Int{} + ss.Insert(items...) + return ss +} + +// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func IntKeySet(theMap interface{}) Int { + v := reflect.ValueOf(theMap) + ret := Int{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int)) + } + return ret +} + +// Insert adds items to the set. +func (s Int) Insert(items ...int) Int { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int) Delete(items ...int) Int { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int) Has(item int) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int) HasAll(items ...int) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. 
+func (s Int) HasAny(items ...int) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int) Difference(s2 Int) Int { + result := NewInt() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int) Union(s2 Int) Int { + result := NewInt() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int) Intersection(s2 Int) Int { + var walk, other Int + result := NewInt() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int) IsSuperset(s2 Int) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int) Equal(s2 Int) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt []int + +func (s sortableSliceOfInt) Len() int { return len(s) } +func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } +func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int slice. +func (s Int) List() []int { + res := make(sortableSliceOfInt, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int) UnsortedList() []int { + res := make([]int, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int) PopAny() (int, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int) Len() int { + return len(s) +} + +func lessInt(lhs, rhs int) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go new file mode 100644 index 000000000..96a485554 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) Int32 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) Int32 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. 
+// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go new file mode 100644 index 000000000..b375a1b06 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +type Int64 map[int64]Empty + +// NewInt64 creates a Int64 from a list of values. +func NewInt64(items ...int64) Int64 { + ss := Int64{} + ss.Insert(items...) + return ss +} + +// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int64KeySet(theMap interface{}) Int64 { + v := reflect.ValueOf(theMap) + ret := Int64{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int64)) + } + return ret +} + +// Insert adds items to the set. +func (s Int64) Insert(items ...int64) Int64 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int64) Delete(items ...int64) Int64 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int64) Has(item int64) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int64) HasAll(items ...int64) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. 
+func (s Int64) HasAny(items ...int64) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int64) Difference(s2 Int64) Int64 { + result := NewInt64() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int64) Union(s2 Int64) Int64 { + result := NewInt64() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int64) Intersection(s2 Int64) Int64 { + var walk, other Int64 + result := NewInt64() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int64) IsSuperset(s2 Int64) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int64) Equal(s2 Int64) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt64 []int64 + +func (s sortableSliceOfInt64) Len() int { return len(s) } +func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } +func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int64 slice. +func (s Int64) List() []int64 { + res := make(sortableSliceOfInt64, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int64(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int64) UnsortedList() []int64 { + res := make([]int64, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int64) PopAny() (int64, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int64 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int64) Len() int { + return len(s) +} + +func lessInt64(lhs, rhs int64) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go new file mode 100644 index 000000000..e6f37db88 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) String { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) String { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. 
+// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go new file mode 100644 index 000000000..0cd5d6577 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -0,0 +1,272 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Error is an implementation of the 'error' interface, which represents a +// field-level validation error. +type Error struct { + Type ErrorType + Field string + BadValue interface{} + Detail string +} + +var _ error = &Error{} + +// Error implements the error interface. +func (v *Error) Error() string { + return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) +} + +// ErrorBody returns the error message without the field name. This is useful +// for building nice-looking higher-level error reporting. 
+func (v *Error) ErrorBody() string { + var s string + switch v.Type { + case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: + s = v.Type.String() + default: + value := v.BadValue + valueType := reflect.TypeOf(value) + if value == nil || valueType == nil { + value = "null" + } else if valueType.Kind() == reflect.Ptr { + if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { + value = "null" + } else { + value = reflectValue.Elem().Interface() + } + } + switch t := value.(type) { + case int64, int32, float64, float32, bool: + // use simple printer for simple types + s = fmt.Sprintf("%s: %v", v.Type, value) + case string: + s = fmt.Sprintf("%s: %q", v.Type, t) + case fmt.Stringer: + // anything that defines String() is better than raw struct + s = fmt.Sprintf("%s: %s", v.Type, t.String()) + default: + // fallback to raw struct + // TODO: internal types have panic guards against json.Marshalling to prevent + // accidental use of internal types in external serialized form. For now, use + // %#v, although it would be better to show a more expressive output in the future + s = fmt.Sprintf("%s: %#v", v.Type, value) + } + } + if len(v.Detail) != 0 { + s += fmt.Sprintf(": %s", v.Detail) + } + return s +} + +// ErrorType is a machine readable value providing more detail about why +// a field is invalid. These values are expected to match 1-1 with +// CauseType in api/types.go. +type ErrorType string + +// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. +const ( + // ErrorTypeNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). See NotFound(). + ErrorTypeNotFound ErrorType = "FieldValueNotFound" + // ErrorTypeRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). See + // Required(). + ErrorTypeRequired ErrorType = "FieldValueRequired" + // ErrorTypeDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). See Duplicate(). + ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" + // ErrorTypeInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). See Invalid(). + ErrorTypeInvalid ErrorType = "FieldValueInvalid" + // ErrorTypeNotSupported is used to report unknown values for enumerated + // fields (e.g. a list of valid values). See NotSupported(). + ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" + // ErrorTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + ErrorTypeForbidden ErrorType = "FieldValueForbidden" + // ErrorTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + ErrorTypeTooMany ErrorType = "FieldValueTooMany" + // ErrorTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + ErrorTypeInternal ErrorType = "InternalError" +) + +// String converts a ErrorType into its corresponding canonical error message. 
+func (t ErrorType) String() string { + switch t { + case ErrorTypeNotFound: + return "Not found" + case ErrorTypeRequired: + return "Required value" + case ErrorTypeDuplicate: + return "Duplicate value" + case ErrorTypeInvalid: + return "Invalid value" + case ErrorTypeNotSupported: + return "Unsupported value" + case ErrorTypeForbidden: + return "Forbidden" + case ErrorTypeTooLong: + return "Too long" + case ErrorTypeTooMany: + return "Too many" + case ErrorTypeInternal: + return "Internal error" + default: + panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) + } +} + +// NotFound returns a *Error indicating "value not found". This is +// used to report failure to find a requested value (e.g. looking up an ID). +func NotFound(field *Path, value interface{}) *Error { + return &Error{ErrorTypeNotFound, field.String(), value, ""} +} + +// Required returns a *Error indicating "value required". This is used +// to report required values that are not provided (e.g. empty strings, null +// values, or empty arrays). +func Required(field *Path, detail string) *Error { + return &Error{ErrorTypeRequired, field.String(), "", detail} +} + +// Duplicate returns a *Error indicating "duplicate value". This is +// used to report collisions of values that must be unique (e.g. names or IDs). +func Duplicate(field *Path, value interface{}) *Error { + return &Error{ErrorTypeDuplicate, field.String(), value, ""} +} + +// Invalid returns a *Error indicating "invalid value". This is used +// to report malformed values (e.g. failed regex match, too long, out of bounds). +func Invalid(field *Path, value interface{}, detail string) *Error { + return &Error{ErrorTypeInvalid, field.String(), value, detail} +} + +// NotSupported returns a *Error indicating "unsupported value". +// This is used to report unknown values for enumerated fields (e.g. a list of +// valid values). +func NotSupported(field *Path, value interface{}, validValues []string) *Error { + detail := "" + if validValues != nil && len(validValues) > 0 { + quotedValues := make([]string, len(validValues)) + for i, v := range validValues { + quotedValues[i] = strconv.Quote(v) + } + detail = "supported values: " + strings.Join(quotedValues, ", ") + } + return &Error{ErrorTypeNotSupported, field.String(), value, detail} +} + +// Forbidden returns a *Error indicating "forbidden". This is used to +// report valid (as per formatting rules) values which would be accepted under +// some conditions, but which are not permitted by current conditions (e.g. +// security policy). +func Forbidden(field *Path, detail string) *Error { + return &Error{ErrorTypeForbidden, field.String(), "", detail} +} + +// TooLong returns a *Error indicating "too long". This is used to +// report that the given value is too long. This is similar to +// Invalid, but the returned error will not include the too-long +// value. +func TooLong(field *Path, value interface{}, maxLength int) *Error { + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} +} + +// TooMany returns a *Error indicating "too many". This is used to +// report that a given list has too many items. This is similar to TooLong, +// but the returned error indicates quantity instead of length. +func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { + return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} +} + +// InternalError returns a *Error indicating "internal error". 
This is used +// to signal that an error was found that was not directly related to user +// input. The err argument must be non-nil. +func InternalError(field *Path, err error) *Error { + return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} +} + +// ErrorList holds a set of Errors. It is plausible that we might one day have +// non-field errors in this same umbrella package, but for now we don't, so +// we can keep it simple and leave ErrorList here. +type ErrorList []*Error + +// NewErrorTypeMatcher returns an errors.Matcher that returns true +// if the provided error is a Error and has the provided ErrorType. +func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { + return func(err error) bool { + if e, ok := err.(*Error); ok { + return e.Type == t + } + return false + } +} + +// ToAggregate converts the ErrorList into an errors.Aggregate. +func (list ErrorList) ToAggregate() utilerrors.Aggregate { + errs := make([]error, 0, len(list)) + errorMsgs := sets.NewString() + for _, err := range list { + msg := fmt.Sprintf("%v", err) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, err) + } + return utilerrors.NewAggregate(errs) +} + +func fromAggregate(agg utilerrors.Aggregate) ErrorList { + errs := agg.Errors() + list := make(ErrorList, len(errs)) + for i := range errs { + list[i] = errs[i].(*Error) + } + return list +} + +// Filter removes items from the ErrorList that match the provided fns. +func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { + err := utilerrors.FilterOut(list.ToAggregate(), fns...) + if err == nil { + return nil + } + // FilterOut takes an Aggregate and returns an Aggregate + return fromAggregate(err.(utilerrors.Aggregate)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go new file mode 100644 index 000000000..daccb0589 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go @@ -0,0 +1,117 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "bytes" + "fmt" + "strconv" +) + +type pathOptions struct { + path *Path +} + +// PathOption modifies a pathOptions +type PathOption func(o *pathOptions) + +// WithPath generates a PathOption +func WithPath(p *Path) PathOption { + return func(o *pathOptions) { + o.path = p + } +} + +// ToPath produces *Path from a set of PathOption +func ToPath(opts ...PathOption) *Path { + c := &pathOptions{} + for _, opt := range opts { + opt(c) + } + return c.path +} + +// Path represents the path from some root to a particular field. +type Path struct { + name string // the name of this field or "" if this is an index + index string // if name == "", this is a subscript (index or map key) of the previous element + parent *Path // nil if this is the root element +} + +// NewPath creates a root Path object. 
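As a quick illustration of how ErrorList, ToAggregate and Filter compose (a sketch only; the paths and values are made up and the aggregate message shown is approximate):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	errs := field.ErrorList{
		field.NotFound(field.NewPath("spec", "nodeName"), "worker-0"),
		field.Required(field.NewPath("metadata", "name"), ""),
		field.Required(field.NewPath("metadata", "name"), ""), // duplicate, dropped by ToAggregate
	}

	// Drop every "not found" error, keep everything else.
	remaining := errs.Filter(field.NewErrorTypeMatcher(field.ErrorTypeNotFound))

	// ToAggregate folds the list into a single error value, skipping repeated messages.
	if agg := remaining.ToAggregate(); agg != nil {
		fmt.Println(agg) // roughly: metadata.name: Required value
	}
}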
+func NewPath(name string, moreNames ...string) *Path { + r := &Path{name: name, parent: nil} + for _, anotherName := range moreNames { + r = &Path{name: anotherName, parent: r} + } + return r +} + +// Root returns the root element of this Path. +func (p *Path) Root() *Path { + for ; p.parent != nil; p = p.parent { + // Do nothing. + } + return p +} + +// Child creates a new Path that is a child of the method receiver. +func (p *Path) Child(name string, moreNames ...string) *Path { + r := NewPath(name, moreNames...) + r.Root().parent = p + return r +} + +// Index indicates that the previous Path is to be subscripted by an int. +// This sets the same underlying value as Key. +func (p *Path) Index(index int) *Path { + return &Path{index: strconv.Itoa(index), parent: p} +} + +// Key indicates that the previous Path is to be subscripted by a string. +// This sets the same underlying value as Index. +func (p *Path) Key(key string) *Path { + return &Path{index: key, parent: p} +} + +// String produces a string representation of the Path. +func (p *Path) String() string { + if p == nil { + return "" + } + // make a slice to iterate + elems := []*Path{} + for ; p != nil; p = p.parent { + elems = append(elems, p) + } + + // iterate, but it has to be backwards + buf := bytes.NewBuffer(nil) + for i := range elems { + p := elems[len(elems)-1-i] + if p.parent != nil && len(p.name) > 0 { + // This is either the root or it is a subscript. + buf.WriteString(".") + } + if len(p.name) > 0 { + buf.WriteString(p.name) + } else { + fmt.Fprintf(buf, "[%s]", p.index) + } + } + return buf.String() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go new file mode 100644 index 000000000..c8b419984 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -0,0 +1,503 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "math" + "net" + "regexp" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// IsQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. 
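A small sketch of how Path values are built and rendered (the paths chosen here are illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// Children, list indices and map keys compose left to right.
	p := field.NewPath("spec").Child("containers").Index(0).Child("ports").Key("http")
	fmt.Println(p) // spec.containers[0].ports[http]

	// ToPath/WithPath let callers thread an optional parent path through options.
	parent := field.ToPath(field.WithPath(field.NewPath("template")))
	fmt.Println(parent.Child("metadata", "labels")) // template.metadata.labels
}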
+func IsQualifiedName(value string) []string { + var errs []string + parts := strings.Split(value, "/") + var name string + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + errs = append(errs, "prefix part "+EmptyError()) + } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { + errs = append(errs, prefixEach(msgs, "prefix part ")...) + } + default: + return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") + } + + if len(name) == 0 { + errs = append(errs, "name part "+EmptyError()) + } else if len(name) > qualifiedNameMaxLength { + errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) + } + if !qualifiedNameRegexp.MatchString(name) { + errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) + } + return errs +} + +// IsFullyQualifiedName checks if the name is fully qualified. This is similar +// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of +// 2 and does not accept a trailing . as valid. +// TODO: This function is deprecated and preserved until all callers migrate to +// IsFullyQualifiedDomainName; please don't add new callers. +func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 3 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots")) + } + return allErrors +} + +// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This +// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments +// instead of 3 and accepts a trailing . as valid. +func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if strings.HasSuffix(name, ".") { + name = name[:len(name)-1] + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 2 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) + } + for _, label := range strings.Split(name, ".") { + if errs := IsDNS1123Label(label); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, label, strings.Join(errs, ","))) + } + } + return allErrors +} + +// Allowed characters in an HTTP Path as defined by RFC 3986. A HTTP path may +// contain: +// * unreserved characters (alphanumeric, '-', '.', '_', '~') +// * percent-encoded octets +// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=") +// * a colon character (":") +const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+` + +var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$") + +// IsDomainPrefixedPath checks if the given string is a domain-prefixed path +// (e.g. acme.io/foo). All characters before the first "/" must be a valid +// subdomain as defined by RFC 1123. 
All characters trailing the first "/" must +// be valid HTTP Path characters as defined by RFC 3986. +func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList { + var allErrs field.ErrorList + if len(dpPath) == 0 { + return append(allErrs, field.Required(fldPath, "")) + } + + segments := strings.SplitN(dpPath, "/", 2) + if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 { + return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")")) + } + + host := segments[0] + for _, err := range IsDNS1123Subdomain(host) { + allErrs = append(allErrs, field.Invalid(fldPath, host, err)) + } + + path := segments[1] + if !httpPathRegexp.MatchString(path) { + return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt))) + } + + return allErrs +} + +const labelValueFmt string = "(" + qualifiedNameFmt + ")?" +const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" + +// LabelValueMaxLength is a label's max length +const LabelValueMaxLength int = 63 + +var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") + +// IsValidLabelValue tests whether the value passed is a valid label value. If +// the value is not valid, a list of error strings is returned. Otherwise an +// empty list (or nil) is returned. +func IsValidLabelValue(value string) []string { + var errs []string + if len(value) > LabelValueMaxLength { + errs = append(errs, MaxLenError(LabelValueMaxLength)) + } + if !labelValueRegexp.MatchString(value) { + errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) + } + return errs +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" + +// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) +const DNS1123LabelMaxLength int = 63 + +var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") + +// IsDNS1123Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1123). +func IsDNS1123Label(value string) []string { + var errs []string + if len(value) > DNS1123LabelMaxLength { + errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) + } + if !dns1123LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } + return errs +} + +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// IsDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). 
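For orientation, a sketch of the string validators above in use; each returns a nil or empty slice when the value is acceptable (inputs are illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// Qualified names may carry an optional DNS-subdomain prefix.
	fmt.Println(validation.IsQualifiedName("app"))             // []
	fmt.Println(validation.IsQualifiedName("example.com/app")) // []
	fmt.Println(validation.IsQualifiedName("-bad"))            // one error: must start and end with an alphanumeric

	// Label values may be empty; otherwise the same character rules apply.
	fmt.Println(validation.IsValidLabelValue(""))       // []
	fmt.Println(validation.IsValidLabelValue("v1.2_3")) // []

	// DNS-1123 labels are lower case only.
	fmt.Println(validation.IsDNS1123Label("web-01")) // []
	fmt.Println(validation.IsDNS1123Label("Web"))    // one error
}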
+func IsDNS1123Subdomain(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + return errs +} + +const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" + +// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) +const DNS1035LabelMaxLength int = 63 + +var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") + +// IsDNS1035Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1035). +func IsDNS1035Label(value string) []string { + var errs []string + if len(value) > DNS1035LabelMaxLength { + errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) + } + if !dns1035LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) + } + return errs +} + +// wildcard definition - RFC 1034 section 4.3.3. +// examples: +// - valid: *.bar.com, *.foo.bar.com +// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * +const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt +const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" + +// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a +// wildcard subdomain in DNS (RFC 1034 section 4.3.3). +func IsWildcardDNS1123Subdomain(value string) []string { + wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") + + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !wildcardDNS1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) + } + return errs +} + +const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" +const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" + +var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") + +// IsCIdentifier tests for a string that conforms the definition of an identifier +// in C. This checks the format, but not the length. +func IsCIdentifier(value string) []string { + if !cIdentifierRegexp.MatchString(value) { + return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} + } + return nil +} + +// IsValidPortNum tests that the argument is a valid, non-zero port number. +func IsValidPortNum(port int) []string { + if 1 <= port && port <= 65535 { + return nil + } + return []string{InclusiveRangeError(1, 65535)} +} + +// IsInRange tests that the argument is in an inclusive range. +func IsInRange(value int, min int, max int) []string { + if value >= min && value <= max { + return nil + } + return []string{InclusiveRangeError(min, max)} +} + +// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 +// TODO: once we have a type for UID/GID we should make these that type. 
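Similarly, a sketch of the subdomain, wildcard, identifier and range checks (example inputs only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// DNS-1123 subdomains may start with a digit; DNS-1035 labels must start with a letter.
	fmt.Println(validation.IsDNS1123Subdomain("metrics.example.com")) // []
	fmt.Println(validation.IsDNS1035Label("9lives"))                  // one error: must start with a letter

	// A wildcard is only accepted as a single leading "*." before a valid subdomain.
	fmt.Println(validation.IsWildcardDNS1123Subdomain("*.example.com")) // []
	fmt.Println(validation.IsWildcardDNS1123Subdomain("*.*.com"))       // one error

	// Port numbers must fall in 1-65535; IsInRange is the generic form of the same check.
	fmt.Println(validation.IsValidPortNum(0))     // [must be between 1 and 65535, inclusive]
	fmt.Println(validation.IsInRange(50, 1, 100)) // []

	// C identifiers: letters, digits and '_', not starting with a digit.
	fmt.Println(validation.IsCIdentifier("MY_NAME_1")) // []
}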
+const ( + minUserID = 0 + maxUserID = math.MaxInt32 + minGroupID = 0 + maxGroupID = math.MaxInt32 +) + +// IsValidGroupID tests that the argument is a valid Unix GID. +func IsValidGroupID(gid int64) []string { + if minGroupID <= gid && gid <= maxGroupID { + return nil + } + return []string{InclusiveRangeError(minGroupID, maxGroupID)} +} + +// IsValidUserID tests that the argument is a valid Unix UID. +func IsValidUserID(uid int64) []string { + if minUserID <= uid && uid <= maxUserID { + return nil + } + return []string{InclusiveRangeError(minUserID, maxUserID)} +} + +var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") +var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") + +// IsValidPortName check that the argument is valid syntax. It must be +// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] +// and must contain at least one letter [a-z]. It must not start or end with a +// hyphen, nor contain adjacent hyphens. +// +// Note: We only allow lower-case characters, even though RFC 6335 is case +// insensitive. +func IsValidPortName(port string) []string { + var errs []string + if len(port) > 15 { + errs = append(errs, MaxLenError(15)) + } + if !portNameCharsetRegex.MatchString(port) { + errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") + } + if !portNameOneLetterRegexp.MatchString(port) { + errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + } + if strings.Contains(port, "--") { + errs = append(errs, "must not contain consecutive hyphens") + } + if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { + errs = append(errs, "must not begin or end with a hyphen") + } + return errs +} + +// IsValidIP tests that the argument is a valid IP address. +func IsValidIP(value string) []string { + if net.ParseIP(value) == nil { + return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"} + } + return nil +} + +// IsValidIPv4Address tests that the argument is a valid IPv4 address. +func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() == nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) + } + return allErrors +} + +// IsValidIPv6Address tests that the argument is a valid IPv6 address. 
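And a sketch of the numeric-ID, port-name and IP checks; note that the IPv4/IPv6 variants report structured errors against a field.Path rather than plain strings (inputs and the printed aggregate message are illustrative/approximate):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// UIDs and GIDs must fit in 0..math.MaxInt32.
	fmt.Println(validation.IsValidUserID(1000)) // []
	fmt.Println(validation.IsValidGroupID(-1))  // [must be between 0 and 2147483647, inclusive]

	// Port names: at most 15 characters, lower-case alphanumerics and single
	// hyphens, at least one letter, no leading or trailing hyphen.
	fmt.Println(validation.IsValidPortName("http-metrics")) // []
	fmt.Println(validation.IsValidPortName("80"))           // one error: needs at least one [a-z] letter

	// Family-agnostic vs. family-specific IP checks.
	fmt.Println(validation.IsValidIP("10.9.8.7")) // []
	errs := validation.IsValidIPv4Address(field.NewPath("spec", "clusterIP"), "2001:db8::1")
	fmt.Println(errs.ToAggregate()) // roughly: spec.clusterIP: Invalid value: "2001:db8::1": must be a valid IPv4 address
}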
+func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) + } + return allErrors +} + +const percentFmt string = "[0-9]+%" +const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" + +var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") + +// IsValidPercent checks that string is in the form of a percentage +func IsValidPercent(percent string) []string { + if !percentRegexp.MatchString(percent) { + return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} + } + return nil +} + +const httpHeaderNameFmt string = "[-A-Za-z0-9]+" +const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" + +var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") + +// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's +// definition of a valid header field name (a stricter subset than RFC7230). +func IsHTTPHeaderName(value string) []string { + if !httpHeaderNameRegexp.MatchString(value) { + return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} + } + return nil +} + +const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" +const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" + +var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") + +// IsEnvVarName tests if a string is a valid environment variable name. +func IsEnvVarName(value string) []string { + var errs []string + if !envVarNameRegexp.MatchString(value) { + errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1")) + } + + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + +const configMapKeyFmt = `[-._a-zA-Z0-9]+` +const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" + +var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") + +// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret +func IsConfigMapKey(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !configMapKeyRegexp.MatchString(value) { + errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) + } + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + +// MaxLenError returns a string explanation of a "string too long" validation +// failure. +func MaxLenError(length int) string { + return fmt.Sprintf("must be no more than %d characters", length) +} + +// RegexError returns a string explanation of a regex validation failure. +func RegexError(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +// EmptyError returns a string explanation of a "must not be empty" validation +// failure. 
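A last sketch for the percent, header, environment-variable and ConfigMap-key validators, plus the reusable message helpers (inputs are illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	fmt.Println(validation.IsValidPercent("93%")) // []
	fmt.Println(validation.IsValidPercent("0.5")) // one error, quoting the regex and examples

	fmt.Println(validation.IsHTTPHeaderName("X-Request-Id")) // []

	// Env var names must not start with a digit; ConfigMap keys additionally
	// reject ".", ".." and anything starting with "..".
	fmt.Println(validation.IsEnvVarName("HUBBLE_SERVER")) // []
	fmt.Println(validation.IsConfigMapKey("..hidden"))    // [must not start with '..']

	// The helpers behind most of these messages can be reused directly.
	fmt.Println(validation.MaxLenError(63)) // must be no more than 63 characters
	fmt.Println(validation.EmptyError())    // must be non-empty
}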
+func EmptyError() string { + return "must be non-empty" +} + +func prefixEach(msgs []string, prefix string) []string { + for i := range msgs { + msgs[i] = prefix + msgs[i] + } + return msgs +} + +// InclusiveRangeError returns a string explanation of a numeric "must be +// between" validation failure. +func InclusiveRangeError(lo, hi int) string { + return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) +} + +func hasChDirPrefix(value string) []string { + var errs []string + switch { + case value == ".": + errs = append(errs, `must not be '.'`) + case value == "..": + errs = append(errs, `must not be '..'`) + case strings.HasPrefix(value, ".."): + errs = append(errs, `must not start with '..'`) + } + return errs +} + +// IsValidSocketAddr checks that string represents a valid socket address +// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) +func IsValidSocketAddr(value string) []string { + var errs []string + ip, port, err := net.SplitHostPort(value) + if err != nil { + errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") + return errs + } + portInt, _ := strconv.Atoi(port) + errs = append(errs, IsValidPortNum(portInt)...) + errs = append(errs, IsValidIP(ip)...) + return errs +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7b5998ded..a17ef0e0c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -6,9 +6,14 @@ github.com/cilium/cilium/api/v1/flow github.com/cilium/cilium/api/v1/observer github.com/cilium/cilium/api/v1/peer github.com/cilium/cilium/api/v1/relay +github.com/cilium/cilium/pkg/defaults github.com/cilium/cilium/pkg/hubble/api/v1 +github.com/cilium/cilium/pkg/hubble/filters +github.com/cilium/cilium/pkg/hubble/k8s github.com/cilium/cilium/pkg/identity github.com/cilium/cilium/pkg/k8s/apis/cilium.io +github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels +github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection github.com/cilium/cilium/pkg/labels github.com/cilium/cilium/pkg/lock github.com/cilium/cilium/pkg/logging @@ -278,6 +283,11 @@ honnef.co/go/tools/staticcheck honnef.co/go/tools/stylecheck honnef.co/go/tools/unused honnef.co/go/tools/unused/typemap +# k8s.io/apimachinery v0.21.0-rc.0 +k8s.io/apimachinery/pkg/util/errors +k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/validation +k8s.io/apimachinery/pkg/util/validation/field # k8s.io/klog/v2 v2.8.0 k8s.io/klog/v2 # github.com/miekg/dns => github.com/cilium/dns v1.1.4-0.20190417235132-8e25ec9a0ff3
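Taken together, a hedged sketch of how the newly vendored validation and field packages are typically combined; validateLabels below is a hypothetical helper (not part of this change), and the label data and socket address are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// validateLabels is a hypothetical helper: the string validators produce plain
// messages, which are attached to a field.Path as structured field errors.
func validateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
	var allErrs field.ErrorList
	for k, v := range labels {
		for _, msg := range validation.IsQualifiedName(k) {
			allErrs = append(allErrs, field.Invalid(fldPath.Key(k), k, msg))
		}
		for _, msg := range validation.IsValidLabelValue(v) {
			allErrs = append(allErrs, field.Invalid(fldPath.Key(k), v, msg))
		}
	}
	return allErrs
}

func main() {
	// Socket addresses are split into host and port, and both halves are checked.
	fmt.Println(validation.IsValidSocketAddr("0.0.0.0:4245")) // []
	fmt.Println(validation.IsValidSocketAddr("localhost"))    // one error: not in host:port form

	// One Invalid error is reported for the space in the label value.
	labels := map[string]string{"app.kubernetes.io/name": "hubble relay"}
	for _, e := range validateLabels(labels, field.NewPath("labels")) {
		fmt.Println(e.ErrorBody())
	}
}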