Skip to content

Commit

Permalink
Alias as many common types as possible
Browse files Browse the repository at this point in the history
  • Loading branch information
swiatekm committed May 1, 2024
1 parent ed45e0e commit b536c53
Show file tree
Hide file tree
Showing 33 changed files with 745 additions and 663 deletions.
28 changes: 22 additions & 6 deletions apis/v1alpha1/convert.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ func (dst *OpenTelemetryCollector) ConvertFrom(srcRaw conversion.Hub) error {

func tov1beta1(in OpenTelemetryCollector) (v1beta1.OpenTelemetryCollector, error) {
copy := in.DeepCopy()
cfg := &common.Config{}
cfg := &v1beta1.Config{}
if err := yaml.Unmarshal([]byte(copy.Spec.Config), cfg); err != nil {
return v1beta1.OpenTelemetryCollector{}, errors.New("could not convert config json to v1beta1.Config")
}
Expand Down Expand Up @@ -171,13 +171,13 @@ func tov1beta1TA(in OpenTelemetryTargetAllocator) v1beta1.TargetAllocatorEmbedde
Replicas: in.Replicas,
NodeSelector: in.NodeSelector,
Resources: in.Resources,
AllocationStrategy: in.AllocationStrategy,
FilterStrategy: in.FilterStrategy,
AllocationStrategy: tov1beta1TAAllocationStrategy(in.AllocationStrategy),
FilterStrategy: tov1beta1TAFilterStrategy(in.FilterStrategy),
ServiceAccount: in.ServiceAccount,
Image: in.Image,
Enabled: in.Enabled,
Affinity: in.Affinity,
PrometheusCR: common.TargetAllocatorPrometheusCR{
PrometheusCR: v1beta1.TargetAllocatorPrometheusCR{
Enabled: in.PrometheusCR.Enabled,
ScrapeInterval: in.PrometheusCR.ScrapeInterval,
// prometheus_cr.pod_monitor_selector shouldn't be nil when selector is empty
Expand Down Expand Up @@ -441,8 +441,8 @@ func tov1alpha1TA(in v1beta1.TargetAllocatorEmbedded) OpenTelemetryTargetAllocat
Replicas: in.Replicas,
NodeSelector: in.NodeSelector,
Resources: in.Resources,
AllocationStrategy: in.AllocationStrategy,
FilterStrategy: in.FilterStrategy,
AllocationStrategy: Tov1alpha1TAAllocationStrategy(in.AllocationStrategy),
FilterStrategy: Tov1alpha1TAFilterStrategy(in.FilterStrategy),
ServiceAccount: in.ServiceAccount,
Image: in.Image,
Enabled: in.Enabled,
Expand All @@ -467,3 +467,19 @@ func tov1alpha1TA(in v1beta1.TargetAllocatorEmbedded) OpenTelemetryTargetAllocat
PodDisruptionBudget: tov1alpha1PodDisruptionBudget(in.PodDisruptionBudget),
}
}

// Tov1alpha1TAFilterStrategy converts a v1beta1 target allocator filter
// strategy to its v1alpha1 equivalent. Both types are string-based, so the
// conversion is a direct cast with no validation.
func Tov1alpha1TAFilterStrategy(strategy v1beta1.TargetAllocatorFilterStrategy) TargetAllocatorFilterStrategy {
	converted := TargetAllocatorFilterStrategy(strategy)
	return converted
}

// Tov1alpha1TAAllocationStrategy converts a v1beta1 target allocator
// allocation strategy to its v1alpha1 equivalent. Both types are string-based,
// so the conversion is a direct cast with no validation.
func Tov1alpha1TAAllocationStrategy(strategy v1beta1.TargetAllocatorAllocationStrategy) TargetAllocatorAllocationStrategy {
	converted := TargetAllocatorAllocationStrategy(strategy)
	return converted
}

// tov1beta1TAFilterStrategy converts a v1alpha1 target allocator filter
// strategy to its v1beta1 equivalent. Both types are string-based, so the
// conversion is a direct cast with no validation.
func tov1beta1TAFilterStrategy(strategy TargetAllocatorFilterStrategy) v1beta1.TargetAllocatorFilterStrategy {
	converted := v1beta1.TargetAllocatorFilterStrategy(strategy)
	return converted
}

// tov1beta1TAAllocationStrategy converts a v1alpha1 target allocator
// allocation strategy to its v1beta1 equivalent. Both types are string-based,
// so the conversion is a direct cast with no validation.
func tov1beta1TAAllocationStrategy(strategy TargetAllocatorAllocationStrategy) v1beta1.TargetAllocatorAllocationStrategy {
	converted := v1beta1.TargetAllocatorAllocationStrategy(strategy)
	return converted
}
6 changes: 3 additions & 3 deletions apis/v1alpha1/convert_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ func Test_tov1beta1_config(t *testing.T) {
}

func Test_tov1alpha1_config(t *testing.T) {
cfg := common.Config{}
cfg := v1beta1.Config{}
err := yaml.Unmarshal([]byte(collectorCfg), &cfg)
require.NoError(t, err)

Expand Down Expand Up @@ -428,7 +428,7 @@ func createTA() OpenTelemetryTargetAllocator {
v1.ResourceMemory: resource.MustParse("128Mi"),
},
},
AllocationStrategy: common.TargetAllocatorAllocationStrategyConsistentHashing,
AllocationStrategy: TargetAllocatorAllocationStrategyConsistentHashing,
FilterStrategy: "relabel-config",
ServiceAccount: "serviceAccountName",
Image: "custom_image",
Expand Down Expand Up @@ -532,7 +532,7 @@ func TestConvertTo(t *testing.T) {
ServiceAccount: "otelcol",
},
TargetAllocator: v1beta1.TargetAllocatorEmbedded{
PrometheusCR: common.TargetAllocatorPrometheusCR{
PrometheusCR: v1beta1.TargetAllocatorPrometheusCR{
PodMonitorSelector: &metav1.LabelSelector{},
ServiceMonitorSelector: &metav1.LabelSelector{},
},
Expand Down
6 changes: 2 additions & 4 deletions apis/v1alpha1/opentelemetrycollector_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,6 @@ import (
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"

"github.com/open-telemetry/opentelemetry-operator/internal/api/common"
)

// ManagementStateType defines the type for CR management states.
Expand Down Expand Up @@ -322,13 +320,13 @@ type OpenTelemetryTargetAllocator struct {
// WARNING: The per-node strategy currently ignores targets without a Node, like control plane components.
// +optional
// +kubebuilder:default:=consistent-hashing
AllocationStrategy common.TargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
AllocationStrategy TargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
// FilterStrategy determines how to filter targets before allocating them among the collectors.
// The only current option is relabel-config (drops targets based on prom relabel_config).
// The default is relabel-config.
// +optional
// +kubebuilder:default:=relabel-config
FilterStrategy common.TargetAllocatorFilterStrategy `json:"filterStrategy,omitempty"`
FilterStrategy TargetAllocatorFilterStrategy `json:"filterStrategy,omitempty"`
// ServiceAccount indicates the name of an existing service account to use with this instance. When set,
// the operator will not automatically create a ServiceAccount for the TargetAllocator.
// +optional
Expand Down
50 changes: 13 additions & 37 deletions apis/v1alpha1/targetallocator_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,13 +68,13 @@ type TargetAllocatorSpec struct {
// WARNING: The per-node strategy currently ignores targets without a Node, like control plane components.
// +optional
// +kubebuilder:default:=consistent-hashing
AllocationStrategy common.TargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
AllocationStrategy TargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
// FilterStrategy determines how to filter targets before allocating them among the collectors.
// The only current option is relabel-config (drops targets based on prom relabel_config).
// The default is relabel-config.
// +optional
// +kubebuilder:default:=relabel-config
FilterStrategy common.TargetAllocatorFilterStrategy `json:"filterStrategy,omitempty"`
FilterStrategy TargetAllocatorFilterStrategy `json:"filterStrategy,omitempty"`
// ScrapeConfigs define static Prometheus scrape configurations for the target allocator.
// To use dynamic configurations from ServiceMonitors and PodMonitors, see the PrometheusCR section.
// For the exact format, see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config.
Expand All @@ -84,7 +84,7 @@ type TargetAllocatorSpec struct {
ScrapeConfigs []common.AnyConfig `json:"scrapeConfigs,omitempty"`
// PrometheusCR defines the configuration for the retrieval of PrometheusOperator CRDs ( servicemonitor.monitoring.coreos.com/v1 and podmonitor.monitoring.coreos.com/v1 ).
// +optional
PrometheusCR common.TargetAllocatorPrometheusCR `json:"prometheusCR,omitempty"`
PrometheusCR TargetAllocatorPrometheusCR `json:"prometheusCR,omitempty"`
// ObservabilitySpec defines how telemetry data gets handled.
//
// +optional
Expand All @@ -93,49 +93,25 @@ type TargetAllocatorSpec struct {
Observability common.ObservabilitySpec `json:"observability,omitempty"`
}

// TargetAllocatorPrometheusCR configures Prometheus CustomResource handling in the Target Allocator.
type TargetAllocatorPrometheusCR struct {
// Enabled indicates whether to use a PrometheusOperator custom resources as targets or not.
// +optional
Enabled bool `json:"enabled,omitempty"`
// Default interval between consecutive scrapes. Intervals set in ServiceMonitors and PodMonitors override it.
// Equivalent to the same setting on the Prometheus CR.
//
// Default: "30s"
// +kubebuilder:default:="30s"
// +kubebuilder:validation:Format:=duration
ScrapeInterval *metav1.Duration `json:"scrapeInterval,omitempty"`
// PodMonitors to be selected for target discovery.
// This is a map of {key,value} pairs. Each {key,value} in the map is going to exactly match a label in a
// PodMonitor's meta labels. The requirements are ANDed.
// +optional
PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"`
// ServiceMonitors to be selected for target discovery.
// This is a map of {key,value} pairs. Each {key,value} in the map is going to exactly match a label in a
// ServiceMonitor's meta labels. The requirements are ANDed.
// +optional
ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
}

type (
// TargetAllocatorAllocationStrategy represent a strategy Target Allocator uses to distribute targets to each collector
// +kubebuilder:validation:Enum=least-weighted;consistent-hashing;per-node
TargetAllocatorAllocationStrategy string
// TargetAllocatorFilterStrategy represent a filtering strategy for targets before they are assigned to collectors
// +kubebuilder:validation:Enum="";relabel-config
TargetAllocatorFilterStrategy string
// TargetAllocatorPrometheusCR configures Prometheus CustomResource handling in the Target Allocator.
TargetAllocatorPrometheusCR common.TargetAllocatorPrometheusCR
// TargetAllocatorAllocationStrategy represent a strategy Target Allocator uses to distribute targets to each collector.
TargetAllocatorAllocationStrategy common.TargetAllocatorAllocationStrategy
// TargetAllocatorFilterStrategy represent a filtering strategy for targets before they are assigned to collectors.
TargetAllocatorFilterStrategy common.TargetAllocatorFilterStrategy
)

const (
// TargetAllocatorAllocationStrategyLeastWeighted targets will be distributed to collector with fewer targets currently assigned.
TargetAllocatorAllocationStrategyLeastWeighted TargetAllocatorAllocationStrategy = "least-weighted"
TargetAllocatorAllocationStrategyLeastWeighted = TargetAllocatorAllocationStrategy(common.TargetAllocatorAllocationStrategyLeastWeighted)

// TargetAllocatorAllocationStrategyConsistentHashing targets will be consistently added to collectors, which allows a high-availability setup.
TargetAllocatorAllocationStrategyConsistentHashing TargetAllocatorAllocationStrategy = "consistent-hashing"
TargetAllocatorAllocationStrategyConsistentHashing = TargetAllocatorAllocationStrategy(common.TargetAllocatorAllocationStrategyConsistentHashing)

// TargetAllocatorAllocationStrategyPerNode targets will be assigned to the collector on the node they reside on (use only with daemon set).
TargetAllocatorAllocationStrategyPerNode TargetAllocatorAllocationStrategy = "per-node"
TargetAllocatorAllocationStrategyPerNode TargetAllocatorAllocationStrategy = TargetAllocatorAllocationStrategy(common.TargetAllocatorAllocationStrategyPerNode)

// TargetAllocatorFilterStrategyRelabelConfig drops targets based on the relabel_config.
TargetAllocatorFilterStrategyRelabelConfig TargetAllocatorFilterStrategy = "relabel-config"
TargetAllocatorFilterStrategyRelabelConfig = TargetAllocatorFilterStrategy(common.TargetAllocatorFilterStrategyRelabelConfig)
)
20 changes: 10 additions & 10 deletions apis/v1beta1/collector_webhook.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

common2 "github.com/open-telemetry/opentelemetry-operator/internal/api/common"
"github.com/open-telemetry/opentelemetry-operator/internal/api/common"
"github.com/open-telemetry/opentelemetry-operator/internal/config"
ta "github.com/open-telemetry/opentelemetry-operator/internal/manifests/targetallocator/adapters"
"github.com/open-telemetry/opentelemetry-operator/internal/rbac"
Expand Down Expand Up @@ -125,7 +125,7 @@ func (c CollectorWebhook) Default(_ context.Context, obj runtime.Object) error {
// not blocking node drains but preventing out-of-the-box
// from disruption generated by them with replicas > 1
if otelcol.Spec.PodDisruptionBudget == nil {
otelcol.Spec.PodDisruptionBudget = &common2.PodDisruptionBudgetSpec{
otelcol.Spec.PodDisruptionBudget = &common.PodDisruptionBudgetSpec{
MaxUnavailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 1,
Expand All @@ -139,9 +139,9 @@ func (c CollectorWebhook) Default(_ context.Context, obj runtime.Object) error {
// just one replica, not blocking node drains but preventing
// out-of-the-box from disruption generated by them with replicas > 1
if otelcol.Spec.TargetAllocator.Enabled &&
otelcol.Spec.TargetAllocator.AllocationStrategy == common2.TargetAllocatorAllocationStrategyConsistentHashing &&
otelcol.Spec.TargetAllocator.AllocationStrategy == TargetAllocatorAllocationStrategyConsistentHashing &&
otelcol.Spec.TargetAllocator.PodDisruptionBudget == nil {
otelcol.Spec.TargetAllocator.PodDisruptionBudget = &common2.PodDisruptionBudgetSpec{
otelcol.Spec.TargetAllocator.PodDisruptionBudget = &common.PodDisruptionBudgetSpec{
MaxUnavailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 1,
Expand All @@ -158,7 +158,7 @@ func (c CollectorWebhook) Default(_ context.Context, obj runtime.Object) error {
// If someone upgrades to a later version without upgrading their CRD they will not have a management state set.
// This results in a default state of unmanaged preventing reconciliation from continuing.
if len(otelcol.Spec.ManagementState) == 0 {
otelcol.Spec.ManagementState = common2.ManagementStateManaged
otelcol.Spec.ManagementState = common.ManagementStateManaged
}
return nil
}
Expand Down Expand Up @@ -330,12 +330,12 @@ func (c CollectorWebhook) validateTargetAllocatorConfig(ctx context.Context, r *
return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the target allocation deployment", r.Spec.Mode)
}

if r.Spec.Mode == ModeDaemonSet && r.Spec.TargetAllocator.AllocationStrategy != common2.TargetAllocatorAllocationStrategyPerNode {
return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which must be used with target allocation strategy %s ", r.Spec.Mode, common2.TargetAllocatorAllocationStrategyPerNode)
if r.Spec.Mode == ModeDaemonSet && r.Spec.TargetAllocator.AllocationStrategy != TargetAllocatorAllocationStrategyPerNode {
return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which must be used with target allocation strategy %s ", r.Spec.Mode, TargetAllocatorAllocationStrategyPerNode)
}

if r.Spec.TargetAllocator.AllocationStrategy == common2.TargetAllocatorAllocationStrategyPerNode && r.Spec.Mode != ModeDaemonSet {
return nil, fmt.Errorf("target allocation strategy %s is only supported in OpenTelemetry Collector mode %s", common2.TargetAllocatorAllocationStrategyPerNode, ModeDaemonSet)
if r.Spec.TargetAllocator.AllocationStrategy == TargetAllocatorAllocationStrategyPerNode && r.Spec.Mode != ModeDaemonSet {
return nil, fmt.Errorf("target allocation strategy %s is only supported in OpenTelemetry Collector mode %s", TargetAllocatorAllocationStrategyPerNode, ModeDaemonSet)
}

cfgYaml, err := r.Spec.Config.Yaml()
Expand Down Expand Up @@ -367,7 +367,7 @@ func (c CollectorWebhook) validateTargetAllocatorConfig(ctx context.Context, r *
return nil, nil
}

func checkAutoscalerSpec(autoscaler *common2.AutoscalerSpec) error {
func checkAutoscalerSpec(autoscaler *common.AutoscalerSpec) error {
if autoscaler.Behavior != nil {
if autoscaler.Behavior.ScaleDown != nil && autoscaler.Behavior.ScaleDown.StabilizationWindowSeconds != nil &&
*autoscaler.Behavior.ScaleDown.StabilizationWindowSeconds < int32(1) {
Expand Down
Loading

0 comments on commit b536c53

Please sign in to comment.