Devops 1871 deprecate kube proxy #25

Merged: 4 commits, May 30, 2024
11 changes: 5 additions & 6 deletions .github/workflows/e2e.yaml
@@ -148,14 +148,13 @@ jobs:



printf "\n\nAgent INFO log\n"
kubectl exec -t deploy/sample-deployment -c app -- cat /tmp/lightrun_java_agent.INFO
printf "\n\nAgent ERROR log\n"
kubectl exec -t deploy/sample-deployment -c app -- cat /tmp/lightrun_java_agent.ERROR || true
printf "\n\nAgent log\n"

log_file_name=/tmp/$(kubectl exec -t deploy/sample-deployment -c app -- ls -t /tmp/ | grep lightrun_java_agent | head -n 1)
kubectl exec -t deploy/sample-deployment -c app -- cat $log_file_name

printf "\nSearching for "registered" in INFO log\n"
if kubectl exec -t deploy/sample-deployment -c app -- cat /tmp/lightrun_java_agent.INFO | grep Debuggee |grep registered > /dev/null; then
printf "\nSearching for "registered" in log\n"
if kubectl exec -t deploy/sample-deployment -c app -- cat $log_file_name | grep Debuggee |grep registered > /dev/null; then
printf "\n----------------\nAgent registered succesfully!\n----------------\n"
else
printf "\n----------------\nAgent failed to register!\n----------------\n"
5 changes: 4 additions & 1 deletion cmd/main.go
@@ -72,10 +72,12 @@ func getWatchNamespaces() ([]string, error) {

func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
var pprofAddr string
var enableLeaderElection bool
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.StringVar(&pprofAddr, "pprof-bind-address", "0", "The address the pprof endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
@@ -98,6 +100,7 @@ func main() {
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "5b425f09.lightrun.com",
PprofBindAddress: pprofAddr,

// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
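The `--pprof-bind-address` flag added above is handed to controller-runtime's manager options as `PprofBindAddress`; when the address is non-empty (the default "0" keeps it off), controller-runtime serves Go's standard `net/http/pprof` handlers on it. As a minimal standalone sketch of what that enables, the snippet below serves the same handlers; the `:6060` address is illustrative, not an operator default:

```go
package main

import (
	"log"
	"net/http"
	// Blank import registers the /debug/pprof/* handlers on http.DefaultServeMux.
	_ "net/http/pprof"
)

func main() {
	// Illustrative address; the operator takes this from --pprof-bind-address.
	addr := ":6060"
	log.Printf("pprof available at http://127.0.0.1%s/debug/pprof/", addr)
	// nil handler means http.DefaultServeMux, where pprof registered itself.
	log.Fatal(http.ListenAndServe(addr, nil))
}
```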
13 changes: 3 additions & 10 deletions config/default/kustomization.yaml
@@ -15,9 +15,9 @@ namePrefix: lightrun-k8s-operator-
# someName: someValue

resources:
- ../crd
- ../rbac
- ../manager
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
@@ -26,10 +26,3 @@ resources:
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus

# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
patches:
- path: manager_auth_proxy_patch.yaml
40 changes: 0 additions & 40 deletions config/default/manager_auth_proxy_patch.yaml

This file was deleted.

69 changes: 34 additions & 35 deletions config/manager/manager.yaml
@@ -64,40 +64,39 @@ spec:
seccompProfile:
type: RuntimeDefault
containers:
- command:
- /manager
env:
- name: WATCH_NAMESPACE
value: ""
args:
- --leader-elect
image: controller:latest
name: manager
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
# TODO(user): Configure the resources accordingly based on the project requirements.
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 10m
memory: 64Mi
- command:
- /manager
env:
- name: WATCH_NAMESPACE
value: ""
args:
- --leader-elect
- --zap-log-level=0
image: controller:latest
name: manager
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 50m
memory: 128Mi
serviceAccountName: controller-manager
terminationGracePeriodSeconds: 10
31 changes: 3 additions & 28 deletions config/samples/operator.yaml
@@ -481,31 +481,6 @@ spec:
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=0
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.14.1
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
- --zap-log-level=0
command:
@@ -530,10 +505,10 @@ spec:
resources:
limits:
cpu: 500m
memory: 128Mi
memory: 512Mi
requests:
cpu: 10m
memory: 64Mi
cpu: 50m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
25 changes: 1 addition & 24 deletions examples/operator.yaml
@@ -480,32 +480,9 @@ spec:
kubectl.kubernetes.io/default-container: manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=0
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --metrics-bind-address=:8080
- --leader-elect
- --zap-log-level=info
command:
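With the kube-rbac-proxy sidecar gone, the manager serves metrics directly on `:8080` instead of behind the proxy's authenticated `:8443` listener. A quick sanity check, sketched below under the assumption that the metrics port has been forwarded locally first (the deployment name in the comment is illustrative):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Assumes something like:
	//   kubectl port-forward deploy/<controller-manager> 8080:8080
	// has been run beforehand (the deployment name depends on your install).
	resp, err := http.Get("http://127.0.0.1:8080/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// Expect Prometheus text exposition, e.g. controller_runtime_* series.
	fmt.Printf("status=%s, %d bytes of metrics\n", resp.Status, len(body))
}
```

Note that without the proxy there is no authentication in front of this endpoint, so restrict access with a network policy or scrape configuration as needed.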
14 changes: 6 additions & 8 deletions helm-chart/Chart.yaml
@@ -13,15 +13,16 @@ type: application
icon: https://lightrun-public.s3.amazonaws.com/img/lightrun-logo.png

## kubeVersion constraint is required due to "seccompProfile" in the controller deployment
kubeVersion: ">= 1.19.0"
## The -0 suffix allows pre-release versions,
## which resolves GKE and EKS version strings
## Example of an EKS version: v1.28.9-eks-036c24b
kubeVersion: ">= 1.19.0-0"

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0 # Will be updated by CI pipeline



# ArtifactHub.io annotations
annotations:
artifacthub.io/crds: |
Expand All @@ -43,16 +44,14 @@ annotations:
serverHostname: app.lightrun.com
agentEnvVarName: JAVA_TOOL_OPTIONS
initContainer:
image: "lightruncom/k8s-operator-init-java-agent-linux:1.8.5-init.1"
image: "lightruncom/k8s-operator-init-java-agent-linux:latest"
sharedVolumeName: lightrun-agent-init
sharedVolumeMountPath: "/lightrun"
agentTags:
- operator
- example
- 1.8.3



artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Operator repo
Expand All @@ -63,8 +62,7 @@ annotations:
- name: Lightrun devops team
email: devopsdevops@lightrun.com
- name: LeonidP
email: leonidp@lightrun.com
email: leonidp@lightrun.com
artifacthub.io/operator: "true"
artifacthub.io/operatorCapabilities: Basic Install
artifacthub.io/prerelease: "false"
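The `kubeVersion` change in this chart is subtle enough to warrant a worked example. Under semver rules a version carrying a pre-release tag, as EKS and GKE version strings do, does not satisfy a plain `>= 1.19.0` constraint, so Helm would refuse to install; the `-0` suffix opens the constraint to pre-release versions. A minimal sketch using `github.com/Masterminds/semver/v3`, the same library Helm relies on for constraint checking:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// An EKS-style version string, taken from the chart comment above.
	eks := semver.MustParse("1.28.9-eks-036c24b")

	plain, err := semver.NewConstraint(">= 1.19.0")
	if err != nil {
		log.Fatal(err)
	}
	withZero, err := semver.NewConstraint(">= 1.19.0-0")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(plain.Check(eks))    // false: pre-release versions are excluded
	fmt.Println(withZero.Check(eks)) // true: -0 admits pre-release versions
}
```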

24 changes: 5 additions & 19 deletions helm-chart/templates/deployment.yaml
@@ -28,27 +28,13 @@ spec:
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=0
image: {{ .Values.controllerManager.kubeRbacProxy.image.repository }}:{{ .Values.controllerManager.kubeRbacProxy.image.tag }}
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources: {{- toYaml .Values.controllerManager.kubeRbacProxy.resources | nindent 10 }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --health-probe-bind-address={{ .Values.managerConfig.healthProbe.bindAddress }}
- --metrics-bind-address={{ .Values.managerConfig.metrics.bindAddress }}
- --leader-elect
- --zap-log-level={{ .Values.managerConfig.logLevel }}
{{- if .Values.managerConfig.profiler.bindAddress }}
- --pprof-bind-address={{ .Values.managerConfig.profiler.bindAddress }}
{{- end }}
command:
- /manager
image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag | default .Chart.AppVersion }}
34 changes: 10 additions & 24 deletions helm-chart/values.yaml
Expand Up @@ -2,20 +2,6 @@
controllerManager:
replicas: 1

## Kube proxy config
## In most cases you don't need to change those
kubeRbacProxy:
image:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.15.0
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi

## Controller image
manager:
image:
@@ -60,16 +46,16 @@ managerConfig:
logLevel: info

## Default values of the container inside pod. In most cases you don't need to change those
controllerManagerConfigYaml:
health:
healthProbeBindAddress: :8081
leaderElection:
leaderElect: true
resourceName: 5b425f09.lightrun.com
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
healthProbe:
bindAddress: ":8081"
metrics:
bindAddress: ":8080"
# -- Profiler is used for debugging and performance analysis
# It is disabled by default
# To enable it, set the bindAddress, similar to the metrics or health probe addresses
# Make sure to protect this endpoint, as it exposes sensitive information
profiler:
bindAddress: ""
# -- The operator can work in 2 scopes: cluster and namespaced
# Cluster scope gives the operator permission to watch and patch deployments across the whole cluster
# With namespaced scope, you need to provide a list of namespaces that the operator will be able to watch.
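Once the profiler is enabled by setting `managerConfig.profiler.bindAddress` (for example `":6060"`), the pprof endpoints are plain HTTP. A minimal sketch of pulling a heap profile, assuming that bind address and that the port has already been forwarded locally; the address and file name are illustrative:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// Assumes managerConfig.profiler.bindAddress was set to ":6060" and the
	// port was forwarded locally, e.g. kubectl port-forward <pod> 6060:6060.
	resp, err := http.Get("http://127.0.0.1:6060/debug/pprof/heap")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("heap.pb.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		log.Fatal(err)
	}
	log.Println("wrote heap.pb.gz; inspect with: go tool pprof heap.pb.gz")
}
```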