Skip to content

Commit

Permalink
keycloak
Browse files Browse the repository at this point in the history
  • Loading branch information
wangzheng422 committed Oct 14, 2024
1 parent 84cf441 commit 815cc67
Showing 1 changed file with 217 additions and 2 deletions.
219 changes: 217 additions & 2 deletions redhat/ocp4/4.15/2024.10.keycloak.performance.md
Original file line number Diff line number Diff line change
Expand Up @@ -1956,13 +1956,20 @@ apiVersion: v1
kind: Service
metadata:
  name: rhsso-service
  # the metadata key is "labels" (plural) — "label" is rejected by the API
  # server; the app label is what the monitoring selector matches on
  labels:
    app: rhsso
spec:
  selector:
    app: rhsso
  ports:
    # keycloak http endpoint
    - name: http
      protocol: TCP
      port: 8080
      targetPort: 8080
    # metrics endpoint scraped by prometheus
    - name: monitor
      protocol: TCP
      port: 12345
      targetPort: 12345
---
apiVersion: route.openshift.io/v1
kind: Route
Expand Down Expand Up @@ -2024,7 +2031,7 @@ oc exec -it $VAR_POD -n $VAR_PROJECT -- bash
export PATH=/opt/rhsso/bin:$PATH

ADMIN_PWD='password'
CLIENT_SECRET="09cd9699-3584-47ed-98f5-00553e4a7cb3"


# after enable http in keycloak, you can use http endpoint
# it is better to set session timeout for admin for 1 day :)
Expand Down Expand Up @@ -2096,4 +2103,212 @@ curl -X POST 'http://rhsso-service:8080/auth/realms/performance/protocol/openid-

Inject another script and run the user-initialization script locally on the rhsso pod. This is necessary because the management interface is bound to localhost, so it cannot be reached from outside the pod; and we cannot rebind the management interface to a public IP, because the server currently fails to start with that configuration.


## monitoring rhsso

```bash

cat << EOF > ${BASE_DIR}/data/install/enable-monitor.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    # turn on user-workload monitoring so our own ServiceMonitor is scraped
    enableUserWorkload: true
    # alertmanagerMain:
    #   enableUserAlertmanagerConfig: true
EOF

oc apply -f ${BASE_DIR}/data/install/enable-monitor.yaml

oc -n openshift-user-workload-monitoring get pod

# monitor rhsso
oc delete -n $VAR_PROJECT -f ${BASE_DIR}/data/install/rhsso-monitor.yaml

cat << EOF > ${BASE_DIR}/data/install/rhsso-monitor.yaml
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: rhsso
spec:
  endpoints:
    # scrape the "monitor" port of the rhsso-service every 5 seconds
    - interval: 5s
      path: /metrics
      port: monitor
      scheme: http
  namespaceSelector:
    matchNames:
      # use the project variable instead of a hard-coded namespace,
      # consistent with the rest of this document
      - $VAR_PROJECT
  selector:
    matchLabels:
      app: rhsso
# NOTE: a PodMonitor with equivalent podMetricsEndpoints could be used
# instead of this ServiceMonitor to scrape the pods directly.
EOF

oc apply -f ${BASE_DIR}/data/install/rhsso-monitor.yaml -n $VAR_PROJECT

```


## run with python job

```bash

CLIENT_SECRET="09cd9699-3584-47ed-98f5-00553e4a7cb3"
TARGET_URL="http://rhsso-service:8080/auth/realms/performance/protocol/openid-connect/token"

cat <<EOF > ${BASE_DIR}/data/install/performance_test.py
import requests
import time
import threading
from concurrent.futures import ThreadPoolExecutor
# secret and token endpoint are substituted by the shell when this heredoc is written
CLIENT_SECRET = "$CLIENT_SECRET"
URL = "$TARGET_URL"
HEADERS = {"Content-Type": "application/x-www-form-urlencoded"}
# load-test parameters: total pre-created user accounts and concurrent workers
num_users = 50000
num_threads = 10
# shared counters, guarded by lock; reset once a minute by the summary thread
success_count = 0
failure_count = 0
total_time = 0
lock = threading.Lock()
def make_request(start, end):
    """Loop forever, requesting a password-grant token for each user in [start, end).

    Updates the shared success/failure/time counters under the module lock.
    Never returns; intended to run inside a worker thread.
    """
    global success_count, failure_count, total_time
    while True:
        for i in range(start, end):
            # user names follow the pre-created pattern user-00001 .. user-NNNNN
            username = f"user-{i:05d}"
            data = {
                "client_id": "performance",
                "client_secret": CLIENT_SECRET,
                "username": username,
                "password": "password",
                "grant_type": "password"
            }
            start_time = time.time()
            try:
                # timeout keeps one stuck request from stalling this worker forever
                response = requests.post(URL, headers=HEADERS, data=data, timeout=30)
                elapsed_time = time.time() - start_time
                with lock:
                    total_time += elapsed_time
                    if response.status_code == 200:
                        success_count += 1
                    else:
                        failure_count += 1
                        print(f"Error for user {username}: {response.status_code} {response.text}")
            except requests.RequestException as e:
                # covers connection errors and the timeout above
                with lock:
                    failure_count += 1
                    print(f"RequestException for user {username}: {e}")
def print_summary():
    """Every 60 seconds, print cumulative stats and reset the shared counters.

    Both the read and the reset happen inside the lock, so increments made by
    worker threads between reading and zeroing the counters cannot be lost.
    Never returns; intended to run as a daemon thread.
    """
    global success_count, failure_count, total_time
    while True:
        time.sleep(60)
        with lock:
            total_requests = success_count + failure_count
            success_rate = (success_count / total_requests) * 100 if total_requests > 0 else 0
            avg_time = total_time / total_requests if total_requests > 0 else 0
            print(f"Summary (last minute): Success: {success_count}, Failure: {failure_count}, Success Rate: {success_rate:.2f}%, Avg Time: {avg_time:.2f}s")
            # reset under the lock so the next window starts from zero
            success_count = 0
            failure_count = 0
            total_time = 0
def print_secondly_summary():
    """Print a once-per-second snapshot of the shared counters.

    Read-only: unlike print_summary, this never resets the counters, so it
    shows totals accumulated since the last minute-level reset.
    Never returns; intended to run as a daemon thread.
    """
    global success_count, failure_count, total_time
    while True:
        time.sleep(1)
        with lock:
            snapshot_total = success_count + failure_count
            success_rate = (success_count / snapshot_total) * 100 if snapshot_total > 0 else 0
            avg_time = total_time / snapshot_total if snapshot_total > 0 else 0
            print(f"Second Summary: Success: {success_count}, Failure: {failure_count}, Success Rate: {success_rate:.2f}%, Avg Time: {avg_time:.2f}s")
if __name__ == "__main__":
    # reporter threads are daemons, so they die with the main process
    summary_thread = threading.Thread(target=print_summary, daemon=True)
    summary_thread.start()
    secondly_summary_thread = threading.Thread(target=print_secondly_summary, daemon=True)
    secondly_summary_thread.start()
    users_per_thread = num_users // num_threads
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        for i in range(num_threads):
            start = i * users_per_thread + 1
            # the last worker also takes the division remainder so no user id
            # is skipped when num_users is not a multiple of num_threads
            # (identical split to before when it divides evenly)
            if i == num_threads - 1:
                end = num_users + 1
            else:
                end = (i + 1) * users_per_thread + 1
            executor.submit(make_request, start, end)
        # workers never return; keep the main thread alive and responsive
        while True:
            time.sleep(1)
EOF

oc delete -n $VAR_PROJECT configmap performance-test-script
oc create configmap performance-test-script -n $VAR_PROJECT --from-file=${BASE_DIR}/data/install/performance_test.py


oc delete -n $VAR_PROJECT job performance-test-job
cat << EOF > ${BASE_DIR}/data/install/performance-test-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: performance-test-job
spec:
  template:
    spec:
      containers:
        - name: performance-test
          image: quay.io/wangzheng422/qimgs:rocky9-test-2024.06.17.v01
          # run the load-test script mounted from the configmap below
          command: ["/usr/bin/python3", "/scripts/performance_test.py"]
          volumeMounts:
            - name: script-volume
              mountPath: /scripts
      restartPolicy: Never
      volumes:
        - name: script-volume
          configMap:
            name: performance-test-script
  backoffLimit: 4
EOF

oc delete -n $VAR_PROJECT job performance-test-job
oc apply -f ${BASE_DIR}/data/install/performance-test-job.yaml -n $VAR_PROJECT

```


## set owner to 2

### run with 2 instance

### run with 20 instance

### run with 40 instance

### run with 80 instance


# end

0 comments on commit 815cc67

Please sign in to comment.