chore(scaletest/templates/scaletest-runner): fix dashboard command invocation, autoscale provisioners (#10177)

add --retries to kubectl cp
remove --count parameter from scaletest dashboard
scale provisioners up and down

Co-authored-by: Mathias Fredriksson <mafredri@gmail.com>
Cian Johnston 2023-10-10 15:33:55 +01:00 committed by GitHub
parent db8592fa93
commit 2506415def
4 changed files with 20 additions and 1 deletion


@@ -28,6 +28,12 @@ coder exp scaletest cleanup \
tee "${SCALETEST_RESULTS_DIR}/cleanup-${event}.txt"
end_phase
if [[ $event != prepare ]]; then
start_phase "Scaling down provisioners..."
maybedryrun "$DRY_RUN" kubectl scale deployment/coder-provisioner --replicas 1
maybedryrun "$DRY_RUN" kubectl rollout status deployment/coder-provisioner
fi
if [[ $event = manual ]]; then
echo 'Press any key to continue...'
read -s -r -n 1
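For context, maybedryrun is a helper defined elsewhere in the scaletest runner scripts and is not part of this diff; a minimal sketch of how such a dry-run wrapper typically behaves (the actual implementation in the runner's library script may differ):

# Hypothetical sketch of a dry-run wrapper: when the dry-run flag is set,
# only log the command; otherwise execute it as given.
maybedryrun() {
	local dry_run="$1"
	shift
	if [[ "${dry_run}" == 1 ]]; then
		echo "DRY RUN: $*"
	else
		"$@"
	fi
}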


@@ -271,10 +271,12 @@ fetch_coder_full() {
exit 1
fi
log "Fetching full Coder binary from ${pod}"
# We need --retries due to https://github.com/kubernetes/kubernetes/issues/60140 :(
maybedryrun "${DRY_RUN}" kubectl \
--namespace "${namespace}" \
cp \
--container coder \
--retries 10 \
"${pod}:/opt/coder" "${SCALETEST_CODER_BINARY}"
maybedryrun "${DRY_RUN}" chmod +x "${SCALETEST_CODER_BINARY}"
log "Full Coder binary downloaded to ${SCALETEST_CODER_BINARY}"


@ -51,3 +51,15 @@ log "Cleaning up from previous runs (if applicable)..."
"${SCRIPTS_DIR}/cleanup.sh" "prepare"
log "Preparation complete!"
PROVISIONER_REPLICA_COUNT="${SCALETEST_PARAM_CREATE_CONCURRENCY:-0}"
if [[ "${PROVISIONER_REPLICA_COUNT}" -eq 0 ]]; then
# TODO(Cian): what is a good default value here?
echo "Setting PROVISIONER_REPLICA_COUNT to 10 since SCALETEST_PARAM_CREATE_CONCURRENCY is 0"
PROVISIONER_REPLICA_COUNT=10
fi
log "Scaling up provisioners to ${PROVISIONER_REPLICA_COUNT}..."
maybedryrun "$DRY_RUN" kubectl scale deployment/coder-provisioner \
--replicas "${PROVISIONER_REPLICA_COUNT}"
log "Waiting for provisioners to scale up..."
maybedryrun "$DRY_RUN" kubectl rollout status deployment/coder-provisioner


@@ -49,7 +49,6 @@ for scenario in "${SCALETEST_PARAM_LOAD_SCENARIOS[@]}"; do
;;
"Dashboard Traffic")
coder exp scaletest dashboard \
--count "${SCALETEST_PARAM_NUM_WORKSPACES}" \
--timeout "${SCALETEST_PARAM_LOAD_SCENARIO_DASHBOARD_TRAFFIC_DURATION}m" \
--job-timeout "${SCALETEST_PARAM_LOAD_SCENARIO_DASHBOARD_TRAFFIC_DURATION}m30s" \
--output json:"${SCALETEST_RESULTS_DIR}/traffic-dashboard.json" \