diff --git a/changes/11307.test.md b/changes/11307.test.md new file mode 100644 index 00000000000..a8fbcf4f69e --- /dev/null +++ b/changes/11307.test.md @@ -0,0 +1 @@ +Add bai v2 CLI integration scenario suite covering vfolder, session, deployment, and RBAC flows diff --git a/scenarios/.gitignore b/scenarios/.gitignore new file mode 100644 index 00000000000..85c03dca676 --- /dev/null +++ b/scenarios/.gitignore @@ -0,0 +1,2 @@ +.state/ +.tmp/ diff --git a/scenarios/00_setup/inactive_keypair_access_keys.py b/scenarios/00_setup/inactive_keypair_access_keys.py new file mode 100644 index 00000000000..f1f5789c9ac --- /dev/null +++ b/scenarios/00_setup/inactive_keypair_access_keys.py @@ -0,0 +1,13 @@ +"""Print access_keys of inactive keypairs whose user_id == $TARGET_UID. Reads admin keypair search JSON from stdin.""" +import json +import os +import sys + +target = os.environ["TARGET_UID"] +try: + d = json.load(sys.stdin) +except Exception: + sys.exit(0) +for it in d.get("items", []): + if it.get("user_id") == target and not it.get("is_active"): + print(it["access_key"]) diff --git a/scenarios/00_setup/run.sh b/scenarios/00_setup/run.sh new file mode 100755 index 00000000000..9f7147b2aef --- /dev/null +++ b/scenarios/00_setup/run.sh @@ -0,0 +1,170 @@ +#!/usr/bin/env bash +# Setup: idempotently create scenario users (A, B), projects (A, B), memberships, +# vfolder host grants, and a model-card fixture in model-store. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." +source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "00_setup — create scenario users + projects" + +bai_config_session +bai_login_admin + +# Pre-clear login throttle: prior failed runs may have locked test users out. 
+log_step "Pre-clear login throttle counters in Redis" +REDIS_CONTAINER="$(docker compose -f docker-compose.halfstack.current.yml ps -q backendai-half-redis 2>/dev/null || true)" +if [[ -n "$REDIS_CONTAINER" ]]; then + for email in "$TEST_USER_A_EMAIL" "$TEST_USER_B_EMAIL"; do + docker exec "$REDIS_CONTAINER" redis-cli del "login_history_${email}" >/dev/null 2>&1 || true + done +else + log_warn "halfstack redis container not found; skipping throttle clear" +fi + +log_step "Verify domain '${TEST_DOMAIN}' exists" +./bai domain get "$TEST_DOMAIN" >/dev/null + +ensure_project() { + local name="$1" + local existing + existing="$(lookup_project_id "$name" || true)" + if [[ -n "$existing" ]]; then + printf '%s' "$existing"; return 0 + fi + log_step "Create project '${name}'" + local body + body=$(printf '{"name":"%s","domain_name":"%s","resource_policy":"%s","description":"scenario test project"}' \ + "$name" "$TEST_DOMAIN" "$TEST_PROJECT_RESOURCE_POLICY") + ./bai admin project create "$body" >/dev/null + sleep 0.2 + local pid + pid="$(lookup_project_id "$name")" + [[ -n "$pid" ]] || { log_error "project create succeeded but lookup failed for '${name}'"; return 1; } + printf '%s' "$pid" +} + +PROJECT_A_ID="$(ensure_project "$TEST_PROJECT_A_NAME")" +state_set project_a_id "$PROJECT_A_ID" +PROJECT_B_ID="$(ensure_project "$TEST_PROJECT_B_NAME")" +state_set project_b_id "$PROJECT_B_ID" +log_ok "projects: A=$PROJECT_A_ID B=$PROJECT_B_ID" + +ensure_user() { + local email="$1" username="$2" password="$3" + local existing + existing="$(lookup_user_id "$email" || true)" + if [[ -n "$existing" ]]; then + # Reactivate (may be soft-deleted) + reset password + re-enable keypairs. 
+ local body + body=$(printf '{"status":"active","password":"%s"}' "$password") + ./bai user update "$existing" "$body" >/dev/null 2>&1 || true + ./bai admin keypair search --limit 200 2>/dev/null | TARGET_UID="$existing" \ + python3 "$SCRIPT_DIR/inactive_keypair_access_keys.py" \ + | while read -r ak; do + [[ -z "$ak" ]] && continue + ./bai admin keypair update "{\"access_key\":\"$ak\",\"is_active\":true}" >/dev/null 2>&1 || true + done + printf '%s' "$existing"; return 0 + fi + log_step "Create user '${email}'" + ./bai admin user create \ + --email "$email" \ + --username "$username" \ + --password "$password" \ + --domain-name "$TEST_DOMAIN" \ + --status active \ + --role user \ + --resource-policy "$TEST_USER_RESOURCE_POLICY" >/dev/null + sleep 0.3 + local uid + uid="$(lookup_user_id "$email")" + [[ -n "$uid" ]] || { log_error "user create succeeded but lookup failed for '${email}'"; return 1; } + printf '%s' "$uid" +} + +USER_A_ID="$(ensure_user "$TEST_USER_A_EMAIL" "$TEST_USER_A_NAME" "$TEST_USER_A_PASSWORD")" +state_set user_a_id "$USER_A_ID" +USER_B_ID="$(ensure_user "$TEST_USER_B_EMAIL" "$TEST_USER_B_NAME" "$TEST_USER_B_PASSWORD")" +state_set user_b_id "$USER_B_ID" +log_ok "users: A=$USER_A_ID B=$USER_B_ID" + +# CLI for membership add isn't exposed; use the legacy GraphQL mutation. +add_user_to_project() { + local uid="$1" pid="$2" + log_step "Add user $uid to project $pid" + local query="mutation { modify_group(gid: \"${pid}\", props: {user_update_mode: \"add\", user_uuids: [\"${uid}\"]}) { ok msg } }" + local out; out="$(./bai gql "$query" 2>&1)" || true + printf '%s' "$out" | python3 "$SCN_PY/modify_group_ok.py" || log_warn "modify_group response: $out" +} + +add_user_to_project "$USER_A_ID" "$PROJECT_A_ID" +add_user_to_project "$USER_B_ID" "$PROJECT_B_ID" + +# Projects need allowed_vfolder_hosts populated before vfolders can be created +# bound to that host. Default is empty `{}`. 
+grant_host_to_project() { + local pid="$1" host="$2" + local perms='[\"create-vfolder\",\"modify-vfolder\",\"delete-vfolder\",\"mount-in-session\",\"upload-file\",\"download-file\",\"invite-others\",\"set-user-specific-permission\"]' + local hosts="{\\\"${host}\\\":${perms}}" + log_step "Grant project ${pid} access to host '${host}'" + local query="mutation { modify_group(gid: \"${pid}\", props: {allowed_vfolder_hosts: \"${hosts}\"}) { ok msg } }" + local out; out="$(./bai gql "$query" 2>&1)" || true + printf '%s' "$out" | python3 "$SCN_PY/modify_group_ok.py" || log_warn "host grant response: $out" +} + +grant_host_to_project "$PROJECT_A_ID" "$TEST_VFOLDER_HOST" +grant_host_to_project "$PROJECT_B_ID" "$TEST_VFOLDER_HOST" + +log_step "Locate 'model-store' project" +MODEL_STORE_ID="$(lookup_project_id "model-store" || true)" +if [[ -z "$MODEL_STORE_ID" ]]; then + log_warn "no 'model-store' project — scenarios 03/14 will fail until provisioned" +else + log_ok "model-store: $MODEL_STORE_ID" + state_set model_store_id "$MODEL_STORE_ID" + grant_host_to_project "$MODEL_STORE_ID" "$TEST_VFOLDER_HOST" + + FIXTURE_NAME="${SCENARIO_PREFIX}-model-card-fixture" + + log_step "Ensure model fixture vfolder '${FIXTURE_NAME}' in model-store" + FIXTURE_VFOLDER_ID="$(lookup_admin_vfolder_id "$FIXTURE_NAME" || true)" + if [[ -z "$FIXTURE_VFOLDER_ID" ]]; then + ./bai vfolder create \ + --name "$FIXTURE_NAME" \ + --usage-mode model \ + --group "$MODEL_STORE_ID" \ + --host "$TEST_VFOLDER_HOST" >/dev/null + sleep 0.3 + FIXTURE_VFOLDER_ID="$(lookup_admin_vfolder_id "$FIXTURE_NAME" || true)" + [[ -n "$FIXTURE_VFOLDER_ID" ]] || { log_error "fixture vfolder create succeeded but lookup failed"; exit 1; } + fi + state_set model_fixture_vfolder_id "$FIXTURE_VFOLDER_ID" + + log_step "Ensure model card '${FIXTURE_NAME}' registered" + FIXTURE_CARD_ID="$(lookup_card_id "$FIXTURE_NAME" || true)" + if [[ -z "$FIXTURE_CARD_ID" ]]; then + body=$(printf 
'{"name":"%s","vfolder_id":"%s","model_store_project_id":"%s"}' \ + "$FIXTURE_NAME" "$FIXTURE_VFOLDER_ID" "$MODEL_STORE_ID") + OUT="$(./bai admin model-card create "$body" 2>&1)" || log_warn "model-card create: $OUT" + sleep 0.3 + FIXTURE_CARD_ID="$(lookup_card_id "$FIXTURE_NAME" || true)" + [[ -n "$FIXTURE_CARD_ID" ]] || log_warn "model card not found after create — 03/14 may still fail" + fi + [[ -n "${FIXTURE_CARD_ID:-}" ]] && state_set model_fixture_card_id "$FIXTURE_CARD_ID" + log_ok "fixture vfolder=$FIXTURE_VFOLDER_ID card=${FIXTURE_CARD_ID:-}" +fi + +log_step "Verify each test user can log in" +bai_login_user_a +./bai my session search --limit 1 >/dev/null +bai_login_user_b +./bai my session search --limit 1 >/dev/null + +# Restore admin session for downstream interactive runs. +bai_login_admin + +scenario_end_ok diff --git a/scenarios/01_vfolder_lifecycle/run.sh b/scenarios/01_vfolder_lifecycle/run.sh new file mode 100755 index 00000000000..5d598090f69 --- /dev/null +++ b/scenarios/01_vfolder_lifecycle/run.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# 01: VFolder lifecycle — create → mkdir → ls → mv → rm → delete → purge. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "01_vfolder_lifecycle" + +bai_config_session +bai_login_admin +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" +[[ -n "$PROJECT_A_ID" ]] || { log_error "project A not found — run 00_setup first"; exit 1; } + +bai_login_user_a + +VF_NAME="${SCENARIO_PREFIX}-vf-lifecycle-$$" +log_step "Create user-owned vfolder '${VF_NAME}'" +./bai vfolder create --name "$VF_NAME" --host "$TEST_VFOLDER_HOST" --usage-mode general >/dev/null +VF_ID="$(lookup_my_vfolder_id "$VF_NAME")" +[[ -n "$VF_ID" ]] || { log_error "vfolder lookup failed after create"; exit 1; } +state_set vfolder_lifecycle_id "$VF_ID" +log_ok "vfolder: $VF_ID" + +log_step "mkdir data/inputs, data/outputs" +./bai vfolder mkdir "$VF_ID" data --parents --exist-ok >/dev/null +./bai vfolder mkdir "$VF_ID" data/inputs --exist-ok >/dev/null +./bai vfolder mkdir "$VF_ID" data/outputs --exist-ok >/dev/null + +log_step "ls /data must show inputs and outputs" +./bai vfolder ls "$VF_ID" data 2>&1 | python3 "$SCRIPT_DIR/verify_data_ls.py" \ + || { log_error "ls verification failed"; exit 1; } + +log_step "mv data/inputs → data/in" +./bai vfolder mv "$VF_ID" data/inputs data/in >/dev/null + +log_step "rm data/outputs" +./bai vfolder rm "$VF_ID" data/outputs --recursive >/dev/null + +log_step "delete + purge" +./bai vfolder delete "$VF_ID" >/dev/null +./bai vfolder purge "$VF_ID" >/dev/null + +log_step "Verify vfolder absent from my-search" +sleep 0.5 +RESIDUAL="$(lookup_my_vfolder_id "$VF_NAME" || true)" +[[ -z "$RESIDUAL" ]] || { log_error "vfolder still present after purge: $RESIDUAL"; exit 1; } + +scenario_end_ok diff --git a/scenarios/01_vfolder_lifecycle/verify_data_ls.py b/scenarios/01_vfolder_lifecycle/verify_data_ls.py new file mode 100644 index 00000000000..c9c3b987654 --- /dev/null +++ b/scenarios/01_vfolder_lifecycle/verify_data_ls.py @@ -0,0 +1,9 @@ +"""Assert that 'inputs' and 'outputs' appear in a 
`bai vfolder ls` response. Reads JSON from stdin.""" +import json +import sys + +d = json.load(sys.stdin) +items = d.get("items") or d.get("files") or [] +names = [it.get("name", it.get("path", "")) for it in items] if isinstance(items, list) else [] +assert any("inputs" in (n or "") for n in names), f"inputs/ not found in {names}" +assert any("outputs" in (n or "") for n in names), f"outputs/ not found in {names}" diff --git a/scenarios/02_session_lifecycle/run.sh b/scenarios/02_session_lifecycle/run.sh new file mode 100755 index 00000000000..16ccb3ce5d2 --- /dev/null +++ b/scenarios/02_session_lifecycle/run.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# 02: Compute session lifecycle — vfolder + interactive session + terminate. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." +source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "02_session_lifecycle" + +bai_config_session +bai_login_admin + +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" +[[ -n "$PROJECT_A_ID" ]] || { log_error "project A missing"; exit 1; } +IMAGE_ID="$(lookup_image_id "$TEST_IMAGE_NAME")" +[[ -n "$IMAGE_ID" ]] || { log_error "image not found: $TEST_IMAGE_NAME"; exit 1; } + +bai_login_user_a + +VF_NAME="${SCENARIO_PREFIX}-vf-session-$$" +log_step "Create mount-target vfolder '${VF_NAME}'" +./bai vfolder create --name "$VF_NAME" --host "$TEST_VFOLDER_HOST" >/dev/null +VF_ID="$(lookup_my_vfolder_id "$VF_NAME")" +[[ -n "$VF_ID" ]] || { log_error "vfolder create lookup failed"; exit 1; } +state_set session_vf_id "$VF_ID" + +SESSION_NAME="${SCENARIO_PREFIX}-sess-$$" +PAYLOAD_FILE="$SCENARIO_TMP_DIR/enqueue-${SESSION_NAME}.json" +session_payload "$SESSION_NAME" "$IMAGE_ID" "$PROJECT_A_ID" "$VF_ID" "/home/work/${VF_NAME}" > "$PAYLOAD_FILE" + +log_step "Enqueue session '${SESSION_NAME}' (mounting ${VF_NAME})" +ENQ_OUT="$(./bai session enqueue "@$PAYLOAD_FILE" 2>&1)" +SESSION_ID="$(printf '%s' 
"$ENQ_OUT" | session_id_from)" +[[ -n "$SESSION_ID" ]] || { log_error "Failed to extract session id"; echo "$ENQ_OUT" | head -c 2000 >&2; exit 1; } +state_set session_id "$SESSION_ID" +log_ok "session: $SESSION_ID" + +log_step "Wait for session to appear in my-search" +wait_session_status "$SESSION_ID" 20 1 \ + PENDING PREPARING PREPARED RUNNING TERMINATED CANCELLED ERROR >/dev/null \ + || { log_error "session never appeared in my-search"; exit 1; } + +log_step "Wait for session to settle (max 60s)" +FINAL_STATUS="$(wait_session_status "$SESSION_ID" 30 2 RUNNING TERMINATED CANCELLED ERROR || true)" +log_info "session settled at: ${FINAL_STATUS:-(unknown after timeout)}" + +log_step "Terminate session" +terminate_session "$SESSION_ID" + +log_step "Verify session terminated within 30s" +wait_session_status "$SESSION_ID" 15 2 TERMINATED CANCELLED NOT_FOUND >/dev/null \ + || log_warn "session did not reach TERMINATED within timeout" + +log_step "Cleanup mount vfolder" +./bai vfolder delete "$VF_ID" >/dev/null || true +./bai vfolder purge "$VF_ID" >/dev/null || true + +scenario_end_ok diff --git a/scenarios/03_model_card_deploy/run.sh b/scenarios/03_model_card_deploy/run.sh new file mode 100755 index 00000000000..c25b06df672 --- /dev/null +++ b/scenarios/03_model_card_deploy/run.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# 03: Model card → deployment via `model-card deploy` happy path. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "03_model_card_deploy" + +bai_config_session +bai_login_admin + +MODEL_STORE_ID="$(lookup_project_id "model-store")" +[[ -n "$MODEL_STORE_ID" ]] || { log_error "no 'model-store' project on this cluster"; exit 1; } + +log_step "Search model cards in model-store" +CARD_ID="$(./bai model-card project-search "$MODEL_STORE_ID" --limit 5 2>&1 | python3 "$SCN_PY/pick_first_id.py")" +[[ -n "$CARD_ID" ]] || { log_error "no model cards available"; exit 1; } +log_ok "card: $CARD_ID" + +./bai model-card get "$CARD_ID" >/dev/null + +log_step "List available revision presets" +PRESET_ID="$(./bai model-card available-presets "$CARD_ID" 2>&1 | python3 "$SCN_PY/pick_first_id.py")" +[[ -n "$PRESET_ID" ]] || { log_error "card has no revision presets"; exit 1; } + +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" + +log_step "Deploy model card to project A" +DEPLOY_OUT="$(./bai model-card deploy "$CARD_ID" \ + --project-id "$PROJECT_A_ID" \ + --revision-preset-id "$PRESET_ID" \ + --resource-group "$TEST_RESOURCE_GROUP" \ + --replicas 1 2>&1)" +DEPLOYMENT_ID="$(printf '%s' "$DEPLOY_OUT" | deployment_id_from)" +[[ -n "$DEPLOYMENT_ID" ]] || { log_error "could not extract deployment id: $DEPLOY_OUT"; exit 1; } +state_set deployment_id "$DEPLOYMENT_ID" +log_ok "deployment: $DEPLOYMENT_ID" + +log_step "Verify deployment via my deployment search" +sleep 1 +TARGET="$DEPLOYMENT_ID" ./bai my deployment search --limit 50 2>&1 \ + | TARGET="$DEPLOYMENT_ID" python3 "$SCN_PY/assert_id_in_search.py" \ + || { log_error "deployment not visible via my deployment search"; exit 1; } + +log_step "Delete deployment" +./bai deployment delete "$DEPLOYMENT_ID" >/dev/null + +scenario_end_ok diff --git a/scenarios/04_deployment_revision/check_terminal_status.py b/scenarios/04_deployment_revision/check_terminal_status.py new file mode 100644 index 00000000000..c2987f8a5df --- /dev/null +++ 
b/scenarios/04_deployment_revision/check_terminal_status.py @@ -0,0 +1,21 @@ +"""Verify deployment $TARGET in project-search response is in a terminal state. + +Exit 0 if status ∈ {STOPPED, DESTROYED, DELETED, TERMINATED, CANCELLED}, or if +the row is absent (also acceptable). Exit 1 otherwise. Reads search JSON from stdin. +""" +import json +import os +import sys + +target = os.environ["TARGET"] +TERM = {"STOPPED", "DESTROYED", "DELETED", "TERMINATED", "CANCELLED"} +for it in json.load(sys.stdin).get("items", []): + if it.get("id") == target: + st = (it.get("lifecycle") or {}).get("status") or it.get("status") or "" + if st in TERM: + print(f"terminal: {st}") + sys.exit(0) + print(f"NOT TERMINAL: {st}") + sys.exit(1) +print("absent (also acceptable)") +sys.exit(0) diff --git a/scenarios/04_deployment_revision/run.sh b/scenarios/04_deployment_revision/run.sh new file mode 100755 index 00000000000..ec4b714ae3c --- /dev/null +++ b/scenarios/04_deployment_revision/run.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# 04: Bare deployment + listing/update/delete; verify terminal status post-delete. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "04_deployment_revision" + +bai_config_session +bai_login_admin +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" +[[ -n "$PROJECT_A_ID" ]] || { log_error "project A missing"; exit 1; } + +bai_login_user_a + +DEP_NAME="${SCENARIO_PREFIX}-dep-$$" +log_step "Create bare deployment '${DEP_NAME}'" +DEPLOY_OUT="$(./bai deployment create \ + --name "$DEP_NAME" \ + --project-id "$PROJECT_A_ID" \ + --domain-name "$TEST_DOMAIN" \ + --desired-replicas 0 \ + --strategy ROLLING 2>&1)" +DEPLOYMENT_ID="$(printf '%s' "$DEPLOY_OUT" | deployment_id_from)" +[[ -n "$DEPLOYMENT_ID" ]] || { log_error "could not parse deployment id from: $DEPLOY_OUT"; exit 1; } +state_set bare_deployment_id "$DEPLOYMENT_ID" +log_ok "deployment: $DEPLOYMENT_ID" + +log_step "Get deployment + verify in project-search" +./bai deployment get "$DEPLOYMENT_ID" >/dev/null +TARGET="$DEPLOYMENT_ID" ./bai deployment project-search "$PROJECT_A_ID" --limit 50 2>&1 \ + | TARGET="$DEPLOYMENT_ID" python3 "$SCN_PY/assert_id_in_search.py" \ + || { log_error "deployment missing from project-search"; exit 1; } + +log_step "Bare deployment has zero revisions" +./bai deployment revision search "$DEPLOYMENT_ID" --limit 10 >/dev/null +./bai deployment revision current "$DEPLOYMENT_ID" >/dev/null 2>&1 || true + +log_step "Update deployment metadata" +./bai deployment update "$DEPLOYMENT_ID" --desired-replicas 0 >/dev/null + +log_step "Delete deployment" +./bai deployment delete "$DEPLOYMENT_ID" >/dev/null + +# Soft-delete: row stays visible. Status MUST be one of the terminal states. 
+log_step "Verify deployment status is terminal after delete" +sleep 1 +TARGET="$DEPLOYMENT_ID" ./bai deployment project-search "$PROJECT_A_ID" --limit 50 2>&1 \ + | TARGET="$DEPLOYMENT_ID" python3 "$SCRIPT_DIR/check_terminal_status.py" \ + || { log_error "deployment delete did not produce terminal status"; exit 1; } + +scenario_end_ok diff --git a/scenarios/05_teardown_verification/leaked_deployments.py b/scenarios/05_teardown_verification/leaked_deployments.py new file mode 100644 index 00000000000..7956ac06ff2 --- /dev/null +++ b/scenarios/05_teardown_verification/leaked_deployments.py @@ -0,0 +1,23 @@ +"""Print leaked deployments (name starts with $PREFIX, status not in terminal set). + +Prints lines of the form `<id> <name> [<status>]`. Reads my-search JSON from stdin. +""" +import json +import os +import sys + +prefix = os.environ["PREFIX"] +TERMINAL = {"STOPPED", "DESTROYED", "DELETED", "TERMINATED", "CANCELLED"} +try: + d = json.load(sys.stdin) +except Exception: + print("") + sys.exit(0) +left = [] +for it in d.get("items", []): + md = it.get("metadata") or {} + name = md.get("name") or it.get("name", "") + status = md.get("status") or it.get("status", "") + if name.startswith(prefix) and status not in TERMINAL: + left.append(f"{it['id']} {name} [{status}]") +print("\n".join(left)) diff --git a/scenarios/05_teardown_verification/leaked_sessions.py b/scenarios/05_teardown_verification/leaked_sessions.py new file mode 100644 index 00000000000..25a4c4496b6 --- /dev/null +++ b/scenarios/05_teardown_verification/leaked_sessions.py @@ -0,0 +1,16 @@ +"""Print leaked sessions (name starts with $PREFIX, status not TERMINATED/CANCELLED). + +Prints lines of the form `<id> <name> [<status>]`. Reads my-search JSON from stdin. 
+""" +import json +import os +import sys + +prefix = os.environ["PREFIX"] +left = [] +for it in json.load(sys.stdin).get("items", []): + name = (it.get("metadata") or {}).get("name") or it.get("name") or "" + status = (it.get("lifecycle") or {}).get("status") or it.get("status", "") + if name.startswith(prefix) and status not in ("TERMINATED", "CANCELLED"): + left.append(f"{it['id']} {name} [{status}]") +print("\n".join(left)) diff --git a/scenarios/05_teardown_verification/leaked_vfolders.py b/scenarios/05_teardown_verification/leaked_vfolders.py new file mode 100644 index 00000000000..89b3570ef7f --- /dev/null +++ b/scenarios/05_teardown_verification/leaked_vfolders.py @@ -0,0 +1,22 @@ +"""Print leaked vfolders (name starts with $PREFIX, status not in deleted set). + +Prints lines of the form `<id> <name>`. Reads my-search JSON from stdin. +""" +import json +import os +import sys + +prefix = os.environ["PREFIX"] +DELETED = { + "delete-pending", + "delete-ongoing", + "delete-complete", + "delete-error", + "delete-aborted", +} +left = [] +for it in json.load(sys.stdin).get("items", []): + name = (it.get("metadata") or {}).get("name") or it.get("name", "") + if name.startswith(prefix) and it.get("status") not in DELETED: + left.append(f"{it['id']} {name}") +print("\n".join(left)) diff --git a/scenarios/05_teardown_verification/run.sh b/scenarios/05_teardown_verification/run.sh new file mode 100755 index 00000000000..2add43705b9 --- /dev/null +++ b/scenarios/05_teardown_verification/run.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# 05: Verify scenarios 01–04 left no orphaned user-A resources. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "05_teardown_verification" + +bai_config_session +bai_login_admin +bai_login_user_a + +FAIL=0 + +log_step "Check for leaked sessions" +LEFT_SESSIONS="$(./bai my session search --limit 200 2>&1 | PREFIX="${SCENARIO_PREFIX}-" python3 "$SCRIPT_DIR/leaked_sessions.py")" +if [[ -n "$LEFT_SESSIONS" ]]; then + log_error "leaked sessions:"; echo "$LEFT_SESSIONS" >&2; FAIL=1 +fi + +log_step "Check for leaked vfolders" +LEFT_VFS="$(./bai vfolder my-search --limit 200 2>&1 | PREFIX="${SCENARIO_PREFIX}-vf-" python3 "$SCRIPT_DIR/leaked_vfolders.py")" +if [[ -n "$LEFT_VFS" ]]; then + log_warn "vfolders still listed (may be in trash, will be purged in 99_teardown):" + echo "$LEFT_VFS" >&2 +fi + +log_step "Check for leaked deployments" +LEFT_DEPS="$(./bai my deployment search --limit 200 2>&1 | PREFIX="${SCENARIO_PREFIX}-dep-" python3 "$SCRIPT_DIR/leaked_deployments.py")" || LEFT_DEPS="" +if [[ -n "$LEFT_DEPS" ]]; then + log_error "leaked deployments:"; echo "$LEFT_DEPS" >&2; FAIL=1 +fi + +(( FAIL == 0 )) || { log_error "teardown verification failed"; exit 1; } + +scenario_end_ok diff --git a/scenarios/06_multi_user_access/run.sh b/scenarios/06_multi_user_access/run.sh new file mode 100755 index 00000000000..1ff6c9c1a2d --- /dev/null +++ b/scenarios/06_multi_user_access/run.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# 06: User B cannot see user A's vfolder via my-search or by id. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "06_multi_user_access" + +bai_config_session +bai_login_admin +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" + +bai_login_user_a + +VF_NAME="${SCENARIO_PREFIX}-vf-multiuser-$$" +log_step "user A: create vfolder '${VF_NAME}'" +./bai vfolder create --name "$VF_NAME" --host "$TEST_VFOLDER_HOST" >/dev/null +VF_ID="$(lookup_my_vfolder_id "$VF_NAME")" +[[ -n "$VF_ID" ]] || { log_error "lookup failed"; exit 1; } +log_ok "vfolder: $VF_ID" + +bai_login_user_b + +log_step "user B: my-search must NOT contain user A's vfolder" +./bai vfolder my-search --limit 200 2>&1 \ + | TARGET="$VF_ID" python3 "$SCN_PY/check_no_id.py" \ + || { log_error "isolation broken: user B can see user A's vfolder"; exit 1; } + +log_step "user B: direct vfolder get by id should fail" +expect_fail "user B accessing user A's vfolder by id" \ + ./bai vfolder get "$VF_ID" + +bai_login_user_a +log_step "user A: cleanup" +./bai vfolder delete "$VF_ID" >/dev/null || true +./bai vfolder purge "$VF_ID" >/dev/null || true + +scenario_end_ok diff --git a/scenarios/07_vfolder_invite_clone/run.sh b/scenarios/07_vfolder_invite_clone/run.sh new file mode 100755 index 00000000000..25dfc7154cf --- /dev/null +++ b/scenarios/07_vfolder_invite_clone/run.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# 07: vfolder clone — cloneable=true → both src and dst readable, both contain populated dir. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "07_vfolder_invite_clone" + +bai_config_session +bai_login_admin +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" + +bai_login_user_a + +SRC_NAME="${SCENARIO_PREFIX}-vf-cloneable-src-$$" +DST_NAME="${SCENARIO_PREFIX}-vf-cloneable-dst-$$" + +log_step "Create cloneable vfolder '${SRC_NAME}' + populate /shared" +./bai vfolder create --name "$SRC_NAME" --host "$TEST_VFOLDER_HOST" --cloneable >/dev/null +SRC_ID="$(lookup_my_vfolder_id "$SRC_NAME")" +[[ -n "$SRC_ID" ]] || { log_error "src lookup failed"; exit 1; } +./bai vfolder mkdir "$SRC_ID" shared --exist-ok >/dev/null + +log_step "Clone src → '${DST_NAME}'" +./bai vfolder clone "$SRC_ID" --name "$DST_NAME" --host "$TEST_VFOLDER_HOST" >/dev/null +DST_ID="$(lookup_my_vfolder_id "$DST_NAME")" +[[ -n "$DST_ID" ]] || { log_error "cloned vfolder lookup failed"; exit 1; } + +log_step "Both src and dst must contain /shared" +./bai vfolder ls "$SRC_ID" / 2>&1 | grep -q "shared" \ + || { log_error "src missing /shared after clone"; exit 1; } +./bai vfolder ls "$DST_ID" / 2>&1 | grep -q "shared" \ + || { log_error "dst missing /shared after clone"; exit 1; } + +log_step "Cleanup" +./bai vfolder delete "$SRC_ID" >/dev/null +./bai vfolder purge "$SRC_ID" >/dev/null +./bai vfolder delete "$DST_ID" >/dev/null +./bai vfolder purge "$DST_ID" >/dev/null + +scenario_end_ok diff --git a/scenarios/08_cross_project_isolation/assert_listing.py b/scenarios/08_cross_project_isolation/assert_listing.py new file mode 100644 index 00000000000..b18fda4ef69 --- /dev/null +++ b/scenarios/08_cross_project_isolation/assert_listing.py @@ -0,0 +1,13 @@ +"""Assert vfolder $EXPECT is in project-search items, $FORBID is not. + +Reads project-search JSON from stdin. Exits non-zero with AssertionError on mismatch. 
+""" +import json +import os +import sys + +ids = {it["id"] for it in json.load(sys.stdin).get("items", [])} +expect = os.environ["EXPECT"] +forbid = os.environ["FORBID"] +assert expect in ids, f"{expect} missing from listing" +assert forbid not in ids, f"{forbid} unexpectedly in listing" diff --git a/scenarios/08_cross_project_isolation/run.sh b/scenarios/08_cross_project_isolation/run.sh new file mode 100755 index 00000000000..f9954f2675b --- /dev/null +++ b/scenarios/08_cross_project_isolation/run.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# 08: Cross-project isolation — project-search never crosses project boundaries. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." +source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "08_cross_project_isolation" + +bai_config_session +bai_login_admin +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" +PROJECT_B_ID="$(state_get project_b_id || lookup_project_id "$TEST_PROJECT_B_NAME")" +[[ -n "$PROJECT_A_ID" && -n "$PROJECT_B_ID" ]] || { log_error "missing projects"; exit 1; } + +VFA_NAME="${SCENARIO_PREFIX}-vf-isoA-$$" +VFB_NAME="${SCENARIO_PREFIX}-vf-isoB-$$" + +log_step "admin: create project-A vfolder + project-B vfolder" +./bai vfolder create --name "$VFA_NAME" --host "$TEST_VFOLDER_HOST" --group "$PROJECT_A_ID" >/dev/null +VFA_ID="$(lookup_project_vfolder_id "$PROJECT_A_ID" "$VFA_NAME")" +[[ -n "$VFA_ID" ]] || { log_error "VFA lookup failed"; exit 1; } +./bai vfolder create --name "$VFB_NAME" --host "$TEST_VFOLDER_HOST" --group "$PROJECT_B_ID" >/dev/null +VFB_ID="$(lookup_project_vfolder_id "$PROJECT_B_ID" "$VFB_NAME")" +[[ -n "$VFB_ID" ]] || { log_error "VFB lookup failed"; exit 1; } +log_ok "VFA: $VFA_ID, VFB: $VFB_ID" + +log_step "project-search(A) lists VFA but NOT VFB" +EXPECT="$VFA_ID" FORBID="$VFB_ID" ./bai vfolder project-search "$PROJECT_A_ID" --limit 200 2>&1 \ + | EXPECT="$VFA_ID" FORBID="$VFB_ID" python3 
"$SCRIPT_DIR/assert_listing.py" \ + || { log_error "project-search(A) isolation failed"; exit 1; } + +log_step "project-search(B) lists VFB but NOT VFA" +EXPECT="$VFB_ID" FORBID="$VFA_ID" ./bai vfolder project-search "$PROJECT_B_ID" --limit 200 2>&1 \ + | EXPECT="$VFB_ID" FORBID="$VFA_ID" python3 "$SCRIPT_DIR/assert_listing.py" \ + || { log_error "project-search(B) isolation failed"; exit 1; } + +bai_login_user_a + +log_step "user A: project-search(B) must NOT reveal VFB" +A_PB_OUT="$(./bai vfolder project-search "$PROJECT_B_ID" --limit 200 2>&1 || true)" +echo "$A_PB_OUT" | TARGET="$VFB_ID" python3 "$SCN_PY/check_no_id.py" \ + || { log_error "user A leaked VFB via project-search(B)"; exit 1; } + +bai_login_admin +log_step "Cleanup" +./bai vfolder delete "$VFA_ID" >/dev/null || true +./bai vfolder purge "$VFA_ID" >/dev/null || true +./bai vfolder delete "$VFB_ID" >/dev/null || true +./bai vfolder purge "$VFB_ID" >/dev/null || true + +scenario_end_ok diff --git a/scenarios/09_vfolder_mounted_delete/run.sh b/scenarios/09_vfolder_mounted_delete/run.sh new file mode 100755 index 00000000000..42ba13f1861 --- /dev/null +++ b/scenarios/09_vfolder_mounted_delete/run.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# 09: Mounted vfolder cannot be deleted; delete succeeds once session ends. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "09_vfolder_mounted_delete" + +bai_config_session +bai_login_admin +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" +[[ -n "$PROJECT_A_ID" ]] || { log_error "project A missing"; exit 1; } +IMAGE_ID="$(lookup_image_id "$TEST_IMAGE_NAME")" +[[ -n "$IMAGE_ID" ]] || { log_error "image not found"; exit 1; } + +bai_login_user_a + +VF_NAME="${SCENARIO_PREFIX}-vf-mountlock-$$" +SESSION_NAME="${SCENARIO_PREFIX}-sess-mountlock-$$" + +log_step "Create vfolder '${VF_NAME}'" +./bai vfolder create --name "$VF_NAME" --host "$TEST_VFOLDER_HOST" >/dev/null +VF_ID="$(lookup_my_vfolder_id "$VF_NAME")" +[[ -n "$VF_ID" ]] || { log_error "vfolder lookup failed"; exit 1; } + +PAYLOAD_FILE="$SCENARIO_TMP_DIR/enqueue-${SESSION_NAME}.json" +session_payload "$SESSION_NAME" "$IMAGE_ID" "$PROJECT_A_ID" "$VF_ID" "/home/work/${VF_NAME}" > "$PAYLOAD_FILE" + +log_step "Enqueue session mounting the vfolder" +ENQ_OUT="$(./bai session enqueue "@$PAYLOAD_FILE" 2>&1)" +SESSION_ID="$(printf '%s' "$ENQ_OUT" | session_id_from)" +[[ -n "$SESSION_ID" ]] || { log_error "enqueue failed"; echo "$ENQ_OUT" >&2; exit 1; } +log_ok "session: $SESSION_ID" + +log_step "Wait for session to register the mount (max 60s)" +MOUNTED_STATUS="$(wait_session_status "$SESSION_ID" 30 2 PENDING PREPARING PREPARED RUNNING || true)" +case "$MOUNTED_STATUS" in + PENDING|PREPARING|PREPARED|RUNNING) : ;; + TERMINATED|CANCELLED|ERROR) log_error "session ended too early ($MOUNTED_STATUS); cannot test mount lock"; exit 1 ;; + *) log_error "session never reached mounted state (last: $MOUNTED_STATUS)"; exit 1 ;; +esac +log_ok "session at $MOUNTED_STATUS — vfolder is reserved" + +# Core assertion: delete on a mounted vfolder MUST be rejected. 
"""Print status of vfolder $VID in my-search response (or 'NOT_FOUND'). Reads JSON from stdin."""
import json
import os
import sys


def resolve_status(doc: dict, vid: str) -> str:
    """Return the status of the item whose id equals *vid*.

    Returns 'UNKNOWN' when the matching item has a missing/empty status,
    and 'NOT_FOUND' when no item in ``doc["items"]`` matches.
    """
    for item in doc.get("items", []):
        if item.get("id") == vid:
            # Empty-string / null statuses collapse to UNKNOWN so callers
            # never have to match on "".
            return item.get("status") or "UNKNOWN"
    return "NOT_FOUND"


def main() -> None:
    # $VID is mandatory; a missing variable is a caller bug, so let KeyError
    # surface (same as the original top-level script).
    print(resolve_status(json.load(sys.stdin), os.environ["VID"]))


if __name__ == "__main__":
    main()
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "10_vfolder_cloneable_false" + +bai_config_session +bai_login_admin +bai_login_user_a + +SRC_NAME="${SCENARIO_PREFIX}-vf-noclone-src-$$" +DST_NAME="${SCENARIO_PREFIX}-vf-noclone-dst-$$" + +log_step "Create non-cloneable vfolder '${SRC_NAME}'" +./bai vfolder create --name "$SRC_NAME" --host "$TEST_VFOLDER_HOST" >/dev/null +SRC_ID="$(lookup_my_vfolder_id "$SRC_NAME")" +[[ -n "$SRC_ID" ]] || { log_error "src lookup failed"; exit 1; } + +log_step "Clone must be rejected" +expect_fail "clone of non-cloneable vfolder" \ + ./bai vfolder clone "$SRC_ID" --name "$DST_NAME" --host "$TEST_VFOLDER_HOST" + +DST_FOUND="$(lookup_my_vfolder_id "$DST_NAME" || true)" +if [[ -n "$DST_FOUND" ]]; then + log_error "BUG: clone produced ${DST_FOUND} despite src.cloneable=false" + ./bai vfolder delete "$DST_FOUND" >/dev/null 2>&1 || true + ./bai vfolder purge "$DST_FOUND" >/dev/null 2>&1 || true + ./bai vfolder delete "$SRC_ID" >/dev/null 2>&1 || true + ./bai vfolder purge "$SRC_ID" >/dev/null 2>&1 || true + exit 1 +fi + +log_step "Cleanup" +./bai vfolder delete "$SRC_ID" >/dev/null +./bai vfolder purge "$SRC_ID" >/dev/null + +scenario_end_ok diff --git a/scenarios/11_vfolder_bulk_ops/run.sh b/scenarios/11_vfolder_bulk_ops/run.sh new file mode 100755 index 00000000000..333b104b906 --- /dev/null +++ b/scenarios/11_vfolder_bulk_ops/run.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# 11: bulk-delete + bulk-purge across multiple vfolders in one call. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
"""Verify every id in $TARGETS (comma-separated) appears in my-search with a deleted-state status.

Reads my-search JSON from stdin. Prints diagnostics to stdout; exits 1 on any miss.
"""
import json
import os
import sys

# Every vfolder status that counts as "deletion requested, in progress, or done".
DELETED_STATES = frozenset({
    "delete-pending",
    "delete-ongoing",
    "delete-complete",
    "delete-error",
    "delete-aborted",
})


def check_all_deleted(doc, targets):
    """Return ``(missing_ids, not_deleted_ids)`` for *targets* against a my-search payload.

    ``missing_ids``: target ids absent from the listing entirely.
    ``not_deleted_ids``: target ids present but not in a deleted state.
    Both lists are sorted so diagnostics are deterministic across runs.
    """
    seen = {
        item["id"]: item.get("status")
        for item in doc.get("items", [])
        if item.get("id") in targets
    }
    missing = sorted(targets - seen.keys())
    not_deleted = sorted(vid for vid, status in seen.items() if status not in DELETED_STATES)
    return missing, not_deleted


def main():
    targets = set(os.environ["TARGETS"].split(","))
    missing, not_deleted = check_all_deleted(json.load(sys.stdin), targets)
    if missing:
        print("MISSING_FROM_LIST", ",".join(missing))
    if not_deleted:
        print("NOT_IN_DELETED_STATE", ",".join(not_deleted))
    sys.exit(0 if not (missing or not_deleted) else 1)


if __name__ == "__main__":
    main()
round-trip via storage-proxy. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." +source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "12_vfolder_file_io" + +bai_config_session +bai_login_admin +bai_login_user_a + +VF_NAME="${SCENARIO_PREFIX}-vf-fileio-$$" +LOCAL_FILE="$SCENARIO_TMP_DIR/payload-${VF_NAME}.txt" +DOWNLOAD_FILE="$SCENARIO_TMP_DIR/payload-${VF_NAME}.downloaded" +REMOTE_NAME="$(basename "$LOCAL_FILE")" + +python3 "$SCRIPT_DIR/gen_payload.py" > "$LOCAL_FILE" +SIZE="$(wc -c < "$LOCAL_FILE" | tr -d ' ')" + +log_step "Create vfolder '${VF_NAME}' (payload ${SIZE} bytes)" +./bai vfolder create --name "$VF_NAME" --host "$TEST_VFOLDER_HOST" >/dev/null +VF_ID="$(lookup_my_vfolder_id "$VF_NAME")" +[[ -n "$VF_ID" ]] || { log_error "vfolder lookup failed"; exit 1; } + +log_step "Get TUS upload token" +UP_RESP="$(./bai vfolder upload "$VF_ID" "$LOCAL_FILE" 2>&1)" +UP_TOKEN="$(printf '%s' "$UP_RESP" | FIELD=token python3 "$SCN_PY/print_json_field.py")" +UP_URL="$(printf '%s' "$UP_RESP" | FIELD=url python3 "$SCN_PY/print_json_field.py")" +[[ -n "$UP_TOKEN" && -n "$UP_URL" ]] || { log_error "missing upload token/url: $UP_RESP"; exit 1; } + +log_step "PATCH payload via TUS" +HTTP_CODE="$(curl -s -o /dev/null -w '%{http_code}' \ + -X PATCH "${UP_URL}?token=${UP_TOKEN}" \ + -H "Tus-Resumable: 1.0.0" \ + -H "Upload-Offset: 0" \ + -H "Content-Type: application/offset+octet-stream" \ + --data-binary "@${LOCAL_FILE}")" +[[ "$HTTP_CODE" == "204" ]] || { log_error "TUS PATCH failed: HTTP $HTTP_CODE"; exit 1; } + +# Storage proxy rejects '/' or '.' as path; use empty string for root. 
"""Verify file $NAME exists in `bai vfolder ls` output with size $SIZE bytes.

Reads ls JSON from stdin. Exits 0 on match, 1 on size mismatch or missing.
"""
import json
import os
import sys


def extract_entries(doc):
    """Normalize the various ls payload shapes into a list of entry dicts.

    Accepts a bare JSON array, ``{"items": [...]}``, ``{"files": [...]}``,
    or a nested ``{"items": {"items": [...]}}`` wrapper.
    """
    if isinstance(doc, list):
        # Fix: the original called doc.get(...) unconditionally and crashed
        # with AttributeError when the CLI emitted a bare JSON array.
        return doc
    entries = doc.get("items") or doc.get("files") or doc
    if isinstance(entries, dict):
        entries = entries.get("items") or []
    return entries


def find_result(doc, target, expected):
    """Return 'OK', a 'SIZE_MISMATCH ...' diagnostic, or 'NOT_FOUND'."""
    for entry in extract_entries(doc):
        name = entry.get("name") or entry.get("filename") or ""
        if name == target:
            size = entry.get("size")
            # Sizes may arrive as strings from the CLI; compare numerically.
            if size is None or int(size) != expected:
                return f"SIZE_MISMATCH expected={expected} got={size}"
            return "OK"
    return "NOT_FOUND"


def main():
    result = find_result(json.load(sys.stdin), os.environ["NAME"], int(os.environ["SIZE"]))
    print(result)
    sys.exit(0 if result == "OK" else 1)


if __name__ == "__main__":
    main()
"""Print deployment endpoint URL from a `deployment get` response, if populated.

Reads JSON from stdin. Prints nothing (and exits 0) if URL is absent or 'null'.
"""
import json
import sys


def extract_url(doc):
    """Return the populated endpoint URL, or '' when absent or the literal 'null'.

    Checks both snake_case and camelCase ``network_access`` wrappers, then a
    top-level ``endpoint_url`` fallback.
    """
    access = doc.get("network_access") or doc.get("networkAccess") or {}
    url = access.get("endpoint_url") or doc.get("endpoint_url") or ""
    # Some responses serialize an unset URL as the string "null"/"NULL".
    if url and url.lower() != "null":
        return url
    return ""


def main():
    try:
        doc = json.load(sys.stdin)
    except Exception:
        # Non-JSON on stdin (CLI error text): emit nothing, exit 0, so the
        # caller's polling loop simply retries.
        sys.exit(0)
    url = extract_url(doc)
    if url:
        print(url)


if __name__ == "__main__":
    main()
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "14_deployment_endpoint_serve" + +bai_config_session +bai_login_admin + +MODEL_STORE_ID="$(lookup_project_id "model-store")" +[[ -n "$MODEL_STORE_ID" ]] || { log_error "no 'model-store' project"; exit 1; } + +CARD_ID="$(./bai model-card project-search "$MODEL_STORE_ID" --limit 5 2>&1 | python3 "$SCN_PY/pick_first_id.py")" +[[ -n "$CARD_ID" ]] || { log_error "no model cards available"; exit 1; } + +PRESET_ID="$(./bai model-card available-presets "$CARD_ID" 2>&1 | python3 "$SCN_PY/pick_first_id.py")" +[[ -n "$PRESET_ID" ]] || { log_error "no presets available"; exit 1; } + +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" + +log_step "Deploy model card → project A" +DEPLOY_OUT="$(./bai model-card deploy "$CARD_ID" \ + --project-id "$PROJECT_A_ID" \ + --revision-preset-id "$PRESET_ID" \ + --resource-group "$TEST_RESOURCE_GROUP" \ + --replicas 1 2>&1)" +DEPLOYMENT_ID="$(printf '%s' "$DEPLOY_OUT" | deployment_id_from)" +[[ -n "$DEPLOYMENT_ID" ]] || { log_error "deploy failed: $DEPLOY_OUT"; exit 1; } +log_ok "deployment: $DEPLOYMENT_ID" + +trap '{ log_step "Cleanup"; ./bai deployment delete "$DEPLOYMENT_ID" >/dev/null 2>&1 || true; }' EXIT + +log_step "Wait for endpoint_url to populate (max 90s)" +ENDPOINT_URL="" +for _ in {1..45}; do + URL_RAW="$(./bai deployment get "$DEPLOYMENT_ID" 2>/dev/null | python3 "$SCRIPT_DIR/endpoint_url.py" || true)" + if [[ -n "$URL_RAW" ]]; then ENDPOINT_URL="$URL_RAW"; break; fi + sleep 2 +done +[[ -n "$ENDPOINT_URL" ]] || { log_error "endpoint_url never populated within 90s"; exit 1; } +log_ok "endpoint_url: $ENDPOINT_URL" + +log_step "HTTP probe endpoint" +HTTP_CODE="$(curl -s -o /dev/null -w '%{http_code}' --max-time 15 "$ENDPOINT_URL" || echo "000")" +[[ "$HTTP_CODE" != "000" ]] || { log_error "endpoint URL not routable (curl failed)"; exit 1; } +log_ok "endpoint reachable (HTTP $HTTP_CODE)" + +scenario_end_ok diff --git 
"""Print count of sessions whose status is not in TERMINATED/CANCELLED/ERROR.

Reads my-search JSON from stdin.
"""
import json
import sys

# Statuses that mean the session no longer consumes a concurrency slot.
TERMINAL_STATES = frozenset({"TERMINATED", "CANCELLED", "ERROR"})


def count_active(doc):
    """Count items whose (lifecycle.)status is set and non-terminal.

    Items with an empty/missing status are NOT counted (matches the original
    ``if s and s not in TERM`` check).
    """
    active = 0
    for item in doc.get("items", []):
        # Newer payloads nest status under "lifecycle"; fall back to flat "status".
        status = (item.get("lifecycle") or {}).get("status") or item.get("status", "")
        if status and status not in TERMINAL_STATES:
            active += 1
    return active


def main():
    print(count_active(json.load(sys.stdin)))


if __name__ == "__main__":
    main()
+""" +import json +import os +import sys + +target = os.environ["TARGET_UID"] +for it in json.load(sys.stdin).get("items", []): + if it.get("user_id") == target and it.get("is_active"): + print(it["access_key"]) + break diff --git a/scenarios/15_session_concurrency_cap/run.sh b/scenarios/15_session_concurrency_cap/run.sh new file mode 100755 index 00000000000..679d7a0e0de --- /dev/null +++ b/scenarios/15_session_concurrency_cap/run.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash +# 15: keypair resource-policy enforces max_concurrent_sessions=1. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." +source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "15_session_concurrency_cap" + +bai_config_session +bai_login_admin + +PROJECT_A_ID="$(state_get project_a_id || lookup_project_id "$TEST_PROJECT_A_NAME")" +[[ -n "$PROJECT_A_ID" ]] || { log_error "project A missing"; exit 1; } +USER_A_ID="$(state_get user_a_id || lookup_user_id "$TEST_USER_A_EMAIL")" +[[ -n "$USER_A_ID" ]] || { log_error "user A missing"; exit 1; } + +log_step "Locate user A's primary keypair + record current resource-policy" +KP_JSON="$(./bai admin keypair search --limit 200 2>/dev/null)" +USER_A_AK="$(printf '%s' "$KP_JSON" | TARGET_UID="$USER_A_ID" python3 "$SCRIPT_DIR/find_user_keypair.py")" +[[ -n "$USER_A_AK" ]] || { log_error "no active keypair for user A"; exit 1; } +ORIGINAL_POLICY="$(printf '%s' "$KP_JSON" | AK="$USER_A_AK" python3 "$SCRIPT_DIR/find_keypair_policy.py")" +log_ok "keypair $USER_A_AK (original policy: ${ORIGINAL_POLICY})" + +POLICY_NAME="${SCENARIO_PREFIX}-cap1-$$" + +cleanup() { + log_step "Cleanup: restore keypair → ${ORIGINAL_POLICY}, delete temp policy" + ./bai admin keypair update "{\"access_key\":\"${USER_A_AK}\",\"resource_policy\":\"${ORIGINAL_POLICY}\"}" >/dev/null 2>&1 || true + ./bai admin resource-policy keypair delete "$POLICY_NAME" >/dev/null 2>&1 || true + [[ -n "${SESS1_ID:-}" ]] && terminate_session 
"$SESS1_ID" + [[ -n "${SESS2_ID:-}" ]] && terminate_session "$SESS2_ID" +} +trap cleanup EXIT + +log_step "Create temp policy '${POLICY_NAME}' (max_concurrent_sessions=1)" +./bai admin resource-policy keypair create \ + --name "$POLICY_NAME" \ + --default-for-unspecified UNLIMITED \ + --max-concurrent-sessions 1 \ + --max-containers-per-session 1 \ + --idle-timeout 3600 \ + --max-concurrent-sftp-sessions 1 \ + --total-resource-slots '[{"resource_type":"cpu","quantity":"100"},{"resource_type":"mem","quantity":"107374182400"}]' \ + --allowed-vfolder-hosts '[{"host":"local:volume1","permissions":["create-vfolder","modify-vfolder","delete-vfolder","mount-in-session","upload-file","download-file","invite-others","set-user-specific-permission"]}]' >/dev/null + +log_step "Reassign user A's keypair to '${POLICY_NAME}'" +./bai admin keypair update "{\"access_key\":\"${USER_A_AK}\",\"resource_policy\":\"${POLICY_NAME}\"}" >/dev/null + +# Re-login as user A so the new policy applies to subsequent calls. +bai_login_user_a +bai_login_admin +IMAGE_ID="$(lookup_image_id "$TEST_IMAGE_NAME")" +[[ -n "$IMAGE_ID" ]] || { log_error "image not found"; exit 1; } +bai_login_user_a + +S1_NAME="${SCENARIO_PREFIX}-cap-s1-$$" +S2_NAME="${SCENARIO_PREFIX}-cap-s2-$$" +P1="$SCENARIO_TMP_DIR/${S1_NAME}.json" +P2="$SCENARIO_TMP_DIR/${S2_NAME}.json" +session_payload "$S1_NAME" "$IMAGE_ID" "$PROJECT_A_ID" > "$P1" +session_payload "$S2_NAME" "$IMAGE_ID" "$PROJECT_A_ID" > "$P2" + +log_step "Enqueue session #1 (should succeed and hold the slot)" +ENQ1="$(./bai session enqueue "@$P1" 2>&1)" +SESS1_ID="$(printf '%s' "$ENQ1" | session_id_from)" +[[ -n "$SESS1_ID" ]] || { log_error "session #1 enqueue failed"; echo "$ENQ1" >&2; exit 1; } +log_ok "session #1: $SESS1_ID" + +# Even PENDING/PREPARING consumes the slot — no need to wait for RUNNING. 
+sleep 2 + +log_step "Enqueue session #2 — MUST be rejected by max_concurrent_sessions=1" +if ENQ2="$(./bai session enqueue "@$P2" 2>&1)"; then + if echo "$ENQ2" | grep -qiE "concurrent|quota|policy|exceed|limit"; then + log_ok "session #2 rejected (response indicates limit)" + else + SESS2_ID="$(printf '%s' "$ENQ2" | session_id_from || true)" + log_error "BUG: session #2 enqueue succeeded under max_concurrent_sessions=1" + log_error "response: $(echo "$ENQ2" | head -c 500)" + exit 1 + fi +else + log_ok "session #2 rejected (CLI exit non-zero)" +fi + +log_step "Verify only one session is active for user A" +ACTIVE_COUNT="$(./bai my session search --limit 200 2>/dev/null | python3 "$SCRIPT_DIR/count_active_sessions.py")" +[[ "$ACTIVE_COUNT" -le 1 ]] || { log_error "more than one active session ($ACTIVE_COUNT)"; exit 1; } +log_ok "concurrency limit enforced (active=$ACTIVE_COUNT)" + +scenario_end_ok diff --git a/scenarios/99_teardown/run.sh b/scenarios/99_teardown/run.sh new file mode 100755 index 00000000000..4d8a22400c0 --- /dev/null +++ b/scenarios/99_teardown/run.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +# 99: Teardown — purge every scenario-prefixed resource and the test users. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +cd "$SCRIPT_DIR/../.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +scenario_begin "99_teardown" + +bai_config_session +bai_login_admin + +cleanup_user_resources() { + local login_fn="$1" label="$2" + "$login_fn" || { log_warn "${label} login failed (already deleted?)"; return 0; } + + log_step "${label}: terminate scenario sessions" + ./bai my session search --limit 200 2>&1 \ + | PREFIX="${SCENARIO_PREFIX}-" python3 "$SCRIPT_DIR/scenario_session_ids.py" \ + | while read -r sid; do + [[ -z "$sid" ]] && continue + terminate_session "$sid" + done + + log_step "${label}: purge scenario vfolders" + ./bai vfolder my-search --limit 500 2>&1 \ + | PREFIX="${SCENARIO_PREFIX}-" python3 "$SCRIPT_DIR/scenario_vfolder_ids.py" \ + | while read -r vid; do + [[ -z "$vid" ]] && continue + ./bai vfolder delete "$vid" >/dev/null 2>&1 || true + ./bai vfolder purge "$vid" >/dev/null 2>&1 || true + done + + log_step "${label}: delete scenario deployments" + ./bai my deployment search --limit 200 2>&1 \ + | PREFIX="${SCENARIO_PREFIX}-" python3 "$SCRIPT_DIR/scenario_deployment_ids.py" \ + | while read -r did; do + [[ -z "$did" ]] && continue + ./bai deployment delete "$did" >/dev/null 2>&1 || true + done +} + +cleanup_user_resources bai_login_user_a "user A" +cleanup_user_resources bai_login_user_b "user B" + +bai_login_admin + +# Model card fixture — created by 00_setup in `model-store`. Delete the card +# first (frees the vfolder reference), then the vfolder. 
+FIXTURE_NAME="${SCENARIO_PREFIX}-model-card-fixture" + +log_step "Delete model card fixture" +FIXTURE_CARD_ID="$(state_get model_fixture_card_id || lookup_card_id "$FIXTURE_NAME" || true)" +if [[ -n "$FIXTURE_CARD_ID" ]]; then + ./bai admin model-card delete "$FIXTURE_CARD_ID" >/dev/null 2>&1 || log_warn "model card delete failed" +fi + +log_step "Delete model fixture vfolder" +FIXTURE_VF_ID="$(state_get model_fixture_vfolder_id || lookup_admin_vfolder_id "$FIXTURE_NAME" || true)" +if [[ -n "$FIXTURE_VF_ID" ]]; then + ./bai vfolder delete "$FIXTURE_VF_ID" >/dev/null 2>&1 || true + ./bai vfolder purge "$FIXTURE_VF_ID" >/dev/null 2>&1 || true +fi + +log_step "Delete scenario users" +for email in "$TEST_USER_A_EMAIL" "$TEST_USER_B_EMAIL"; do + USER_UID="$(lookup_user_id "$email" || true)" + if [[ -n "$USER_UID" ]]; then + ./bai admin user delete "$USER_UID" >/dev/null 2>&1 || log_warn "delete failed for $email" + fi +done + +# Soft-delete leaves login_history_ Redis keys cached. 10 failed +# attempts → permanent lockout, so clear before next run. 
"""Print ids of deployments whose name starts with $PREFIX. Reads my-search JSON from stdin."""
import json
import os
import sys


def matching_ids(doc, prefix):
    """Yield ids of items whose (metadata.)name starts with *prefix*.

    Newer payloads nest the name under "metadata"; older ones keep it flat.
    """
    for item in doc.get("items", []):
        name = (item.get("metadata") or {}).get("name") or item.get("name") or ""
        if name.startswith(prefix):
            yield item["id"]


def main():
    prefix = os.environ["PREFIX"]
    try:
        doc = json.load(sys.stdin)
    except Exception:
        # Non-JSON on stdin (CLI error text): print nothing so the teardown
        # loop simply has no ids to act on.
        sys.exit(0)
    for deployment_id in matching_ids(doc, prefix):
        print(deployment_id)


if __name__ == "__main__":
    main()
"""Print ids of non-deleted vfolders whose name starts with $PREFIX. Reads my-search JSON from stdin."""
import json
import os
import sys

# Statuses that mean the vfolder is already being (or has been) deleted,
# so teardown must not issue another delete/purge for it.
DELETED_STATES = frozenset({
    "delete-pending",
    "delete-ongoing",
    "delete-complete",
    "delete-error",
    "delete-aborted",
})


def matching_ids(doc, prefix):
    """Yield ids of prefix-named items whose status is not a deleted state."""
    for item in doc.get("items", []):
        name = (item.get("metadata") or {}).get("name") or item.get("name") or ""
        if name.startswith(prefix) and item.get("status") not in DELETED_STATES:
            yield item["id"]


def main():
    prefix = os.environ["PREFIX"]
    try:
        doc = json.load(sys.stdin)
    except Exception:
        # Non-JSON on stdin (CLI error text): print nothing, succeed.
        sys.exit(0)
    for vfolder_id in matching_ids(doc, prefix):
        print(vfolder_id)


if __name__ == "__main__":
    main()
Run everything (creates test users + projects, runs all 17 scenarios, tears down)
scenarios/run_all.sh

# 3. Only specific scenarios, keep test data afterwards
SKIP_TEARDOWN=1 ONLY="01 02 06" scenarios/run_all.sh
```

## What gets created

`00_setup/run.sh` provisions, **idempotently**:

| Kind     | Name (default prefix `scn`)            |
|----------|----------------------------------------|
| User     | `scn-userA@scenario.local` (role=user) |
| User     | `scn-userB@scenario.local` (role=user) |
| Project  | `scn-projectA` (domain=default)        |
| Project  | `scn-projectB` (domain=default)        |

User A is added to project A; user B to project B. Resource policies are
`default` for keypair / user / project. Override any of these via env vars
(see `lib/env.sh`).

## Coverage matrix

Last verified: 2026-04-26 against `main` (commit `f55366d34`). 13/17 PASS, 4 FAIL.

| #  | Scenario                     | Domain          | Status  | What it verifies                                                        |
|----|------------------------------|-----------------|---------|-------------------------------------------------------------------------|
| 00 | setup                        | -               | ✅ PASS | admin creates users + projects + memberships, grants vfolder hosts      |
| 01 | vfolder_lifecycle            | vfolder         | ✅ PASS | create → mkdir → ls → mv → rm → delete → purge                          |
| 02 | session_lifecycle            | session         | ✅ PASS | enqueue session w/ vfolder mount → wait → terminate → cleanup           |
| 03 | model_card_deploy            | model card      | ❌ FAIL | model-card project-search → available-presets → deploy → cleanup        |
| 04 | deployment_revision          | model service   | ❌ FAIL | deployment create → list/current revision → update → delete             |
| 05 | teardown_verification        | session/vf/dep  | ✅ PASS | no scenario-prefixed leftovers in user A's scope                        |
| 06 | multi_user_access            | vfolder         | ✅ PASS | user B cannot list or fetch user A's vfolder                            |
| 07 | vfolder_invite_clone         | vfolder         | ❌ FAIL | cloneable vfolder → clone → both accessible                             |
| 08 | cross_project_isolation      | vfolder         | ✅ PASS | project-scoped lookups never cross project boundaries                   |
| 09 | vfolder_mounted_delete       | vfolder/session | ✅ PASS | deleting a vfolder mounted on a live session must be rejected           |
| 10 | vfolder_cloneable_false      | vfolder         | ✅ PASS | clone of a non-cloneable vfolder must be rejected                       |
| 11 | vfolder_bulk_ops             | vfolder         | ✅ PASS | bulk-delete + bulk-purge across multiple vfolders                       |
| 12 | vfolder_file_io              | vfolder/storage | ✅ PASS | TUS upload → ls → download → sha256 round-trip via storage proxy        |
| 13 | session_exec_logs            | session         | ✅ PASS | BATCH session prints marker, `session logs` retrieves it after exit     |
| 14 | deployment_endpoint_serve    | model service   | ❌ FAIL | deployment endpoint URL is constructed and L7-reachable                 |
| 15 | session_concurrency_cap      | session/policy  | ✅ PASS | keypair `max_concurrent_sessions=1` rejects the second enqueue          |
| 99 | teardown                     | all             | ✅ PASS | purges every scenario-prefixed resource and the test users              |

**Pass rate:** 13 / 17 scenarios.

All scenarios are strict — there are no soft-pass / soft-skip paths. A failure here means a real defect or a missing fixture on the cluster.

## Known failures

These scenarios surface real defects (or missing cluster fixtures) that need
upstream attention. They are kept strict so regressions stay visible — do
**not** mask them with `|| true`, `log_warn`, or `soft-pass` style escapes.
+ +| # | Scenario | Symptom | Root cause | +|----|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 03 | model_card_deploy | `[FAIL] card has no revision presets` — `model-card project-search` now finds the auto-provisioned card, but `available-presets` returns an empty list. | Two-stage fixture gap. `00_setup.sh` now provisions a model card row (`scn-model-card-fixture`), removing the original "no model cards available" gate. The remaining gap: `deployment_revision_preset` table is empty on a fresh dev DB (verified via `gql { deploymentRevisionPresets }`). Seeding presets requires more than scenario fixtures should do — extend `./scripts/install-dev.sh` to insert at least one preset bound to a runtime variant (`cmd`, `nim`, `sglang`, etc.). | +| 04 | deployment_revision | After `./bai deployment delete`, `project-search` returns the row with an empty `lifecycle.status` (`NOT TERMINAL: ` — empty string). Scenario expects one of `STOPPED\|DESTROYED\|DELETED\|TERMINATED\|CANCELLED`. | Manager bug: soft-delete leaves `lifecycle.status` unset rather than transitioning to a terminal state. The row should report a terminal status synchronously after delete. | +| 07 | vfolder_invite_clone | `./bai vfolder clone` returns HTTP 403 `PermissionDeniedError: User lacks permission read on RBACElementRef(VFOLDER, )` from the post-clone `GET` in `vfolder/adapter.py:626`. | RBAC eventual consistency: the cloned vfolder row is committed, but the owner's `read` permission edge has not been replicated when the immediate post-clone GET runs. 
| +| 14 | deployment_endpoint_serve | `[FAIL] no presets available` — same revision-preset gap as 03; this scenario also needs L7-reachable model artifacts on the fixture vfolder once the preset is in place. | Same as 03 for the preset gap. Beyond that, the fixture vfolder ships empty — for L7 reachability you also need a real `model-definition.yaml` + weights uploaded to it. Seeding both should live in the dev installer. | + +### Notes for fixers + +* **Scenario 03 / 14:** `00_setup.sh` now provisions a fixture vfolder + (`scn-model-card-fixture`, usage=model) inside the `model-store` project + and registers it as a model card via `./bai admin model-card create`. With + that, `model-card project-search` returns the card and 03/14 progress past + the original "no model cards available" gate. They now stop at the next + gate — `deployment_revision_preset` rows aren't seeded on a fresh dev DB, + so `model-card available-presets` returns an empty list. To unblock fully: + 1. Extend `./scripts/install-dev.sh` to insert at least one + `deployment_revision_preset` row, bound to one of the seeded + `runtime_variant` rows (`cmd`, `nim`, `sglang`, etc.). Without this + both 03 and 14 stop at "no presets available." + 2. For 14's L7 reachability: also upload a real `model-definition.yaml` + and matching weights into the fixture vfolder — the auto-provisioned + vfolder is empty. +* **Scenario 04:** manager `deployment delete` should transition + `lifecycle.status` to a terminal state inside the same transaction as the + row update. The current empty-string state breaks any client that filters + by status. +* **Scenario 07:** fix manager-side — ensure the new vfolder's owner + permission edge is visible inside the same transaction as the row insert, + or retry the post-clone GET until RBAC catches up. A side effect to watch: + once 07 passes the clone step, the user-side `vfolder delete ` + may still fail with the same `RBACElementRef` error. 
`99_teardown` cleans
+  up via admin context as a fallback so the suite stays idempotent.
+
+## Layout
+
+```
+scenarios/
+├── README.md
+├── lib/
+│   ├── env.sh      # endpoints, credentials, resource policy/host names
+│   └── common.sh   # logging, login helpers, retries (JSON parsing lives in lib/py/*.py)
+├── 00_setup/run.sh
+├── 01_vfolder_lifecycle/run.sh
+├── 02_session_lifecycle/run.sh
+├── 03_model_card_deploy/run.sh
+├── 04_deployment_revision/run.sh
+├── 05_teardown_verification/run.sh
+├── 06_multi_user_access/run.sh
+├── 07_vfolder_invite_clone/run.sh
+├── 08_cross_project_isolation/run.sh
+├── 09_vfolder_mounted_delete/run.sh
+├── 10_vfolder_cloneable_false/run.sh
+├── 11_vfolder_bulk_ops/run.sh
+├── 12_vfolder_file_io/run.sh
+├── 13_session_exec_logs/run.sh
+├── 14_deployment_endpoint_serve/run.sh
+├── 15_session_concurrency_cap/run.sh
+├── 99_teardown/run.sh
+├── run_all.sh
+├── .state/   # per-run state (project IDs, session IDs) — gitignored
+└── .tmp/     # generated payload JSON, upload artifacts — gitignored
+```
+
+## Conventions
+
+* Each script is **self-contained**: sources `lib/env.sh` + `lib/common.sh`,
+  configures session endpoint, logs in as the appropriate user, and cleans up
+  after itself. Running a single scenario directly is supported.
+* Test resources are prefixed with `${SCENARIO_PREFIX}-` (default `scn-`) so
+  cleanup is by-prefix and never touches non-scenario data.
+* `SCENARIO_DEBUG=1` enables verbose `[DBUG]` output (raw `./bai` invocations
+  and parsed JSON snippets).
+* **No soft-pass.** If a feature or fixture is unavailable, the scenario
+  fails (`exit 1`). Missing fixtures (e.g. no model card on a fresh dev DB)
+  must be provisioned, not silently skipped. This is intentional — silent
+  skips hide real regressions.
+ +## Environment overrides + +```bash +# Different cluster +BAI_ENDPOINT=http://10.0.0.5:8090 scenarios/run_all.sh + +# x86_64 host — pick an x86_64 image +TEST_IMAGE_NAME='cr.backend.ai/stable/python:3.12-ubuntu22.04' scenarios/run_all.sh + +# Different storage host +TEST_VFOLDER_HOST=local:volume2 scenarios/run_all.sh + +# Different test prefix to avoid colliding with another suite run +SCENARIO_PREFIX=ci-$RANDOM scenarios/run_all.sh +``` + +## Prerequisites + +* `./bai` configured for session login against the webserver (`run_all.sh` does this). +* Halfstack up; manager + agent + webserver running. +* At least one image visible to `./bai admin image search` matching `TEST_IMAGE_NAME`. +* At least one healthy agent in the resource group `TEST_RESOURCE_GROUP`. + +## Troubleshooting + +* **Admin login fails** → check webserver / manager are up (`/halfstack` skill). +* **`image not found`** → run `./backend.ai mgr image rescan cr.backend.ai`. +* **Session never reaches RUNNING** → no agent or insufficient resources; + the scenario logs the final status and continues to terminate it. +* **`vfolder host not allowed`** → keypair resource policy doesn't permit + `TEST_VFOLDER_HOST`. Inspect `./bai admin resource-policy keypair search`. diff --git a/scenarios/lib/common.sh b/scenarios/lib/common.sh new file mode 100755 index 00000000000..00734faf92e --- /dev/null +++ b/scenarios/lib/common.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash +# Shared helpers for scenario scripts. Most JSON parsing lives in $SCN_PY/*.py. 
+ +# ---- Colors / logging ---- +if [[ -t 1 ]]; then + _C_RED=$'\033[0;31m' + _C_GREEN=$'\033[0;32m' + _C_YELLOW=$'\033[0;33m' + _C_BLUE=$'\033[0;34m' + _C_GRAY=$'\033[0;37m' + _C_BOLD=$'\033[1m' + _C_RESET=$'\033[0m' +else + _C_RED= _C_GREEN= _C_YELLOW= _C_BLUE= _C_GRAY= _C_BOLD= _C_RESET= +fi + +log_info() { printf '%s[INFO]%s %s\n' "$_C_BLUE" "$_C_RESET" "$*" >&2; } +log_step() { printf '%s%s[STEP]%s %s\n' "$_C_BOLD" "$_C_BLUE" "$_C_RESET" "$*" >&2; } +log_ok() { printf '%s[ OK ]%s %s\n' "$_C_GREEN" "$_C_RESET" "$*" >&2; } +log_warn() { printf '%s[WARN]%s %s\n' "$_C_YELLOW" "$_C_RESET" "$*" >&2; } +log_error() { printf '%s[FAIL]%s %s\n' "$_C_RED" "$_C_RESET" "$*" >&2; } +log_debug() { [[ -n "${SCENARIO_DEBUG:-}" ]] && printf '%s[DBUG]%s %s\n' "$_C_GRAY" "$_C_RESET" "$*" >&2 || true; } + +# ---- Scenario header / footer ---- +_SCENARIO_NAME="" +_SCENARIO_START=0 + +scenario_begin() { + _SCENARIO_NAME="$1" + _SCENARIO_START=$(date +%s) + printf '\n%s========================================================================%s\n' "$_C_BOLD" "$_C_RESET" >&2 + printf '%s SCENARIO: %s%s\n' "$_C_BOLD" "$_SCENARIO_NAME" "$_C_RESET" >&2 + printf '%s========================================================================%s\n' "$_C_BOLD" "$_C_RESET" >&2 +} + +scenario_end_ok() { + local end=$(date +%s) + local dur=$((end - _SCENARIO_START)) + log_ok "Scenario '${_SCENARIO_NAME}' PASSED (${dur}s)" +} + +# ---- bai login helpers ---- +_bai_login_as() { + local email="$1" password="$2" label="$3" + ./bai logout >/dev/null 2>&1 || true + local out + out="$(BACKEND_USER="$email" BACKEND_PASSWORD="$password" ./bai login 2>&1)" || { + if ! 
printf '%s' "$out" | grep -qi "already logged in"; then + log_error "${label} login failed: $out" + return 1 + fi + } + return 0 +} + +bai_login_admin() { _bai_login_as "$ADMIN_EMAIL" "$ADMIN_PASSWORD" "admin"; } +bai_login_user_a() { _bai_login_as "$TEST_USER_A_EMAIL" "$TEST_USER_A_PASSWORD" "user A"; } +bai_login_user_b() { _bai_login_as "$TEST_USER_B_EMAIL" "$TEST_USER_B_PASSWORD" "user B"; } + +bai_config_session() { + ./bai config set endpoint "$BAI_ENDPOINT" >/dev/null + ./bai config set endpoint-type "$BAI_ENDPOINT_TYPE" >/dev/null +} + +# wait_until +wait_until() { + local timeout="$1" interval="$2"; shift 2 + local elapsed=0 + while (( elapsed < timeout )); do + if "$@" >/dev/null 2>&1; then return 0; fi + sleep "$interval" + elapsed=$(( elapsed + interval )) + done + return 1 +} + +# ---- ID lookup helpers ---- + +lookup_project_id() { + ./bai admin project search --limit 50 2>&1 | NAME="$1" python3 "$SCN_PY/lookup_project_id.py" +} + +lookup_user_id() { + ./bai admin user search --email-contains "$1" --limit 5 2>&1 | EMAIL="$1" python3 "$SCN_PY/lookup_user_id.py" +} + +lookup_image_id() { + ./bai admin image search --name-contains "$1" --limit 50 2>&1 | NAME="$1" python3 "$SCN_PY/lookup_image_id.py" +} + +# Skips deleted states. Used from a user context (user-owned vfolders). +lookup_my_vfolder_id() { + ./bai vfolder my-search --limit 200 2>&1 | NAME="$1" python3 "$SCN_PY/lookup_vfolder_id.py" +} + +# Admin context: any vfolder by exact name, skipping deleted. +lookup_admin_vfolder_id() { + ./bai vfolder admin-search --limit 500 2>&1 | NAME="$1" python3 "$SCN_PY/lookup_vfolder_id.py" +} + +# Project-scoped vfolder lookup (caller chooses whether admin or member). 
+lookup_project_vfolder_id() {
+  ./bai vfolder project-search "$1" --limit 200 2>/dev/null | NAME="$2" python3 "$SCN_PY/lookup_vfolder_id.py"
+}
+
+lookup_card_id() {
+  ./bai admin model-card search --name-contains "$1" --limit 10 2>&1 | NAME="$1" python3 "$SCN_PY/lookup_card_id.py"
+}
+
+# ---- Response parsers (read JSON from stdin) ----
+
+session_id_from() { python3 "$SCN_PY/session_id_from.py"; }
+deployment_id_from() { python3 "$SCN_PY/deployment_id_from.py"; }
+
+# ---- Session state machine ----
+
+# Print live status of a session id (querying current user's my-search).
+session_status() {
+  SID="$1" ./bai my session search --limit 50 2>/dev/null | SID="$1" python3 "$SCN_PY/session_status.py"
+}
+
+# wait_session_status <sid> <iters> <interval> <status...>
+# Poll session_status up to <iters> times, sleeping <interval> between polls.
+# Returns 0 on first match (echoing the matched status). On timeout returns 1
+# and echoes the last observed status.
+wait_session_status() {
+  local sid="$1" iters="$2" interval="$3"; shift 3
+  local s=""
+  # NOTE(review): the loop body was lost to angle-bracket stripping in the
+  # original extraction; reconstructed from the documented contract above —
+  # confirm against the upstream file.
+  for ((i=0; i<iters; i++)); do
+    s="$(session_status "$sid")"
+    local want
+    for want in "$@"; do
+      if [[ "$s" == "$want" ]]; then
+        echo "$s"
+        return 0
+      fi
+    done
+    sleep "$interval"
+  done
+  echo "$s"
+  return 1
+}
+
+# Terminate a session by id, best-effort: try a forced terminate first, fall
+# back to a plain terminate, and never fail the caller (cleanup path).
+# NOTE(review): the first invocation's exact flags were lost to extraction
+# stripping — reconstructed; verify against the upstream file.
+terminate_session() {
+  local sid="$1"
+  ./bai session terminate "$sid" --forced >/dev/null 2>&1 \
+    || ./bai session terminate "$sid" >/dev/null 2>&1 \
+    || true
+}
+
+# Build an interactive session enqueue payload (1 CPU, 1 GiB RAM). 
+# Usage: session_payload [vfolder_id mount_path] +session_payload() { + local name="$1" image_id="$2" pid="$3" + local mounts="[]" + if [[ -n "${4:-}" && -n "${5:-}" ]]; then + mounts=$(printf '[{"vfolder_id":"%s","mount_path":"%s","permission":"rw"}]' "$4" "$5") + fi + cat < "$SCENARIO_STATE_DIR/$1"; } +state_get() { [[ -f "$SCENARIO_STATE_DIR/$1" ]] && cat "$SCENARIO_STATE_DIR/$1" || return 1; } +state_clear() { rm -rf "$SCENARIO_STATE_DIR"/*; } + +# ---- Negative-path assertion ---- +expect_fail() { + local desc="$1"; shift + if "$@" >/dev/null 2>&1; then + log_error "expected '${desc}' to fail but it succeeded" + return 1 + fi + log_ok "expected failure: ${desc}" +} diff --git a/scenarios/lib/env.sh b/scenarios/lib/env.sh new file mode 100755 index 00000000000..71c00ac307c --- /dev/null +++ b/scenarios/lib/env.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# Shared environment for scenario scripts. +# Source from each scenario: `source "$(dirname "$0")/lib/env.sh"`. + +# Endpoints +export BAI_ENDPOINT="${BAI_ENDPOINT:-http://127.0.0.1:8090}" +export BAI_ENDPOINT_TYPE="${BAI_ENDPOINT_TYPE:-session}" + +# Admin credentials (from fixtures/manager/example-users.json) +export ADMIN_EMAIL="${ADMIN_EMAIL:-admin@lablup.com}" +export ADMIN_PASSWORD="${ADMIN_PASSWORD:-wJalrXUt}" + +# Test user/project names — prefixed to avoid collision +export SCENARIO_PREFIX="${SCENARIO_PREFIX:-scn}" +export TEST_USER_A_EMAIL="${SCENARIO_PREFIX}-userA@scenario.local" +export TEST_USER_A_NAME="${SCENARIO_PREFIX}-userA" +export TEST_USER_A_PASSWORD="ScenarioPassA1!" +export TEST_USER_B_EMAIL="${SCENARIO_PREFIX}-userB@scenario.local" +export TEST_USER_B_NAME="${SCENARIO_PREFIX}-userB" +export TEST_USER_B_PASSWORD="ScenarioPassB1!" 
+ +export TEST_PROJECT_A_NAME="${SCENARIO_PREFIX}-projectA" +export TEST_PROJECT_B_NAME="${SCENARIO_PREFIX}-projectB" + +# Domain / scaling group / hosts / policies +export TEST_DOMAIN="${TEST_DOMAIN:-default}" +export TEST_RESOURCE_GROUP="${TEST_RESOURCE_GROUP:-default}" +export TEST_VFOLDER_HOST="${TEST_VFOLDER_HOST:-local:volume1}" +export TEST_KEYPAIR_RESOURCE_POLICY="${TEST_KEYPAIR_RESOURCE_POLICY:-default}" +export TEST_USER_RESOURCE_POLICY="${TEST_USER_RESOURCE_POLICY:-default}" +export TEST_PROJECT_RESOURCE_POLICY="${TEST_PROJECT_RESOURCE_POLICY:-default}" + +# Default image (aarch64). Override for x86_64 hosts. +# Find current ID with: ./bai admin image search --architecture aarch64 --name-contains "python:3.12-ubuntu24.04-arm64" +export TEST_IMAGE_NAME="${TEST_IMAGE_NAME:-cr.backend.ai/stable/python:3.12-ubuntu24.04-arm64}" + +# Working/state dir for cross-script artifacts (uuid lookups, etc.) +export SCENARIO_STATE_DIR="${SCENARIO_STATE_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)/.state}" +mkdir -p "$SCENARIO_STATE_DIR" + +# Temp dir for upload/download artifacts +export SCENARIO_TMP_DIR="${SCENARIO_TMP_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)/.tmp}" +mkdir -p "$SCENARIO_TMP_DIR" + +# Shared Python helpers used by both lib/common.sh and individual scenarios. +export SCN_PY="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/py" diff --git a/scenarios/lib/py/assert_id_in_search.py b/scenarios/lib/py/assert_id_in_search.py new file mode 100644 index 00000000000..ae304c0f6d2 --- /dev/null +++ b/scenarios/lib/py/assert_id_in_search.py @@ -0,0 +1,10 @@ +"""Exit 0 if any items[].id == $TARGET, else exit 1. 
Reads search JSON from stdin."""
+import json
+import os
+import sys
+
+target = os.environ["TARGET"]
+for it in json.load(sys.stdin).get("items", []):
+    if it.get("id") == target:
+        sys.exit(0)
+sys.exit(1)
diff --git a/scenarios/lib/py/check_no_id.py b/scenarios/lib/py/check_no_id.py
new file mode 100644
index 00000000000..b4610f355dd
--- /dev/null
+++ b/scenarios/lib/py/check_no_id.py
@@ -0,0 +1,18 @@
+"""Exit 0 if no items[].id == $TARGET (isolation enforced); exit 1 if found.
+
+Reads search JSON from stdin. Treats unparseable input as enforcement (exit 0)
+since 403/error pages mean the caller couldn't see anything.
+"""
+import json
+import os
+import sys
+
+target = os.environ["TARGET"]
+try:
+    d = json.load(sys.stdin)
+except Exception:
+    sys.exit(0)
+for it in d.get("items", []):
+    if it.get("id") == target:
+        sys.exit(1)
+sys.exit(0)
diff --git a/scenarios/lib/py/deployment_id_from.py b/scenarios/lib/py/deployment_id_from.py
new file mode 100644
index 00000000000..045ebe69d9b
--- /dev/null
+++ b/scenarios/lib/py/deployment_id_from.py
@@ -0,0 +1,14 @@
+"""Print deployment id from a `deployment create` / `model-card deploy` response (stdin)."""
+import json
+import sys
+
+try:
+    d = json.load(sys.stdin)
+except Exception:
+    sys.exit(1)
+print(
+    d.get("id")
+    or d.get("deployment_id")
+    or (d.get("deployment") or {}).get("id")  # tolerate "deployment": null (same idiom as session_id_from.py)
+    or ""
+)
diff --git a/scenarios/lib/py/lookup_card_id.py b/scenarios/lib/py/lookup_card_id.py
new file mode 100644
index 00000000000..0998d5e318e
--- /dev/null
+++ b/scenarios/lib/py/lookup_card_id.py
@@ -0,0 +1,10 @@
+"""Print model-card id whose name == $NAME. 
Reads search JSON from stdin.""" +import json +import os +import sys + +target = os.environ["NAME"] +for it in json.load(sys.stdin).get("items", []): + if (it.get("name") or "") == target: + print(it["id"]) + break diff --git a/scenarios/lib/py/lookup_image_id.py b/scenarios/lib/py/lookup_image_id.py new file mode 100644 index 00000000000..64854efb802 --- /dev/null +++ b/scenarios/lib/py/lookup_image_id.py @@ -0,0 +1,10 @@ +"""Print image id whose name == $NAME. Reads search JSON from stdin.""" +import json +import os +import sys + +target = os.environ["NAME"] +for it in json.load(sys.stdin).get("items", []): + if it.get("name") == target: + print(it["id"]) + break diff --git a/scenarios/lib/py/lookup_project_id.py b/scenarios/lib/py/lookup_project_id.py new file mode 100644 index 00000000000..115d84fdc00 --- /dev/null +++ b/scenarios/lib/py/lookup_project_id.py @@ -0,0 +1,10 @@ +"""Print project id whose basic_info.name == $NAME. Reads search JSON from stdin.""" +import json +import os +import sys + +target = os.environ["NAME"] +for it in json.load(sys.stdin).get("items", []): + if it.get("basic_info", {}).get("name") == target: + print(it["id"]) + break diff --git a/scenarios/lib/py/lookup_user_id.py b/scenarios/lib/py/lookup_user_id.py new file mode 100644 index 00000000000..8fa8ab7316c --- /dev/null +++ b/scenarios/lib/py/lookup_user_id.py @@ -0,0 +1,11 @@ +"""Print user id whose email == $EMAIL. Reads search JSON from stdin.""" +import json +import os +import sys + +target = os.environ["EMAIL"] +for it in json.load(sys.stdin).get("items", []): + info = it.get("basic_info") or {} + if info.get("email") == target or it.get("email") == target: + print(it["id"]) + break diff --git a/scenarios/lib/py/lookup_vfolder_id.py b/scenarios/lib/py/lookup_vfolder_id.py new file mode 100644 index 00000000000..53f8b1ac5dc --- /dev/null +++ b/scenarios/lib/py/lookup_vfolder_id.py @@ -0,0 +1,23 @@ +"""Print vfolder id whose name == $NAME, skipping deleted-state rows. 
+ +Used for my-search, admin-search, and project-search responses (all share +shape `items[].metadata.name | items[].name` and `items[].status`). +Reads search JSON from stdin. +""" +import json +import os +import sys + +DELETED = { + "delete-pending", + "delete-ongoing", + "delete-complete", + "delete-error", + "delete-aborted", +} +target = os.environ["NAME"] +for it in json.load(sys.stdin).get("items", []): + name = (it.get("metadata") or {}).get("name") or it.get("name") or "" + if name == target and it.get("status") not in DELETED: + print(it["id"]) + break diff --git a/scenarios/lib/py/modify_group_ok.py b/scenarios/lib/py/modify_group_ok.py new file mode 100644 index 00000000000..2f9f328fede --- /dev/null +++ b/scenarios/lib/py/modify_group_ok.py @@ -0,0 +1,10 @@ +"""Exit 0 if the modify_group GraphQL mutation returned ok=true. Reads response JSON from stdin.""" +import json +import sys + +try: + d = json.load(sys.stdin) +except Exception: + sys.exit(1) +ok = (d.get("data", {}).get("modify_group") or d.get("modify_group") or {}).get("ok") +sys.exit(0 if ok else 1) diff --git a/scenarios/lib/py/pick_first_id.py b/scenarios/lib/py/pick_first_id.py new file mode 100644 index 00000000000..12757cc79da --- /dev/null +++ b/scenarios/lib/py/pick_first_id.py @@ -0,0 +1,7 @@ +"""Print id of the first item from `items` (or `presets`). Reads JSON from stdin.""" +import json +import sys + +d = json.load(sys.stdin) +items = d.get("items") or d.get("presets") or [] +print(items[0]["id"] if items else "") diff --git a/scenarios/lib/py/print_json_field.py b/scenarios/lib/py/print_json_field.py new file mode 100644 index 00000000000..15a5ab6b5f1 --- /dev/null +++ b/scenarios/lib/py/print_json_field.py @@ -0,0 +1,6 @@ +"""Print the value of top-level JSON field named $FIELD. 
Reads JSON from stdin.""" +import json +import os +import sys + +print(json.load(sys.stdin)[os.environ["FIELD"]]) diff --git a/scenarios/lib/py/session_id_from.py b/scenarios/lib/py/session_id_from.py new file mode 100644 index 00000000000..71d927d4208 --- /dev/null +++ b/scenarios/lib/py/session_id_from.py @@ -0,0 +1,11 @@ +"""Print session id from a `bai session enqueue` response (stdin).""" +import json +import sys + +try: + d = json.load(sys.stdin) +except Exception: + sys.exit(1) +sid = (d.get("session") or {}).get("id") or d.get("id") or d.get("session_id") +if sid: + print(sid) diff --git a/scenarios/lib/py/session_status.py b/scenarios/lib/py/session_status.py new file mode 100644 index 00000000000..2bc53795669 --- /dev/null +++ b/scenarios/lib/py/session_status.py @@ -0,0 +1,17 @@ +"""Print lifecycle.status (or 'NOT_FOUND') of session whose id == $SID. Reads search JSON from stdin.""" +import json +import os +import sys + +sid = os.environ["SID"] +try: + d = json.load(sys.stdin) +except Exception: + print("NOT_FOUND") + sys.exit(0) +for it in d.get("items", []): + if it.get("id") == sid: + status = (it.get("lifecycle") or {}).get("status") or it.get("status", "UNKNOWN") + print(status) + sys.exit(0) +print("NOT_FOUND") diff --git a/scenarios/run_all.sh b/scenarios/run_all.sh new file mode 100755 index 00000000000..df00f26346e --- /dev/null +++ b/scenarios/run_all.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# run_all.sh — execute every scenario in order, collecting PASS/FAIL. +# +# Usage: +# scenarios/run_all.sh # full run incl. teardown +# SKIP_TEARDOWN=1 scenarios/run_all.sh # leave test data behind for inspection +# ONLY="01 02" scenarios/run_all.sh # run a subset of scenario numbers + +set -uo pipefail + +cd "$(dirname "$0")/.." 
+source scenarios/lib/env.sh +source scenarios/lib/common.sh + +SCRIPTS=( + scenarios/00_setup/run.sh + scenarios/01_vfolder_lifecycle/run.sh + scenarios/02_session_lifecycle/run.sh + scenarios/03_model_card_deploy/run.sh + scenarios/04_deployment_revision/run.sh + scenarios/05_teardown_verification/run.sh + scenarios/06_multi_user_access/run.sh + scenarios/07_vfolder_invite_clone/run.sh + scenarios/08_cross_project_isolation/run.sh + scenarios/09_vfolder_mounted_delete/run.sh + scenarios/10_vfolder_cloneable_false/run.sh + scenarios/11_vfolder_bulk_ops/run.sh + scenarios/12_vfolder_file_io/run.sh + scenarios/13_session_exec_logs/run.sh + scenarios/14_deployment_endpoint_serve/run.sh + scenarios/15_session_concurrency_cap/run.sh +) + +if [[ -n "${ONLY:-}" ]]; then + FILTERED=() + for s in "${SCRIPTS[@]}"; do + for tok in $ONLY; do + if [[ "$s" == *"/${tok}_"* ]]; then + FILTERED+=("$s"); break + fi + done + done + SCRIPTS=("${FILTERED[@]}") +fi + +declare -a PASSED FAILED +for script in "${SCRIPTS[@]}"; do + if bash "$script"; then + PASSED+=("$script") + else + FAILED+=("$script") + fi +done + +if [[ -z "${SKIP_TEARDOWN:-}" ]]; then + bash scenarios/99_teardown/run.sh \ + && PASSED+=("scenarios/99_teardown/run.sh") \ + || FAILED+=("scenarios/99_teardown/run.sh") +fi + +printf '\n%s========================================================================%s\n' "$_C_BOLD" "$_C_RESET" +printf '%s SUMMARY%s\n' "$_C_BOLD" "$_C_RESET" +printf '%s========================================================================%s\n' "$_C_BOLD" "$_C_RESET" +printf '%sPASSED (%d):%s\n' "$_C_GREEN" "${#PASSED[@]}" "$_C_RESET" +for s in "${PASSED[@]}"; do printf ' ✓ %s\n' "$s"; done + +if (( ${#FAILED[@]} > 0 )); then + printf '%sFAILED (%d):%s\n' "$_C_RED" "${#FAILED[@]}" "$_C_RESET" + for s in "${FAILED[@]}"; do printf ' ✗ %s\n' "$s"; done + exit 1 +fi +printf '%sAll scenarios passed.%s\n' "$_C_GREEN" "$_C_RESET"