From 30809a7ebc167039ce2ab00cfb23e15aed2b03b8 Mon Sep 17 00:00:00 2001 From: Israel Weinberg <99821070+iwphonedo@users.noreply.github.com> Date: Tue, 7 Jan 2025 20:32:10 +0200 Subject: [PATCH 01/18] E2E Tests version 1.0 (#2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This comprehensive commit includes major improvements and enhancements to the OPAL development environment, focusing on debugging, authorization, containerized testing, and project structure optimization. Key highlights include: 1. OPAL Development Enhancements: • Introduced remote debugging capabilities using debugpy and enhanced logging across various components. • Improved OPAL server and client configuration with better container integration and dynamic port management. • Enabled Gitea for local policy management, replacing GitHub for enhanced control and security in testing environments. 2. Refactoring and Streamlining: • Refactored the codebase to improve readability, remove deprecated files, and enhance test fixtures. • Reorganized project structure and consolidated environment setup for consistency. • Simplified session and container management for test execution using pytest and testcontainers. 3. Testing and Configuration Improvements: • Enhanced test automation with new fixtures, logging updates, and RBAC policy improvements. • Improved Docker image handling, session matrix parameterization, and environment variable management for better efficiency and clarity. • Added global exception handling to improve error reporting during tests. 4. Documentation and Cleanup: • Updated README documentation to reflect changes in test organization and container setup. • Removed unused files and components to streamline the project structure. 
--- .devcontainer/devcontainer.json | 26 + .devcontainer/setup.sh | 20 + .gitignore | 80 +-- .vscode/launch.json | 40 ++ .vscode/settings.json | 7 + app-tests/docker-compose-app-tests.yml | 23 +- app-tests/jwks_dir/jwks.json | 1 + app-tests/minrun.sh | 194 +++++++ app-tests/sample_service/Dockerfile | 44 ++ app-tests/sample_service/app.py | 80 +++ app-tests/sample_service/nginx.conf | 109 ++++ app-tests/sample_service/openapi.yaml | 88 +++ app-tests/sample_service/policy.rego | 37 ++ app-tests/sample_service/requirements.txt | 18 + app-tests/sample_service/start.sh | 10 + app-tests/sample_service/supervisord.conf | 8 + docker/Dockerfile | 22 +- docker/Dockerfile.client | 118 ++++ docker/Dockerfile.server | 116 ++++ docker/docker-compose-local.yml | 105 ++++ packages/opal-client/opal_client/main.py | 6 + packages/opal-client/requires.txt | 1 + packages/opal-server/opal_server/data/api.py | 3 + packages/opal-server/opal_server/main.py | 6 + pytest.ini | 1 + requirements.txt | 1 + scripts/start.sh | 9 +- tests/.env.example | 5 + tests/README.md | 82 +++ tests/__init__.py | 0 tests/conftest.py | 505 +++++++++++++++++ tests/containers/broadcast_container_base.py | 21 + tests/containers/cedar_container.py | 46 ++ tests/containers/gitea_container.py | 413 ++++++++++++++ tests/containers/kafka_broadcast_container.py | 33 ++ tests/containers/kafka_ui_container.py | 36 ++ tests/containers/opa_container.py | 67 +++ tests/containers/opal_client_container.py | 52 ++ tests/containers/opal_server_container.py | 91 ++++ tests/containers/permitContainer.py | 135 +++++ .../postgres_broadcast_container.py | 42 ++ .../containers/pulsar_broadcast_container.py | 32 ++ tests/containers/redis_broadcast_container.py | 28 + tests/containers/redis_ui_container.py | 36 ++ tests/containers/settings/cedar_settings.py | 23 + tests/containers/settings/gitea_settings.py | 121 +++++ .../settings/kafka_broadcast_settings.py | 104 ++++ .../settings/opal_client_settings.py | 304 +++++++++++ 
.../settings/opal_server_settings.py | 262 +++++++++ .../settings/postgres_broadcast_settings.py | 61 +++ tests/containers/zookeeper_container.py | 40 ++ tests/docker/Dockerfile.cedar | 32 ++ tests/docker/Dockerfile.client | 17 + tests/docker/Dockerfile.client.local | 76 +++ tests/docker/Dockerfile.client_cedar.local | 111 ++++ tests/docker/Dockerfile.client_opa.local | 106 ++++ tests/docker/Dockerfile.opa | 36 ++ tests/docker/Dockerfile.server | 17 + tests/docker/Dockerfile.server.local | 102 ++++ tests/fixtures/__init__.py | 0 tests/fixtures/broadcasters.py | 110 ++++ tests/fixtures/images.py | 91 ++++ tests/fixtures/policy_repos.py | 111 ++++ tests/fixtures/policy_stores.py | 89 +++ tests/genopalkeys.sh | 16 + tests/install_opal.sh | 19 + tests/policies/rbac.rego | 9 + tests/policy_repos/gitea_policy_repo.py | 106 ++++ tests/policy_repos/github_policy_repo.py | 411 ++++++++++++++ tests/policy_repos/gitlab_policy_repo.py | 103 ++++ tests/policy_repos/policy_repo_base.py | 27 + tests/policy_repos/policy_repo_factory.py | 51 ++ tests/policy_repos/policy_repo_settings.py | 36 ++ tests/pytest.ini | 8 + tests/requirements.txt | 4 + tests/run.sh | 51 ++ tests/settings.py | 171 ++++++ tests/start_debug.sh | 29 + tests/test_app.py | 308 +++++++++++ tests/test_opal_server_config.py | 4 + tests/utils.py | 506 ++++++++++++++++++ 81 files changed, 6412 insertions(+), 57 deletions(-) create mode 100644 .devcontainer/devcontainer.json create mode 100755 .devcontainer/setup.sh create mode 100644 .vscode/launch.json create mode 100644 .vscode/settings.json create mode 100644 app-tests/jwks_dir/jwks.json create mode 100755 app-tests/minrun.sh create mode 100644 app-tests/sample_service/Dockerfile create mode 100644 app-tests/sample_service/app.py create mode 100644 app-tests/sample_service/nginx.conf create mode 100644 app-tests/sample_service/openapi.yaml create mode 100644 app-tests/sample_service/policy.rego create mode 100644 app-tests/sample_service/requirements.txt create 
mode 100644 app-tests/sample_service/start.sh create mode 100644 app-tests/sample_service/supervisord.conf create mode 100644 docker/Dockerfile.client create mode 100644 docker/Dockerfile.server create mode 100644 docker/docker-compose-local.yml create mode 100644 tests/.env.example create mode 100644 tests/README.md create mode 100644 tests/__init__.py create mode 100644 tests/conftest.py create mode 100644 tests/containers/broadcast_container_base.py create mode 100644 tests/containers/cedar_container.py create mode 100644 tests/containers/gitea_container.py create mode 100644 tests/containers/kafka_broadcast_container.py create mode 100644 tests/containers/kafka_ui_container.py create mode 100644 tests/containers/opa_container.py create mode 100644 tests/containers/opal_client_container.py create mode 100644 tests/containers/opal_server_container.py create mode 100644 tests/containers/permitContainer.py create mode 100644 tests/containers/postgres_broadcast_container.py create mode 100644 tests/containers/pulsar_broadcast_container.py create mode 100644 tests/containers/redis_broadcast_container.py create mode 100644 tests/containers/redis_ui_container.py create mode 100644 tests/containers/settings/cedar_settings.py create mode 100644 tests/containers/settings/gitea_settings.py create mode 100644 tests/containers/settings/kafka_broadcast_settings.py create mode 100644 tests/containers/settings/opal_client_settings.py create mode 100644 tests/containers/settings/opal_server_settings.py create mode 100644 tests/containers/settings/postgres_broadcast_settings.py create mode 100644 tests/containers/zookeeper_container.py create mode 100644 tests/docker/Dockerfile.cedar create mode 100644 tests/docker/Dockerfile.client create mode 100644 tests/docker/Dockerfile.client.local create mode 100644 tests/docker/Dockerfile.client_cedar.local create mode 100644 tests/docker/Dockerfile.client_opa.local create mode 100644 tests/docker/Dockerfile.opa create mode 100644 
tests/docker/Dockerfile.server create mode 100644 tests/docker/Dockerfile.server.local create mode 100644 tests/fixtures/__init__.py create mode 100644 tests/fixtures/broadcasters.py create mode 100644 tests/fixtures/images.py create mode 100644 tests/fixtures/policy_repos.py create mode 100644 tests/fixtures/policy_stores.py create mode 100644 tests/genopalkeys.sh create mode 100644 tests/install_opal.sh create mode 100644 tests/policies/rbac.rego create mode 100644 tests/policy_repos/gitea_policy_repo.py create mode 100644 tests/policy_repos/github_policy_repo.py create mode 100644 tests/policy_repos/gitlab_policy_repo.py create mode 100644 tests/policy_repos/policy_repo_base.py create mode 100644 tests/policy_repos/policy_repo_factory.py create mode 100644 tests/policy_repos/policy_repo_settings.py create mode 100644 tests/pytest.ini create mode 100644 tests/requirements.txt create mode 100755 tests/run.sh create mode 100644 tests/settings.py create mode 100644 tests/start_debug.sh create mode 100644 tests/test_app.py create mode 100644 tests/test_opal_server_config.py create mode 100644 tests/utils.py diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000..3c23d8797 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,26 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/python +{ + "name": "Python 3", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye", + "runArgs": ["--name", "OAPL-DEV"], + "features": { + "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {} + + }, + // Features to add to the dev container. More info: https://containers.dev/features. 
+ // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "/bin/bash .devcontainer/setup.sh", + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh new file mode 100755 index 000000000..d44836c80 --- /dev/null +++ b/.devcontainer/setup.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +if [ -d ".venv" ]; then + echo "Virtual environment already exists" +else + python3 -m venv .venv +fi +source .venv/bin/activate + +apt-get update && apt-get install -y git + +pip install --upgrade pip +pip3 install --user -r requirements.txt + +cd tests +pip3 install --user -r requirements.txt + +pip install pre-commit +pre-commit install +pre-commit run --all-files diff --git a/.gitignore b/.gitignore index 06c38bfb3..47754a866 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,13 @@ -# Byte-compiled / optimized / DLL files +# OPAL specific +opal_test_keys/* +.env +opal-example-policy-repo/* +data/ +OPAL_DATASOURCE_TOKEN.tkn +OPAL_CLIENT_TOKEN.tkn + +# Temporary and Python cache files +**/*.pyc __pycache__/ *.py[cod] *$py.class @@ -6,10 +15,17 @@ __pycache__/ # C extensions *.so +# Virtual environments +.venv/ +venv/ +env/ +ENV/ +env.bak/ +venv.bak/ + # Distribution / packaging .Python build/ -develop-eggs/ dist/ downloads/ eggs/ @@ -18,7 +34,6 @@ lib/ lib64/ parts/ sdist/ -var/ wheels/ pip-wheel-metadata/ share/python-wheels/ @@ -27,16 +42,14 @@ share/python-wheels/ *.egg MANIFEST -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - # Installer logs pip-log.txt pip-delete-this-directory.txt +# PyInstaller +*.manifest +*.spec + # Unit test / coverage reports htmlcov/ .tox/ @@ -50,22 +63,23 @@ coverage.xml *.py,cover .hypothesis/ .pytest_cache/ +new_pytest_env/temp # Translations *.mo *.pot -# Django stuff: +# Django *.log local_settings.py db.sqlite3 db.sqlite3-journal -# Flask stuff: +# Flask instance/ .webassets-cache -# Scrapy stuff: +# Scrapy .scrapy # Sphinx documentation @@ -84,53 +98,43 @@ ipython_config.py # pyenv .python-version -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. +# Pipenv #Pipfile.lock -# PEP 582; used by e.g. github.com/David-OConnor/pyflow +# PEP 582 __pypackages__/ -# Celery stuff +# Celery celerybeat-schedule celerybeat.pid -# SageMath parsed files +# SageMath *.sage.py -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ +# Editors +.vscode/ +.idea +*.iml -# Spyder project settings +# Spyder .spyderproject .spyproject -# Rope project settings +# Rope .ropeproject -# mkdocs documentation -/site +# mkdocs +docs/_build/ # mypy .mypy_cache/ .dmypy.json dmypy.json -# Pyre type checker +# Pyre .pyre/ -# editors -.vscode/ -.idea -*.iml - +# System files .DS_Store +pytest_6dbc.env +tests/pytest_1a09.env diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 000000000..4559acfd6 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,40 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Remote to local", + "type": "debugpy", + "request": "attach", + "justMyCode": false, + "subProcess": true, + "connect": { + "host": "localhost", + "port": 5678 + }, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": 
"${cwd}" + } + ] + }, + { + "name": "Python Debugger: Current File", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + }, + { + "name": "Python: Debug with Args", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "args": [ + "--deploy", + "--with_broadcast", + ], + "console": "integratedTerminal" + } + ] +} diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..dc49512f5 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,7 @@ +{ + "cmake.ignoreCMakeListsMissing": true, + "makefile.configureOnOpen": false, + "python.analysis.extraPaths": [ + "./packages/opal-common" + ] +} diff --git a/app-tests/docker-compose-app-tests.yml b/app-tests/docker-compose-app-tests.yml index b12e5309a..3bb517d3d 100644 --- a/app-tests/docker-compose-app-tests.yml +++ b/app-tests/docker-compose-app-tests.yml @@ -1,4 +1,6 @@ + services: + broadcast_channel: image: postgres:alpine environment: @@ -7,15 +9,19 @@ services: - POSTGRES_PASSWORD=postgres opal_server: - image: permitio/opal-server:${OPAL_IMAGE_TAG:-latest} + #image: permitio/opal-server:${OPAL_IMAGE_TAG:-latest} + build: + context: ../ # Point to the directory containing your Dockerfile + dockerfile: ./docker/Dockerfile.server # Specify your Dockerfile if it's not named 'Dockerfile' deploy: mode: replicated - replicas: 2 + replicas: 1 endpoint_mode: vip environment: - OPAL_BROADCAST_URI=postgres://postgres:postgres@broadcast_channel:5432/postgres - - UVICORN_NUM_WORKERS=4 - - OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-tests-policy-repo.git} + - UVICORN_NUM_WORKERS=0 + #- OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-tests-policy-repo.git} + - OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-example-policy-repo.git} - OPAL_POLICY_REPO_MAIN_BRANCH=${POLICY_REPO_BRANCH} - OPAL_POLICY_REPO_SSH_KEY=${OPAL_POLICY_REPO_SSH_KEY} - 
OPAL_DATA_CONFIG_SOURCES={"config":{"entries":[{"url":"http://opal_server:7002/policy-data","config":{"headers":{"Authorization":"Bearer ${OPAL_CLIENT_TOKEN}"}},"topics":["policy_data"],"dst_path":"/static"}]}} @@ -35,9 +41,10 @@ services: opal_client: image: permitio/opal-client:${OPAL_IMAGE_TAG:-latest} + scale: 0 deploy: mode: replicated - replicas: 2 + replicas: 0 endpoint_mode: vip environment: - OPAL_SERVER_URL=http://opal_server:7002 @@ -50,9 +57,9 @@ services: - OPAL_AUTH_JWT_AUDIENCE=https://api.opal.ac/v1/ - OPAL_AUTH_JWT_ISSUER=https://opal.ac/ - OPAL_STATISTICS_ENABLED=true - ports: - - "7766-7767:7000" - - "8181-8182:8181" + #ports: + # - "7766-7767:7000" + # - "8181-8182:8181" depends_on: - opal_server command: sh -c "exec ./wait-for.sh opal_server:7002 --timeout=20 -- ./start.sh" diff --git a/app-tests/jwks_dir/jwks.json b/app-tests/jwks_dir/jwks.json new file mode 100644 index 000000000..d8a027820 --- /dev/null +++ b/app-tests/jwks_dir/jwks.json @@ -0,0 +1 @@ +{"keys": [{"kty": "RSA", "key_ops": ["verify"], "n": "3HYeEOlS7BXR4x0klclD909fcrjyr4Jkmuixfl8cRmS7q3LPIsl1hIueKK0qBLjc7jIUsPCUEoJTIwMcdFfPQnViexerdx_ekupUwR0IFRzJli5wG0cYW5UkKOKDaXRrN0cnOQLZ_48ol6aEki8lkbGNYmaGtqrNTHsKA8uEP8S7AKnFqseTHJPhKAGzeeFKjWD4wAR0dgXkixLVxcAFohP-WR68oWPlrRnkBfb3ovRgQpo0UVWnjY99DJu9KZCaCGhSyjP42kjY65PykFmWHRUTltfMq7dkGkKuIOn_0YEVFoGhTda934vDqZO2EXgjj2lTCpYkVNK_WsL8ILIeCHEnK2ZYnxl4BgKLOHu2xyk6U4i8VjYpJo0U9UDcvNHaIuPNTxs0LNr1VtSVLxvZHDZ8f0UnmElgSGnAHj1cFamT-erIfIFfSuhzJ2qwcA30Y1B6EO8bBrQ2YgSWm8CNJO9FqWkEK0SPb3xw64KSnGtuAbBpVoNvK8AeHT7m9-11QtS7PWUiUGkSGKtTMlsvz90hb-rbFBIRcDuP2NG6BfoFq-rbcX9A2Djqhpxi6Zfr_5s7GN95UTbUKCLau3jvgTwdSxjCPsiE9t1phlWmTwp_eC8uYVriSPwiQ4ZqBIUE3Gz1PikqRJCt2E2KFMiHjSwlEPMtrbPwnO1B0G1ZXk8", "e": "AQAB"}]} diff --git a/app-tests/minrun.sh b/app-tests/minrun.sh new file mode 100755 index 000000000..1905be2c6 --- /dev/null +++ b/app-tests/minrun.sh @@ -0,0 +1,194 @@ +#!/bin/bash +set -e + +export OPAL_AUTH_PUBLIC_KEY +export OPAL_AUTH_PRIVATE_KEY +export 
OPAL_AUTH_MASTER_TOKEN +export OPAL_CLIENT_TOKEN +export OPAL_DATA_SOURCE_TOKEN + +function generate_opal_keys { + echo "- Generating OPAL keys" + + ssh-keygen -q -t rsa -b 4096 -m pem -f opal_crypto_key -N "" + OPAL_AUTH_PUBLIC_KEY="$(cat opal_crypto_key.pub)" + OPAL_AUTH_PRIVATE_KEY="$(tr '\n' '_' < opal_crypto_key)" + rm opal_crypto_key.pub opal_crypto_key + + OPAL_AUTH_MASTER_TOKEN="$(openssl rand -hex 16)" + OPAL_AUTH_JWT_AUDIENCE=https://api.opal.ac/v1/ OPAL_AUTH_JWT_ISSUER=https://opal.ac/ OPAL_REPO_WATCHER_ENABLED=0 \ + opal-server run & + sleep 2; + + OPAL_CLIENT_TOKEN="$(opal-client obtain-token "$OPAL_AUTH_MASTER_TOKEN" --type client)" + echo "Client token: $OPAL_CLIENT_TOKEN" + OPAL_DATA_SOURCE_TOKEN="$(opal-client obtain-token "$OPAL_AUTH_MASTER_TOKEN" --type datasource)" + # shellcheck disable=SC2009ß + ps -ef | grep opal-server | grep -v grep | awk '{print $2}' | xargs kill + sleep 5; + + echo "- Create .env file" + rm -f .env + ( + echo "OPAL_AUTH_PUBLIC_KEY=\"$OPAL_AUTH_PUBLIC_KEY\""; + echo "OPAL_AUTH_PRIVATE_KEY=\"$OPAL_AUTH_PRIVATE_KEY\""; + echo "OPAL_AUTH_MASTER_TOKEN=\"$OPAL_AUTH_MASTER_TOKEN\""; + echo "OPAL_CLIENT_TOKEN=\"$OPAL_CLIENT_TOKEN\""; + echo "OPAL_AUTH_PRIVATE_KEY_PASSPHRASE=\"$OPAL_AUTH_PRIVATE_KEY_PASSPHRASE\"" + ) > .env +} + +function prepare_policy_repo { + echo "- Clone tests policy repo to create test's branch" + export OPAL_POLICY_REPO_URL + OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-example-policy-repo.git} + +echo "- Forking the policy repo" +OPAL_TARGET_ACCOUNT="iwphonedo" +ORIGINAL_REPO_NAME=$(basename -s .git "$OPAL_POLICY_REPO_URL") +NEW_REPO_NAME="${ORIGINAL_REPO_NAME}" +FORKED_REPO_URL="git@github.com:${OPAL_TARGET_ACCOUNT}/${NEW_REPO_NAME}.git" + +# Check if the forked repository already exists +if gh repo list "$OPAL_TARGET_ACCOUNT" --json name -q '.[].name' | grep -q "^${NEW_REPO_NAME}$"; then + echo "Forked repository $NEW_REPO_NAME already exists." 
+ OPAL_POLICY_REPO_URL="$FORKED_REPO_URL" + echo "Using existing forked repository: $OPAL_POLICY_REPO_URL" +else + # Using GitHub CLI to fork the repository + # gh repo fork "$OPAL_POLICY_REPO_URL" --clone --remote=false --org="$OPAL_TARGET_ACCOUNT" + OPAL_TARGET_PAT="${pat:-}" + curl -X POST -H "Authorization: token $OPAL_TARGET_PAT" https://api.github.com/repos/permitio/opal-example-policy-repo/forks + if [ $? -eq 0 ]; then + echo "Fork created successfully!" + else + echo "Error creating fork: $?" + fi + + # Update OPAL_POLICY_REPO_URL to point to the forked repo + OPAL_POLICY_REPO_URL="$FORKED_REPO_URL" + echo "Updated OPAL_POLICY_REPO_URL to $OPAL_POLICY_REPO_URL" +fi + + + export POLICY_REPO_BRANCH + POLICY_REPO_BRANCH=test-$RANDOM$RANDOM + rm -rf ./opal-example-policy-repo + git clone "$OPAL_POLICY_REPO_URL" + cd opal-example-policy-repo + git checkout -b $POLICY_REPO_BRANCH + git push --set-upstream origin $POLICY_REPO_BRANCH + cd - + + echo "OPAL_POLICY_REPO_URL=\"$OPAL_POLICY_REPO_URL\"" >> .env + echo "POLICY_REPO_BRANCH=\"$POLICY_REPO_BRANCH\"" >> .env + + # That's for the docker-compose to use, set ssh key from "~/.ssh/id_rsa", unless another path/key data was configured + export OPAL_POLICY_REPO_SSH_KEY + OPAL_POLICY_REPO_SSH_KEY_PATH=${OPAL_POLICY_REPO_SSH_KEY_PATH:-~/.ssh/id_rsa} + OPAL_POLICY_REPO_SSH_KEY=${OPAL_POLICY_REPO_SSH_KEY:-$(cat "$OPAL_POLICY_REPO_SSH_KEY_PATH")} + echo "- OPAL_POLICY_REPO_SSH_KEY=$OPAL_POLICY_REPO_SSH_KEY" + echo "OPAL_POLICY_REPO_SSH_KEY=\"$OPAL_POLICY_REPO_SSH_KEY\"" >> .env +} + +function compose { + docker compose -f ./docker-compose-app-tests.yml --env-file .env "$@" +} + +function check_clients_logged { + echo "- Looking for msg '$1' in client's logs" + compose logs --index 1 opal_client | grep -q "$1" + compose logs --index 2 opal_client | grep -q "$1" +} + +function check_no_error { + # Without index would output all replicas + if compose logs opal_client | grep -q 'ERROR'; then + echo "- Found error in logs" + 
exit 1 + fi +} + +function clean_up { + ARG=$? + if [[ "$ARG" -ne 0 ]]; then + echo "*** Test Failed ***" + echo "" + compose logs + else + echo "*** Test Passed ***" + echo "" + fi + compose down + #cd opal-example-policy-repo; git push -d origin $POLICY_REPO_BRANCH; cd - # Remove remote tests branch + rm -rf ./opal-example-policy-repo + exit $ARG +} + +function test_push_policy { + echo "- Testing pushing policy $1" + regofile="$1.rego" + cd opal-tests-policy-repo + echo "package $1" > "$regofile" + git add "$regofile" + git commit -m "Add $regofile" + git push + cd - + + curl -s --request POST 'http://localhost:7002/webhook' --header 'Content-Type: application/json' --header 'x-webhook-token: xxxxx' --data-raw '{"gitEvent":"git.push","repository":{"git_url":"'"$OPAL_POLICY_REPO_URL"'"}}' + sleep 5 + check_clients_logged "PUT /v1/policies/$regofile -> 200" +} + +function test_data_publish { + echo "- Testing data publish for user $1" + user=$1 + OPAL_CLIENT_TOKEN=$OPAL_DATA_SOURCE_TOKEN opal-client publish-data-update --src-url https://api.country.is/23.54.6.78 -t policy_data --dst-path "/users/$user/location" + sleep 5 + check_clients_logged "PUT /v1/data/users/$user/location -> 204" +} + +function test_statistics { + echo "- Testing statistics feature" + # Make sure 2 servers & 2 clients (repeat few times cause different workers might response) + for _ in {1..10}; do + curl -s 'http://localhost:7002/stats' --header "Authorization: Bearer $OPAL_DATA_SOURCE_TOKEN" | grep '"client_count":2,"server_count":2' + done +} + +function main { + # Setup + generate_opal_keys + prepare_policy_repo + + trap clean_up EXIT + + # Bring up OPAL containers + #compose down --remove-orphans + #compose up -d + #sleep 10 + + # Check containers started correctly + #check_clients_logged "Connected to PubSub server" + #check_clients_logged "Got policy bundle" + #check_clients_logged 'PUT /v1/data/static -> 204' + #check_no_error + + # Test functionality + # test_data_publish "bob" + # 
test_push_policy "something" + # test_statistics + + # echo "- Testing broadcast channel disconnection" + # compose restart broadcast_channel + # sleep 10 + + # test_data_publish "alice" + # test_push_policy "another" + # test_data_publish "sunil" + # test_data_publish "eve" + # test_push_policy "best_one_yet" + # TODO: Test statistics feature again after broadcaster restart (should first fix statistics bug) +} + +main diff --git a/app-tests/sample_service/Dockerfile b/app-tests/sample_service/Dockerfile new file mode 100644 index 000000000..f777de051 --- /dev/null +++ b/app-tests/sample_service/Dockerfile @@ -0,0 +1,44 @@ +# Use an OpenResty base image +FROM openresty/openresty:alpine-fat + +# Install dependencies +RUN apk update && apk add --no-cache python3 py3-pip && \ + python3 -m venv /venv && \ + . /venv/bin/activate && \ + pip install --upgrade pip && \ + pip install flask && \ + pip install requests && \ + pip install jwt + +RUN apk add --no-cache shadow + +RUN addgroup -S nginx && adduser -S nginx -G nginx + +# Set up the Python environment and install other dependencies +WORKDIR /app +COPY requirements.txt /app/requirements.txt +RUN . /venv/bin/activate && pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . 
/app + +# Copy NGINX configuration to OpenResty’s NGINX path +COPY nginx.conf /usr/local/openresty/nginx/conf/nginx.conf + +# Set environment variables for Flask +ENV FLASK_APP=app.py + +# Expose necessary ports +EXPOSE 80 5000 5682 + +# Ensure the log directory and log file exist, and set proper permissions +RUN mkdir -p /var/log/nginx && \ + touch /var/log/nginx/proxy_access.log && \ + chown nginx:nginx /var/log/nginx/proxy_access.log && \ + touch /var/log/nginx/error.log && \ + chown nginx:nginx /var/log/nginx/error.log + +# Run both OpenResty and Flask +COPY start.sh /start.sh +RUN chmod +x /start.sh +CMD /start.sh \ No newline at end of file diff --git a/app-tests/sample_service/app.py b/app-tests/sample_service/app.py new file mode 100644 index 000000000..8e345f348 --- /dev/null +++ b/app-tests/sample_service/app.py @@ -0,0 +1,80 @@ +from flask import Flask, request, jsonify +import requests +import debugpy + +app = Flask(__name__) + +debugpy.listen(("0.0.0.0", 5682)) # Optional, listen for debug requests on port 5678 + +# OPAL Authorization endpoint +OPAL_AUTH_URL = "http://opal_client:8181/v1/data/authorize" # Adjust with actual OPAL endpoint + +@app.route('/a') +def a(): + return 'Endpoint A' + +@app.route('/b') +def b(): + return 'Endpoint B' + +@app.route('/c') +def c(): + # Assuming the JWT token is passed in the Authorization header + auth_header = request.headers.get('Authorization') + + debugpy.wait_for_client() + + if not auth_header: + return jsonify({"error": "Unauthorized, missing Authorization header"}), 401 + + # Extract the token (assuming Bearer token) + token = auth_header.split(" ")[1] if "Bearer" in auth_header else None + + if not token: + return jsonify({"error": "Unauthorized, invalid Authorization header"}), 401 + + import jwt + + try: + # Decode the JWT token to extract the "sub" field + decoded_token = jwt.decode(token, options={"verify_signature": False}) + user = decoded_token.get("sub") + except jwt.DecodeError: + return 
jsonify({"error": "Unauthorized, invalid token"}), 401 + + if not user: + return jsonify({"error": "Unauthorized, 'sub' field not found in token"}), 401 + + # Prepare the payload for the OPAL authorization request with the extracted user + payload = { + "input": { + "user": user, + "method": request.method, + "path": request.path + } + } + + # Send the request to OPAL authorization endpoint + try: + response = requests.post(OPAL_AUTH_URL, json=payload) + + # Check if OPAL's response contains a positive authorization result + if response.status_code == 200: + opal_response = response.json() + if opal_response.get("result") is True: + return 'Endpoint C - Authorized' # Authorized access + + # If the result is not `true`, deny access + + # Assuming `response` is your variable containing the response object from OPAL + response_data = response.get_data(as_text=True) + return jsonify({"error": f"Forbidden, authorization denied! \n Response Body: {response_data}"}), 403 + # OPAL responded but with a non-200 status, treat as denied + return jsonify({"error": "Forbidden, OPAL authorization failed"}), 403 + + except requests.exceptions.RequestException as e: + # Handle connection or other request errors + return jsonify({"error": f"Error contacting OPAL client: {str(e)}"}), 500 + +if __name__ == '__main__': + app.run() \ No newline at end of file diff --git a/app-tests/sample_service/nginx.conf b/app-tests/sample_service/nginx.conf new file mode 100644 index 000000000..8662f7fe6 --- /dev/null +++ b/app-tests/sample_service/nginx.conf @@ -0,0 +1,109 @@ +worker_processes 1; + +events { worker_connections 1024; } + +http { + error_log /var/log/nginx/error.log debug; # Ensure this is set + + lua_shared_dict jwt_cache 10m; # Cache to avoid re-parsing JWT on every request + lua_package_path "/usr/local/lib/lua/?.lua;;"; # Adjust to match the Lua path on your setup + + server { + listen 80; + + set $auth_status 0; + + location /a { + access_log /var/log/nginx/proxy_access.log; + + 
# Directly proxy to Flask without authorization + proxy_pass http://127.0.0.1:5000; + } + + # This will be enforced in the endpoint + location /c { + access_log /var/log/nginx/proxy_access.log; + + proxy_pass http://127.0.0.1:5000; + } + + location / { + access_log /var/log/nginx/proxy_access.log; + + # Log the Authorization header to see if it's being passed correctly + log_by_lua_block { + ngx.log(ngx.ERR, "Authorization header: ", ngx.var.http_authorization) + } + + # Send authorization subrequest + auth_request /authz_check; + + # Proxy to Flask app if authorized + proxy_pass http://127.0.0.1:5000; + } + + location = /authz_check { + internal; + + # Authorization headers and content type for OPAL client + proxy_set_header Content-Type "application/json"; + proxy_set_header Authorization $http_authorization; + proxy_pass_request_body off; + + access_by_lua_block { + local jwt_token = ngx.var.http_authorization:match("Bearer%s+(.+)") + ngx.log(ngx.ERR, "JWT Token: ", jwt_token) + + if jwt_token then + local decoded_jwt = require("cjson").decode(require("ngx.decode_base64")(jwt_token:match("^[^.]+%.([^.]+)"))) + ngx.log(ngx.ERR, "Decoded JWT: ", require("cjson").encode(decoded_jwt)) + + local user_id = decoded_jwt["sub"] + local method = ngx.req.get_method() + local path = ngx.var.request_uri + + local opa_input = { + input = { + user = user_id, + method = method, + path = path + } + } + + ngx.req.set_body_data(require("cjson").encode(opa_input)) + else + ngx.log(ngx.ERR, "No JWT token found in Authorization header") + end + } + + # Forward request to OPAL + proxy_pass http://opal_client:8181/v1/data/authorize; + + # Process OPAL's response in header_filter_by_lua_block if needed + header_filter_by_lua_block { + ngx.ctx.auth_allowed = false -- Default to unauthorized + + local res_body = ngx.arg[1] + if res_body then + local response_json = require("cjson").decode(res_body) + if response_json and response_json["result"] == true then + ngx.ctx.auth_allowed = true + 
end + end + + if not ngx.ctx.auth_allowed then + ngx.status = ngx.HTTP_UNAUTHORIZED + ngx.say("Unauthorized") + ngx.exit(ngx.HTTP_UNAUTHORIZED) + end + } + } + # Custom 401 page if unauthorized + error_page 401 = /unauthorized; + error_page 403 = /unauthorized; + location = /unauthorized { + internal; + return 401 "Unauthorized"; + } + } +} \ No newline at end of file diff --git a/app-tests/sample_service/openapi.yaml b/app-tests/sample_service/openapi.yaml new file mode 100644 index 000000000..f45907a6a --- /dev/null +++ b/app-tests/sample_service/openapi.yaml @@ -0,0 +1,88 @@ +openapi: 3.0.0 +info: + title: Flask REST API with OPAL Authorization + description: A simple API with three endpoints (`/a`, `/b`, and `/c`), where `/c` requires OPAL authorization. + version: 1.0.0 +servers: + - url: http://localhost:5500 # Modify with actual server URL and port + +paths: + /a: + get: + summary: Endpoint A + description: A simple, unauthenticated endpoint. + responses: + '200': + description: Success + content: + text/plain: + schema: + type: string + example: "Endpoint A" + + /b: + get: + summary: Endpoint B + description: Another unauthenticated endpoint. + responses: + '200': + description: Success + content: + text/plain: + schema: + type: string + example: "Endpoint B" + + /c: + get: + summary: Endpoint C with Authorization + description: | + This endpoint requires authorization. The client must provide a JWT token in the Authorization header. + The endpoint checks with an OPAL server to authorize the user based on the token. 
+ security: + - bearerAuth: [] + responses: + '200': + description: Authorized access to endpoint C + content: + text/plain: + schema: + type: string + example: "Endpoint C - Authorized" + '401': + description: Unauthorized - Missing or invalid JWT token + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Unauthorized, missing Authorization header" + '403': + description: Forbidden - Authorization denied by OPAL + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Forbidden, authorization denied" + '500': + description: Error contacting OPAL client + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error contacting OPAL client: Connection error" + +components: + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # Indicates the use of JWT for bearer token \ No newline at end of file diff --git a/app-tests/sample_service/policy.rego b/app-tests/sample_service/policy.rego new file mode 100644 index 000000000..fc156dcbd --- /dev/null +++ b/app-tests/sample_service/policy.rego @@ -0,0 +1,37 @@ +package test + +default allow = false + +# User-role mapping +user_roles = { + "alice": "reader", + "bob": "writer" +} + +# Decode the token and store payload +token = {"payload": payload} { + io.jwt.decode(input.token, [_, payload, _]) +} + +# Extract the user role based on the user from `input` +user_role = user_roles[input.user] + +# Allow access to path `a` and `b` only for users with the role `writer` +allow = true { + input.path = ["a"] + input.method = "GET" + user_role == "writer" +} + +allow = true { + input.path = ["b"] + input.method = "GET" + user_role == "writer" +} + +# Allow access to path `c` for users with role `writer` or `reader` +allow = true { + input.path = ["c"] + input.method = "GET" + user_role == "writer" or user_role == "reader" +} \ No newline at end of file diff 
--git a/app-tests/sample_service/requirements.txt b/app-tests/sample_service/requirements.txt new file mode 100644 index 000000000..f831415e8 --- /dev/null +++ b/app-tests/sample_service/requirements.txt @@ -0,0 +1,18 @@ +blinker==1.8.2 +certifi==2024.8.30 +cffi==1.17.1 +charset-normalizer==3.4.0 +click==8.1.7 +cryptography==43.0.3 +debugpy==1.8.7 +Flask==3.0.3 +idna==3.10 +itsdangerous==2.2.0 +Jinja2==3.1.4 +jwt==1.3.1 +MarkupSafe==3.0.2 +pycparser==2.22 +PyJWT==2.9.0 +requests==2.32.3 +urllib3==2.2.3 +Werkzeug==3.1.2 diff --git a/app-tests/sample_service/start.sh b/app-tests/sample_service/start.sh new file mode 100644 index 000000000..4fcd2a91d --- /dev/null +++ b/app-tests/sample_service/start.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# Activate virtual environment +. /venv/bin/activate + +# Start OpenResty +openresty -g "daemon off;" & + +# Start Flask app +python -Xfrozen_modules=off -m flask run --host=0.0.0.0 --port=5000 diff --git a/app-tests/sample_service/supervisord.conf b/app-tests/sample_service/supervisord.conf new file mode 100644 index 000000000..62a9ffba4 --- /dev/null +++ b/app-tests/sample_service/supervisord.conf @@ -0,0 +1,8 @@ +[supervisord] +nodaemon=true + +[program:nginx] +command=nginx -g "daemon off;" + +[program:flask] +command=flask run --host=0.0.0.0 --port=5000 \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index a14953117..9ad66f271 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -5,10 +5,10 @@ FROM python:3.10-bookworm AS build-stage # from now on, work in the /app directory WORKDIR /app/ # Layer dependency install (for caching) -COPY ./packages/requires.txt ./base_requires.txt -COPY ./packages/opal-common/requires.txt ./common_requires.txt -COPY ./packages/opal-client/requires.txt ./client_requires.txt -COPY ./packages/opal-server/requires.txt ./server_requires.txt +COPY ../packages/requires.txt ./base_requires.txt +COPY ../packages/opal-common/requires.txt ./common_requires.txt +COPY 
../packages/opal-client/requires.txt ./client_requires.txt +COPY ../packages/opal-server/requires.txt ./server_requires.txt # install python deps RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt @@ -16,7 +16,7 @@ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./ # split this stage to save time and reduce image size # --------------------------------------------------- FROM rust:1.79 AS cedar-builder -COPY ./cedar-agent /tmp/cedar-agent +COPY ../cedar-agent /tmp/cedar-agent WORKDIR /tmp/cedar-agent RUN CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse cargo build --release @@ -34,7 +34,7 @@ RUN useradd -m -b / -s /bin/bash opal WORKDIR /opal # copy wait-for script (create link at old path to maintain backward compatibility) -COPY scripts/wait-for.sh . +COPY ../scripts/wait-for.sh . RUN chmod +x ./wait-for.sh RUN ln -s /opal/wait-for.sh /usr/wait-for.sh @@ -42,15 +42,15 @@ RUN ln -s /opal/wait-for.sh /usr/wait-for.sh RUN apt-get update && apt-get install -y netcat-traditional jq wget && apt-get clean # copy startup script (create link at old path to maintain backward compatibility) -COPY ./scripts/start.sh . +COPY ../scripts/start.sh . RUN chmod +x ./start.sh RUN ln -s /opal/start.sh /start.sh # copy gunicorn_config -COPY ./scripts/gunicorn_conf.py . +COPY ../scripts/gunicorn_conf.py . # copy app code -COPY ./README.md . -COPY ./packages ./packages/ +COPY ../README.md . 
+COPY ../packages ./packages/ # install the opal-common package RUN cd ./packages/opal-common && python setup.py install # Make sure scripts in .local are usable: @@ -119,6 +119,8 @@ ENV OPAL_INLINE_OPA_ENABLED=true ENV OPAL_INLINE_OPA_EXEC_PATH=/opal/opa # expose opa port EXPOSE 8181 +EXPOSE 5678 + USER opal # CEDAR CLIENT IMAGE -------------------------------- diff --git a/docker/Dockerfile.client b/docker/Dockerfile.client new file mode 100644 index 000000000..81c1cd192 --- /dev/null +++ b/docker/Dockerfile.client @@ -0,0 +1,118 @@ +# Dockerfile.server + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY ../packages/requires.txt ./base_requires.txt +COPY ../packages/opal-common/requires.txt ./common_requires.txt +COPY ../packages/opal-client/requires.txt ./client_requires.txt +COPY ../packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY ../scripts/wait-for.sh . 
+RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY ../scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh +# copy gunicorn_config +COPY ../scripts/gunicorn_conf.py . +# copy app code + +COPY ../README.md . +COPY ../packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH +# run gunicorn +CMD ["./start.sh"] + + +# STANDALONE IMAGE ---------------------------------- +# --------------------------------------------------- + FROM common AS client-standalone + # uvicorn config ------------------------------------ + # install the opal-client package + RUN cd ./packages/opal-client && python setup.py install + + # WARNING: do not change the number of workers on the opal client! + # only one worker is currently supported for the client. 
+ + # number of uvicorn workers + ENV UVICORN_NUM_WORKERS=1 + # uvicorn asgi app + ENV UVICORN_ASGI_APP=opal_client.main:app + # uvicorn port + ENV UVICORN_PORT=7000 + # disable inline OPA + ENV OPAL_INLINE_OPA_ENABLED=false + + # expose opal client port + EXPOSE 7000 + USER opal + + RUN mkdir -p /opal/backup + VOLUME /opal/backup + + + # IMAGE to extract OPA from official image ---------- + # --------------------------------------------------- + FROM alpine:latest AS opa-extractor + USER root + + RUN apk update && apk add skopeo tar + WORKDIR /opal + + # copy opa from official docker image + ARG opa_image=openpolicyagent/opa + ARG opa_tag=latest-static + RUN skopeo copy "docker://${opa_image}:${opa_tag}" docker-archive:./image.tar && \ + mkdir image && tar xf image.tar -C ./image && cat image/*.tar | tar xf - -C ./image -i && \ + find image/ -name "opa*" -type f -executable -print0 | xargs -0 -I "{}" cp {} ./opa && chmod 755 ./opa && \ + rm -r image image.tar + + + # OPA CLIENT IMAGE ---------------------------------- + # Using standalone image as base -------------------- + # --------------------------------------------------- + FROM client-standalone AS client + + # Temporarily move back to root for additional setup + USER root + + # copy opa from opa-extractor + COPY --from=opa-extractor /opal/opa ./opa + + # enable inline OPA + ENV OPAL_INLINE_OPA_ENABLED=true + # expose opa port + EXPOSE 8181 + + USER opal \ No newline at end of file diff --git a/docker/Dockerfile.server b/docker/Dockerfile.server new file mode 100644 index 000000000..6a15df983 --- /dev/null +++ b/docker/Dockerfile.server @@ -0,0 +1,116 @@ +# Dockerfile.server + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY ../packages/requires.txt ./base_requires.txt +COPY ../packages/opal-common/requires.txt ./common_requires.txt +COPY ../packages/opal-client/requires.txt 
./client_requires.txt +COPY ../packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY ../scripts/wait-for.sh . +RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY ../scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh +# copy gunicorn_config +COPY ../scripts/gunicorn_conf.py . +# copy app code + +COPY ../README.md . 
+COPY ../packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH +# run gunicorn +CMD ["./start.sh"] + +# SERVER IMAGE -------------------------------------- +# --------------------------------------------------- +FROM common AS server + +RUN apt-get update && apt-get install -y openssh-client git && apt-get clean +RUN git config --global core.symlinks false # Mitigate CVE-2024-32002 + +USER opal + +# Potentially trust POLICY REPO HOST ssh signature -- +# opal tracks a remote (git) repository and fetches policy (e.g rego) from it. +# however, if the policy repo uses an ssh url scheme, authentication to said repo +# is done via ssh, and without adding the repo remote host (i.e: github.com) to +# the ssh known hosts file, ssh will output an interactive prompt that +# looks something like this: +# The authenticity of host 'github.com (192.30.252.131)' can't be established. +# RSA key fingerprint is 16:27:ac:a5:76:28:1d:52:13:1a:21:2d:bz:1d:66:a8. +# Are you sure you want to continue connecting (yes/no)? +# if the docker build arg `TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT` is set to `true` +# (default), the host specified by `POLICY_REPO_HOST` build arg (i.e: `github.com`) +# will be added to the known ssh hosts file at build time and prevent said prompt +# from showing.
+ARG TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT="true" +ARG POLICY_REPO_HOST="github.com" + +RUN if [ "$TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT" = "true" ] ; then \ + mkdir -p ~/.ssh && \ + chmod 0700 ~/.ssh && \ + ssh-keyscan -t rsa ${POLICY_REPO_HOST} >> ~/.ssh/known_hosts ; fi + +USER root + +# install the opal-server package +RUN cd ./packages/opal-server && python setup.py install + +# uvicorn config ------------------------------------ + +# number of uvicorn workers +ENV UVICORN_NUM_WORKERS=1 +# uvicorn asgi app +ENV UVICORN_ASGI_APP=opal_server.main:app +# uvicorn port +ENV UVICORN_PORT=7002 + +# opal configuration -------------------------------- +# if you are not setting OPAL_DATA_CONFIG_SOURCES for some reason, +# override this env var with the actual public address of the server +# container (i.e: if you are running in docker compose and the server +# host is `opalserver`, the value will be: http://opalserver:7002/policy-data) +# `host.docker.internal` value will work better than `localhost` if you are +# running dockerized opal server and client on the same machine +# ENV OPAL_ALL_DATA_URL=http://host.docker.internal:7002/policy-data +ENV OPAL_ALL_DATA_URL=http://opal_server:7002/policy-data +# Use fixed path for the policy repo - so new leader would use the same directory without re-cloning it. +# That's ok when running in docker and fs is ephemeral (repo in a bad state would be fixed by restarting container). 
+ENV OPAL_POLICY_REPO_REUSE_CLONE_PATH=true + +# expose opal server port +EXPOSE 7002 +USER opal diff --git a/docker/docker-compose-local.yml b/docker/docker-compose-local.yml new file mode 100644 index 000000000..086bb7aee --- /dev/null +++ b/docker/docker-compose-local.yml @@ -0,0 +1,105 @@ +version: '3.8' + +services: + # Database service for broadcast channel + broadcast_channel: + image: postgres:alpine + container_name: broadcast_channel + environment: + - POSTGRES_DB=postgres + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + + # Gitea service + gitea: + image: gitea/gitea:latest + container_name: gitea + environment: + - USER_UID=1000 + - USER_GID=1000 + - DB_TYPE=sqlite3 # Alternatively, you can set up PostgreSQL or MySQL for production + - GITEA__database__DB_PATH=/data/gitea/gitea.db + - GITEA__server__ROOT_URL=http://localhost:3000/ + - GITEA__service__DISABLE_REGISTRATION=true # Optional: disable public registrations for security + volumes: + - gitea_data:/data + ports: + - "3000:3000" # Expose Gitea's web interface on port 3000 + - "2222:22" # Expose Gitea's SSH service on port 2222 + depends_on: + - broadcast_channel + + # OPAL Server and Client service + opal_server: + build: + context: ../ # Point to the directory containing your Dockerfile + dockerfile: ./docker/Dockerfile.server # Specify your Dockerfile if it's not named 'Dockerfile' + container_name: opal_server + environment: + - OPAL_BROADCAST_URI=postgres://postgres:postgres@broadcast_channel:5432/postgres + - UVICORN_NUM_WORKERS=1 + - OPAL_POLICY_REPO_URL=http://gitea:3000/permit/opal-example-policy-repo + - OPAL_POLICY_REPO_POLLING_INTERVAL=30 + - OPAL_DATA_CONFIG_SOURCES={"config":{"entries":[{"url":"http://opal_server:7002/policy-data","topics":["policy_data"],"dst_path":"/static"}]}} + - OPAL_LOG_FORMAT_INCLUDE_PID=true + - OPAL_SERVER_URL=http://opal_server:7002 + - OPAL_LOG_FORMAT_INCLUDE_PID=true + - OPAL_INLINE_OPA_LOG_FORMAT=http + - DEBUGPY_PORT=5678 + ports: + - 
"7002:7002" # Expose OPAL Server + - "5679:5678" # DebugPy + volumes: + - ../packages:/app/packages # Mount local packages directory for live updates + - ../scripts:/app/scripts # Mount local scripts for live updates + - ../README.md:/app/README.md # Mount README for reference, if necessary + depends_on: + - gitea + command: sh -c "exec ./wait-for.sh broadcast_channel:5432 --timeout=20 -- ./start.sh" + + opal_client: + build: + context: ../ # Point to the directory containing your Dockerfile + dockerfile: ./docker/Dockerfile.client # Specify your Dockerfile if it's not named 'Dockerfile' + container_name: opal_client + environment: + - OPAL_SERVER_URL=http://opal_server:7002 + - OPAL_LOG_FORMAT_INCLUDE_PID=true + - OPAL_INLINE_OPA_LOG_FORMAT=http + - DEBUGPY_PORT=5678 + ports: + - "7766:7000" # OPAL client + - "8181:8181" # OPA agent + - "5680:5678" # DebugPy + depends_on: + - opal_server + command: sh -c "exec ./wait-for.sh opal_server:7002 --timeout=20 -- ./start.sh" + + sample_service: + build: + context: ../app-tests/sample_service # Point to the directory containing your Dockerfile + dockerfile: ./Dockerfile # Specify your Dockerfile if it's not named 'Dockerfile' + container_name: openresty_nginx # This sets the container name + environment: + - FLASK_APP=app.py + - OPAL_URL=http://opal_client:7000 + ports: + - "5500:80" + - "5682:5682" + volumes: + - ../app-tests/sample_service/sources:/app/sources # Mount the sources directory + depends_on: + - opal_client + frontend: + build: + context: ../app-tests/opal-frontend + dockerfile: ./Dockerfile + container_name: frontend + ports: + - "4200:80" # Serve Angular app on http://localhost:4200 + depends_on: + - sample_service # Make sure the backend is up first + +volumes: + opa_backup: + gitea_data: # Data volume for Gitea \ No newline at end of file diff --git a/packages/opal-client/opal_client/main.py b/packages/opal-client/opal_client/main.py index 65f3bb665..635ddccc4 100644 --- 
a/packages/opal-client/opal_client/main.py +++ b/packages/opal-client/opal_client/main.py @@ -1,5 +1,11 @@ from opal_client.client import OpalClient client = OpalClient() + +import debugpy +#debugpy.listen(("0.0.0.0", 5678)) +print("Waiting for debugger attach...") +#debugpy.wait_for_client() # Optional, wait for debugger to attach before continuing + # expose app for Uvicorn app = client.app diff --git a/packages/opal-client/requires.txt b/packages/opal-client/requires.txt index 4acb85cb6..ed57c33ee 100644 --- a/packages/opal-client/requires.txt +++ b/packages/opal-client/requires.txt @@ -11,3 +11,4 @@ opentelemetry-instrumentation opentelemetry-instrumentation-fastapi opentelemetry-exporter-otlp opentelemetry-exporter-prometheus +debugpy diff --git a/packages/opal-server/opal_server/data/api.py b/packages/opal-server/opal_server/data/api.py index 3ef9d5732..ba426575e 100644 --- a/packages/opal-server/opal_server/data/api.py +++ b/packages/opal-server/opal_server/data/api.py @@ -1,5 +1,6 @@ from typing import Optional +import debugpy from fastapi import APIRouter, Depends, Header, HTTPException, status from fastapi.responses import RedirectResponse from opal_common.authentication.authz import ( @@ -86,6 +87,8 @@ async def get_data_sources_config(authorization: Optional[str] = Header(None)): token = get_token_from_header(authorization) if data_sources_config.config is not None: logger.info("Serving source configuration") + logger.info("Source config: {config}", config=data_sources_config.config) + debugpy.breakpoint() return data_sources_config.config elif data_sources_config.external_source_url is not None: url = str(data_sources_config.external_source_url) diff --git a/packages/opal-server/opal_server/main.py b/packages/opal-server/opal_server/main.py index 7e61e2a66..9a56377d7 100644 --- a/packages/opal-server/opal_server/main.py +++ b/packages/opal-server/opal_server/main.py @@ -1,3 +1,9 @@ + +import debugpy +#debugpy.listen(("0.0.0.0", 5678)) +print("Waiting 
for debugger attach...") +#debugpy.wait_for_client() # Optional, wait for debugger to attach before continuing + def create_app(*args, **kwargs): from opal_server.server import OpalServer diff --git a/pytest.ini b/pytest.ini index 16c88ba91..10b5e3305 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,3 +1,4 @@ # Handling DeprecationWarning 'asyncio_mode' default value [pytest] asyncio_mode = strict +asyncio_default_fixture_loop_scope = function diff --git a/requirements.txt b/requirements.txt index 86e2f7efe..93930ce35 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,6 +7,7 @@ pytest-asyncio pytest-rerunfailures wheel>=0.38.0 twine +testcontainers setuptools>=70.0.0 # not directly required, pinned by Snyk to avoid a vulnerability zipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability prometheus_client diff --git a/scripts/start.sh b/scripts/start.sh index 350c836bc..16aebbeea 100755 --- a/scripts/start.sh +++ b/scripts/start.sh @@ -5,6 +5,8 @@ export GUNICORN_CONF=${GUNICORN_CONF:-./gunicorn_conf.py} export GUNICORN_TIMEOUT=${GUNICORN_TIMEOUT:-30} export GUNICORN_KEEP_ALIVE_TIMEOUT=${GUNICORN_KEEP_ALIVE_TIMEOUT:-5} +sleep 10 + if [[ -z "${OPAL_BROADCAST_URI}" && "${UVICORN_NUM_WORKERS}" != "1" ]]; then echo "OPAL_BROADCAST_URI must be set when having multiple workers" exit 1 @@ -15,4 +17,9 @@ prefix="" if [[ -z "${OPAL_ENABLE_DATADOG_APM}" && "${OPAL_ENABLE_DATADOG_APM}" = "true" ]]; then prefix=ddtrace-run fi -(set -x; exec $prefix gunicorn -b 0.0.0.0:${UVICORN_PORT} -k uvicorn.workers.UvicornWorker --workers=${UVICORN_NUM_WORKERS} -c ${GUNICORN_CONF} ${UVICORN_ASGI_APP} -t ${GUNICORN_TIMEOUT} --keep-alive ${GUNICORN_KEEP_ALIVE_TIMEOUT}) + +#(set -x; exec $prefix gunicorn --reload -b 0.0.0.0:${UVICORN_PORT} -k uvicorn.workers.UvicornWorker --workers=${UVICORN_NUM_WORKERS} -c ${GUNICORN_CONF} ${UVICORN_ASGI_APP} -t ${GUNICORN_TIMEOUT} --keep-alive ${GUNICORN_KEEP_ALIVE_TIMEOUT}) +(set -x; exec $prefix python -m debugpy --listen 
0.0.0.0:5678 -m uvicorn ${UVICORN_ASGI_APP} --reload --host 0.0.0.0 --port ${UVICORN_PORT} ) + +# wait for the user to press a key before exiting +read -n1 -r -p "Press any key to continue..." key diff --git a/tests/.env.example b/tests/.env.example new file mode 100644 index 000000000..ae9fb7b10 --- /dev/null +++ b/tests/.env.example @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +export OPAL_POLICY_REPO_URL='' +export POLICY_REPO_BRANCH='' +export OPAL_POLICY_REPO_SSH_KEY='' diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 000000000..815917d65 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,82 @@ +This document describes the OPAL test suite: its structure, supporting utilities, and how to run the tests. + +--- + +# Tests + +The tests folder contains integration and unit tests for OPAL. These tests ensure the proper functionality and reliability of OPAL across various components and scenarios. Below is an overview of the test structure, utilities, and execution methods. + +## Test Structure + +- **`tests/containers`**: Configurations and setups for containerized environments used in testing OPAL, including Docker and Kubernetes configurations. +- **`tests/data-fetchers`**: OPAL data fetchers used in the tests to fetch data from various sources, such as PostgreSQL, MongoDB, etc. +- **`tests/docker`**: Dockerfiles and related files used to build Docker images for the tests. +- **`tests/policies`**: Policies written in REGO used to verify that OPAL functions correctly. +- **`tests/policy_repos`**: Providers managing policy repositories on platforms such as Gitea, GitHub, GitLab, and others. Additional platforms should implement a class derived from `PolicyRepoBase` (e.g., Bitbucket). +- **`tests/app-tests`**: Integration tests running OPAL with a sample service to verify correct configuration.
+- **`tests/policy_stores`**: Test setups to validate support for policy decision engines such as OPA, Cedar, OpenFGA, etc. +- **`conftest.py`**: Fixtures shared across multiple tests for consistent test environments. + +The tests are built using [Pytest](https://pytest.org/en/latest/) and leverage [testcontainers](https://testcontainers.org/) to build and run Docker images. + +## Infrastructure of the Testing System + +### Settings + +The `settings.py` file includes a `TestSettings` class for configuring global test settings. This class allows you to define: + +- Test data location. +- Docker network configuration. +- Other environment settings. + +### Utilities + +The `utils.py` file contains a `Utils` class for simplifying test writing. It provides methods for: + +- Creating temporary directories. +- Copying files. +- Other common tasks. + +### Using the `session_matrix` + +The `session_matrix` feature allows you to define and manage test scenarios across multiple configurations. This is particularly useful for validating OPAL's behavior under various conditions. + +#### Using the `is_final` Property + +The `is_final` property within the `session_matrix` helps identify if a particular test session represents the last stage of a given scenario. This can be used to perform cleanup tasks or additional validations at the end of a test sequence. + +Example: + +```python +def test_example(session_matrix): + if session_matrix.is_final: + # Perform cleanup or final assertions + print("Final session reached") +``` + +## Writing Your Own Tests + +To write a test, include `opal_servers` and `opal_clients` as parameters in your test function. These will automatically be populated with available OPAL servers and clients. 
For example: + +```python +def test_custom_policy(opal_servers, opal_clients): + server = opal_servers[0] + client = opal_clients[0] + # Add your test logic here +``` + +## Running the Tests + +To execute the tests, run the `run.sh` script from the root directory of the repository. This script sets up the environment and executes all tests: + +```bash +./run.sh +``` + +## OPAL API Reference + +Refer to the [OPAL API Documentation](https://opal-v2.permit.io/redoc#tag/Bundle-Server/operation/get_policy_policy_get) for additional details on endpoints and functionality. + +--- + +For further details, see the main OPAL project documentation. \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 000000000..bdded10dc --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,505 @@ +import json +import os +import shutil +import tempfile +import threading +import time +from typing import List + +import debugpy +import pytest +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger +from testcontainers.core.waiting_utils import wait_for_logs + +import docker +from tests import utils +from tests.containers.broadcast_container_base import BroadcastContainerBase +from tests.containers.cedar_container import CedarContainer +from tests.containers.gitea_container import GiteaContainer +from tests.containers.kafka_broadcast_container import KafkaBroadcastContainer +from tests.containers.opa_container import OpaContainer, OpaSettings +from tests.containers.opal_client_container import OpalClientContainer +from tests.containers.opal_server_container import OpalServerContainer +from tests.containers.postgres_broadcast_container import PostgresBroadcastContainer +from tests.containers.redis_broadcast_container import RedisBroadcastContainer +from
tests.containers.settings.cedar_settings import CedarSettings +from tests.containers.settings.gitea_settings import GiteaSettings +from tests.containers.settings.opal_client_settings import OpalClientSettings +from tests.containers.settings.opal_server_settings import OpalServerSettings +from tests.containers.settings.postgres_broadcast_settings import ( + PostgresBroadcastSettings, +) +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_factory import ( + PolicyRepoFactory, + SupportedPolicyRepo, +) +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings +from tests.settings import pytest_settings + +logger = setup_logger(__name__) + +# wait some seconds for the debugger to attach +debugger_wait_time = 5 # seconds + + +def cancel_wait_for_client_after_timeout(): + try: + time.sleep(debugger_wait_time) + debugpy.wait_for_client.cancel() + except Exception as e: + print(f"Failed to cancel wait for client: {e}") + + +try: + if pytest_settings.wait_for_debugger: + t = threading.Thread(target=cancel_wait_for_client_after_timeout) + t.start() + print(f"Waiting for debugger to attach... {debugger_wait_time} seconds timeout") + debugpy.wait_for_client() +except Exception as e: + print(f"Failed to attach debugger: {e}") + +utils.export_env("OPAL_TESTS_DEBUG", "true") +utils.install_opal_server_and_client() + + +@pytest.fixture(scope="session") +def temp_dir(): + # Setup: Create a temporary directory + """Creates a temporary directory once at the beginning of the test session, + prints the directory path to the console, and yields it to the test. + + After the test session is finished, it deletes the directory and + prints the directory removal to the console. + + This fixture is useful for tests that need a temporary directory to + exist for the duration of the test session. 
+ """ + dir_path = tempfile.mkdtemp() + print(f"Temporary directory created: {dir_path}") + yield dir_path + + # Teardown: Clean up the temporary directory + shutil.rmtree(dir_path) + print(f"Temporary directory removed: {dir_path}") + + +@pytest.fixture(scope="session") +def opal_network(): + """Creates a Docker network and yields it. + + The network is cleaned up after all tests have finished running. + """ + network = Network().create() + + yield network + + print("Removing network...") + time.sleep(5) # wait for the containers to stop + network.remove() + print("Network removed") + + +@pytest.fixture(scope="session") +def number_of_opal_servers(): + """The number of OPAL servers to start. + + This fixture is used to determine how many OPAL servers to start for + the tests. The default value is 2, but it can be overridden by setting + the environment variable OPAL_TESTS_NUMBER_OF_OPAL_SERVERS. + + Returns: + int: The number of OPAL servers to start. + """ + return 2 + + +from tests.fixtures.broadcasters import ( + broadcast_channel, + kafka_broadcast_channel, + postgres_broadcast_channel, + redis_broadcast_channel, +) +from tests.fixtures.images import opal_server_image +from tests.fixtures.policy_repos import gitea_server, gitea_settings, policy_repo + + +@pytest.fixture(scope="session") +def opal_servers( + opal_network: Network, + broadcast_channel: BroadcastContainerBase, + policy_repo: PolicyRepoBase, + number_of_opal_servers: int, + opal_server_image: str, + topics: dict[str, int], + # kafka_broadcast_channel: KafkaBroadcastContainer, + # redis_broadcast_channel: RedisBroadcastContainer, + session_matrix, +): + """Fixture that initializes and manages OPAL server containers for testing. + + This fixture sets up a specified number of OPAL server containers, each + connected to the provided Docker network and using the specified broadcast + channel. The first server container sets up and creates a webhook for the + policy repository. 
All containers are started and their logs are monitored + for successful cloning of the policy repository. The containers are stopped + after the test session is complete. + + Args: + opal_network (Network): The Docker network to which the containers are connected. + broadcast_channel (BroadcastContainerBase): The broadcast channel container. + policy_repo (PolicyRepoBase): The policy repository to be used. + number_of_opal_servers (int): The number of OPAL server containers to start. + opal_server_image (str): The Docker image used for the OPAL servers. + topics (dict[str, int]): The topics for OPAL data configuration. + kafka_broadcast_channel (KafkaBroadcastContainer): The Kafka broadcast channel container. + redis_broadcast_channel (RedisBroadcastContainer): The Redis broadcast channel container. + session_matrix: The session matrix used for the test configuration. + + Yields: + List[OpalServerContainer]: A list of running OPAL server containers. + """ + + if not broadcast_channel: + raise ValueError("Missing 'broadcast_channel' container.") + + containers = [] # List to store container instances + + for i in range(number_of_opal_servers): + container_name = f"opal_server_{i+1}" + + container = OpalServerContainer( + OpalServerSettings( + broadcast_uri=broadcast_channel.get_url(), + container_name=container_name, + container_index=i + 1, + uvicorn_workers="4", + policy_repo_url=policy_repo.get_repo_url(), + image=opal_server_image, + log_level="DEBUG", + data_topics=" ".join(topics.keys()), + polling_interval=3, + ), + network=opal_network, + ) + + container.start() + container.get_wrapped_container().reload() + + if i == 0: + # Only the first server should setup the webhook + policy_repo.setup_webhook( + container.get_container_host_ip(), container.settings.port + ) + policy_repo.create_webhook() + + print( + f"Started container: {container_name}, ID: {container.get_wrapped_container().id}" + ) + container.wait_for_log("Clone succeeded", timeout=30) + 
        containers.append(container)
+
+    yield containers
+
+    for container in containers:
+        container.stop()
+
+
+@pytest.fixture(scope="session")
+def number_of_opal_clients():
+    """The number of OPAL clients to start.
+
+    This fixture is used to determine how many OPAL clients to start for
+    the tests. The default value is 2; override this fixture in a test
+    module to start a different number of clients.
+    """
+    return 2
+
+
+@pytest.fixture(scope="session")
+def connected_clients(opal_clients: List[OpalClientContainer]):
+    """A fixture that waits for all OPAL clients to connect to the PubSub
+    server before yielding them.
+
+    This fixture takes a list of OPAL clients as input and waits for each of them
+    to connect to the PubSub server before yielding them. The fixture is used to
+    ensure that all OPAL clients are connected and ready to receive messages
+    before the tests are executed.
+
+    Parameters
+    ----------
+    opal_clients : List[OpalClientContainer]
+        A list of OPAL client containers.
+
+    Yields
+    ------
+    List[OpalClientContainer]
+        A list of connected OPAL client containers.
+    """
+    for client in opal_clients:
+        assert client.wait_for_log(
+            log_str="Connected to PubSub server", timeout=30
+        ), f"Client {client.settings.container_name} did not connect to PubSub server."
+    yield opal_clients
+
+
+from tests.fixtures.images import (
+    cedar_image,
+    opa_image,
+    opal_client_image,
+    opal_client_with_opa_image,
+)
+from tests.fixtures.policy_stores import cedar_server, opa_server
+
+
+@pytest.fixture(scope="session")
+def opal_clients(
+    opal_network: Network,
+    opal_servers: List[OpalServerContainer],
+    # opa_server: OpaContainer,
+    # cedar_server: CedarContainer,
+    request,
+    number_of_opal_clients: int,
+    opal_client_with_opa_image,
+):
+    """A fixture that starts and manages multiple OPAL client containers.
+ + This fixture takes a list of OPAL server containers as input and starts a + specified number of OPAL client containers, each connected to the first + OPAL server container. The fixture yields the list of started OPAL client + containers. + + Parameters + ---------- + opal_network : Network + The Docker network to which the containers are connected. + opal_servers : List[OpalServerContainer] + A list of OPAL server containers. + #opa_server : OpaContainer + # The OPA server container. + cedar_server : CedarContainer + The Cedar server container. + request + The pytest request object. + number_of_opal_clients : int + The number of OPAL clients to start. + opal_client_image + The Docker image used for the OPAL clients. + + Yields + ------ + List[OpalClientContainer] + A list of started OPAL client containers. + """ + if not opal_servers or len(opal_servers) == 0: + raise ValueError("Missing 'opal_server' container.") + + opal_server_url = f"http://{opal_servers[0].settings.container_name}:{opal_servers[0].settings.port}" + + containers = [] # List to store OpalClientContainer instances + + for i in range(number_of_opal_clients): + container_name = f"opal_client_{i+1}" # Unique name for each client + + client_token = opal_servers[0].obtain_OPAL_tokens(container_name)["client"] + callbacks = json.dumps( + { + "callbacks": [ + [ + f"{opal_server_url}/data/callback_report", + { + "method": "post", + "process_data": False, + "headers": { + "Authorization": f"Bearer {client_token}", + "content-type": "application/json", + }, + }, + ] + ] + } + ) + + container = OpalClientContainer( + OpalClientSettings( + image=opal_client_with_opa_image, + container_name=container_name, + container_index=i + 1, + opal_server_url=opal_server_url, + client_token=client_token, + default_update_callbacks=callbacks, + ), + network=opal_network, + ) + + container.start() + print( + f"Started OpalClientContainer: {container_name}, ID: {container.get_wrapped_container().id}" + ) + 
containers.append(container) + + yield containers + + try: + for container in containers: + container.stop() + except Exception: + logger.error(f"Failed to stop containers: {container}") + pass + + +@pytest.fixture(scope="session") +def topics(): + """A fixture that returns a dictionary mapping topic names to the number of + OpalClientContainer instances that should subscribe to each topic. + + Returns + ------- + dict + A dictionary mapping topic names to the number of OpalClientContainer + instances that should subscribe to each topic. + """ + topics = {"topic_1": 1, "topic_2": 1} + return topics + + +@pytest.fixture(scope="session") +def topiced_clients( + topics, opal_network: Network, opal_servers: list[OpalServerContainer] +): + """Fixture that starts and manages multiple OPAL client containers, each + subscribing to a different topic. + + The fixture takes a dictionary of topics and the number of clients to + subscribe to each topic. It starts the specified number of OPAL client + containers, each connected to the first OPAL server container, and each + subscribing to the specified topic. The fixture yields the list of started + OPAL client containers, organized by topic. + + Parameters + ---------- + topics : dict + A dictionary mapping topic names to the number of OpalClientContainer + instances that should subscribe to each topic. + opal_network : Network + The Docker network to which the containers are connected. + opal_servers : list[OpalServerContainer] + A list of OPAL server containers. + + Yields + ------ + dict + A dictionary mapping topic names to a list of OpalClientContainer + instances that are subscribed to the topic. 
+ """ + if not opal_servers or len(opal_servers) == 0: + raise ValueError("Missing 'opal_server' container.") + + opal_server_url = f"http://{opal_servers[0].settings.container_name}:{opal_servers[0].settings.port}" + containers = {} # List to store OpalClientContainer instances + + client_token = opal_servers[0].obtain_OPAL_tokens("topiced_opal_client_?x?")[ + "client" + ] + callbacks = json.dumps( + { + "callbacks": [ + [ + f"{opal_server_url}/data/callback_report", + { + "method": "post", + "process_data": False, + "headers": { + "Authorization": f"Bearer {client_token}", + "content-type": "application/json", + }, + }, + ] + ] + } + ) + + for topic, number_of_clients in topics.items(): + for i in range(number_of_clients): + container_name = f"opal_client_{topic}_{i+1}" # Unique name for each client + + container = OpalClientContainer( + OpalClientSettings( + image="permitio/opal-client:latest", + container_name=container_name, + container_index=i + 1, + opal_server_url=opal_server_url, + client_token=client_token, + default_update_callbacks=callbacks, + topics=topic, + ), + network=opal_network, + ) + + container.start() + logger.info( + f"Started OpalClientContainer: {container_name}, ID: {container.get_wrapped_container().id} - on topic: {topic}" + ) + containers[topic] = containers.get(topic, []) + + assert container.wait_for_log( + log_str="Connected to PubSub server", timeout=30 + ), f"Client {client.settings.container_name} did not connect to PubSub server." + + containers[topic].append(container) + + yield containers + + for _, clients in containers.items(): + for client in clients: + client.stop() + + +def wait_sometime(): + """Pauses execution based on the environment. + + If the code is running inside GitHub Actions, it pauses execution + for 30 seconds. Otherwise, it waits for user input to continue. + + This can be used to control the flow of execution depending on the + environment in which the code is being executed. 
+ """ + + if os.getenv("GITHUB_ACTIONS") == "true": + print("Running inside GitHub Actions. Sleeping for 30 seconds...") + time.sleep(3600) # Sleep for 30 seconds + else: + print("Running on the local machine. Press Enter to continue...") + input() # Wait for key press + + +@pytest.fixture(scope="session", autouse=True) +def setup(opal_clients, session_matrix): + """A setup fixture that is run once per test session. + + This fixture is automatically used by all tests, and is used to set up the + environment for the test session. The fixture yields, allowing the tests to + execute, and then is used to tear down the environment when the test session + is finished. + + Parameters + ---------- + opal_servers : List[OpalServerContainer] + A list of OPAL server containers. + opal_clients : List[OpalClientContainer] + A list of OPAL client containers. + session_matrix : dict + A dictionary containing information about the test session. + + Yields + ------ + None + """ + yield + + if session_matrix["is_final"]: + logger.info("Finalizing test session...") + utils.remove_env("OPAL_TESTS_DEBUG") + wait_sometime() diff --git a/tests/containers/broadcast_container_base.py b/tests/containers/broadcast_container_base.py new file mode 100644 index 000000000..4a278ee85 --- /dev/null +++ b/tests/containers/broadcast_container_base.py @@ -0,0 +1,21 @@ +from tests.containers.permitContainer import PermitContainer + + +class BroadcastContainerBase(PermitContainer): + def __init__(self): + PermitContainer.__init__(self) + + def get_url(self) -> str: + url = ( + self.settings.protocol + + "://" + + self.settings.user + + ":" + + self.settings.password + + "@" + + self.settings.container_name + + ":" + + str(self.settings.port) + ) + print(url) + return url diff --git a/tests/containers/cedar_container.py b/tests/containers/cedar_container.py new file mode 100644 index 000000000..c661ad095 --- /dev/null +++ b/tests/containers/cedar_container.py @@ -0,0 +1,46 @@ +from 
testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests import utils +from tests.containers.permitContainer import PermitContainer +from tests.containers.settings.cedar_settings import CedarSettings +from tests.containers.settings.opal_client_settings import OpalClientSettings + + +class CedarContainer(PermitContainer, DockerContainer): + def __init__( + self, + settings: CedarSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + PermitContainer.__init__(self) # Initialize PermitContainer + DockerContainer.__init__( + self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs + ) + self.settings = settings + self.network = network + self.logger = setup_logger(__name__) + self.configure() + + def configure(self): + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + self.with_name(self.settings.container_name).with_bind_ports( + 8180, self.settings.port + ).with_network(self.network).with_kwargs( + labels={"com.docker.compose.project": "pytest"} + ).with_network_aliases( + self.settings.container_name + ) + + def reload_with_settings(self, settings: CedarSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() diff --git a/tests/containers/gitea_container.py b/tests/containers/gitea_container.py new file mode 100644 index 000000000..83c14e631 --- /dev/null +++ b/tests/containers/gitea_container.py @@ -0,0 +1,413 @@ +import codecs +import os +import shutil +import time + +import requests +from git import GitCommandError, Repo +from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +import docker +from tests.containers.permitContainer import PermitContainer +from 
tests.containers.settings.gitea_settings import GiteaSettings + + +class GiteaContainer(PermitContainer, DockerContainer): + def __init__( + self, + settings: GiteaSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + self.settings = settings + self.network = network + self.kwargs = kwargs + + self.logger = setup_logger(__name__) + + labels = self.kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + # Set container lifecycle properties + self.with_kwargs(auto_remove=False, restart_policy={"Name": "always"}) + + PermitContainer.__init__(self) + DockerContainer.__init__( + self, + image=self.settings.image, + docker_client_kw=docker_client_kw, + **self.kwargs, + ) + + self.configure() + + def configure(self): + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + # Set container name and ports + self.with_name(self.settings.container_name).with_bind_ports( + 3000, self.settings.port_http + ).with_bind_ports(2222, self.settings.port_ssh).with_network( + self.network + ).with_network_aliases( + self.settings.network_aliases + ) + + def is_gitea_ready(self): + """Check if Gitea is ready by inspecting logs.""" + stdout_logs, stderr_logs = self.get_logs() + logs = stdout_logs.decode("utf-8") + stderr_logs.decode("utf-8") + return "Listen: http://0.0.0.0:3000" in logs + + def wait_for_gitea(self, timeout: int = 30): + """Wait for Gitea to initialize within a timeout period.""" + for _ in range(timeout): + if self.is_gitea_ready(): + self.logger.info("Gitea is ready.") + return + time.sleep(1) + raise RuntimeError("Gitea initialization timeout.") + + def create_gitea_user(self): + """Create an admin user in the Gitea instance.""" + create_user_command = ( + f"/usr/local/bin/gitea admin user create " + f"--admin --username {self.settings.username} " + f"--email {self.settings.email} " + f"--password {self.settings.password} " + 
f"--must-change-password=false" + ) + result = self.exec(create_user_command) + if result.exit_code != 0: + raise RuntimeError( + f"Failed to create Gitea user: {result.output.decode('utf-8')}" + ) + + def create_gitea_admin_token(self): + """Generate an admin access token for the Gitea instance.""" + create_token_command = ( + f"/usr/local/bin/gitea admin user generate-access-token " + f"--username {self.settings.username} --raw --scopes all" + ) + result = self.exec(create_token_command) + token_result = result.output.decode("utf-8").strip() + if not token_result: + raise RuntimeError("Failed to create an access token.") + + return token_result + + def deploy_gitea(self): + """Deploy Gitea container and initialize configuration.""" + self.logger.info("Deploying Gitea container...") + # self.start() + self.wait_for_gitea() + self.create_gitea_user() + self.access_token = self.create_gitea_admin_token() + + def exec(self, command: str): + """Execute a command inside the container.""" + self.logger.info(f"Executing command: {command}") + exec_result = self.get_wrapped_container().exec_run(command) + if exec_result.exit_code != 0: + raise RuntimeError( + f"Command failed with exit code {exec_result.exit_code}: {exec_result.output.decode('utf-8')}" + ) + return exec_result + + def repo_exists(self): + url = f"{self.settings.gitea_base_url}/repos/{self.settings.username}/{self.settings.repo_name}" + headers = {"Authorization": f"token {self.access_token}"} + response = requests.get(url, headers=headers) + + if response.status_code == 200: + self.logger.info(f"Repository '{self.settings.repo_name}' already exists.") + return True + elif response.status_code == 404: + self.logger.info(f"Repository '{self.settings.repo_name}' does not exist.") + return False + else: + self.logger.error( + f"Failed to check repository: {response.status_code} {response.text}" + ) + response.raise_for_status() + + def create_gitea_repo( + self, description="", private=False, auto_init=True, 
default_branch="master" + ): + url = f"{self.settings.gitea_base_url}/api/v1/user/repos" + headers = { + "Authorization": f"token {self.access_token}", + "Content-Type": "application/json", + } + payload = { + "name": self.settings.repo_name, + "description": description, + "private": private, + "auto_init": auto_init, + "default_branch": default_branch, + } + response = requests.post(url, json=payload, headers=headers) + if response.status_code == 201: + self.logger.info("Repository created successfully!") + return response.json() + else: + self.logger.error( + f"Failed to create repository: {response.status_code} {response.text}" + ) + response.raise_for_status() + + def clone_repo_with_gitpython(self, clone_directory): + repo_url = f"{self.settings.gitea_base_url}/{self.settings.username}/{self.settings.repo_name}.git" + if self.access_token: + repo_url = f"http://{self.settings.username}:{self.access_token}@{self.settings.gitea_base_url.split('://')[1]}/{self.settings.username}/{self.settings.repo_name}.git" + try: + if os.path.exists(clone_directory): + self.logger.debug( + f"Directory '{clone_directory}' already exists. Deleting it..." + ) + shutil.rmtree(clone_directory) + Repo.clone_from(repo_url, clone_directory) + self.logger.debug( + f"Repository '{self.settings.repo_name}' cloned successfully into '{clone_directory}'." + ) + except Exception as e: + self.logger.error( + f"Failed to clone repository '{self.settings.repo_name}': {e}" + ) + + def reset_repo_with_rbac(self, repo_directory, source_rbac_file): + try: + if not os.path.exists(repo_directory): + raise FileNotFoundError( + f"Repository directory '{repo_directory}' does not exist." + ) + + git_dir = os.path.join(repo_directory, ".git") + if not os.path.exists(git_dir): + raise FileNotFoundError( + f"The directory '{repo_directory}' is not a valid Git repository (missing .git folder)." 
+ ) + + repo = Repo(repo_directory) + + # Get the default branch name + default_branch = self.get_default_branch(repo) + if not default_branch: + raise ValueError("Could not determine the default branch name.") + + # Ensure we are on the default branch + if repo.active_branch.name != default_branch: + repo.git.checkout(default_branch) + + # Remove other branches + branches = [ + branch.name for branch in repo.branches if branch.name != default_branch + ] + for branch in branches: + repo.git.branch("-D", branch) + + # Reset repository content + for item in os.listdir(repo_directory): + item_path = os.path.join(repo_directory, item) + if os.path.basename(item_path) == ".git": + continue + if os.path.isfile(item_path) or os.path.islink(item_path): + os.unlink(item_path) + elif os.path.isdir(item_path): + shutil.rmtree(item_path) + + # Copy RBAC file + destination_rbac_path = os.path.join(repo_directory, "rbac.rego") + shutil.copy2(source_rbac_file, destination_rbac_path) + + # Stage and commit changes + repo.git.add(all=True) + repo.index.commit("Reset repository to only include 'rbac.rego'") + + self.logger.debug( + f"Repository reset successfully. 'rbac.rego' is the only file and changes are committed." 
+ ) + except Exception as e: + self.logger.error(f"Error resetting repository: {e}") + + def get_default_branch(self, repo): + try: + return repo.git.symbolic_ref("refs/remotes/origin/HEAD").split("/")[-1] + except Exception as e: + self.logger.error(f"Error determining default branch: {e}") + return None + + def push_repo_to_remote(self, repo_directory): + try: + repo = Repo(repo_directory) + + # Get the default branch name + default_branch = self.get_default_branch(repo) + if not default_branch: + raise ValueError("Could not determine the default branch name.") + + # Ensure we are on the default branch + if repo.active_branch.name != default_branch: + repo.git.checkout(default_branch) + + # Check if remote origin exists + if "origin" not in [remote.name for remote in repo.remotes]: + raise ValueError("No remote named 'origin' found in the repository.") + + # Push changes to the default branch + repo.remotes.origin.push(refspec=f"{default_branch}:{default_branch}") + self.logger.info("Changes pushed to remote repository successfully.") + except Exception as e: + self.logger.error(f"Error pushing changes to remote: {e}") + + def cleanup_local_repo(self, repo_directory): + try: + if os.path.exists(repo_directory): + shutil.rmtree(repo_directory) + self.logger.debug( + f"Local repository '{repo_directory}' has been cleaned up." + ) + else: + self.logger.debug( + f"Local repository '{repo_directory}' does not exist. No cleanup needed." 
+ ) + except Exception as e: + self.logger.error(f"Error during cleanup: {e}") + + def init_repo(self): + try: + # Set paths for source RBAC file and clone directory + source_rbac_file = os.path.join( + self.settings.data_dir, "rbac.rego" + ) # Use self.data_dir for source RBAC file + clone_directory = os.path.join( + self.settings.temp_dir, f"{self.settings.repo_name}-clone" + ) # Use self.repo_name + + # Check if the repository exists + if not self.repo_exists(): + # Create the repository if it doesn't exist + self.create_gitea_repo( + description="This is a test repository created via API.", + private=False, + ) + + # Clone the repository + self.clone_repo_with_gitpython(clone_directory=clone_directory) + + # Reset the repository with RBAC + self.reset_repo_with_rbac( + repo_directory=clone_directory, source_rbac_file=source_rbac_file + ) + + # Push the changes to the remote repository + self.push_repo_to_remote(repo_directory=clone_directory) + + # Clean up the local repository + self.cleanup_local_repo(repo_directory=clone_directory) + + self.logger.info("Repository initialization completed successfully.") + except Exception as e: + self.logger.error(f"Error during repository initialization: {e}") + + # Prepare the directory + def prepare_directory(self, path): + """Prepare the directory by cleaning up any existing content.""" + if os.path.exists(path): + shutil.rmtree(path) # Remove existing directory + os.makedirs(path) # Create a new directory + + # Clone and push changes + def clone_and_update( + self, + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ): + """Clone the repository, update the specified branch, and push + changes.""" + self.prepare_directory(CLONE_DIR) # Clean up and prepare the directory + print(f"Processing branch: {branch}") + + # Clone the repository for the specified branch + print(f"Cloning branch {branch}...") + repo = Repo.clone_from(authenticated_url, CLONE_DIR, branch=branch) + + # Create 
or update the specified file with the provided content + file_path = os.path.join(CLONE_DIR, file_name) + with open(file_path, "w") as f: + f.write(file_content) + + # Stage the changes + print(f"Staging changes for branch {branch}...") + repo.git.add(A=True) # Add all changes + + # Commit the changes if there are modifications + if repo.is_dirty(): + print(f"Committing changes for branch {branch}...") + repo.index.commit(COMMIT_MESSAGE) + + # Push changes to the remote repository + print(f"Pushing changes for branch {branch}...") + try: + repo.git.push(authenticated_url, branch) + except GitCommandError as e: + print(f"Error pushing branch {branch}: {e}") + + # Cleanup function + def cleanup(self, CLONE_DIR): + """Remove the temporary clone directory.""" + if os.path.exists(CLONE_DIR): + print("Cleaning up temporary directory...") + shutil.rmtree(CLONE_DIR) + + def update_branch(self, branch, file_name, file_content): + temp_dir = self.settings.temp_dir + + self.logger.info( + f"Updating branch '{branch}' with file '{file_name}' content..." 
+ ) + + # Decode escape sequences in the file content + file_content = codecs.decode(file_content, "unicode_escape") + + GITEA_REPO_URL = f"http://localhost:{self.settings.port_http}/{self.settings.username}/{self.settings.repo_name}.git" + username = self.settings.username + PASSWORD = self.settings.password + CLONE_DIR = os.path.join(temp_dir, "branch_update") + COMMIT_MESSAGE = "Automated update commit" + + # Append credentials to the repository URL + authenticated_url = GITEA_REPO_URL.replace( + "http://", f"http://{username}:{PASSWORD}@" + ) + + try: + self.clone_and_update( + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ) + print("Operation completed successfully.") + finally: + # Ensure cleanup is performed regardless of success or failure + self.cleanup(CLONE_DIR) + + def reload_with_settings(self, settings: GiteaSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() diff --git a/tests/containers/kafka_broadcast_container.py b/tests/containers/kafka_broadcast_container.py new file mode 100644 index 000000000..1fa52e1d1 --- /dev/null +++ b/tests/containers/kafka_broadcast_container.py @@ -0,0 +1,33 @@ +import debugpy +from testcontainers.core.network import Network +from testcontainers.kafka import KafkaContainer + +import docker +from tests.containers.permitContainer import PermitContainer +from tests.containers.zookeeper_container import ZookeeperContainer + + +class KafkaBroadcastContainer(PermitContainer, KafkaContainer): + def __init__( + self, + network: Network, + zookeeper_container: ZookeeperContainer, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.zookeeper_container = zookeeper_container + self.network = network + + 
PermitContainer.__init__(self) + KafkaContainer.__init__(self, docker_client_kw=docker_client_kw, **kwargs) + + self.with_network(self.network) + + self.with_network_aliases("broadcast_channel") + # Add a custom name for the container + self.with_name(f"kafka_broadcast_channel") diff --git a/tests/containers/kafka_ui_container.py b/tests/containers/kafka_ui_container.py new file mode 100644 index 000000000..a01b70f1f --- /dev/null +++ b/tests/containers/kafka_ui_container.py @@ -0,0 +1,36 @@ +from testcontainers.core.container import DockerContainer +from testcontainers.core.network import Network + +from tests.containers.kafka_broadcast_container import KafkaBroadcastContainer +from tests.containers.permitContainer import PermitContainer + + +class KafkaUIContainer(PermitContainer, DockerContainer): + def __init__( + self, + network: Network, + kafka_container: KafkaBroadcastContainer, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.kafka_container = kafka_container + self.network = network + + self.image = "provectuslabs/kafka-ui:latest" + + PermitContainer.__init__(self) + DockerContainer.__init__( + self, image=self.image, docker_client_kw=docker_client_kw, **kwargs + ) + + self.with_name("kafka-ui") + self.with_bind_ports(8080, 8080) + self.with_env("KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS", "kafka:9092") + + self.with_network(self.network) + self.with_network_aliases("Kafka_ui") diff --git a/tests/containers/opa_container.py b/tests/containers/opa_container.py new file mode 100644 index 000000000..8227394e2 --- /dev/null +++ b/tests/containers/opa_container.py @@ -0,0 +1,67 @@ +from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests import utils +from 
tests.containers.permitContainer import PermitContainer +from tests.containers.settings.opal_client_settings import OpalClientSettings + + +class OpaSettings: + def __init__( + self, + image: str | None = None, + port: int | None = None, + container_name: str | None = None, + ) -> None: + self.image = image if image else "openpolicyagent/opa:0.29.0" + self.container_name = "opa" + + if port is None: + self.port = utils.find_available_port(8181) + else: + if utils.is_port_available(port): + self.port = port + else: + self.port = utils.find_available_port(8181) + + def getEnvVars(self): + return {} + + +class OpaContainer(PermitContainer, DockerContainer): + def __init__( + self, + settings: OpaSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + PermitContainer.__init__(self) # Initialize PermitContainer + DockerContainer.__init__( + self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs + ) + self.settings = settings + self.network = network + self.logger = setup_logger(__name__) + self.configure() + + def configure(self): + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + self.with_name(self.settings.container_name).with_bind_ports( + 8181, self.settings.port + ).with_network(self.network).with_kwargs( + labels={"com.docker.compose.project": "pytest"} + ).with_network_aliases( + self.settings.container_name + ) + + def reload_with_settings(self, settings: OpaSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() diff --git a/tests/containers/opal_client_container.py b/tests/containers/opal_client_container.py new file mode 100644 index 000000000..dee8d3301 --- /dev/null +++ b/tests/containers/opal_client_container.py @@ -0,0 +1,52 @@ +from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger 
+ +from tests import utils +from tests.containers.permitContainer import PermitContainer +from tests.containers.settings.opal_client_settings import OpalClientSettings + + +class OpalClientContainer(PermitContainer, DockerContainer): + def __init__( + self, + settings: OpalClientSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + PermitContainer.__init__(self) # Initialize PermitContainer + DockerContainer.__init__( + self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs + ) + self.settings = settings + self.network = network + self.logger = setup_logger(__name__) + self.configure() + + def configure(self): + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + self.with_name(self.settings.container_name).with_bind_ports( + 7000, self.settings.port + ).with_bind_ports( + 8181, utils.find_available_port(self.settings.opa_port) + ).with_network( + self.network + ).with_kwargs( + labels={"com.docker.compose.project": "pytest"} + ).with_network_aliases( + self.settings.container_name + ) + + if self.settings.debug_enabled: + self.with_bind_ports(5678, self.settings.debug_port) + + def reload_with_settings(self, settings: OpalClientSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() diff --git a/tests/containers/opal_server_container.py b/tests/containers/opal_server_container.py new file mode 100644 index 000000000..71c6ce50b --- /dev/null +++ b/tests/containers/opal_server_container.py @@ -0,0 +1,91 @@ +import requests +from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests.containers.permitContainer import PermitContainer +from tests.containers.settings.opal_server_settings import OpalServerSettings + + +class OpalServerContainer(PermitContainer, DockerContainer): + def 
class OpalServerContainer(PermitContainer, DockerContainer):
    """Testcontainers wrapper for an OPAL server attached to a docker network."""

    def __init__(
        self,
        settings: OpalServerSettings,
        network: Network,
        docker_client_kw: dict | None = None,
        **kwargs,
    ) -> None:
        self.settings = settings
        self.network = network

        self.logger = setup_logger(__name__)

        PermitContainer.__init__(self)  # error-log tracking mixin
        DockerContainer.__init__(
            self, image=self.settings.image, docker_client_kw=docker_client_kw, **kwargs
        )

        self.configure()

    def configure(self):
        """Apply env vars, port bindings, network and labels from self.settings."""
        # Add environment variables individually
        for key, value in self.settings.getEnvVars().items():
            self.with_env(key, value)

        # Configure network and other settings
        self.with_name(self.settings.container_name).with_bind_ports(
            7002, self.settings.port  # OPAL server's default API port
        ).with_network(self.network).with_kwargs(
            labels={"com.docker.compose.project": "pytest"}
        ).with_network_aliases(
            self.settings.container_name
        )

        # Bind debugpy port if enabled.
        # NOTE(review): the server settings expose `debugEnabled` while the
        # client settings use `debug_enabled` -- kept as-is for compatibility.
        if self.settings.debugEnabled:
            self.with_bind_ports(5678, self.settings.debug_port)

    def reload_with_settings(self, settings: OpalServerSettings | None = None):
        """Restart the container, optionally swapping in new settings."""
        self.stop()

        self.settings = settings if settings else self.settings
        self.configure()

        self.start()

    def obtain_OPAL_tokens(self, caller: str = "Unknown caller") -> dict:
        """Fetch "client" and "datasource" JWTs from the OPAL server /token
        endpoint.

        Args:
            caller: Label included in log messages (typo "Unkonwn" fixed).

        Returns:
            dict: Tokens keyed by type; a type is absent when its request
            failed, so callers must tolerate partial results.
        """
        token_url = f"http://localhost:{self.settings.port}/token"
        headers = {
            "Authorization": f"Bearer {self.settings.master_token}",
            "Content-Type": "application/json",
        }

        tokens = {}

        for token_type in ["client", "datasource"]:
            try:
                data = {"type": token_type}
                self.logger.debug(f"Fetching OPAL {token_type} token...")
                self.logger.debug(f"url: {token_url}")
                self.logger.debug(f"headers: {headers}")
                self.logger.debug(data)

                response = requests.post(token_url, headers=headers, json=data)
                response.raise_for_status()

                token = response.json().get("token")
                if token:
                    tokens[token_type] = token
                    self.logger.info(
                        f"{caller} | Successfully fetched OPAL {token_type} token."
                    )
                else:
                    self.logger.error(
                        f"{caller} | Failed to fetch OPAL {token_type} token: {response.json()}"
                    )

            except requests.exceptions.RequestException as e:
                self.logger.error(
                    f"{caller} | HTTP Request failed while fetching OPAL {token_type} token: {e}"
                )

        return tokens
class PermitContainer:
    """Mixin adding log-based helpers to a testcontainers container:
    waiting for a specific log line, waiting for an expected error, and
    collecting unexpected errors that fail the run on finalization.

    Subclasses must also inherit from a testcontainers DockerContainer so
    that `self._container` (and `self.settings` for wait_for_log's warning
    message) exist.
    """

    def __init__(self):
        self.permitLogger = setup_logger(__name__)

        # Regex to match any ANSI-escaped timestamp in the format
        # YYYY-MM-DDTHH:MM:SS.mmmmmm+0000
        self.timestamp_with_ansi = (
            r"\x1b\[.*?(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}\+\d{4})"
        )
        # Error lines collected by check_errors(); expected errors are
        # removed again by wait_for_error().
        self.errors = []

    def wait_for_log(
        self, log_str: str, timeout: int, reference_timestamp: datetime | None = None
    ):
        """Wait for `log_str` to appear in the container logs after the
        reference timestamp (or anywhere, when it is None).

        Args:
            log_str (str): The string to search for in the logs.
            timeout (int): Maximum time to wait for the log (in seconds).
            reference_timestamp (datetime): Only consider log lines stamped
                after this time; None accepts any line.

        Returns:
            bool: True if the log was found, False if the timeout was reached.

        NOTE(review): the timeout is only checked between received log lines;
        if the container emits nothing, the blocking `logs(stream=True)`
        iterator can overrun the timeout.
        """
        log_found = False
        logs = self._container.logs(stream=True)

        start_time = time.time()  # Record the start time

        for line in logs:
            # Check if the timeout has been exceeded
            if time.time() - start_time > timeout:
                self.permitLogger.warning(
                    f"{self.settings.container_name} | Timeout reached while waiting for the log. | {log_str}"
                )
                break

            decoded_line = line.decode("utf-8").strip()

            # Search for the timestamp in the line
            match = re.search(self.timestamp_with_ansi, decoded_line)
            if match:
                log_timestamp = datetime.strptime(
                    match.group(1), "%Y-%m-%dT%H:%M:%S.%f%z"
                )

                if (reference_timestamp is None) or (
                    log_timestamp > reference_timestamp
                ):
                    if log_str in decoded_line:
                        log_found = True
                        break

        return log_found

    def wait_for_error(
        self, reference_timestamp: datetime, error_str: str = "Error", timeout: int = 30
    ):
        """Wait for `error_str` to appear in the container logs after the
        reference timestamp, and drop the matching line from the collected
        unexpected errors so it does not fail the run in __del__.

        Args:
            reference_timestamp (datetime): Only consider lines stamped after this time.
            error_str (str): The string to search for in the logs.
            timeout (int): Maximum time to wait for the log (in seconds).

        Returns:
            bool: True if the error was found, False if the timeout was reached.
        """
        err_found = False
        logs = self._container.logs(stream=True)

        start_time = time.time()  # Record the start time

        for line in logs:
            # Check if the timeout has been exceeded
            if time.time() - start_time > timeout:
                self.permitLogger.warning("Timeout reached while waiting for the log.")
                break

            decoded_line = line.decode("utf-8").strip()

            # Search for the timestamp in the line
            match = re.search(self.timestamp_with_ansi, decoded_line)
            if match:
                log_timestamp = datetime.strptime(
                    match.group(1), "%Y-%m-%dT%H:%M:%S.%f%z"
                )

                if log_timestamp > reference_timestamp:
                    self.permitLogger.info(f"Checking log line: {decoded_line}")
                    if error_str in decoded_line:
                        err_found = True
                        # BUG FIX: the original re-searched `decoded_line` for
                        # every recorded error (so the comparison was always
                        # true) and removed items while iterating the list.
                        # Compare each recorded error's own timestamp instead,
                        # iterating over a copy.
                        for err in list(self.errors):
                            err_match = re.search(self.timestamp_with_ansi, err)
                            if err_match and err_match.group(1) == match.group(1):
                                self.errors.remove(err)
                        self.permitLogger.info("err found!")
                        break

        return err_found

    async def check_errors(self):
        """Scan streamed container logs and record lines containing "ERROR".

        NOTE(review): despite being declared async this method never awaits;
        the blocking log iteration will stall an event loop it runs on.
        """
        logs = self._container.logs(stream=True)

        log_str = "ERROR"

        for line in logs:
            decoded_line = line.decode("utf-8").strip()
            self.permitLogger.info(f"scanning line: {decoded_line}")
            if log_str in decoded_line:
                self.permitLogger.error("\n\n\n\n")
                self.permitLogger.error(f"error found: {decoded_line}")
                self.permitLogger.error("\n\n\n\n")
                self.errors.append(decoded_line)

    def __del__(self):
        # Fail the run if unexpected errors were collected.
        # NOTE(review): asserting in __del__ is unreliable -- exceptions in
        # finalizers are swallowed by CPython and `assert` is stripped under
        # -O; consider an explicit teardown check instead.
        if len(self.errors) > 0:
            self.permitLogger.error("Errors found in container logs:")
            for error in self.errors:
                self.permitLogger.error(error)
            assert False, "Errors found in container logs."
class PostgresBroadcastContainer(BroadcastContainerBase, PostgresContainer):
    """Postgres container used as the OPAL broadcast channel."""

    def __init__(
        self,
        network: Network,
        settings: PostgresBroadcastSettings,
        image: str = "postgres:alpine",
        docker_client_kw: dict | None = None,
        **kwargs,
    ) -> None:
        # Tag the container so docker tooling groups it under "pytest".
        labels = kwargs.get("labels", {})
        labels.update({"com.docker.compose.project": "pytest"})
        kwargs["labels"] = labels

        self.network = network
        self.settings = settings

        BroadcastContainerBase.__init__(self)
        PostgresContainer.__init__(
            self,
            image,
            settings.port,
            settings.user,
            settings.password,
            settings.database,
            docker_client_kw=docker_client_kw,
            **kwargs,
        )

        self.with_network(self.network)
        self.with_network_aliases("broadcast_channel")
        # f-prefix removed: plain constant string (lint F541).
        self.with_name("postgres_broadcast_channel")


class PulsarBroadcastContainer(PermitContainer, DockerContainer):
    """Pulsar container used as the OPAL broadcast channel.

    NOTE(review): the image name "pulsar:latest" has no registry namespace;
    confirm it resolves (the official image is under apachepulsar/).
    """

    def __init__(
        self,
        network: Network,
        docker_client_kw: dict | None = None,
        **kwargs,
    ) -> None:
        # Tag the container so docker tooling groups it under "pytest".
        labels = kwargs.get("labels", {})
        labels.update({"com.docker.compose.project": "pytest"})
        kwargs["labels"] = labels

        self.network = network

        PermitContainer.__init__(self)
        DockerContainer.__init__(
            self, image="pulsar:latest", docker_client_kw=docker_client_kw, **kwargs
        )

        self.with_network(self.network)
        self.with_network_aliases("broadcast_channel")
        # f-prefix removed: plain constant string (lint F541).
        self.with_name("pytest_opal_broadcast_channel")
class RedisBroadcastContainer(PermitContainer, RedisContainer):
    """Redis container used as the OPAL broadcast channel."""

    def __init__(
        self,
        network: Network,
        docker_client_kw: dict | None = None,
        **kwargs,
    ) -> None:
        # Tag the container so docker tooling groups it under "pytest".
        labels = kwargs.get("labels", {})
        labels.update({"com.docker.compose.project": "pytest"})
        kwargs["labels"] = labels

        self.network = network

        PermitContainer.__init__(self)
        RedisContainer.__init__(self, docker_client_kw=docker_client_kw, **kwargs)

        self.with_network(self.network)
        self.with_network_aliases("broadcast_channel")
        # f-prefix removed: plain constant string (lint F541).
        self.with_name("redis_broadcast_channel")


class RedisUIContainer(PermitContainer, DockerContainer):
    """RedisInsight UI container wired to an existing Redis container."""

    def __init__(
        self,
        network: Network,
        redis_container: RedisContainer,
        docker_client_kw: dict | None = None,
        **kwargs,
    ) -> None:
        # Tag the container so docker tooling groups it under "pytest".
        labels = kwargs.get("labels", {})
        labels.update({"com.docker.compose.project": "pytest"})
        kwargs["labels"] = labels

        self.redis_container = redis_container
        self.network = network
        self.container_name = "redis-ui"
        self.image = "redislabs/redisinsight:latest"

        PermitContainer.__init__(self)
        DockerContainer.__init__(
            self, image=self.image, docker_client_kw=docker_client_kw, **kwargs
        )

        self.with_name(self.container_name)

        self.with_network(self.network)
        self.with_bind_ports(5540, 5540)  # RedisInsight web UI port

        self.with_network_aliases("redis_ui")
class CedarSettings:
    """Configuration for a standalone Cedar agent container."""

    def __init__(
        self,
        image: str | None = None,
        port: int | None = None,
        container_name: str | None = None,
    ) -> None:
        self.image = image if image else "permitio/cedar:latest"
        self.container_name = container_name if container_name else "cedar"

        # Prefer the requested host port when it is free, otherwise fall back
        # to the next available one starting at Cedar's default 8180.
        if port is not None and utils.is_port_available(port):
            self.port = port
        else:
            self.port = utils.find_available_port(8180)

    def getEnvVars(self) -> dict:
        # Cedar needs no environment variables; kept for interface symmetry
        # with the other *Settings classes.
        return {}


class GiteaSettings:
    """Settings for the Gitea container used as a local policy repository.

    Values are loaded from environment variables first (see load_from_env),
    then overridden by any explicit constructor arguments.
    """

    def __init__(
        self,
        container_name: str = None,
        repo_name: str = None,
        temp_dir: str = None,
        data_dir: str = None,
        port_http: int = None,
        port_ssh: int = None,
        USER_UID: int = None,
        USER_GID: int = None,
        username: str = None,
        email: str = None,
        password: str = None,
        network_aliases: str = None,
        image: str = None,
        **kwargs,
    ):
        """Initialize the Gitea container settings.

        :param container_name: Name of the Gitea container
        :param repo_name: Name of the repository
        :param temp_dir: Path to the temporary directory for files
        :param data_dir: Path to the data directory for persistent files
        :param port_http: Optional - Port for Gitea HTTP access
        :param port_ssh: Optional - Port for Gitea SSH access
        :param USER_UID: Optional - User UID for Gitea
        :param USER_GID: Optional - User GID for Gitea
        :param username: Optional - Default admin username for Gitea
        :param email: Optional - Default admin email for Gitea
        :param password: Optional - Default admin password for Gitea
        :param network_aliases: Optional - Network aliases for the container
        :param image: Optional - Docker image for Gitea
        :param kwargs: Additional attributes merged onto the instance
        """

        self.logger = setup_logger(__name__)

        # Env/defaults first; explicit arguments below override them.
        self.load_from_env()

        self.image = image if image else self.image
        self.container_name = container_name if container_name else self.container_name
        self.repo_name = repo_name if repo_name else self.repo_name
        self.port_http = port_http if port_http else self.port_http
        self.port_ssh = port_ssh if port_ssh else self.port_ssh
        self.uid = USER_UID if USER_UID else self.uid
        self.gid = USER_GID if USER_GID else self.gid

        self.username = username if username else self.username
        self.email = email if email else self.email
        self.password = password if password else self.password

        self.temp_dir = os.path.abspath(temp_dir) if temp_dir else self.temp_dir
        self.data_dir = (
            data_dir if data_dir else self.data_dir
        )  # Data directory for persistent files (e.g., RBAC file)

        self.db_type = "sqlite3"  # Default to SQLite
        self.install_lock = "true"

        self.network_aliases = (
            network_aliases if network_aliases else self.network_aliases
        )

        self.access_token = None  # Optional, can be set later
        self.__dict__.update(kwargs)

        # URL reachable from the test host (port is published there).
        self.gitea_base_url = f"http://localhost:{self.port_http}"

        # Validate required parameters
        self.validate_dependencies()

        # URL reachable from other containers on the docker network.
        self.gitea_internal_base_url = f"http://{self.container_name}:{self.port_http}"

    def validate_dependencies(self):
        """Raise ValueError if any required parameter is missing/falsy."""
        required_params = [
            self.container_name,
            self.port_http,
            self.port_ssh,
            self.image,
            self.uid,
            self.gid,
        ]
        if not all(required_params):
            raise ValueError(
                "Missing required parameters for Gitea container initialization."
            )

        self.logger.info(f"{self.container_name} | Dependencies validated successfully.")

    def getEnvVars(self) -> dict:
        # NOTE(review): USER_UID/USER_GID are ints and "username" is
        # lowercase while the other keys are uppercase -- confirm this
        # matches what the Gitea image expects.
        return {
            "USER_UID": self.uid,
            "USER_GID": self.gid,
            "username": self.username,
            "EMAIL": self.email,
            "PASSWORD": self.password,
            "DB_TYPE": self.db_type,
            "INSTALL_LOCK": self.install_lock,
        }

    def load_from_env(self):
        """Populate every setting from the environment, with fallbacks."""
        self.image = os.getenv("GITEA_IMAGE", "gitea/gitea:latest-rootless")
        self.container_name = os.getenv("GITEA_CONTAINER_NAME", "gitea")
        self.repo_name = os.getenv("REPO_NAME", "permit")
        self.temp_dir = os.getenv("TEMP_DIR", "/tmp/permit")
        self.data_dir = os.getenv("DATA_DIR", "/tmp/data")
        self.port_http = int(os.getenv("GITEA_PORT_HTTP", 3000))
        self.port_ssh = int(os.getenv("GITEA_PORT_SSH", 2222))
        self.uid = int(os.getenv("USER_UID", 1000))
        self.gid = int(os.getenv("USER_GID", 1000))
        self.username = os.getenv("username", "permitAdmin")
        self.email = os.getenv("EMAIL", "admin@permit.io")
        self.password = os.getenv("PASSWORD", "Aa123456")
        self.network_aliases = os.getenv("NETWORK_ALIASES", "gitea")
class KafkaBroadcastSettings:
    """Settings for a Kafka broadcast channel (zookeeper + kafka + UI).

    NOTE(review): large parts are copy-pasted from the Postgres broadcast
    settings -- the constructor parameters, getEnvVars keys and validation
    messages still use POSTGRES_* names; kept for compatibility.
    """

    def __init__(self, host, port, user, password, database):
        self.logger = setup_logger("KafkaBroadcastSettings")

        # BUG FIX: load_from_env() was never called, so attributes such as
        # kafka_container_name (used by validate_dependencies below) did not
        # exist and construction raised AttributeError. Load env defaults
        # first, then apply the explicit arguments, like the sibling
        # settings classes do.
        self.load_from_env()

        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database

        self.validate_dependencies()

    def validate_dependencies(self):
        """Validate required dependencies before starting the server."""
        if not self.host:
            raise ValueError("POSTGRES_HOST is required.")
        if not self.port:
            raise ValueError("POSTGRES_PORT is required.")
        if not self.user:
            raise ValueError("POSTGRES_USER is required.")
        if not self.password:
            raise ValueError("POSTGRES_PASSWORD is required.")
        if not self.database:
            raise ValueError("POSTGRES_DATABASE is required.")

        self.logger.info(
            f"{self.kafka_container_name} | Dependencies validated successfully."
        )

    def getEnvVars(self):
        return {
            "POSTGRES_HOST": self.host,
            "POSTGRES_PORT": self.port,
            "POSTGRES_USER": self.user,
            "POSTGRES_PASSWORD": self.password,
            "POSTGRES_DATABASE": self.database,
        }

    def load_from_env(self):
        """Populate every setting from the environment, with fallbacks."""
        self.host = os.getenv("POSTGRES_HOST", "localhost")
        self.port = int(os.getenv("POSTGRES_PORT", 5432))
        self.user = os.getenv("POSTGRES_USER", "postgres")
        self.password = os.getenv("POSTGRES_PASSWORD", "postgres")
        self.database = os.getenv("POSTGRES_DATABASE", "postgres")

        self.zookeeper_image_name = os.getenv(
            "ZOOKEEPER_IMAGE_NAME", "confluentinc/cp-zookeeper:6.2.0"
        )
        self.zookeeper_container_name = os.getenv(
            "ZOOKEEPER_CONTAINER_NAME", "zookeeper"
        )
        self.zookeeper_port = os.getenv("ZOOKEEPER_CLIENT_PORT", 2181)
        self.zookeeper_tick_time = os.getenv("ZOOKEEPER_TICK_TIME", 2000)
        self.zookeeper_allow_anonymous_login = os.getenv("ALLOW_ANONYMOUS_LOGIN", "yes")

        self.kafka_image_name = os.getenv(
            "KAFKA_IMAGE_NAME", "confluentinc/cp-kafka:6.2.0"
        )
        self.kafka_container_name = os.getenv("KAFKA_CONTAINER_NAME", "kafka")
        self.kafka_port = os.getenv("KAFKA_CLIENT_PORT", 9092)
        self.kafka_admin_port = os.getenv("KAFKA_ADMIN_PORT", 29092)

        self.kafka_ui_image_name = os.getenv(
            "KAFKA_UI_IMAGE_NAME", "provectuslabs/kafka-ui:latest"
        )
        self.kafka_ui_container_name = os.getenv("KAFKA_UI_CONTAINER_NAME", "kafka-ui")

        # BUG FIX: kafka_ui_host was read below but never defined, raising
        # AttributeError. Default to localhost, where the UI port is bound.
        self.kafka_ui_host = os.getenv("KAFKA_UI_HOST", "localhost")
        self.kafka_ui_port = os.getenv("KAFKA_UI_PORT", 8080)

        self.kafka_ui_url = os.getenv(
            "KAFKA_UI_URL", f"http://{self.kafka_ui_host}:{self.kafka_ui_port}"
        )

        self.broker_id = os.getenv("KAFKA_BROKER_ID", 1)
        self.zookeeper_connect = os.getenv(
            "KAFKA_ZOOKEEPER_CONNECT",
            f"{self.zookeeper_container_name}:{self.zookeeper_port}",
        )
        self.offsets_topic_replication_factor = os.getenv(
            "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", 1
        )
        self.listener_security_protocol_map = os.getenv(
            "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP",
            "PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT",
        )
        self.advertised_listeners = os.getenv(
            "KAFKA_ADVERTISED_LISTENERS",
            f"PLAINTEXT_HOST://localhost:{self.kafka_admin_port},PLAINTEXT://{self.kafka_container_name}:{self.kafka_port}",
        )
        self.allow_plaintext_listener = os.getenv("ALLOW_PLAINTEXT_LISTENER", "yes")
        self.kafka_topic_auto_create = os.getenv("KAFKA_TOPIC_AUTO_CREATE", "true")
        self.kafka_transaction_state_log_min_isr = os.getenv(
            "KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", 1
        )
        self.kafka_transaction_state_log_replication_factor = os.getenv(
            "KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", 1
        )
        self.kafka_clusters_bootstrapservers = os.getenv(
            "KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS",
            f"{self.kafka_container_name}:{self.kafka_port}",
        )
class OpalClientSettings:
    """Settings for an OPAL client container.

    Values are loaded from environment variables first (load_from_env),
    then overridden by any explicit constructor arguments.
    """

    def __init__(
        self,
        client_token: str | None = None,
        container_name: str | None = None,
        port: int | None = None,
        opal_server_url: str | None = None,
        should_report_on_data_updates: str | None = None,
        log_format_include_pid: str | None = None,
        tests_debug: bool | None = False,
        log_diagnose: str | None = None,
        log_level: str | None = None,
        debug_enabled: bool | None = None,
        debug_port: int | None = None,
        image: str | None = None,
        opa_port: int | None = None,
        default_update_callbacks: str | None = None,
        opa_health_check_policy_enabled: str | None = None,
        auth_jwt_audience: str | None = None,
        auth_jwt_issuer: str | None = None,
        statistics_enabled: str | None = None,
        policy_store_type: str | None = None,
        policy_store_url: str | None = None,
        iniline_cedar_enabled: str | None = None,
        inline_cedar_exec_path: str | None = None,
        inline_cedar_config: str | None = None,
        inline_cedar_log_format: str | None = None,
        inline_opa_enabled: bool | None = None,
        inline_opa_exec_path: str | None = None,
        inline_opa_config: str | None = None,
        inline_opa_log_format: str | None = None,
        uvicorn_asgi_app: str | None = None,
        container_index: int = 1,
        topics: str | None = None,
        public_key: str | None = None,
        private_key: str | None = None,
        **kwargs,
    ):
        """
        Args:
            client_token: The client token to use for authentication.
            container_name: The name of the container.
            port: The port to use for the server.
            opal_server_url: The URL of the server.
            should_report_on_data_updates: Whether to report on data updates.
            log_format_include_pid: Whether to include the process ID in the log format.
            tests_debug: Whether to run the tests in debug mode.
            log_diagnose: Whether to log diagnose information.
            log_level: The log level to use.
            debug_enabled: Whether to enable debug mode.
            debug_port: The port to use for the debug server.
            image: The image to use for the container.
            opa_port: The port to use for the OPA server.
            default_update_callbacks: The default update callbacks to use.
            opa_health_check_policy_enabled: Whether to enable the OPA health check policy.
            auth_jwt_audience: The JWT audience to use for authentication.
            auth_jwt_issuer: The JWT issuer to use for authentication.
            statistics_enabled: Whether to enable statistics.
            policy_store_type: The policy store type to use.
            policy_store_url: The URL of the policy store.
            iniline_cedar_enabled: Whether to enable inline Cedar.
                NOTE(review): "iniline" is a typo, but the name is part of the
                public interface (and mirrored in the OPAL_INILINE_* env
                names below), so it is kept for compatibility.
            inline_cedar_exec_path: The path to the Cedar executable.
            inline_cedar_config: The configuration to use for Cedar.
            inline_cedar_log_format: The log format to use for Cedar.
            inline_opa_enabled: Whether to enable inline OPA.
            inline_opa_exec_path: The path to the OPA executable.
            inline_opa_config: The configuration to use for OPA.
            inline_opa_log_format: The log format to use for OPA.
            uvicorn_asgi_app: The ASGI app to use for the server.
            container_index: The index of the container.
            topics: The topics to use for the server.
            public_key: The public key to use for authentication.
            private_key: The private key to use for authentication.
            **kwargs: Additional keyword arguments.

        Instructions:
            To add a setting, add it to the constructor and update the
            load_from_env() method (which initializes it from an environment
            variable or a fallback), then assign it in the constructor.
            If the setting must reach the container as an environment
            variable, also add it in getEnvVars().

        NOTE(review): every override below uses the pattern
        `x if x else self.x`, so falsy explicit values (False, 0, "")
        cannot override the env-derived default.
        """

        self.logger = setup_logger("OpalClientSettings")

        # Env/defaults first; explicit arguments below override them.
        self.load_from_env()

        self.image = image if image else self.image
        self.container_name = container_name if container_name else self.container_name
        self.port = port if port else self.port
        self.opal_server_url = (
            opal_server_url if opal_server_url else self.opal_server_url
        )
        self.opa_port = opa_port if opa_port else self.opa_port
        self.should_report_on_data_updates = (
            should_report_on_data_updates
            if should_report_on_data_updates
            else self.should_report_on_data_updates
        )
        self.log_format_include_pid = (
            log_format_include_pid
            if log_format_include_pid
            else self.log_format_include_pid
        )

        self.tests_debug = tests_debug if tests_debug else self.tests_debug
        self.log_diagnose = log_diagnose if log_diagnose else self.log_diagnose
        self.log_level = log_level if log_level else self.log_level
        self.debug_enabled = debug_enabled if debug_enabled else self.debug_enabled
        self.default_update_callbacks = (
            default_update_callbacks
            if default_update_callbacks
            else self.default_update_callbacks
        )
        self.client_token = client_token if client_token else self.client_token
        self.opa_health_check_policy_enabled = (
            opa_health_check_policy_enabled
            if opa_health_check_policy_enabled
            else self.opa_health_check_policy_enabled
        )
        self.auth_jwt_audience = (
            auth_jwt_audience if auth_jwt_audience else self.auth_jwt_audience
        )
        self.auth_jwt_issuer = (
            auth_jwt_issuer if auth_jwt_issuer else self.auth_jwt_issuer
        )
        self.statistics_enabled = (
            statistics_enabled if statistics_enabled else self.statistics_enabled
        )
        self.container_index = (
            container_index if container_index else self.container_index
        )
        self.debug_port = debug_port if debug_port else self.debug_port
        # NOTE(review): kwargs merged here are silently overwritten by the
        # assignments below (policy_store_*, keys, inline_* etc.) -- confirm
        # this mid-constructor placement is intentional.
        self.__dict__.update(kwargs)

        self.policy_store_type = (
            policy_store_type if policy_store_type else self.policy_store_type
        )
        self.policy_store_url = (
            policy_store_url if policy_store_url else self.policy_store_url
        )

        self.public_key = public_key if public_key else self.public_key
        self.private_key = private_key if private_key else self.private_key

        self.uvicorn_asgi_app = (
            uvicorn_asgi_app if uvicorn_asgi_app else self.uvicorn_asgi_app
        )

        self.iniline_cedar_enabled = (
            iniline_cedar_enabled
            if iniline_cedar_enabled
            else self.iniline_cedar_enabled
        )
        self.inline_cedar_exec_path = (
            inline_cedar_exec_path
            if inline_cedar_exec_path
            else self.inline_cedar_exec_path
        )
        self.inline_cedar_config = (
            inline_cedar_config if inline_cedar_config else self.inline_cedar_config
        )
        self.inline_cedar_log_format = (
            inline_cedar_log_format
            if inline_cedar_log_format
            else self.inline_cedar_log_format
        )

        self.inline_opa_enabled = (
            inline_opa_enabled if inline_opa_enabled else self.inline_opa_enabled
        )
        self.inline_opa_exec_path = (
            inline_opa_exec_path if inline_opa_exec_path else self.inline_opa_exec_path
        )
        self.inline_opa_config = (
            inline_opa_config if inline_opa_config else self.inline_opa_config
        )
        self.inline_opa_log_format = (
            inline_opa_log_format
            if inline_opa_log_format
            else self.inline_opa_log_format
        )
        self.topics = topics if topics else self.topics

        self.validate_dependencies()

    def validate_dependencies(self):
        """Raise ValueError if a mandatory setting is missing."""
        if not self.image:
            raise ValueError("OPAL_CLIENT_IMAGE is required.")
        if not self.container_name:
            raise ValueError("OPAL_CLIENT_CONTAINER_NAME is required.")
        if not self.opal_server_url:
            raise ValueError("OPAL_SERVER_URL is required.")

        self.logger.info(
            f"{self.container_name} | Dependencies validated successfully."
        )

    def getEnvVars(self):
        """Build the environment-variable mapping passed to the container."""
        env_vars = {
            "OPAL_SERVER_URL": self.opal_server_url,
            "OPAL_LOG_FORMAT_INCLUDE_PID": self.log_format_include_pid,
            "OPAL_SHOULD_REPORT_ON_DATA_UPDATES": self.should_report_on_data_updates,
            "OPAL_DEFAULT_UPDATE_CALLBACKS": self.default_update_callbacks,
            "OPAL_OPA_HEALTH_CHECK_POLICY_ENABLED": self.opa_health_check_policy_enabled,
            "OPAL_CLIENT_TOKEN": self.client_token,
            "OPAL_AUTH_JWT_AUDIENCE": self.auth_jwt_audience,
            "OPAL_AUTH_JWT_ISSUER": self.auth_jwt_issuer,
            "OPAL_STATISTICS_ENABLED": self.statistics_enabled,
            # TODO: make not hardcoded
            "OPAL_DATA_TOPICS": self.topics,
            "UVICORN_ASGI_APP": self.uvicorn_asgi_app,
            "UVICORN_NUM_WORKERS": "1",
            "UVICORN_PORT": str(self.port),
            "OPAL_AUTH_PUBLIC_KEY": self.public_key,
        }

        # NOTE(review): tests_debug may be the string "true"/"false" from the
        # environment; a "false" string is truthy, so these are set anyway.
        if self.tests_debug:
            env_vars["LOG_DIAGNOSE"] = self.log_diagnose
            env_vars["OPAL_LOG_LEVEL"] = self.log_level

        if self.policy_store_type:
            env_vars["OPAL_POLICY_STORE_TYPE"] = self.policy_store_type

        if self.policy_store_url:
            env_vars["OPAL_POLICY_STORE_URL"] = self.policy_store_url

        if self.inline_opa_enabled:
            env_vars["OPAL_INLINE_OPA_ENABLED"] = self.inline_opa_enabled
            env_vars["OPAL_INLINE_OPA_EXEC_PATH"] = self.inline_opa_exec_path
            env_vars["OPAL_INLINE_OPA_CONFIG"] = self.inline_opa_config
            env_vars["OPAL_INLINE_OPA_LOG_FORMAT"] = self.inline_opa_log_format

        # NOTE(review): "INILINE" in these env names looks like a typo of
        # "INLINE" -- verify against the variable names the OPAL client
        # actually reads before renaming.
        if self.iniline_cedar_enabled:
            env_vars["OPAL_INILINE_CEDAR_ENABLED"] = self.iniline_cedar_enabled
            env_vars["OPAL_INILINE_CEDAR_EXEC_PATH"] = self.inline_cedar_exec_path
            env_vars["OPAL_INILINE_CEDAR_CONFIG"] = self.inline_cedar_config
            env_vars["OPAL_INILINE_CEDAR_LOG_FORMAT"] = self.inline_cedar_log_format

        return env_vars

    def load_from_env(self):
        """Populate every setting from the environment, with fallbacks."""
        self.image = os.getenv("OPAL_CLIENT_IMAGE", "opal_client_debug_local")
        self.container_name = os.getenv("OPAL_CLIENT_CONTAINER_NAME", "opal_client")
        # NOTE(review): os.getenv returns a str when the variable is set, an
        # int (from find_available_port) otherwise -- port is later
        # str()-converted in getEnvVars, so both work.
        self.port = os.getenv("OPAL_CLIENT_PORT", utils.find_available_port(7000))
        self.opal_server_url = os.getenv("OPAL_SERVER_URL", "http://opal_server:7002")
        self.opa_port = os.getenv("OPA_PORT", utils.find_available_port(8181))
        self.tests_debug = os.getenv("OPAL_TESTS_DEBUG", "true")
        self.log_diagnose = os.getenv("LOG_DIAGNOSE", "true")
        self.log_level = os.getenv("OPAL_LOG_LEVEL", "DEBUG")
        self.public_key = os.getenv("OPAL_AUTH_PUBLIC_KEY", None)
        self.private_key = os.getenv("OPAL_AUTH_PRIVATE_KEY", None)
        self.log_format_include_pid = os.getenv("OPAL_LOG_FORMAT_INCLUDE_PID", "true")
        self.should_report_on_data_updates = os.getenv(
            "OPAL_SHOULD_REPORT_ON_DATA_UPDATES", "true"
        )
        self.default_update_callbacks = os.getenv("OPAL_DEFAULT_UPDATE_CALLBACKS", None)
        self.opa_health_check_policy_enabled = os.getenv(
            "OPAL_OPA_HEALTH_CHECK_POLICY_ENABLED", "true"
        )
        self.client_token = os.getenv("OPAL_CLIENT_TOKEN", None)
        self.auth_jwt_audience = os.getenv(
            "OPAL_AUTH_JWT_AUDIENCE", "https://api.opal.ac/v1/"
        )
        self.auth_jwt_issuer = os.getenv("OPAL_AUTH_JWT_ISSUER", "https://opal.ac/")
        self.statistics_enabled = os.getenv("OPAL_STATISTICS_ENABLED", "true")
        # NOTE(review): when the env var is set this is a *string*, so
        # "false" is still truthy and debug stays enabled -- confirm intent.
        self.debug_enabled = os.getenv("OPAL_DEBUG_ENABLED", True)
        self.debug_port = os.getenv(
            "CLIENT_DEBUG_PORT", utils.find_available_port(6678)
        )
        self.policy_store_url = os.getenv("OPAL_POLICY_STORE_URL", None)

        self.policy_store_type = os.getenv("OPAL_POLICY_STORE_TYPE", "OPA")

        self.uvicorn_asgi_app = os.getenv("UVICORN_ASGI_APP", "opal_client.main:app")

        self.iniline_cedar_enabled = os.getenv("OPAL_INILINE_CEDAR_ENABLED", "false")
        self.inline_cedar_exec_path = os.getenv(
            "OPAL_INLINE_CEDAR_EXEC_PATH", "/cedar/cedar-agent"
        )
        self.inline_cedar_config = os.getenv(
            "OPAL_INLINE_CEDAR_CONFIG", '{"addr": "0.0.0.0:8180"}'
        )
        self.inline_cedar_log_format = os.getenv("OPAL_INLINE_CEDAR_LOG_FORMAT", "http")

        self.inline_opa_enabled = os.getenv("OPAL_INLINE_OPA_ENABLED", "true")
        self.inline_opa_exec_path = os.getenv("OPAL_INLINE_OPA_EXEC_PATH", "/opal/opa")
        self.inline_opa_config = os.getenv(
            "OPAL_INLINE_OPA_CONFIG", None  # '{"addr": "0.0.0.0:8181"}'
        )
        self.inline_opa_log_format = os.getenv("OPAL_INLINE_OPA_LOG_FORMAT", "http")
        self.topics = os.getenv("OPAL_DATA_TOPICS", "policy_data")

        # Generate a fresh key pair when either half is missing.
        if not self.private_key or not self.public_key:
            self.private_key, self.public_key = utils.generate_ssh_key_pair()
+ :param network_name: Name of the Docker network to attach. + :param port: Exposed port for the OPAL server. + :param uvicorn_workers: Number of Uvicorn workers. + :param policy_repo_url: URL of the policy repository. + :param polling_interval: Polling interval for the policy + repository. + :param private_key: SSH private key for authentication. + :param public_key: SSH public key for authentication. + :param master_token: Master token for OPAL authentication. + :param data_topics: Data topics for OPAL configuration. + :param broadcast_uri: Optional URI for the broadcast channel. + :param auth_audience: Optional audience for authentication. + :param auth_issuer: Optional issuer for authentication. + :param tests_debug: Optional flag for tests debug mode. + :param log_diagnose: Optional flag for log diagnosis. + :param log_level: Optional log level for the OPAL server. + :param log_format_include_pid: Optional flag for including PID + in log format. + :param statistics_enabled: Optional flag for enabling + statistics. + :param debug_enabled: Optional flag for enabling debug mode with + debugpy. + :param debug_port: Optional port for debugpy. + :param auth_private_key_passphrase: Optional passphrase for the + private key. + :param policy_repo_main_branch: Optional main branch for the + policy repository. + :param webhook_secret: Optional secret for the webhook. + :param webhook_params: Optional parameters for the webhook. + :param uvicorn_asgi_app: Optional ASGI app for Uvicorn. + :param uvicorn_port: Optional port for Uvicorn. + :param all_data_url: Optional URL for all data. + :param policy_repo_reuse_clone_path: Optional flag for reusing + the clone path for the policy repository. + :param container_index: Optional index for the container. + :param kwargs: Additional keyword arguments. 
+ """ + + self.logger = setup_logger(__name__) + + self.load_from_env() + + self.image = image if image else self.image + self.container_name = container_name if container_name else self.container_name + self.port = port if port else self.port + self.uvicorn_workers = ( + uvicorn_workers if uvicorn_workers else self.uvicorn_workers + ) + self.policy_repo_url = ( + policy_repo_url if policy_repo_url else self.policy_repo_url + ) + self.polling_interval = ( + polling_interval if polling_interval else self.polling_interval + ) + self.private_key = private_key if private_key else self.private_key + self.public_key = public_key if public_key else self.public_key + self.master_token = master_token if master_token else self.master_token + self.data_topics = data_topics if data_topics else self.data_topics + self.broadcast_uri = broadcast_uri if broadcast_uri else self.broadcast_uri + self.auth_audience = auth_audience if auth_audience else self.auth_audience + self.auth_issuer = auth_issuer if auth_issuer else self.auth_issuer + self.tests_debug = tests_debug if tests_debug else self.tests_debug + self.log_diagnose = log_diagnose if log_diagnose else self.log_diagnose + self.log_level = log_level if log_level else self.log_level + self.log_format_include_pid = ( + log_format_include_pid + if log_format_include_pid + else self.log_format_include_pid + ) + self.statistics_enabled = ( + statistics_enabled if statistics_enabled else self.statistics_enabled + ) + self.debugEnabled = debug_enabled if debug_enabled else self.debugEnabled + self.debug_port = debug_port if debug_port else self.debug_port + self.auth_private_key_passphrase = ( + auth_private_key_passphrase + if auth_private_key_passphrase + else self.auth_private_key_passphrase + ) + self.policy_repo_main_branch = ( + policy_repo_main_branch + if policy_repo_main_branch + else self.policy_repo_main_branch + ) + + self.uvicorn_asgi_app = ( + uvicorn_asgi_app if uvicorn_asgi_app else self.uvicorn_asgi_app + ) + 
self.uvicorn_port = uvicorn_port if uvicorn_port else self.uvicorn_port + self.all_data_url = all_data_url if all_data_url else self.all_data_url + self.policy_repo_reuse_clone_path = ( + policy_repo_reuse_clone_path + if policy_repo_reuse_clone_path + else self.policy_repo_reuse_clone_path + ) + + self.container_index = ( + container_index if container_index else self.container_index + ) + + self.webhook_secret = webhook_secret if webhook_secret else self.webhook_secret + self.webhook_params = webhook_params if webhook_params else self.webhook_params + + self.__dict__.update(kwargs) + + self.validate_dependencies() + + def validate_dependencies(self): + """Validate required dependencies before starting the server.""" + if not self.policy_repo_url: + raise ValueError("OPAL_POLICY_REPO_URL is required.") + if not self.private_key or not self.public_key: + raise ValueError("SSH private and public keys are required.") + if not self.master_token: + raise ValueError("OPAL master token is required.") + self.logger.info( + f"{self.container_name} | Dependencies validated successfully." 
+ ) + + def getEnvVars(self): + # Configure environment variables + + env_vars = { + "UVICORN_NUM_WORKERS": self.uvicorn_workers, + "OPAL_POLICY_REPO_URL": self.policy_repo_url, + "OPAL_POLICY_REPO_MAIN_BRANCH": self.policy_repo_main_branch, + "OPAL_POLICY_REPO_POLLING_INTERVAL": self.polling_interval, + "OPAL_AUTH_PRIVATE_KEY": self.private_key, + "OPAL_AUTH_PUBLIC_KEY": self.public_key, + "OPAL_AUTH_MASTER_TOKEN": self.master_token, + "OPAL_DATA_CONFIG_SOURCES": f"""{{"config":{{"entries":[{{"url":"http://{self.container_name}:7002/policy-data","topics":["{self.data_topics}"],"dst_path":"/static"}}]}}}}""", + "OPAL_LOG_FORMAT_INCLUDE_PID": self.log_format_include_pid, + "OPAL_STATISTICS_ENABLED": self.statistics_enabled, + "OPAL_AUTH_JWT_AUDIENCE": self.auth_audience, + "OPAL_AUTH_JWT_ISSUER": self.auth_issuer, + "UVICORN_ASGI_APP": self.uvicorn_asgi_app, + "UVICORN_PORT": self.uvicorn_port, + "OPAL_ALL_DATA_URL": self.all_data_url, + "OPAL_POLICY_REPO_REUSE_CLONE_PATH": self.policy_repo_reuse_clone_path, + } + + if pytest_settings.use_webhook: + env_vars["OPAL_WEBHOOK_SECRET"] = self.webhook_secret + env_vars["OPAL_WEBHOOK_PARAMS"] = self.webhook_params + + if self.tests_debug: + env_vars["LOG_DIAGNOSE"] = self.log_diagnose + env_vars["OPAL_LOG_LEVEL"] = self.log_level + + if self.auth_private_key_passphrase: + env_vars[ + "OPAL_AUTH_PRIVATE_KEY_PASSPHRASE" + ] = self.auth_private_key_passphrase + + if self.broadcast_uri: + env_vars["OPAL_BROADCAST_URI"] = self.broadcast_uri + + return env_vars + + def load_from_env(self): + self.image = os.getenv("OPAL_SERVER_IMAGE", "opal_server_debug_local") + self.container_name = os.getenv("OPAL_SERVER_CONTAINER_NAME", None) + self.port = os.getenv("OPAL_SERVER_PORT", utils.find_available_port(7002)) + self.uvicorn_workers = os.getenv("OPAL_SERVER_UVICORN_WORKERS", "1") + self.policy_repo_url = os.getenv("OPAL_POLICY_REPO_URL", None) + self.polling_interval = os.getenv("OPAL_POLICY_REPO_POLLING_INTERVAL", "30") + 
self.private_key = os.getenv("OPAL_AUTH_PRIVATE_KEY", None) + self.public_key = os.getenv("OPAL_AUTH_PUBLIC_KEY", None) + self.master_token = os.getenv("OPAL_AUTH_MASTER_TOKEN", token_hex(16)) + self.data_topics = os.getenv("OPAL_DATA_TOPICS", "policy_data") + self.broadcast_uri = os.getenv("OPAL_BROADCAST_URI", None) + self.auth_audience = os.getenv( + "OPAL_AUTH_JWT_AUDIENCE", "https://api.opal.ac/v1/" + ) + self.auth_issuer = os.getenv("OPAL_AUTH_JWT_ISSUER", "https://opal.ac/") + self.tests_debug = os.getenv("OPAL_TESTS_DEBUG", "true") + self.log_diagnose = os.getenv("LOG_DIAGNOSE", "true") + self.log_level = os.getenv("OPAL_LOG_LEVEL", "INFO") + self.log_format_include_pid = os.getenv("OPAL_LOG_FORMAT_INCLUDE_PID", "true") + self.statistics_enabled = os.getenv("OPAL_STATISTICS_ENABLED", "true") + self.debugEnabled = os.getenv("OPAL_DEBUG_ENABLED", "true") + self.auth_private_key_passphrase = os.getenv( + "OPAL_AUTH_PRIVATE_KEY_PASSPHRASE", None + ) + self.policy_repo_main_branch = os.getenv( + "OPAL_POLICY_REPO_MAIN_BRANCH", "master" + ) + self.debug_port = os.getenv( + "SERVER_DEBUG_PORT", utils.find_available_port(5678) + ) + self.webhook_secret = os.getenv("OPAL_POLICY_REPO_WEBHOOK_SECRET", "P3rm1t10") + self.webhook_params = os.getenv( + "OPAL_POLICY_REPO_WEBHOOK_PARAMS", + json.dumps( + { + "secret_header_name": "x-webhook-token", + "secret_type": "token", + "secret_parsing_regex": "(.*)", + "event_request_key": "gitEvent", + "push_event_value": "git.push", + } + ), + ) + self.all_data_url = os.getenv("OPAL_ALL_DATA_URL", None) + self.policy_repo_reuse_clone_path = os.getenv( + "OPAL_POLICY_REPO_REUSE_CLONE_PATH", "true" + ) + self.uvicorn_asgi_app = os.getenv( + "OPAL_SERVER_UVICORN_ASGI_APP", "opal_server.main:app" + ) + self.uvicorn_port = os.getenv("OPAL_SERVER_UVICORN_PORT", "7002") + + if not self.private_key or not self.public_key: + self.private_key, self.public_key = utils.generate_ssh_key_pair() diff --git 
a/tests/containers/settings/postgres_broadcast_settings.py b/tests/containers/settings/postgres_broadcast_settings.py new file mode 100644 index 000000000..09f3ba4d9 --- /dev/null +++ b/tests/containers/settings/postgres_broadcast_settings.py @@ -0,0 +1,61 @@ +import os +from testcontainers.core.utils import setup_logger + + +class PostgresBroadcastSettings: + def __init__( + self, + container_name: str | None = None, + host: str | None = None, + port: int | None = None, + user: str | None = None, + password: str | None = None, + database: str | None = None, + ): + + self.logger = setup_logger("PostgresBroadcastSettings") + + self.load_from_env() + + self.container_name = container_name if container_name else self.container_name + self.host = host if host else self.host + self.port = port if port else self.port + self.user = user if user else self.user + self.password = password if password else self.password + self.database = database if database else self.database + self.protocol = "postgres" + + self.validate_dependencies() + + def validate_dependencies(self): + """Validate required dependencies before starting the server.""" + if not self.host: + raise ValueError("POSTGRES_HOST is required.") + if not self.port: + raise ValueError("POSTGRES_PORT is required.") + if not self.user: + raise ValueError("POSTGRES_USER is required.") + if not self.password: + raise ValueError("POSTGRES_PASSWORD is required.") + if not self.database: + raise ValueError("POSTGRES_DATABASE is required.") + + self.logger.info(f"{self.container_name} | Dependencies validated successfully.") + + + def getEnvVars(self): + return { + "POSTGRES_HOST": self.host, + "POSTGRES_PORT": self.port, + "POSTGRES_USER": self.user, + "POSTGRES_PASSWORD": self.password, + "POSTGRES_DATABASE": self.database, + } + + def load_from_env(self): + self.host = os.getenv("POSTGRES_HOST", "localhost") + self.port = int(os.getenv("POSTGRES_PORT", 5432)) + self.user = os.getenv("POSTGRES_USER", "postgres") + 
self.password = os.getenv("POSTGRES_PASSWORD", "postgres") + self.database = os.getenv("POSTGRES_DATABASE", "postgres") + self.container_name = os.getenv("POSTGRES_CONTAINER_NAME", "broadcast_channel") diff --git a/tests/containers/zookeeper_container.py b/tests/containers/zookeeper_container.py new file mode 100644 index 000000000..30c35204e --- /dev/null +++ b/tests/containers/zookeeper_container.py @@ -0,0 +1,40 @@ +import debugpy +from testcontainers.core.container import DockerContainer +from testcontainers.core.network import Network + +import docker +from tests.containers.permitContainer import PermitContainer + + +class ZookeeperContainer(PermitContainer, DockerContainer): + def __init__( + self, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.network = network + + PermitContainer.__init__(self) + DockerContainer.__init__( + self, + image="confluentinc/cp-zookeeper:latest", + docker_client_kw=docker_client_kw, + **kwargs, + ) + + self.with_bind_ports(2181, 2181) + self.with_env("ZOOKEEPER_CLIENT_PORT", "2181") + self.with_env("ZOOKEEPER_TICK_TIME", "2000") + self.with_env("ALLOW_ANONYMOUS_LOGIN", "yes") + + self.with_network(self.network) + + self.with_network_aliases("zookeper") + # Add a custom name for the container + self.with_name(f"zookeeper") diff --git a/tests/docker/Dockerfile.cedar b/tests/docker/Dockerfile.cedar new file mode 100644 index 000000000..5715a820c --- /dev/null +++ b/tests/docker/Dockerfile.cedar @@ -0,0 +1,32 @@ +# CEDAR AGENT BUILD STAGE --------------------------- +# This stage compiles the Cedar agent +# --------------------------------------------------- + FROM rust:1.79 AS cedar-builder + + # Copy Cedar agent source code + COPY ./cedar-agent /tmp/cedar-agent + WORKDIR /tmp/cedar-agent + + # Build the Cedar agent in release mode 
+ RUN CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse cargo build --release + + # CEDAR AGENT IMAGE --------------------------------- + # The final image with the Cedar agent executable + # --------------------------------------------------- + FROM alpine:latest AS cedar-agent + + # Create a non-root user for running the agent + RUN adduser -D cedar && mkdir -p /cedar && chown cedar:cedar /cedar + USER cedar + + # Copy the Cedar agent binary from the build stage + COPY --from=cedar-builder /tmp/cedar-agent/target/*/cedar-agent /cedar/cedar-agent + + # Expose Cedar agent port + EXPOSE 8180 + + # Set default working directory + WORKDIR /cedar + + # Set the default command + CMD ["/cedar/cedar-agent"] diff --git a/tests/docker/Dockerfile.client b/tests/docker/Dockerfile.client new file mode 100644 index 000000000..810f3c1c0 --- /dev/null +++ b/tests/docker/Dockerfile.client @@ -0,0 +1,17 @@ +FROM permitio/opal-client:latest + +# Install debugpy +RUN pip install debugpy + +# Set up Gunicorn to include debugpy (or switch to Uvicorn for debugging) +USER root + +WORKDIR /opal + +COPY start_debug.sh . 
+RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +USER opal + +CMD ["./start_debug.sh"] diff --git a/tests/docker/Dockerfile.client.local b/tests/docker/Dockerfile.client.local new file mode 100644 index 000000000..093c90fbf --- /dev/null +++ b/tests/docker/Dockerfile.client.local @@ -0,0 +1,76 @@ +# Dockerfile.client + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY packages/requires.txt ./base_requires.txt +COPY packages/opal-common/requires.txt ./common_requires.txt +COPY packages/opal-client/requires.txt ./client_requires.txt +COPY packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# Install debugpy +RUN pip install debugpy + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY scripts/wait-for.sh . 
+RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh +# copy gunicorn_config +COPY scripts/gunicorn_conf.py . +# copy app code + +COPY README.md . +COPY packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH + +#add on top of the regular start script the debug one +COPY ./tests/start_debug.sh . +RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +# run gunicorn +CMD ["./start_debug.sh"] + +# STANDALONE IMAGE ---------------------------------- +# --------------------------------------------------- +FROM common AS client-standalone + +# install the opal-client package +RUN cd ./packages/opal-client && python setup.py install + +USER opal + +RUN mkdir -p /opal/backup +VOLUME /opal/backup diff --git a/tests/docker/Dockerfile.client_cedar.local b/tests/docker/Dockerfile.client_cedar.local new file mode 100644 index 000000000..2fcab94fe --- /dev/null +++ b/tests/docker/Dockerfile.client_cedar.local @@ -0,0 +1,111 @@ +# Dockerfile.client + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY packages/requires.txt ./base_requires.txt +COPY packages/opal-common/requires.txt ./common_requires.txt +COPY packages/opal-client/requires.txt ./client_requires.txt +COPY packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc 
python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# Install debugpy +RUN pip install debugpy + +FROM rust:1.79 AS cedar-builder + +# Copy Cedar agent source code +COPY ./cedar-agent /tmp/cedar-agent +WORKDIR /tmp/cedar-agent + +# Build the Cedar agent in release mode +RUN CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse cargo build --release + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY scripts/wait-for.sh . +RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh +# copy gunicorn_config +COPY scripts/gunicorn_conf.py . +# copy app code + +COPY README.md . +COPY packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH + +#add on top of the regular start script the debug one +COPY ./tests/start_debug.sh . 
+RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +# run gunicorn +CMD ["./start_debug.sh"] + +# STANDALONE IMAGE ---------------------------------- +# --------------------------------------------------- + FROM common AS client-standalone + + # install the opal-client package + RUN cd ./packages/opal-client && python setup.py install + + USER opal + + RUN mkdir -p /opal/backup + VOLUME /opal/backup + + # CEDAR CLIENT IMAGE -------------------------------- +# Using standalone image as base -------------------- +# --------------------------------------------------- +FROM client-standalone AS client-cedar + +# Temporarily move back to root for additional setup +USER root + +# Copy cedar from its build stage +COPY --from=cedar-builder /tmp/cedar-agent/target/*/cedar-agent /bin/cedar-agent + +ENV UVICORN_NUM_WORKERS=1 +ENV UVICORN_ASGI_APP=opal_client.main:app +ENV UVICORN_PORT=7000 + +# enable inline Cedar agent +ENV OPAL_POLICY_STORE_TYPE=CEDAR +ENV OPAL_INLINE_CEDAR_ENABLED=true +ENV OPAL_INLINE_CEDAR_EXEC_PATH=/bin/cedar-agent +ENV OPAL_INLINE_CEDAR_CONFIG='{"addr": "0.0.0.0:8180"}' +ENV OPAL_POLICY_STORE_URL=http://localhost:8180 + +# expose cedar port +EXPOSE 8180 +USER opal diff --git a/tests/docker/Dockerfile.client_opa.local b/tests/docker/Dockerfile.client_opa.local new file mode 100644 index 000000000..903f0d81e --- /dev/null +++ b/tests/docker/Dockerfile.client_opa.local @@ -0,0 +1,106 @@ +# Dockerfile.client + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY packages/requires.txt ./base_requires.txt +COPY packages/opal-common/requires.txt ./common_requires.txt +COPY packages/opal-client/requires.txt ./client_requires.txt +COPY packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install 
--no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# Install debugpy +RUN pip install debugpy + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY scripts/wait-for.sh . +RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh +# copy gunicorn_config +COPY scripts/gunicorn_conf.py . +# copy app code + +COPY README.md . +COPY packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH + +#add on top of the regular start script the debug one +COPY ./tests/start_debug.sh . 
+RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +# run gunicorn +CMD ["./start_debug.sh"] + +# STANDALONE IMAGE ---------------------------------- +# --------------------------------------------------- + FROM common AS client-standalone + + # install the opal-client package + RUN cd ./packages/opal-client && python setup.py install + + USER opal + + RUN mkdir -p /opal/backup + VOLUME /opal/backup + + # IMAGE to extract OPA from official image ---------- + # --------------------------------------------------- + FROM alpine:latest AS opa-extractor + USER root + + RUN apk update && apk add skopeo tar + WORKDIR /opal + + # copy opa from official docker image + ARG opa_image=openpolicyagent/opa + ARG opa_tag=latest-static + RUN skopeo copy "docker://${opa_image}:${opa_tag}" docker-archive:./image.tar && \ + mkdir image && tar xf image.tar -C ./image && cat image/*.tar | tar xf - -C ./image -i && \ + find image/ -name "opa*" -type f -executable -print0 | xargs -0 -I "{}" cp {} ./opa && chmod 755 ./opa && \ + rm -r image image.tar + + + # OPA CLIENT IMAGE ---------------------------------- + # Using standalone image as base -------------------- + # --------------------------------------------------- + FROM client-standalone AS client + + # Temporarily move back to root for additional setup + USER root + + # copy opa from opa-extractor + COPY --from=opa-extractor /opal/opa ./opa + + USER opal diff --git a/tests/docker/Dockerfile.opa b/tests/docker/Dockerfile.opa new file mode 100644 index 000000000..89a6f63da --- /dev/null +++ b/tests/docker/Dockerfile.opa @@ -0,0 +1,36 @@ +# OPA EXTRACTOR STAGE -------------------------------- +# This stage extracts the OPA binary from the official OPA image +# ----------------------------------------------------- + FROM alpine:latest AS opa-extractor + + # Install necessary tools for extracting the OPA binary + RUN apk update && apk add --no-cache skopeo tar + + # Define working directory + WORKDIR /opa + + 
# Copy OPA binary from the official OPA image + ARG OPA_IMAGE=openpolicyagent/opa + ARG OPA_TAG=latest-static + RUN skopeo copy "docker://${OPA_IMAGE}:${OPA_TAG}" docker-archive:./image.tar && \ + mkdir image && tar xf image.tar -C ./image && cat image/*.tar | tar xf - -C ./image -i && \ + find image/ -name "opa*" -type f -executable -print0 | xargs -0 -I "{}" cp {} ./opa && chmod 755 ./opa && \ + rm -r image image.tar + + # STANDALONE OPA CONTAINER ---------------------------- + # This is the final image with the extracted OPA binary + # ----------------------------------------------------- + FROM alpine:latest AS opa + + # Create a non-root user for running OPA + RUN adduser -D opa && mkdir -p /opa && chown opa:opa /opa + USER opa + + # Copy the OPA binary from the extractor stage + COPY --from=opa-extractor /opa/opa /opa/opa + + # Set the working directory + WORKDIR /opa + + # Set the default command to run the OPA server + CMD ["/opa/opa", "run", "--server", "--log-level", "info"] diff --git a/tests/docker/Dockerfile.server b/tests/docker/Dockerfile.server new file mode 100644 index 000000000..9a6dd30fb --- /dev/null +++ b/tests/docker/Dockerfile.server @@ -0,0 +1,17 @@ +FROM permitio/opal-server:latest + +# Install debugpy +RUN pip install debugpy + +# Set up Gunicorn to include debugpy (or switch to Uvicorn for debugging) +USER root + +WORKDIR /opal + +COPY start_debug.sh . 
+RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +USER opal + +CMD ["./start_debug.sh"] diff --git a/tests/docker/Dockerfile.server.local b/tests/docker/Dockerfile.server.local new file mode 100644 index 000000000..d4056ee2b --- /dev/null +++ b/tests/docker/Dockerfile.server.local @@ -0,0 +1,102 @@ +# Dockerfile.server + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY packages/requires.txt ./base_requires.txt +COPY packages/opal-common/requires.txt ./common_requires.txt +COPY packages/opal-client/requires.txt ./client_requires.txt +COPY packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# Install debugpy +RUN pip install debugpy + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY scripts/wait-for.sh . 
+RUN chmod +x ./wait-for.sh
+RUN ln -s /opal/wait-for.sh /usr/wait-for.sh
+
+# netcat (nc) is used by the wait-for.sh script
+RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean
+# Install sudo for Debian/Ubuntu-based images
+RUN apt-get update && apt-get install -y sudo && apt-get clean
+
+# copy startup script (create link at old path to maintain backward compatibility)
+COPY scripts/start.sh .
+RUN chmod +x ./start.sh
+RUN ln -s /opal/start.sh /start.sh
+
+# copy gunicorn_config
+COPY scripts/gunicorn_conf.py .
+
+# copy app code
+COPY README.md .
+COPY packages ./packages/
+# install the opal-common package
+RUN cd ./packages/opal-common && python setup.py install
+# Make sure scripts in .local are usable:
+ENV PATH=/opal:/root/.local/bin:$PATH
+
+#add on top of the regular start script the debug one
+COPY tests/start_debug.sh .
+RUN chmod +x start_debug.sh
+RUN ln -s /opal/start_debug.sh /start_debug.sh
+
+# run gunicorn
+CMD ["./start_debug.sh"]
+
+# SERVER IMAGE --------------------------------------
+# ---------------------------------------------------
+FROM common AS server
+
+RUN apt-get update && apt-get install -y openssh-client git && apt-get clean
+RUN git config --global core.symlinks false # Mitigate CVE-2024-32002
+
+USER opal
+
+# Potentially trust POLICY REPO HOST ssh signature --
+# opal tracks a remote (git) repository and fetches policy (e.g. rego) from it.
+# however, if the policy repo uses an ssh url scheme, authentication to said repo
+# is done via ssh, and without adding the repo remote host (i.e: github.com) to
+# the ssh known hosts file, ssh will output an interactive prompt that
+# looks something like this:
+# The authenticity of host 'github.com (192.30.252.131)' can't be established.
+# RSA key fingerprint is 16:27:ac:a5:76:28:1d:52:13:1a:21:2d:bz:1d:66:a8.
+# Are you sure you want to continue connecting (yes/no)? 
+# if the docker build arg `TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT` is set to `true` +# (default), the host specified by `POLICY_REPO_HOST` build arg (i.e: `github.com`) +# will be added to the known ssh hosts file at build time and prevent said prompt +# from showing. +ARG TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT="true" +ARG POLICY_REPO_HOST="github.com" + +RUN if [ "$TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT" = "true" ] ; then \ + mkdir -p ~/.ssh && \ + chmod 0700 ~/.ssh && \ + ssh-keyscan -t rsa ${POLICY_REPO_HOST} >> ~/.ssh/known_hosts ; fi + +USER root + +# install the opal-server package +RUN cd ./packages/opal-server && python setup.py install + +USER opal diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/fixtures/broadcasters.py b/tests/fixtures/broadcasters.py new file mode 100644 index 000000000..0fa65d246 --- /dev/null +++ b/tests/fixtures/broadcasters.py @@ -0,0 +1,110 @@ +import pytest +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests.containers.kafka_broadcast_container import KafkaBroadcastContainer +from tests.containers.kafka_ui_container import KafkaUIContainer +from tests.containers.postgres_broadcast_container import PostgresBroadcastContainer +from tests.containers.redis_broadcast_container import RedisBroadcastContainer +from tests.containers.redis_ui_container import RedisUIContainer +from tests.containers.settings.postgres_broadcast_settings import ( + PostgresBroadcastSettings, +) +from tests.containers.zookeeper_container import ZookeeperContainer + +logger = setup_logger(__name__) + + +@pytest.fixture(scope="session") +def postgres_broadcast_channel(opal_network: Network): + """Fixture that yields a running Postgres broadcast channel container. + + The container is started once and kept running throughout the entire + test session. 
It is stopped once all tests have finished running, + unless an exception is raised during teardown. + """ + try: + with PostgresBroadcastContainer( + network=opal_network, settings=PostgresBroadcastSettings() + ) as container: + yield container + + try: + if container.get_wrapped_container().status == "running": + container.stop() + except Exception: + logger.error(f"Failed to stop containers: {container}") + return + + except Exception as e: + logger.error( + f"Failed on container: {container} with error: {e} {e.__traceback__}" + ) + return + + +@pytest.fixture(scope="session") +def kafka_broadcast_channel(opal_network: Network): + """Fixture that sets up a Kafka broadcast channel for testing purposes. + + This fixture initializes a Zookeeper container, a Kafka container, + and a Kafka UI container, connecting them to the specified network. + It yields a list of these containers, which remain running + throughout the test session. At the end of the session, it attempts + to stop each container, logging an error if any container fails to + stop. + """ + + with ZookeeperContainer(opal_network) as zookeeper_container: + with KafkaBroadcastContainer( + opal_network, zookeeper_container + ) as kafka_container: + with KafkaUIContainer(opal_network, kafka_container) as kafka_ui_container: + containers = [zookeeper_container, kafka_container, kafka_ui_container] + yield containers + + for container in containers: + try: + container.stop() + except Exception: + logger.error(f"Failed to stop container: {container}") + return + + +@pytest.fixture(scope="session") +def redis_broadcast_channel(opal_network: Network): + """Fixture that yields a running redis broadcast channel container. + + The fixture starts a redis broadcast container and a redis ui + container. The yield value is a list of the two containers. The + fixture stops the containers after the test is done. 
+ """ + with RedisBroadcastContainer(opal_network) as redis_container: + with RedisUIContainer(opal_network, redis_container) as redis_ui_container: + containers = [redis_container, redis_ui_container] + yield containers + + for container in containers: + try: + container.stop() + except Exception: + logger.error(f"Failed to stop containers: {container}") + return + + +@pytest.fixture(scope="session") +def broadcast_channel(opal_network: Network, postgres_broadcast_channel): + """Fixture that yields a running broadcast channel container. + + The container is started once and kept running throughout the entire + test session. It is stopped once all tests have finished running, + unless an exception is raised during teardown. + """ + + yield postgres_broadcast_channel + + try: + postgres_broadcast_channel.stop() + except Exception: + logger.error(f"Failed to stop containers: {postgres_broadcast_channel}") + return diff --git a/tests/fixtures/images.py b/tests/fixtures/images.py new file mode 100644 index 000000000..fcc7964a7 --- /dev/null +++ b/tests/fixtures/images.py @@ -0,0 +1,91 @@ +import pytest + +import docker +from tests import utils +from tests.settings import pytest_settings, session_matrix + + +@pytest.fixture(scope="session") +def opal_server_image(session_matrix): + """Builds a Docker image for the OPAL server in debug mode. + + Yields the name of the built image. + + This fixture is used to provide a working OPAL server image for the + tests. + """ + + if pytest_settings.do_not_build_images: + yield "permitio/opal-server:latest" + return + + image_name = "opal_server_debug_local:latest" + yield from utils.build_docker_image( + "Dockerfile.server.local", image_name, session_matrix + ) + + +@pytest.fixture(scope="session") +def opa_image(session_matrix): + """Builds a Docker image containing the Open Policy Agent (OPA) binary. + + Yields the name of the built image. + + This fixture is used to provide a working OPA image for the tests. 
+ """ + image_name = "opa" + + yield from utils.build_docker_image("Dockerfile.opa", image_name, session_matrix) + + +@pytest.fixture(scope="session") +def cedar_image(session_matrix): + """Builds a Docker image containing the Cedar binary. + + Yields the name of the built image. + + This fixture is used to provide a working Cedar image for the tests. + """ + image_name = "cedar" + + yield from utils.build_docker_image("Dockerfile.cedar", image_name, session_matrix) + + +@pytest.fixture(scope="session") +def opal_client_image(session_matrix): + """Builds a Docker image containing the OPAL client binary. + + Yields the name of the built image. + + This fixture is used to provide a working OPAL client image for the + tests. + """ + if pytest_settings.do_not_build_images: + yield "permitio/opal-client:latest" + return + + image_name = "opal_client_debug_local:latest" + + yield from utils.build_docker_image( + "Dockerfile.client.local", image_name, session_matrix + ) + + +@pytest.fixture(scope="session") +def opal_client_with_opa_image(session_matrix): + """Builds a Docker image containing the OPAL client binary. + + Yields the name of the built image. + + This fixture is used to provide a working OPAL client image for the + tests. 
+ """ + if pytest_settings.do_not_build_images: + yield "permitio/opal-client:latest" + return + + image_name = "opal_client_with_opa_debug_local:latest" + + yield from utils.build_docker_image( + "Dockerfile.client_opa.local", image_name, session_matrix + ) diff --git a/tests/fixtures/policy_repos.py b/tests/fixtures/policy_repos.py new file mode 100644 index 000000000..d6d08dc52 --- /dev/null +++ b/tests/fixtures/policy_repos.py @@ -0,0 +1,111 @@ +import os + +import pytest +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests.containers.gitea_container import GiteaContainer +from tests.containers.settings.gitea_settings import GiteaSettings +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_factory import ( + PolicyRepoFactory, + SupportedPolicyRepo, +) +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings +from tests.settings import pytest_settings + +logger = setup_logger(__name__) + + +@pytest.fixture(scope="session") +def gitea_settings(): + """Returns a GiteaSettings object with default values for the Gitea + container name, repository name, temporary directory, and data directory. + + This fixture is used to create a Gitea container for testing and to + initialize the repository settings for the policy repository. + + :return: A GiteaSettings object with default settings. + """ + return GiteaSettings( + container_name="gitea_server", + repo_name="test_repo", + temp_dir=os.path.join(os.path.dirname(__file__), "temp"), + data_dir=os.path.join(os.path.dirname(__file__), "../policies"), + ) + + +@pytest.fixture(scope="session") +def gitea_server(opal_network: Network, gitea_settings: GiteaSettings): + """Creates a Gitea container and initializes a test repository. + + The Gitea container is created with the default settings for the + container name, repository name, temporary directory, and data + directory. 
The container is then started and the test repository is + initialized. + + The fixture yields the GiteaContainer object, which can be used to + interact with the Gitea container. + + :param opal_network: The network to create the container on. + :param gitea_settings: The settings for the Gitea container. + :return: The GiteaContainer object. + """ + with GiteaContainer( + settings=gitea_settings, + network=opal_network, + ) as gitea_container: + gitea_container.deploy_gitea() + gitea_container.init_repo() + yield gitea_container + + +@pytest.fixture(scope="session") +def policy_repo( + gitea_settings: GiteaSettings, temp_dir: str, request +) -> PolicyRepoBase: + """Creates a policy repository for testing. + + This fixture creates a policy repository based on the configuration + specified in pytest.ini. The repository is created with the default + branch name "master" and is initialized with the policies from the + source repository specified in pytest.ini. + + The fixture yields the PolicyRepoBase object, which can be used to + interact with the policy repository. + + :param gitea_settings: The settings for the Gitea container. + :param temp_dir: The temporary directory to use for the policy + repository. + :param request: The pytest request object. + :return: The PolicyRepoBase object. 
+ """ + if pytest_settings.policy_repo_provider == SupportedPolicyRepo.GITEA: + gitea_server = request.getfixturevalue("gitea_server") + + repo_settings = PolicyRepoSettings( + temp_dir, + pytest_settings.repo_owner, + pytest_settings.repo_name, + "master", + gitea_settings.container_name, + gitea_settings.port_http, + gitea_settings.port_ssh, + pytest_settings.repo_password, + None, + pytest_settings.ssh_key_path, + pytest_settings.source_repo_owner, + pytest_settings.source_repo_name, + True, + True, + pytest_settings.webhook_secret, + ) + policy_repo = PolicyRepoFactory( + pytest_settings.policy_repo_provider + ).get_policy_repo( + repo_settings, + logger, + ) + + policy_repo.setup(gitea_settings) + return policy_repo diff --git a/tests/fixtures/policy_stores.py b/tests/fixtures/policy_stores.py new file mode 100644 index 000000000..8838fa833 --- /dev/null +++ b/tests/fixtures/policy_stores.py @@ -0,0 +1,89 @@ +import pytest +from images import cedar_image, opa_image +from testcontainers.core.network import Network + +from tests.containers.cedar_container import CedarContainer +from tests.containers.opa_container import OpaContainer, OpaSettings +from tests.containers.settings.cedar_settings import CedarSettings + + +@pytest.fixture(scope="session") +def opa_server(opal_network: Network, opa_image): + """OPA server fixture. + + This fixture starts an OPA server and stops it after all tests have been + executed. The OPA server is started in a separate thread and is available + under the name "opa" in the test container network. + + The fixture yields the container object, which can be used to access the + container logs or to execute commands inside the container. + + The fixture is scoped to the session, meaning it is executed only once per + test session. + + Parameters + ---------- + opal_network : Network + The network to which the OPA server should be connected. + opa_image : str + The OPA server image to use. 
+ + Yields + ------ + container : OpaContainer + The OPA server container object. + """ + with OpaContainer( + settings=OpaSettings( + container_name="opa", + image=opa_image, + ), + network=opal_network, + ) as container: + assert container.wait_for_log( + log_str="Server started", timeout=30 + ), "OPA server did not start." + yield container + + container.stop() + + +@pytest.fixture(scope="session") +def cedar_server(opal_network: Network, cedar_image): + """CEDAR server fixture. + + This fixture starts a CEDAR server and stops it after all tests have been + executed. The CEDAR server is started in a separate thread and is available + under the name "cedar" in the test container network. + + The fixture yields the container object, which can be used to access the + container logs or to execute commands inside the container. + + The fixture is scoped to the session, meaning it is executed only once per + test session. + + Parameters + ---------- + opal_network : Network + The network to which the CEDAR server should be connected. + cedar_image : str + The CEDAR server image to use. + + Yields + ------ + container : CedarContainer + The CEDAR server container object. + """ + with CedarContainer( + settings=CedarSettings( + container_name="cedar", + image=cedar_image, + ), + network=opal_network, + ) as container: + # assert container.wait_for_log( + # log_str="Server started", timeout=30 + # ), "CEDAR server did not start." + yield container + + container.stop() diff --git a/tests/genopalkeys.sh b/tests/genopalkeys.sh new file mode 100644 index 000000000..b57a2ce06 --- /dev/null +++ b/tests/genopalkeys.sh @@ -0,0 +1,16 @@ +# This function generates a pair of RSA keys using ssh-keygen, extracts the public key into OPAL_AUTH_PUBLIC_KEY, +# formats the private key by replacing newlines with underscores and stores it in OPAL_AUTH_PRIVATE_KEY, +# and then removes the key files. It outputs messages indicating the start and completion of key generation. 
+ +function generate_opal_keys { + echo "- Generating OPAL keys" + + ssh-keygen -q -t rsa -b 4096 -m pem -f opal_crypto_key -N "" + OPAL_AUTH_PUBLIC_KEY="$(cat opal_crypto_key.pub)" + OPAL_AUTH_PRIVATE_KEY="$(tr '\n' '_' /dev/null 2>&1 + + if ! command -v opal-server &> /dev/null || ! command -v opal-client &> /dev/null; then + echo "Installation failed: opal-server or opal-client is not available." + exit 1 + fi + + echo "- opal-server and opal-client successfully installed." +} + +install_opal_server_and_client diff --git a/tests/policies/rbac.rego b/tests/policies/rbac.rego new file mode 100644 index 000000000..fa09dc922 --- /dev/null +++ b/tests/policies/rbac.rego @@ -0,0 +1,9 @@ +package app.rbac +default allow = false + +# Allow the action if the user is granted permission to perform the action. +allow { + # unless user location is outside US + country := data.users[input.user].location.country + country == "US" +} diff --git a/tests/policy_repos/gitea_policy_repo.py b/tests/policy_repos/gitea_policy_repo.py new file mode 100644 index 000000000..e3efa161c --- /dev/null +++ b/tests/policy_repos/gitea_policy_repo.py @@ -0,0 +1,106 @@ +import codecs +import os + +from git import GitCommandError, Repo + +from tests.containers.settings.gitea_settings import GiteaSettings +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings + + +class GiteaPolicyRepo(PolicyRepoBase): + def __init__(self, settings: PolicyRepoSettings, *args): + super().__init__() + self.settings = settings + + def setup(self, settings: PolicyRepoSettings): + self.settings = settings + + def get_repo_url(self): + if self.settings is None: + raise Exception("Gitea settings not set") + + return f"http://{self.settings.container_name}:{self.settings.port_http}/{self.settings.username}/{self.settings.repo_name}.git" + + def clone_and_update( + self, + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + 
COMMIT_MESSAGE, + ): + """Clone the repository, update the specified branch, and push + changes.""" + self.prepare_directory(CLONE_DIR) # Clean up and prepare the directory + print(f"Processing branch: {branch}") + + # Clone the repository for the specified branch + print(f"Cloning branch {branch}...") + repo = Repo.clone_from(authenticated_url, CLONE_DIR, branch=branch) + + # Create or update the specified file with the provided content + file_path = os.path.join(CLONE_DIR, file_name) + with open(file_path, "w") as f: + f.write(file_content) + + # Stage the changes + print(f"Staging changes for branch {branch}...") + repo.git.add(A=True) # Add all changes + + # Commit the changes if there are modifications + if repo.is_dirty(): + print(f"Committing changes for branch {branch}...") + repo.index.commit(COMMIT_MESSAGE) + + # Push changes to the remote repository + print(f"Pushing changes for branch {branch}...") + try: + repo.git.push(authenticated_url, branch) + except GitCommandError as e: + print(f"Error pushing branch {branch}: {e}") + + def update_branch(self, branch, file_name, file_content): + temp_dir = self.settings.local_clone_path + + self.logger.info( + f"Updating branch '{branch}' with file '{file_name}' content..." 
+ ) + + # Decode escape sequences in the file content + file_content = codecs.decode(file_content, "unicode_escape") + + GITEA_REPO_URL = f"http://localhost:{self.settings.repo_port}/{self.settings.owner}/{self.settings.repo_name}.git" + username = self.settings.owner + PASSWORD = self.settings.password + CLONE_DIR = os.path.join(temp_dir, "branch_update") + COMMIT_MESSAGE = "Automated update commit" + + # Append credentials to the repository URL + authenticated_url = GITEA_REPO_URL.replace( + "http://", f"http://{username}:{PASSWORD}@" + ) + + try: + self.clone_and_update( + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ) + print("Operation completed successfully.") + finally: + # Ensure cleanup is performed regardless of success or failure + self.cleanup(CLONE_DIR) + + def cleanup(self): + return super().cleanup() + + def setup_webhook(self, host, port): + return super().setup_webhook(host, port) + + def create_webhook(self): + return super().create_webhook() diff --git a/tests/policy_repos/github_policy_repo.py b/tests/policy_repos/github_policy_repo.py new file mode 100644 index 000000000..5b7f44eaf --- /dev/null +++ b/tests/policy_repos/github_policy_repo.py @@ -0,0 +1,411 @@ +import codecs +import logging +import os +import random +import shutil +import subprocess + +import requests +from git import GitCommandError, Repo +from github import Auth, Github +from testcontainers.core.utils import setup_logger + +from tests import utils +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings + + +class GithubPolicyRepo(PolicyRepoBase): + def __init__( + self, + settings: PolicyRepoSettings, + logger: logging.Logger = setup_logger(__name__), + ): + self.logger = logger + self.load_from_env() + + self.protocol = "git" + self.host = "github.com" + self.port = 22 + self.temp_dir = settings.local_clone_path + self.ssh_key_name = "OPAL_PYTEST" + + 
self.owner = settings.owner if settings.owner else self.owner + self.password = settings.password + self.github_pat = settings.pat if settings.pat else self.github_pat + self.repo = settings.repo_name if settings.repo_name else self.repo + + self.source_repo_owner = ( + settings.source_repo_owner + if settings.source_repo_owner + else self.source_repo_owner + ) + self.source_repo_name = ( + settings.source_repo_name + if settings.source_repo_name + else self.source_repo_name + ) + + self.local_repo_path = os.path.join(self.temp_dir, self.source_repo_name) + self.ssh_key_path = ( + settings.ssh_key_path if settings.ssh_key_path else self.ssh_key_path + ) + self.should_fork = settings.should_fork + self.webhook_secret = ( + settings.webhook_secret if settings.webhook_secret else self.webhook_secret + ) + + if not self.password and not self.github_pat and not self.ssh_key_path: + self.logger.error("No password or Github PAT or SSH key provided.") + raise Exception("No authentication method provided.") + + self.load_ssh_key() + + def load_from_env(self): + self.owner = os.getenv("OPAL_TARGET_ACCOUNT", None) + self.github_pat = os.getenv("OPAL_GITHUB_PAT", None) + self.ssh_key_path = os.getenv( + "OPAL_PYTEST_POLICY_REPO_SSH_KEY_PATH", "~/.ssh/id_rsa" + ) + self.repo = os.getenv("OPAL_TARGET_REPO_NAME", "opal-example-policy-repo") + self.source_repo_owner = os.getenv("OPAL_SOURCE_ACCOUNT", "permitio") + self.source_repo_name = os.getenv( + "OPAL_SOURCE_REPO_NAME", "opal-example-policy-repo" + ) + self.webhook_secret: str = os.getenv("OPAL_WEBHOOK_SECRET", "xxxxx") + + def load_ssh_key(self): + if self.ssh_key_path.startswith("~"): + self.ssh_key_path = os.path.expanduser("~/.ssh/id_rsa") + + if not os.path.exists(self.ssh_key_path): + self.logger.debug(f"SSH key file not found at {self.ssh_key_path}") + + self.logger.debug("Generating new SSH key...") + ssh_keys = utils.generate_ssh_key_pair() + self.ssh_key = ssh_keys["public"] + self.private_key = ssh_keys["private"] 
+ + try: + with open(self.ssh_key_path, "r") as ssh_key_file: + self.ssh_key = ssh_key_file.read().strip() + + os.environ["OPAL_POLICY_REPO_SSH_KEY"] = self.ssh_key + except Exception as e: + self.logger.error(f"Error loading SSH key: {e}") + + def setup_webhook(self, host, port): + self.webhook_host = host + self.webhook_port = port + + def set_envvars(self): + # Update .env file + with open(".env", "a") as env_file: + env_file.write(f'OPAL_POLICY_REPO_URL="{self.get_repo_url()}"\n') + env_file.write(f'OPAL_POLICY_REPO_BRANCH="{self.test_branch}"\n') + + with open(".env", "a") as env_file: + env_file.write(f'OPAL_POLICY_REPO_SSH_KEY="{self.ssh_key}"\n') + + def get_repo_url(self): + return self.build_repo_url(self.owner, self.repo) + + def build_repo_url(self, owner, repo) -> str: + if owner is None: + raise Exception("Owner not set") + + if self.protocol == "ssh" or self.protocol == "git": + return f"git@{self.host}:{owner}/{repo}.git" + + if self.protocol == "http" or self.protocol == "https": + if self.github_pat: + return f"{self.protocol}://{self.host}/{owner}/{repo}.git" + + if self.password is None and self.github_pat is None and self.ssh_key is None: + raise Exception("No authentication method set") + + return f"{self.protocol}://{self.owner}:{self.password}@{self.host}:{self.port}/{owner}/{repo}" + + def get_source_repo_url(self): + return self.build_repo_url(self.source_repo_owner, self.source_repo_name) + + def clone_initial_repo(self): + Repo.clone_from(self.get_source_repo_url(), self.local_repo_path) + + def check_repo_exists(self): + try: + gh = Github(auth=Auth.Token(self.github_pat)) + repo_list = gh.get_user().get_repos() + for repo in repo_list: + if repo.full_name == self.repo: + self.logger.debug(f"Repository {self.repo} already exists.") + return True + + except Exception as e: + self.logger.error(f"Error checking repository existence: {e}") + + return False + + def create_target_repo(self): + if self.check_repo_exists(): + return + + try: + 
gh = Github(auth=Auth.Token(self.github_pat)) + gh.get_user().create_repo(self.repo) + self.logger.info(f"Repository {self.repo} created successfully.") + except Exception as e: + self.logger.error(f"Error creating repository: {e}") + + def fork_target_repo(self): + if self.check_repo_exists(): + return + + self.logger.debug(f"Forking repository {self.source_repo_name}...") + + if self.github_pat is None: + try: + gh = Github(auth=Auth.Token(self.github_pat)) + gh.get_user().create_fork(self.source_repo_owner, self.source_repo_name) + self.logger.info( + f"Repository {self.source_repo_name} forked successfully." + ) + except Exception as e: + self.logger.error(f"Error forking repository: {e}") + return + + # Try with PAT + try: + headers = {"Authorization": f"token {self.github_pat}"} + response = requests.post( + f"https://api.github.com/repos/{self.source_repo_owner}/{self.source_repo_name}/forks", + headers=headers, + ) + if response.status_code == 202: + self.logger.info("Fork created successfully!") + else: + self.logger.error(f"Error creating fork: {response.status_code}") + self.logger.debug(response.json()) + + except Exception as e: + self.logger.error(f"Error forking repository: {str(e)}") + + def cleanup(self): + self.delete_test_branches() + + def delete_test_branches(self): + """Deletes all branches starting with 'test-' from the specified + repository.""" + + try: + self.logger.info(f"Deleting test branches from {self.repo}...") + + # Initialize Github API + gh = Github(auth=Auth.Token(self.github_pat)) + + # Get the repository + repo = gh.get_user().get_repo(self.repo) + + # Enumerate branches and delete pytest- branches + branches = repo.get_branches() + for branch in branches: + if branch.name.startswith("test-"): + ref = f"heads/{branch.name}" + repo.get_git_ref(ref).delete() + self.logger.info(f"Deleted branch: {branch.name}") + else: + self.logger.info(f"Skipping branch: {branch.name}") + + self.logger.info("All test branches have been deleted 
successfully.") + except Exception as e: + self.logger.error(f"An error occurred: {e}") + + return + + def generate_test_branch(self): + self.test_branch = ( + f"test-{random.randint(1000, 9999)}{random.randint(1000, 9999)}" + ) + os.environ["OPAL_POLICY_REPO_BRANCH"] = self.test_branch + + def create_test_branch(self): + try: + # Initialize the repository + repo = Repo(self.local_repo_path) + + # Ensure the repository is clean + if repo.is_dirty(untracked_files=True): + raise RuntimeError( + "The repository has uncommitted changes. Commit or stash them before proceeding." + ) + + # Set the origin remote URL + remote_url = f"https://github.com/{self.owner}/{self.repo}.git" + if "origin" in repo.remotes: + origin = repo.remote(name="origin") + origin.set_url(remote_url) # Update origin URL if it exists + else: + origin = repo.create_remote( + "origin", remote_url + ) # Create origin remote if it doesn't exist + + self.logger.debug(f"Origin set to: {remote_url}") + + # Create and checkout the new branch + new_branch = repo.create_head(self.test_branch) # Create branch + new_branch.checkout() # Switch to the new branch + + # Push the new branch to the remote + origin.push(refspec=f"{self.test_branch}:{self.test_branch}") + + self.logger.info( + f"Branch '{self.test_branch}' successfully created and pushed." 
+ ) + except GitCommandError as e: + self.logger.error(f"Git command failed: {e}") + except Exception as e: + self.logger.error(f"An error occurred: {e}") + + def cleanup(self, delete_repo=True, delete_ssh_key=True): + subprocess.run(["rm", "-rf", "./opal-example-policy-repo"], check=True) + + self.delete_test_branches() + + if delete_repo: + self.delete_repo() + + if delete_ssh_key: + self.delete_ssh_key() + + def delete_ssh_key(self): + gh = Github(auth=Auth.Token(self.github_pat)) + user = gh.get_user() + keys = user.get_keys() + for key in keys: + if key.title == self.ssh_key_name: + key.delete() + self.logger.debug(f"SSH key deleted: {key.title}") + break + + self.logger.debug("All OPAL SSH keys have been deleted successfully.") + + return + + def delete_repo(self): + try: + gh = Github(auth=Auth.Token(self.github_pat)) + repo = gh.get_user().get_repo(self.repo) + repo.delete() + self.logger.debug(f"Repository {self.repo} deleted successfully.") + except Exception as e: + self.logger.error(f"Error deleting repository: {e}") + + def setup(self): + self.clone_initial_repo() + + if self.should_fork: + self.fork_target_repo() + else: + self.create_target_repo() + + self.generate_test_branch() + self.create_test_branch() + + def add_ssh_key(self): + gh = Github(auth=Auth.Token(self.github_pat)) + user = gh.get_user() + keys = user.get_keys() + for key in keys: + if key.title == self.ssh_key_name: + return + + key = user.create_key(self.ssh_key_name, self.ssh_key) + self.logger.info(f"SSH key added: {key.title}") + + def create_webhook(self): + try: + gh = Github(auth=Auth.Token(self.github_pat)) + self.logger.info( + f"Creating webhook for repository {self.owner}/{self.repo}" + ) + repo = gh.get_user().get_repo(f"{self.repo}") + url = utils.create_localtunnel(self.webhook_port) + self.logger.info(f"Webhook URL: {url}") + self.github_webhook = repo.create_hook( + "web", + { + "url": f"{url}/webhook", + "content_type": "json", + f"secret": "abc123", + "insecure_ssl": 
"1", + }, + events=["push"], + active=True, + ) + self.logger.info("Webhook created successfully.") + except Exception as e: + self.logger.error(f"Error creating webhook: {e}") + + def delete_webhook(self): + try: + gh = Github(auth=Auth.Token(self.github_pat)) + repo = gh.get_user().get_repo(f"{self.repo}") + repo.delete_hook(self.github_webhook.id) + self.logger.info("Webhook deleted successfully.") + except Exception as e: + self.logger.error(f"Error deleting webhook: {e}") + + def update_branch(self, file_name, file_content): + self.logger.info( + f"Updating branch '{self.test_branch}' with file '{file_name}' content..." + ) + + # Decode escape sequences in the file content + if file_content is not None: + file_content = codecs.decode(file_content, "unicode_escape") + + # Create or update the specified file with the provided content + file_path = os.path.join(self.local_repo_path, file_name) + with open(file_path, "w") as f: + f.write(file_content) + + if file_content is None: + with open(file_path, "r") as f: + file_content = f.read() + + try: + # Stage the changes + self.logger.debug(f"Staging changes for branch {self.test_branch}...") + gh = Github(auth=Auth.Token(self.github_pat)) + repo = gh.get_user().get_repo(self.repo) + branch_ref = f"heads/{self.test_branch}" + ref = repo.get_git_ref(branch_ref) + latest_commit = repo.get_git_commit(ref.object.sha) + base_tree = latest_commit.commit.tree + new_tree = repo.create_git_tree( + [ + { + "path": file_name, + "mode": "100644", + "type": "blob", + "content": file_content, + } + ], + base_tree, + ) + new_commit = repo.create_git_commit( + f"Commit changes for branch {self.test_branch}", + new_tree, + [latest_commit], + ) + ref.edit(new_commit.sha) + self.logger.debug(f"Changes pushed for branch {self.test_branch}.") + + except Exception as e: + self.logger.error(f"Error updating branch: {e}") + return False + + return True + + def remove_webhook(self): + self.github_webhook.delete() diff --git 
a/tests/policy_repos/gitlab_policy_repo.py b/tests/policy_repos/gitlab_policy_repo.py new file mode 100644 index 000000000..00dc0f7f5 --- /dev/null +++ b/tests/policy_repos/gitlab_policy_repo.py @@ -0,0 +1,103 @@ +import codecs + +from tests.policy_repos.policy_repo_base import PolicyRepoBase + + +class GitlabPolicyRepo(PolicyRepoBase): + def __init__(self, owner, repo, token): + self.owner = owner + self.repo = repo + self.token = token + + def clone_and_update( + self, + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ): + """Clone the repository, update the specified branch, and push + changes.""" + self.prepare_directory(CLONE_DIR) # Clean up and prepare the directory + print(f"Processing branch: {branch}") + + # Clone the repository for the specified branch + print(f"Cloning branch {branch}...") + repo = Repo.clone_from(authenticated_url, CLONE_DIR, branch=branch) + + # Create or update the specified file with the provided content + file_path = os.path.join(CLONE_DIR, file_name) + with open(file_path, "w") as f: + f.write(file_content) + + # Stage the changes + print(f"Staging changes for branch {branch}...") + repo.git.add(A=True) # Add all changes + + # Commit the changes if there are modifications + if repo.is_dirty(): + print(f"Committing changes for branch {branch}...") + repo.index.commit(COMMIT_MESSAGE) + repo.git.push("origin", branch) + + # Clean up the cloned repository + print(f"Cleaning up branch {branch}...") + shutil.rmtree(CLONE_DIR) + + print(f"Branch {branch} processed successfully.") + + def update_branch(self, branch, file_name, file_content): + temp_dir = self.settings.temp_dir + + self.logger.debug( + f"Updating branch '{branch}' with file '{file_name}' content..." 
+ ) + + # Decode escape sequences in the file content + file_content = codecs.decode(file_content, "unicode_escape") + + GITHUB_REPO_URL = ( + f"https://github.com/{self.settings.username}/{self.settings.repo_name}.git" + ) + username = self.settings.username + PASSWORD = self.settings.password + CLONE_DIR = os.path.join(temp_dir, "branch_update") + COMMIT_MESSAGE = "Automated update commit" + + # Append credentials to the repository URL + authenticated_url = GITHUB_REPO_URL.replace( + "https://", f"https://{username}:{PASSWORD}@" + ) + + try: + self.clone_and_update( + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ) + except Exception as e: + self.logger.error(f"Error updating branch: {e}") + return False + return True + + # implementation using git subprocess + # try: + # # Change to the policy repository directory + # os.chdir(opal_repo_path) + + # # Create a .rego file with the policy name as the package + # with open(regofile, "w") as f: + # f.write(f"package {policy_name}\n") + + # # Run Git commands to add, commit, and push the policy file + # subprocess.run(["git", "add", regofile], check=True) + # subprocess.run(["git", "commit", "-m", f"Add {regofile}"], check=True) + # subprocess.run(["git", "push"], check=True) + # finally: + # # Change back to the previous directory + # os.chdir("..") diff --git a/tests/policy_repos/policy_repo_base.py b/tests/policy_repos/policy_repo_base.py new file mode 100644 index 000000000..65701cafe --- /dev/null +++ b/tests/policy_repos/policy_repo_base.py @@ -0,0 +1,27 @@ +from abc import ABC, abstractmethod + + +class PolicyRepoBase(ABC): + @abstractmethod + def get_repo_url(self) -> str: + pass + + @abstractmethod + def setup_webhook(self, host, port): + pass + + @abstractmethod + def setup(self) -> None: + pass + + @abstractmethod + def cleanup(self) -> None: + pass + + @abstractmethod + def update_branch(self, file_name, file_content) -> None: + pass + + @abstractmethod + def 
import logging
import os
from enum import Enum


class SupportedPolicyRepo(Enum):
    """Policy-repository providers the test suite knows how to drive."""

    GITEA = "Gitea"
    GITHUB = "Github"
    GITLAB = "Gitlab"
    # BITBUCKET = "Bitbucket"
    # AZURE_DEVOPS = "AzureDevOps"


class PolicyRepoFactory:
    """Factory that builds a policy-repository driver from its provider name."""

    def __init__(self, policy_repo: str = SupportedPolicyRepo.GITEA):
        """
        :param policy_repo: The type of policy repository -- either a
            SupportedPolicyRepo member or its string value. Defaults to GITEA.
        :raises ValueError: if the value names no supported provider.
        """
        self.assert_exists(policy_repo)
        self.policy_repo = policy_repo

    def get_policy_repo(
        self,
        settings: "PolicyRepoSettings",
        logger: logging.Logger = None,
    ) -> "PolicyRepoBase":
        """Instantiate the driver matching ``self.policy_repo``.

        :param settings: Settings object handed to the driver's constructor.
        :param logger: Optional logger. BUG FIX: resolved at call time instead
            of evaluating ``setup_logger(__name__)`` once at import time.
        """
        # Imported lazily so that importing the factory does not pull in
        # every provider implementation (and their dependencies).
        from tests.policy_repos.gitea_policy_repo import GiteaPolicyRepo
        from tests.policy_repos.github_policy_repo import GithubPolicyRepo
        from tests.policy_repos.gitlab_policy_repo import GitlabPolicyRepo

        if logger is None:
            logger = logging.getLogger(__name__)

        factory = {
            SupportedPolicyRepo.GITEA: GiteaPolicyRepo,
            SupportedPolicyRepo.GITHUB: GithubPolicyRepo,
            SupportedPolicyRepo.GITLAB: GitlabPolicyRepo,
        }

        return factory[SupportedPolicyRepo(self.policy_repo)](settings)

    def assert_exists(self, policy_repo: str) -> bool:
        """Validate that *policy_repo* names a supported provider.

        :returns: True when valid. BUG FIX: the original was annotated
            ``-> bool`` but never returned anything.
        :raises ValueError: listing the valid values otherwise.
        """
        try:
            SupportedPolicyRepo(policy_repo)
        except ValueError:
            raise ValueError(
                f"Unsupported REPO_SOURCE value: {policy_repo}. Must be one of {[e.value for e in SupportedPolicyRepo]}"
            )
        return True
Must be one of {[e.value for e in SupportedPolicyRepo]}" + ) diff --git a/tests/policy_repos/policy_repo_settings.py b/tests/policy_repos/policy_repo_settings.py new file mode 100644 index 000000000..975c0edb8 --- /dev/null +++ b/tests/policy_repos/policy_repo_settings.py @@ -0,0 +1,36 @@ +class PolicyRepoSettings: + def __init__( + self, + local_clone_path: str | None = None, + owner: str | None = None, + repo_name: str | None = None, + branch_name: str | None = None, + repo_host: str | None = None, + repo_port_http: int | None = None, + repo_port_ssh: int | None = None, + password: str | None = None, + pat: str | None = None, + ssh_key_path: str | None = None, + source_repo_owner: str | None = None, + source_repo_name: str | None = None, + should_fork: bool = False, + should_create_repo: bool = False, # if True, will create the repo, if the should_fork is False. + # If should_fork is True, it will fork and not create the repo from scratch. + # if False, the an existing repository is expected + webhook_secret: str | None = None, + ): + self.local_clone_path = local_clone_path + self.owner = owner + self.repo_name = repo_name + self.branch_name = branch_name + self.repo_host = repo_host + self.repo_port_http = repo_port_http + self.repo_port_ssh = repo_port_ssh + self.password = password + self.pat = pat + self.ssh_key_path = ssh_key_path + self.source_repo_owner = source_repo_owner + self.source_repo_name = source_repo_name + self.should_fork = should_fork + self.should_create_repo = should_create_repo + self.webhook_secret = webhook_secret diff --git a/tests/pytest.ini b/tests/pytest.ini new file mode 100644 index 000000000..87ffbfda9 --- /dev/null +++ b/tests/pytest.ini @@ -0,0 +1,8 @@ +[pytest] +asyncio_default_fixture_loop_scope = function +log_cli = true +log_level = INFO +log_cli_level = INFO +log_file = pytest_logs.log +log_file_level = DEBUG +pythonpath = fixtures diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 
#!/bin/bash
set -e

# Load local overrides if present.
if [[ -f ".env" ]]; then
  # shellcheck disable=SC1091
  source .env
fi


# Deletes pytest-generated .env files so they don't interfere with other tests.
function cleanup {
  # FIX: "[a-f,0-9]" also matched a literal comma; session ids are
  # lowercase hex only.
  PATTERN="pytest_[0-9a-f]*.env"
  echo "Looking for auto-generated .env files matching pattern '$PATTERN'..."

  for file in $PATTERN; do
    if [[ -f "$file" ]]; then
      echo "Deleting file: $file"
      rm "$file"
    else
      echo "No matching files found for pattern '$PATTERN'."
      break
    fi
  done

  # FIX: plain echo does not interpret "\n" -- use printf for the blank line.
  printf 'Cleanup complete!\n\n'
}

function main {
  # Cleanup before starting, maybe some leftovers from previous runs.
  cleanup

  echo "Running tests..."

  # Run a specific test when arguments are given, otherwise the whole suite.
  # debugpy listens on 5678 so a debugger can attach to the test process.
  if [[ -n "$1" ]]; then
    echo "Running specific test: $1"
    python -Xfrozen_modules=off -m debugpy --listen 5678 -m pytest -s "$@"
  else
    echo "Running all tests..."
    python -Xfrozen_modules=off -m debugpy --listen 5678 -m pytest -s
  fi

  echo "Done!"

  # Cleanup at the end.
  cleanup
}

main "$@"
import io
import json
import os
from contextlib import redirect_stdout
from secrets import token_hex
from typing import List

import pytest
from dotenv import load_dotenv
from opal_common.cli.commands import obtain_token
from opal_common.schemas.security import PeerType
from testcontainers.core.generic import DockerContainer
from testcontainers.core.utils import setup_logger
from testcontainers.core.waiting_utils import wait_for_logs

from tests.policy_repos.policy_repo_factory import SupportedPolicyRepo


class TestSettings:
    """Process-wide configuration for a pytest session.

    A short random session id is generated for log/file namespacing, then all
    remaining settings are read from environment variables (optionally loaded
    from a local ``.env`` file).
    """

    def __init__(self):
        """Create a new session id (2-byte hex string) and load settings from
        the environment."""
        self.session_id = token_hex(2)

        self.load_from_env()

    def load_from_env(self):
        """Load environment variables into the test settings.

        Reads a ``.env`` file (if present) via ``load_dotenv`` and populates:

        - policy_repo_provider: provider for the policy repo (default GITEA).
        - repo_owner / repo_name: target repository coordinates.
        - repo_password / github_pat / ssh_key_path: credentials.
        - source_repo_owner / source_repo_name: repo to fork/copy from.
        - webhook_secret: secret for authenticating webhooks.
        - should_fork / use_webhook / wait_for_debugger: behavior toggles.
        - do_not_build_images / skip_rebuild_images / keep_images: Docker
          image lifecycle toggles.
        """
        load_dotenv()

        self.policy_repo_provider = os.getenv(
            "OPAL_PYTEST_POLICY_REPO_PROVIDER", SupportedPolicyRepo.GITEA
        )
        self.repo_owner = os.getenv("OPAL_PYTEST_REPO_OWNER", "iwphonedo")
        self.repo_name = os.getenv("OPAL_PYTEST_REPO_NAME", "opal-example-policy-repo")
        self.repo_password = os.getenv("OPAL_PYTEST_REPO_PASSWORD")
        self.github_pat = os.getenv("OPAL_PYTEST_GITHUB_PAT")
        self.ssh_key_path = os.getenv("OPAL_PYTEST_SSH_KEY_PATH")
        self.source_repo_owner = os.getenv("OPAL_PYTEST_SOURCE_ACCOUNT", "permitio")
        self.source_repo_name = os.getenv(
            "OPAL_PYTEST_SOURCE_REPO", "opal-example-policy-repo"
        )
        self.webhook_secret = os.getenv("OPAL_PYTEST_WEBHOOK_SECRET", "xxxxx")
        # NOTE(review): these flags mix string values from the environment
        # ("true"/"false") with bool/False defaults, so consumers must not
        # rely on `is True` checks -- confirm intended types with callers.
        self.should_fork = os.getenv("OPAL_PYTEST_SHOULD_FORK", "true")
        self.use_webhook = os.getenv("OPAL_PYTEST_USE_WEBHOOK", "true")
        self.wait_for_debugger = os.getenv("OPAL_PYTEST_WAIT_FOR_DEBUGGER", False)

        # Fall back to the official permitio images of opal-server and
        # opal-client (could also be used to fall back opa and cedar).
        self.do_not_build_images = os.getenv("OPAL_PYTEST_DO_NOT_BUILD_IMAGES", False)

        # Reuse images between test sessions instead of rebuilding each run.
        # Don't use it if you changed the code, as your changes won't be
        # deployed; requires keep_images=true on a previous run.
        self.skip_rebuild_images = os.getenv("OPAL_PYTEST_SKIP_REBUILD_IMAGES", False)

        # Keep the images after the session so skip_rebuild_images can work
        # next time.
        self.keep_images = os.getenv("OPAL_PYTEST_KEEP_IMAGES", True)

    def dump_settings(self):
        """Write every OPAL*/UVICORN* environment variable to a
        session-scoped shell file (``pytest_<session_id>.env``).

        BUG FIX: the original iterated ``globals()``, which holds this
        module's imports and classes -- never the OPAL/UVICORN variables.
        The intended source is the process environment.
        """
        with open(f"pytest_{self.session_id}.env", "w") as envfile:
            envfile.write("#!/usr/bin/env bash\n\n")
            for key, val in os.environ.items():
                if key.startswith("OPAL") or key.startswith("UVICORN"):
                    envfile.write(f"export {key}='{val}'\n\n")


# Module-level singleton used by fixtures and utils.
pytest_settings = TestSettings()
class PyTestSessionSettings(List):
    """Iterable over all (broadcaster, repo_provider, mode) combinations a
    test session should run with.

    Iterating yields one dict per combination with ``is_first``/``is_final``
    flags so fixtures can perform one-time setup/teardown. The iteration
    order (mode fastest, then repo_provider, then broadcaster) matches the
    original manual-counter implementation.
    """

    # Parameter axes of the session matrix.
    repo_providers = ["gitea"]
    modes = ["without_webhook"]
    broadcasters = ["postgres"]

    def __init__(
        self,
        session_id: str = None,
        repo_provider: str = None,
        broadcaster: str = None,
        mode: str = None,
    ):
        super().__init__()

        self.session_id = session_id
        self.repo_provider = repo_provider
        self.broadcaster = broadcaster
        self.mode = mode

    def __iter__(self):
        """Yield one settings dict per matrix combination.

        BUG FIX: replaces the stateful __next__ counters (which carried junk
        placeholder class attributes and unreachable code after the return)
        with a straightforward generator producing the same sequence.
        """
        combos = [
            (broadcaster, repo_provider, mode)
            for broadcaster in self.broadcasters
            for repo_provider in self.repo_providers
            for mode in self.modes
        ]
        for index, (broadcaster, repo_provider, mode) in enumerate(combos):
            # Mirror the original behavior of recording the current
            # combination on the instance as well.
            self.broadcaster = broadcaster
            self.repo_provider = repo_provider
            self.mode = mode
            yield {
                "session_id": self.session_id,
                "repo_provider": repo_provider,
                "broadcaster": broadcaster,
                "mode": mode,
                "is_final": index == len(combos) - 1,
                "is_first": index == 0,
            }


@pytest.fixture(params=list(PyTestSessionSettings()), scope="session")
def session_matrix(request):
    """Session-scoped fixture parametrized over the full settings matrix."""
    return request.param
#!/bin/bash

echo "Starting Opal Server or Client with Debugpy in Debug Mode..."

# Set default values for variables if not already set
export GUNICORN_CONF=${GUNICORN_CONF:-./gunicorn_conf.py}
export UVICORN_PORT=${UVICORN_PORT:-8000}
export UVICORN_NUM_WORKERS=${UVICORN_NUM_WORKERS:-1}
export GUNICORN_TIMEOUT=${GUNICORN_TIMEOUT:-30}
export GUNICORN_KEEP_ALIVE_TIMEOUT=${GUNICORN_KEEP_ALIVE_TIMEOUT:-5}

# Check for OPAL_BROADCAST_URI when multiple workers are enabled
if [[ -z "${OPAL_BROADCAST_URI}" && "${UVICORN_NUM_WORKERS}" != "1" ]]; then
    echo "OPAL_BROADCAST_URI must be set when having multiple workers"
    exit 1
fi

# FIX: the ASGI app default was commented out; without it gunicorn receives
# an empty app argument and fails with a confusing error. Require it.
if [[ -z "${UVICORN_ASGI_APP}" ]]; then
    echo "UVICORN_ASGI_APP must be set (e.g. opal_server.main:app)"
    exit 1
fi

# Ensure PYTHONPATH includes the opal-server and opal-client packages
export PYTHONPATH=/opal/packages/opal-server:$PYTHONPATH
export PYTHONPATH=/opal/packages/opal-client:$PYTHONPATH
echo "PYTHONPATH: $PYTHONPATH"

# Start Gunicorn under debugpy (add --wait-for-client to block until a
# debugger attaches). Variables are quoted to survive unusual values.
exec python -m debugpy --listen 0.0.0.0:5678 \
    -m gunicorn -b "0.0.0.0:${UVICORN_PORT}" -k uvicorn.workers.UvicornWorker \
    --workers="${UVICORN_NUM_WORKERS}" -c "${GUNICORN_CONF}" "${UVICORN_ASGI_APP}" \
    -t "${GUNICORN_TIMEOUT}" --keep-alive "${GUNICORN_KEEP_ALIVE_TIMEOUT}"
def publish_data_user_location(
    src, user, DATASOURCE_TOKEN: str, port: int, topics: str = "policy_data"
):
    """Publish a user-location data update to the OPAL server.

    :param src: URL of the external data source to fetch the location from.
    :param user: User whose ``/users/<user>/location`` entry is updated.
    :param DATASOURCE_TOKEN: Datasource JWT used to authorize the update.
    :param port: Host port the OPAL server listens on.
    :param topics: Topic(s) the update is published to.
    """
    # SECURITY/ROBUSTNESS FIX: build an argument list and drop shell=True so
    # the token/user/src values are never interpreted by a shell.
    command = [
        "opal-client",
        "publish-data-update",
        "--server-url",
        f"http://localhost:{port}",
        "--src-url",
        src,
        "-t",
        topics,
        "--dst-path",
        f"/users/{user}/location",
        DATASOURCE_TOKEN,
    ]

    # Execute the command
    result = subprocess.run(command)

    # Check command execution result
    if result.returncode != 0:
        logger.error("Error: Failed to update user location!")
    else:
        logger.info(f"Successfully updated user location with source: {src}")
def update_policy(
    gitea_container: GiteaContainer,
    opal_server_container: OpalServerContainer,
    country_value,
):
    """Push an updated ``rbac.rego`` that only allows users located in
    *country_value*, then wait one polling interval so the OPAL server
    picks up the new commit."""

    policy_source = (
        "package app.rbac\n"
        "import rego.v1\n"
        "default allow := false\n\n"
        "# Allow the action if the user is granted permission to perform the action.\n"
        "allow if {\n"
        "\t# unless user location is outside US\n"
        "\tcountry := data.users[input.user].location.country\n"
        '\tcountry == "' + country_value + '"\n'
        "}"
    )

    target_branch = opal_server_container.settings.policy_repo_main_branch
    gitea_container.update_branch(target_branch, "rbac.rego", policy_source)

    # Give the server time to poll the repo and see the new commit.
    utils.wait_policy_repo_polling_interval(opal_server_container)
def test_user_location(
    opal_servers: list[OpalServerContainer],
    connected_clients: list[OpalClientContainer],
    session_matrix: PyTestSessionSettings,
):
    """Publish a location update for user 'bob' and verify every connected
    client applied it (by scanning client logs for the PUT)."""

    # All log checks are made relative to this moment.
    reference_timestamp = datetime.now(timezone.utc)
    logger.info(f"Reference timestamp: {reference_timestamp}")

    # Publish data to the OPAL server.
    logger.info(ip_to_location_base_url)
    first_server = opal_servers[0]
    datasource_token = first_server.obtain_OPAL_tokens("test_user_locaation")[
        "datasource"
    ]
    publish_data_user_location(
        f"{ip_to_location_base_url}8.8.8.8",
        "bob",
        datasource_token,
        first_server.settings.port,
    )
    logger.info("Published user location for 'bob'.")

    expected_entry = "PUT /v1/data/users/bob/location -> 204"
    for client in connected_clients:
        log_found = client.wait_for_log(expected_entry, 30, reference_timestamp)
        logger.info("Finished processing logs.")
        assert log_found, "Expected log entry not found after the reference timestamp."
+ """ + + # Parse locations into separate lists of IPs and countries + locations = [("8.8.8.8", "US"), ("77.53.31.138", "SE")] + for server in opal_servers: + DATASOURCE_TOKEN = server.obtain_OPAL_tokens("test_policy_and_data_updates")[ + "datasource" + ] + + for location in locations: + # Update policy to allow only non-US users + print(f"Updating policy to allow only users from {location[1]}...") + update_policy(gitea_server, server, location[1]) + + for client in opal_clients: + assert await data_publish_and_test( + "bob", + location[1], + locations, + DATASOURCE_TOKEN, + client, + server.settings.port, + ) + + +@pytest.mark.parametrize("attempts", [10]) # Number of attempts to repeat the check +def test_read_statistics( + attempts, + opal_servers: list[OpalServerContainer], + number_of_opal_servers: int, + number_of_opal_clients: int, +): + """Tests the statistics feature by verifying the number of clients and + servers.""" + + print("- Testing statistics feature") + + time.sleep(15) + + for server in opal_servers: + print(f"OPAL Server: {server.settings.container_name}:7002") + + # The URL for statistics + stats_url = f"http://localhost:{server.settings.port}/stats" + + headers = { + "Authorization": f"Bearer {server.obtain_OPAL_tokens('test_read_statistics')['datasource']}" + } + + # Repeat the request multiple times + for attempt in range(attempts): + print(f"Attempt {attempt + 1}/{attempts} - Checking statistics...") + + try: + time.sleep(1) + # Send a request to the statistics endpoint + response = requests.get(stats_url, headers=headers) + response.raise_for_status() # Raise an error for HTTP status codes 4xx/5xx + + print(f"Response: {response.status_code} {response.text}") + + # Look for the expected data in the response + stats = utils.get_client_and_server_count(response.text) + if stats is None: + pytest.fail( + f"Expected statistics not found in response: {response.text}" + ) + + client_count = stats["client_count"] + server_count = 
stats["server_count"] + print( + f"Number of OPAL servers expected: {number_of_opal_servers}, found: {server_count}" + ) + print( + f"Number of OPAL clients expected: {number_of_opal_clients}, found: {client_count}" + ) + + if server_count < number_of_opal_servers: + pytest.fail( + f"Expected number of servers not found in response: {response.text}" + ) + + if client_count < number_of_opal_clients: + pytest.fail( + f"Expected number of clients not found in response: {response.text}" + ) + + except requests.RequestException as e: + if response is not None: + print(f"Request failed: {response.status_code} {response.text}") + pytest.fail(f"Failed to fetch statistics: {e}") + + print("Statistics check passed in all attempts.") + + +@pytest.mark.asyncio +async def test_policy_update( + gitea_server: GiteaContainer, + opal_servers: list[OpalServerContainer], + opal_clients: list[OpalClientContainer], + temp_dir, +): + # Parse locations into separate lists of IPs and countries + location = "CN" + + # Generate the reference timestamp + reference_timestamp = datetime.now(timezone.utc) + logger.info(f"Reference timestamp: {reference_timestamp}") + + for server in opal_servers: + # Update policy to allow only non-US users + print(f"Updating policy to allow only users from {location}...") + update_policy(gitea_server, server, "location") + + log_found = server.wait_for_log( + "Found new commits: old HEAD was", 30, reference_timestamp + ) + logger.info("Finished processing logs.") + assert ( + log_found + ), f"Expected log entry not found in server '{server.settings.container_name}' after the reference timestamp." + + for client in opal_clients: + log_found = client.wait_for_log( + "Fetching policy bundle from", 30, reference_timestamp + ) + logger.info("Finished processing logs.") + assert ( + log_found + ), f"Expected log entry not found in client '{client.settings.container_name}' after the reference timestamp." 
def compose(filename="docker-compose-app-tests.yml", *args):
    """Run a ``docker compose`` command and return its stdout.

    Uses *filename* as the compose file and ``.env`` as the environment
    file; any extra *args* are appended to the compose invocation.

    Raises:
        RuntimeError: when the compose command exits non-zero.
    """
    cmd = ["docker", "compose", "-f", filename, "--env-file", ".env", *args]
    proc = subprocess.run(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    )
    if proc.returncode != 0:
        raise RuntimeError(f"Compose command failed: {proc.stderr.strip()}")
    return proc.stdout
+ """ + command = [ + "docker", + "compose", + "-f", + filename, + "--env-file", + ".env", + ] + list(args) + result = subprocess.run( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True + ) + if result.returncode != 0: + raise RuntimeError(f"Compose command failed: {result.stderr.strip()}") + return result.stdout + + +def build_docker_image(docker_file: str, image_name: str, session_matrix: dict): + """Build the Docker image from the Dockerfile.server.local file in the + tests/docker directory.""" + + docker_client = docker.from_env() + + print(f"Building Docker image '{image_name}'...") + + image = None + if (not session_matrix["is_first"]) or (pytest_settings.skip_rebuild_images): + exists = any(image_name in image.tags for image in docker_client.images.list()) + if exists: + image = docker_client.images.get(image_name) + + if not image: + # context_path=os.path.join(os.path.dirname(__file__), ".."), # Expands the context + context_path = ".." + dockerfile_path = os.path.join(os.path.dirname(__file__), "docker", docker_file) + logger.info(f"Context path: {context_path}, Dockerfile path: {dockerfile_path}") + + # Ensure the Dockerfile exists + if not os.path.exists(dockerfile_path): + raise FileNotFoundError(f"Dockerfile not found at {dockerfile_path}") + + logger.debug(f"Building Docker image from {dockerfile_path}...") + + try: + # Build the Docker image + image, logs = docker_client.images.build( + path=context_path, + dockerfile=dockerfile_path, + tag=image_name, + rm=True, + ) + # Print build logs + for log in logs: + logger.debug(log.get("stream", "").strip()) + except Exception as e: + raise RuntimeError(f"Failed to build Docker image: {e}") + + logger.debug(f"Docker image '{image_name}' built successfully.") + + yield image_name + + if session_matrix["is_final"]: + # Optionally, clean up the image after the test session + try: + if pytest_settings.keep_images: + return + + image.remove(force=True) + print(f"Docker image '{image.id}' 
removed.") + except Exception as cleanup_error: + print( + f"Failed to remove Docker image '{image_name}'{image.id}: {cleanup_error}" + ) + + +def remove_pytest_opal_networks(): + """Remove all Docker networks with names starting with 'pytest_opal_'.""" + try: + client = docker.from_env() + networks = client.networks.list() + + for network in networks: + if network.name.startswith("pytest_opal_"): + try: + logger.debug(f"Removing network: {network.name}") + network.remove() + except Exception as e: + logger.debug(f"Failed to remove network {network.name}: {e}") + logger.debug("Cleanup complete!") + except Exception as e: + logger.debug(f"Error while accessing Docker: {e}") + + +def generate_ssh_key_pair(): + # Generate a private key + private_key = rsa.generate_private_key( + public_exponent=65537, # Standard public exponent + key_size=2048, # Key size in bits + ) + + # Serialize the private key in PEM format + private_key_pem = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), # No passphrase + ) + + # Generate the corresponding public key + public_key = private_key.public_key() + + # Serialize the public key in OpenSSH format + public_key_openssh = public_key.public_bytes( + encoding=serialization.Encoding.OpenSSH, + format=serialization.PublicFormat.OpenSSH, + ) + + # Return the keys as strings + return private_key_pem.decode("utf-8"), public_key_openssh.decode("utf-8") + + +async def opal_authorize(user: str, policy_url: str): + """Test if the user is authorized based on the current policy.""" + + # HTTP headers and request payload + headers = {"Content-Type": "application/json"} + data = { + "input": {"user": user, "action": "read", "object": "id123", "type": "finance"} + } + + # Send POST request to OPA + response = requests.post(policy_url, headers=headers, json=data) + + allowed = False + # Parse the JSON response + response_json = 
def wait_policy_repo_polling_interval(opal_server_container: "OpalServerContainer"):
    """Sleep for one server polling interval (plus propagation slack) so the
    OPAL server has time to pull a freshly pushed policy commit.

    :param opal_server_container: container whose ``settings.polling_interval``
        (seconds, possibly a string) defines the wait.
    """
    # Allow time for the update to propagate
    propagation_time = 5  # seconds
    total = int(opal_server_container.settings.polling_interval) + propagation_time
    for remaining in range(total, 0, -1):
        # BUG FIX: logging.Logger.debug() has no ``end`` keyword -- the
        # original passed end="\r" and raised TypeError on the first
        # iteration.
        logger.debug(
            f"waiting for OPAL server to pull the new policy {remaining} secondes left"
        )
        time.sleep(1)
def publish_data_update_with_curl(
    server_url: str,
    server_route: str,
    token: str,
    src_url: str = None,
    reason: str = "",
    topics: list[str] = ["policy_data"],
    data: str = None,
    src_config: dict[str, any] = None,
    dst_path: str = "",
    save_method: str = "PUT",
):
    """Publish a DataUpdate through an OPAL-server using curl.
    # Example usage
    # publish_data_update_with_curl("http://example.com", "/update", "your-token", src_url="http://data-source")

    Args:
        server_url (str): URL of the OPAL-server.
        server_route (str): Route in the server for updates.
        token (str): JWT token for authentication.
        src_url (Optional[str]): URL of the data source.
        reason (str): Reason for the update.
        topics (Optional[List[str]]): Topics for the update.
        data (Optional[str]): Data to include in the update.
        src_config (Optional[Dict[str, Any]]): Fetching config as JSON.
        dst_path (str): Destination path in the client data store.
        save_method (str): Method to save data (e.g., "PUT").
    """
    # NOTE: the mutable default for `topics` is kept for interface
    # compatibility; it is never mutated here.
    entries = []
    if src_url:
        entries.append(
            {
                "url": src_url,
                "data": json.loads(data) if data else None,
                "topics": topics or ["policy_data"],  # Ensure topics is not None
                "dst_path": dst_path,
                "save_method": save_method,
                "config": src_config,
            }
        )

    update_payload = {"entries": entries, "reason": reason}

    # Prepare headers for the curl command
    headers = ["Content-Type: application/json"]
    if token:
        headers.append(f"Authorization: Bearer {token}")

    # Build the curl command.
    # BUG FIX: each header now gets its own -H flag. The original joined all
    # headers (with embedded quote characters) into a single argv element,
    # which curl received as one malformed header.
    curl_command = ["curl", "-X", "POST", f"{server_url}{server_route}"]
    for header in headers:
        curl_command += ["-H", header]
    curl_command += ["-d", json.dumps(update_payload)]

    # Execute the curl command
    try:
        subprocess.run(curl_command, capture_output=True, text=True, check=True)
        # check=True already raised CalledProcessError for non-zero exits,
        # so reaching this point means success (the old returncode re-check
        # was unreachable dead code).
        return "Event Published Successfully"
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"Error executing curl: {e.stderr}")
+ src_url (Optional[str]): URL of the data source. + reason (str): Reason for the update. + topics (Optional[List[str]]): Topics for the update. + data (Optional[str]): Data to include in the update. + src_config (Optional[Dict[str, Any]]): Fetching config as JSON. + dst_path (str): Destination path in the client data store. + save_method (str): Method to save data (e.g., "PUT"). + """ + entries = [] + if src_url: + entries.append( + { + "url": src_url, + "data": json.loads(data) if data else None, + "topics": topics or ["policy_data"], # Ensure topics is not None + "dst_path": dst_path, + "save_method": save_method, + "config": src_config, + } + ) + + update_payload = {"entries": entries, "reason": reason} + + # Prepare headers for the curl command + headers = [ + "Content-Type: application/json", + ] + if token: + headers.append(f"Authorization: Bearer {token}") + + # Build the curl command + curl_command = [ + "curl", + "-X", + "POST", + f"{server_url}{server_route}", + "-H", + " -H ".join([f'"{header}"' for header in headers]), + "-d", + json.dumps(update_payload), + ] + + # Execute the curl command + try: + result = subprocess.run( + curl_command, capture_output=True, text=True, check=True + ) + if result.returncode == 0: + return "Event Published Successfully" + else: + raise RuntimeError( + f"Failed with status {result.returncode}: {result.stderr}" + ) + except subprocess.CalledProcessError as e: + raise RuntimeError(f"Error executing curl: {e.stderr}") + + +def get_client_and_server_count(json_data): + """Extracts the client_count and server_count from a given JSON string. + + Args: + json_data (str): A JSON string containing the client and server counts. + + Returns: + dict: A dictionary with keys 'client_count' and 'server_count'. 
+ """ + try: + # Parse the JSON string + data = json.loads(json_data) + + # Extract client and server counts + client_count = data.get("client_count", 0) + server_count = data.get("server_count", 0) + + return {"client_count": client_count, "server_count": server_count} + except json.JSONDecodeError: + raise ValueError("Invalid JSON input.") + + +def install_opal_server_and_client(): + logger.debug("- Installing opal-server and opal-client from pip...") + + try: + # Install opal-server and opal-client + subprocess.run( + [sys.executable, "-m", "pip", "install", "opal-server", "opal-client"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + check=True, + ) + + # Verify installation + opal_server_installed = ( + subprocess.run( + ["opal-server"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + shell=True, + ).returncode + == 0 + ) + + opal_client_installed = ( + subprocess.run( + ["opal-client"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + shell=True, + ).returncode + == 0 + ) + + if not opal_server_installed or not opal_client_installed: + logger.debug( + "Installation failed: opal-server or opal-client is not available." + ) + sys.exit(1) + + logger.debug("- opal-server and opal-client successfully installed.") + + except subprocess.CalledProcessError: + logger.debug("Installation failed: pip command encountered an error.") + sys.exit(1) + + +def export_env(varname, value): + """Exports an environment variable with a given value and updates the + current environment. + + Args: + varname (str): The name of the environment variable to set. + value (str): The value to assign to the environment variable. + + Returns: + str: The value assigned to the environment variable. + + Side Effects: + Prints the export statement to the console and sets the environment variable. 
+ """ + + logger.debug(f"export {varname}={value}") + os.environ[varname] = value + + return value + + +def remove_env(varname): + """Removes an environment variable from the current environment. + + Args: + varname (str): The name of the environment variable to remove. + + Returns: + None + + Side Effects: + Prints the unset statement to the console and removes the environment variable. + """ + logger.debug(f"unset {varname}") + del os.environ[varname] + + return + + +def create_localtunnel(port=8000): + try: + # Run the LocalTunnel command + process = subprocess.Popen( + ["lt", "--port", str(port)], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + + # Read output line by line + for line in iter(process.stdout.readline, ""): + # Match the public URL from LocalTunnel output + match = re.search(r"https://[a-z0-9\-]+\.loca\.lt", line) + if match: + public_url = match.group(0) + logger.debug(f"Public URL: {public_url}") + return public_url + + except Exception as e: + logger.debug(f"Error starting LocalTunnel: {e}") + + return None + + +import sys + + +def global_exception_handler(exc_type, exc_value, exc_traceback): + if issubclass(exc_type, KeyboardInterrupt): + # Allow Ctrl+C to exit the program without a traceback + sys.__excepthook__(exc_type, exc_value, exc_traceback) + return + + # Log or print the exception details + logger.debug(f"Uncaught exception: {exc_type.__name__}: {exc_value}") + + +# Set the global exception handler +sys.excepthook = global_exception_handler From a3f892a975c800afbd188e9f0197f92856201d8d Mon Sep 17 00:00:00 2001 From: Israel Weinberg Date: Wed, 8 Jan 2025 17:25:59 +0200 Subject: [PATCH 02/18] Refactor test scripts and improve documentation comments --- tests/fixtures/policy_stores.py | 2 +- tests/genopalkeys.sh | 2 ++ tests/install_opal.sh | 7 ++++--- tests/run.sh | 26 -------------------------- tests/utils.py | 9 +++++++-- 5 files changed, 14 insertions(+), 32 deletions(-) diff --git 
a/tests/fixtures/policy_stores.py b/tests/fixtures/policy_stores.py index 8838fa833..c07980e75 100644 --- a/tests/fixtures/policy_stores.py +++ b/tests/fixtures/policy_stores.py @@ -1,10 +1,10 @@ import pytest -from images import cedar_image, opa_image from testcontainers.core.network import Network from tests.containers.cedar_container import CedarContainer from tests.containers.opa_container import OpaContainer, OpaSettings from tests.containers.settings.cedar_settings import CedarSettings +from tests.fixtures.images import cedar_image, opa_image @pytest.fixture(scope="session") diff --git a/tests/genopalkeys.sh b/tests/genopalkeys.sh index b57a2ce06..23256cdd5 100644 --- a/tests/genopalkeys.sh +++ b/tests/genopalkeys.sh @@ -1,3 +1,5 @@ +# This is utility script to generate OPAL keys - Use it for your needs + # This function generates a pair of RSA keys using ssh-keygen, extracts the public key into OPAL_AUTH_PUBLIC_KEY, # formats the private key by replacing newlines with underscores and stores it in OPAL_AUTH_PRIVATE_KEY, # and then removes the key files. It outputs messages indicating the start and completion of key generation. diff --git a/tests/install_opal.sh b/tests/install_opal.sh index e4740f146..c51db1c82 100644 --- a/tests/install_opal.sh +++ b/tests/install_opal.sh @@ -1,7 +1,8 @@ +# This is a helper script to install opal-server and opal-client - # Installs opal-server and opal-client using pip. - # If the installation fails or the commands are not available, - # it exits with an error message. +# Installs opal-server and opal-client using pip. +# If the installation fails or the commands are not available, +# it exits with an error message. function install_opal_server_and_client { echo "- Installing opal-server and opal-client from pip..." 
diff --git a/tests/run.sh b/tests/run.sh index 11a2b1b75..38922a912 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -6,31 +6,8 @@ if [[ -f ".env" ]]; then source .env fi - -# Deletes pytest-generated .env files so they don't interfere with other tests. -function cleanup { - - PATTERN="pytest_[a-f,0-9]*.env" - echo "Looking for auto-generated .env files matching pattern '$PATTERN'..." - - for file in $PATTERN; do - if [[ -f "$file" ]]; then - echo "Deleting file: $file" - rm "$file" - else - echo "No matching files found for pattern '$PATTERN'." - break - fi - done - - echo "Cleanup complete!\n" -} - function main { - # Cleanup before starting, maybe some leftovers from previous runs - cleanup - echo "Running tests..." # Check if a specific test is provided @@ -43,9 +20,6 @@ function main { fi echo "Done!" - - # Cleanup at the end - cleanup } main "$@" diff --git a/tests/utils.py b/tests/utils.py index d58d24c37..2cab0962d 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -57,8 +57,13 @@ def build_docker_image(docker_file: str, image_name: str, session_matrix: dict): image = docker_client.images.get(image_name) if not image: - # context_path=os.path.join(os.path.dirname(__file__), ".."), # Expands the context - context_path = ".." + if "tests" in os.path.abspath(__file__): + logger.info(f"Right now the file is {os.path.abspath(__file__)}") + context_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "..", "opal") + ) + else: + context_path = ".." 
dockerfile_path = os.path.join(os.path.dirname(__file__), "docker", docker_file) logger.info(f"Context path: {context_path}, Dockerfile path: {dockerfile_path}") From ed3c033bcf977e3fcc466d646a2197713260f07a Mon Sep 17 00:00:00 2001 From: Israel Weinberg Date: Wed, 8 Jan 2025 22:36:29 +0200 Subject: [PATCH 03/18] Refactor service configurations and update container dependencies --- .gitignore | 9 +- .vscode/settings.json | 3 +- app-tests/docker-compose-app-tests.yml | 10 +- app-tests/jwks_dir/jwks.json | 2 +- app-tests/minrun.sh | 79 +--------- app-tests/sample_service/Dockerfile | 44 ------ app-tests/sample_service/app.py | 80 ----------- app-tests/sample_service/nginx.conf | 109 -------------- app-tests/sample_service/openapi.yaml | 88 ------------ app-tests/sample_service/policy.rego | 37 ----- app-tests/sample_service/requirements.txt | 18 --- app-tests/sample_service/start.sh | 10 -- app-tests/sample_service/supervisord.conf | 8 -- docker/Dockerfile.client | 118 --------------- docker/Dockerfile.server | 116 --------------- docker/docker-compose-local.yml | 105 -------------- packages/opal-client/opal_client/main.py | 6 - packages/opal-client/requires.txt | 1 - packages/opal-server/opal_server/data/api.py | 2 - packages/opal-server/opal_server/main.py | 6 - tests/README.md | 40 +++--- tests/containers/broadcast_container_base.py | 8 +- tests/containers/cedar_container.py | 6 +- tests/containers/gitea_container.py | 6 +- tests/containers/kafka_broadcast_container.py | 6 +- tests/containers/kafka_ui_container.py | 6 +- tests/containers/opa_container.py | 6 +- tests/containers/opal_client_container.py | 6 +- tests/containers/opal_server_container.py | 12 +- tests/containers/opal_test_container.py | 50 +++++++ tests/containers/permitContainer.py | 135 ------------------ .../postgres_broadcast_container.py | 7 +- .../containers/pulsar_broadcast_container.py | 6 +- tests/containers/redis_broadcast_container.py | 6 +- tests/containers/redis_ui_container.py | 6 +- 
tests/containers/zookeeper_container.py | 6 +- tests/fixtures/broadcasters.py | 2 +- tests/test_app.py | 5 +- 38 files changed, 132 insertions(+), 1038 deletions(-) delete mode 100644 app-tests/sample_service/Dockerfile delete mode 100644 app-tests/sample_service/app.py delete mode 100644 app-tests/sample_service/nginx.conf delete mode 100644 app-tests/sample_service/openapi.yaml delete mode 100644 app-tests/sample_service/policy.rego delete mode 100644 app-tests/sample_service/requirements.txt delete mode 100644 app-tests/sample_service/start.sh delete mode 100644 app-tests/sample_service/supervisord.conf delete mode 100644 docker/Dockerfile.client delete mode 100644 docker/Dockerfile.server delete mode 100644 docker/docker-compose-local.yml create mode 100644 tests/containers/opal_test_container.py delete mode 100644 tests/containers/permitContainer.py diff --git a/.gitignore b/.gitignore index 47754a866..757499e0b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,8 @@ # OPAL specific -opal_test_keys/* .env +*.env opal-example-policy-repo/* -data/ -OPAL_DATASOURCE_TOKEN.tkn -OPAL_CLIENT_TOKEN.tkn + # Temporary and Python cache files **/*.pyc @@ -63,7 +61,6 @@ coverage.xml *.py,cover .hypothesis/ .pytest_cache/ -new_pytest_env/temp # Translations *.mo @@ -136,5 +133,3 @@ dmypy.json # System files .DS_Store -pytest_6dbc.env -tests/pytest_1a09.env diff --git a/.vscode/settings.json b/.vscode/settings.json index dc49512f5..f5f61e4bd 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,6 +2,7 @@ "cmake.ignoreCMakeListsMissing": true, "makefile.configureOnOpen": false, "python.analysis.extraPaths": [ - "./packages/opal-common" + "./packages/opal-common", + "./packages/opal-server" ] } diff --git a/app-tests/docker-compose-app-tests.yml b/app-tests/docker-compose-app-tests.yml index 3bb517d3d..6c9398c9d 100644 --- a/app-tests/docker-compose-app-tests.yml +++ b/app-tests/docker-compose-app-tests.yml @@ -41,10 +41,10 @@ services: opal_client: image: 
permitio/opal-client:${OPAL_IMAGE_TAG:-latest} - scale: 0 + scale: 2 deploy: mode: replicated - replicas: 0 + replicas: 2 endpoint_mode: vip environment: - OPAL_SERVER_URL=http://opal_server:7002 @@ -57,9 +57,9 @@ services: - OPAL_AUTH_JWT_AUDIENCE=https://api.opal.ac/v1/ - OPAL_AUTH_JWT_ISSUER=https://opal.ac/ - OPAL_STATISTICS_ENABLED=true - #ports: - # - "7766-7767:7000" - # - "8181-8182:8181" + ports: + - "7766-7767:7000" + - "8181-8182:8181" depends_on: - opal_server command: sh -c "exec ./wait-for.sh opal_server:7002 --timeout=20 -- ./start.sh" diff --git a/app-tests/jwks_dir/jwks.json b/app-tests/jwks_dir/jwks.json index d8a027820..4b33b871c 100644 --- a/app-tests/jwks_dir/jwks.json +++ b/app-tests/jwks_dir/jwks.json @@ -1 +1 @@ -{"keys": [{"kty": "RSA", "key_ops": ["verify"], "n": "3HYeEOlS7BXR4x0klclD909fcrjyr4Jkmuixfl8cRmS7q3LPIsl1hIueKK0qBLjc7jIUsPCUEoJTIwMcdFfPQnViexerdx_ekupUwR0IFRzJli5wG0cYW5UkKOKDaXRrN0cnOQLZ_48ol6aEki8lkbGNYmaGtqrNTHsKA8uEP8S7AKnFqseTHJPhKAGzeeFKjWD4wAR0dgXkixLVxcAFohP-WR68oWPlrRnkBfb3ovRgQpo0UVWnjY99DJu9KZCaCGhSyjP42kjY65PykFmWHRUTltfMq7dkGkKuIOn_0YEVFoGhTda934vDqZO2EXgjj2lTCpYkVNK_WsL8ILIeCHEnK2ZYnxl4BgKLOHu2xyk6U4i8VjYpJo0U9UDcvNHaIuPNTxs0LNr1VtSVLxvZHDZ8f0UnmElgSGnAHj1cFamT-erIfIFfSuhzJ2qwcA30Y1B6EO8bBrQ2YgSWm8CNJO9FqWkEK0SPb3xw64KSnGtuAbBpVoNvK8AeHT7m9-11QtS7PWUiUGkSGKtTMlsvz90hb-rbFBIRcDuP2NG6BfoFq-rbcX9A2Djqhpxi6Zfr_5s7GN95UTbUKCLau3jvgTwdSxjCPsiE9t1phlWmTwp_eC8uYVriSPwiQ4ZqBIUE3Gz1PikqRJCt2E2KFMiHjSwlEPMtrbPwnO1B0G1ZXk8", "e": "AQAB"}]} +{"keys": [{"kty": "RSA", "key_ops": ["verify"], "n": 
"zQSk0F8jfU3KR9w-7-aq5n7Elh34Vhi1pzQsKG7VVoJgzqHqhXP1JsbSo-4ntAZ77fCWKMd25y4gOmNpur_0sOErRtSGwdlwVRxef-wztDD0ecqksMF7c2ZNwdq3hXxJ9NrDpcg8ORmIt6q-T5ZtbvoYhVy37LKw5dr0ry-SxfTeUuadFin4wTMQAwuiYKNQMGjuW8eEGi_ZEjziXjhQhEWfIIhH1v_jcWnW-_cjx7fvJ_Jau98vs40KKLawnvueiRdi8KWNQpA4b6480b0KfC0U7qbr61-fyL8u0L7aotTxmMzCfRabBRQ53sR5zYvvhV4Y-OZM_82RAFleIFsNfjrkbN5Sq7NEStE3b_yLMYu_uW5IoewOt7X3MgICq0jiXrbvExuJ0pq7DGlax5uhBt316Gt_HY8yqWAnCaa766_0av8IVZstRlCPOjhaM4liEneNdlzGheQlxoi6SxvQFhy4jbcG4tAmbvRIWpAbYaaXyB0H_PMPN7uPzo9lQNv1N6jMHlUq9GOGMy83qy6iQ2aA_NouCwDUKh5WUDePZwpwNFd5Fs6EiBcESG0SJkTwyuFPM6iYl6H2S7Knf8CYJMmixr_Ezm0id0Ltm0_FAwoEUqbltSmaGDZeI5T29732eFr9lK0Fw5R8_2X6uIRaIljCMSkbrRQcsm8gUZ_H9ms", "e": "AQAB"}]} diff --git a/app-tests/minrun.sh b/app-tests/minrun.sh index 1905be2c6..d45752103 100755 --- a/app-tests/minrun.sh +++ b/app-tests/minrun.sh @@ -44,7 +44,7 @@ function prepare_policy_repo { OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-example-policy-repo.git} echo "- Forking the policy repo" -OPAL_TARGET_ACCOUNT="iwphonedo" +OPAL_TARGET_ACCOUNT="SomeTargetAccount" # Replace with your GitHub username ORIGINAL_REPO_NAME=$(basename -s .git "$OPAL_POLICY_REPO_URL") NEW_REPO_NAME="${ORIGINAL_REPO_NAME}" FORKED_REPO_URL="git@github.com:${OPAL_TARGET_ACCOUNT}/${NEW_REPO_NAME}.git" @@ -64,7 +64,7 @@ else else echo "Error creating fork: $?" 
fi - + # Update OPAL_POLICY_REPO_URL to point to the forked repo OPAL_POLICY_REPO_URL="$FORKED_REPO_URL" echo "Updated OPAL_POLICY_REPO_URL to $OPAL_POLICY_REPO_URL" @@ -95,20 +95,6 @@ function compose { docker compose -f ./docker-compose-app-tests.yml --env-file .env "$@" } -function check_clients_logged { - echo "- Looking for msg '$1' in client's logs" - compose logs --index 1 opal_client | grep -q "$1" - compose logs --index 2 opal_client | grep -q "$1" -} - -function check_no_error { - # Without index would output all replicas - if compose logs opal_client | grep -q 'ERROR'; then - echo "- Found error in logs" - exit 1 - fi -} - function clean_up { ARG=$? if [[ "$ARG" -ne 0 ]]; then @@ -125,70 +111,15 @@ function clean_up { exit $ARG } -function test_push_policy { - echo "- Testing pushing policy $1" - regofile="$1.rego" - cd opal-tests-policy-repo - echo "package $1" > "$regofile" - git add "$regofile" - git commit -m "Add $regofile" - git push - cd - - - curl -s --request POST 'http://localhost:7002/webhook' --header 'Content-Type: application/json' --header 'x-webhook-token: xxxxx' --data-raw '{"gitEvent":"git.push","repository":{"git_url":"'"$OPAL_POLICY_REPO_URL"'"}}' - sleep 5 - check_clients_logged "PUT /v1/policies/$regofile -> 200" -} - -function test_data_publish { - echo "- Testing data publish for user $1" - user=$1 - OPAL_CLIENT_TOKEN=$OPAL_DATA_SOURCE_TOKEN opal-client publish-data-update --src-url https://api.country.is/23.54.6.78 -t policy_data --dst-path "/users/$user/location" - sleep 5 - check_clients_logged "PUT /v1/data/users/$user/location -> 204" -} - -function test_statistics { - echo "- Testing statistics feature" - # Make sure 2 servers & 2 clients (repeat few times cause different workers might response) - for _ in {1..10}; do - curl -s 'http://localhost:7002/stats' --header "Authorization: Bearer $OPAL_DATA_SOURCE_TOKEN" | grep '"client_count":2,"server_count":2' - done -} - function main { - # Setup + generate_opal_keys 
prepare_policy_repo trap clean_up EXIT - # Bring up OPAL containers - #compose down --remove-orphans - #compose up -d - #sleep 10 - - # Check containers started correctly - #check_clients_logged "Connected to PubSub server" - #check_clients_logged "Got policy bundle" - #check_clients_logged 'PUT /v1/data/static -> 204' - #check_no_error - - # Test functionality - # test_data_publish "bob" - # test_push_policy "something" - # test_statistics - - # echo "- Testing broadcast channel disconnection" - # compose restart broadcast_channel - # sleep 10 - - # test_data_publish "alice" - # test_push_policy "another" - # test_data_publish "sunil" - # test_data_publish "eve" - # test_push_policy "best_one_yet" - # TODO: Test statistics feature again after broadcaster restart (should first fix statistics bug) } +# This script is good if you want to just generate opal keys and initialize the policy repo on your github account +# and then run some docker compose and tests. main diff --git a/app-tests/sample_service/Dockerfile b/app-tests/sample_service/Dockerfile deleted file mode 100644 index f777de051..000000000 --- a/app-tests/sample_service/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -# Use an OpenResty base image -FROM openresty/openresty:alpine-fat - -# Install dependencies -RUN apk update && apk add --no-cache python3 py3-pip && \ - python3 -m venv /venv && \ - . /venv/bin/activate && \ - pip install --upgrade pip && \ - pip install flask && \ - pip install requests && \ - pip install jwt - -RUN apk add --no-cache shadow - -RUN addgroup -S nginx && adduser -S nginx -G nginx - -# Set up the Python environment and install other dependencies -WORKDIR /app -COPY requirements.txt /app/requirements.txt -RUN . /venv/bin/activate && pip install --no-cache-dir -r requirements.txt - -# Copy application code -COPY . 
/app - -# Copy NGINX configuration to OpenResty’s NGINX path -COPY nginx.conf /usr/local/openresty/nginx/conf/nginx.conf - -# Set environment variables for Flask -ENV FLASK_APP=app.py - -# Expose necessary ports -EXPOSE 80 5000 5682 - -# Ensure the log directory and log file exist, and set proper permissions -RUN mkdir -p /var/log/nginx && \ - touch /var/log/nginx/proxy_access.log && \ - chown nginx:nginx /var/log/nginx/proxy_access.log && \ - touch /var/log/nginx/error.log && \ - chown nginx:nginx /var/log/nginx/error.log - -# Run both OpenResty and Flask -COPY start.sh /start.sh -RUN chmod +x /start.sh -CMD /start.sh \ No newline at end of file diff --git a/app-tests/sample_service/app.py b/app-tests/sample_service/app.py deleted file mode 100644 index 8e345f348..000000000 --- a/app-tests/sample_service/app.py +++ /dev/null @@ -1,80 +0,0 @@ -from flask import Flask, request, jsonify -import requests -import debugpy - -app = Flask(__name__) - -debugpy.listen(("0.0.0.0", 5682)) # Optional, listen for debug requests on port 5678 - -# OPAL Authorization endpoint -OPAL_AUTH_URL = "http://opal_client:8181/v1/data/authorize" # Adjust with actual OPAL endpoint - -@app.route('/a') -def a(): - return 'Endpoint A' - -@app.route('/b') -def b(): - return 'Endpoint B' - -@app.route('/c') -def c(): - # Assuming the JWT token is passed in the Authorization header - auth_header = request.headers.get('Authorization') - - debugpy.wait_for_client() - - if not auth_header: - return jsonify({"error": "Unauthorized, missing Authorization header"}), 401 - - # Extract the token (assuming Bearer token) - token = auth_header.split(" ")[1] if "Bearer" in auth_header else None - - if not token: - return jsonify({"error": "Unauthorized, invalid Authorization header"}), 401 - - import jwt - - try: - # Decode the JWT token to extract the "sub" field - decoded_token = jwt.decode(token, options={"verify_signature": False}) - user = decoded_token.get("sub") - except jwt.DecodeError: - return 
jsonify({"error": "Unauthorized, invalid token"}), 401 - - if not user: - return jsonify({"error": "Unauthorized, 'sub' field not found in token"}), 401 - - # Prepare the payload for the OPAL authorization request with the extracted user - payload = { - "input": { - "user": user, - "method": request.method, - "path": request.path - } - } - - # Send the request to OPAL authorization endpoint - try: - response = requests.post(OPAL_AUTH_URL, json=payload) - - # Check if OPAL's response contains a positive authorization result - if response.status_code == 200: - opal_response = response.json() - if opal_response.get("result") is True: - return 'Endpoint C - Authorized' # Authorized access - - # If the result is not `true`, deny access - - # Assuming `response` is your variable containing the response object from OPAL - response_data = response.get_data(as_text=True) - return jsonify({"error": f"Forbidden, authorization denied! \n Response Body: {response_data}"}), 403 - # OPAL responded but with a non-200 status, treat as denied - return jsonify({"error": "Forbidden, OPAL authorization failed"}), 403 - - except requests.exceptions.RequestException as e: - # Handle connection or other request errors - return jsonify({"error": f"Error contacting OPAL client: {str(e)}"}), 500 - -if __name__ == '__main__': - app.run() \ No newline at end of file diff --git a/app-tests/sample_service/nginx.conf b/app-tests/sample_service/nginx.conf deleted file mode 100644 index 8662f7fe6..000000000 --- a/app-tests/sample_service/nginx.conf +++ /dev/null @@ -1,109 +0,0 @@ -worker_processes 1; - -events { worker_connections 1024; } - -http { - error_log /var/log/nginx/error.log debug; # Ensure this is set - - lua_shared_dict jwt_cache 10m; # Cache to avoid re-parsing JWT on every request - lua_package_path "/usr/local/lib/lua/?.lua;;"; # Adjust to match the Lua path on your setup - - server { - listen 80; - - set $auth_status 0; - - location /a { - access_log /var/log/nginx/proxy_access.log; 
- - # Directly proxy to Flask without authorization - proxy_pass http://127.0.0.1:5000; - } - - # This will be enforced in the endpoint - location /c { - access_log /var/log/nginx/proxy_access.log; - - proxy_pass http://127.0.0.1:5000; - } - - location / { - access_log /var/log/nginx/proxy_access.log; - - # Log the Authorization header to see if it's being passed correctly - log_by_lua_block { - ngx.log(ngx.ERR, "Authorization header: ", ngx.var.http_authorization) - } - - # Send authorization subrequest - auth_request /authz_check; - - # Proxy to Flask app if authorized - proxy_pass http://127.0.0.1:5000; - } - - location = /authz_check { - internal; - - # Authorization headers and content type for OPAL client - proxy_set_header Content-Type "application/json"; - proxy_set_header Authorization $http_authorization; - proxy_pass_request_body off; - - access_by_lua_block { - local jwt_token = ngx.var.http_authorization:match("Bearer%s+(.+)") - ngx.log(ngx.ERR, "JWT Token: ", jwt_token) - - if jwt_token then - local decoded_jwt = require("cjson").decode(require("ngx.decode_base64")(jwt_token:match("^[^.]+%.([^.]+)"))) - ngx.log(ngx.ERR, "Decoded JWT: ", require("cjson").encode(decoded_jwt)) - - local user_id = decoded_jwt["sub"] - local method = ngx.req.get_method() - local path = ngx.var.request_uri - - local opa_input = { - input = { - user = user_id, - method = method, - path = path - } - } - - ngx.req.set_body_data(require("cjson").encode(opa_input)) - else - ngx.log(ngx.ERR, "No JWT token found in Authorization header") - end - } - - # Forward request to OPAL - proxy_pass http://opal_client:8181/v1/data/authorize; - - # Process OPAL's response in header_filter_by_lua_block if needed - header_filter_by_lua_block { - ngx.ctx.auth_allowed = false -- Default to unauthorized - - local res_body = ngx.arg[1] - if res_body then - local response_json = require("cjson").decode(res_body) - if response_json and response_json["result"] == true then - ngx.ctx.auth_allowed = 
true - end - end - - if not ngx.ctx.auth_allowed then - ngx.status = ngx.HTTP_UNAUTHORIZED - ngx.say("Unauthorized") - ngx.exit(ngx.HTTP_UNAUTHORIZED) - end - } - } - # Custom 401 page if unauthorized - error_page 401 = /unauthorized; - error_page 403 = /unauthorized; - location = /unauthorized { - internal; - return 401 "Unauthorized"; - } - } -} \ No newline at end of file diff --git a/app-tests/sample_service/openapi.yaml b/app-tests/sample_service/openapi.yaml deleted file mode 100644 index f45907a6a..000000000 --- a/app-tests/sample_service/openapi.yaml +++ /dev/null @@ -1,88 +0,0 @@ -openapi: 3.0.0 -info: - title: Flask REST API with OPAL Authorization - description: A simple API with three endpoints (`/a`, `/b`, and `/c`), where `/c` requires OPAL authorization. - version: 1.0.0 -servers: - - url: http://localhost:5500 # Modify with actual server URL and port - -paths: - /a: - get: - summary: Endpoint A - description: A simple, unauthenticated endpoint. - responses: - '200': - description: Success - content: - text/plain: - schema: - type: string - example: "Endpoint A" - - /b: - get: - summary: Endpoint B - description: Another unauthenticated endpoint. - responses: - '200': - description: Success - content: - text/plain: - schema: - type: string - example: "Endpoint B" - - /c: - get: - summary: Endpoint C with Authorization - description: | - This endpoint requires authorization. The client must provide a JWT token in the Authorization header. - The endpoint checks with an OPAL server to authorize the user based on the token. 
- security: - - bearerAuth: [] - responses: - '200': - description: Authorized access to endpoint C - content: - text/plain: - schema: - type: string - example: "Endpoint C - Authorized" - '401': - description: Unauthorized - Missing or invalid JWT token - content: - application/json: - schema: - type: object - properties: - error: - type: string - example: "Unauthorized, missing Authorization header" - '403': - description: Forbidden - Authorization denied by OPAL - content: - application/json: - schema: - type: object - properties: - error: - type: string - example: "Forbidden, authorization denied" - '500': - description: Error contacting OPAL client - content: - application/json: - schema: - type: object - properties: - error: - type: string - example: "Error contacting OPAL client: Connection error" - -components: - securitySchemes: - bearerAuth: - type: http - scheme: bearer - bearerFormat: JWT # Indicates the use of JWT for bearer token \ No newline at end of file diff --git a/app-tests/sample_service/policy.rego b/app-tests/sample_service/policy.rego deleted file mode 100644 index fc156dcbd..000000000 --- a/app-tests/sample_service/policy.rego +++ /dev/null @@ -1,37 +0,0 @@ -package test - -default allow = false - -# User-role mapping -user_roles = { - "alice": "reader", - "bob": "writer" -} - -# Decode the token and store payload -token = {"payload": payload} { - io.jwt.decode(input.token, [_, payload, _]) -} - -# Extract the user role based on the user from `input` -user_role = user_roles[input.user] - -# Allow access to path `a` and `b` only for users with the role `writer` -allow = true { - input.path = ["a"] - input.method = "GET" - user_role == "writer" -} - -allow = true { - input.path = ["b"] - input.method = "GET" - user_role == "writer" -} - -# Allow access to path `c` for users with role `writer` or `reader` -allow = true { - input.path = ["c"] - input.method = "GET" - user_role == "writer" or user_role == "reader" -} \ No newline at end of file 
diff --git a/app-tests/sample_service/requirements.txt b/app-tests/sample_service/requirements.txt deleted file mode 100644 index f831415e8..000000000 --- a/app-tests/sample_service/requirements.txt +++ /dev/null @@ -1,18 +0,0 @@ -blinker==1.8.2 -certifi==2024.8.30 -cffi==1.17.1 -charset-normalizer==3.4.0 -click==8.1.7 -cryptography==43.0.3 -debugpy==1.8.7 -Flask==3.0.3 -idna==3.10 -itsdangerous==2.2.0 -Jinja2==3.1.4 -jwt==1.3.1 -MarkupSafe==3.0.2 -pycparser==2.22 -PyJWT==2.9.0 -requests==2.32.3 -urllib3==2.2.3 -Werkzeug==3.1.2 diff --git a/app-tests/sample_service/start.sh b/app-tests/sample_service/start.sh deleted file mode 100644 index 4fcd2a91d..000000000 --- a/app-tests/sample_service/start.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -# Activate virtual environment -. /venv/bin/activate - -# Start OpenResty -openresty -g "daemon off;" & - -# Start Flask app -python -Xfrozen_modules=off -m flask run --host=0.0.0.0 --port=5000 diff --git a/app-tests/sample_service/supervisord.conf b/app-tests/sample_service/supervisord.conf deleted file mode 100644 index 62a9ffba4..000000000 --- a/app-tests/sample_service/supervisord.conf +++ /dev/null @@ -1,8 +0,0 @@ -[supervisord] -nodaemon=true - -[program:nginx] -command=nginx -g "daemon off;" - -[program:flask] -command=flask run --host=0.0.0.0 --port=5000 \ No newline at end of file diff --git a/docker/Dockerfile.client b/docker/Dockerfile.client deleted file mode 100644 index 81c1cd192..000000000 --- a/docker/Dockerfile.client +++ /dev/null @@ -1,118 +0,0 @@ -# Dockerfile.server - -# BUILD IMAGE -FROM python:3.10-bookworm AS build-stage -# from now on, work in the /app directory -WORKDIR /app/ -# Layer dependency install (for caching) -COPY ../packages/requires.txt ./base_requires.txt -COPY ../packages/opal-common/requires.txt ./common_requires.txt -COPY ../packages/opal-client/requires.txt ./client_requires.txt -COPY ../packages/opal-server/requires.txt ./server_requires.txt - -RUN apt-get update && apt-get install 
-y gcc python3-dev procps sudo && apt-get clean - -# install python deps -RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt - -# COMMON IMAGE -FROM python:3.10-slim-bookworm AS common - -# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) -# also remove the default python site-packages that has older versions of packages that won't be overridden -RUN rm -r /usr/local/lib/python3.10/site-packages -COPY --from=build-stage /usr/local /usr/local - -# Add non-root user (with home dir at /opal) -RUN useradd -m -b / -s /bin/bash opal -WORKDIR /opal - -# copy wait-for script (create link at old path to maintain backward compatibility) -COPY ../scripts/wait-for.sh . -RUN chmod +x ./wait-for.sh -RUN ln -s /opal/wait-for.sh /usr/wait-for.sh - -# netcat (nc) is used by the wait-for.sh script -RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean -# Install sudo for Debian/Ubuntu-based images -RUN apt-get update && apt-get install -y sudo && apt-get clean - -# copy startup script (create link at old path to maintain backward compatibility) -COPY ../scripts/start.sh . -RUN chmod +x ./start.sh -RUN ln -s /opal/start.sh /start.sh -# copy gunicorn_config -COPY ../scripts/gunicorn_conf.py . -# copy app code - -COPY ../README.md . 
-COPY ../packages ./packages/ -# install the opal-common package -RUN cd ./packages/opal-common && python setup.py install -# Make sure scripts in .local are usable: -ENV PATH=/opal:/root/.local/bin:$PATH -# run gunicorn -CMD ["./start.sh"] - - -# STANDALONE IMAGE ---------------------------------- -# --------------------------------------------------- - FROM common AS client-standalone - # uvicorn config ------------------------------------ - # install the opal-client package - RUN cd ./packages/opal-client && python setup.py install - - # WARNING: do not change the number of workers on the opal client! - # only one worker is currently supported for the client. - - # number of uvicorn workers - ENV UVICORN_NUM_WORKERS=1 - # uvicorn asgi app - ENV UVICORN_ASGI_APP=opal_client.main:app - # uvicorn port - ENV UVICORN_PORT=7000 - # disable inline OPA - ENV OPAL_INLINE_OPA_ENABLED=false - - # expose opal client port - EXPOSE 7000 - USER opal - - RUN mkdir -p /opal/backup - VOLUME /opal/backup - - - # IMAGE to extract OPA from official image ---------- - # --------------------------------------------------- - FROM alpine:latest AS opa-extractor - USER root - - RUN apk update && apk add skopeo tar - WORKDIR /opal - - # copy opa from official docker image - ARG opa_image=openpolicyagent/opa - ARG opa_tag=latest-static - RUN skopeo copy "docker://${opa_image}:${opa_tag}" docker-archive:./image.tar && \ - mkdir image && tar xf image.tar -C ./image && cat image/*.tar | tar xf - -C ./image -i && \ - find image/ -name "opa*" -type f -executable -print0 | xargs -0 -I "{}" cp {} ./opa && chmod 755 ./opa && \ - rm -r image image.tar - - - # OPA CLIENT IMAGE ---------------------------------- - # Using standalone image as base -------------------- - # --------------------------------------------------- - FROM client-standalone AS client - - # Temporarily move back to root for additional setup - USER root - - # copy opa from opa-extractor - COPY --from=opa-extractor /opal/opa ./opa 
- - # enable inline OPA - ENV OPAL_INLINE_OPA_ENABLED=true - # expose opa port - EXPOSE 8181 - - USER opal \ No newline at end of file diff --git a/docker/Dockerfile.server b/docker/Dockerfile.server deleted file mode 100644 index 6a15df983..000000000 --- a/docker/Dockerfile.server +++ /dev/null @@ -1,116 +0,0 @@ -# Dockerfile.server - -# BUILD IMAGE -FROM python:3.10-bookworm AS build-stage -# from now on, work in the /app directory -WORKDIR /app/ -# Layer dependency install (for caching) -COPY ../packages/requires.txt ./base_requires.txt -COPY ../packages/opal-common/requires.txt ./common_requires.txt -COPY ../packages/opal-client/requires.txt ./client_requires.txt -COPY ../packages/opal-server/requires.txt ./server_requires.txt - -RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean - -# install python deps -RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt - -# COMMON IMAGE -FROM python:3.10-slim-bookworm AS common - -# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) -# also remove the default python site-packages that has older versions of packages that won't be overridden -RUN rm -r /usr/local/lib/python3.10/site-packages -COPY --from=build-stage /usr/local /usr/local - -# Add non-root user (with home dir at /opal) -RUN useradd -m -b / -s /bin/bash opal -WORKDIR /opal - -# copy wait-for script (create link at old path to maintain backward compatibility) -COPY ../scripts/wait-for.sh . 
-RUN chmod +x ./wait-for.sh -RUN ln -s /opal/wait-for.sh /usr/wait-for.sh - -# netcat (nc) is used by the wait-for.sh script -RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean -# Install sudo for Debian/Ubuntu-based images -RUN apt-get update && apt-get install -y sudo && apt-get clean - -# copy startup script (create link at old path to maintain backward compatibility) -COPY ../scripts/start.sh . -RUN chmod +x ./start.sh -RUN ln -s /opal/start.sh /start.sh -# copy gunicorn_config -COPY ../scripts/gunicorn_conf.py . -# copy app code - -COPY ../README.md . -COPY ../packages ./packages/ -# install the opal-common package -RUN cd ./packages/opal-common && python setup.py install -# Make sure scripts in .local are usable: -ENV PATH=/opal:/root/.local/bin:$PATH -# run gunicorn -CMD ["./start.sh"] - -# SERVER IMAGE -------------------------------------- -# --------------------------------------------------- -FROM common AS server - -RUN apt-get update && apt-get install -y openssh-client git && apt-get clean -RUN git config --global core.symlinks false # Mitigate CVE-2024-32002 - -USER opal - -# Potentially trust POLICY REPO HOST ssh signature -- -# opal trackes a remote (git) repository and fetches policy (e.g rego) from it. -# however, if the policy repo uses an ssh url scheme, authentication to said repo -# is done via ssh, and without adding the repo remote host (i.e: github.com) to -# the ssh known hosts file, ssh will issue output an interactive prompt that -# looks something like this: -# The authenticity of host 'github.com (192.30.252.131)' can't be established. -# RSA key fingerprint is 16:27:ac:a5:76:28:1d:52:13:1a:21:2d:bz:1d:66:a8. -# Are you sure you want to continue connecting (yes/no)? 
-# if the docker build arg `TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT` is set to `true` -# (default), the host specified by `POLICY_REPO_HOST` build arg (i.e: `github.com`) -# will be added to the known ssh hosts file at build time and prevent said prompt -# from showing. -ARG TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT="true" -ARG POLICY_REPO_HOST="github.com" - -RUN if [ "$TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT" = "true" ] ; then \ - mkdir -p ~/.ssh && \ - chmod 0700 ~/.ssh && \ - ssh-keyscan -t rsa ${POLICY_REPO_HOST} >> ~/.ssh/known_hosts ; fi - -USER root - -# install the opal-server package -RUN cd ./packages/opal-server && python setup.py install - -# uvicorn config ------------------------------------ - -# number of uvicorn workers -ENV UVICORN_NUM_WORKERS=1 -# uvicorn asgi app -ENV UVICORN_ASGI_APP=opal_server.main:app -# uvicorn port -ENV UVICORN_PORT=7002 - -# opal configuration -------------------------------- -# if you are not setting OPAL_DATA_CONFIG_SOURCES for some reason, -# override this env var with the actual public address of the server -# container (i.e: if you are running in docker compose and the server -# host is `opalserver`, the value will be: http://opalserver:7002/policy-data) -# `host.docker.internal` value will work better than `localhost` if you are -# running dockerized opal server and client on the same machine -# ENV OPAL_ALL_DATA_URL=http://host.docker.internal:7002/policy-data -ENV OPAL_ALL_DATA_URL=http://opal_server:7002/policy-data -# Use fixed path for the policy repo - so new leader would use the same directory without re-cloning it. -# That's ok when running in docker and fs is ephemeral (repo in a bad state would be fixed by restarting container). 
-ENV OPAL_POLICY_REPO_REUSE_CLONE_PATH=true - -# expose opal server port -EXPOSE 7002 -USER opal diff --git a/docker/docker-compose-local.yml b/docker/docker-compose-local.yml deleted file mode 100644 index 086bb7aee..000000000 --- a/docker/docker-compose-local.yml +++ /dev/null @@ -1,105 +0,0 @@ -version: '3.8' - -services: - # Database service for broadcast channel - broadcast_channel: - image: postgres:alpine - container_name: broadcast_channel - environment: - - POSTGRES_DB=postgres - - POSTGRES_USER=postgres - - POSTGRES_PASSWORD=postgres - - # Gitea service - gitea: - image: gitea/gitea:latest - container_name: gitea - environment: - - USER_UID=1000 - - USER_GID=1000 - - DB_TYPE=sqlite3 # Alternatively, you can set up PostgreSQL or MySQL for production - - GITEA__database__DB_PATH=/data/gitea/gitea.db - - GITEA__server__ROOT_URL=http://localhost:3000/ - - GITEA__service__DISABLE_REGISTRATION=true # Optional: disable public registrations for security - volumes: - - gitea_data:/data - ports: - - "3000:3000" # Expose Gitea's web interface on port 3000 - - "2222:22" # Expose Gitea's SSH service on port 2222 - depends_on: - - broadcast_channel - - # OPAL Server and Client service - opal_server: - build: - context: ../ # Point to the directory containing your Dockerfile - dockerfile: ./docker/Dockerfile.server # Specify your Dockerfile if it's not named 'Dockerfile' - container_name: opal_server - environment: - - OPAL_BROADCAST_URI=postgres://postgres:postgres@broadcast_channel:5432/postgres - - UVICORN_NUM_WORKERS=1 - - OPAL_POLICY_REPO_URL=http://gitea:3000/permit/opal-example-policy-repo - - OPAL_POLICY_REPO_POLLING_INTERVAL=30 - - OPAL_DATA_CONFIG_SOURCES={"config":{"entries":[{"url":"http://opal_server:7002/policy-data","topics":["policy_data"],"dst_path":"/static"}]}} - - OPAL_LOG_FORMAT_INCLUDE_PID=true - - OPAL_SERVER_URL=http://opal_server:7002 - - OPAL_LOG_FORMAT_INCLUDE_PID=true - - OPAL_INLINE_OPA_LOG_FORMAT=http - - DEBUGPY_PORT=5678 - ports: - - 
"7002:7002" # Expose OPAL Server - - "5679:5678" # DebugPy - volumes: - - ../packages:/app/packages # Mount local packages directory for live updates - - ../scripts:/app/scripts # Mount local scripts for live updates - - ../README.md:/app/README.md # Mount README for reference, if necessary - depends_on: - - gitea - command: sh -c "exec ./wait-for.sh broadcast_channel:5432 --timeout=20 -- ./start.sh" - - opal_client: - build: - context: ../ # Point to the directory containing your Dockerfile - dockerfile: ./docker/Dockerfile.client # Specify your Dockerfile if it's not named 'Dockerfile' - container_name: opal_client - environment: - - OPAL_SERVER_URL=http://opal_server:7002 - - OPAL_LOG_FORMAT_INCLUDE_PID=true - - OPAL_INLINE_OPA_LOG_FORMAT=http - - DEBUGPY_PORT=5678 - ports: - - "7766:7000" # OPAL client - - "8181:8181" # OPA agent - - "5680:5678" # DebugPy - depends_on: - - opal_server - command: sh -c "exec ./wait-for.sh opal_server:7002 --timeout=20 -- ./start.sh" - - sample_service: - build: - context: ../app-tests/sample_service # Point to the directory containing your Dockerfile - dockerfile: ./Dockerfile # Specify your Dockerfile if it's not named 'Dockerfile' - container_name: openresty_nginx # This sets the container name - environment: - - FLASK_APP=app.py - - OPAL_URL=http://opal_client:7000 - ports: - - "5500:80" - - "5682:5682" - volumes: - - ../app-tests/sample_service/sources:/app/sources # Mount the sources directory - depends_on: - - opal_client - frontend: - build: - context: ../app-tests/opal-frontend - dockerfile: ./Dockerfile - container_name: frontend - ports: - - "4200:80" # Serve Angular app on http://localhost:4200 - depends_on: - - sample_service # Make sure the backend is up first - -volumes: - opa_backup: - gitea_data: # Data volume for Gitea \ No newline at end of file diff --git a/packages/opal-client/opal_client/main.py b/packages/opal-client/opal_client/main.py index 635ddccc4..65f3bb665 100644 --- 
a/packages/opal-client/opal_client/main.py +++ b/packages/opal-client/opal_client/main.py @@ -1,11 +1,5 @@ from opal_client.client import OpalClient client = OpalClient() - -import debugpy -#debugpy.listen(("0.0.0.0", 5678)) -print("Waiting for debugger attach...") -#debugpy.wait_for_client() # Optional, wait for debugger to attach before continuing - # expose app for Uvicorn app = client.app diff --git a/packages/opal-client/requires.txt b/packages/opal-client/requires.txt index ed57c33ee..4acb85cb6 100644 --- a/packages/opal-client/requires.txt +++ b/packages/opal-client/requires.txt @@ -11,4 +11,3 @@ opentelemetry-instrumentation opentelemetry-instrumentation-fastapi opentelemetry-exporter-otlp opentelemetry-exporter-prometheus -debugpy diff --git a/packages/opal-server/opal_server/data/api.py b/packages/opal-server/opal_server/data/api.py index ba426575e..b4c82dd09 100644 --- a/packages/opal-server/opal_server/data/api.py +++ b/packages/opal-server/opal_server/data/api.py @@ -87,8 +87,6 @@ async def get_data_sources_config(authorization: Optional[str] = Header(None)): token = get_token_from_header(authorization) if data_sources_config.config is not None: logger.info("Serving source configuration") - logger.info("Source config: {config}", config=data_sources_config.config) - debugpy.breakpoint() return data_sources_config.config elif data_sources_config.external_source_url is not None: url = str(data_sources_config.external_source_url) diff --git a/packages/opal-server/opal_server/main.py b/packages/opal-server/opal_server/main.py index 9a56377d7..7e61e2a66 100644 --- a/packages/opal-server/opal_server/main.py +++ b/packages/opal-server/opal_server/main.py @@ -1,9 +1,3 @@ - -import debugpy -#debugpy.listen(("0.0.0.0", 5678)) -print("Waiting for debugger attach...") -#debugpy.wait_for_client() # Optional, wait for debugger to attach before continuing - def create_app(*args, **kwargs): from opal_server.server import OpalServer diff --git a/tests/README.md 
b/tests/README.md index 815917d65..0cc451d30 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,24 +1,32 @@ -Got it! If you'd like, I can incorporate code snippets or provide clarifications. Just let me know what specific details or examples you'd like me to include. Here's the README file in Markdown format: - ---- - # Tests The tests folder contains integration and unit tests for OPAL. These tests ensure the proper functionality and reliability of OPAL across various components and scenarios. Below is an overview of the test structure, utilities, and execution methods. +## Running the Tests + +To execute the tests, run the `run.sh` script from the root directory of the repository. This script sets up the environment and executes all tests: + +```bash +./run.sh +``` + +What you will see is that pytest begins to pull the images for the broadcaster(s), then gitea, then the opal_server and opal_client will be built from the local debuggable version using the source code, rather than permitio/opal-server or opal-client images. So you could test your changes to the code. + +If all infrastructure is set up well, you will then see the tests being executed by pytest as normal. + ## Test Structure - **`tests/containers`**: Configurations and setups for containerized environments used in testing OPAL, including Docker and Kubernetes configurations. - **`tests/data-fetchers`**: OPAL data fetchers used in the tests to fetch data from various sources, such as PostgreSQL, MongoDB, etc. - **`tests/docker`**: Dockerfiles and related files used to build Docker images for the tests. - **`tests/policies`**: Policies written in REGO used to verify that OPAL functions correctly. -- **`tests/policy_repos`**: Providers managing policy repositories on platforms such as Gitea, GitHub, GitLab, and others. Additional platforms should implement a class derived from `PolicyRepoBase` (e.g., Bitbucket). 
-- **`tests/app-tests`**: Integration tests running OPAL with a sample service to verify correct configuration. -- **`tests/policy_stores`**: Test setups to validate support for policy decision engines such as OPA, Cedar, OpenFGA, etc. -- **`conftest.py`**: Fixtures shared across multiple tests for consistent test environments. - -The tests are built using [Pytest](https://pytest.org/en/latest/) and leverage [testcontainers](https://testcontainers.org/) to build and run Docker images. - +- **`containers`**: Configurations and setups for containerized environments used in testing OPAL, including Docker and Kubernetes configurations. +- **`data-fetchers`**: OPAL data fetchers used in the tests to fetch data from various sources, such as PostgreSQL, MongoDB, etc. +- **`docker`**: Dockerfiles and related files used to build Docker images for the tests. +- **`policies`**: Policies written in REGO used to verify that OPAL functions correctly. +- **`policy_repos`**: Providers managing policy repositories on platforms such as Gitea, GitHub, GitLab, and others. Additional platforms should implement a class derived from `PolicyRepoBase` (e.g., Bitbucket). +- **`app-tests`**: Integration tests running OPAL with a sample service to verify correct configuration. +- **`policy_stores`**: Test setups to validate support for policy decision engines such as OPA, Cedar, OpenFGA, etc. ## Infrastructure of the Testing System ### Settings @@ -65,18 +73,10 @@ def test_custom_policy(opal_servers, opal_clients): # Add your test logic here ``` -## Running the Tests - -To execute the tests, run the `run.sh` script from the root directory of the repository. This script sets up the environment and executes all tests: - -```bash -./run.sh -``` - ## OPAL API Reference Refer to the [OPAL API Documentation](https://opal-v2.permit.io/redoc#tag/Bundle-Server/operation/get_policy_policy_get) for additional details on endpoints and functionality. 
--- -Let me know if you'd like to include specific code examples or any other details! \ No newline at end of file +Let me know if you'd like to include specific code examples or any other details! diff --git a/tests/containers/broadcast_container_base.py b/tests/containers/broadcast_container_base.py index 4a278ee85..15e4fd22b 100644 --- a/tests/containers/broadcast_container_base.py +++ b/tests/containers/broadcast_container_base.py @@ -1,9 +1,9 @@ -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer -class BroadcastContainerBase(PermitContainer): - def __init__(self): - PermitContainer.__init__(self) +class BroadcastContainerBase(OpalTestContainer): + def __init__(self, **kwargs): + OpalTestContainer.__init__(self, **kwargs) def get_url(self) -> str: url = ( diff --git a/tests/containers/cedar_container.py b/tests/containers/cedar_container.py index c661ad095..aab56ee0b 100644 --- a/tests/containers/cedar_container.py +++ b/tests/containers/cedar_container.py @@ -3,12 +3,12 @@ from testcontainers.core.utils import setup_logger from tests import utils -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer from tests.containers.settings.cedar_settings import CedarSettings from tests.containers.settings.opal_client_settings import OpalClientSettings -class CedarContainer(PermitContainer, DockerContainer): +class CedarContainer(OpalTestContainer, DockerContainer): def __init__( self, settings: CedarSettings, @@ -16,7 +16,7 @@ def __init__( docker_client_kw: dict | None = None, **kwargs, ) -> None: - PermitContainer.__init__(self) # Initialize PermitContainer + OpalTestContainer.__init__(self) # Initialize OpalTestContainer DockerContainer.__init__( self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs ) diff --git a/tests/containers/gitea_container.py b/tests/containers/gitea_container.py index 
83c14e631..2719d4835 100644 --- a/tests/containers/gitea_container.py +++ b/tests/containers/gitea_container.py @@ -10,11 +10,11 @@ from testcontainers.core.utils import setup_logger import docker -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer from tests.containers.settings.gitea_settings import GiteaSettings -class GiteaContainer(PermitContainer, DockerContainer): +class GiteaContainer(OpalTestContainer, DockerContainer): def __init__( self, settings: GiteaSettings, @@ -35,7 +35,7 @@ def __init__( # Set container lifecycle properties self.with_kwargs(auto_remove=False, restart_policy={"Name": "always"}) - PermitContainer.__init__(self) + OpalTestContainer.__init__(self) DockerContainer.__init__( self, image=self.settings.image, diff --git a/tests/containers/kafka_broadcast_container.py b/tests/containers/kafka_broadcast_container.py index 1fa52e1d1..697bd69e1 100644 --- a/tests/containers/kafka_broadcast_container.py +++ b/tests/containers/kafka_broadcast_container.py @@ -3,11 +3,11 @@ from testcontainers.kafka import KafkaContainer import docker -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer from tests.containers.zookeeper_container import ZookeeperContainer -class KafkaBroadcastContainer(PermitContainer, KafkaContainer): +class KafkaBroadcastContainer(OpalTestContainer, KafkaContainer): def __init__( self, network: Network, @@ -23,7 +23,7 @@ def __init__( self.zookeeper_container = zookeeper_container self.network = network - PermitContainer.__init__(self) + OpalTestContainer.__init__(self) KafkaContainer.__init__(self, docker_client_kw=docker_client_kw, **kwargs) self.with_network(self.network) diff --git a/tests/containers/kafka_ui_container.py b/tests/containers/kafka_ui_container.py index a01b70f1f..f28cbc257 100644 --- a/tests/containers/kafka_ui_container.py +++ 
b/tests/containers/kafka_ui_container.py @@ -2,10 +2,10 @@ from testcontainers.core.network import Network from tests.containers.kafka_broadcast_container import KafkaBroadcastContainer -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer -class KafkaUIContainer(PermitContainer, DockerContainer): +class KafkaUIContainer(OpalTestContainer, DockerContainer): def __init__( self, network: Network, @@ -23,7 +23,7 @@ def __init__( self.image = "provectuslabs/kafka-ui:latest" - PermitContainer.__init__(self) + OpalTestContainer.__init__(self) DockerContainer.__init__( self, image=self.image, docker_client_kw=docker_client_kw, **kwargs ) diff --git a/tests/containers/opa_container.py b/tests/containers/opa_container.py index 8227394e2..1e5cbe9ca 100644 --- a/tests/containers/opa_container.py +++ b/tests/containers/opa_container.py @@ -3,7 +3,7 @@ from testcontainers.core.utils import setup_logger from tests import utils -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer from tests.containers.settings.opal_client_settings import OpalClientSettings @@ -29,7 +29,7 @@ def getEnvVars(self): return {} -class OpaContainer(PermitContainer, DockerContainer): +class OpaContainer(OpalTestContainer, DockerContainer): def __init__( self, settings: OpaSettings, @@ -37,7 +37,7 @@ def __init__( docker_client_kw: dict | None = None, **kwargs, ) -> None: - PermitContainer.__init__(self) # Initialize PermitContainer + OpalTestContainer.__init__(self) # Initialize OpalTestContainer DockerContainer.__init__( self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs ) diff --git a/tests/containers/opal_client_container.py b/tests/containers/opal_client_container.py index dee8d3301..282c6d151 100644 --- a/tests/containers/opal_client_container.py +++ b/tests/containers/opal_client_container.py @@ -3,11 +3,11 @@ from 
testcontainers.core.utils import setup_logger from tests import utils -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer from tests.containers.settings.opal_client_settings import OpalClientSettings -class OpalClientContainer(PermitContainer, DockerContainer): +class OpalClientContainer(OpalTestContainer, DockerContainer): def __init__( self, settings: OpalClientSettings, @@ -15,7 +15,7 @@ def __init__( docker_client_kw: dict | None = None, **kwargs, ) -> None: - PermitContainer.__init__(self) # Initialize PermitContainer + OpalTestContainer.__init__(self) # Initialize OpalTestContainer DockerContainer.__init__( self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs ) diff --git a/tests/containers/opal_server_container.py b/tests/containers/opal_server_container.py index 71c6ce50b..ab55bfccf 100644 --- a/tests/containers/opal_server_container.py +++ b/tests/containers/opal_server_container.py @@ -3,11 +3,11 @@ from testcontainers.core.network import Network from testcontainers.core.utils import setup_logger -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer from tests.containers.settings.opal_server_settings import OpalServerSettings -class OpalServerContainer(PermitContainer, DockerContainer): +class OpalServerContainer(OpalTestContainer, DockerContainer): def __init__( self, settings: OpalServerSettings, @@ -20,7 +20,7 @@ def __init__( self.logger = setup_logger(__name__) - PermitContainer.__init__(self) + OpalTestContainer.__init__(self) DockerContainer.__init__( self, image=self.settings.image, docker_client_kw=docker_client_kw, **kwargs ) @@ -53,7 +53,7 @@ def reload_with_settings(self, settings: OpalServerSettings | None = None): self.start() - def obtain_OPAL_tokens(self, caller: str = "Unkonwn caller") -> dict: + def obtain_OPAL_tokens(self, caller: str = "Unknown caller") -> dict: 
"""Fetch client and datasource tokens from the OPAL server.""" token_url = f"http://localhost:{self.settings.port}/token" headers = { @@ -77,7 +77,9 @@ def obtain_OPAL_tokens(self, caller: str = "Unkonwn caller") -> dict: token = response.json().get("token") if token: tokens[token_type] = token - self.logger.info(f"{caller} | Successfully fetched OPAL {token_type} token.") + self.logger.info( + f"{caller} | Successfully fetched OPAL {token_type} token." + ) else: self.logger.error( f"{caller} | Failed to fetch OPAL {token_type} token: {response.json()}" diff --git a/tests/containers/opal_test_container.py b/tests/containers/opal_test_container.py new file mode 100644 index 000000000..8e5bef536 --- /dev/null +++ b/tests/containers/opal_test_container.py @@ -0,0 +1,50 @@ +import re +import time +from datetime import datetime + +from testcontainers.core.utils import setup_logger + + +class OpalTestContainer: + def __init__(self, **kwargs): + self.opalLogger = setup_logger(__name__) + + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.timestamp_with_ansi = ( + r"\x1b\[.*?(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}\+\d{4})" + ) + + def wait_for_log( + self, log_str: str, timeout: int, reference_timestamp: datetime | None = None + ): + log_found = False + logs = self._container.logs(stream=True) + start_time = time.time() + + for line in logs: + if time.time() - start_time > timeout: + self.opalLogger.warning( + f"{self.settings.container_name} | Timeout reached while waiting for the log. 
| {log_str}" + ) + break + + decoded_line = line.decode("utf-8").strip() + match = re.search(self.timestamp_with_ansi, decoded_line) + if match: + log_timestamp_string = match.group(1) + log_timestamp = datetime.strptime( + log_timestamp_string, "%Y-%m-%dT%H:%M:%S.%f%z" + ) + + if (reference_timestamp is None) or ( + log_timestamp > reference_timestamp + ): + if log_str in decoded_line: + log_found = True + break + + return log_found diff --git a/tests/containers/permitContainer.py b/tests/containers/permitContainer.py deleted file mode 100644 index 50858a95e..000000000 --- a/tests/containers/permitContainer.py +++ /dev/null @@ -1,135 +0,0 @@ -import re -import time -from datetime import datetime - -from testcontainers.core.utils import setup_logger - - -class PermitContainer: - def __init__(self): - self.permitLogger = setup_logger(__name__) - - # Regex to match any ANSI-escaped timestamp in the format YYYY-MM-DDTHH:MM:SS.mmmmmm+0000 - self.timestamp_with_ansi = ( - r"\x1b\[.*?(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}\+\d{4})" - ) - self.errors = [] - # self.check_errors() - - def wait_for_log( - self, log_str: str, timeout: int, reference_timestamp: datetime | None = None - ): - """Wait for a specific log to appear in the container logs after the - reference timestamp. - - Args: - reference_timestamp (datetime): The timestamp to start checking logs from. - log_str (str): The string to search for in the logs. - timeout (int): Maximum time to wait for the log (in seconds). - - Returns: - bool: True if the log was found, False if the timeout was reached. - """ - # Stream logs from the opal_client container - log_found = False - logs = self._container.logs(stream=True) - - start_time = time.time() # Record the start time - - for line in logs: - # Check if the timeout has been exceeded - elapsed_time = time.time() - start_time - if elapsed_time > timeout: - self.permitLogger.warning(f"{self.settings.container_name} | Timeout reached while waiting for the log. 
| {log_str}") - break - - decoded_line = line.decode("utf-8").strip() - - # Search for the timestamp in the line - match = re.search(self.timestamp_with_ansi, decoded_line) - if match: - log_timestamp_string = match.group(1) - log_timestamp = datetime.strptime( - log_timestamp_string, "%Y-%m-%dT%H:%M:%S.%f%z" - ) - - if (reference_timestamp is None) or ( - log_timestamp > reference_timestamp - ): - if log_str in decoded_line: - log_found = True - break - - return log_found - - def wait_for_error( - self, reference_timestamp: datetime, error_str: str = "Error", timeout: int = 30 - ): - """Wait for a specific log to appear in the container logs after the - reference timestamp. - - Args: - reference_timestamp (datetime): The timestamp to start checking logs from. - log_str (str): The string to search for in the logs. - timeout (int): Maximum time to wait for the log (in seconds). - - Returns: - bool: True if the log was found, False if the timeout was reached. - """ - # Stream logs from the opal_client container - err_found = False - logs = self._container.logs(stream=True) - - start_time = time.time() # Record the start time - - for line in logs: - # Check if the timeout has been exceeded - elapsed_time = time.time() - start_time - if elapsed_time > timeout: - self.permitLogger.warning("Timeout reached while waiting for the log.") - break - - decoded_line = line.decode("utf-8").strip() - - # Search for the timestamp in the line - match = re.search(self.timestamp_with_ansi, decoded_line) - if match: - log_timestamp_string = match.group(1) - log_timestamp = datetime.strptime( - log_timestamp_string, "%Y-%m-%dT%H:%M:%S.%f%z" - ) - - if log_timestamp > reference_timestamp: - self.permitLogger.info(f"Checking log line: {decoded_line}") - if error_str in decoded_line: - err_found = True - for err in self.errors: - m = re.search(self.timestamp_with_ansi, decoded_line) - if m.group(1) == match.group(1): - self.errors.remove(err) - self.permitLogger.info("err found!") - break 
- return err_found - - async def check_errors(self): - # Stream logs from the opal_client container - logs = self._container.logs(stream=True) - - log_str = "ERROR" - - for line in logs: - decoded_line = line.decode("utf-8").strip() - self.permitLogger.info(f"Checking log line: {decoded_line}") - self.permitLogger.info(f"scanning line: {decoded_line}") - if log_str in decoded_line: - self.permitLogger.error("\n\n\n\n") - self.permitLogger.error(f"error found: {decoded_line}") - self.permitLogger.error("\n\n\n\n") - self.errors.append(decoded_line) - - def __del__(self): - if len(self.errors) > 0: - self.permitLogger.error("Errors found in container logs:") - for error in self.errors: - self.permitLogger.error(error) - assert False, "Errors found in container logs." diff --git a/tests/containers/postgres_broadcast_container.py b/tests/containers/postgres_broadcast_container.py index bb0764335..42f343b10 100644 --- a/tests/containers/postgres_broadcast_container.py +++ b/tests/containers/postgres_broadcast_container.py @@ -16,15 +16,10 @@ def __init__( docker_client_kw: dict | None = None, **kwargs, ) -> None: - # Add custom labels to the kwargs - labels = kwargs.get("labels", {}) - labels.update({"com.docker.compose.project": "pytest"}) - kwargs["labels"] = labels - self.network = network self.settings = settings - BroadcastContainerBase.__init__(self) + BroadcastContainerBase.__init__(self, **kwargs) PostgresContainer.__init__( self, image, diff --git a/tests/containers/pulsar_broadcast_container.py b/tests/containers/pulsar_broadcast_container.py index 74abc951a..a4498cb7b 100644 --- a/tests/containers/pulsar_broadcast_container.py +++ b/tests/containers/pulsar_broadcast_container.py @@ -1,12 +1,12 @@ import debugpy -from containers.permitContainer import PermitContainer from testcontainers.core.container import DockerContainer from testcontainers.core.network import Network import docker +from tests.containers.opal_test_container import OpalTestContainer -class 
PulsarBroadcastContainer(PermitContainer, DockerContainer): +class PulsarBroadcastContainer(OpalTestContainer, DockerContainer): def __init__( self, network: Network, @@ -20,7 +20,7 @@ def __init__( self.network = network - PermitContainer.__init__(self) + OpalTestContainer.__init__(self) DockerContainer.__init__( self, image="pulsar:latest", docker_client_kw=docker_client_kw, **kwargs ) diff --git a/tests/containers/redis_broadcast_container.py b/tests/containers/redis_broadcast_container.py index b11ad61b6..a8ddfd2f3 100644 --- a/tests/containers/redis_broadcast_container.py +++ b/tests/containers/redis_broadcast_container.py @@ -1,10 +1,10 @@ from testcontainers.core.network import Network from testcontainers.redis import RedisContainer -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer -class RedisBroadcastContainer(PermitContainer, RedisContainer): +class RedisBroadcastContainer(OpalTestContainer, RedisContainer): def __init__( self, network: Network, @@ -18,7 +18,7 @@ def __init__( self.network = network - PermitContainer.__init__(self) + OpalTestContainer.__init__(self) RedisContainer.__init__(self, docker_client_kw=docker_client_kw, **kwargs) self.with_network(self.network) diff --git a/tests/containers/redis_ui_container.py b/tests/containers/redis_ui_container.py index eb02df918..c467ae90a 100644 --- a/tests/containers/redis_ui_container.py +++ b/tests/containers/redis_ui_container.py @@ -2,10 +2,10 @@ from testcontainers.core.network import Network from testcontainers.redis import RedisContainer -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer -class RedisUIContainer(PermitContainer, DockerContainer): +class RedisUIContainer(OpalTestContainer, DockerContainer): def __init__( self, network: Network, @@ -23,7 +23,7 @@ def __init__( self.container_name = "redis-ui" self.image = 
"redislabs/redisinsight:latest" - PermitContainer.__init__(self) + OpalTestContainer.__init__(self) DockerContainer.__init__( self, image=self.image, docker_client_kw=docker_client_kw, **kwargs ) diff --git a/tests/containers/zookeeper_container.py b/tests/containers/zookeeper_container.py index 30c35204e..aec6327e3 100644 --- a/tests/containers/zookeeper_container.py +++ b/tests/containers/zookeeper_container.py @@ -3,10 +3,10 @@ from testcontainers.core.network import Network import docker -from tests.containers.permitContainer import PermitContainer +from tests.containers.opal_test_container import OpalTestContainer -class ZookeeperContainer(PermitContainer, DockerContainer): +class ZookeeperContainer(OpalTestContainer, DockerContainer): def __init__( self, network: Network, @@ -20,7 +20,7 @@ def __init__( self.network = network - PermitContainer.__init__(self) + OpalTestContainer.__init__(self) DockerContainer.__init__( self, image="confluentinc/cp-zookeeper:latest", diff --git a/tests/fixtures/broadcasters.py b/tests/fixtures/broadcasters.py index 0fa65d246..2272bf394 100644 --- a/tests/fixtures/broadcasters.py +++ b/tests/fixtures/broadcasters.py @@ -38,7 +38,7 @@ def postgres_broadcast_channel(opal_network: Network): except Exception as e: logger.error( - f"Failed on container: {container} with error: {e} {e.__traceback__}" + f"Failed on container: {container if container is not None else None} with error: {e} {e.__traceback__}" ) return diff --git a/tests/test_app.py b/tests/test_app.py index 4ca2d6daf..14f4b2052 100644 --- a/tests/test_app.py +++ b/tests/test_app.py @@ -11,7 +11,10 @@ from tests import utils from tests.containers.broadcast_container_base import BroadcastContainerBase from tests.containers.gitea_container import GiteaContainer -from tests.containers.opal_client_container import OpalClientContainer, PermitContainer +from tests.containers.opal_client_container import ( + OpalClientContainer, + OpalTestContainer, +) from 
tests.containers.opal_server_container import OpalServerContainer from tests.policy_repos.policy_repo_factory import SupportedPolicyRepo from tests.settings import PyTestSessionSettings, session_matrix From 59b34044d8f790b9942bd80b283fd90ce7192318 Mon Sep 17 00:00:00 2001 From: Israel Weinberg Date: Wed, 8 Jan 2025 22:36:54 +0200 Subject: [PATCH 04/18] Add end-to-end tests to GitHub Actions workflow --- .github/workflows/tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 92767b793..171c838ca 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -126,3 +126,8 @@ jobs: git config --global user.email "<>" ./run.sh + + - name: e2e tests + working-directory: ./tests + run: | + ./run.sh From 58ada4a22e05d77a7c0bde99ba7b93188f9a50f8 Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Thu, 9 Jan 2025 10:09:43 +0200 Subject: [PATCH 05/18] Refactor log waiting mechanism to use threading for improved performance and add detailed docstring --- tests/containers/opal_test_container.py | 94 +++++++++++++++++-------- tests/test_app.py | 4 +- 2 files changed, 68 insertions(+), 30 deletions(-) diff --git a/tests/containers/opal_test_container.py b/tests/containers/opal_test_container.py index 8e5bef536..5c4f37f1f 100644 --- a/tests/containers/opal_test_container.py +++ b/tests/containers/opal_test_container.py @@ -1,3 +1,5 @@ +import threading +import asyncio import re import time from datetime import datetime @@ -18,33 +20,69 @@ def __init__(self, **kwargs): r"\x1b\[.*?(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}\+\d{4})" ) + def wait_for_log( self, log_str: str, timeout: int, reference_timestamp: datetime | None = None - ): - log_found = False - logs = self._container.logs(stream=True) - start_time = time.time() - - for line in logs: - if time.time() - start_time > timeout: - self.opalLogger.warning( - f"{self.settings.container_name} | 
Timeout reached while waiting for the log. | {log_str}" - ) - break - - decoded_line = line.decode("utf-8").strip() - match = re.search(self.timestamp_with_ansi, decoded_line) - if match: - log_timestamp_string = match.group(1) - log_timestamp = datetime.strptime( - log_timestamp_string, "%Y-%m-%dT%H:%M:%S.%f%z" - ) - - if (reference_timestamp is None) or ( - log_timestamp > reference_timestamp - ): - if log_str in decoded_line: - log_found = True - break - - return log_found + ) -> bool: + """ + Wait for a specific log to appear in the container logs after the + reference timestamp. + + Args: + log_str (str): The string to search for in the logs. + timeout (int): Maximum time to wait for the log (in seconds). + reference_timestamp (datetime | None): The timestamp to start checking logs from. + + Returns: + bool: True if the log was found, False if the timeout was reached. + """ + + #timeout = 0.1 + timeout = timeout + + log_found = threading.Event() + + def process_logs(): + """ + Asynchronous sub-function to check logs with timeout handling. + """ + #input(f"Press Enter to continue... searching for: {log_str} | on container: {self.settings.container_name} | timeout set to: {timeout}") + logs = self._container.logs(stream=True) # Stream logs + start_time = time.time() + + for line in logs: # Synchronous iteration over logs + elapsed_time = time.time() - start_time + if elapsed_time > timeout: + self.opalLogger.warning( + f"{self.settings.container_name} | Timeout reached while waiting for the log. 
| {log_str}" + ) + break + + decoded_line = line.decode("utf-8").strip() + + # Extract timestamp if present + match = re.search(self.timestamp_with_ansi, decoded_line) + if match: + log_timestamp_string = match.group(1) + log_timestamp = datetime.strptime( + log_timestamp_string, "%Y-%m-%dT%H:%M:%S.%f%z" + ) + + if reference_timestamp is None or log_timestamp > reference_timestamp: + if log_str in decoded_line: + log_found.set() # Signal that the log was found + break + + log_thread = threading.Thread(target=process_logs) + log_thread.start() + + log_thread.join(timeout=float(timeout)) + + if not log_found.is_set(): + self.opalLogger.warning( + f"{self.settings.container_name} | Timeout reached while waiting for the log. | {log_str}" + ) + return False + + return True + \ No newline at end of file diff --git a/tests/test_app.py b/tests/test_app.py index 14f4b2052..acb20717f 100644 --- a/tests/test_app.py +++ b/tests/test_app.py @@ -274,8 +274,8 @@ def test_read_statistics( print("Statistics check passed in all attempts.") -@pytest.mark.asyncio -async def test_policy_update( +#@pytest.mark.asyncio +def test_policy_update( gitea_server: GiteaContainer, opal_servers: list[OpalServerContainer], opal_clients: list[OpalClientContainer], From 2c978f6d6fe6ffa1aa883e2d6e775bf84ea263d2 Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Thu, 9 Jan 2025 11:18:13 +0200 Subject: [PATCH 06/18] Refactor PostgresBroadcastContainer usage in tests for improved clarity and error handling --- .../postgres_broadcast_container.py | 2 ++ tests/fixtures/broadcasters.py | 23 ++++++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/tests/containers/postgres_broadcast_container.py b/tests/containers/postgres_broadcast_container.py index 42f343b10..82fa504e8 100644 --- a/tests/containers/postgres_broadcast_container.py +++ b/tests/containers/postgres_broadcast_container.py @@ -35,3 +35,5 @@ def __init__( 
self.with_network_aliases("broadcast_channel") self.with_name(f"postgres_broadcast_channel") + + self.start() diff --git a/tests/fixtures/broadcasters.py b/tests/fixtures/broadcasters.py index 2272bf394..7b0a1a5ff 100644 --- a/tests/fixtures/broadcasters.py +++ b/tests/fixtures/broadcasters.py @@ -24,21 +24,22 @@ def postgres_broadcast_channel(opal_network: Network): unless an exception is raised during teardown. """ try: - with PostgresBroadcastContainer( - network=opal_network, settings=PostgresBroadcastSettings() - ) as container: - yield container + container = PostgresBroadcastContainer( + network=opal_network, + settings=PostgresBroadcastSettings() + ) + yield container - try: - if container.get_wrapped_container().status == "running": - container.stop() - except Exception: - logger.error(f"Failed to stop containers: {container}") - return + try: + if container.get_wrapped_container().status == "running": + container.stop() + except Exception: + logger.error(f"Failed to stop containers: {container.settings.container_name}") + return except Exception as e: logger.error( - f"Failed on container: {container if container is not None else None} with error: {e} {e.__traceback__}" + f"Failed on container: {container.settings.container_name} with error: {e} {e.__traceback__}" ) return From d8a8edf57062ef9715cb91d56b5a8fc5a066aa4c Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:02:38 +0200 Subject: [PATCH 07/18] Fix sleep duration in GitHub Actions and adjust context path in Docker image build; add end-to-end tests workflow --- .github/workflows/e2e_tests.yml | 44 +++++++++++++++++++++++++++++++++ tests/conftest.py | 2 +- tests/utils.py | 2 +- 3 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/e2e_tests.yml diff --git a/.github/workflows/e2e_tests.yml b/.github/workflows/e2e_tests.yml new file mode 100644 index 000000000..0e9e5998d --- /dev/null +++ 
b/.github/workflows/e2e_tests.yml @@ -0,0 +1,44 @@ +name: Run Tests + +on: + push: + branches: + - e2e-deploy + pull_request: + branches: + - e2e-deploy + +jobs: + test: + runs-on: ubuntu-latest + + steps: + # Checkout the repository + - name: Checkout code + uses: actions/checkout@v3 + + # Set up Python + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' # Adjust the version as needed + + # Install opal dependencies + - name: Install dependencies + run: pip install -r requirements.txt + + # Ensure Docker is installed and running + - name: Set up Docker + uses: docker/setup-buildx-action@v2 + + # cd into the tests directory + - name: cd into tests directory + run: cd tests + + # Install test dependencies + - name: Install dependencies + run: pip install -r requirements.txt + + # Run the tests using the script + - name: Run tests + run: ./run.sh diff --git a/tests/conftest.py b/tests/conftest.py index bdded10dc..4bf2046f2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -469,7 +469,7 @@ def wait_sometime(): if os.getenv("GITHUB_ACTIONS") == "true": print("Running inside GitHub Actions. Sleeping for 30 seconds...") - time.sleep(3600) # Sleep for 30 seconds + time.sleep(30) # Sleep for 30 seconds else: print("Running on the local machine. Press Enter to continue...") input() # Wait for key press diff --git a/tests/utils.py b/tests/utils.py index 2cab0962d..5ea1d8077 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -60,7 +60,7 @@ def build_docker_image(docker_file: str, image_name: str, session_matrix: dict): if "tests" in os.path.abspath(__file__): logger.info(f"Right now the file is {os.path.abspath(__file__)}") context_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), "..", "..", "opal") + os.path.join(os.path.dirname(__file__), "..") ) else: context_path = ".." 
From 7da2b5f22e060194ed9d25ac7ba6d2250257c8f6 Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:04:27 +0200 Subject: [PATCH 08/18] Update e2e_tests workflow to change directory command and add listing before test execution --- .github/workflows/e2e_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e_tests.yml b/.github/workflows/e2e_tests.yml index 0e9e5998d..766c98d0c 100644 --- a/.github/workflows/e2e_tests.yml +++ b/.github/workflows/e2e_tests.yml @@ -33,7 +33,7 @@ jobs: # cd into the tests directory - name: cd into tests directory - run: cd tests + run: cd ./tests # Install test dependencies - name: Install dependencies @@ -41,4 +41,4 @@ jobs: # Run the tests using the script - name: Run tests - run: ./run.sh + run: ls -la && ./run.sh From a52803d205ec9d038e966e776d02face87267103 Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:06:19 +0200 Subject: [PATCH 09/18] Refactor e2e_tests workflow to streamline directory navigation and improve dependency installation --- .github/workflows/e2e_tests.yml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/.github/workflows/e2e_tests.yml b/.github/workflows/e2e_tests.yml index 766c98d0c..3177b977a 100644 --- a/.github/workflows/e2e_tests.yml +++ b/.github/workflows/e2e_tests.yml @@ -24,21 +24,17 @@ jobs: python-version: '3.x' # Adjust the version as needed # Install opal dependencies - - name: Install dependencies + - name: Install opal dependencies run: pip install -r requirements.txt # Ensure Docker is installed and running - name: Set up Docker uses: docker/setup-buildx-action@v2 - # cd into the tests directory - - name: cd into tests directory - run: cd ./tests - # Install test dependencies - - name: Install dependencies - run: pip install -r requirements.txt + - name: Install test dependencies + run: cd 
./tests && pip install -r requirements.txt # Run the tests using the script - name: Run tests - run: ls -la && ./run.sh + run: cd ./tests && ./run.sh From c247d85fceb73cba1fbe7b8c5ca9d85fe74fa70f Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:14:14 +0200 Subject: [PATCH 10/18] Update e2e_tests workflow to trigger on master branch for improved test execution consistency --- .github/workflows/e2e_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e_tests.yml b/.github/workflows/e2e_tests.yml index 3177b977a..7b590e3e7 100644 --- a/.github/workflows/e2e_tests.yml +++ b/.github/workflows/e2e_tests.yml @@ -3,10 +3,10 @@ name: Run Tests on: push: branches: - - e2e-deploy + - master pull_request: branches: - - e2e-deploy + - master jobs: test: From f7c4b62c40c2f201474885101fe4f7f41565379b Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:15:46 +0200 Subject: [PATCH 11/18] Remove redundant e2e tests step in workflow for cleaner execution --- .github/workflows/tests.yml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 171c838ca..6262457aa 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -125,9 +125,4 @@ jobs: git config --global user.name "$GITHUB_ACTOR" git config --global user.email "<>" - ./run.sh - - - name: e2e tests - working-directory: ./tests - run: | - ./run.sh + ./run.sh \ No newline at end of file From d3b782f624c63eb792ac451edb0c1add48332f04 Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:20:42 +0200 Subject: [PATCH 12/18] Add newline at end of run.sh execution in tests workflow for consistency --- .github/workflows/tests.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6262457aa..3789dde91 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -125,4 +125,5 @@ jobs: git config --global user.name "$GITHUB_ACTOR" git config --global user.email "<>" - ./run.sh \ No newline at end of file + ./run.sh + \ No newline at end of file From 81f6a5d9be357b1f08f3ed8d2510fd0def08931a Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:22:00 +0200 Subject: [PATCH 13/18] Remove trailing newline at end of run.sh execution in tests workflow --- .github/workflows/tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3789dde91..92767b793 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -126,4 +126,3 @@ jobs: git config --global user.email "<>" ./run.sh - \ No newline at end of file From 862f857daf6bd6c6673702a75bae8269b2c3413f Mon Sep 17 00:00:00 2001 From: Israel Weinberg Date: Fri, 10 Jan 2025 10:29:16 +0200 Subject: [PATCH 14/18] Remove unused debugpy import from api.py --- packages/opal-server/opal_server/data/api.py | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/opal-server/opal_server/data/api.py b/packages/opal-server/opal_server/data/api.py index b4c82dd09..3ef9d5732 100644 --- a/packages/opal-server/opal_server/data/api.py +++ b/packages/opal-server/opal_server/data/api.py @@ -1,6 +1,5 @@ from typing import Optional -import debugpy from fastapi import APIRouter, Depends, Header, HTTPException, status from fastapi.responses import RedirectResponse from opal_common.authentication.authz import ( From 8d9037192ccc9c6b69cebe303f6c4438e387265f Mon Sep 17 00:00:00 2001 From: Israel Weinberg Date: Fri, 10 Jan 2025 10:33:32 +0200 Subject: [PATCH 15/18] Rename workflow to 'E2E Tests' and add trigger for completed 'Tests' workflow --- .github/workflows/e2e_tests.yml | 6 +++++- 1 file changed, 
5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/e2e_tests.yml b/.github/workflows/e2e_tests.yml index 7b590e3e7..2e771b21b 100644 --- a/.github/workflows/e2e_tests.yml +++ b/.github/workflows/e2e_tests.yml @@ -1,4 +1,4 @@ -name: Run Tests +name: E2E Tests on: push: @@ -7,6 +7,10 @@ on: pull_request: branches: - master + workflow_run: + workflows: ["Tests"] + types: + - completed jobs: test: From 6694f511927c60262a976d2e11f3c931c9cd62d6 Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:38:49 +0200 Subject: [PATCH 16/18] Refactor start.sh to remove sleep and debugpy execution, streamline gunicorn startup --- scripts/start.sh | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/scripts/start.sh b/scripts/start.sh index 16aebbeea..00ec14322 100755 --- a/scripts/start.sh +++ b/scripts/start.sh @@ -5,8 +5,6 @@ export GUNICORN_CONF=${GUNICORN_CONF:-./gunicorn_conf.py} export GUNICORN_TIMEOUT=${GUNICORN_TIMEOUT:-30} export GUNICORN_KEEP_ALIVE_TIMEOUT=${GUNICORN_KEEP_ALIVE_TIMEOUT:-5} -sleep 10 - if [[ -z "${OPAL_BROADCAST_URI}" && "${UVICORN_NUM_WORKERS}" != "1" ]]; then echo "OPAL_BROADCAST_URI must be set when having multiple workers" exit 1 @@ -17,9 +15,4 @@ prefix="" if [[ -z "${OPAL_ENABLE_DATADOG_APM}" && "${OPAL_ENABLE_DATADOG_APM}" = "true" ]]; then prefix=ddtrace-run fi - -#(set -x; exec $prefix gunicorn --reload -b 0.0.0.0:${UVICORN_PORT} -k uvicorn.workers.UvicornWorker --workers=${UVICORN_NUM_WORKERS} -c ${GUNICORN_CONF} ${UVICORN_ASGI_APP} -t ${GUNICORN_TIMEOUT} --keep-alive ${GUNICORN_KEEP_ALIVE_TIMEOUT}) -(set -x; exec $prefix python -m debugpy --listen 0.0.0.0:5678 -m uvicorn ${UVICORN_ASGI_APP} --reload --host 0.0.0.0 --port ${UVICORN_PORT} ) - -# write a code that will wait for the user to press enter -read -n1 -r -p "Press any key to continue..." 
key +(set -x; exec $prefix gunicorn -b 0.0.0.0:${UVICORN_PORT} -k uvicorn.workers.UvicornWorker --workers=${UVICORN_NUM_WORKERS} -c ${GUNICORN_CONF} ${UVICORN_ASGI_APP} -t ${GUNICORN_TIMEOUT} --keep-alive ${GUNICORN_KEEP_ALIVE_TIMEOUT}) \ No newline at end of file From 8d3b0001b0ca3e618a62f660b868986059db143f Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:43:06 +0200 Subject: [PATCH 17/18] Fix Dockerfile paths to use relative references for consistency --- docker/Dockerfile | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 9ad66f271..6aa06c591 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -5,10 +5,10 @@ FROM python:3.10-bookworm AS build-stage # from now on, work in the /app directory WORKDIR /app/ # Layer dependency install (for caching) -COPY ../packages/requires.txt ./base_requires.txt -COPY ../packages/opal-common/requires.txt ./common_requires.txt -COPY ../packages/opal-client/requires.txt ./client_requires.txt -COPY ../packages/opal-server/requires.txt ./server_requires.txt +COPY ./packages/requires.txt ./base_requires.txt +COPY ./packages/opal-common/requires.txt ./common_requires.txt +COPY ./packages/opal-client/requires.txt ./client_requires.txt +COPY ./packages/opal-server/requires.txt ./server_requires.txt # install python deps RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt @@ -16,7 +16,7 @@ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./ # split this stage to save time and reduce image size # --------------------------------------------------- FROM rust:1.79 AS cedar-builder -COPY ../cedar-agent /tmp/cedar-agent +COPY ./cedar-agent /tmp/cedar-agent WORKDIR /tmp/cedar-agent RUN CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse cargo build 
--release @@ -34,7 +34,7 @@ RUN useradd -m -b / -s /bin/bash opal WORKDIR /opal # copy wait-for script (create link at old path to maintain backward compatibility) -COPY ../scripts/wait-for.sh . +COPY ./scripts/wait-for.sh . RUN chmod +x ./wait-for.sh RUN ln -s /opal/wait-for.sh /usr/wait-for.sh @@ -42,15 +42,15 @@ RUN ln -s /opal/wait-for.sh /usr/wait-for.sh RUN apt-get update && apt-get install -y netcat-traditional jq wget && apt-get clean # copy startup script (create link at old path to maintain backward compatibility) -COPY ../scripts/start.sh . +COPY ./scripts/start.sh . RUN chmod +x ./start.sh RUN ln -s /opal/start.sh /start.sh # copy gunicorn_config -COPY ../scripts/gunicorn_conf.py . +COPY ./scripts/gunicorn_conf.py . # copy app code -COPY ../README.md . -COPY ../packages ./packages/ +COPY ./README.md . +COPY ./packages ./packages/ # install the opal-common package RUN cd ./packages/opal-common && python setup.py install # Make sure scripts in .local are usable: From 71f2b35405e3b281f9b8335c3bfa147f87828d58 Mon Sep 17 00:00:00 2001 From: ariWeinberg <66802642+ariWeinberg@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:44:40 +0200 Subject: [PATCH 18/18] Update Dockerfile to use relative path for wait-for.sh and remove unused port exposure --- docker/Dockerfile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 6aa06c591..a14953117 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -34,7 +34,7 @@ RUN useradd -m -b / -s /bin/bash opal WORKDIR /opal # copy wait-for script (create link at old path to maintain backward compatibility) -COPY ./scripts/wait-for.sh . +COPY scripts/wait-for.sh . RUN chmod +x ./wait-for.sh RUN ln -s /opal/wait-for.sh /usr/wait-for.sh @@ -119,8 +119,6 @@ ENV OPAL_INLINE_OPA_ENABLED=true ENV OPAL_INLINE_OPA_EXEC_PATH=/opal/opa # expose opa port EXPOSE 8181 -EXPOSE 5678 - USER opal # CEDAR CLIENT IMAGE --------------------------------