Skip to content
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 6 additions & 12 deletions .github/workflows/deploy-production.yml
Original file line number Diff line number Diff line change
Expand Up @@ -98,17 +98,10 @@ jobs:
# to do with the GHA workflow "env" settings.
export PATH="/home/runner/.pulumi/bin:$PATH"

# Create a YAML config stump containing only the nested tree leading to the image tag update

# Create a YAML config stump containing only the new image tag as a YAML variable
cd pulumi
cat << EOF > newimage.yaml
resources:
tb:fargate:FargateClusterWithLogging:
backend:
task_definition:
container_definitions:
backend:
image: "${{ steps.pulumi-tag-extract.outputs.pulumi_tag }}"
EOF
echo ".apmt_image: &APMT_IMAGE ${{ steps.pulumi-tag-extract.outputs.pulumi_tag }}" > newimage.yaml
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is simpler now due to the DRYing out of the task definitions.


# Use yq to merge the stump into the main config
yq -i '. *= load("newimage.yaml")' config.prod.yaml
Expand All @@ -118,8 +111,9 @@ jobs:
pulumi login
pulumi stack select prod
TBPULUMI_DISABLE_PROTECTION=True \
pulumi up -y --diff --target \
'urn:pulumi:prod::appointment::tb:fargate:FargateClusterWithLogging$aws:ecs/taskDefinition:TaskDefinition::appointment-prod-fargate-backend-taskdef' \
pulumi up -y --diff \
--target 'urn:pulumi:prod::appointment::tb:fargate:FargateClusterWithLogging$aws:ecs/taskDefinition:TaskDefinition::appointment-prod-fargate-backend-taskdef' \
--target 'urn:pulumi:prod::appointment::tb:fargate:AutoscalingFargateCluster::appointment-prod-afc-appointment' \
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This makes sure that images get deployed to both clusters when we do a release.

Comment thread
davinotdavid marked this conversation as resolved.
Outdated
--target-dependents

prod-sanity-browserstack:
Expand Down
17 changes: 5 additions & 12 deletions .github/workflows/deploy-staging.yml
Original file line number Diff line number Diff line change
Expand Up @@ -137,16 +137,8 @@ jobs:

cd pulumi

# Create a YAML config stump containing only the nested tree leading to the image tag update
cat << EOF > newimage.yaml
resources:
tb:fargate:FargateClusterWithLogging:
backend:
task_definition:
container_definitions:
backend:
image: "$ECR_TAG"
EOF
# Create a YAML config stump containing only the new image tag as a YAML variable
echo ".apmt_image: &APMT_IMAGE $ECR_TAG" > newimage.yaml

# Use yq to merge the stump into the main config
yq -i '. *= load("newimage.yaml")' config.stage.yaml
Expand All @@ -155,8 +147,9 @@ jobs:
export PULUMI_CONFIG_PASSPHRASE="${{ secrets.PULUMI_PASSPHRASE }}"
pulumi login
pulumi stack select stage
pulumi up -y --diff --target \
'urn:pulumi:stage::appointment::tb:fargate:FargateClusterWithLogging$aws:ecs/taskDefinition:TaskDefinition::appointment-stage-fargate-backend-taskdef' \
pulumi up -y --diff \
--target 'urn:pulumi:stage::appointment::tb:fargate:FargateClusterWithLogging$aws:ecs/taskDefinition:TaskDefinition::appointment-stage-fargate-backend-taskdef' \
--target 'urn:pulumi:stage::appointment::tb:fargate:AutoscalingFargateCluster::appointment-stage-afc-appointment' \
--target-dependents

create-release:
Expand Down
1 change: 1 addition & 0 deletions backend/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ celery[redis]==5.*
cryptography==46.0.6
dnspython==2.*
fastapi[standard]==0.*
flower>=2.0.1,<3
fluent.runtime==0.4.0
fluent.syntax==0.19.0
google-api-python-client==2.*
Expand Down
7 changes: 6 additions & 1 deletion backend/scripts/entry.sh
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ if [[ "$CONTAINER_ROLE" == "worker" ]]; then
elif [[ "$CONTAINER_ROLE" == "beat" ]]; then
echo "Starting Celery beat scheduler..."
celery -A appointment.celery_app:celery beat -l INFO
else
elif [[ "$CONTAINER_ROLE" == "flower" ]]; then
celery -A appointment.celery_app:celery flower -l INFO
elif [[ "$CONTAINER_ROLE" == "api" ]]; then
if [[ "$IS_LOCAL_DEV" == "yes" ]]; then
echo "Running setup"
run-command main setup
Expand All @@ -35,4 +37,7 @@ else

echo "Running uvicorn with these arguments: '$ARGS'"
uvicorn $ARGS
else
echo "Unrecognized CONTAINER_ROLE: $CONTAINER_ROLE"
exit 1
fi
11 changes: 11 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,17 @@ services:
- backend
- redis

celery-flower:
<<: *backend
ports:
- "5556:5555"
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Davi requested this port exposure change so that this does not conflict with a simultaneously running Flower container for a local dev instance of Accounts.

environment:
- CONTAINER_ROLE=flower
- FLOWER_UNAUTHENTICATED_API=true
depends_on:
- backend
- redis

frontend:
build: ./frontend
volumes:
Expand Down
21 changes: 21 additions & 0 deletions pulumi/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,16 @@
resources = project.config.get('resources')
cloudflare_zone_id = project.pulumi_config.require_secret('cloudflare_zone_id')

# Create CloudWatch Logs groups to store application logs, etc. in
logdests = {
dest_name: tb_pulumi.cloudwatch.LogDestination(
f'{project.name_prefix}-logdest-{dest_name}',
project=project,
**dest_config,
)
for dest_name, dest_config in resources.get('tb:cloudwatch:LogDestination', {}).items()
}

# Create some private network space
vpc_opts = resources['tb:network:MultiCidrVpc'].get('appointment', {})
vpc = tb_pulumi.network.MultiCidrVpc(name=f'{project.name_prefix}-vpc', project=project, **vpc_opts)
Expand Down Expand Up @@ -70,6 +80,17 @@
vpc=vpc,
)

# AutoscalingFargateClusters, the newer class that replaces the above model. For now, we run it alongside.
afcs = {
afc_name: tb_pulumi.fargate.AutoscalingFargateCluster(
f'{project.name_prefix}-afc-{afc_name}',
project=project,
subnets=vpc.resources['subnets'],
**afc_config,
)
for afc_name, afc_config in resources.get('tb:fargate:AutoscalingFargateCluster', {}).items()
}

# CloudFront function to handle request rewrites headed to the backend
rewrite_function = cloudfront.rewrite_function(project=project)
project.resources['cf_rewrite_function'] = rewrite_function
Expand Down
Loading
Loading