♻️(docker) remove usage of dockerize

We remove dockerize and instead use healthchecks on the Docker Compose
services.
This commit is contained in:
Manuel Raynaud
2025-02-14 12:42:02 +01:00
parent e123e91959
commit 0dc8b4556c
3 changed files with 52 additions and 43 deletions

View File

@@ -88,28 +88,6 @@ jobs:
- name: Start Docker services - name: Start Docker services
run: make bootstrap FLUSH_ARGS='--no-input' cache= run: make bootstrap FLUSH_ARGS='--no-input' cache=
# Tool to wait for a service to be ready
- name: Install Dockerize
run: |
curl -sSL https://github.com/jwilder/dockerize/releases/download/v0.8.0/dockerize-linux-amd64-v0.8.0.tar.gz | sudo tar -C /usr/local/bin -xzv
- name: Wait for services to be ready
run: |
printf "Minio check...\n"
dockerize -wait tcp://localhost:9000 -timeout 20s
printf "Keyclock check...\n"
dockerize -wait tcp://localhost:8080 -timeout 20s
printf "Server collaboration check...\n"
dockerize -wait tcp://localhost:4444 -timeout 20s
printf "Ngnix check...\n"
dockerize -wait tcp://localhost:8083 -timeout 20s
printf "DRF check...\n"
dockerize -wait tcp://localhost:8071 -timeout 20s
printf "Postgres Keyclock check...\n"
dockerize -wait tcp://localhost:5433 -timeout 20s
printf "Postgres back check...\n"
dockerize -wait tcp://localhost:15432 -timeout 20s
- name: Run e2e tests - name: Run e2e tests
run: cd src/frontend/ && yarn e2e:test --project='chromium' run: cd src/frontend/ && yarn e2e:test --project='chromium'

View File

@@ -44,7 +44,6 @@ COMPOSE_EXEC_APP = $(COMPOSE_EXEC) app-dev
COMPOSE_RUN = $(COMPOSE) run --rm COMPOSE_RUN = $(COMPOSE) run --rm
COMPOSE_RUN_APP = $(COMPOSE_RUN) app-dev COMPOSE_RUN_APP = $(COMPOSE_RUN) app-dev
COMPOSE_RUN_CROWDIN = $(COMPOSE_RUN) crowdin crowdin COMPOSE_RUN_CROWDIN = $(COMPOSE_RUN) crowdin crowdin
WAIT_DB = @$(COMPOSE_RUN) dockerize -wait tcp://$(DB_HOST):$(DB_PORT) -timeout 60s
# -- Backend # -- Backend
MANAGE = $(COMPOSE_RUN_APP) python manage.py MANAGE = $(COMPOSE_RUN_APP) python manage.py
@@ -124,8 +123,6 @@ run: ## start the wsgi (production) and development server
@$(COMPOSE) up --force-recreate -d celery-dev @$(COMPOSE) up --force-recreate -d celery-dev
@$(COMPOSE) up --force-recreate -d y-provider @$(COMPOSE) up --force-recreate -d y-provider
@$(COMPOSE) up --force-recreate -d nginx @$(COMPOSE) up --force-recreate -d nginx
@echo "Wait for postgresql to be up..."
@$(WAIT_DB)
.PHONY: run .PHONY: run
run-with-frontend: ## Start all the containers needed (backend to frontend) run-with-frontend: ## Start all the containers needed (backend to frontend)
@@ -188,14 +185,12 @@ test-back-parallel: ## run all back-end tests in parallel
makemigrations: ## run django makemigrations for the impress project. makemigrations: ## run django makemigrations for the impress project.
@echo "$(BOLD)Running makemigrations$(RESET)" @echo "$(BOLD)Running makemigrations$(RESET)"
@$(COMPOSE) up -d postgresql @$(COMPOSE) up -d postgresql
@$(WAIT_DB)
@$(MANAGE) makemigrations @$(MANAGE) makemigrations
.PHONY: makemigrations .PHONY: makemigrations
migrate: ## run django migrations for the impress project. migrate: ## run django migrations for the impress project.
@echo "$(BOLD)Running migrations$(RESET)" @echo "$(BOLD)Running migrations$(RESET)"
@$(COMPOSE) up -d postgresql @$(COMPOSE) up -d postgresql
@$(WAIT_DB)
@$(MANAGE) migrate @$(MANAGE) migrate
.PHONY: migrate .PHONY: migrate

View File

@@ -1,6 +1,11 @@
services: services:
postgresql: postgresql:
image: postgres:16 image: postgres:16
healthcheck:
test: ["CMD-SHELL", "pg_isready"]
interval: 1s
timeout: 2s
retries: 300
env_file: env_file:
- env.d/development/postgresql - env.d/development/postgresql
ports: ports:
@@ -15,7 +20,7 @@ services:
- "1081:1080" - "1081:1080"
minio: minio:
# user: ${DOCKER_USER:-1000} user: ${DOCKER_USER:-1000}
image: minio/minio image: minio/minio
environment: environment:
- MINIO_ROOT_USER=impress - MINIO_ROOT_USER=impress
@@ -23,6 +28,11 @@ services:
ports: ports:
- '9000:9000' - '9000:9000'
- '9001:9001' - '9001:9001'
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 1s
timeout: 20s
retries: 300
entrypoint: "" entrypoint: ""
command: minio server --console-address :9001 /data command: minio server --console-address :9001 /data
volumes: volumes:
@@ -31,7 +41,9 @@ services:
createbuckets: createbuckets:
image: minio/mc image: minio/mc
depends_on: depends_on:
- minio minio:
condition: service_healthy
restart: true
entrypoint: > entrypoint: >
sh -c " sh -c "
/usr/bin/mc alias set impress http://minio:9000 impress password && \ /usr/bin/mc alias set impress http://minio:9000 impress password && \
@@ -59,10 +71,15 @@ services:
- ./src/backend:/app - ./src/backend:/app
- ./data/static:/data/static - ./data/static:/data/static
depends_on: depends_on:
- postgresql postgresql:
- mailcatcher condition: service_healthy
- redis restart: true
- createbuckets mailcatcher:
condition: service_started
redis:
condition: service_started
createbuckets:
condition: service_started
celery-dev: celery-dev:
user: ${DOCKER_USER:-1000} user: ${DOCKER_USER:-1000}
@@ -93,9 +110,13 @@ services:
- env.d/development/common - env.d/development/common
- env.d/development/postgresql - env.d/development/postgresql
depends_on: depends_on:
- postgresql postgresql:
- redis condition: service_healthy
- minio restart: true
redis:
condition: service_started
minio:
condition: service_started
celery: celery:
user: ${DOCKER_USER:-1000} user: ${DOCKER_USER:-1000}
@@ -116,9 +137,13 @@ services:
volumes: volumes:
- ./docker/files/etc/nginx/conf.d:/etc/nginx/conf.d:ro - ./docker/files/etc/nginx/conf.d:/etc/nginx/conf.d:ro
depends_on: depends_on:
- keycloak app-dev:
- app-dev condition: service_started
- y-provider y-provider:
condition: service_started
keycloak:
condition: service_healthy
restart: true
frontend-dev: frontend-dev:
user: "${DOCKER_USER:-1000}" user: "${DOCKER_USER:-1000}"
@@ -135,9 +160,6 @@ services:
ports: ports:
- "3000:3000" - "3000:3000"
dockerize:
image: jwilder/dockerize
crowdin: crowdin:
image: crowdin/cli:3.16.0 image: crowdin/cli:3.16.0
volumes: volumes:
@@ -169,6 +191,11 @@ services:
kc_postgresql: kc_postgresql:
image: postgres:14.3 image: postgres:14.3
healthcheck:
test: ["CMD-SHELL", "pg_isready"]
interval: 1s
timeout: 2s
retries: 300
ports: ports:
- "5433:5432" - "5433:5432"
env_file: env_file:
@@ -187,6 +214,13 @@ services:
- --hostname-admin-url=http://localhost:8083/ - --hostname-admin-url=http://localhost:8083/
- --hostname-strict=false - --hostname-strict=false
- --hostname-strict-https=false - --hostname-strict-https=false
- --health-enabled=true
- --metrics-enabled=true
healthcheck:
test: ["CMD", "curl", "--head", "-fsS", "http://localhost:8080/health/ready"]
interval: 1s
timeout: 2s
retries: 300
environment: environment:
KEYCLOAK_ADMIN: admin KEYCLOAK_ADMIN: admin
KEYCLOAK_ADMIN_PASSWORD: admin KEYCLOAK_ADMIN_PASSWORD: admin
@@ -200,4 +234,6 @@ services:
ports: ports:
- "8080:8080" - "8080:8080"
depends_on: depends_on:
- kc_postgresql kc_postgresql:
condition: service_healthy
restart: true