# Development Docker Compose stack for cpv3: Postgres, MinIO (S3), Redis,
# the FastAPI-style backend (`api`) and the dramatiq worker (`worker`).
# The two x- extension fields below are shared via YAML anchors so the api
# and worker services build the same image and see the same environment.

# Shared image/build definition for api and worker (merged with <<:).
x-backend-image: &backend-image
  image: cpv3-backend:dev
  build:
    context: .
    dockerfile: Dockerfile
    target: dev

# Shared backend environment (merged into each service's `environment:`).
# Interpolated ${VAR:-default} values come from the host shell / .env file.
x-backend-env: &backend-env
  DEBUG: ${DEBUG:-1}
  JWT_SECRET_KEY: ${JWT_SECRET_KEY:-dev-secret}
  POSTGRES_USER: ${POSTGRES_USER:-postgres}
  POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
  POSTGRES_HOST: db
  # Quoted so the value stays a string rather than a YAML integer.
  POSTGRES_PORT: "5432"
  POSTGRES_DATABASE: ${POSTGRES_DATABASE:-coffee_project_db}
  STORAGE_BACKEND: ${STORAGE_BACKEND:-S3}
  S3_ACCESS_KEY: ${MINIO_ROOT_USER:-minioadmin}
  S3_SECRET_KEY: ${MINIO_ROOT_PASSWORD:-minioadmin}
  S3_BUCKET_NAME: ${S3_BUCKET_NAME:-coffee-bucket}
  S3_ENDPOINT_URL_INTERNAL: http://minio:9000
  # Used only for generated browser links (presigned URLs)
  S3_ENDPOINT_URL_PUBLIC: http://localhost:9000
  REDIS_URL: redis://redis:6379/0
  WEBHOOK_BASE_URL: http://api:8000
  REMOTION_SERVICE_URL: ${REMOTION_SERVICE_URL:-http://remotion:3001}
  SALUTE_AUTH_KEY: ${SALUTE_AUTH_KEY:-}
  SALUTE_CA_CERT_PATH: ${SALUTE_CA_CERT_PATH:-./.certs/russian_trusted_ca_bundle.pem}
  SALUTE_SSL_VERIFY: ${SALUTE_SSL_VERIFY:-true}
  SALUTE_SCOPE: ${SALUTE_SCOPE:-SALUTE_SPEECH_PERS}

services:
  db:
    container_name: cpv3_postgres
    image: postgres:16
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      POSTGRES_DB: ${POSTGRES_DATABASE:-coffee_project_db}
    ports:
      # Bound to loopback only; host port 5332 avoids clashing with a local Postgres.
      - "127.0.0.1:5332:5432"
    healthcheck:
      # NOTE: interpolation reads the host/.env environment, so this must use
      # POSTGRES_DATABASE (the variable this file is driven by), not POSTGRES_DB —
      # the latter is only the in-container name set above and is never defined
      # on the host, so it would always fall back to the default.
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DATABASE:-coffee_project_db}"]
      interval: 5s
      timeout: 3s
      retries: 20
    volumes:
      - cpv3_db:/var/lib/postgresql/data
    networks:
      - db-net

  minio:
    container_name: cpv3_minio
    image: minio/minio:RELEASE.2025-09-07T16-13-09Z
    restart: unless-stopped
    ports:
      # 9000 = S3 API, 9001 = web console; exposed on all interfaces so
      # presigned localhost:9000 links work from the browser.
      - "0.0.0.0:9000:9000"
      - "0.0.0.0:9001:9001"
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
    command: server /data --console-address ":9001"
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 10s
      timeout: 5s
      retries: 5
    volumes:
      - cpv3_minio:/data
    networks:
      - db-net
      - app-net

  redis:
    container_name: cpv3_redis
    image: redis:7-alpine
    restart: unless-stopped
    ports:
      - "127.0.0.1:6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 10
    volumes:
      - cpv3_redis:/data
    networks:
      - db-net
      - app-net

  api:
    container_name: cpv3_api
    <<: *backend-image
    restart: unless-stopped
    env_file: .env
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      # Uses the image's own Python rather than curl/wget (not in the image).
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/api/health/')"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    environment:
      <<: *backend-env
    ports:
      - "0.0.0.0:8000:8000"
    volumes:
      # Bind mounts give the dev server hot reload on source changes.
      - ./cpv3:/app/cpv3
      - ./alembic:/app/alembic
      - ./alembic.ini:/app/alembic.ini
      - ./.certs:/app/.certs:ro
    networks:
      - db-net
      - app-net
    deploy:
      resources:
        limits:
          memory: 512m
          cpus: "1"

  worker:
    container_name: cpv3_worker
    env_file: .env
    <<: *backend-image
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "pgrep -f dramatiq || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 3
    environment:
      <<: *backend-env
    # watchfiles restarts dramatiq whenever Python files in /app/cpv3 change.
    # This gives the worker the same hot-reload experience as the API.
    command: >
      watchfiles
      --filter python
      'dramatiq cpv3.modules.tasks.service --processes 1 --threads 2'
      /app/cpv3
    volumes:
      - ./cpv3:/app/cpv3
      - ./.certs:/app/.certs:ro
    networks:
      - db-net
      - app-net
    deploy:
      resources:
        limits:
          memory: 1g
          cpus: "1"

volumes:
  cpv3_db:
  cpv3_minio:
  cpv3_redis:

networks:
  db-net:
    driver: bridge
  app-net:
    driver: bridge