fix(infra): mount .certs/ into api and worker containers for SaluteSpeech TLS

Also add SALUTE_AUTH_KEY and SALUTE_CA_CERT_PATH to shared backend env.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Daniil
2026-04-04 00:56:32 +03:00
parent 9f780f7f81
commit 7d2f444e1c
+136 -87
View File
@@ -1,56 +1,11 @@
services:
  db:
    container_name: cpv3_postgres
    image: postgres:16
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      # Container var POSTGRES_DB is fed from the host-side POSTGRES_DATABASE.
      POSTGRES_DB: ${POSTGRES_DATABASE:-coffee_project_db}
    ports:
      - "5332:5432"
    healthcheck:
      # Compose interpolates ${...} on the HOST before the container runs.
      # The project's .env defines POSTGRES_DATABASE (not POSTGRES_DB), so the
      # same variable must be referenced here — otherwise a custom database
      # name would silently fall back to the default and never be checked.
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DATABASE:-coffee_project_db}"]
      interval: 5s
      timeout: 3s
      retries: 20
    volumes:
      - cpv3_db:/var/lib/postgresql/data
# Shared image/build definition for the backend services; merged into the
# api and worker stanzas via `<<: *backend-image`.
x-backend-image: &backend-image
  image: cpv3-backend:dev
  build:
    context: .
    dockerfile: Dockerfile
    target: dev
minio:
container_name: cpv3_minio
image: minio/minio
ports:
- "9000:9000"
- "9001:9001"
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
command: server /data --console-address ":9001"
volumes:
- cpv3_minio:/data
redis:
container_name: cpv3_redis
image: redis:7-alpine
ports:
- "6379:6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 3s
retries: 10
volumes:
- cpv3_redis:/data
api:
container_name: cpv3_api
build: .
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
environment:
x-backend-env: &backend-env
DEBUG: ${DEBUG:-1}
JWT_SECRET_KEY: ${JWT_SECRET_KEY:-dev-secret}
@@ -73,51 +28,145 @@ services:
WEBHOOK_BASE_URL: http://api:8000
REMOTION_SERVICE_URL: ${REMOTION_SERVICE_URL:-http://remotion:3001}
ports:
- "8000:8000"
command: >
sh -c "uv run alembic upgrade head &&
uv run uvicorn cpv3.main:app --host 0.0.0.0 --port 8000 --reload --reload-dir /app/src"
volumes:
- ./src:/app/src
- ./alembic:/app/alembic
- ./alembic.ini:/app/alembic.ini
SALUTE_AUTH_KEY: ${SALUTE_AUTH_KEY:-}
SALUTE_CA_CERT_PATH: ${SALUTE_CA_CERT_PATH:-./.certs/russian_trusted_ca_bundle.pem}
SALUTE_SSL_VERIFY: ${SALUTE_SSL_VERIFY:-true}
SALUTE_SCOPE: ${SALUTE_SCOPE:-SALUTE_SPEECH_PERS}
worker:
container_name: cpv3_worker
build: .
services:
  db:
    container_name: cpv3_postgres
    image: postgres:16
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      # Container var POSTGRES_DB is fed from the host-side POSTGRES_DATABASE.
      POSTGRES_DB: ${POSTGRES_DATABASE:-coffee_project_db}
    ports:
      # Loopback-only bind: host tooling can connect, the LAN cannot.
      - "127.0.0.1:5332:5432"
    healthcheck:
      # Compose interpolates ${...} on the HOST before the container runs.
      # The project's .env defines POSTGRES_DATABASE (not POSTGRES_DB), so the
      # same variable must be referenced here — otherwise a custom database
      # name would silently fall back to the default and never be checked.
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DATABASE:-coffee_project_db}"]
      interval: 5s
      timeout: 3s
      retries: 20
    volumes:
      - cpv3_db:/var/lib/postgresql/data
    networks:
      - db-net
  minio:
    container_name: cpv3_minio
    # Pinned release for reproducible local environments.
    image: minio/minio:RELEASE.2025-09-07T16-13-09Z
    restart: unless-stopped
    ports:
      # Explicitly exposed on all interfaces (browser/pre-signed URL access).
      - "0.0.0.0:9000:9000"  # S3 API
      - "0.0.0.0:9001:9001"  # web console
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
    command: server /data --console-address ":9001"
    healthcheck:
      # NOTE(review): assumes the `mc` client is present inside this minio
      # image — confirm for the pinned release.
      test: ["CMD", "mc", "ready", "local"]
      interval: 10s
      timeout: 5s
      retries: 5
    volumes:
      - cpv3_minio:/data
    networks:
      - db-net
      - app-net
  redis:
    container_name: cpv3_redis
    image: redis:7-alpine
    restart: unless-stopped
    ports:
      # Loopback-only bind: host tooling can connect, the LAN cannot.
      - "127.0.0.1:6379:6379"
    healthcheck:
      # PING/PONG round-trip — succeeds once the server accepts commands.
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 10
    volumes:
      - cpv3_redis:/data
    networks:
      - db-net
      - app-net
api:
container_name: cpv3_api
<<: *backend-image
restart: unless-stopped
env_file: .env
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
healthcheck:
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/api/health/')"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
environment:
DEBUG: ${DEBUG:-1}
POSTGRES_USER: ${POSTGRES_USER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
POSTGRES_HOST: db
POSTGRES_PORT: 5432
POSTGRES_DATABASE: ${POSTGRES_DATABASE:-coffee_project_db}
STORAGE_BACKEND: ${STORAGE_BACKEND:-S3}
S3_ACCESS_KEY: ${MINIO_ROOT_USER:-minioadmin}
S3_SECRET_KEY: ${MINIO_ROOT_PASSWORD:-minioadmin}
S3_BUCKET_NAME: ${S3_BUCKET_NAME:-coffee-bucket}
S3_ENDPOINT_URL_INTERNAL: http://minio:9000
S3_ENDPOINT_URL_PUBLIC: http://localhost:9000
REDIS_URL: redis://redis:6379/0
WEBHOOK_BASE_URL: http://api:8000
REMOTION_SERVICE_URL: ${REMOTION_SERVICE_URL:-http://localhost:8001}
command: >
uv run dramatiq cpv3.modules.tasks.service --processes 1 --threads 2
<<: *backend-env
ports:
- "0.0.0.0:8000:8000"
volumes:
- ./src:/app/src
- ./cpv3:/app/cpv3
- ./alembic:/app/alembic
- ./alembic.ini:/app/alembic.ini
- ./.certs:/app/.certs:ro
networks:
- db-net
- app-net
deploy:
resources:
limits:
memory: 512m
cpus: "1"
  worker:
    container_name: cpv3_worker
    env_file: .env
    # Merge shared image/build settings (see x-backend-image anchor).
    <<: *backend-image
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      # Healthy while at least one dramatiq process is alive.
      test: ["CMD-SHELL", "pgrep -f dramatiq || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 3
    environment:
      # Shared backend settings (DB, S3, Redis, SaluteSpeech) from the anchor.
      <<: *backend-env
    # watchfiles restarts dramatiq whenever Python files in /app/cpv3 change.
    # This gives the worker the same hot-reload experience as the API.
    # NOTE(review): the api service invokes tools via `uv run`; confirm a bare
    # `dramatiq` resolves on PATH inside this image.
    command: >
      watchfiles --filter python 'dramatiq cpv3.modules.tasks.service --processes 1 --threads 2' /app/cpv3
    volumes:
      - ./cpv3:/app/cpv3
      # SaluteSpeech Russian trusted CA bundle, read-only.
      - ./.certs:/app/.certs:ro
    networks:
      - db-net
      - app-net
    deploy:
      resources:
        limits:
          memory: 1g
          cpus: "1"
# Named volumes — data survives `docker compose down` (without -v).
volumes:
  cpv3_db:
  cpv3_minio:
  cpv3_redis:
# db-net isolates datastore traffic; app-net carries service-to-service traffic.
networks:
  db-net:
    driver: bridge
  app-net:
    driver: bridge