Quickly Deploy Common Services with Docker Compose
Preface
- Install Docker following the official documentation:
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
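Optionally, verify the installation and let your user run Docker without sudo (both steps come from the official post-install instructions; log out and back in for the group change to take effect):
sudo docker run hello-world
sudo usermod -aG docker $USER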
Common commands
- Start a service
cd ~/apps/mysql   # the directory that contains docker-compose.yml
docker compose up -d
- Stop a service
cd ~/apps/mysql
docker compose down
- View container logs
docker container ls
docker logs [id]
- Enter a container
docker exec -it [id] /bin/bash
Common configuration overview
I prefer to keep the volume directories next to the docker-compose.yml file, which makes them easier to manage, so my configurations generally look like this:
services:
db:
image: mysql:5.7
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/mysql/confs:/etc/mysql
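DOCKER_VOLUME_DIRECTORY falls back to the current directory. If you would rather keep the data on another disk, Compose picks the variable up from a .env file placed next to docker-compose.yml; a minimal sketch (the path below is only an example):
# apps/mysql/.env
DOCKER_VOLUME_DIRECTORY=/data/docker-volumes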
[SAAS] Dify
- Deployment documentation (overall flow sketched below)
- Edit the .env file so that the deployment listens on a port other than 80:
EXPOSE_NGINX_PORT=8001
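For reference, the overall flow described in the deployment documentation is roughly the following (the repository layout may differ slightly between Dify versions):
git clone https://github.com/langgenius/dify.git
cd dify/docker
cp .env.example .env   # then set EXPOSE_NGINX_PORT=8001 as above
docker compose up -d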
- Reverse proxy with Caddy
dify.yourdomain.com {
reverse_proxy :8001 {
header_up Host {host} # redundant
header_up X-Real-IP {remote}
header_up X-Forwarded-Port {server_port} # redundant
}
}
[SAAS] Umami
- Configuration file
# apps/umami/docker-compose.yml
services:
umami:
image: ghcr.io/umami-software/umami:postgresql-latest
ports:
- "3000:3000"
environment:
DATABASE_URL: postgresql://umami:umami@db:5432/umami
DATABASE_TYPE: postgresql
APP_SECRET: replace-me-with-a-random-string
depends_on:
db:
condition: service_healthy
restart: always
healthcheck:
test: ["CMD-SHELL", "curl http://localhost:3000/api/heartbeat"]
interval: 5s
timeout: 5s
retries: 5
db:
image: postgres:15-alpine
environment:
POSTGRES_DB: umami
POSTGRES_USER: umami
POSTGRES_PASSWORD: umami
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/umami/data:/var/lib/postgresql/data
restart: always
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
interval: 5s
timeout: 5s
retries: 5
- Reverse proxy with Caddy
umami.yourdomain.com {
reverse_proxy :3000 {
header_up Host {host} # redundant
}
}
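After bringing the stack up, the heartbeat endpoint used by the healthcheck above doubles as a quick smoke test; on a fresh install, Umami's documented default login is admin / umami, which you should change immediately:
cd ~/apps/umami
docker compose up -d
curl -i http://localhost:3000/api/heartbeat   # expect HTTP 200 once the app is ready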
[SAAS] Flowise AI
# apps/flowise/docker-compose.yml
services:
flowise:
image: flowiseai/flowise
restart: always
environment:
- PORT=${PORT}
- CORS_ORIGINS=${CORS_ORIGINS}
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
- DEBUG=${DEBUG}
- DATABASE_PATH=${DATABASE_PATH}
- DATABASE_TYPE=${DATABASE_TYPE}
- DATABASE_PORT=${DATABASE_PORT}
- DATABASE_HOST=${DATABASE_HOST}
- DATABASE_NAME=${DATABASE_NAME}
- DATABASE_USER=${DATABASE_USER}
- DATABASE_PASSWORD=${DATABASE_PASSWORD}
- DATABASE_SSL=${DATABASE_SSL}
- DATABASE_SSL_KEY_BASE64=${DATABASE_SSL_KEY_BASE64}
- APIKEY_PATH=${APIKEY_PATH}
- SECRETKEY_PATH=${SECRETKEY_PATH}
- FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE}
- LOG_LEVEL=${LOG_LEVEL}
- LOG_PATH=${LOG_PATH}
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
ports:
- '${PORT}:${PORT}'
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/flowise:/root/.flowise
entrypoint: /bin/sh -c "sleep 3; flowise start"
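The compose file above pulls everything from a .env file in the same directory; variables you leave unset simply expand to empty strings. A minimal sketch with placeholder values, loosely following Flowise's .env.example:
# apps/flowise/.env
PORT=3000
FLOWISE_USERNAME=admin
FLOWISE_PASSWORD=replace-me
DATABASE_PATH=/root/.flowise
APIKEY_PATH=/root/.flowise
SECRETKEY_PATH=/root/.flowise
LOG_PATH=/root/.flowise/logs
BLOB_STORAGE_PATH=/root/.flowise/storage
DISABLE_FLOWISE_TELEMETRY=true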
[SAAS] Firecrawl crawler
- Clone the repository
git clone git@github.com:mendableai/firecrawl.git
- Edit the docker-compose.yml file:
name: firecrawl
x-common-service: &common-service
build:
context: apps/api
args:
- http_proxy=http://10.74.176.8:11080
- https_proxy=http://10.74.176.8:11080
networks:
- backend
extra_hosts:
- "host.docker.internal:host-gateway"
services:
playwright-service:
build:
context: apps/playwright-service-ts
args:
- http_proxy=http://10.74.176.8:11080
- https_proxy=http://10.74.176.8:11080
environment:
- PORT=3003
- PROXY_SERVER=${PROXY_SERVER}
- PROXY_USERNAME=${PROXY_USERNAME}
- PROXY_PASSWORD=${PROXY_PASSWORD}
- BLOCK_MEDIA=${BLOCK_MEDIA}
ports:
- "3003:3003"
networks:
- backend
api:
<<: *common-service
environment:
REDIS_URL: ${REDIS_URL:-redis://redis:6379}
REDIS_RATE_LIMIT_URL: ${REDIS_URL:-redis://redis:6379}
PLAYWRIGHT_MICROSERVICE_URL: ${PLAYWRIGHT_MICROSERVICE_URL:-http://playwright-service:3000}
USE_DB_AUTHENTICATION: ${USE_DB_AUTHENTICATION}
PORT: ${PORT:-3002}
NUM_WORKERS_PER_QUEUE: ${NUM_WORKERS_PER_QUEUE}
OPENAI_API_KEY: ${OPENAI_API_KEY}
OPENAI_BASE_URL: ${OPENAI_BASE_URL}
MODEL_NAME: ${MODEL_NAME:-gpt-4o}
SLACK_WEBHOOK_URL: ${SLACK_WEBHOOK_URL}
LLAMAPARSE_API_KEY: ${LLAMAPARSE_API_KEY}
LOGTAIL_KEY: ${LOGTAIL_KEY}
BULL_AUTH_KEY: ${BULL_AUTH_KEY}
TEST_API_KEY: ${TEST_API_KEY}
POSTHOG_API_KEY: ${POSTHOG_API_KEY}
POSTHOG_HOST: ${POSTHOG_HOST}
SUPABASE_ANON_TOKEN: ${SUPABASE_ANON_TOKEN}
SUPABASE_URL: ${SUPABASE_URL}
SUPABASE_SERVICE_TOKEN: ${SUPABASE_SERVICE_TOKEN}
SCRAPING_BEE_API_KEY: ${SCRAPING_BEE_API_KEY}
HOST: ${HOST:-0.0.0.0}
SELF_HOSTED_WEBHOOK_URL: ${SELF_HOSTED_WEBHOOK_URL}
LOGGING_LEVEL: ${LOGGING_LEVEL}
FLY_PROCESS_GROUP: app
depends_on:
- redis
- playwright-service
ports:
- "3002:3002"
command: [ "pnpm", "run", "start:production" ]
worker:
<<: *common-service
environment:
REDIS_URL: ${REDIS_URL:-redis://redis:6379}
REDIS_RATE_LIMIT_URL: ${REDIS_URL:-redis://redis:6379}
PLAYWRIGHT_MICROSERVICE_URL: ${PLAYWRIGHT_MICROSERVICE_URL:-http://playwright-service:3000}
USE_DB_AUTHENTICATION: ${USE_DB_AUTHENTICATION}
PORT: ${PORT:-3002}
NUM_WORKERS_PER_QUEUE: ${NUM_WORKERS_PER_QUEUE}
OPENAI_API_KEY: ${OPENAI_API_KEY}
OPENAI_BASE_URL: ${OPENAI_BASE_URL}
MODEL_NAME: ${MODEL_NAME:-gpt-4o}
SLACK_WEBHOOK_URL: ${SLACK_WEBHOOK_URL}
LLAMAPARSE_API_KEY: ${LLAMAPARSE_API_KEY}
LOGTAIL_KEY: ${LOGTAIL_KEY}
BULL_AUTH_KEY: ${BULL_AUTH_KEY}
TEST_API_KEY: ${TEST_API_KEY}
POSTHOG_API_KEY: ${POSTHOG_API_KEY}
POSTHOG_HOST: ${POSTHOG_HOST}
SUPABASE_ANON_TOKEN: ${SUPABASE_ANON_TOKEN}
SUPABASE_URL: ${SUPABASE_URL}
SUPABASE_SERVICE_TOKEN: ${SUPABASE_SERVICE_TOKEN}
SCRAPING_BEE_API_KEY: ${SCRAPING_BEE_API_KEY}
HOST: ${HOST:-0.0.0.0}
SELF_HOSTED_WEBHOOK_URL: ${SELF_HOSTED_WEBHOOK_URL}
LOGGING_LEVEL: ${LOGGING_LEVEL}
FLY_PROCESS_GROUP: worker
depends_on:
- redis
- playwright-service
- api
command: [ "pnpm", "run", "workers" ]
redis:
image: redis:alpine
networks:
- backend
command: redis-server --bind 0.0.0.0
networks:
backend:
driver: bridge
- Add the environment variable (.env) file
- On an internal network, configure PROXY_SERVER as needed so requests go out through a proxy
# ===== Required ENVS ======
NUM_WORKERS_PER_QUEUE=8
PORT=3002
HOST=0.0.0.0
REDIS_URL=redis://redis:6379 #for self-hosting using docker, use redis://redis:6379. For running locally, use redis://localhost:6379
REDIS_RATE_LIMIT_URL=redis://redis:6379 #for self-hosting using docker, use redis://redis:6379. For running locally, use redis://localhost:6379
PLAYWRIGHT_MICROSERVICE_URL=http://172.28.214.245:3003/scrape
## To turn on DB authentication, you need to set up supabase.
USE_DB_AUTHENTICATION=false
# ===== Optional ENVS ======
# Supabase Setup (used to support DB authentication, advanced logging, etc.)
SUPABASE_ANON_TOKEN=
SUPABASE_URL=
SUPABASE_SERVICE_TOKEN=
# Other Optionals
# use if you've set up authentication and want to test with a real API key
TEST_API_KEY=
# set if you'd like to test the scraping rate limit
RATE_LIMIT_TEST_API_KEY_SCRAPE=
# set if you'd like to test the crawling rate limit
RATE_LIMIT_TEST_API_KEY_CRAWL=
# set if you'd like to use ScrapingBee to handle JS blocking
SCRAPING_BEE_API_KEY=
# add for LLM dependent features (image alt generation, etc.)
OPENAI_API_KEY=
BULL_AUTH_KEY=@
# use if you're configuring basic logging with logtail
LOGTAIL_KEY=
# set if you have a llamaparse key you'd like to use to parse pdfs
LLAMAPARSE_API_KEY=
# set if you'd like to send slack server health status messages
SLACK_WEBHOOK_URL=
# set if you'd like to send posthog events like job logs
POSTHOG_API_KEY=
# set if you'd like to send posthog events like job logs
POSTHOG_HOST=
STRIPE_PRICE_ID_STANDARD=
STRIPE_PRICE_ID_SCALE=
STRIPE_PRICE_ID_STARTER=
STRIPE_PRICE_ID_HOBBY=
STRIPE_PRICE_ID_HOBBY_YEARLY=
STRIPE_PRICE_ID_STANDARD_NEW=
STRIPE_PRICE_ID_STANDARD_NEW_YEARLY=
STRIPE_PRICE_ID_GROWTH=
STRIPE_PRICE_ID_GROWTH_YEARLY=
HYPERDX_API_KEY=
HDX_NODE_BETA_MODE=1
# set if you'd like to use the fire engine closed beta
FIRE_ENGINE_BETA_URL=
# Proxy settings for Playwright (alternatively, you can use a proxy service like Oxylabs, which rotates IPs for you on every request)
PROXY_SERVER=
PROXY_USERNAME=
PROXY_PASSWORD=
# set if you'd like to block media requests to save proxy bandwidth
BLOCK_MEDIA=
# Set this to the URL of your webhook when using the self-hosted version of FireCrawl
SELF_HOSTED_WEBHOOK_URL=
# Resend API Key for transactional emails
RESEND_API_KEY=
# LOGGING_LEVEL determines the verbosity of logs that the system will output.
# Available levels are:
# NONE - No logs will be output.
# ERROR - For logging error messages that indicate a failure in a specific operation.
# WARN - For logging potentially harmful situations that are not necessarily errors.
# INFO - For logging informational messages that highlight the progress of the application.
# DEBUG - For logging detailed information on the flow through the system, primarily used for debugging.
# TRACE - For logging more detailed information than the DEBUG level.
# Set LOGGING_LEVEL to one of the above options to control logging output.
LOGGING_LEVEL=DEBUG
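With the .env file in place, build and start the stack from the repository root; the API is exposed on port 3002 as configured above:
cd firecrawl
docker compose build
docker compose up -d
docker compose logs -f api   # watch the API service come up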
[Database] MySQL
# apps/mysql/docker-compose.yml
services:
db:
image: mysql:5.7
restart: always
environment:
MYSQL_ROOT_PASSWORD: xxx_PASSWORD
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/mysql/confs:/etc/mysql
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/mysql/logs:/var/log/mysql
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/mysql/data:/var/lib/mysql
ports:
- 3306:3306
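Once the container is up, you can open a MySQL shell inside it (the root password is whatever you set as MYSQL_ROOT_PASSWORD above):
cd ~/apps/mysql
docker compose up -d
docker compose exec db mysql -uroot -p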
[Database] MongoDB
# apps/mongo/docker-compose.yml
services:
mongo:
image: 'mongo:7.0.5'
ports:
- 27017:27017
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/mongo:/data/db # the official mongo image stores its data under /data/db
mongo-express:
image: 'mongo-express:1.0.2'
ports:
- 8081:8081
environment:
ME_CONFIG_BASICAUTH_USERNAME: admin
ME_CONFIG_BASICAUTH_PASSWORD: admin
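mongo-express connects to the database service via its default hostname mongo, so no extra connection settings are needed here; its web UI is on port 8081. For a quick CLI check, the mongo 7.x image bundles mongosh:
cd ~/apps/mongo
docker compose up -d
docker compose exec mongo mongosh --eval 'db.runCommand({ ping: 1 })'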
[Database] Milvus
# apps/milvus/docker-compose.yml
services:
etcd:
container_name: milvus-etcd
restart: always
image: quay.io/coreos/etcd:v3.5.14
environment:
- ETCD_AUTO_COMPACTION_MODE=revision
- ETCD_AUTO_COMPACTION_RETENTION=1000
- ETCD_QUOTA_BACKEND_BYTES=4294967296
- ETCD_SNAPSHOT_COUNT=50000
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/etcd:/etcd
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck:
test: ["CMD", "etcdctl", "endpoint", "health"]
interval: 30s
timeout: 20s
retries: 3
minio:
container_name: milvus-minio
restart: always
image: minio/minio:RELEASE.2023-03-20T20-16-18Z
environment:
MINIO_ACCESS_KEY: minioadmin
MINIO_SECRET_KEY: minioadmin
ports:
- "9001:9001"
- "19000:9000"
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/minio:/minio_data
command: minio server /minio_data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
standalone:
container_name: milvus-standalone
image: milvusdb/milvus:v2.4.11
command: ["milvus", "run", "standalone"]
security_opt:
- seccomp:unconfined
environment:
ETCD_ENDPOINTS: etcd:2379
MINIO_ADDRESS: minio:9000
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
interval: 30s
start_period: 90s
timeout: 20s
retries: 3
ports:
- "19530:19530"
- "9091:9091"
depends_on:
- "etcd"
- "minio"
attu:
container_name: attu
image: zilliz/attu:latest
environment:
MILVUS_URL: milvus-standalone:19530
ports:
- "9002:3000"
depends_on:
- "standalone"
networks:
default:
name: milvus
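A quick way to confirm the deployment is to hit the same health endpoint the compose healthcheck uses; the Attu console is mapped to port 9002:
cd ~/apps/milvus
docker compose up -d
curl -f http://localhost:9091/healthz   # Milvus liveness probe; the gRPC endpoint itself is on 19530
# Attu web console: http://localhost:9002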
[Database] Neo4j
# apps/neo4j/docker-compose.yml
services:
neo4j:
image: neo4j:4.4.38
restart: always
environment:
- NEO4J_AUTH=neo4j/neo4j_password
- NEO4J_apoc_export_file_enabled=true
- NEO4J_apoc_import_file_enabled=true
- NEO4J_apoc_import_file_use__neo4j__config=true
- NEO4J_dbms_connector_bolt_listen__address=:7688
- NEO4J_dbms_connector_bolt_advertised__address=:7688
- NEO4J_PLUGINS=["apoc", "graph-data-science"]
volumes:
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/neo4j/data:/data
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/neo4j/logs:/logs
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/neo4j/config:/config
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/neo4j/plugins:/plugins
ports:
- 7474:7474
- 7688:7688 # must match the bolt listen/advertised address configured above
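The Neo4j Browser is available at http://localhost:7474; for a command-line check, cypher-shell is bundled in the neo4j image (the bolt connector listens on 7688 as configured above):
cd ~/apps/neo4j
docker compose up -d
docker compose exec neo4j cypher-shell -a bolt://localhost:7688 -u neo4j -p neo4j_password 'RETURN 1;'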
[Middleware] Kafka
# apps/kafka/docker-compose.yml
services:
zookeeper:
image: 'bitnami/zookeeper:latest'
restart: always
ports:
- 2181:2181
environment:
ALLOW_ANONYMOUS_LOGIN: "yes"
kafka:
image: 'bitnami/kafka:latest'
restart: always
ports:
- 9092:9092
environment:
KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
ALLOW_PLAINTEXT_LISTENER: "yes"
KAFKA_ADVERTISED_HOST_NAME: 101.43.207.38 ## change this to your host machine's IP
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://101.43.207.38:9092 ## change this to your host machine's IP
KAFKA_ZOOKEEPER_CONNECT: "101.43.207.38:2181"
KAFKA_ADVERTISED_PORT: 9092
KAFKA_BROKER_ID: 1
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
depends_on:
- zookeeper
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8888:8080
environment:
- KAFKA_CLUSTERS_0_NAME=dev_cluster
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=101.43.207.38:9092
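To check that the broker is reachable, the bitnami image ships the standard Kafka CLI tools on its PATH; creating and listing a test topic is a simple smoke test (adjust the bootstrap address if the advertised listener is not reachable from inside the container). The Kafka UI is mapped to port 8888:
cd ~/apps/kafka
docker compose up -d
docker compose exec kafka kafka-topics.sh --bootstrap-server localhost:9092 --create --topic smoke-test --partitions 1 --replication-factor 1
docker compose exec kafka kafka-topics.sh --bootstrap-server localhost:9092 --list
# Kafka UI: http://<host-ip>:8888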