🔧 Enhance Gitea deployment workflows and fix nginx configuration
- Added new CI/CD workflow `ci-cd-reliable.yml` for reliable deployments with database support.
- Created `docker-compose.zero-downtime-fixed.yml` to address nginx configuration issues for zero-downtime deployments.
- Improved existing workflows to check for nginx configuration file and create a fallback if missing.
- Updated `DEPLOYMENT-FIXES.md` to document new workflows and fixes.
✅ These changes improve deployment reliability and ensure proper nginx configuration for seamless updates.
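For a local dry run before pushing to `production`, the same steps the new workflow performs can be reproduced by hand. A minimal sketch, assuming Docker with the Compose plugin is installed and that placeholder values stand in for the real Gitea variables and secrets:

    # Hypothetical local stand-ins for the Gitea variables/secrets
    export NEXT_PUBLIC_BASE_URL="http://localhost:3000"
    export MY_EMAIL="dev@example.com"
    # ...export the remaining MY_* / ADMIN_BASIC_AUTH values the workflow verifies...

    docker build -t portfolio-app:latest .   # same tag the workflow builds
    docker compose config --quiet            # validate the compose file before starting anything
    docker compose up -d                     # start the services defined in the default compose file
    docker compose ps                        # confirm the containers are running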
.gitea/workflows/ci-cd-reliable.yml (new file, 177 lines)
@@ -0,0 +1,177 @@
name: CI/CD Pipeline (Reliable & Simple)

on:
  push:
    branches: [ production ]

env:
  NODE_VERSION: '20'
  DOCKER_IMAGE: portfolio-app
  CONTAINER_NAME: portfolio-app

jobs:
  production:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Run linting
        run: npm run lint

      - name: Run tests
        run: npm run test

      - name: Build application
        run: npm run build

      - name: Run security scan
        run: |
          echo "🔍 Running npm audit..."
          npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."

      - name: Verify secrets and variables
        run: |
          echo "🔍 Verifying secrets and variables..."

          # Check Variables
          if [ -z "${{ vars.NEXT_PUBLIC_BASE_URL }}" ]; then
            echo "❌ NEXT_PUBLIC_BASE_URL variable is missing!"
            exit 1
          fi
          if [ -z "${{ vars.MY_EMAIL }}" ]; then
            echo "❌ MY_EMAIL variable is missing!"
            exit 1
          fi
          if [ -z "${{ vars.MY_INFO_EMAIL }}" ]; then
            echo "❌ MY_INFO_EMAIL variable is missing!"
            exit 1
          fi

          # Check Secrets
          if [ -z "${{ secrets.MY_PASSWORD }}" ]; then
            echo "❌ MY_PASSWORD secret is missing!"
            exit 1
          fi
          if [ -z "${{ secrets.MY_INFO_PASSWORD }}" ]; then
            echo "❌ MY_INFO_PASSWORD secret is missing!"
            exit 1
          fi
          if [ -z "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
            echo "❌ ADMIN_BASIC_AUTH secret is missing!"
            exit 1
          fi

          echo "✅ All required secrets and variables are present"

      - name: Build Docker image
        run: |
          echo "🏗️ Building Docker image..."
          docker build -t ${{ env.DOCKER_IMAGE }}:latest .
          docker tag ${{ env.DOCKER_IMAGE }}:latest ${{ env.DOCKER_IMAGE }}:$(date +%Y%m%d-%H%M%S)
          echo "✅ Docker image built successfully"

      - name: Deploy with database services
        run: |
          echo "🚀 Deploying with database services..."

          # Export environment variables
          export NODE_ENV="${{ vars.NODE_ENV }}"
          export LOG_LEVEL="${{ vars.LOG_LEVEL }}"
          export NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}"
          export NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}"
          export NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}"
          export MY_EMAIL="${{ vars.MY_EMAIL }}"
          export MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}"
          export MY_PASSWORD="${{ secrets.MY_PASSWORD }}"
          export MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}"
          export ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}"

          # Stop old containers
          echo "🛑 Stopping old containers..."
          docker compose down || true

          # Clean up orphaned containers
          echo "🧹 Cleaning up orphaned containers..."
          docker compose down --remove-orphans || true

          # Start new containers
          echo "🚀 Starting new containers..."
          docker compose up -d

          echo "✅ Deployment completed!"
        env:
          NODE_ENV: ${{ vars.NODE_ENV }}
          LOG_LEVEL: ${{ vars.LOG_LEVEL }}
          NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}
          NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}
          NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}
          MY_EMAIL: ${{ vars.MY_EMAIL }}
          MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}
          MY_PASSWORD: ${{ secrets.MY_PASSWORD }}
          MY_INFO_PASSWORD: ${{ secrets.MY_INFO_PASSWORD }}
          ADMIN_BASIC_AUTH: ${{ secrets.ADMIN_BASIC_AUTH }}

      - name: Wait for containers to be ready
        run: |
          echo "⏳ Waiting for containers to be ready..."
          sleep 20

          # Check if all containers are running
          echo "📊 Checking container status..."
          docker compose ps

          # Wait for application container to be healthy
          echo "🏥 Waiting for application container to be healthy..."
          for i in {1..30}; do
            if docker exec portfolio-app curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
              echo "✅ Application container is healthy!"
              break
            fi
            echo "⏳ Waiting for application container... ($i/30)"
            sleep 3
          done

      - name: Health check
        run: |
          echo "🔍 Running comprehensive health checks..."

          # Check container status
          echo "📊 Container status:"
          docker compose ps

          # Check application container
          echo "🏥 Checking application container..."
          if docker exec portfolio-app curl -f http://localhost:3000/api/health; then
            echo "✅ Application health check passed!"
          else
            echo "❌ Application health check failed!"
            docker logs portfolio-app --tail=50
            exit 1
          fi

          # Check main page
          if curl -f http://localhost:3000/ > /dev/null; then
            echo "✅ Main page is accessible!"
          else
            echo "❌ Main page is not accessible!"
            exit 1
          fi

          echo "✅ All health checks passed! Deployment successful!"

      - name: Cleanup old images
        run: |
          echo "🧹 Cleaning up old images..."
          docker image prune -f
          docker system prune -f
          echo "✅ Cleanup completed"
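The readiness and health-check steps above probe `/api/health` from inside the application container. The same probe can be run by hand to debug a failing pipeline run; for example, on the deployment host (container name `portfolio-app`, as used by the workflow):

    docker compose ps                                                        # container states at a glance
    docker exec portfolio-app curl -fsS http://localhost:3000/api/health     # the same endpoint the workflow polls
    docker logs portfolio-app --tail=50                                      # same log excerpt the workflow prints on failure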
@@ -93,13 +93,50 @@ jobs:
           export MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}"
           export ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}"
 
+          # Check if nginx config file exists
+          echo "🔍 Checking nginx configuration file..."
+          if [ ! -f "nginx-zero-downtime.conf" ]; then
+            echo "⚠️ nginx-zero-downtime.conf not found, creating fallback..."
+            cat > nginx-zero-downtime.conf << 'EOF'
+          events {
+              worker_connections 1024;
+          }
+          http {
+              upstream portfolio_backend {
+                  server portfolio-app-1:3000 max_fails=3 fail_timeout=30s;
+                  server portfolio-app-2:3000 max_fails=3 fail_timeout=30s;
+              }
+              server {
+                  listen 80;
+                  server_name _;
+                  location /health {
+                      access_log off;
+                      return 200 "healthy\n";
+                      add_header Content-Type text/plain;
+                  }
+                  location / {
+                      proxy_pass http://portfolio_backend;
+                      proxy_set_header Host $host;
+                      proxy_set_header X-Real-IP $remote_addr;
+                      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+                      proxy_set_header X-Forwarded-Proto $scheme;
+                  }
+              }
+          }
+          EOF
+          fi
+
           # Stop old containers
           echo "🛑 Stopping old containers..."
-          docker compose -f docker-compose.zero-downtime.yml down || true
+          docker compose -f docker-compose.zero-downtime-fixed.yml down || true
+
+          # Clean up any orphaned containers
+          echo "🧹 Cleaning up orphaned containers..."
+          docker compose -f docker-compose.zero-downtime-fixed.yml down --remove-orphans || true
 
           # Start new containers
           echo "🚀 Starting new containers..."
-          docker compose -f docker-compose.zero-downtime.yml up -d
+          docker compose -f docker-compose.zero-downtime-fixed.yml up -d
 
           echo "✅ Zero downtime deployment completed!"
         env:
@@ -121,7 +158,7 @@ jobs:
 
           # Check if all containers are running
           echo "📊 Checking container status..."
-          docker compose -f docker-compose.zero-downtime.yml ps
+          docker compose -f docker-compose.zero-downtime-fixed.yml ps
 
           # Wait for application containers to be healthy
           echo "🏥 Waiting for application containers to be healthy..."
@@ -153,7 +190,7 @@ jobs:
 
           # Check container status
           echo "📊 Container status:"
-          docker compose -f docker-compose.zero-downtime.yml ps
+          docker compose -f docker-compose.zero-downtime-fixed.yml ps
 
           # Check individual application containers
           echo "🏥 Checking individual application containers..."
@@ -203,7 +240,7 @@ jobs:
       - name: Show container status
        run: |
          echo "📊 Container status:"
-          docker compose -f docker-compose.zero-downtime.yml ps
+          docker compose -f docker-compose.zero-downtime-fixed.yml ps
 
       - name: Cleanup old images
         run: |
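Once a zero-downtime deployment is up, the nginx configuration written by the fallback block above can be exercised directly through the load balancer. A quick smoke test, assuming the nginx container publishes port 80 on the deployment host:

    curl -fsS http://localhost/health                               # served by nginx itself, expect "healthy"
    curl -fsS -o /dev/null -w "%{http_code}\n" http://localhost/    # proxied to the portfolio-app upstream pool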
DEPLOYMENT-FIXES.md
@@ -45,22 +45,35 @@ The Gitea Actions were failing with "Connection refused" errors when trying to c
 - `.gitea/workflows/ci-cd-fast.yml`
 - `.gitea/workflows/ci-cd-zero-downtime-fixed.yml`
 - `.gitea/workflows/ci-cd-simple.yml` (new)
+- `.gitea/workflows/ci-cd-reliable.yml` (new)
+
+#### **5. ✅ Fixed Nginx Configuration Issue**
+- **Issue**: Zero-downtime deployment failing due to missing nginx configuration file in Gitea Actions
+- **Fix**: Created `docker-compose.zero-downtime-fixed.yml` with fallback nginx configuration
+- **Added**: Automatic nginx config creation if file is missing
+- **Files**:
+  - `docker-compose.zero-downtime-fixed.yml` (new)
 
 ## Available Workflows
 
-### 1. CI/CD Simple (Recommended)
+### 1. CI/CD Reliable (Recommended)
+- **File**: `.gitea/workflows/ci-cd-reliable.yml`
+- **Description**: Simple, reliable deployment using docker-compose with database services
+- **Best for**: Most reliable deployments with database support
+
+### 2. CI/CD Simple
 - **File**: `.gitea/workflows/ci-cd-simple.yml`
 - **Description**: Uses the improved deployment script with comprehensive error handling
-- **Best for**: Reliable deployments with good debugging
+- **Best for**: Reliable deployments without database dependencies
 
-### 2. CI/CD Fast
+### 3. CI/CD Fast
 - **File**: `.gitea/workflows/ci-cd-fast.yml`
 - **Description**: Fast deployment with rolling updates
 - **Best for**: Production deployments with zero downtime
 
-### 3. CI/CD Zero Downtime
+### 4. CI/CD Zero Downtime (Fixed)
 - **File**: `.gitea/workflows/ci-cd-zero-downtime-fixed.yml`
-- **Description**: Full zero-downtime deployment with nginx load balancer
+- **Description**: Full zero-downtime deployment with nginx load balancer (fixed nginx config issue)
 - **Best for**: Production deployments requiring high availability
 
 ## Testing the Fixes
docker-compose.zero-downtime-fixed.yml (new file, 172 lines)
@@ -0,0 +1,172 @@
# Zero-Downtime Deployment Configuration (Fixed)
# Uses nginx as load balancer for seamless updates
# Fixed to work in Gitea Actions environment

services:
  nginx:
    image: nginx:alpine
    container_name: portfolio-nginx
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      # Use a more robust path that works in CI/CD environments
      - ./nginx-zero-downtime.conf:/etc/nginx/nginx.conf:ro
    networks:
      - portfolio_net
    depends_on:
      - portfolio-app-1
      - portfolio-app-2
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/health"]
      interval: 10s
      timeout: 5s
      retries: 3
    # Fallback: if the config file doesn't exist, create a basic one
    command: >
      sh -c "
      if [ ! -f /etc/nginx/nginx.conf ]; then
        echo 'Creating fallback nginx configuration...'
        cat > /etc/nginx/nginx.conf << 'EOF'
      events {
          worker_connections 1024;
      }
      http {
          upstream portfolio_backend {
              server portfolio-app-1:3000 max_fails=3 fail_timeout=30s;
              server portfolio-app-2:3000 max_fails=3 fail_timeout=30s;
          }
          server {
              listen 80;
              server_name _;
              location /health {
                  access_log off;
                  return 200 'healthy\n';
                  add_header Content-Type text/plain;
              }
              location / {
                  proxy_pass http://portfolio_backend;
                  proxy_set_header Host \$host;
                  proxy_set_header X-Real-IP \$remote_addr;
                  proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
                  proxy_set_header X-Forwarded-Proto \$scheme;
              }
          }
      }
      EOF
      fi
      nginx -g 'daemon off;'
      "

  portfolio-app-1:
    image: portfolio-app:latest
    container_name: portfolio-app-1
    restart: unless-stopped
    environment:
      - NODE_ENV=${NODE_ENV:-production}
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
      - REDIS_URL=redis://redis:6379
      - NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
      - NEXT_PUBLIC_UMAMI_URL=${NEXT_PUBLIC_UMAMI_URL}
      - NEXT_PUBLIC_UMAMI_WEBSITE_ID=${NEXT_PUBLIC_UMAMI_WEBSITE_ID}
      - MY_EMAIL=${MY_EMAIL}
      - MY_INFO_EMAIL=${MY_INFO_EMAIL}
      - MY_PASSWORD=${MY_PASSWORD}
      - MY_INFO_PASSWORD=${MY_INFO_PASSWORD}
      - ADMIN_BASIC_AUTH=${ADMIN_BASIC_AUTH}
    volumes:
      - portfolio_data:/app/.next/cache
    networks:
      - portfolio_net
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s

  portfolio-app-2:
    image: portfolio-app:latest
    container_name: portfolio-app-2
    restart: unless-stopped
    environment:
      - NODE_ENV=${NODE_ENV:-production}
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
      - REDIS_URL=redis://redis:6379
      - NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
      - NEXT_PUBLIC_UMAMI_URL=${NEXT_PUBLIC_UMAMI_URL}
      - NEXT_PUBLIC_UMAMI_WEBSITE_ID=${NEXT_PUBLIC_UMAMI_WEBSITE_ID}
      - MY_EMAIL=${MY_EMAIL}
      - MY_INFO_EMAIL=${MY_INFO_EMAIL}
      - MY_PASSWORD=${MY_PASSWORD}
      - MY_INFO_PASSWORD=${MY_INFO_PASSWORD}
      - ADMIN_BASIC_AUTH=${ADMIN_BASIC_AUTH}
    volumes:
      - portfolio_data:/app/.next/cache
    networks:
      - portfolio_net
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s

  postgres:
    image: postgres:16-alpine
    container_name: portfolio-postgres
    restart: unless-stopped
    environment:
      - POSTGRES_DB=portfolio_db
      - POSTGRES_USER=portfolio_user
      - POSTGRES_PASSWORD=portfolio_pass
    volumes:
      - postgres_data:/var/lib/postgresql/data
    networks:
      - portfolio_net
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U portfolio_user -d portfolio_db"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

  redis:
    image: redis:7-alpine
    container_name: portfolio-redis
    restart: unless-stopped
    volumes:
      - redis_data:/data
    networks:
      - portfolio_net
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

volumes:
  portfolio_data:
    driver: local
  postgres_data:
    driver: local
  redis_data:
    driver: local

networks:
  portfolio_net:
    driver: bridge
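Because nginx balances across two identical application services, an update can also be rolled out one replica at a time while the other keeps serving traffic. A sketch of that manual procedure, assuming a freshly built `portfolio-app:latest` image is already present on the host:

    # Recreate the first replica; nginx keeps routing to portfolio-app-2 in the meantime
    docker compose -f docker-compose.zero-downtime-fixed.yml up -d --no-deps --force-recreate portfolio-app-1

    # Wait until its healthcheck reports healthy before touching the other replica
    docker inspect --format '{{.State.Health.Status}}' portfolio-app-1

    # Repeat for the second replica
    docker compose -f docker-compose.zero-downtime-fixed.yml up -d --no-deps --force-recreate portfolio-app-2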