Compare commits
39 Commits
dev
...
f66844870a
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f66844870a | ||
|
|
6be2feb8dd | ||
|
|
efda383bd8 | ||
|
|
9c6b313435 | ||
|
|
b06151739f | ||
|
|
ed95163f55 | ||
|
|
fc3f9ebf12 | ||
|
|
ca2cbc2c92 | ||
|
|
cc5009a0d6 | ||
|
|
116dac89b3 | ||
|
|
3dbe80edcc | ||
|
|
bdc38d8b57 | ||
|
|
6338a34612 | ||
|
|
58dd60ea64 | ||
|
|
65ad26eeae | ||
|
|
20a6c416e3 | ||
|
|
89e0f9f2f8 | ||
|
|
8d627028cb | ||
|
|
8afc63ef0b | ||
|
|
72456aa7a0 | ||
|
|
4ccb2b146d | ||
|
|
e245e8afe1 | ||
|
|
5a14efb5fc | ||
|
|
7f6694622c | ||
|
|
83705af7f6 | ||
|
|
b34deb3c81 | ||
|
|
a4c61172f6 | ||
|
|
f7e0172111 | ||
|
|
c4bc27273e | ||
|
|
519ca43168 | ||
|
|
09d925745d | ||
|
|
07cf999a9e | ||
|
|
8ea4fc3fd3 | ||
|
|
0bcba1643e | ||
|
|
24ecc720c5 | ||
|
|
690d9e1cfb | ||
|
|
b44250fe0e | ||
|
|
0af21d6fc6 | ||
|
|
a842cb04f3 |
318
.gitea/workflows/ci-cd-fast.yml
Normal file
318
.gitea/workflows/ci-cd-fast.yml
Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
name: CI/CD Pipeline (Fast)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ production ]
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_VERSION: '20'
|
||||||
|
DOCKER_IMAGE: portfolio-app
|
||||||
|
CONTAINER_NAME: portfolio-app
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
production:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Setup Node.js (Fast)
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: ${{ env.NODE_VERSION }}
|
||||||
|
# Disable cache to avoid slow validation
|
||||||
|
cache: ''
|
||||||
|
|
||||||
|
- name: Cache npm dependencies
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: ~/.npm
|
||||||
|
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-node-
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: npm ci --prefer-offline --no-audit
|
||||||
|
|
||||||
|
- name: Run linting
|
||||||
|
run: npm run lint
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: npm run test
|
||||||
|
|
||||||
|
- name: Build application
|
||||||
|
run: npm run build
|
||||||
|
|
||||||
|
- name: Run security scan
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running npm audit..."
|
||||||
|
npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."
|
||||||
|
|
||||||
|
- name: Build Docker image
|
||||||
|
run: |
|
||||||
|
docker build -t ${{ env.DOCKER_IMAGE }}:latest .
|
||||||
|
docker tag ${{ env.DOCKER_IMAGE }}:latest ${{ env.DOCKER_IMAGE }}:$(date +%Y%m%d-%H%M%S)
|
||||||
|
|
||||||
|
- name: Prepare for zero-downtime deployment
|
||||||
|
run: |
|
||||||
|
echo "🚀 Preparing zero-downtime deployment..."
|
||||||
|
|
||||||
|
# Check if current container is running
|
||||||
|
if docker ps -q -f name=portfolio-app | grep -q .; then
|
||||||
|
echo "📊 Current container is running, proceeding with zero-downtime update"
|
||||||
|
CURRENT_CONTAINER_RUNNING=true
|
||||||
|
else
|
||||||
|
echo "📊 No current container running, doing fresh deployment"
|
||||||
|
CURRENT_CONTAINER_RUNNING=false
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Ensure database and redis are running
|
||||||
|
echo "🔧 Ensuring database and redis are running..."
|
||||||
|
docker compose up -d postgres redis
|
||||||
|
|
||||||
|
# Wait for services to be ready
|
||||||
|
sleep 10
|
||||||
|
|
||||||
|
- name: Verify secrets and variables before deployment
|
||||||
|
run: |
|
||||||
|
echo "🔍 Verifying secrets and variables..."
|
||||||
|
|
||||||
|
# Check Variables
|
||||||
|
if [ -z "${{ vars.NEXT_PUBLIC_BASE_URL }}" ]; then
|
||||||
|
echo "❌ NEXT_PUBLIC_BASE_URL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_INFO_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_INFO_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check Secrets
|
||||||
|
if [ -z "${{ secrets.MY_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.MY_INFO_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_INFO_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
|
||||||
|
echo "❌ ADMIN_BASIC_AUTH secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ All required secrets and variables are present"
|
||||||
|
|
||||||
|
- name: Deploy with zero downtime
|
||||||
|
run: |
|
||||||
|
echo "🚀 Deploying with zero downtime..."
|
||||||
|
|
||||||
|
if [ "$CURRENT_CONTAINER_RUNNING" = "true" ]; then
|
||||||
|
echo "🔄 Performing rolling update..."
|
||||||
|
|
||||||
|
# Generate unique container name
|
||||||
|
TIMESTAMP=$(date +%s)
|
||||||
|
TEMP_CONTAINER_NAME="portfolio-app-temp-$TIMESTAMP"
|
||||||
|
echo "🔧 Using temporary container name: $TEMP_CONTAINER_NAME"
|
||||||
|
|
||||||
|
# Clean up any existing temporary containers
|
||||||
|
echo "🧹 Cleaning up any existing temporary containers..."
|
||||||
|
|
||||||
|
# Remove specific known problematic containers
|
||||||
|
docker rm -f portfolio-app-new portfolio-app-temp-* portfolio-app-backup || true
|
||||||
|
|
||||||
|
# Find and remove any containers with portfolio-app in the name (except the main one)
|
||||||
|
EXISTING_CONTAINERS=$(docker ps -a --format "table {{.Names}}" | grep "portfolio-app" | grep -v "^portfolio-app$" || true)
|
||||||
|
if [ -n "$EXISTING_CONTAINERS" ]; then
|
||||||
|
echo "🗑️ Removing existing portfolio-app containers:"
|
||||||
|
echo "$EXISTING_CONTAINERS"
|
||||||
|
echo "$EXISTING_CONTAINERS" | xargs -r docker rm -f || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Also clean up any stopped containers
|
||||||
|
docker container prune -f || true
|
||||||
|
|
||||||
|
# Start new container with unique temporary name (no port mapping needed for health check)
|
||||||
|
docker run -d \
|
||||||
|
--name $TEMP_CONTAINER_NAME \
|
||||||
|
--restart unless-stopped \
|
||||||
|
--network portfolio_net \
|
||||||
|
-e NODE_ENV=${{ vars.NODE_ENV }} \
|
||||||
|
-e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
|
||||||
|
-e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
|
||||||
|
-e REDIS_URL=redis://redis:6379 \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
|
||||||
|
-e MY_EMAIL="${{ vars.MY_EMAIL }}" \
|
||||||
|
-e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
|
||||||
|
-e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
|
||||||
|
-e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
|
||||||
|
-e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
|
||||||
|
${{ env.DOCKER_IMAGE }}:latest
|
||||||
|
|
||||||
|
# Wait for new container to be ready
|
||||||
|
echo "⏳ Waiting for new container to be ready..."
|
||||||
|
sleep 15
|
||||||
|
|
||||||
|
# Health check new container using docker exec
|
||||||
|
for i in {1..20}; do
|
||||||
|
if docker exec $TEMP_CONTAINER_NAME curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ New container is healthy!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "⏳ Health check attempt $i/20..."
|
||||||
|
sleep 3
|
||||||
|
done
|
||||||
|
|
||||||
|
# Stop old container
|
||||||
|
echo "🛑 Stopping old container..."
|
||||||
|
docker stop portfolio-app || true
|
||||||
|
|
||||||
|
# Remove old container
|
||||||
|
docker rm portfolio-app || true
|
||||||
|
|
||||||
|
# Rename new container
|
||||||
|
docker rename $TEMP_CONTAINER_NAME portfolio-app
|
||||||
|
|
||||||
|
# Update port mapping
|
||||||
|
docker stop portfolio-app
|
||||||
|
docker rm portfolio-app
|
||||||
|
|
||||||
|
# Start with correct port
|
||||||
|
docker run -d \
|
||||||
|
--name portfolio-app \
|
||||||
|
--restart unless-stopped \
|
||||||
|
--network portfolio_net \
|
||||||
|
-p 3000:3000 \
|
||||||
|
-e NODE_ENV=${{ vars.NODE_ENV }} \
|
||||||
|
-e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
|
||||||
|
-e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
|
||||||
|
-e REDIS_URL=redis://redis:6379 \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
|
||||||
|
-e MY_EMAIL="${{ vars.MY_EMAIL }}" \
|
||||||
|
-e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
|
||||||
|
-e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
|
||||||
|
-e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
|
||||||
|
-e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
|
||||||
|
${{ env.DOCKER_IMAGE }}:latest
|
||||||
|
|
||||||
|
echo "✅ Rolling update completed!"
|
||||||
|
else
|
||||||
|
echo "🆕 Fresh deployment..."
|
||||||
|
docker compose up -d
|
||||||
|
fi
|
||||||
|
env:
|
||||||
|
NODE_ENV: ${{ vars.NODE_ENV }}
|
||||||
|
LOG_LEVEL: ${{ vars.LOG_LEVEL }}
|
||||||
|
NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}
|
||||||
|
MY_EMAIL: ${{ vars.MY_EMAIL }}
|
||||||
|
MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}
|
||||||
|
MY_PASSWORD: ${{ secrets.MY_PASSWORD }}
|
||||||
|
MY_INFO_PASSWORD: ${{ secrets.MY_INFO_PASSWORD }}
|
||||||
|
ADMIN_BASIC_AUTH: ${{ secrets.ADMIN_BASIC_AUTH }}
|
||||||
|
|
||||||
|
- name: Wait for container to be ready
|
||||||
|
run: |
|
||||||
|
echo "⏳ Waiting for container to be ready..."
|
||||||
|
sleep 15
|
||||||
|
|
||||||
|
# Check if container is actually running
|
||||||
|
if ! docker ps --filter "name=portfolio-app" --format "{{.Names}}" | grep -q "portfolio-app"; then
|
||||||
|
echo "❌ Container failed to start"
|
||||||
|
echo "Container logs:"
|
||||||
|
docker logs portfolio-app --tail=50
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Wait for health check with better error handling
|
||||||
|
echo "🏥 Performing health check..."
|
||||||
|
for i in {1..40}; do
|
||||||
|
# First try direct access to port 3000
|
||||||
|
if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ Application is healthy (direct access)!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If direct access fails, try through docker exec (internal container check)
|
||||||
|
if docker exec portfolio-app curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ Application is healthy (internal check)!"
|
||||||
|
# Check if port is properly exposed
|
||||||
|
if ! curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "⚠️ Application is running but port 3000 is not exposed to host"
|
||||||
|
echo "This might be expected in some deployment configurations"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if container is still running
|
||||||
|
if ! docker ps --filter "name=portfolio-app" --format "{{.Names}}" | grep -q "portfolio-app"; then
|
||||||
|
echo "❌ Container stopped during health check"
|
||||||
|
echo "Container logs:"
|
||||||
|
docker logs portfolio-app --tail=50
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "⏳ Health check attempt $i/40..."
|
||||||
|
sleep 3
|
||||||
|
done
|
||||||
|
|
||||||
|
# Final health check - try both methods
|
||||||
|
if docker exec portfolio-app curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ Final health check passed (internal)"
|
||||||
|
# Try external access if possible
|
||||||
|
if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ External access also working"
|
||||||
|
else
|
||||||
|
echo "⚠️ External access not available (port not exposed)"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "❌ Health check timeout - application not responding"
|
||||||
|
echo "Container logs:"
|
||||||
|
docker logs portfolio-app --tail=100
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Health check
|
||||||
|
run: |
|
||||||
|
echo "🔍 Final health verification..."
|
||||||
|
|
||||||
|
# Check container status
|
||||||
|
docker ps --filter "name=portfolio-app" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
|
||||||
|
|
||||||
|
# Test health endpoint - try both methods
|
||||||
|
echo "🏥 Testing health endpoint..."
|
||||||
|
if curl -f http://localhost:3000/api/health; then
|
||||||
|
echo "✅ Health endpoint accessible externally"
|
||||||
|
elif docker exec portfolio-app curl -f http://localhost:3000/api/health; then
|
||||||
|
echo "✅ Health endpoint accessible internally (external port not exposed)"
|
||||||
|
else
|
||||||
|
echo "❌ Health endpoint not accessible"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Test main page - try both methods
|
||||||
|
echo "🌐 Testing main page..."
|
||||||
|
if curl -f http://localhost:3000/ > /dev/null; then
|
||||||
|
echo "✅ Main page is accessible externally"
|
||||||
|
elif docker exec portfolio-app curl -f http://localhost:3000/ > /dev/null; then
|
||||||
|
echo "✅ Main page is accessible internally (external port not exposed)"
|
||||||
|
else
|
||||||
|
echo "❌ Main page is not accessible"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ Deployment successful!"
|
||||||
|
|
||||||
|
- name: Cleanup old images
|
||||||
|
run: |
|
||||||
|
docker image prune -f
|
||||||
|
docker system prune -f
|
||||||
177
.gitea/workflows/ci-cd-reliable.yml
Normal file
177
.gitea/workflows/ci-cd-reliable.yml
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
name: CI/CD Pipeline (Reliable & Simple)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ production ]
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_VERSION: '20'
|
||||||
|
DOCKER_IMAGE: portfolio-app
|
||||||
|
CONTAINER_NAME: portfolio-app
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
production:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: ${{ env.NODE_VERSION }}
|
||||||
|
cache: 'npm'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: npm ci
|
||||||
|
|
||||||
|
- name: Run linting
|
||||||
|
run: npm run lint
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: npm run test
|
||||||
|
|
||||||
|
- name: Build application
|
||||||
|
run: npm run build
|
||||||
|
|
||||||
|
- name: Run security scan
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running npm audit..."
|
||||||
|
npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."
|
||||||
|
|
||||||
|
- name: Verify secrets and variables
|
||||||
|
run: |
|
||||||
|
echo "🔍 Verifying secrets and variables..."
|
||||||
|
|
||||||
|
# Check Variables
|
||||||
|
if [ -z "${{ vars.NEXT_PUBLIC_BASE_URL }}" ]; then
|
||||||
|
echo "❌ NEXT_PUBLIC_BASE_URL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_INFO_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_INFO_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check Secrets
|
||||||
|
if [ -z "${{ secrets.MY_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.MY_INFO_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_INFO_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
|
||||||
|
echo "❌ ADMIN_BASIC_AUTH secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ All required secrets and variables are present"
|
||||||
|
|
||||||
|
- name: Build Docker image
|
||||||
|
run: |
|
||||||
|
echo "🏗️ Building Docker image..."
|
||||||
|
docker build -t ${{ env.DOCKER_IMAGE }}:latest .
|
||||||
|
docker tag ${{ env.DOCKER_IMAGE }}:latest ${{ env.DOCKER_IMAGE }}:$(date +%Y%m%d-%H%M%S)
|
||||||
|
echo "✅ Docker image built successfully"
|
||||||
|
|
||||||
|
- name: Deploy with database services
|
||||||
|
run: |
|
||||||
|
echo "🚀 Deploying with database services..."
|
||||||
|
|
||||||
|
# Export environment variables
|
||||||
|
export NODE_ENV="${{ vars.NODE_ENV }}"
|
||||||
|
export LOG_LEVEL="${{ vars.LOG_LEVEL }}"
|
||||||
|
export NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}"
|
||||||
|
export NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}"
|
||||||
|
export NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}"
|
||||||
|
export MY_EMAIL="${{ vars.MY_EMAIL }}"
|
||||||
|
export MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}"
|
||||||
|
export MY_PASSWORD="${{ secrets.MY_PASSWORD }}"
|
||||||
|
export MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}"
|
||||||
|
export ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}"
|
||||||
|
|
||||||
|
# Stop old containers
|
||||||
|
echo "🛑 Stopping old containers..."
|
||||||
|
docker compose down || true
|
||||||
|
|
||||||
|
# Clean up orphaned containers
|
||||||
|
echo "🧹 Cleaning up orphaned containers..."
|
||||||
|
docker compose down --remove-orphans || true
|
||||||
|
|
||||||
|
# Start new containers
|
||||||
|
echo "🚀 Starting new containers..."
|
||||||
|
docker compose up -d
|
||||||
|
|
||||||
|
echo "✅ Deployment completed!"
|
||||||
|
env:
|
||||||
|
NODE_ENV: ${{ vars.NODE_ENV }}
|
||||||
|
LOG_LEVEL: ${{ vars.LOG_LEVEL }}
|
||||||
|
NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}
|
||||||
|
MY_EMAIL: ${{ vars.MY_EMAIL }}
|
||||||
|
MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}
|
||||||
|
MY_PASSWORD: ${{ secrets.MY_PASSWORD }}
|
||||||
|
MY_INFO_PASSWORD: ${{ secrets.MY_INFO_PASSWORD }}
|
||||||
|
ADMIN_BASIC_AUTH: ${{ secrets.ADMIN_BASIC_AUTH }}
|
||||||
|
|
||||||
|
- name: Wait for containers to be ready
|
||||||
|
run: |
|
||||||
|
echo "⏳ Waiting for containers to be ready..."
|
||||||
|
sleep 20
|
||||||
|
|
||||||
|
# Check if all containers are running
|
||||||
|
echo "📊 Checking container status..."
|
||||||
|
docker compose ps
|
||||||
|
|
||||||
|
# Wait for application container to be healthy
|
||||||
|
echo "🏥 Waiting for application container to be healthy..."
|
||||||
|
for i in {1..30}; do
|
||||||
|
if docker exec portfolio-app curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ Application container is healthy!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "⏳ Waiting for application container... ($i/30)"
|
||||||
|
sleep 3
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: Health check
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running comprehensive health checks..."
|
||||||
|
|
||||||
|
# Check container status
|
||||||
|
echo "📊 Container status:"
|
||||||
|
docker compose ps
|
||||||
|
|
||||||
|
# Check application container
|
||||||
|
echo "🏥 Checking application container..."
|
||||||
|
if docker exec portfolio-app curl -f http://localhost:3000/api/health; then
|
||||||
|
echo "✅ Application health check passed!"
|
||||||
|
else
|
||||||
|
echo "❌ Application health check failed!"
|
||||||
|
docker logs portfolio-app --tail=50
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check main page
|
||||||
|
if curl -f http://localhost:3000/ > /dev/null; then
|
||||||
|
echo "✅ Main page is accessible!"
|
||||||
|
else
|
||||||
|
echo "❌ Main page is not accessible!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ All health checks passed! Deployment successful!"
|
||||||
|
|
||||||
|
- name: Cleanup old images
|
||||||
|
run: |
|
||||||
|
echo "🧹 Cleaning up old images..."
|
||||||
|
docker image prune -f
|
||||||
|
docker system prune -f
|
||||||
|
echo "✅ Cleanup completed"
|
||||||
143
.gitea/workflows/ci-cd-simple.yml
Normal file
143
.gitea/workflows/ci-cd-simple.yml
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
name: CI/CD Pipeline (Simple & Reliable)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ production ]
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_VERSION: '20'
|
||||||
|
DOCKER_IMAGE: portfolio-app
|
||||||
|
CONTAINER_NAME: portfolio-app
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
production:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: ${{ env.NODE_VERSION }}
|
||||||
|
cache: 'npm'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: npm ci
|
||||||
|
|
||||||
|
- name: Run linting
|
||||||
|
run: npm run lint
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: npm run test
|
||||||
|
|
||||||
|
- name: Build application
|
||||||
|
run: npm run build
|
||||||
|
|
||||||
|
- name: Run security scan
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running npm audit..."
|
||||||
|
npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."
|
||||||
|
|
||||||
|
- name: Verify secrets and variables
|
||||||
|
run: |
|
||||||
|
echo "🔍 Verifying secrets and variables..."
|
||||||
|
|
||||||
|
# Check Variables
|
||||||
|
if [ -z "${{ vars.NEXT_PUBLIC_BASE_URL }}" ]; then
|
||||||
|
echo "❌ NEXT_PUBLIC_BASE_URL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_INFO_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_INFO_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check Secrets
|
||||||
|
if [ -z "${{ secrets.MY_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.MY_INFO_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_INFO_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
|
||||||
|
echo "❌ ADMIN_BASIC_AUTH secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ All required secrets and variables are present"
|
||||||
|
|
||||||
|
- name: Deploy using improved script
|
||||||
|
run: |
|
||||||
|
echo "🚀 Deploying using improved deployment script..."
|
||||||
|
|
||||||
|
# Set environment variables for the deployment script
|
||||||
|
export MY_PASSWORD="${{ secrets.MY_PASSWORD }}"
|
||||||
|
export MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}"
|
||||||
|
export ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}"
|
||||||
|
|
||||||
|
# Make the script executable
|
||||||
|
chmod +x ./scripts/gitea-deploy.sh
|
||||||
|
|
||||||
|
# Run the deployment script
|
||||||
|
./scripts/gitea-deploy.sh
|
||||||
|
env:
|
||||||
|
NODE_ENV: ${{ vars.NODE_ENV }}
|
||||||
|
LOG_LEVEL: ${{ vars.LOG_LEVEL }}
|
||||||
|
NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}
|
||||||
|
MY_EMAIL: ${{ vars.MY_EMAIL }}
|
||||||
|
MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}
|
||||||
|
MY_PASSWORD: ${{ secrets.MY_PASSWORD }}
|
||||||
|
MY_INFO_PASSWORD: ${{ secrets.MY_INFO_PASSWORD }}
|
||||||
|
ADMIN_BASIC_AUTH: ${{ secrets.ADMIN_BASIC_AUTH }}
|
||||||
|
|
||||||
|
- name: Final verification
|
||||||
|
run: |
|
||||||
|
echo "🔍 Final verification..."
|
||||||
|
|
||||||
|
# Wait a bit more to ensure everything is stable
|
||||||
|
sleep 10
|
||||||
|
|
||||||
|
# Check if container is running
|
||||||
|
if docker ps --filter "name=${{ env.CONTAINER_NAME }}" --format "{{.Names}}" | grep -q "${{ env.CONTAINER_NAME }}"; then
|
||||||
|
echo "✅ Container is running"
|
||||||
|
else
|
||||||
|
echo "❌ Container is not running"
|
||||||
|
docker ps -a
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check health endpoint
|
||||||
|
if curl -f http://localhost:3000/api/health; then
|
||||||
|
echo "✅ Health check passed"
|
||||||
|
else
|
||||||
|
echo "❌ Health check failed"
|
||||||
|
echo "Container logs:"
|
||||||
|
docker logs ${{ env.CONTAINER_NAME }} --tail=50
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check main page
|
||||||
|
if curl -f http://localhost:3000/ > /dev/null; then
|
||||||
|
echo "✅ Main page is accessible"
|
||||||
|
else
|
||||||
|
echo "❌ Main page is not accessible"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "🎉 Deployment successful!"
|
||||||
|
|
||||||
|
- name: Cleanup old images
|
||||||
|
run: |
|
||||||
|
echo "🧹 Cleaning up old images..."
|
||||||
|
docker image prune -f
|
||||||
|
docker system prune -f
|
||||||
|
echo "✅ Cleanup completed"
|
||||||
257
.gitea/workflows/ci-cd-zero-downtime-fixed.yml
Normal file
257
.gitea/workflows/ci-cd-zero-downtime-fixed.yml
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
name: CI/CD Pipeline (Zero Downtime - Fixed)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ production ]
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_VERSION: '20'
|
||||||
|
DOCKER_IMAGE: portfolio-app
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
production:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v3
|
||||||
|
with:
|
||||||
|
node-version: ${{ env.NODE_VERSION }}
|
||||||
|
cache: 'npm'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: npm ci
|
||||||
|
|
||||||
|
- name: Run linting
|
||||||
|
run: npm run lint
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: npm run test
|
||||||
|
|
||||||
|
- name: Build application
|
||||||
|
run: npm run build
|
||||||
|
|
||||||
|
- name: Run security scan
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running npm audit..."
|
||||||
|
npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."
|
||||||
|
|
||||||
|
- name: Build Docker image
|
||||||
|
run: |
|
||||||
|
docker build -t ${{ env.DOCKER_IMAGE }}:latest .
|
||||||
|
docker tag ${{ env.DOCKER_IMAGE }}:latest ${{ env.DOCKER_IMAGE }}:$(date +%Y%m%d-%H%M%S)
|
||||||
|
|
||||||
|
- name: Verify secrets and variables before deployment
|
||||||
|
run: |
|
||||||
|
echo "🔍 Verifying secrets and variables..."
|
||||||
|
|
||||||
|
# Check Variables
|
||||||
|
if [ -z "${{ vars.NEXT_PUBLIC_BASE_URL }}" ]; then
|
||||||
|
echo "❌ NEXT_PUBLIC_BASE_URL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_INFO_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_INFO_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check Secrets
|
||||||
|
if [ -z "${{ secrets.MY_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.MY_INFO_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_INFO_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
|
||||||
|
echo "❌ ADMIN_BASIC_AUTH secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ All required secrets and variables are present"
|
||||||
|
|
||||||
|
- name: Deploy with zero downtime using docker-compose
|
||||||
|
run: |
|
||||||
|
echo "🚀 Deploying with zero downtime using docker-compose..."
|
||||||
|
|
||||||
|
# Export environment variables for docker compose
|
||||||
|
export NODE_ENV="${{ vars.NODE_ENV }}"
|
||||||
|
export LOG_LEVEL="${{ vars.LOG_LEVEL }}"
|
||||||
|
export NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}"
|
||||||
|
export NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}"
|
||||||
|
export NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}"
|
||||||
|
export MY_EMAIL="${{ vars.MY_EMAIL }}"
|
||||||
|
export MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}"
|
||||||
|
export MY_PASSWORD="${{ secrets.MY_PASSWORD }}"
|
||||||
|
export MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}"
|
||||||
|
export ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}"
|
||||||
|
|
||||||
|
# Check if nginx config file exists
|
||||||
|
echo "🔍 Checking nginx configuration file..."
|
||||||
|
if [ ! -f "nginx-zero-downtime.conf" ]; then
|
||||||
|
echo "⚠️ nginx-zero-downtime.conf not found, creating fallback..."
|
||||||
|
cat > nginx-zero-downtime.conf << 'EOF'
|
||||||
|
events {
|
||||||
|
worker_connections 1024;
|
||||||
|
}
|
||||||
|
http {
|
||||||
|
upstream portfolio_backend {
|
||||||
|
server portfolio-app-1:3000 max_fails=3 fail_timeout=30s;
|
||||||
|
server portfolio-app-2:3000 max_fails=3 fail_timeout=30s;
|
||||||
|
}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name _;
|
||||||
|
location /health {
|
||||||
|
access_log off;
|
||||||
|
return 200 "healthy\n";
|
||||||
|
add_header Content-Type text/plain;
|
||||||
|
}
|
||||||
|
location / {
|
||||||
|
proxy_pass http://portfolio_backend;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Stop old containers
|
||||||
|
echo "🛑 Stopping old containers..."
|
||||||
|
docker compose -f docker-compose.zero-downtime-fixed.yml down || true
|
||||||
|
|
||||||
|
# Clean up any orphaned containers
|
||||||
|
echo "🧹 Cleaning up orphaned containers..."
|
||||||
|
docker compose -f docker-compose.zero-downtime-fixed.yml down --remove-orphans || true
|
||||||
|
|
||||||
|
# Start new containers
|
||||||
|
echo "🚀 Starting new containers..."
|
||||||
|
docker compose -f docker-compose.zero-downtime-fixed.yml up -d
|
||||||
|
|
||||||
|
echo "✅ Zero downtime deployment completed!"
|
||||||
|
env:
|
||||||
|
NODE_ENV: ${{ vars.NODE_ENV }}
|
||||||
|
LOG_LEVEL: ${{ vars.LOG_LEVEL }}
|
||||||
|
NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}
|
||||||
|
MY_EMAIL: ${{ vars.MY_EMAIL }}
|
||||||
|
MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}
|
||||||
|
MY_PASSWORD: ${{ secrets.MY_PASSWORD }}
|
||||||
|
MY_INFO_PASSWORD: ${{ secrets.MY_INFO_PASSWORD }}
|
||||||
|
ADMIN_BASIC_AUTH: ${{ secrets.ADMIN_BASIC_AUTH }}
|
||||||
|
|
||||||
|
- name: Wait for containers to be ready
|
||||||
|
run: |
|
||||||
|
echo "⏳ Waiting for containers to be ready..."
|
||||||
|
sleep 20
|
||||||
|
|
||||||
|
# Check if all containers are running
|
||||||
|
echo "📊 Checking container status..."
|
||||||
|
docker compose -f docker-compose.zero-downtime-fixed.yml ps
|
||||||
|
|
||||||
|
# Wait for application containers to be healthy (internal check)
|
||||||
|
echo "🏥 Waiting for application containers to be healthy..."
|
||||||
|
for i in {1..30}; do
|
||||||
|
# Check if both app containers are healthy internally
|
||||||
|
if docker exec portfolio-app-1 curl -f http://localhost:3000/api/health > /dev/null 2>&1 && \
|
||||||
|
docker exec portfolio-app-2 curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ Both application containers are healthy!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "⏳ Waiting for application containers... ($i/30)"
|
||||||
|
sleep 3
|
||||||
|
done
|
||||||
|
|
||||||
|
# Wait for nginx to be healthy and proxy to work
|
||||||
|
echo "🌐 Waiting for nginx to be healthy and proxy to work..."
|
||||||
|
for i in {1..30}; do
|
||||||
|
# Check nginx health endpoint
|
||||||
|
if curl -f http://localhost/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ Nginx health endpoint is working!"
|
||||||
|
# Now check if nginx can proxy to the application
|
||||||
|
if curl -f http://localhost/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ Nginx proxy to application is working!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
echo "⏳ Waiting for nginx and proxy... ($i/30)"
|
||||||
|
sleep 3
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: Health check
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running comprehensive health checks..."
|
||||||
|
|
||||||
|
# Check container status
|
||||||
|
echo "📊 Container status:"
|
||||||
|
docker compose -f docker-compose.zero-downtime-fixed.yml ps
|
||||||
|
|
||||||
|
# Check individual application containers (internal)
|
||||||
|
echo "🏥 Checking individual application containers..."
|
||||||
|
if docker exec portfolio-app-1 curl -f http://localhost:3000/api/health; then
|
||||||
|
echo "✅ portfolio-app-1 health check passed!"
|
||||||
|
else
|
||||||
|
echo "❌ portfolio-app-1 health check failed!"
|
||||||
|
docker logs portfolio-app-1 --tail=20
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if docker exec portfolio-app-2 curl -f http://localhost:3000/api/health; then
|
||||||
|
echo "✅ portfolio-app-2 health check passed!"
|
||||||
|
else
|
||||||
|
echo "❌ portfolio-app-2 health check failed!"
|
||||||
|
docker logs portfolio-app-2 --tail=20
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check nginx health
|
||||||
|
if curl -f http://localhost/health; then
|
||||||
|
echo "✅ Nginx health check passed!"
|
||||||
|
else
|
||||||
|
echo "❌ Nginx health check failed!"
|
||||||
|
docker logs portfolio-nginx --tail=20
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check application health through nginx (this is the main test)
|
||||||
|
if curl -f http://localhost/api/health; then
|
||||||
|
echo "✅ Application health check through nginx passed!"
|
||||||
|
else
|
||||||
|
echo "❌ Application health check through nginx failed!"
|
||||||
|
echo "Nginx logs:"
|
||||||
|
docker logs portfolio-nginx --tail=20
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check main page through nginx
|
||||||
|
if curl -f http://localhost/ > /dev/null; then
|
||||||
|
echo "✅ Main page is accessible through nginx!"
|
||||||
|
else
|
||||||
|
echo "❌ Main page is not accessible through nginx!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ All health checks passed! Deployment successful!"
|
||||||
|
|
||||||
|
- name: Show container status
|
||||||
|
run: |
|
||||||
|
echo "📊 Container status:"
|
||||||
|
docker compose -f docker-compose.zero-downtime-fixed.yml ps
|
||||||
|
|
||||||
|
- name: Cleanup old images
|
||||||
|
run: |
|
||||||
|
echo "🧹 Cleaning up old images..."
|
||||||
|
docker image prune -f
|
||||||
|
docker system prune -f
|
||||||
|
echo "✅ Cleanup completed"
|
||||||
194
.gitea/workflows/ci-cd-zero-downtime.yml.disabled
Normal file
194
.gitea/workflows/ci-cd-zero-downtime.yml.disabled
Normal file
@@ -0,0 +1,194 @@
|
|||||||
|
name: CI/CD Pipeline (Zero Downtime)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ production ]
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_VERSION: '20'
|
||||||
|
DOCKER_IMAGE: portfolio-app
|
||||||
|
CONTAINER_NAME: portfolio-app
|
||||||
|
NEW_CONTAINER_NAME: portfolio-app-new
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
production:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v3
|
||||||
|
with:
|
||||||
|
node-version: ${{ env.NODE_VERSION }}
|
||||||
|
cache: 'npm'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: npm ci
|
||||||
|
|
||||||
|
- name: Run linting
|
||||||
|
run: npm run lint
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: npm run test
|
||||||
|
|
||||||
|
- name: Build application
|
||||||
|
run: npm run build
|
||||||
|
|
||||||
|
- name: Run security scan
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running npm audit..."
|
||||||
|
npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."
|
||||||
|
|
||||||
|
- name: Build Docker image
|
||||||
|
run: |
|
||||||
|
docker build -t ${{ env.DOCKER_IMAGE }}:latest .
|
||||||
|
docker tag ${{ env.DOCKER_IMAGE }}:latest ${{ env.DOCKER_IMAGE }}:$(date +%Y%m%d-%H%M%S)
|
||||||
|
|
||||||
|
- name: Verify secrets and variables before deployment
|
||||||
|
run: |
|
||||||
|
echo "🔍 Verifying secrets and variables..."
|
||||||
|
|
||||||
|
# Check Variables
|
||||||
|
if [ -z "${{ vars.NEXT_PUBLIC_BASE_URL }}" ]; then
|
||||||
|
echo "❌ NEXT_PUBLIC_BASE_URL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_INFO_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_INFO_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check Secrets
|
||||||
|
if [ -z "${{ secrets.MY_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.MY_INFO_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_INFO_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
|
||||||
|
echo "❌ ADMIN_BASIC_AUTH secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ All required secrets and variables are present"
|
||||||
|
|
||||||
|
- name: Start new container (zero downtime)
|
||||||
|
run: |
|
||||||
|
echo "🚀 Starting new container for zero-downtime deployment..."
|
||||||
|
|
||||||
|
# Start new container with different name
|
||||||
|
docker run -d \
|
||||||
|
--name ${{ env.NEW_CONTAINER_NAME }} \
|
||||||
|
--restart unless-stopped \
|
||||||
|
--network portfolio_net \
|
||||||
|
-p 3001:3000 \
|
||||||
|
-e NODE_ENV=${{ vars.NODE_ENV }} \
|
||||||
|
-e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
|
||||||
|
-e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
|
||||||
|
-e REDIS_URL=redis://redis:6379 \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
|
||||||
|
-e MY_EMAIL="${{ vars.MY_EMAIL }}" \
|
||||||
|
-e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
|
||||||
|
-e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
|
||||||
|
-e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
|
||||||
|
-e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
|
||||||
|
${{ env.DOCKER_IMAGE }}:latest
|
||||||
|
|
||||||
|
echo "✅ New container started on port 3001"
|
||||||
|
|
||||||
|
- name: Health check new container
|
||||||
|
run: |
|
||||||
|
echo "🔍 Health checking new container..."
|
||||||
|
sleep 10
|
||||||
|
|
||||||
|
# Health check on new container
|
||||||
|
for i in {1..30}; do
|
||||||
|
if curl -f http://localhost:3001/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ New container is healthy!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "⏳ Waiting for new container to be ready... ($i/30)"
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
|
||||||
|
# Final health check
|
||||||
|
if ! curl -f http://localhost:3001/api/health > /dev/null 2>&1; then
|
||||||
|
echo "❌ New container failed health check!"
|
||||||
|
docker logs ${{ env.NEW_CONTAINER_NAME }}
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Switch traffic to new container (zero downtime)
|
||||||
|
run: |
|
||||||
|
echo "🔄 Switching traffic to new container..."
|
||||||
|
|
||||||
|
# Stop old container
|
||||||
|
docker stop ${{ env.CONTAINER_NAME }} || true
|
||||||
|
|
||||||
|
# Remove old container
|
||||||
|
docker rm ${{ env.CONTAINER_NAME }} || true
|
||||||
|
|
||||||
|
# Rename new container to production name
|
||||||
|
docker rename ${{ env.NEW_CONTAINER_NAME }} ${{ env.CONTAINER_NAME }}
|
||||||
|
|
||||||
|
# Update port mapping (requires container restart)
|
||||||
|
docker stop ${{ env.CONTAINER_NAME }}
|
||||||
|
docker rm ${{ env.CONTAINER_NAME }}
|
||||||
|
|
||||||
|
# Start with correct port
|
||||||
|
docker run -d \
|
||||||
|
--name ${{ env.CONTAINER_NAME }} \
|
||||||
|
--restart unless-stopped \
|
||||||
|
--network portfolio_net \
|
||||||
|
-p 3000:3000 \
|
||||||
|
-e NODE_ENV=${{ vars.NODE_ENV }} \
|
||||||
|
-e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
|
||||||
|
-e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
|
||||||
|
-e REDIS_URL=redis://redis:6379 \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
|
||||||
|
-e MY_EMAIL="${{ vars.MY_EMAIL }}" \
|
||||||
|
-e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
|
||||||
|
-e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
|
||||||
|
-e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
|
||||||
|
-e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
|
||||||
|
${{ env.DOCKER_IMAGE }}:latest
|
||||||
|
|
||||||
|
echo "✅ Traffic switched successfully!"
|
||||||
|
|
||||||
|
- name: Final health check
|
||||||
|
run: |
|
||||||
|
echo "🔍 Final health check..."
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
for i in {1..10}; do
|
||||||
|
if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ Deployment successful! Zero downtime achieved!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "⏳ Final health check... ($i/10)"
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
|
||||||
|
if ! curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "❌ Final health check failed!"
|
||||||
|
docker logs ${{ env.CONTAINER_NAME }}
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Cleanup old images
|
||||||
|
run: |
|
||||||
|
echo "🧹 Cleaning up old images..."
|
||||||
|
docker image prune -f
|
||||||
|
docker system prune -f
|
||||||
|
echo "✅ Cleanup completed"
|
||||||
293
.gitea/workflows/ci-cd.yml.disabled
Normal file
293
.gitea/workflows/ci-cd.yml.disabled
Normal file
@@ -0,0 +1,293 @@
|
|||||||
|
name: CI/CD Pipeline (Simple)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ main, production ]
|
||||||
|
pull_request:
|
||||||
|
branches: [ main, production ]
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_VERSION: '20'
|
||||||
|
DOCKER_IMAGE: portfolio-app
|
||||||
|
CONTAINER_NAME: portfolio-app
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
# Production deployment pipeline
|
||||||
|
production:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: github.ref == 'refs/heads/production'
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: ${{ env.NODE_VERSION }}
|
||||||
|
cache: 'npm'
|
||||||
|
cache-dependency-path: 'package-lock.json'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: npm ci
|
||||||
|
|
||||||
|
- name: Run linting
|
||||||
|
run: npm run lint
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: npm run test
|
||||||
|
|
||||||
|
- name: Build application
|
||||||
|
run: npm run build
|
||||||
|
|
||||||
|
- name: Run security scan
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running npm audit..."
|
||||||
|
npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."
|
||||||
|
|
||||||
|
- name: Build Docker image
|
||||||
|
run: |
|
||||||
|
docker build -t ${{ env.DOCKER_IMAGE }}:latest .
|
||||||
|
docker tag ${{ env.DOCKER_IMAGE }}:latest ${{ env.DOCKER_IMAGE }}:$(date +%Y%m%d-%H%M%S)
|
||||||
|
|
||||||
|
- name: Prepare for zero-downtime deployment
|
||||||
|
run: |
|
||||||
|
echo "🚀 Preparing zero-downtime deployment..."
|
||||||
|
|
||||||
|
# FORCE REMOVE the problematic container
|
||||||
|
echo "🧹 FORCE removing problematic container portfolio-app-new..."
|
||||||
|
docker rm -f portfolio-app-new || true
|
||||||
|
docker rm -f afa9a70588844b06e17d5e0527119d589a7a3fde8a17608447cf7d8d448cf261 || true
|
||||||
|
|
||||||
|
# Check if current container is running
|
||||||
|
if docker ps -q -f name=portfolio-app | grep -q .; then
|
||||||
|
echo "📊 Current container is running, proceeding with zero-downtime update"
|
||||||
|
CURRENT_CONTAINER_RUNNING=true
|
||||||
|
else
|
||||||
|
echo "📊 No current container running, doing fresh deployment"
|
||||||
|
CURRENT_CONTAINER_RUNNING=false
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean up ALL existing containers first
|
||||||
|
echo "🧹 Cleaning up ALL existing containers..."
|
||||||
|
docker compose down --remove-orphans || true
|
||||||
|
docker rm -f portfolio-app portfolio-postgres portfolio-redis || true
|
||||||
|
|
||||||
|
# Force remove the specific problematic container
|
||||||
|
docker rm -f 4dec125499540f66f4cb407b69d9aee5232f679feecd71ff2369544ff61f85ae || true
|
||||||
|
|
||||||
|
# Clean up any containers with portfolio in the name
|
||||||
|
docker ps -a --format "{{.Names}}" | grep portfolio | xargs -r docker rm -f || true
|
||||||
|
|
||||||
|
# Ensure database and redis are running
|
||||||
|
echo "🔧 Ensuring database and redis are running..."
|
||||||
|
|
||||||
|
# Export environment variables for docker compose
|
||||||
|
export NODE_ENV="${{ vars.NODE_ENV }}"
|
||||||
|
export LOG_LEVEL="${{ vars.LOG_LEVEL }}"
|
||||||
|
export NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}"
|
||||||
|
export NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}"
|
||||||
|
export NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}"
|
||||||
|
export MY_EMAIL="${{ vars.MY_EMAIL }}"
|
||||||
|
export MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}"
|
||||||
|
export MY_PASSWORD="${{ secrets.MY_PASSWORD }}"
|
||||||
|
export MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}"
|
||||||
|
export ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}"
|
||||||
|
|
||||||
|
# Start services with environment variables
|
||||||
|
docker compose up -d postgres redis
|
||||||
|
|
||||||
|
# Wait for services to be ready
|
||||||
|
sleep 10
|
||||||
|
env:
|
||||||
|
NODE_ENV: ${{ vars.NODE_ENV }}
|
||||||
|
LOG_LEVEL: ${{ vars.LOG_LEVEL }}
|
||||||
|
NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}
|
||||||
|
MY_EMAIL: ${{ vars.MY_EMAIL }}
|
||||||
|
MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}
|
||||||
|
MY_PASSWORD: ${{ secrets.MY_PASSWORD }}
|
||||||
|
MY_INFO_PASSWORD: ${{ secrets.MY_INFO_PASSWORD }}
|
||||||
|
ADMIN_BASIC_AUTH: ${{ secrets.ADMIN_BASIC_AUTH }}
|
||||||
|
|
||||||
|
- name: Verify secrets and variables before deployment
|
||||||
|
run: |
|
||||||
|
echo "🔍 Verifying secrets and variables..."
|
||||||
|
|
||||||
|
# Check Variables
|
||||||
|
if [ -z "${{ vars.NEXT_PUBLIC_BASE_URL }}" ]; then
|
||||||
|
echo "❌ NEXT_PUBLIC_BASE_URL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ vars.MY_INFO_EMAIL }}" ]; then
|
||||||
|
echo "❌ MY_INFO_EMAIL variable is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check Secrets
|
||||||
|
if [ -z "${{ secrets.MY_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.MY_INFO_PASSWORD }}" ]; then
|
||||||
|
echo "❌ MY_INFO_PASSWORD secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
|
||||||
|
echo "❌ ADMIN_BASIC_AUTH secret is missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ All required secrets and variables are present"
|
||||||
|
|
||||||
|
- name: Deploy with zero downtime
|
||||||
|
run: |
|
||||||
|
echo "🚀 Deploying with zero downtime..."
|
||||||
|
|
||||||
|
if [ "$CURRENT_CONTAINER_RUNNING" = "true" ]; then
|
||||||
|
echo "🔄 Performing rolling update..."
|
||||||
|
|
||||||
|
# Generate unique container name
|
||||||
|
TIMESTAMP=$(date +%s)
|
||||||
|
TEMP_CONTAINER_NAME="portfolio-app-temp-$TIMESTAMP"
|
||||||
|
echo "🔧 Using temporary container name: $TEMP_CONTAINER_NAME"
|
||||||
|
|
||||||
|
# Clean up any existing temporary containers
|
||||||
|
echo "🧹 Cleaning up any existing temporary containers..."
|
||||||
|
|
||||||
|
# Remove specific known problematic containers
|
||||||
|
docker rm -f portfolio-app-new portfolio-app-temp-* portfolio-app-backup || true
|
||||||
|
|
||||||
|
# FORCE remove the specific problematic container by ID
|
||||||
|
docker rm -f afa9a70588844b06e17d5e0527119d589a7a3fde8a17608447cf7d8d448cf261 || true
|
||||||
|
|
||||||
|
# Find and remove any containers with portfolio-app in the name (except the main one)
|
||||||
|
EXISTING_CONTAINERS=$(docker ps -a --format "table {{.Names}}" | grep "portfolio-app" | grep -v "^portfolio-app$" || true)
|
||||||
|
if [ -n "$EXISTING_CONTAINERS" ]; then
|
||||||
|
echo "🗑️ Removing existing portfolio-app containers:"
|
||||||
|
echo "$EXISTING_CONTAINERS"
|
||||||
|
echo "$EXISTING_CONTAINERS" | xargs -r docker rm -f || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Also clean up any stopped containers
|
||||||
|
docker container prune -f || true
|
||||||
|
|
||||||
|
# Double-check: list all containers to see what's left
|
||||||
|
echo "📋 Current containers after cleanup:"
|
||||||
|
docker ps -a --format "table {{.Names}}\t{{.Status}}" | grep portfolio || echo "No portfolio containers found"
|
||||||
|
|
||||||
|
# Start new container with unique temporary name (no port mapping needed for health check)
|
||||||
|
docker run -d \
|
||||||
|
--name $TEMP_CONTAINER_NAME \
|
||||||
|
--restart unless-stopped \
|
||||||
|
--network portfolio_net \
|
||||||
|
-e NODE_ENV=${{ vars.NODE_ENV }} \
|
||||||
|
-e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
|
||||||
|
-e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
|
||||||
|
-e REDIS_URL=redis://redis:6379 \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
|
||||||
|
-e MY_EMAIL="${{ vars.MY_EMAIL }}" \
|
||||||
|
-e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
|
||||||
|
-e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
|
||||||
|
-e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
|
||||||
|
-e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
|
||||||
|
${{ env.DOCKER_IMAGE }}:latest
|
||||||
|
|
||||||
|
# Wait for new container to be ready
|
||||||
|
echo "⏳ Waiting for new container to be ready..."
|
||||||
|
sleep 15
|
||||||
|
|
||||||
|
# Health check new container using docker exec
|
||||||
|
for i in {1..20}; do
|
||||||
|
if docker exec $TEMP_CONTAINER_NAME curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
||||||
|
echo "✅ New container is healthy!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "⏳ Health check attempt $i/20..."
|
||||||
|
sleep 3
|
||||||
|
done
|
||||||
|
|
||||||
|
# Stop old container
|
||||||
|
echo "🛑 Stopping old container..."
|
||||||
|
docker stop portfolio-app || true
|
||||||
|
|
||||||
|
# Remove old container
|
||||||
|
docker rm portfolio-app || true
|
||||||
|
|
||||||
|
# Rename new container
|
||||||
|
docker rename $TEMP_CONTAINER_NAME portfolio-app
|
||||||
|
|
||||||
|
# Update port mapping
|
||||||
|
docker stop portfolio-app
|
||||||
|
docker rm portfolio-app
|
||||||
|
|
||||||
|
# Start with correct port
|
||||||
|
docker run -d \
|
||||||
|
--name portfolio-app \
|
||||||
|
--restart unless-stopped \
|
||||||
|
--network portfolio_net \
|
||||||
|
-p 3000:3000 \
|
||||||
|
-e NODE_ENV=${{ vars.NODE_ENV }} \
|
||||||
|
-e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
|
||||||
|
-e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
|
||||||
|
-e REDIS_URL=redis://redis:6379 \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
|
||||||
|
-e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
|
||||||
|
-e MY_EMAIL="${{ vars.MY_EMAIL }}" \
|
||||||
|
-e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
|
||||||
|
-e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
|
||||||
|
-e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
|
||||||
|
-e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
|
||||||
|
${{ env.DOCKER_IMAGE }}:latest
|
||||||
|
|
||||||
|
echo "✅ Rolling update completed!"
|
||||||
|
else
|
||||||
|
echo "🆕 Fresh deployment..."
|
||||||
|
|
||||||
|
# Export environment variables for docker compose
|
||||||
|
export NODE_ENV="${{ vars.NODE_ENV }}"
|
||||||
|
export LOG_LEVEL="${{ vars.LOG_LEVEL }}"
|
||||||
|
export NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}"
|
||||||
|
export NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}"
|
||||||
|
export NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}"
|
||||||
|
export MY_EMAIL="${{ vars.MY_EMAIL }}"
|
||||||
|
export MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}"
|
||||||
|
export MY_PASSWORD="${{ secrets.MY_PASSWORD }}"
|
||||||
|
export MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}"
|
||||||
|
export ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}"
|
||||||
|
|
||||||
|
docker compose up -d
|
||||||
|
fi
|
||||||
|
env:
|
||||||
|
NODE_ENV: ${{ vars.NODE_ENV }}
|
||||||
|
LOG_LEVEL: ${{ vars.LOG_LEVEL }}
|
||||||
|
NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}
|
||||||
|
MY_EMAIL: ${{ vars.MY_EMAIL }}
|
||||||
|
MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}
|
||||||
|
MY_PASSWORD: ${{ secrets.MY_PASSWORD }}
|
||||||
|
MY_INFO_PASSWORD: ${{ secrets.MY_INFO_PASSWORD }}
|
||||||
|
ADMIN_BASIC_AUTH: ${{ secrets.ADMIN_BASIC_AUTH }}
|
||||||
|
|
||||||
|
- name: Wait for container to be ready
|
||||||
|
run: |
|
||||||
|
sleep 10
|
||||||
|
timeout 60 bash -c 'until curl -f http://localhost:3000/api/health; do sleep 2; done'
|
||||||
|
|
||||||
|
- name: Health check
|
||||||
|
run: |
|
||||||
|
curl -f http://localhost:3000/api/health
|
||||||
|
echo "✅ Deployment successful!"
|
||||||
|
|
||||||
|
- name: Cleanup old images
|
||||||
|
run: |
|
||||||
|
docker image prune -f
|
||||||
|
docker system prune -f
|
||||||
123
.gitea/workflows/debug-secrets.yml
Normal file
123
.gitea/workflows/debug-secrets.yml
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
name: Debug Secrets
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
push:
|
||||||
|
branches: [ main ]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
debug-secrets:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Debug Environment Variables
|
||||||
|
run: |
|
||||||
|
echo "🔍 Checking if secrets are available..."
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
echo "📊 VARIABLES:"
|
||||||
|
echo "✅ NODE_ENV: ${{ vars.NODE_ENV }}"
|
||||||
|
echo "✅ LOG_LEVEL: ${{ vars.LOG_LEVEL }}"
|
||||||
|
echo "✅ NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}"
|
||||||
|
echo "✅ NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}"
|
||||||
|
echo "✅ NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}"
|
||||||
|
echo "✅ MY_EMAIL: ${{ vars.MY_EMAIL }}"
|
||||||
|
echo "✅ MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "🔐 SECRETS:"
|
||||||
|
if [ -n "${{ secrets.MY_PASSWORD }}" ]; then
|
||||||
|
echo "✅ MY_PASSWORD: Set (length: ${#MY_PASSWORD})"
|
||||||
|
else
|
||||||
|
echo "❌ MY_PASSWORD: Not set"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "${{ secrets.MY_INFO_PASSWORD }}" ]; then
|
||||||
|
echo "✅ MY_INFO_PASSWORD: Set (length: ${#MY_INFO_PASSWORD})"
|
||||||
|
else
|
||||||
|
echo "❌ MY_INFO_PASSWORD: Not set"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
|
||||||
|
echo "✅ ADMIN_BASIC_AUTH: Set (length: ${#ADMIN_BASIC_AUTH})"
|
||||||
|
else
|
||||||
|
echo "❌ ADMIN_BASIC_AUTH: Not set"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "📋 Summary:"
|
||||||
|
echo "Variables: 7 configured"
|
||||||
|
echo "Secrets: 3 configured"
|
||||||
|
echo "Total environment variables: 10"
|
||||||
|
env:
|
||||||
|
NODE_ENV: ${{ vars.NODE_ENV }}
|
||||||
|
LOG_LEVEL: ${{ vars.LOG_LEVEL }}
|
||||||
|
NEXT_PUBLIC_BASE_URL: ${{ vars.NEXT_PUBLIC_BASE_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_URL: ${{ vars.NEXT_PUBLIC_UMAMI_URL }}
|
||||||
|
NEXT_PUBLIC_UMAMI_WEBSITE_ID: ${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}
|
||||||
|
MY_EMAIL: ${{ vars.MY_EMAIL }}
|
||||||
|
MY_INFO_EMAIL: ${{ vars.MY_INFO_EMAIL }}
|
||||||
|
MY_PASSWORD: ${{ secrets.MY_PASSWORD }}
|
||||||
|
MY_INFO_PASSWORD: ${{ secrets.MY_INFO_PASSWORD }}
|
||||||
|
ADMIN_BASIC_AUTH: ${{ secrets.ADMIN_BASIC_AUTH }}
|
||||||
|
|
||||||
|
- name: Test Docker Environment
|
||||||
|
run: |
|
||||||
|
echo "🐳 Testing Docker environment with secrets..."
|
||||||
|
|
||||||
|
# Create a test container to verify environment variables
|
||||||
|
docker run --rm \
|
||||||
|
-e NODE_ENV=production \
|
||||||
|
-e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
|
||||||
|
-e REDIS_URL=redis://redis:6379 \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL="${{ secrets.NEXT_PUBLIC_BASE_URL }}" \
|
||||||
|
-e MY_EMAIL="${{ secrets.MY_EMAIL }}" \
|
||||||
|
-e MY_INFO_EMAIL="${{ secrets.MY_INFO_EMAIL }}" \
|
||||||
|
-e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
|
||||||
|
-e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
|
||||||
|
-e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
|
||||||
|
alpine:latest sh -c '
|
||||||
|
echo "Environment variables in container:"
|
||||||
|
echo "NODE_ENV: $NODE_ENV"
|
||||||
|
echo "DATABASE_URL: $DATABASE_URL"
|
||||||
|
echo "REDIS_URL: $REDIS_URL"
|
||||||
|
echo "NEXT_PUBLIC_BASE_URL: $NEXT_PUBLIC_BASE_URL"
|
||||||
|
echo "MY_EMAIL: $MY_EMAIL"
|
||||||
|
echo "MY_INFO_EMAIL: $MY_INFO_EMAIL"
|
||||||
|
echo "MY_PASSWORD: [HIDDEN - length: ${#MY_PASSWORD}]"
|
||||||
|
echo "MY_INFO_PASSWORD: [HIDDEN - length: ${#MY_INFO_PASSWORD}]"
|
||||||
|
echo "ADMIN_BASIC_AUTH: [HIDDEN - length: ${#ADMIN_BASIC_AUTH}]"
|
||||||
|
'
|
||||||
|
|
||||||
|
- name: Validate Secret Formats
|
||||||
|
run: |
|
||||||
|
echo "🔐 Validating secret formats..."
|
||||||
|
|
||||||
|
# Check NEXT_PUBLIC_BASE_URL format
|
||||||
|
if [[ "${{ secrets.NEXT_PUBLIC_BASE_URL }}" =~ ^https?:// ]]; then
|
||||||
|
echo "✅ NEXT_PUBLIC_BASE_URL: Valid URL format"
|
||||||
|
else
|
||||||
|
echo "❌ NEXT_PUBLIC_BASE_URL: Invalid URL format (should start with http:// or https://)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check email formats
|
||||||
|
if [[ "${{ secrets.MY_EMAIL }}" =~ ^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
|
||||||
|
echo "✅ MY_EMAIL: Valid email format"
|
||||||
|
else
|
||||||
|
echo "❌ MY_EMAIL: Invalid email format"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${{ secrets.MY_INFO_EMAIL }}" =~ ^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
|
||||||
|
echo "✅ MY_INFO_EMAIL: Valid email format"
|
||||||
|
else
|
||||||
|
echo "❌ MY_INFO_EMAIL: Invalid email format"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check ADMIN_BASIC_AUTH format (should be username:password)
|
||||||
|
if [[ "${{ secrets.ADMIN_BASIC_AUTH }}" =~ ^[^:]+:.+$ ]]; then
|
||||||
|
echo "✅ ADMIN_BASIC_AUTH: Valid format (username:password)"
|
||||||
|
else
|
||||||
|
echo "❌ ADMIN_BASIC_AUTH: Invalid format (should be username:password)"
|
||||||
|
fi
|
||||||
41
.gitea/workflows/test-and-build.yml
Normal file
41
.gitea/workflows/test-and-build.yml
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
name: Test and Build
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ main ]
|
||||||
|
pull_request:
|
||||||
|
branches: [ main ]
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_VERSION: '20'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test-and-build:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: ${{ env.NODE_VERSION }}
|
||||||
|
cache: 'npm'
|
||||||
|
cache-dependency-path: 'package-lock.json'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: npm ci
|
||||||
|
|
||||||
|
- name: Run linting
|
||||||
|
run: npm run lint
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: npm run test
|
||||||
|
|
||||||
|
- name: Build application
|
||||||
|
run: npm run build
|
||||||
|
|
||||||
|
- name: Run security scan
|
||||||
|
run: |
|
||||||
|
echo "🔍 Running npm audit..."
|
||||||
|
npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."
|
||||||
78
.githooks/README.md
Normal file
78
.githooks/README.md
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
# Git Hooks
|
||||||
|
|
||||||
|
This directory contains Git hooks for the Portfolio project.
|
||||||
|
|
||||||
|
## Pre-Push Hook
|
||||||
|
|
||||||
|
The pre-push hook runs automatically before every `git push` and performs the following checks:
|
||||||
|
|
||||||
|
### Checks Performed:
|
||||||
|
1. **Node.js Version Check** - Ensures Node.js 20+ is installed
|
||||||
|
2. **Dependency Installation** - Installs npm dependencies if needed
|
||||||
|
3. **Linting** - Runs ESLint to check code quality
|
||||||
|
4. **Tests** - Runs Jest test suite
|
||||||
|
5. **Build** - Builds the Next.js application
|
||||||
|
6. **Security Audit** - Runs npm audit for vulnerabilities
|
||||||
|
7. **Secret Detection** - Checks for accidentally committed secrets
|
||||||
|
8. **Docker Configuration** - Validates Dockerfile and docker-compose.yml
|
||||||
|
9. **Production Checks** - Additional checks when pushing to production branch
|
||||||
|
|
||||||
|
### Production Branch Special Checks:
|
||||||
|
- Environment file validation
|
||||||
|
- Docker build test
|
||||||
|
- Deployment readiness check
|
||||||
|
|
||||||
|
### Usage:
|
||||||
|
|
||||||
|
The hook runs automatically on every push. To manually test it:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test the hook manually
|
||||||
|
.githooks/pre-push
|
||||||
|
|
||||||
|
# Or push to trigger it
|
||||||
|
git push origin main
|
||||||
|
```
|
||||||
|
|
||||||
|
### Bypassing the Hook:
|
||||||
|
|
||||||
|
If you need to bypass the hook in an emergency:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git push --no-verify origin main
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note**: Only bypass in emergencies. The hook prevents broken code from being pushed.
|
||||||
|
|
||||||
|
### Troubleshooting:
|
||||||
|
|
||||||
|
If the hook fails:
|
||||||
|
|
||||||
|
1. **Fix the reported issues** (linting errors, test failures, etc.)
|
||||||
|
2. **Run the checks manually** to debug:
|
||||||
|
```bash
|
||||||
|
npm run lint
|
||||||
|
npm run test
|
||||||
|
npm run build
|
||||||
|
npm audit
|
||||||
|
```
|
||||||
|
3. **Check Node.js version**: `node --version` (should be 20+)
|
||||||
|
4. **Reinstall dependencies**: `rm -rf node_modules && npm ci`
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
The hook is configured in `.git/config`:
|
||||||
|
```
|
||||||
|
[core]
|
||||||
|
hooksPath = .githooks
|
||||||
|
```
|
||||||
|
|
||||||
|
To disable hooks temporarily:
|
||||||
|
```bash
|
||||||
|
git config core.hooksPath ""
|
||||||
|
```
|
||||||
|
|
||||||
|
To re-enable:
|
||||||
|
```bash
|
||||||
|
git config core.hooksPath .githooks
|
||||||
|
```
|
||||||
202
.githooks/pre-push
Executable file
202
.githooks/pre-push
Executable file
@@ -0,0 +1,202 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Pre-push hook for Portfolio
|
||||||
|
# Runs CI/CD checks before allowing push
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
echo "🚀 Running pre-push checks..."
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Function to print colored output
|
||||||
|
print_status() {
|
||||||
|
echo -e "${BLUE}[INFO]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_success() {
|
||||||
|
echo -e "${GREEN}[SUCCESS]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_warning() {
|
||||||
|
echo -e "${YELLOW}[WARNING]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_error() {
|
||||||
|
echo -e "${RED}[ERROR]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if we're in the right directory
|
||||||
|
if [ ! -f "package.json" ]; then
|
||||||
|
print_error "Not in project root directory!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if Node.js is available
|
||||||
|
if ! command -v node &> /dev/null; then
|
||||||
|
print_error "Node.js is not installed!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check Node.js version
|
||||||
|
NODE_VERSION=$(node --version | cut -d'v' -f2 | cut -d'.' -f1)
|
||||||
|
if [ "$NODE_VERSION" -lt 20 ]; then
|
||||||
|
print_error "Node.js version 20+ required, found: $(node --version)"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "Node.js version: $(node --version)"
|
||||||
|
|
||||||
|
# Install dependencies if node_modules doesn't exist
|
||||||
|
if [ ! -d "node_modules" ]; then
|
||||||
|
print_status "Installing dependencies..."
|
||||||
|
npm ci
|
||||||
|
else
|
||||||
|
print_status "Dependencies already installed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Run linting
|
||||||
|
print_status "Running ESLint..."
|
||||||
|
if npm run lint; then
|
||||||
|
print_success "Linting passed"
|
||||||
|
else
|
||||||
|
print_error "Linting failed! Please fix the issues before pushing."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Run tests
|
||||||
|
print_status "Running tests..."
|
||||||
|
if npm run test; then
|
||||||
|
print_success "Tests passed"
|
||||||
|
else
|
||||||
|
print_error "Tests failed! Please fix the issues before pushing."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Build application
|
||||||
|
print_status "Building application..."
|
||||||
|
if npm run build; then
|
||||||
|
print_success "Build successful"
|
||||||
|
else
|
||||||
|
print_error "Build failed! Please fix the issues before pushing."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Security audit
|
||||||
|
print_status "Running security audit..."
|
||||||
|
if npm audit --audit-level=high; then
|
||||||
|
print_success "Security audit passed"
|
||||||
|
else
|
||||||
|
print_warning "Security audit found issues. Consider running 'npm audit fix'"
|
||||||
|
# Don't fail the push for security warnings, just warn
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check for secrets in code
|
||||||
|
print_status "Checking for secrets in code..."
|
||||||
|
if [ -f "scripts/check-secrets.sh" ]; then
|
||||||
|
chmod +x scripts/check-secrets.sh
|
||||||
|
if ./scripts/check-secrets.sh; then
|
||||||
|
print_success "No secrets found in code"
|
||||||
|
else
|
||||||
|
print_error "Secrets detected in code! Please remove them before pushing."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
print_warning "Secret check script not found, skipping..."
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check Docker configuration
|
||||||
|
print_status "Checking Docker configuration..."
|
||||||
|
if [ -f "Dockerfile" ]; then
|
||||||
|
print_success "Dockerfile found"
|
||||||
|
else
|
||||||
|
print_error "Dockerfile not found!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -f "docker-compose.yml" ]; then
|
||||||
|
print_success "Docker Compose configuration found"
|
||||||
|
else
|
||||||
|
print_error "Docker Compose configuration not found!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if we're pushing to production branch
|
||||||
|
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
|
||||||
|
if [ "$CURRENT_BRANCH" = "production" ]; then
|
||||||
|
print_warning "Pushing to production branch - this will trigger deployment!"
|
||||||
|
|
||||||
|
# Additional production checks
|
||||||
|
print_status "Running production-specific checks..."
|
||||||
|
|
||||||
|
# Check if environment file exists
|
||||||
|
if [ ! -f ".env" ]; then
|
||||||
|
print_warning "No .env file found. Make sure secrets are configured in Gitea."
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if Docker is running and ready
|
||||||
|
print_status "Checking Docker status..."
|
||||||
|
if ! docker info > /dev/null 2>&1; then
|
||||||
|
print_error "Docker is not running! Please start Docker before pushing."
|
||||||
|
print_status "To start Docker:"
|
||||||
|
print_status " - macOS: Open Docker Desktop application"
|
||||||
|
print_status " - Linux: sudo systemctl start docker"
|
||||||
|
print_status " - Windows: Start Docker Desktop application"
|
||||||
|
print_status ""
|
||||||
|
print_status "Wait for Docker to fully start before trying again."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Test Docker functionality
|
||||||
|
if ! docker run --rm hello-world > /dev/null 2>&1; then
|
||||||
|
print_error "Docker is running but not functional!"
|
||||||
|
print_status "Docker might still be starting up. Please wait and try again."
|
||||||
|
print_status "Or restart Docker if the issue persists."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "Docker is running and functional"
|
||||||
|
|
||||||
|
# Check Docker image can be built
|
||||||
|
print_status "Testing Docker build..."
|
||||||
|
|
||||||
|
# Create a temporary log file for build output
|
||||||
|
BUILD_LOG=$(mktemp)
|
||||||
|
|
||||||
|
if docker build -t portfolio-app:test . > "$BUILD_LOG" 2>&1; then
|
||||||
|
print_success "Docker build test passed"
|
||||||
|
docker rmi portfolio-app:test > /dev/null 2>&1
|
||||||
|
rm -f "$BUILD_LOG"
|
||||||
|
else
|
||||||
|
print_error "Docker build test failed!"
|
||||||
|
print_status "Build errors:"
|
||||||
|
echo "----------------------------------------"
|
||||||
|
cat "$BUILD_LOG"
|
||||||
|
echo "----------------------------------------"
|
||||||
|
print_status "Please fix Docker build issues before pushing."
|
||||||
|
print_status "Common issues:"
|
||||||
|
print_status " - Missing files referenced in Dockerfile"
|
||||||
|
print_status " - Network issues during npm install"
|
||||||
|
print_status " - Insufficient disk space"
|
||||||
|
rm -f "$BUILD_LOG"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Final success message
|
||||||
|
echo ""
|
||||||
|
print_success "All pre-push checks passed! ✅"
|
||||||
|
print_status "Ready to push to: $CURRENT_BRANCH"
|
||||||
|
|
||||||
|
# Show what will be pushed
|
||||||
|
echo ""
|
||||||
|
print_status "Files to be pushed:"
|
||||||
|
git diff --name-only HEAD~1 2>/dev/null || git diff --cached --name-only
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
print_success "🚀 Push will proceed..."
|
||||||
7
.github/workflows/ci-cd.yml
vendored
7
.github/workflows/ci-cd.yml
vendored
@@ -190,8 +190,11 @@ jobs:
|
|||||||
# Stop and remove old container
|
# Stop and remove old container
|
||||||
docker compose -f $COMPOSE_FILE down || true
|
docker compose -f $COMPOSE_FILE down || true
|
||||||
|
|
||||||
# Start new container
|
# Remove old images to force using new one
|
||||||
docker compose -f $COMPOSE_FILE up -d
|
docker image prune -f
|
||||||
|
|
||||||
|
# Start new container with force recreate
|
||||||
|
docker compose -f $COMPOSE_FILE up -d --force-recreate
|
||||||
|
|
||||||
# Wait for health check
|
# Wait for health check
|
||||||
echo "Waiting for application to be healthy..."
|
echo "Waiting for application to be healthy..."
|
||||||
|
|||||||
29
.secretsignore
Normal file
29
.secretsignore
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# Ignore patterns for secret detection
|
||||||
|
# These are legitimate authentication patterns, not actual secrets
|
||||||
|
|
||||||
|
# Authentication-related code patterns
|
||||||
|
*password*
|
||||||
|
*username*
|
||||||
|
*credentials*
|
||||||
|
*csrf*
|
||||||
|
*session*
|
||||||
|
*token*
|
||||||
|
*key*
|
||||||
|
*auth*
|
||||||
|
|
||||||
|
# Environment variable references
|
||||||
|
process.env.*
|
||||||
|
|
||||||
|
# Cache and Redis patterns
|
||||||
|
*cache*
|
||||||
|
*redis*
|
||||||
|
|
||||||
|
# Rate limiting patterns
|
||||||
|
*rateLimit*
|
||||||
|
|
||||||
|
# Next.js build artifacts
|
||||||
|
.next/
|
||||||
|
|
||||||
|
# Generated files
|
||||||
|
*.d.ts
|
||||||
|
*.js.map
|
||||||
@@ -1,226 +0,0 @@
|
|||||||
# Automatisches Deployment System
|
|
||||||
|
|
||||||
## Übersicht
|
|
||||||
|
|
||||||
Dieses Portfolio verwendet ein **automatisches Deployment-System**, das bei jedem Git Push die Codebase prüft, den Container erstellt und startet.
|
|
||||||
|
|
||||||
## 🚀 Deployment-Skripte
|
|
||||||
|
|
||||||
### **1. Auto-Deploy (Vollständig)**
|
|
||||||
```bash
|
|
||||||
# Vollständiges automatisches Deployment
|
|
||||||
./scripts/auto-deploy.sh
|
|
||||||
|
|
||||||
# Oder mit npm
|
|
||||||
npm run auto-deploy
|
|
||||||
```
|
|
||||||
|
|
||||||
**Was passiert:**
|
|
||||||
- ✅ Git Status prüfen und uncommitted Changes committen
|
|
||||||
- ✅ Latest Changes pullen
|
|
||||||
- ✅ ESLint Linting
|
|
||||||
- ✅ Tests ausführen
|
|
||||||
- ✅ Next.js Build
|
|
||||||
- ✅ Docker Image erstellen
|
|
||||||
- ✅ Container stoppen/starten
|
|
||||||
- ✅ Health Check
|
|
||||||
- ✅ Cleanup alter Images
|
|
||||||
|
|
||||||
### **2. Quick-Deploy (Schnell)**
|
|
||||||
```bash
|
|
||||||
# Schnelles Deployment ohne Tests
|
|
||||||
./scripts/quick-deploy.sh
|
|
||||||
|
|
||||||
# Oder mit npm
|
|
||||||
npm run quick-deploy
|
|
||||||
```
|
|
||||||
|
|
||||||
**Was passiert:**
|
|
||||||
- ✅ Docker Image erstellen
|
|
||||||
- ✅ Container stoppen/starten
|
|
||||||
- ✅ Health Check
|
|
||||||
|
|
||||||
### **3. Manuelles Deployment**
|
|
||||||
```bash
|
|
||||||
# Manuelles Deployment mit Docker Compose
|
|
||||||
./scripts/deploy.sh
|
|
||||||
|
|
||||||
# Oder mit npm
|
|
||||||
npm run deploy
|
|
||||||
```
|
|
||||||
|
|
||||||
## 🔄 Automatisches Deployment
|
|
||||||
|
|
||||||
### **Git Hook Setup**
|
|
||||||
Das System verwendet einen Git Post-Receive Hook, der automatisch bei jedem Push ausgeführt wird:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Hook ist bereits konfiguriert in:
|
|
||||||
.git/hooks/post-receive
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Wie es funktioniert:**
|
|
||||||
1. **Git Push** → Hook wird ausgelöst
|
|
||||||
2. **Auto-Deploy Script** wird ausgeführt
|
|
||||||
3. **Vollständige Pipeline** läuft automatisch
|
|
||||||
4. **Deployment** wird durchgeführt
|
|
||||||
5. **Health Check** bestätigt Erfolg
|
|
||||||
|
|
||||||
## 📋 Deployment-Schritte
|
|
||||||
|
|
||||||
### **Automatisches Deployment:**
|
|
||||||
```bash
|
|
||||||
# 1. Code Quality Checks
|
|
||||||
git status --porcelain
|
|
||||||
git pull origin main
|
|
||||||
npm run lint
|
|
||||||
npm run test
|
|
||||||
|
|
||||||
# 2. Build Application
|
|
||||||
npm run build
|
|
||||||
|
|
||||||
# 3. Docker Operations
|
|
||||||
docker build -t portfolio-app:latest .
|
|
||||||
docker tag portfolio-app:latest portfolio-app:$(date +%Y%m%d-%H%M%S)
|
|
||||||
|
|
||||||
# 4. Deployment
|
|
||||||
docker stop portfolio-app || true
|
|
||||||
docker rm portfolio-app || true
|
|
||||||
docker run -d --name portfolio-app -p 3000:3000 portfolio-app:latest
|
|
||||||
|
|
||||||
# 5. Health Check
|
|
||||||
curl -f http://localhost:3000/api/health
|
|
||||||
|
|
||||||
# 6. Cleanup
|
|
||||||
docker system prune -f
|
|
||||||
```
|
|
||||||
|
|
||||||
## 🎯 Verwendung
|
|
||||||
|
|
||||||
### **Für Entwicklung:**
|
|
||||||
```bash
|
|
||||||
# Schnelles Deployment während der Entwicklung
|
|
||||||
npm run quick-deploy
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Für Production:**
|
|
||||||
```bash
|
|
||||||
# Vollständiges Deployment mit Tests
|
|
||||||
npm run auto-deploy
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Automatisch bei Push:**
|
|
||||||
```bash
|
|
||||||
# Einfach committen und pushen
|
|
||||||
git add .
|
|
||||||
git commit -m "Update feature"
|
|
||||||
git push origin main
|
|
||||||
# → Automatisches Deployment läuft
|
|
||||||
```
|
|
||||||
|
|
||||||
## 📊 Monitoring
|
|
||||||
|
|
||||||
### **Container Status:**
|
|
||||||
```bash
|
|
||||||
# Status prüfen
|
|
||||||
npm run monitor status
|
|
||||||
|
|
||||||
# Health Check
|
|
||||||
npm run monitor health
|
|
||||||
|
|
||||||
# Logs anzeigen
|
|
||||||
npm run monitor logs
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Deployment Logs:**
|
|
||||||
```bash
|
|
||||||
# Deployment-Logs anzeigen
|
|
||||||
tail -f /var/log/portfolio-deploy.log
|
|
||||||
|
|
||||||
# Git-Deployment-Logs
|
|
||||||
tail -f /var/log/git-deploy.log
|
|
||||||
```
|
|
||||||
|
|
||||||
## 🔧 Konfiguration
|
|
||||||
|
|
||||||
### **Ports:**
|
|
||||||
- **Standard Port:** 3000
|
|
||||||
- **Backup Port:** 3001 (falls 3000 belegt)
|
|
||||||
|
|
||||||
### **Container:**
|
|
||||||
- **Name:** portfolio-app
|
|
||||||
- **Image:** portfolio-app:latest
|
|
||||||
- **Restart Policy:** unless-stopped
|
|
||||||
|
|
||||||
### **Logs:**
|
|
||||||
- **Deployment Logs:** `/var/log/portfolio-deploy.log`
|
|
||||||
- **Git Logs:** `/var/log/git-deploy.log`
|
|
||||||
|
|
||||||
## 🚨 Troubleshooting
|
|
||||||
|
|
||||||
### **Deployment schlägt fehl:**
|
|
||||||
```bash
|
|
||||||
# Logs prüfen
|
|
||||||
docker logs portfolio-app
|
|
||||||
|
|
||||||
# Container-Status prüfen
|
|
||||||
docker ps -a
|
|
||||||
|
|
||||||
# Manuell neu starten
|
|
||||||
npm run quick-deploy
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Port bereits belegt:**
|
|
||||||
```bash
|
|
||||||
# Ports prüfen
|
|
||||||
lsof -i :3000
|
|
||||||
|
|
||||||
# Anderen Port verwenden
|
|
||||||
docker run -d --name portfolio-app -p 3001:3000 portfolio-app:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Tests schlagen fehl:**
|
|
||||||
```bash
|
|
||||||
# Tests lokal ausführen
|
|
||||||
npm run test
|
|
||||||
|
|
||||||
# Linting prüfen
|
|
||||||
npm run lint
|
|
||||||
|
|
||||||
# Build testen
|
|
||||||
npm run build
|
|
||||||
```
|
|
||||||
|
|
||||||
## 📈 Features
|
|
||||||
|
|
||||||
### **Automatische Features:**
|
|
||||||
- ✅ **Git Integration** - Automatisch bei Push
|
|
||||||
- ✅ **Code Quality** - Linting und Tests
|
|
||||||
- ✅ **Health Checks** - Automatische Verifikation
|
|
||||||
- ✅ **Rollback** - Alte Container werden gestoppt
|
|
||||||
- ✅ **Cleanup** - Alte Images werden entfernt
|
|
||||||
- ✅ **Logging** - Vollständige Deployment-Logs
|
|
||||||
|
|
||||||
### **Sicherheits-Features:**
|
|
||||||
- ✅ **Non-root Container**
|
|
||||||
- ✅ **Resource Limits**
|
|
||||||
- ✅ **Health Monitoring**
|
|
||||||
- ✅ **Error Handling**
|
|
||||||
- ✅ **Rollback bei Fehlern**
|
|
||||||
|
|
||||||
## 🎉 Vorteile
|
|
||||||
|
|
||||||
1. **Automatisierung** - Keine manuellen Schritte nötig
|
|
||||||
2. **Konsistenz** - Immer gleiche Deployment-Prozesse
|
|
||||||
3. **Sicherheit** - Tests vor jedem Deployment
|
|
||||||
4. **Monitoring** - Vollständige Logs und Health Checks
|
|
||||||
5. **Schnell** - Quick-Deploy für Entwicklung
|
|
||||||
6. **Zuverlässig** - Automatische Rollbacks bei Fehlern
|
|
||||||
|
|
||||||
## 📞 Support
|
|
||||||
|
|
||||||
Bei Problemen:
|
|
||||||
1. **Logs prüfen:** `tail -f /var/log/portfolio-deploy.log`
|
|
||||||
2. **Container-Status:** `npm run monitor status`
|
|
||||||
3. **Health Check:** `npm run monitor health`
|
|
||||||
4. **Manueller Neustart:** `npm run quick-deploy`
|
|
||||||
144
DEPLOYMENT-FIXES.md
Normal file
144
DEPLOYMENT-FIXES.md
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
# Deployment Fixes for Gitea Actions
|
||||||
|
|
||||||
|
## Problem Summary
|
||||||
|
The Gitea Actions were failing with "Connection refused" errors when trying to connect to localhost:3000. This was caused by several issues:
|
||||||
|
|
||||||
|
1. **Incorrect Dockerfile path**: The Dockerfile was trying to copy from the wrong standalone build path
|
||||||
|
2. **Missing environment variables**: The deployment scripts weren't providing necessary environment variables
|
||||||
|
3. **Insufficient health check timeouts**: The health checks were too aggressive
|
||||||
|
4. **Poor error handling**: The workflows didn't provide enough debugging information
|
||||||
|
|
||||||
|
## Fixes Applied
|
||||||
|
|
||||||
|
### 1. Fixed Dockerfile
|
||||||
|
- **Issue**: Dockerfile was trying to copy from `/app/.next/standalone/portfolio` but the actual path was `/app/.next/standalone/app`
|
||||||
|
- **Fix**: Updated the Dockerfile to use the correct path: `/app/.next/standalone/app`
|
||||||
|
- **File**: `Dockerfile`
|
||||||
|
|
||||||
|
### 2. Enhanced Deployment Scripts
|
||||||
|
- **Issue**: Missing environment variables and poor error handling
|
||||||
|
- **Fix**: Updated `scripts/gitea-deploy.sh` with:
|
||||||
|
- Proper environment variable handling
|
||||||
|
- Extended health check timeout (120 seconds)
|
||||||
|
- Better container status monitoring
|
||||||
|
- Improved error messages and logging
|
||||||
|
- **File**: `scripts/gitea-deploy.sh`
|
||||||
|
|
||||||
|
### 3. Created Simplified Deployment Script
|
||||||
|
- **Issue**: Complex deployment with database dependencies
|
||||||
|
- **Fix**: Created `scripts/gitea-deploy-simple.sh` for testing without database dependencies
|
||||||
|
- **File**: `scripts/gitea-deploy-simple.sh`
|
||||||
|
|
||||||
|
### 4. Fixed Next.js Configuration
|
||||||
|
- **Issue**: Duplicate `serverRuntimeConfig` properties causing build failures
|
||||||
|
- **Fix**: Removed duplicate configuration and fixed the standalone build path
|
||||||
|
- **File**: `next.config.ts`
|
||||||
|
|
||||||
|
### 5. Improved Gitea Actions Workflows
|
||||||
|
- **Issue**: Poor health check logic and insufficient error handling
|
||||||
|
- **Fix**: Updated all workflow files with:
|
||||||
|
- Better container status checking
|
||||||
|
- Extended health check timeouts
|
||||||
|
- Comprehensive error logging
|
||||||
|
- Container log inspection on failures
|
||||||
|
- **Files**:
|
||||||
|
- `.gitea/workflows/ci-cd-fast.yml`
|
||||||
|
- `.gitea/workflows/ci-cd-zero-downtime-fixed.yml`
|
||||||
|
- `.gitea/workflows/ci-cd-simple.yml` (new)
|
||||||
|
- `.gitea/workflows/ci-cd-reliable.yml` (new)
|
||||||
|
|
||||||
|
#### **5. ✅ Fixed Nginx Configuration Issue**
|
||||||
|
- **Issue**: Zero-downtime deployment failing due to missing nginx configuration file in Gitea Actions
|
||||||
|
- **Fix**: Created `docker-compose.zero-downtime-fixed.yml` with fallback nginx configuration
|
||||||
|
- **Added**: Automatic nginx config creation if file is missing
|
||||||
|
- **Files**:
|
||||||
|
- `docker-compose.zero-downtime-fixed.yml` (new)
|
||||||
|
|
||||||
|
#### **6. ✅ Fixed Health Check Logic**
|
||||||
|
- **Issue**: Health checks timing out even though applications were running correctly
|
||||||
|
- **Root Cause**: Workflows trying to access `localhost:3000` directly, but containers don't expose port 3000 to host
|
||||||
|
- **Fix**: Updated health check logic to:
|
||||||
|
- Use `docker exec` for internal container health checks
|
||||||
|
- Check nginx proxy endpoints (`localhost/api/health`) for zero-downtime deployments
|
||||||
|
- Provide fallback health check methods
|
||||||
|
- Better error messages and debugging information
|
||||||
|
- **Files**:
|
||||||
|
- `.gitea/workflows/ci-cd-zero-downtime-fixed.yml` (updated)
|
||||||
|
- `.gitea/workflows/ci-cd-fast.yml` (updated)
|
||||||
|
|
||||||
|
## Available Workflows
|
||||||
|
|
||||||
|
### 1. CI/CD Reliable (Recommended)
|
||||||
|
- **File**: `.gitea/workflows/ci-cd-reliable.yml`
|
||||||
|
- **Description**: Simple, reliable deployment using docker-compose with database services
|
||||||
|
- **Best for**: Most reliable deployments with database support
|
||||||
|
|
||||||
|
### 2. CI/CD Simple
|
||||||
|
- **File**: `.gitea/workflows/ci-cd-simple.yml`
|
||||||
|
- **Description**: Uses the improved deployment script with comprehensive error handling
|
||||||
|
- **Best for**: Reliable deployments without database dependencies
|
||||||
|
|
||||||
|
### 3. CI/CD Fast
|
||||||
|
- **File**: `.gitea/workflows/ci-cd-fast.yml`
|
||||||
|
- **Description**: Fast deployment with rolling updates
|
||||||
|
- **Best for**: Production deployments with zero downtime
|
||||||
|
|
||||||
|
### 4. CI/CD Zero Downtime (Fixed)
|
||||||
|
- **File**: `.gitea/workflows/ci-cd-zero-downtime-fixed.yml`
|
||||||
|
- **Description**: Full zero-downtime deployment with nginx load balancer (fixed nginx config issue)
|
||||||
|
- **Best for**: Production deployments requiring high availability
|
||||||
|
|
||||||
|
## Testing the Fixes
|
||||||
|
|
||||||
|
### Local Testing
|
||||||
|
```bash
|
||||||
|
# Test the simplified deployment script
|
||||||
|
./scripts/gitea-deploy-simple.sh
|
||||||
|
|
||||||
|
# Test the full deployment script
|
||||||
|
./scripts/gitea-deploy.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verification
|
||||||
|
```bash
|
||||||
|
# Check if the application is running
|
||||||
|
curl -f http://localhost:3000/api/health
|
||||||
|
|
||||||
|
# Check the main page
|
||||||
|
curl -f http://localhost:3000/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Environment Variables Required
|
||||||
|
|
||||||
|
### Variables (in Gitea repository settings)
|
||||||
|
- `NODE_ENV`: production
|
||||||
|
- `LOG_LEVEL`: info
|
||||||
|
- `NEXT_PUBLIC_BASE_URL`: https://dk0.dev
|
||||||
|
- `NEXT_PUBLIC_UMAMI_URL`: https://analytics.dk0.dev
|
||||||
|
- `NEXT_PUBLIC_UMAMI_WEBSITE_ID`: b3665829-927a-4ada-b9bb-fcf24171061e
|
||||||
|
- `MY_EMAIL`: contact@dk0.dev
|
||||||
|
- `MY_INFO_EMAIL`: info@dk0.dev
|
||||||
|
|
||||||
|
### Secrets (in Gitea repository settings)
|
||||||
|
- `MY_PASSWORD`: Your email password
|
||||||
|
- `MY_INFO_PASSWORD`: Your info email password
|
||||||
|
- `ADMIN_BASIC_AUTH`: admin:your_secure_password_here
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### If deployment still fails:
|
||||||
|
1. Check the Gitea Actions logs for specific error messages
|
||||||
|
2. Verify all environment variables and secrets are set correctly
|
||||||
|
3. Check if the Docker image builds successfully locally
|
||||||
|
4. Ensure the health check endpoint is accessible
|
||||||
|
|
||||||
|
### Common Issues:
|
||||||
|
- **"Connection refused"**: Container failed to start or crashed
|
||||||
|
- **"Health check timeout"**: Application is taking too long to start
|
||||||
|
- **"Build failed"**: Docker build issues, check Dockerfile and dependencies
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
1. Push these changes to your Gitea repository
|
||||||
|
2. The Actions should now work without the "Connection refused" errors
|
||||||
|
3. Monitor the deployment logs for any remaining issues
|
||||||
|
4. Consider using the "CI/CD Simple" workflow for the most reliable deployments
|
||||||
385
DEPLOYMENT.md
385
DEPLOYMENT.md
@@ -1,272 +1,229 @@
|
|||||||
# Portfolio Deployment Guide
|
# Portfolio Deployment Guide
|
||||||
|
|
||||||
## Übersicht
|
## Overview
|
||||||
|
|
||||||
Dieses Portfolio verwendet ein **optimiertes CI/CD-System** mit Docker für Production-Deployment. Das System ist darauf ausgelegt, hohen Traffic zu bewältigen und automatische Tests vor dem Deployment durchzuführen.
|
This document covers all aspects of deploying the Portfolio application, including local development, CI/CD, and production deployment.
|
||||||
|
|
||||||
## 🚀 Features
|
## Prerequisites
|
||||||
|
|
||||||
### ✅ **CI/CD Pipeline**
|
- Docker and Docker Compose installed
|
||||||
- **Automatische Tests** vor jedem Deployment
|
- Node.js 20+ for local development
|
||||||
- **Security Scanning** mit Trivy
|
- Access to Gitea repository with Actions enabled
|
||||||
- **Multi-Architecture Docker Builds** (AMD64 + ARM64)
|
|
||||||
- **Health Checks** und Deployment-Verifikation
|
|
||||||
- **Automatische Cleanup** alter Images
|
|
||||||
|
|
||||||
### ⚡ **Performance-Optimierungen**
|
## Environment Setup
|
||||||
- **Multi-Stage Docker Build** für kleinere Images
|
|
||||||
- **Nginx Load Balancer** mit Caching
|
|
||||||
- **Gzip Compression** und optimierte Headers
|
|
||||||
- **Rate Limiting** für API-Endpoints
|
|
||||||
- **Resource Limits** für Container
|
|
||||||
|
|
||||||
### 🔒 **Sicherheit**
|
### Required Secrets in Gitea
|
||||||
- **Non-root User** im Container
|
|
||||||
- **Security Headers** (HSTS, CSP, etc.)
|
|
||||||
- **SSL/TLS Termination** mit Nginx
|
|
||||||
- **Vulnerability Scanning** in CI/CD
|
|
||||||
|
|
||||||
## 📁 Dateistruktur
|
Configure these secrets in your Gitea repository (Settings → Secrets):
|
||||||
|
|
||||||
```
|
| Secret Name | Description | Example |
|
||||||
├── .github/workflows/
|
|-------------|-------------|---------|
|
||||||
│ └── ci-cd.yml # CI/CD Pipeline
|
| `NEXT_PUBLIC_BASE_URL` | Public URL of your website | `https://dk0.dev` |
|
||||||
├── scripts/
|
| `MY_EMAIL` | Main email for contact form | `contact@dk0.dev` |
|
||||||
│ ├── deploy.sh # Deployment-Skript
|
| `MY_INFO_EMAIL` | Info email address | `info@dk0.dev` |
|
||||||
│ └── monitor.sh # Monitoring-Skript
|
| `MY_PASSWORD` | Password for main email | `your_email_password` |
|
||||||
├── docker-compose.prod.yml # Production Docker Compose
|
| `MY_INFO_PASSWORD` | Password for info email | `your_info_email_password` |
|
||||||
├── nginx.conf # Nginx Konfiguration
|
| `ADMIN_BASIC_AUTH` | Admin basic auth for protected areas | `admin:your_secure_password` |
|
||||||
├── Dockerfile # Optimiertes Dockerfile
|
|
||||||
└── env.example # Environment Template
|
|
||||||
```
|
|
||||||
|
|
||||||
## 🛠️ Setup
|
### Local Environment
|
||||||
|
|
||||||
### 1. **Environment Variables**
|
1. Copy environment template:
|
||||||
```bash
|
```bash
|
||||||
# Kopiere die Beispiel-Datei
|
|
||||||
cp env.example .env
|
cp env.example .env
|
||||||
|
|
||||||
# Bearbeite die .env Datei mit deinen Werten
|
|
||||||
nano .env
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### 2. **GitHub Secrets & Variables**
|
2. Update `.env` with your values:
|
||||||
Konfiguriere in deinem GitHub Repository:
|
|
||||||
|
|
||||||
**Secrets:**
|
|
||||||
- `GITHUB_TOKEN` (automatisch verfügbar)
|
|
||||||
- `GHOST_API_KEY`
|
|
||||||
- `MY_PASSWORD`
|
|
||||||
- `MY_INFO_PASSWORD`
|
|
||||||
|
|
||||||
**Variables:**
|
|
||||||
- `NEXT_PUBLIC_BASE_URL`
|
|
||||||
- `GHOST_API_URL`
|
|
||||||
- `MY_EMAIL`
|
|
||||||
- `MY_INFO_EMAIL`
|
|
||||||
|
|
||||||
### 3. **SSL-Zertifikate**
|
|
||||||
```bash
|
```bash
|
||||||
# Erstelle SSL-Verzeichnis
|
NEXT_PUBLIC_BASE_URL=https://dk0.dev
|
||||||
mkdir -p ssl
|
MY_EMAIL=contact@dk0.dev
|
||||||
|
MY_INFO_EMAIL=info@dk0.dev
|
||||||
# Kopiere deine SSL-Zertifikate
|
MY_PASSWORD=your_email_password
|
||||||
cp your-cert.pem ssl/cert.pem
|
MY_INFO_PASSWORD=your_info_email_password
|
||||||
cp your-key.pem ssl/key.pem
|
ADMIN_BASIC_AUTH=admin:your_secure_password
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🚀 Deployment
|
## Deployment Methods
|
||||||
|
|
||||||
### **Automatisches Deployment**
|
### 1. Local Development
|
||||||
Das System deployt automatisch bei Push auf den `production` Branch:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Code auf production Branch pushen
|
# Start all services
|
||||||
git push origin production
|
docker compose up -d
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
docker compose logs -f portfolio
|
||||||
|
|
||||||
|
# Stop services
|
||||||
|
docker compose down
|
||||||
```
|
```
|
||||||
|
|
||||||
### **Manuelles Deployment**
|
### 2. CI/CD Pipeline (Automatic)
|
||||||
|
|
||||||
|
The CI/CD pipeline runs automatically on:
|
||||||
|
- **Push to `main`**: Runs tests, linting, build, and security checks
|
||||||
|
- **Push to `production`**: Full deployment including Docker build and deployment
|
||||||
|
|
||||||
|
#### Pipeline Steps:
|
||||||
|
1. **Install dependencies** (`npm ci`)
|
||||||
|
2. **Run linting** (`npm run lint`)
|
||||||
|
3. **Run tests** (`npm run test`)
|
||||||
|
4. **Build application** (`npm run build`)
|
||||||
|
5. **Security scan** (`npm audit`)
|
||||||
|
6. **Build Docker image** (production only)
|
||||||
|
7. **Deploy with Docker Compose** (production only)
|
||||||
|
|
||||||
|
### 3. Manual Deployment
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Lokales Deployment
|
# Build and start services
|
||||||
./scripts/deploy.sh production
|
docker compose up -d --build
|
||||||
|
|
||||||
# Oder mit npm
|
# Check service status
|
||||||
npm run deploy
|
docker compose ps
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
docker compose logs -f
|
||||||
```
|
```
|
||||||
|
|
||||||
### **Docker Commands**
|
## Service Configuration
|
||||||
|
|
||||||
|
### Portfolio App
|
||||||
|
- **Port**: 3000 (configurable via `PORT` environment variable)
|
||||||
|
- **Health Check**: `http://localhost:3000/api/health`
|
||||||
|
- **Environment**: Production
|
||||||
|
- **Resources**: 512M memory limit, 0.5 CPU limit
|
||||||
|
|
||||||
|
### PostgreSQL Database
|
||||||
|
- **Port**: 5432 (internal)
|
||||||
|
- **Database**: `portfolio_db`
|
||||||
|
- **User**: `portfolio_user`
|
||||||
|
- **Password**: `portfolio_pass`
|
||||||
|
- **Health Check**: `pg_isready`
|
||||||
|
|
||||||
|
### Redis Cache
|
||||||
|
- **Port**: 6379 (internal)
|
||||||
|
- **Health Check**: `redis-cli ping`
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
1. **Secrets not loading**:
|
||||||
|
- Run the debug workflow: Actions → Debug Secrets
|
||||||
|
- Verify all secrets are set in Gitea
|
||||||
|
- Check secret names match exactly
|
||||||
|
|
||||||
|
2. **Container won't start**:
|
||||||
```bash
|
```bash
|
||||||
# Container starten
|
# Check logs
|
||||||
npm run docker:compose
|
docker compose logs portfolio
|
||||||
|
|
||||||
# Container stoppen
|
# Check service status
|
||||||
npm run docker:down
|
docker compose ps
|
||||||
|
|
||||||
# Health Check
|
# Restart services
|
||||||
npm run health
|
docker compose restart
|
||||||
```
|
```
|
||||||
|
|
||||||
## 📊 Monitoring
|
3. **Database connection issues**:
|
||||||
|
|
||||||
### **Container Status**
|
|
||||||
```bash
|
```bash
|
||||||
# Status anzeigen
|
# Check PostgreSQL status
|
||||||
./scripts/monitor.sh status
|
docker compose exec postgres pg_isready -U portfolio_user -d portfolio_db
|
||||||
|
|
||||||
# Oder mit npm
|
# Check database logs
|
||||||
npm run monitor status
|
docker compose logs postgres
|
||||||
```
|
```
|
||||||
|
|
||||||
### **Health Check**
|
4. **Redis connection issues**:
|
||||||
```bash
|
```bash
|
||||||
# Application Health
|
# Test Redis connection
|
||||||
./scripts/monitor.sh health
|
docker compose exec redis redis-cli ping
|
||||||
|
|
||||||
# Oder direkt
|
# Check Redis logs
|
||||||
curl http://localhost:3000/api/health
|
docker compose logs redis
|
||||||
```
|
```
|
||||||
|
|
||||||
### **Logs anzeigen**
|
### Debug Commands
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Letzte 50 Zeilen
|
# Check environment variables in container
|
||||||
./scripts/monitor.sh logs 50
|
docker exec portfolio-app env | grep -E "(DATABASE_URL|REDIS_URL|NEXT_PUBLIC_BASE_URL)"
|
||||||
|
|
||||||
# Live-Logs folgen
|
# Test health endpoints
|
||||||
./scripts/monitor.sh logs 100
|
curl -f http://localhost:3000/api/health
|
||||||
|
|
||||||
|
# View all service logs
|
||||||
|
docker compose logs --tail=50
|
||||||
|
|
||||||
|
# Check resource usage
|
||||||
|
docker stats
|
||||||
```
|
```
|
||||||
|
|
||||||
### **Metriken**
|
## Monitoring
|
||||||
|
|
||||||
|
### Health Checks
|
||||||
|
- **Portfolio App**: `http://localhost:3000/api/health`
|
||||||
|
- **PostgreSQL**: `pg_isready` command
|
||||||
|
- **Redis**: `redis-cli ping` command
|
||||||
|
|
||||||
|
### Logs
|
||||||
```bash
|
```bash
|
||||||
# Detaillierte Metriken
|
# Follow all logs
|
||||||
./scripts/monitor.sh metrics
|
docker compose logs -f
|
||||||
|
|
||||||
|
# Follow specific service logs
|
||||||
|
docker compose logs -f portfolio
|
||||||
|
docker compose logs -f postgres
|
||||||
|
docker compose logs -f redis
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔧 Wartung
|
## Security
|
||||||
|
|
||||||
### **Container neustarten**
|
### Security Scans
|
||||||
|
- **NPM Audit**: Runs automatically in CI/CD
|
||||||
|
- **Dependency Check**: Checks for known vulnerabilities
|
||||||
|
- **Secret Detection**: Prevents accidental secret commits
|
||||||
|
|
||||||
|
### Best Practices
|
||||||
|
- Never commit secrets to repository
|
||||||
|
- Use environment variables for sensitive data
|
||||||
|
- Regularly update dependencies
|
||||||
|
- Monitor security advisories
|
||||||
|
|
||||||
|
## Backup and Recovery
|
||||||
|
|
||||||
|
### Database Backup
|
||||||
```bash
|
```bash
|
||||||
./scripts/monitor.sh restart
|
# Create backup
|
||||||
|
docker compose exec postgres pg_dump -U portfolio_user portfolio_db > backup.sql
|
||||||
|
|
||||||
|
# Restore backup
|
||||||
|
docker compose exec -T postgres psql -U portfolio_user portfolio_db < backup.sql
|
||||||
```
|
```
|
||||||
|
|
||||||
### **Cleanup**
|
### Volume Backup
|
||||||
```bash
|
```bash
|
||||||
# Docker-Ressourcen bereinigen
|
# Backup volumes
|
||||||
./scripts/monitor.sh cleanup
|
docker run --rm -v portfolio_postgres_data:/data -v $(pwd):/backup alpine tar czf /backup/postgres_backup.tar.gz /data
|
||||||
|
docker run --rm -v portfolio_redis_data:/data -v $(pwd):/backup alpine tar czf /backup/redis_backup.tar.gz /data
|
||||||
```
|
```
|
||||||
|
|
||||||
### **Updates**
|
## Performance Optimization
|
||||||
```bash
|
|
||||||
# Neues Image pullen und deployen
|
|
||||||
./scripts/deploy.sh production
|
|
||||||
```
|
|
||||||
|
|
||||||
## 📈 Performance-Tuning
|
### Resource Limits
|
||||||
|
- **Portfolio App**: 512M memory, 0.5 CPU
|
||||||
|
- **PostgreSQL**: 256M memory, 0.25 CPU
|
||||||
|
- **Redis**: Default limits
|
||||||
|
|
||||||
### **Nginx Optimierungen**
|
### Caching
|
||||||
- **Gzip Compression** aktiviert
|
- **Next.js**: Built-in caching
|
||||||
- **Static Asset Caching** (1 Jahr)
|
- **Redis**: Session and analytics caching
|
||||||
- **API Rate Limiting** (10 req/s)
|
- **Static Assets**: Served from CDN
|
||||||
- **Load Balancing** bereit für Skalierung
|
|
||||||
|
|
||||||
### **Docker Optimierungen**
|
## Support
|
||||||
- **Multi-Stage Build** für kleinere Images
|
|
||||||
- **Non-root User** für Sicherheit
|
|
||||||
- **Health Checks** für automatische Recovery
|
|
||||||
- **Resource Limits** (512MB RAM, 0.5 CPU)
|
|
||||||
|
|
||||||
### **Next.js Optimierungen**
|
For issues or questions:
|
||||||
- **Standalone Output** für Docker
|
1. Check the troubleshooting section above
|
||||||
- **Image Optimization** (WebP, AVIF)
|
2. Review CI/CD pipeline logs
|
||||||
- **CSS Optimization** aktiviert
|
3. Run the debug workflow
|
||||||
- **Package Import Optimization**
|
4. Check service health endpoints
|
||||||
|
|
||||||
## 🚨 Troubleshooting
|
|
||||||
|
|
||||||
### **Container startet nicht**
|
|
||||||
```bash
|
|
||||||
# Logs prüfen
|
|
||||||
./scripts/monitor.sh logs
|
|
||||||
|
|
||||||
# Status prüfen
|
|
||||||
./scripts/monitor.sh status
|
|
||||||
|
|
||||||
# Neustarten
|
|
||||||
./scripts/monitor.sh restart
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Health Check schlägt fehl**
|
|
||||||
```bash
|
|
||||||
# Manueller Health Check
|
|
||||||
curl -v http://localhost:3000/api/health
|
|
||||||
|
|
||||||
# Container-Logs prüfen
|
|
||||||
docker compose -f docker-compose.prod.yml logs portfolio
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Performance-Probleme**
|
|
||||||
```bash
|
|
||||||
# Resource-Usage prüfen
|
|
||||||
./scripts/monitor.sh metrics
|
|
||||||
|
|
||||||
# Nginx-Logs prüfen
|
|
||||||
docker compose -f docker-compose.prod.yml logs nginx
|
|
||||||
```
|
|
||||||
|
|
||||||
### **SSL-Probleme**
|
|
||||||
```bash
|
|
||||||
# SSL-Zertifikate prüfen
|
|
||||||
openssl x509 -in ssl/cert.pem -text -noout
|
|
||||||
|
|
||||||
# Nginx-Konfiguration testen
|
|
||||||
docker compose -f docker-compose.prod.yml exec nginx nginx -t
|
|
||||||
```
|
|
||||||
|
|
||||||
## 📋 CI/CD Pipeline
|
|
||||||
|
|
||||||
### **Workflow-Schritte**
|
|
||||||
1. **Test** - Linting, Tests, Build
|
|
||||||
2. **Security** - Trivy Vulnerability Scan
|
|
||||||
3. **Build** - Multi-Arch Docker Image
|
|
||||||
4. **Deploy** - Automatisches Deployment
|
|
||||||
|
|
||||||
### **Trigger**
|
|
||||||
- **Push auf `main`** - Build nur
|
|
||||||
- **Push auf `production`** - Build + Deploy
|
|
||||||
- **Pull Request** - Test + Security
|
|
||||||
|
|
||||||
### **Monitoring**
|
|
||||||
- **GitHub Actions** - Pipeline-Status
|
|
||||||
- **Container Health** - Automatische Checks
|
|
||||||
- **Resource Usage** - Monitoring-Skript
|
|
||||||
|
|
||||||
## 🔄 Skalierung
|
|
||||||
|
|
||||||
### **Horizontal Scaling**
|
|
||||||
```yaml
|
|
||||||
# In nginx.conf - weitere Backend-Server hinzufügen
|
|
||||||
upstream portfolio_backend {
|
|
||||||
least_conn;
|
|
||||||
server portfolio:3000 max_fails=3 fail_timeout=30s;
|
|
||||||
server portfolio-2:3000 max_fails=3 fail_timeout=30s;
|
|
||||||
server portfolio-3:3000 max_fails=3 fail_timeout=30s;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### **Vertical Scaling**
|
|
||||||
```yaml
|
|
||||||
# In docker-compose.prod.yml - Resource-Limits erhöhen
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
memory: 1G
|
|
||||||
cpus: '1.0'
|
|
||||||
```
|
|
||||||
|
|
||||||
## 📞 Support
|
|
||||||
|
|
||||||
Bei Problemen:
|
|
||||||
1. **Logs prüfen**: `./scripts/monitor.sh logs`
|
|
||||||
2. **Status prüfen**: `./scripts/monitor.sh status`
|
|
||||||
3. **Health Check**: `./scripts/monitor.sh health`
|
|
||||||
4. **Container neustarten**: `./scripts/monitor.sh restart`
|
|
||||||
@@ -4,7 +4,7 @@ FROM node:20 AS base
|
|||||||
# Install dependencies only when needed
|
# Install dependencies only when needed
|
||||||
FROM base AS deps
|
FROM base AS deps
|
||||||
# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
|
# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
|
||||||
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
|
RUN apt-get update && apt-get install -y --no-install-recommends curl && rm -rf /var/lib/apt/lists/*
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
# Install dependencies based on the preferred package manager
|
# Install dependencies based on the preferred package manager
|
||||||
@@ -55,7 +55,7 @@ RUN chown nextjs:nodejs .next
|
|||||||
|
|
||||||
# Automatically leverage output traces to reduce image size
|
# Automatically leverage output traces to reduce image size
|
||||||
# https://nextjs.org/docs/advanced-features/output-file-tracing
|
# https://nextjs.org/docs/advanced-features/output-file-tracing
|
||||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
|
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone/app ./
|
||||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
|
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
|
||||||
|
|
||||||
# Copy Prisma files
|
# Copy Prisma files
|
||||||
|
|||||||
@@ -93,10 +93,10 @@ const Contact = () => {
|
|||||||
className="text-center mb-16"
|
className="text-center mb-16"
|
||||||
>
|
>
|
||||||
<h2 className="text-4xl md:text-5xl font-bold mb-6 gradient-text">
|
<h2 className="text-4xl md:text-5xl font-bold mb-6 gradient-text">
|
||||||
Get In Touch
|
Contact Me
|
||||||
</h2>
|
</h2>
|
||||||
<p className="text-xl text-gray-400 max-w-2xl mx-auto">
|
<p className="text-xl text-gray-400 max-w-2xl mx-auto">
|
||||||
Have a project in mind or want to collaborate? I would love to hear from you!
|
Interested in working together or have questions about my projects? Feel free to reach out!
|
||||||
</p>
|
</p>
|
||||||
</motion.div>
|
</motion.div>
|
||||||
|
|
||||||
@@ -111,11 +111,11 @@ const Contact = () => {
|
|||||||
>
|
>
|
||||||
<div>
|
<div>
|
||||||
<h3 className="text-2xl font-bold text-white mb-6">
|
<h3 className="text-2xl font-bold text-white mb-6">
|
||||||
Let's Connect
|
Get In Touch
|
||||||
</h3>
|
</h3>
|
||||||
<p className="text-gray-400 leading-relaxed">
|
<p className="text-gray-400 leading-relaxed">
|
||||||
I'm always open to discussing new opportunities, interesting projects,
|
I'm always available to discuss new opportunities, interesting projects,
|
||||||
or just having a chat about technology and innovation.
|
or simply chat about technology and innovation.
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
|||||||
@@ -101,9 +101,9 @@ const Hero = () => {
|
|||||||
<Image
|
<Image
|
||||||
src="/images/me.jpg"
|
src="/images/me.jpg"
|
||||||
alt="Dennis Konkol - Software Engineer"
|
alt="Dennis Konkol - Software Engineer"
|
||||||
fill={true}
|
fill
|
||||||
className="object-cover"
|
className="object-cover"
|
||||||
priority={true}
|
priority
|
||||||
/>
|
/>
|
||||||
|
|
||||||
{/* Hover overlay effect */}
|
{/* Hover overlay effect */}
|
||||||
@@ -216,7 +216,7 @@ const Hero = () => {
|
|||||||
whileTap={{ scale: 0.95 }}
|
whileTap={{ scale: 0.95 }}
|
||||||
className="px-8 py-4 text-lg font-semibold border-2 border-gray-600 text-gray-300 hover:text-white hover:border-gray-500 rounded-lg transition-all duration-200"
|
className="px-8 py-4 text-lg font-semibold border-2 border-gray-600 text-gray-300 hover:text-white hover:border-gray-500 rounded-lg transition-all duration-200"
|
||||||
>
|
>
|
||||||
Get In Touch
|
Contact Me
|
||||||
</motion.a>
|
</motion.a>
|
||||||
</motion.div>
|
</motion.div>
|
||||||
|
|
||||||
|
|||||||
@@ -567,6 +567,7 @@ function EditorPageContent() {
|
|||||||
className="p-2 rounded-lg text-gray-300"
|
className="p-2 rounded-lg text-gray-300"
|
||||||
title="Image"
|
title="Image"
|
||||||
>
|
>
|
||||||
|
{/* eslint-disable-next-line jsx-a11y/alt-text */}
|
||||||
<Image className="w-4 h-4" />
|
<Image className="w-4 h-4" />
|
||||||
</button>
|
</button>
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ export const metadata: Metadata = {
|
|||||||
authors: [{name: "Dennis Konkol", url: "https://dk0.dev"}],
|
authors: [{name: "Dennis Konkol", url: "https://dk0.dev"}],
|
||||||
openGraph: {
|
openGraph: {
|
||||||
title: "Dennis Konkol | Portfolio",
|
title: "Dennis Konkol | Portfolio",
|
||||||
description: "Explore my projects and get in touch!",
|
description: "Explore my projects and contact me for collaboration opportunities!",
|
||||||
url: "https://dk0.dev",
|
url: "https://dk0.dev",
|
||||||
siteName: "Dennis Konkol Portfolio",
|
siteName: "Dennis Konkol Portfolio",
|
||||||
images: [
|
images: [
|
||||||
|
|||||||
@@ -1,16 +1,17 @@
|
|||||||
|
# Unified Docker Compose configuration for Portfolio
|
||||||
|
# Supports both local development and production deployment
|
||||||
|
|
||||||
services:
|
services:
|
||||||
portfolio:
|
portfolio:
|
||||||
build:
|
image: portfolio-app:latest
|
||||||
context: .
|
|
||||||
dockerfile: Dockerfile
|
|
||||||
container_name: portfolio-app
|
container_name: portfolio-app
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
ports:
|
ports:
|
||||||
- "4000:3000"
|
- "${PORT:-3000}:3000" # Configurable port, defaults to 3000
|
||||||
environment:
|
environment:
|
||||||
- NODE_ENV=production
|
- NODE_ENV=${NODE_ENV:-production}
|
||||||
- DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
|
- DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
|
||||||
- REDIS_URL=redis://redis-redis-shared-1:6379
|
- REDIS_URL=redis://redis:6379
|
||||||
- NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
|
- NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
|
||||||
- MY_EMAIL=${MY_EMAIL}
|
- MY_EMAIL=${MY_EMAIL}
|
||||||
- MY_INFO_EMAIL=${MY_INFO_EMAIL}
|
- MY_INFO_EMAIL=${MY_INFO_EMAIL}
|
||||||
@@ -25,6 +26,8 @@ services:
|
|||||||
depends_on:
|
depends_on:
|
||||||
postgres:
|
postgres:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
redis:
|
||||||
|
condition: service_healthy
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
|
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
@@ -67,6 +70,21 @@ services:
|
|||||||
memory: 128M
|
memory: 128M
|
||||||
cpus: '0.1'
|
cpus: '0.1'
|
||||||
|
|
||||||
|
redis:
|
||||||
|
image: redis:7-alpine
|
||||||
|
container_name: portfolio-redis
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- redis_data:/data
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "redis-cli", "ping"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
portfolio_data:
|
portfolio_data:
|
||||||
driver: local
|
driver: local
|
||||||
@@ -77,6 +95,6 @@ volumes:
|
|||||||
|
|
||||||
networks:
|
networks:
|
||||||
portfolio_net:
|
portfolio_net:
|
||||||
external: true
|
driver: bridge
|
||||||
proxy:
|
proxy:
|
||||||
external: true
|
external: true
|
||||||
145
docker-compose.zero-downtime-fixed.yml
Normal file
145
docker-compose.zero-downtime-fixed.yml
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
# Zero-Downtime Deployment Configuration (Fixed)
|
||||||
|
# Uses nginx as load balancer for seamless updates
|
||||||
|
# Fixed to work in Gitea Actions environment
|
||||||
|
|
||||||
|
services:
|
||||||
|
nginx:
|
||||||
|
image: nginx:alpine
|
||||||
|
container_name: portfolio-nginx
|
||||||
|
restart: unless-stopped
|
||||||
|
ports:
|
||||||
|
- "80:80"
|
||||||
|
- "443:443"
|
||||||
|
volumes:
|
||||||
|
# Use a more robust path that works in CI/CD environments
|
||||||
|
- ./nginx-zero-downtime.conf:/etc/nginx/nginx.conf:ro
|
||||||
|
# Remove default nginx configuration to prevent conflicts
|
||||||
|
- /etc/nginx/conf.d
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
depends_on:
|
||||||
|
- portfolio-app-1
|
||||||
|
- portfolio-app-2
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/health"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
# Simple startup command
|
||||||
|
command: >
|
||||||
|
sh -c "
|
||||||
|
rm -rf /etc/nginx/conf.d/*
|
||||||
|
nginx -g 'daemon off;'
|
||||||
|
"
|
||||||
|
|
||||||
|
portfolio-app-1:
|
||||||
|
image: portfolio-app:latest
|
||||||
|
container_name: portfolio-app-1
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
- NODE_ENV=${NODE_ENV:-production}
|
||||||
|
- LOG_LEVEL=${LOG_LEVEL:-info}
|
||||||
|
- DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
|
||||||
|
- REDIS_URL=redis://redis:6379
|
||||||
|
- NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
|
||||||
|
- NEXT_PUBLIC_UMAMI_URL=${NEXT_PUBLIC_UMAMI_URL}
|
||||||
|
- NEXT_PUBLIC_UMAMI_WEBSITE_ID=${NEXT_PUBLIC_UMAMI_WEBSITE_ID}
|
||||||
|
- MY_EMAIL=${MY_EMAIL}
|
||||||
|
- MY_INFO_EMAIL=${MY_INFO_EMAIL}
|
||||||
|
- MY_PASSWORD=${MY_PASSWORD}
|
||||||
|
- MY_INFO_PASSWORD=${MY_INFO_PASSWORD}
|
||||||
|
- ADMIN_BASIC_AUTH=${ADMIN_BASIC_AUTH}
|
||||||
|
volumes:
|
||||||
|
- portfolio_data:/app/.next/cache
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
depends_on:
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
redis:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
portfolio-app-2:
|
||||||
|
image: portfolio-app:latest
|
||||||
|
container_name: portfolio-app-2
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
- NODE_ENV=${NODE_ENV:-production}
|
||||||
|
- LOG_LEVEL=${LOG_LEVEL:-info}
|
||||||
|
- DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
|
||||||
|
- REDIS_URL=redis://redis:6379
|
||||||
|
- NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
|
||||||
|
- NEXT_PUBLIC_UMAMI_URL=${NEXT_PUBLIC_UMAMI_URL}
|
||||||
|
- NEXT_PUBLIC_UMAMI_WEBSITE_ID=${NEXT_PUBLIC_UMAMI_WEBSITE_ID}
|
||||||
|
- MY_EMAIL=${MY_EMAIL}
|
||||||
|
- MY_INFO_EMAIL=${MY_INFO_EMAIL}
|
||||||
|
- MY_PASSWORD=${MY_PASSWORD}
|
||||||
|
- MY_INFO_PASSWORD=${MY_INFO_PASSWORD}
|
||||||
|
- ADMIN_BASIC_AUTH=${ADMIN_BASIC_AUTH}
|
||||||
|
volumes:
|
||||||
|
- portfolio_data:/app/.next/cache
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
depends_on:
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
redis:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
postgres:
|
||||||
|
image: postgres:16-alpine
|
||||||
|
container_name: portfolio-postgres
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
- POSTGRES_DB=portfolio_db
|
||||||
|
- POSTGRES_USER=portfolio_user
|
||||||
|
- POSTGRES_PASSWORD=portfolio_pass
|
||||||
|
volumes:
|
||||||
|
- postgres_data:/var/lib/postgresql/data
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pg_isready -U portfolio_user -d portfolio_db"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
redis:
|
||||||
|
image: redis:7-alpine
|
||||||
|
container_name: portfolio-redis
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- redis_data:/data
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "redis-cli", "ping"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
portfolio_data:
|
||||||
|
driver: local
|
||||||
|
postgres_data:
|
||||||
|
driver: local
|
||||||
|
redis_data:
|
||||||
|
driver: local
|
||||||
|
|
||||||
|
networks:
|
||||||
|
portfolio_net:
|
||||||
|
driver: bridge
|
||||||
135
docker-compose.zero-downtime.yml
Normal file
135
docker-compose.zero-downtime.yml
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
# Zero-Downtime Deployment Configuration
|
||||||
|
# Uses nginx as load balancer for seamless updates
|
||||||
|
|
||||||
|
services:
|
||||||
|
nginx:
|
||||||
|
image: nginx:alpine
|
||||||
|
container_name: portfolio-nginx
|
||||||
|
restart: unless-stopped
|
||||||
|
ports:
|
||||||
|
- "80:80"
|
||||||
|
- "443:443"
|
||||||
|
volumes:
|
||||||
|
- ./nginx-zero-downtime.conf:/etc/nginx/nginx.conf:ro
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
depends_on:
|
||||||
|
- portfolio-app-1
|
||||||
|
- portfolio-app-2
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/health"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
|
||||||
|
portfolio-app-1:
|
||||||
|
image: portfolio-app:latest
|
||||||
|
container_name: portfolio-app-1
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
- NODE_ENV=${NODE_ENV:-production}
|
||||||
|
- LOG_LEVEL=${LOG_LEVEL:-info}
|
||||||
|
- DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
|
||||||
|
- REDIS_URL=redis://redis:6379
|
||||||
|
- NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
|
||||||
|
- NEXT_PUBLIC_UMAMI_URL=${NEXT_PUBLIC_UMAMI_URL}
|
||||||
|
- NEXT_PUBLIC_UMAMI_WEBSITE_ID=${NEXT_PUBLIC_UMAMI_WEBSITE_ID}
|
||||||
|
- MY_EMAIL=${MY_EMAIL}
|
||||||
|
- MY_INFO_EMAIL=${MY_INFO_EMAIL}
|
||||||
|
- MY_PASSWORD=${MY_PASSWORD}
|
||||||
|
- MY_INFO_PASSWORD=${MY_INFO_PASSWORD}
|
||||||
|
- ADMIN_BASIC_AUTH=${ADMIN_BASIC_AUTH}
|
||||||
|
volumes:
|
||||||
|
- portfolio_data:/app/.next/cache
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
depends_on:
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
redis:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
portfolio-app-2:
|
||||||
|
image: portfolio-app:latest
|
||||||
|
container_name: portfolio-app-2
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
- NODE_ENV=${NODE_ENV:-production}
|
||||||
|
- LOG_LEVEL=${LOG_LEVEL:-info}
|
||||||
|
- DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
|
||||||
|
- REDIS_URL=redis://redis:6379
|
||||||
|
- NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
|
||||||
|
- NEXT_PUBLIC_UMAMI_URL=${NEXT_PUBLIC_UMAMI_URL}
|
||||||
|
- NEXT_PUBLIC_UMAMI_WEBSITE_ID=${NEXT_PUBLIC_UMAMI_WEBSITE_ID}
|
||||||
|
- MY_EMAIL=${MY_EMAIL}
|
||||||
|
- MY_INFO_EMAIL=${MY_INFO_EMAIL}
|
||||||
|
- MY_PASSWORD=${MY_PASSWORD}
|
||||||
|
- MY_INFO_PASSWORD=${MY_INFO_PASSWORD}
|
||||||
|
- ADMIN_BASIC_AUTH=${ADMIN_BASIC_AUTH}
|
||||||
|
volumes:
|
||||||
|
- portfolio_data:/app/.next/cache
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
depends_on:
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
redis:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
postgres:
|
||||||
|
image: postgres:16-alpine
|
||||||
|
container_name: portfolio-postgres
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
- POSTGRES_DB=portfolio_db
|
||||||
|
- POSTGRES_USER=portfolio_user
|
||||||
|
- POSTGRES_PASSWORD=portfolio_pass
|
||||||
|
volumes:
|
||||||
|
- postgres_data:/var/lib/postgresql/data
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pg_isready -U portfolio_user -d portfolio_db"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
redis:
|
||||||
|
image: redis:7-alpine
|
||||||
|
container_name: portfolio-redis
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- redis_data:/data
|
||||||
|
networks:
|
||||||
|
- portfolio_net
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "redis-cli", "ping"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
portfolio_data:
|
||||||
|
driver: local
|
||||||
|
postgres_data:
|
||||||
|
driver: local
|
||||||
|
redis_data:
|
||||||
|
driver: local
|
||||||
|
|
||||||
|
networks:
|
||||||
|
portfolio_net:
|
||||||
|
driver: bridge
|
||||||
@@ -25,8 +25,14 @@ jest.mock('next/link', () => {
|
|||||||
|
|
||||||
// Mock next/image
|
// Mock next/image
|
||||||
jest.mock('next/image', () => {
|
jest.mock('next/image', () => {
|
||||||
const ImageComponent = ({ src, alt, ...props }: Record<string, unknown>) =>
|
const ImageComponent = ({ src, alt, fill, priority, ...props }: Record<string, unknown>) => {
|
||||||
React.createElement('img', { src, alt, ...props });
|
// Convert boolean props to strings for DOM compatibility
|
||||||
|
const domProps: Record<string, unknown> = { src, alt };
|
||||||
|
if (fill) domProps.style = { width: '100%', height: '100%', objectFit: 'cover' };
|
||||||
|
if (priority) domProps.loading = 'eager';
|
||||||
|
|
||||||
|
return React.createElement('img', { ...domProps, ...props });
|
||||||
|
};
|
||||||
ImageComponent.displayName = 'Image';
|
ImageComponent.displayName = 'Image';
|
||||||
return ImageComponent;
|
return ImageComponent;
|
||||||
});
|
});
|
||||||
|
|||||||
65
logs/gitea-deploy-simple.log
Normal file
65
logs/gitea-deploy-simple.log
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
[0;34m[2025-09-13 23:24:42][0m 🚀 Starting simplified Gitea deployment for portfolio
|
||||||
|
[0;34m[2025-09-13 23:24:42][0m 🔨 Step 1: Building application...
|
||||||
|
[0;34m[2025-09-13 23:24:42][0m 📦 Building Next.js application...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Application built successfully
|
||||||
|
[0;34m[2025-09-13 23:25:04][0m 🐳 Step 2: Docker operations...
|
||||||
|
[0;34m[2025-09-13 23:25:04][0m 🏗️ Building Docker image...
|
||||||
|
[0;31m[ERROR][0m Docker build failed
|
||||||
|
[0;34m[2025-09-13 23:26:50][0m 🚀 Starting simplified Gitea deployment for portfolio
|
||||||
|
[0;34m[2025-09-13 23:26:50][0m 🔨 Step 1: Building application...
|
||||||
|
[0;34m[2025-09-13 23:26:50][0m 📦 Building Next.js application...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Application built successfully
|
||||||
|
[0;34m[2025-09-13 23:27:13][0m 🐳 Step 2: Docker operations...
|
||||||
|
[0;34m[2025-09-13 23:27:13][0m 🏗️ Building Docker image...
|
||||||
|
[0;31m[ERROR][0m Docker build failed
|
||||||
|
[0;34m[2025-09-13 23:28:23][0m 🚀 Starting simplified Gitea deployment for portfolio
|
||||||
|
[0;34m[2025-09-13 23:28:23][0m 🔨 Step 1: Building application...
|
||||||
|
[0;34m[2025-09-13 23:28:23][0m 📦 Building Next.js application...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Application built successfully
|
||||||
|
[0;34m[2025-09-13 23:28:49][0m 🐳 Step 2: Docker operations...
|
||||||
|
[0;34m[2025-09-13 23:28:49][0m 🏗️ Building Docker image...
|
||||||
|
[0;31m[ERROR][0m Docker build failed
|
||||||
|
[0;34m[2025-09-13 23:35:08][0m 🚀 Starting simplified Gitea deployment for portfolio
|
||||||
|
[0;34m[2025-09-13 23:35:08][0m 🔨 Step 1: Building application...
|
||||||
|
[0;34m[2025-09-13 23:35:08][0m 📦 Building Next.js application...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Application built successfully
|
||||||
|
[0;34m[2025-09-13 23:35:31][0m 🐳 Step 2: Docker operations...
|
||||||
|
[0;34m[2025-09-13 23:35:31][0m 🏗️ Building Docker image...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Docker image built successfully
|
||||||
|
[0;34m[2025-09-13 23:36:32][0m 🚀 Step 3: Deploying application...
|
||||||
|
[0;34m[2025-09-13 23:36:33][0m 🚀 Starting new container on port 3000...
|
||||||
|
[0;34m[2025-09-13 23:36:33][0m ⏳ Waiting for container to be ready...
|
||||||
|
[0;34m[2025-09-13 23:36:53][0m 🏥 Performing health check...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Application is healthy!
|
||||||
|
[0;34m[2025-09-13 23:36:53][0m ✅ Step 4: Verifying deployment...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Main page is accessible
|
||||||
|
[0;34m[2025-09-13 23:36:53][0m 📊 Container status:
|
||||||
|
[0;34m[2025-09-13 23:36:53][0m 📈 Resource usage:
|
||||||
|
[0;32m[SUCCESS][0m 🎉 Simplified Gitea deployment completed successfully!
|
||||||
|
[0;34m[2025-09-13 23:36:54][0m 🌐 Application is available at: http://localhost:3000
|
||||||
|
[0;34m[2025-09-13 23:36:54][0m 🏥 Health check endpoint: http://localhost:3000/api/health
|
||||||
|
[0;34m[2025-09-13 23:36:54][0m 📊 Container name: portfolio-app-simple
|
||||||
|
[0;34m[2025-09-13 23:36:54][0m 📝 Logs: docker logs portfolio-app-simple
|
||||||
|
Sat Sep 13 23:36:54 CEST 2025: Simplified Gitea deployment successful - Port: 3000 - Image: portfolio-app:20250913-233632
|
||||||
|
[0;34m[2025-09-13 23:46:31][0m 🚀 Starting simplified Gitea deployment for portfolio
|
||||||
|
[0;34m[2025-09-13 23:46:31][0m 🔨 Step 1: Building application...
|
||||||
|
[0;34m[2025-09-13 23:46:31][0m 📦 Building Next.js application...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Application built successfully
|
||||||
|
[0;34m[2025-09-13 23:46:54][0m 🐳 Step 2: Docker operations...
|
||||||
|
[0;34m[2025-09-13 23:46:54][0m 🏗️ Building Docker image...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Docker image built successfully
|
||||||
|
[0;34m[2025-09-13 23:48:01][0m 🚀 Step 3: Deploying application...
|
||||||
|
[0;34m[2025-09-13 23:48:01][0m 🚀 Starting new container on port 3000...
|
||||||
|
[0;34m[2025-09-13 23:48:01][0m ⏳ Waiting for container to be ready...
|
||||||
|
[0;34m[2025-09-13 23:48:21][0m 🏥 Performing health check...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Application is healthy!
|
||||||
|
[0;34m[2025-09-13 23:48:21][0m ✅ Step 4: Verifying deployment...
|
||||||
|
[0;32m[SUCCESS][0m ✅ Main page is accessible
|
||||||
|
[0;34m[2025-09-13 23:48:22][0m 📊 Container status:
|
||||||
|
[0;34m[2025-09-13 23:48:22][0m 📈 Resource usage:
|
||||||
|
[0;32m[SUCCESS][0m 🎉 Simplified Gitea deployment completed successfully!
|
||||||
|
[0;34m[2025-09-13 23:48:23][0m 🌐 Application is available at: http://localhost:3000
|
||||||
|
[0;34m[2025-09-13 23:48:23][0m 🏥 Health check endpoint: http://localhost:3000/api/health
|
||||||
|
[0;34m[2025-09-13 23:48:23][0m 📊 Container name: portfolio-app-simple
|
||||||
|
[0;34m[2025-09-13 23:48:23][0m 📝 Logs: docker logs portfolio-app-simple
|
||||||
|
Sat Sep 13 23:48:23 CEST 2025: Simplified Gitea deployment successful - Port: 3000 - Image: portfolio-app:20250913-234801
|
||||||
@@ -8,6 +8,12 @@ dotenv.config({ path: path.resolve(__dirname, '.env') });
|
|||||||
const nextConfig: NextConfig = {
|
const nextConfig: NextConfig = {
|
||||||
// Enable standalone output for Docker
|
// Enable standalone output for Docker
|
||||||
output: 'standalone',
|
output: 'standalone',
|
||||||
|
outputFileTracingRoot: path.join(__dirname, '../../'),
|
||||||
|
|
||||||
|
// Ensure proper server configuration
|
||||||
|
serverRuntimeConfig: {
|
||||||
|
// Will only be available on the server side
|
||||||
|
},
|
||||||
|
|
||||||
// Optimize for production
|
// Optimize for production
|
||||||
compress: true,
|
compress: true,
|
||||||
@@ -22,14 +28,6 @@ const nextConfig: NextConfig = {
|
|||||||
env: {
|
env: {
|
||||||
NEXT_PUBLIC_BASE_URL: process.env.NEXT_PUBLIC_BASE_URL
|
NEXT_PUBLIC_BASE_URL: process.env.NEXT_PUBLIC_BASE_URL
|
||||||
},
|
},
|
||||||
serverRuntimeConfig: {
|
|
||||||
GHOST_API_URL: process.env.GHOST_API_URL,
|
|
||||||
GHOST_API_KEY: process.env.GHOST_API_KEY,
|
|
||||||
MY_EMAIL: process.env.MY_EMAIL,
|
|
||||||
MY_INFO_EMAIL: process.env.MY_INFO_EMAIL,
|
|
||||||
MY_PASSWORD: process.env.MY_PASSWORD,
|
|
||||||
MY_INFO_PASSWORD: process.env.MY_INFO_PASSWORD
|
|
||||||
},
|
|
||||||
|
|
||||||
// Performance optimizations
|
// Performance optimizations
|
||||||
experimental: {
|
experimental: {
|
||||||
@@ -41,6 +39,23 @@ const nextConfig: NextConfig = {
|
|||||||
formats: ['image/webp', 'image/avif'],
|
formats: ['image/webp', 'image/avif'],
|
||||||
minimumCacheTTL: 60,
|
minimumCacheTTL: 60,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
// Dynamic routes are handled automatically by Next.js
|
||||||
|
|
||||||
|
// Add cache-busting headers
|
||||||
|
async headers() {
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
source: '/(.*)',
|
||||||
|
headers: [
|
||||||
|
{
|
||||||
|
key: 'Cache-Control',
|
||||||
|
value: 'public, max-age=0, must-revalidate',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
import bundleAnalyzer from "@next/bundle-analyzer";
|
import bundleAnalyzer from "@next/bundle-analyzer";
|
||||||
|
|||||||
67
nginx-zero-downtime.conf
Normal file
67
nginx-zero-downtime.conf
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
events {
|
||||||
|
worker_connections 1024;
|
||||||
|
}
|
||||||
|
|
||||||
|
http {
|
||||||
|
upstream portfolio_backend {
|
||||||
|
# Health check enabled upstream
|
||||||
|
server portfolio-app-1:3000 max_fails=3 fail_timeout=30s;
|
||||||
|
server portfolio-app-2:3000 max_fails=3 fail_timeout=30s;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Resolver for dynamic upstream resolution
|
||||||
|
resolver 127.0.0.11 valid=10s;
|
||||||
|
|
||||||
|
# Main server
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name _;
|
||||||
|
|
||||||
|
# Health check endpoint
|
||||||
|
location /health {
|
||||||
|
access_log off;
|
||||||
|
return 200 "healthy\n";
|
||||||
|
add_header Content-Type text/plain;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main location
|
||||||
|
location / {
|
||||||
|
proxy_pass http://portfolio_backend;
|
||||||
|
|
||||||
|
# Proxy settings
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
|
||||||
|
# Timeout settings
|
||||||
|
proxy_connect_timeout 5s;
|
||||||
|
proxy_send_timeout 60s;
|
||||||
|
proxy_read_timeout 60s;
|
||||||
|
|
||||||
|
# Buffer settings
|
||||||
|
proxy_buffering on;
|
||||||
|
proxy_buffer_size 4k;
|
||||||
|
proxy_buffers 8 4k;
|
||||||
|
|
||||||
|
# Health check for upstream
|
||||||
|
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
|
||||||
|
proxy_next_upstream_tries 2;
|
||||||
|
proxy_next_upstream_timeout 10s;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Static files caching
|
||||||
|
location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
|
||||||
|
proxy_pass http://portfolio_backend;
|
||||||
|
|
||||||
|
# Proxy settings
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
|
||||||
|
expires 1y;
|
||||||
|
add_header Cache-Control "public, immutable";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -32,6 +32,8 @@
|
|||||||
"deploy": "./scripts/deploy.sh",
|
"deploy": "./scripts/deploy.sh",
|
||||||
"auto-deploy": "./scripts/auto-deploy.sh",
|
"auto-deploy": "./scripts/auto-deploy.sh",
|
||||||
"quick-deploy": "./scripts/quick-deploy.sh",
|
"quick-deploy": "./scripts/quick-deploy.sh",
|
||||||
|
"gitea-deploy": "./scripts/gitea-deploy.sh",
|
||||||
|
"setup-gitea-runner": "./scripts/setup-gitea-runner.sh",
|
||||||
"monitor": "./scripts/monitor.sh",
|
"monitor": "./scripts/monitor.sh",
|
||||||
"health": "curl -f http://localhost:3000/api/health"
|
"health": "curl -f http://localhost:3000/api/health"
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -1,6 +1,3 @@
|
|||||||
// This is your Prisma schema file,
|
|
||||||
// learn more about it in the docs: https://pris.ly/d/prisma-schema
|
|
||||||
|
|
||||||
generator client {
|
generator client {
|
||||||
provider = "prisma-client-js"
|
provider = "prisma-client-js"
|
||||||
}
|
}
|
||||||
@@ -13,8 +10,8 @@ datasource db {
|
|||||||
model Project {
|
model Project {
|
||||||
id Int @id @default(autoincrement())
|
id Int @id @default(autoincrement())
|
||||||
title String @db.VarChar(255)
|
title String @db.VarChar(255)
|
||||||
description String @db.Text
|
description String
|
||||||
content String @db.Text
|
content String
|
||||||
tags String[] @default([])
|
tags String[] @default([])
|
||||||
featured Boolean @default(false)
|
featured Boolean @default(false)
|
||||||
category String @db.VarChar(100)
|
category String @db.VarChar(100)
|
||||||
@@ -23,12 +20,10 @@ model Project {
|
|||||||
live String? @db.VarChar(500)
|
live String? @db.VarChar(500)
|
||||||
published Boolean @default(true)
|
published Boolean @default(true)
|
||||||
imageUrl String? @db.VarChar(500)
|
imageUrl String? @db.VarChar(500)
|
||||||
metaDescription String? @db.Text
|
metaDescription String?
|
||||||
keywords String? @db.Text
|
keywords String?
|
||||||
ogImage String? @db.VarChar(500)
|
ogImage String? @db.VarChar(500)
|
||||||
schema Json?
|
schema Json?
|
||||||
|
|
||||||
// Advanced features
|
|
||||||
difficulty Difficulty @default(INTERMEDIATE)
|
difficulty Difficulty @default(INTERMEDIATE)
|
||||||
timeToComplete String? @db.VarChar(100)
|
timeToComplete String? @db.VarChar(100)
|
||||||
technologies String[] @default([])
|
technologies String[] @default([])
|
||||||
@@ -37,20 +32,13 @@ model Project {
|
|||||||
futureImprovements String[] @default([])
|
futureImprovements String[] @default([])
|
||||||
demoVideo String? @db.VarChar(500)
|
demoVideo String? @db.VarChar(500)
|
||||||
screenshots String[] @default([])
|
screenshots String[] @default([])
|
||||||
colorScheme String @db.VarChar(100) @default("Dark")
|
colorScheme String @default("Dark") @db.VarChar(100)
|
||||||
accessibility Boolean @default(true)
|
accessibility Boolean @default(true)
|
||||||
|
performance Json @default("{\"loadTime\": \"1.5s\", \"bundleSize\": \"50KB\", \"lighthouse\": 90}")
|
||||||
// Performance metrics
|
analytics Json @default("{\"likes\": 0, \"views\": 0, \"shares\": 0}")
|
||||||
performance Json @default("{\"lighthouse\": 90, \"bundleSize\": \"50KB\", \"loadTime\": \"1.5s\"}")
|
|
||||||
|
|
||||||
// Analytics
|
|
||||||
analytics Json @default("{\"views\": 0, \"likes\": 0, \"shares\": 0}")
|
|
||||||
|
|
||||||
// Timestamps
|
|
||||||
createdAt DateTime @default(now()) @map("created_at")
|
createdAt DateTime @default(now()) @map("created_at")
|
||||||
updatedAt DateTime @updatedAt @map("updated_at")
|
updatedAt DateTime @updatedAt @map("updated_at")
|
||||||
|
|
||||||
// Indexes for performance
|
|
||||||
@@index([category])
|
@@index([category])
|
||||||
@@index([featured])
|
@@index([featured])
|
||||||
@@index([published])
|
@@index([published])
|
||||||
@@ -59,6 +47,49 @@ model Project {
|
|||||||
@@index([tags])
|
@@index([tags])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
model PageView {
|
||||||
|
id Int @id @default(autoincrement())
|
||||||
|
projectId Int? @map("project_id")
|
||||||
|
page String @db.VarChar(100)
|
||||||
|
ip String? @db.VarChar(45)
|
||||||
|
userAgent String? @map("user_agent")
|
||||||
|
referrer String? @db.VarChar(500)
|
||||||
|
timestamp DateTime @default(now())
|
||||||
|
|
||||||
|
@@index([projectId])
|
||||||
|
@@index([timestamp])
|
||||||
|
@@index([page])
|
||||||
|
}
|
||||||
|
|
||||||
|
model UserInteraction {
|
||||||
|
id Int @id @default(autoincrement())
|
||||||
|
projectId Int @map("project_id")
|
||||||
|
type InteractionType
|
||||||
|
ip String? @db.VarChar(45)
|
||||||
|
userAgent String? @map("user_agent")
|
||||||
|
timestamp DateTime @default(now())
|
||||||
|
|
||||||
|
@@index([projectId])
|
||||||
|
@@index([type])
|
||||||
|
@@index([timestamp])
|
||||||
|
}
|
||||||
|
|
||||||
|
model Contact {
|
||||||
|
id Int @id @default(autoincrement())
|
||||||
|
name String @db.VarChar(255)
|
||||||
|
email String @db.VarChar(255)
|
||||||
|
subject String @db.VarChar(500)
|
||||||
|
message String
|
||||||
|
responded Boolean @default(false)
|
||||||
|
responseTemplate String? @map("response_template") @db.VarChar(50)
|
||||||
|
createdAt DateTime @default(now()) @map("created_at")
|
||||||
|
updatedAt DateTime @updatedAt @map("updated_at")
|
||||||
|
|
||||||
|
@@index([email])
|
||||||
|
@@index([responded])
|
||||||
|
@@index([createdAt])
|
||||||
|
}
|
||||||
|
|
||||||
enum Difficulty {
|
enum Difficulty {
|
||||||
BEGINNER
|
BEGINNER
|
||||||
INTERMEDIATE
|
INTERMEDIATE
|
||||||
@@ -66,55 +97,9 @@ enum Difficulty {
|
|||||||
EXPERT
|
EXPERT
|
||||||
}
|
}
|
||||||
|
|
||||||
// Analytics tracking
|
|
||||||
model PageView {
|
|
||||||
id Int @id @default(autoincrement())
|
|
||||||
projectId Int? @map("project_id")
|
|
||||||
page String @db.VarChar(100)
|
|
||||||
ip String? @db.VarChar(45)
|
|
||||||
userAgent String? @db.Text @map("user_agent")
|
|
||||||
referrer String? @db.VarChar(500)
|
|
||||||
timestamp DateTime @default(now())
|
|
||||||
|
|
||||||
@@index([projectId])
|
|
||||||
@@index([timestamp])
|
|
||||||
@@index([page])
|
|
||||||
}
|
|
||||||
|
|
||||||
// User interactions
|
|
||||||
model UserInteraction {
|
|
||||||
id Int @id @default(autoincrement())
|
|
||||||
projectId Int @map("project_id")
|
|
||||||
type InteractionType
|
|
||||||
ip String? @db.VarChar(45)
|
|
||||||
userAgent String? @db.Text @map("user_agent")
|
|
||||||
timestamp DateTime @default(now())
|
|
||||||
|
|
||||||
@@index([projectId])
|
|
||||||
@@index([type])
|
|
||||||
@@index([timestamp])
|
|
||||||
}
|
|
||||||
|
|
||||||
enum InteractionType {
|
enum InteractionType {
|
||||||
LIKE
|
LIKE
|
||||||
SHARE
|
SHARE
|
||||||
BOOKMARK
|
BOOKMARK
|
||||||
COMMENT
|
COMMENT
|
||||||
}
|
}
|
||||||
|
|
||||||
// Contact form submissions
|
|
||||||
model Contact {
|
|
||||||
id Int @id @default(autoincrement())
|
|
||||||
name String @db.VarChar(255)
|
|
||||||
email String @db.VarChar(255)
|
|
||||||
subject String @db.VarChar(500)
|
|
||||||
message String @db.Text
|
|
||||||
responded Boolean @default(false)
|
|
||||||
responseTemplate String? @db.VarChar(50) @map("response_template")
|
|
||||||
createdAt DateTime @default(now()) @map("created_at")
|
|
||||||
updatedAt DateTime @updatedAt @map("updated_at")
|
|
||||||
|
|
||||||
@@index([email])
|
|
||||||
@@index([responded])
|
|
||||||
@@index([createdAt])
|
|
||||||
}
|
|
||||||
|
|||||||
85
scripts/check-secrets.sh
Executable file
85
scripts/check-secrets.sh
Executable file
@@ -0,0 +1,85 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Advanced Secret Detection Script
|
||||||
|
# This script checks for actual secrets, not legitimate authentication code
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
print_status() {
|
||||||
|
echo -e "${GREEN}✅ $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_warning() {
|
||||||
|
echo -e "${YELLOW}⚠️ $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_error() {
|
||||||
|
echo -e "${RED}❌ $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
echo "🔍 Advanced secret detection..."
|
||||||
|
|
||||||
|
SECRETS_FOUND=false
|
||||||
|
|
||||||
|
# Check for hardcoded secrets (more specific patterns)
|
||||||
|
echo "Checking for hardcoded secrets..."
|
||||||
|
|
||||||
|
# Check for actual API keys, tokens, passwords (not variable names)
|
||||||
|
if grep -r -E "(api[_-]?key|secret[_-]?key|private[_-]?key|access[_-]?token|bearer[_-]?token)\s*[:=]\s*['\"][^'\"]{20,}" \
|
||||||
|
--include="*.js" --include="*.ts" --include="*.json" --include="*.env*" . | \
|
||||||
|
grep -v node_modules | grep -v ".git" | grep -v ".next/" | grep -v "test"; then
|
||||||
|
print_error "Hardcoded API keys or tokens found!"
|
||||||
|
SECRETS_FOUND=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check for database connection strings with credentials (excluding .env files)
|
||||||
|
if grep -r -E "(postgresql|mysql|mongodb)://[^:]+:[^@]+@" \
|
||||||
|
--include="*.js" --include="*.ts" --include="*.json" . | \
|
||||||
|
grep -v node_modules | grep -v ".git" | grep -v ".next/" | grep -v "test" | \
|
||||||
|
grep -v ".env"; then
|
||||||
|
print_error "Database connection strings with credentials found in source code!"
|
||||||
|
SECRETS_FOUND=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check for AWS/cloud service credentials
|
||||||
|
if grep -r -E "(aws[_-]?access[_-]?key[_-]?id|aws[_-]?secret[_-]?access[_-]?key|azure[_-]?account[_-]?key|gcp[_-]?service[_-]?account)" \
|
||||||
|
--include="*.js" --include="*.ts" --include="*.json" --include="*.env*" . | \
|
||||||
|
grep -v node_modules | grep -v ".git" | grep -v ".next/" | grep -v "test"; then
|
||||||
|
print_error "Cloud service credentials found!"
|
||||||
|
SECRETS_FOUND=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check for .env files in git (should be in .gitignore)
|
||||||
|
if git ls-files | grep -E "\.env$|\.env\."; then
|
||||||
|
print_error ".env files found in git repository!"
|
||||||
|
SECRETS_FOUND=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check for common secret file patterns
|
||||||
|
if find . -name "*.pem" -o -name "*.key" -o -name "*.p12" -o -name "*.pfx" | grep -v node_modules | grep -v ".git"; then
|
||||||
|
print_error "Certificate or key files found in repository!"
|
||||||
|
SECRETS_FOUND=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check for JWT secrets or signing keys
|
||||||
|
if grep -r -E "(jwt[_-]?secret|signing[_-]?key|encryption[_-]?key)\s*[:=]\s*['\"][^'\"]{32,}" \
|
||||||
|
--include="*.js" --include="*.ts" --include="*.json" --include="*.env*" . | \
|
||||||
|
grep -v node_modules | grep -v ".git" | grep -v ".next/" | grep -v "test"; then
|
||||||
|
print_error "JWT secrets or signing keys found!"
|
||||||
|
SECRETS_FOUND=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$SECRETS_FOUND" = false ]; then
|
||||||
|
print_status "No actual secrets found in code"
|
||||||
|
else
|
||||||
|
print_error "Potential secrets detected - please review and remove"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "🔍 Secret detection completed!"
|
||||||
@@ -10,7 +10,7 @@ ENVIRONMENT=${1:-production}
|
|||||||
REGISTRY="ghcr.io"
|
REGISTRY="ghcr.io"
|
||||||
IMAGE_NAME="dennis-konkol/my_portfolio"
|
IMAGE_NAME="dennis-konkol/my_portfolio"
|
||||||
CONTAINER_NAME="portfolio-app"
|
CONTAINER_NAME="portfolio-app"
|
||||||
COMPOSE_FILE="docker-compose.prod.yml"
|
COMPOSE_FILE="docker-compose.zero-downtime.yml"
|
||||||
|
|
||||||
# Colors for output
|
# Colors for output
|
||||||
RED='\033[0;31m'
|
RED='\033[0;31m'
|
||||||
@@ -79,10 +79,10 @@ echo "$GITHUB_TOKEN" | docker login $REGISTRY -u $GITHUB_ACTOR --password-stdin
|
|||||||
warning "Failed to login to registry. Make sure GITHUB_TOKEN and GITHUB_ACTOR are set."
|
warning "Failed to login to registry. Make sure GITHUB_TOKEN and GITHUB_ACTOR are set."
|
||||||
}
|
}
|
||||||
|
|
||||||
# Pull latest image
|
# Build latest image locally
|
||||||
log "Pulling latest image..."
|
log "Building latest image locally..."
|
||||||
docker pull $FULL_IMAGE_NAME || {
|
docker build -t portfolio-app:latest . || {
|
||||||
error "Failed to pull image $FULL_IMAGE_NAME"
|
error "Failed to build image locally"
|
||||||
exit 1
|
exit 1
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -112,7 +112,7 @@ HEALTH_CHECK_INTERVAL=2
|
|||||||
ELAPSED=0
|
ELAPSED=0
|
||||||
|
|
||||||
while [ $ELAPSED -lt $HEALTH_CHECK_TIMEOUT ]; do
|
while [ $ELAPSED -lt $HEALTH_CHECK_TIMEOUT ]; do
|
||||||
if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
if curl -f http://localhost/api/health > /dev/null 2>&1; then
|
||||||
success "Application is healthy!"
|
success "Application is healthy!"
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
@@ -131,7 +131,7 @@ fi
|
|||||||
|
|
||||||
# Verify deployment
|
# Verify deployment
|
||||||
log "Verifying deployment..."
|
log "Verifying deployment..."
|
||||||
if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
if curl -f http://localhost/api/health > /dev/null 2>&1; then
|
||||||
success "Deployment successful!"
|
success "Deployment successful!"
|
||||||
|
|
||||||
# Show container status
|
# Show container status
|
||||||
@@ -156,5 +156,5 @@ docker system prune -f --volumes || {
|
|||||||
}
|
}
|
||||||
|
|
||||||
success "Deployment completed successfully!"
|
success "Deployment completed successfully!"
|
||||||
log "Application is available at: http://localhost:3000"
|
log "Application is available at: http://localhost/"
|
||||||
log "Health check endpoint: http://localhost:3000/api/health"
|
log "Health check endpoint: http://localhost/api/health"
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ exec('docker-compose --version', (error) => {
|
|||||||
shell: isWindows,
|
shell: isWindows,
|
||||||
env: {
|
env: {
|
||||||
...process.env,
|
...process.env,
|
||||||
DATABASE_URL: 'postgresql://portfolio_user:portfolio_dev_pass@localhost:5432/portfolio_dev?schema=public',
|
DATABASE_URL: process.env.DATABASE_URL || 'postgresql://portfolio_user:portfolio_dev_pass@localhost:5432/portfolio_dev?schema=public',
|
||||||
REDIS_URL: 'redis://localhost:6379',
|
REDIS_URL: 'redis://localhost:6379',
|
||||||
NODE_ENV: 'development'
|
NODE_ENV: 'development'
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ console.log('💡 For full development environment with DB, use: npm run dev:ful
|
|||||||
const env = {
|
const env = {
|
||||||
...process.env,
|
...process.env,
|
||||||
NODE_ENV: 'development',
|
NODE_ENV: 'development',
|
||||||
DATABASE_URL: 'postgresql://portfolio_user:portfolio_dev_pass@localhost:5432/portfolio_dev?schema=public',
|
DATABASE_URL: process.env.DATABASE_URL || 'postgresql://portfolio_user:portfolio_dev_pass@localhost:5432/portfolio_dev?schema=public',
|
||||||
REDIS_URL: 'redis://localhost:6379',
|
REDIS_URL: 'redis://localhost:6379',
|
||||||
NEXT_PUBLIC_BASE_URL: 'http://localhost:3000'
|
NEXT_PUBLIC_BASE_URL: 'http://localhost:3000'
|
||||||
};
|
};
|
||||||
|
|||||||
201
scripts/gitea-deploy-simple.sh
Executable file
201
scripts/gitea-deploy-simple.sh
Executable file
@@ -0,0 +1,201 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Simplified Gitea deployment script for testing
|
||||||
|
# This version doesn't require database dependencies
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
PROJECT_NAME="portfolio"
|
||||||
|
CONTAINER_NAME="portfolio-app-simple"
|
||||||
|
IMAGE_NAME="portfolio-app"
|
||||||
|
PORT=3000
|
||||||
|
BACKUP_PORT=3001
|
||||||
|
LOG_FILE="./logs/gitea-deploy-simple.log"
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Logging function
|
||||||
|
log() {
|
||||||
|
echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
error() {
|
||||||
|
echo -e "${RED}[ERROR]${NC} $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
success() {
|
||||||
|
echo -e "${GREEN}[SUCCESS]${NC} $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
warning() {
|
||||||
|
echo -e "${YELLOW}[WARNING]${NC} $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if running as root
|
||||||
|
if [[ $EUID -eq 0 ]]; then
|
||||||
|
error "This script should not be run as root"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if Docker is running
|
||||||
|
if ! docker info > /dev/null 2>&1; then
|
||||||
|
error "Docker is not running. Please start Docker and try again."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if we're in the right directory
|
||||||
|
if [ ! -f "package.json" ] || [ ! -f "Dockerfile" ]; then
|
||||||
|
error "Please run this script from the project root directory"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "🚀 Starting simplified Gitea deployment for $PROJECT_NAME"
|
||||||
|
|
||||||
|
# Step 1: Build Application
|
||||||
|
log "🔨 Step 1: Building application..."
|
||||||
|
|
||||||
|
# Build Next.js application
|
||||||
|
log "📦 Building Next.js application..."
|
||||||
|
npm run build || {
|
||||||
|
error "Build failed"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
success "✅ Application built successfully"
|
||||||
|
|
||||||
|
# Step 2: Docker Operations
|
||||||
|
log "🐳 Step 2: Docker operations..."
|
||||||
|
|
||||||
|
# Build Docker image
|
||||||
|
log "🏗️ Building Docker image..."
|
||||||
|
docker build -t "$IMAGE_NAME:latest" . || {
|
||||||
|
error "Docker build failed"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Tag with timestamp
|
||||||
|
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
|
||||||
|
docker tag "$IMAGE_NAME:latest" "$IMAGE_NAME:$TIMESTAMP"
|
||||||
|
|
||||||
|
success "✅ Docker image built successfully"
|
||||||
|
|
||||||
|
# Step 3: Deployment
|
||||||
|
log "🚀 Step 3: Deploying application..."
|
||||||
|
|
||||||
|
# Check if container is running
|
||||||
|
if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null)" = "true" ]; then
|
||||||
|
log "📦 Stopping existing container..."
|
||||||
|
docker stop "$CONTAINER_NAME" || true
|
||||||
|
docker rm "$CONTAINER_NAME" || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if port is available
|
||||||
|
if lsof -Pi :$PORT -sTCP:LISTEN -t >/dev/null ; then
|
||||||
|
warning "Port $PORT is in use. Trying backup port $BACKUP_PORT"
|
||||||
|
DEPLOY_PORT=$BACKUP_PORT
|
||||||
|
else
|
||||||
|
DEPLOY_PORT=$PORT
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Start new container with minimal environment variables
|
||||||
|
log "🚀 Starting new container on port $DEPLOY_PORT..."
|
||||||
|
docker run -d \
|
||||||
|
--name "$CONTAINER_NAME" \
|
||||||
|
--restart unless-stopped \
|
||||||
|
-p "$DEPLOY_PORT:3000" \
|
||||||
|
-e NODE_ENV=production \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL=https://dk0.dev \
|
||||||
|
-e MY_EMAIL=contact@dk0.dev \
|
||||||
|
-e MY_INFO_EMAIL=info@dk0.dev \
|
||||||
|
-e MY_PASSWORD=test-password \
|
||||||
|
-e MY_INFO_PASSWORD=test-password \
|
||||||
|
-e ADMIN_BASIC_AUTH=admin:test123 \
|
||||||
|
-e LOG_LEVEL=info \
|
||||||
|
"$IMAGE_NAME:latest" || {
|
||||||
|
error "Failed to start container"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Wait for container to be ready
|
||||||
|
log "⏳ Waiting for container to be ready..."
|
||||||
|
sleep 20
|
||||||
|
|
||||||
|
# Check if container is actually running
|
||||||
|
if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null)" != "true" ]; then
|
||||||
|
error "Container failed to start or crashed"
|
||||||
|
log "Container logs:"
|
||||||
|
docker logs "$CONTAINER_NAME" --tail=50
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Health check
|
||||||
|
log "🏥 Performing health check..."
|
||||||
|
HEALTH_CHECK_TIMEOUT=180
|
||||||
|
HEALTH_CHECK_INTERVAL=5
|
||||||
|
ELAPSED=0
|
||||||
|
|
||||||
|
while [ $ELAPSED -lt $HEALTH_CHECK_TIMEOUT ]; do
|
||||||
|
# Check if container is still running
|
||||||
|
if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null)" != "true" ]; then
|
||||||
|
error "Container stopped during health check"
|
||||||
|
log "Container logs:"
|
||||||
|
docker logs "$CONTAINER_NAME" --tail=50
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Try health check endpoint
|
||||||
|
if curl -f "http://localhost:$DEPLOY_PORT/api/health" > /dev/null 2>&1; then
|
||||||
|
success "✅ Application is healthy!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
|
||||||
|
sleep $HEALTH_CHECK_INTERVAL
|
||||||
|
ELAPSED=$((ELAPSED + HEALTH_CHECK_INTERVAL))
|
||||||
|
echo -n "."
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ $ELAPSED -ge $HEALTH_CHECK_TIMEOUT ]; then
|
||||||
|
error "Health check timeout. Application may not be running properly."
|
||||||
|
log "Container status:"
|
||||||
|
docker inspect "$CONTAINER_NAME" --format='{{.State.Status}} - {{.State.Health.Status}}'
|
||||||
|
log "Container logs:"
|
||||||
|
docker logs "$CONTAINER_NAME" --tail=100
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Step 4: Verification
|
||||||
|
log "✅ Step 4: Verifying deployment..."
|
||||||
|
|
||||||
|
# Test main page
|
||||||
|
if curl -f "http://localhost:$DEPLOY_PORT/" > /dev/null 2>&1; then
|
||||||
|
success "✅ Main page is accessible"
|
||||||
|
else
|
||||||
|
error "❌ Main page is not accessible"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Show container status
|
||||||
|
log "📊 Container status:"
|
||||||
|
docker ps --filter "name=$CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
|
||||||
|
|
||||||
|
# Show resource usage
|
||||||
|
log "📈 Resource usage:"
|
||||||
|
docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" "$CONTAINER_NAME"
|
||||||
|
|
||||||
|
# Final success message
|
||||||
|
success "🎉 Simplified Gitea deployment completed successfully!"
|
||||||
|
log "🌐 Application is available at: http://localhost:$DEPLOY_PORT"
|
||||||
|
log "🏥 Health check endpoint: http://localhost:$DEPLOY_PORT/api/health"
|
||||||
|
log "📊 Container name: $CONTAINER_NAME"
|
||||||
|
log "📝 Logs: docker logs $CONTAINER_NAME"
|
||||||
|
|
||||||
|
# Update deployment log
|
||||||
|
echo "$(date): Simplified Gitea deployment successful - Port: $DEPLOY_PORT - Image: $IMAGE_NAME:$TIMESTAMP" >> "$LOG_FILE"
|
||||||
|
|
||||||
|
exit 0
|
||||||
233
scripts/gitea-deploy.sh
Executable file
233
scripts/gitea-deploy.sh
Executable file
@@ -0,0 +1,233 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Gitea-specific deployment script
|
||||||
|
# Optimiert für lokalen Gitea Runner
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
PROJECT_NAME="portfolio"
|
||||||
|
CONTAINER_NAME="portfolio-app"
|
||||||
|
IMAGE_NAME="portfolio-app"
|
||||||
|
PORT=3000
|
||||||
|
BACKUP_PORT=3001
|
||||||
|
LOG_FILE="./logs/gitea-deploy.log"
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Logging function
|
||||||
|
log() {
|
||||||
|
echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
error() {
|
||||||
|
echo -e "${RED}[ERROR]${NC} $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
success() {
|
||||||
|
echo -e "${GREEN}[SUCCESS]${NC} $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
warning() {
|
||||||
|
echo -e "${YELLOW}[WARNING]${NC} $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if running as root
|
||||||
|
if [[ $EUID -eq 0 ]]; then
|
||||||
|
error "This script should not be run as root"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if Docker is running
|
||||||
|
if ! docker info > /dev/null 2>&1; then
|
||||||
|
error "Docker is not running. Please start Docker and try again."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if we're in the right directory
|
||||||
|
if [ ! -f "package.json" ] || [ ! -f "Dockerfile" ]; then
|
||||||
|
error "Please run this script from the project root directory"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "🚀 Starting Gitea deployment for $PROJECT_NAME"
|
||||||
|
|
||||||
|
# Step 1: Code Quality Checks
|
||||||
|
log "📋 Step 1: Running code quality checks..."
|
||||||
|
|
||||||
|
# Run linting
|
||||||
|
log "🔍 Running ESLint..."
|
||||||
|
npm run lint || {
|
||||||
|
error "ESLint failed. Please fix the issues before deploying."
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Run tests
|
||||||
|
log "🧪 Running tests..."
|
||||||
|
npm run test || {
|
||||||
|
error "Tests failed. Please fix the issues before deploying."
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
success "✅ Code quality checks passed"
|
||||||
|
|
||||||
|
# Step 2: Build Application
|
||||||
|
log "🔨 Step 2: Building application..."
|
||||||
|
|
||||||
|
# Build Next.js application
|
||||||
|
log "📦 Building Next.js application..."
|
||||||
|
npm run build || {
|
||||||
|
error "Build failed"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
success "✅ Application built successfully"
|
||||||
|
|
||||||
|
# Step 3: Docker Operations
|
||||||
|
log "🐳 Step 3: Docker operations..."
|
||||||
|
|
||||||
|
# Build Docker image
|
||||||
|
log "🏗️ Building Docker image..."
|
||||||
|
docker build -t "$IMAGE_NAME:latest" . || {
|
||||||
|
error "Docker build failed"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Tag with timestamp
|
||||||
|
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
|
||||||
|
docker tag "$IMAGE_NAME:latest" "$IMAGE_NAME:$TIMESTAMP"
|
||||||
|
|
||||||
|
success "✅ Docker image built successfully"
|
||||||
|
|
||||||
|
# Step 4: Deployment
|
||||||
|
log "🚀 Step 4: Deploying application..."
|
||||||
|
|
||||||
|
# Check if container is running
|
||||||
|
if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null)" = "true" ]; then
|
||||||
|
log "📦 Stopping existing container..."
|
||||||
|
docker stop "$CONTAINER_NAME" || true
|
||||||
|
docker rm "$CONTAINER_NAME" || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if port is available
|
||||||
|
if lsof -Pi :$PORT -sTCP:LISTEN -t >/dev/null ; then
|
||||||
|
warning "Port $PORT is in use. Trying backup port $BACKUP_PORT"
|
||||||
|
DEPLOY_PORT=$BACKUP_PORT
|
||||||
|
else
|
||||||
|
DEPLOY_PORT=$PORT
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Start new container with environment variables
|
||||||
|
log "🚀 Starting new container on port $DEPLOY_PORT..."
|
||||||
|
docker run -d \
|
||||||
|
--name "$CONTAINER_NAME" \
|
||||||
|
--restart unless-stopped \
|
||||||
|
-p "$DEPLOY_PORT:3000" \
|
||||||
|
-e NODE_ENV=production \
|
||||||
|
-e NEXT_PUBLIC_BASE_URL=https://dk0.dev \
|
||||||
|
-e MY_EMAIL=contact@dk0.dev \
|
||||||
|
-e MY_INFO_EMAIL=info@dk0.dev \
|
||||||
|
-e MY_PASSWORD="${MY_PASSWORD:-your-email-password}" \
|
||||||
|
-e MY_INFO_PASSWORD="${MY_INFO_PASSWORD:-your-info-email-password}" \
|
||||||
|
-e ADMIN_BASIC_AUTH="${ADMIN_BASIC_AUTH:-admin:your_secure_password_here}" \
|
||||||
|
-e LOG_LEVEL=info \
|
||||||
|
"$IMAGE_NAME:latest" || {
|
||||||
|
error "Failed to start container"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Wait for container to be ready
|
||||||
|
log "⏳ Waiting for container to be ready..."
|
||||||
|
sleep 15
|
||||||
|
|
||||||
|
# Check if container is actually running
|
||||||
|
if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null)" != "true" ]; then
|
||||||
|
error "Container failed to start or crashed"
|
||||||
|
log "Container logs:"
|
||||||
|
docker logs "$CONTAINER_NAME" --tail=50
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Health check
|
||||||
|
log "🏥 Performing health check..."
|
||||||
|
HEALTH_CHECK_TIMEOUT=120
|
||||||
|
HEALTH_CHECK_INTERVAL=3
|
||||||
|
ELAPSED=0
|
||||||
|
|
||||||
|
while [ $ELAPSED -lt $HEALTH_CHECK_TIMEOUT ]; do
|
||||||
|
# Check if container is still running
|
||||||
|
if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null)" != "true" ]; then
|
||||||
|
error "Container stopped during health check"
|
||||||
|
log "Container logs:"
|
||||||
|
docker logs "$CONTAINER_NAME" --tail=50
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Try health check endpoint
|
||||||
|
if curl -f "http://localhost:$DEPLOY_PORT/api/health" > /dev/null 2>&1; then
|
||||||
|
success "✅ Application is healthy!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
|
||||||
|
sleep $HEALTH_CHECK_INTERVAL
|
||||||
|
ELAPSED=$((ELAPSED + HEALTH_CHECK_INTERVAL))
|
||||||
|
echo -n "."
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ $ELAPSED -ge $HEALTH_CHECK_TIMEOUT ]; then
|
||||||
|
error "Health check timeout. Application may not be running properly."
|
||||||
|
log "Container status:"
|
||||||
|
docker inspect "$CONTAINER_NAME" --format='{{.State.Status}} - {{.State.Health.Status}}'
|
||||||
|
log "Container logs:"
|
||||||
|
docker logs "$CONTAINER_NAME" --tail=100
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Step 5: Verification
|
||||||
|
log "✅ Step 5: Verifying deployment..."
|
||||||
|
|
||||||
|
# Test main page
|
||||||
|
if curl -f "http://localhost:$DEPLOY_PORT/" > /dev/null 2>&1; then
|
||||||
|
success "✅ Main page is accessible"
|
||||||
|
else
|
||||||
|
error "❌ Main page is not accessible"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Show container status
|
||||||
|
log "📊 Container status:"
|
||||||
|
docker ps --filter "name=$CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
|
||||||
|
|
||||||
|
# Show resource usage
|
||||||
|
log "📈 Resource usage:"
|
||||||
|
docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" "$CONTAINER_NAME"
|
||||||
|
|
||||||
|
# Step 6: Cleanup
|
||||||
|
log "🧹 Step 6: Cleaning up old images..."
|
||||||
|
|
||||||
|
# Remove old images (keep last 3 versions)
|
||||||
|
docker images "$IMAGE_NAME" --format "table {{.Tag}}\t{{.ID}}" | tail -n +2 | head -n -3 | awk '{print $2}' | xargs -r docker rmi || {
|
||||||
|
warning "No old images to remove"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Clean up unused Docker resources
|
||||||
|
docker system prune -f --volumes || {
|
||||||
|
warning "Failed to clean up Docker resources"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Final success message
|
||||||
|
success "🎉 Gitea deployment completed successfully!"
|
||||||
|
log "🌐 Application is available at: http://localhost:$DEPLOY_PORT"
|
||||||
|
log "🏥 Health check endpoint: http://localhost:$DEPLOY_PORT/api/health"
|
||||||
|
log "📊 Container name: $CONTAINER_NAME"
|
||||||
|
log "📝 Logs: docker logs $CONTAINER_NAME"
|
||||||
|
|
||||||
|
# Update deployment log
|
||||||
|
echo "$(date): Gitea deployment successful - Port: $DEPLOY_PORT - Image: $IMAGE_NAME:$TIMESTAMP" >> "$LOG_FILE"
|
||||||
|
|
||||||
|
exit 0
|
||||||
@@ -37,7 +37,7 @@ warning() {
|
|||||||
check_health() {
|
check_health() {
|
||||||
log "Checking application health..."
|
log "Checking application health..."
|
||||||
|
|
||||||
if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
|
if curl -f http://localhost/api/health > /dev/null 2>&1; then
|
||||||
success "Application is healthy"
|
success "Application is healthy"
|
||||||
return 0
|
return 0
|
||||||
else
|
else
|
||||||
|
|||||||
85
scripts/security-scan.sh
Executable file
85
scripts/security-scan.sh
Executable file
@@ -0,0 +1,85 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Security Scan Script
|
||||||
|
# This script runs various security checks on the portfolio project
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
echo "🔒 Starting security scan..."
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Function to print colored output
|
||||||
|
print_status() {
|
||||||
|
echo -e "${GREEN}✅ $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_warning() {
|
||||||
|
echo -e "${YELLOW}⚠️ $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_error() {
|
||||||
|
echo -e "${RED}❌ $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if we're in the right directory
|
||||||
|
if [ ! -f "package.json" ]; then
|
||||||
|
print_error "Please run this script from the project root directory"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 1. NPM Audit
|
||||||
|
echo "🔍 Running npm audit..."
|
||||||
|
if npm audit --audit-level=high; then
|
||||||
|
print_status "NPM audit passed - no high/critical vulnerabilities found"
|
||||||
|
else
|
||||||
|
print_warning "NPM audit found vulnerabilities - check the output above"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 2. Trivy scan (if available)
|
||||||
|
echo "🔍 Running Trivy vulnerability scan..."
|
||||||
|
if command -v trivy &> /dev/null; then
|
||||||
|
if trivy fs --scanners vuln,secret --format table .; then
|
||||||
|
print_status "Trivy scan completed successfully"
|
||||||
|
else
|
||||||
|
print_warning "Trivy scan found issues - check the output above"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
print_warning "Trivy not installed - skipping Trivy scan"
|
||||||
|
echo "To install Trivy: brew install trivy"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 3. Check for secrets using advanced detection
|
||||||
|
echo "🔍 Checking for potential secrets in code..."
|
||||||
|
if ./scripts/check-secrets.sh; then
|
||||||
|
print_status "No secrets found in code"
|
||||||
|
else
|
||||||
|
print_error "Secrets detected - please review"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 4. Check for outdated dependencies
|
||||||
|
echo "🔍 Checking for outdated dependencies..."
|
||||||
|
if npm outdated; then
|
||||||
|
print_status "All dependencies are up to date"
|
||||||
|
else
|
||||||
|
print_warning "Some dependencies are outdated - consider updating"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 5. Check for known vulnerable packages
|
||||||
|
echo "🔍 Checking for known vulnerable packages..."
|
||||||
|
if npm audit --audit-level=moderate; then
|
||||||
|
print_status "No moderate+ vulnerabilities found"
|
||||||
|
else
|
||||||
|
print_warning "Some vulnerabilities found - run 'npm audit fix' to attempt fixes"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "🔒 Security scan completed!"
|
||||||
|
echo "For more detailed security analysis, consider:"
|
||||||
|
echo " - Running 'npm audit fix' to fix vulnerabilities"
|
||||||
|
echo " - Installing Trivy for comprehensive vulnerability scanning"
|
||||||
|
echo " - Using tools like Snyk or GitHub Dependabot for ongoing monitoring"
|
||||||
@@ -6,7 +6,7 @@ const { exec } = require('child_process');
|
|||||||
console.log('🗄️ Setting up database...');
|
console.log('🗄️ Setting up database...');
|
||||||
|
|
||||||
// Set environment variables for development
|
// Set environment variables for development
|
||||||
process.env.DATABASE_URL = 'postgresql://portfolio_user:portfolio_dev_pass@localhost:5432/portfolio_dev?schema=public';
|
process.env.DATABASE_URL = process.env.DATABASE_URL || 'postgresql://portfolio_user:portfolio_dev_pass@localhost:5432/portfolio_dev?schema=public';
|
||||||
|
|
||||||
// Function to run command and return promise
|
// Function to run command and return promise
|
||||||
function runCommand(command) {
|
function runCommand(command) {
|
||||||
|
|||||||
192
scripts/setup-gitea-runner.sh
Executable file
192
scripts/setup-gitea-runner.sh
Executable file
@@ -0,0 +1,192 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Gitea Runner Setup Script
|
||||||
|
# Installiert und konfiguriert einen lokalen Gitea Runner
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
GITEA_URL="${GITEA_URL:-http://localhost:3000}"
|
||||||
|
RUNNER_NAME="${RUNNER_NAME:-portfolio-runner}"
|
||||||
|
RUNNER_LABELS="${RUNNER_LABELS:-ubuntu-latest,self-hosted,portfolio}"
|
||||||
|
RUNNER_WORK_DIR="${RUNNER_WORK_DIR:-/tmp/gitea-runner}"
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Logging function
|
||||||
|
log() {
|
||||||
|
echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
error() {
|
||||||
|
echo -e "${RED}[ERROR]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
success() {
|
||||||
|
echo -e "${GREEN}[SUCCESS]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
warning() {
|
||||||
|
echo -e "${YELLOW}[WARNING]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if running as root
|
||||||
|
if [[ $EUID -eq 0 ]]; then
|
||||||
|
error "This script should not be run as root"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "🚀 Setting up Gitea Runner for Portfolio"
|
||||||
|
|
||||||
|
# Check if Gitea URL is accessible
|
||||||
|
log "🔍 Checking Gitea server accessibility..."
|
||||||
|
if ! curl -f "$GITEA_URL" > /dev/null 2>&1; then
|
||||||
|
error "Cannot access Gitea server at $GITEA_URL"
|
||||||
|
error "Please make sure Gitea is running and accessible"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
success "✅ Gitea server is accessible"
|
||||||
|
|
||||||
|
# Create runner directory
|
||||||
|
log "📁 Creating runner directory..."
|
||||||
|
mkdir -p "$RUNNER_WORK_DIR"
|
||||||
|
cd "$RUNNER_WORK_DIR"
|
||||||
|
|
||||||
|
# Download Gitea Runner
|
||||||
|
log "📥 Downloading Gitea Runner..."
|
||||||
|
RUNNER_VERSION="latest"
|
||||||
|
RUNNER_ARCH="linux-amd64"
|
||||||
|
|
||||||
|
# Get latest version
|
||||||
|
if [ "$RUNNER_VERSION" = "latest" ]; then
|
||||||
|
RUNNER_VERSION=$(curl -s https://api.github.com/repos/woodpecker-ci/woodpecker/releases/latest | grep -o '"tag_name": "[^"]*' | grep -o '[^"]*$')
|
||||||
|
fi
|
||||||
|
|
||||||
|
RUNNER_URL="https://github.com/woodpecker-ci/woodpecker/releases/download/${RUNNER_VERSION}/woodpecker-agent_${RUNNER_VERSION}_${RUNNER_ARCH}.tar.gz"
|
||||||
|
|
||||||
|
log "Downloading from: $RUNNER_URL"
|
||||||
|
curl -L -o woodpecker-agent.tar.gz "$RUNNER_URL"
|
||||||
|
|
||||||
|
# Extract runner
|
||||||
|
log "📦 Extracting Gitea Runner..."
|
||||||
|
tar -xzf woodpecker-agent.tar.gz
|
||||||
|
chmod +x woodpecker-agent
|
||||||
|
|
||||||
|
success "✅ Gitea Runner downloaded and extracted"
|
||||||
|
|
||||||
|
# Create systemd service
|
||||||
|
log "⚙️ Creating systemd service..."
|
||||||
|
sudo tee /etc/systemd/system/gitea-runner.service > /dev/null <<EOF
|
||||||
|
[Unit]
|
||||||
|
Description=Gitea Runner for Portfolio
|
||||||
|
After=network.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
User=$USER
|
||||||
|
WorkingDirectory=$RUNNER_WORK_DIR
|
||||||
|
ExecStart=$RUNNER_WORK_DIR/woodpecker-agent
|
||||||
|
Restart=always
|
||||||
|
RestartSec=5
|
||||||
|
Environment=WOODPECKER_SERVER=$GITEA_URL
|
||||||
|
Environment=WOODPECKER_AGENT_SECRET=
|
||||||
|
Environment=WOODPECKER_LOG_LEVEL=info
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Reload systemd
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
|
||||||
|
success "✅ Systemd service created"
|
||||||
|
|
||||||
|
# Instructions for manual registration
|
||||||
|
log "📋 Manual registration required:"
|
||||||
|
echo ""
|
||||||
|
echo "1. Go to your Gitea instance: $GITEA_URL"
|
||||||
|
echo "2. Navigate to: Settings → Actions → Runners"
|
||||||
|
echo "3. Click 'Create new Runner'"
|
||||||
|
echo "4. Copy the registration token"
|
||||||
|
echo "5. Run the following command:"
|
||||||
|
echo ""
|
||||||
|
echo " cd $RUNNER_WORK_DIR"
|
||||||
|
echo " ./woodpecker-agent register --server $GITEA_URL --token YOUR_TOKEN"
|
||||||
|
echo ""
|
||||||
|
echo "6. After registration, start the service:"
|
||||||
|
echo " sudo systemctl enable gitea-runner"
|
||||||
|
echo " sudo systemctl start gitea-runner"
|
||||||
|
echo ""
|
||||||
|
echo "7. Check status:"
|
||||||
|
echo " sudo systemctl status gitea-runner"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Create helper scripts
|
||||||
|
log "📝 Creating helper scripts..."
|
||||||
|
|
||||||
|
# Start script
|
||||||
|
cat > "$RUNNER_WORK_DIR/start-runner.sh" << 'EOF'
|
||||||
|
#!/bin/bash
|
||||||
|
echo "Starting Gitea Runner..."
|
||||||
|
sudo systemctl start gitea-runner
|
||||||
|
sudo systemctl status gitea-runner
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Stop script
|
||||||
|
cat > "$RUNNER_WORK_DIR/stop-runner.sh" << 'EOF'
|
||||||
|
#!/bin/bash
|
||||||
|
echo "Stopping Gitea Runner..."
|
||||||
|
sudo systemctl stop gitea-runner
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Status script
|
||||||
|
cat > "$RUNNER_WORK_DIR/status-runner.sh" << 'EOF'
|
||||||
|
#!/bin/bash
|
||||||
|
echo "Gitea Runner Status:"
|
||||||
|
sudo systemctl status gitea-runner
|
||||||
|
echo ""
|
||||||
|
echo "Logs (last 20 lines):"
|
||||||
|
sudo journalctl -u gitea-runner -n 20 --no-pager
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Logs script
|
||||||
|
cat > "$RUNNER_WORK_DIR/logs-runner.sh" << 'EOF'
|
||||||
|
#!/bin/bash
|
||||||
|
echo "Gitea Runner Logs:"
|
||||||
|
sudo journalctl -u gitea-runner -f
|
||||||
|
EOF
|
||||||
|
|
||||||
|
chmod +x "$RUNNER_WORK_DIR"/*.sh
|
||||||
|
|
||||||
|
success "✅ Helper scripts created"
|
||||||
|
|
||||||
|
# Create environment file
|
||||||
|
cat > "$RUNNER_WORK_DIR/.env" << EOF
|
||||||
|
# Gitea Runner Configuration
|
||||||
|
GITEA_URL=$GITEA_URL
|
||||||
|
RUNNER_NAME=$RUNNER_NAME
|
||||||
|
RUNNER_LABELS=$RUNNER_LABELS
|
||||||
|
RUNNER_WORK_DIR=$RUNNER_WORK_DIR
|
||||||
|
EOF
|
||||||
|
|
||||||
|
log "📋 Setup Summary:"
|
||||||
|
echo " • Runner Directory: $RUNNER_WORK_DIR"
|
||||||
|
echo " • Gitea URL: $GITEA_URL"
|
||||||
|
echo " • Runner Name: $RUNNER_NAME"
|
||||||
|
echo " • Labels: $RUNNER_LABELS"
|
||||||
|
echo " • Helper Scripts: $RUNNER_WORK_DIR/*.sh"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
log "🎯 Next Steps:"
|
||||||
|
echo "1. Register the runner in Gitea web interface"
|
||||||
|
echo "2. Enable and start the service"
|
||||||
|
echo "3. Test with a workflow run"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
success "🎉 Gitea Runner setup completed!"
|
||||||
|
log "📁 All files are in: $RUNNER_WORK_DIR"
|
||||||
62
sync-env.ps1
Normal file
62
sync-env.ps1
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# Simple Bitwarden .env Sync for Windows
|
||||||
|
# Run with: powershell -ExecutionPolicy Bypass -File sync-env.ps1
|
||||||
|
|
||||||
|
Write-Host "=== Bitwarden .env Sync ===" -ForegroundColor Cyan
|
||||||
|
|
||||||
|
# Check if bw is installed
|
||||||
|
if (!(Get-Command bw -ErrorAction SilentlyContinue)) {
|
||||||
|
Write-Host "Error: Bitwarden CLI (bw) is not installed" -ForegroundColor Red
|
||||||
|
Write-Host "Install it from: https://bitwarden.com/help/cli/"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check status
|
||||||
|
$status = bw status | ConvertFrom-Json
|
||||||
|
|
||||||
|
if ($status.status -eq "unauthenticated") {
|
||||||
|
Write-Host "Please login to Bitwarden:"
|
||||||
|
bw login
|
||||||
|
$status = bw status | ConvertFrom-Json
|
||||||
|
}
|
||||||
|
|
||||||
|
# Unlock if needed
|
||||||
|
if ($status.status -eq "locked") {
|
||||||
|
Write-Host "Unlocking vault..."
|
||||||
|
$env:BW_SESSION = bw unlock --raw
|
||||||
|
if ($LASTEXITCODE -ne 0) {
|
||||||
|
Write-Host "Error: Failed to unlock vault" -ForegroundColor Red
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Sync
|
||||||
|
Write-Host "Syncing with Bitwarden..."
|
||||||
|
bw sync | Out-Null
|
||||||
|
|
||||||
|
# CHANGE THIS to your Bitwarden item name
|
||||||
|
$itemName = "portfolio-env"
|
||||||
|
|
||||||
|
Write-Host "Fetching environment variables..."
|
||||||
|
|
||||||
|
# Get item
|
||||||
|
$item = bw get item $itemName 2>$null | ConvertFrom-Json
|
||||||
|
|
||||||
|
if (!$item) {
|
||||||
|
Write-Host "Error: Could not find item '$itemName' in Bitwarden" -ForegroundColor Red
|
||||||
|
Write-Host "Make sure you have an item with this exact name in your vault"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Get notes
|
||||||
|
$notes = $item.notes
|
||||||
|
|
||||||
|
if (!$notes) {
|
||||||
|
Write-Host "Error: No notes found in item '$itemName'" -ForegroundColor Red
|
||||||
|
Write-Host "Add your environment variables to the notes field"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create .env file
|
||||||
|
$notes | Out-File -FilePath ".env" -Encoding UTF8 -NoNewline
|
||||||
|
|
||||||
|
Write-Host "✓ Created .env file successfully!" -ForegroundColor Green
|
||||||
64
sync-env.sh
Executable file
64
sync-env.sh
Executable file
@@ -0,0 +1,64 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Simple Bitwarden .env Sync
|
||||||
|
# Works on Mac and Windows (Git Bash)
|
||||||
|
|
||||||
|
echo "=== Bitwarden .env Sync ==="
|
||||||
|
|
||||||
|
# Check if bw is installed
|
||||||
|
if ! command -v bw &> /dev/null; then
|
||||||
|
echo "Error: Bitwarden CLI (bw) is not installed"
|
||||||
|
echo "Install it from: https://bitwarden.com/help/cli/"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if logged in
|
||||||
|
STATUS=$(bw status | grep -o '"status":"[^"]*' | cut -d'"' -f4)
|
||||||
|
|
||||||
|
if [ "$STATUS" = "unauthenticated" ]; then
|
||||||
|
echo "Please login to Bitwarden:"
|
||||||
|
bw login
|
||||||
|
STATUS="locked"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Unlock vault if needed
|
||||||
|
if [ "$STATUS" = "locked" ]; then
|
||||||
|
echo "Unlocking vault..."
|
||||||
|
export BW_SESSION=$(bw unlock --raw)
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "Error: Failed to unlock vault"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Sync with Bitwarden
|
||||||
|
echo "Syncing with Bitwarden..."
|
||||||
|
bw sync
|
||||||
|
|
||||||
|
# CHANGE THIS to your Bitwarden item name
|
||||||
|
ITEM_NAME="portfolio-env"
|
||||||
|
|
||||||
|
echo "Fetching environment variables..."
|
||||||
|
|
||||||
|
# Get the item from Bitwarden
|
||||||
|
ITEM=$(bw get item "$ITEM_NAME" 2>/dev/null)
|
||||||
|
|
||||||
|
if [ -z "$ITEM" ]; then
|
||||||
|
echo "Error: Could not find item '$ITEM_NAME' in Bitwarden"
|
||||||
|
echo "Make sure you have an item with this exact name in your vault"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Extract notes (where env vars should be stored)
|
||||||
|
NOTES=$(echo "$ITEM" | grep -o '"notes":"[^"]*' | cut -d'"' -f4 | sed 's/\\n/\n/g')
|
||||||
|
|
||||||
|
if [ -z "$NOTES" ]; then
|
||||||
|
echo "Error: No notes found in item '$ITEM_NAME'"
|
||||||
|
echo "Add your environment variables to the notes field"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create .env file
|
||||||
|
echo "$NOTES" > .env
|
||||||
|
|
||||||
|
echo "✓ Created .env file successfully!"
|
||||||
36
test-deployment.sh
Executable file
36
test-deployment.sh
Executable file
@@ -0,0 +1,36 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Test script for deployment issues
|
||||||
|
echo "🧪 Testing deployment locally..."
|
||||||
|
|
||||||
|
# Set test environment variables
|
||||||
|
export NODE_ENV=production
|
||||||
|
export LOG_LEVEL=info
|
||||||
|
export NEXT_PUBLIC_BASE_URL=https://dk0.dev
|
||||||
|
export NEXT_PUBLIC_UMAMI_URL=https://analytics.dk0.dev
|
||||||
|
export NEXT_PUBLIC_UMAMI_WEBSITE_ID=b3665829-927a-4ada-b9bb-fcf24171061e
|
||||||
|
export MY_EMAIL=contact@dk0.dev
|
||||||
|
export MY_INFO_EMAIL=info@dk0.dev
|
||||||
|
export MY_PASSWORD=test_password
|
||||||
|
export MY_INFO_PASSWORD=test_info_password
|
||||||
|
export ADMIN_BASIC_AUTH=admin:test_password
|
||||||
|
|
||||||
|
echo "🔧 Environment variables set:"
|
||||||
|
echo "NODE_ENV: $NODE_ENV"
|
||||||
|
echo "NEXT_PUBLIC_BASE_URL: $NEXT_PUBLIC_BASE_URL"
|
||||||
|
echo "MY_EMAIL: $MY_EMAIL"
|
||||||
|
|
||||||
|
echo "🧹 Cleaning up existing containers..."
|
||||||
|
docker compose down --remove-orphans || true
|
||||||
|
docker rm -f portfolio-app portfolio-postgres portfolio-redis || true
|
||||||
|
|
||||||
|
echo "🔧 Starting database and redis..."
|
||||||
|
docker compose up -d postgres redis
|
||||||
|
|
||||||
|
echo "⏳ Waiting for services to be ready..."
|
||||||
|
sleep 10
|
||||||
|
|
||||||
|
echo "📋 Checking running containers:"
|
||||||
|
docker ps --format "table {{.Names}}\t{{.Status}}"
|
||||||
|
|
||||||
|
echo "✅ Test completed!"
|
||||||
42
test-fix.sh
Executable file
42
test-fix.sh
Executable file
@@ -0,0 +1,42 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Test the fix for container conflicts and environment variables
|
||||||
|
echo "🧪 Testing the fix..."
|
||||||
|
|
||||||
|
# Set test environment variables
|
||||||
|
export NODE_ENV=production
|
||||||
|
export LOG_LEVEL=info
|
||||||
|
export NEXT_PUBLIC_BASE_URL=https://dk0.dev
|
||||||
|
export NEXT_PUBLIC_UMAMI_URL=https://analytics.dk0.dev
|
||||||
|
export NEXT_PUBLIC_UMAMI_WEBSITE_ID=b3665829-927a-4ada-b9bb-fcf24171061e
|
||||||
|
export MY_EMAIL=contact@dk0.dev
|
||||||
|
export MY_INFO_EMAIL=info@dk0.dev
|
||||||
|
export MY_PASSWORD=test_password
|
||||||
|
export MY_INFO_PASSWORD=test_info_password
|
||||||
|
export ADMIN_BASIC_AUTH=admin:test_password
|
||||||
|
|
||||||
|
echo "🔧 Environment variables set:"
|
||||||
|
echo "NODE_ENV: $NODE_ENV"
|
||||||
|
echo "NEXT_PUBLIC_BASE_URL: $NEXT_PUBLIC_BASE_URL"
|
||||||
|
echo "MY_EMAIL: $MY_EMAIL"
|
||||||
|
|
||||||
|
echo "🧹 Cleaning up ALL existing containers..."
|
||||||
|
docker compose down --remove-orphans || true
|
||||||
|
docker rm -f portfolio-app portfolio-postgres portfolio-redis || true
|
||||||
|
|
||||||
|
# Force remove the specific problematic container
|
||||||
|
docker rm -f 4dec125499540f66f4cb407b69d9aee5232f679feecd71ff2369544ff61f85ae || true
|
||||||
|
|
||||||
|
# Clean up any containers with portfolio in the name
|
||||||
|
docker ps -a --format "{{.Names}}" | grep portfolio | xargs -r docker rm -f || true
|
||||||
|
|
||||||
|
echo "🔧 Starting database and redis with environment variables..."
|
||||||
|
docker compose up -d postgres redis
|
||||||
|
|
||||||
|
echo "⏳ Waiting for services to be ready..."
|
||||||
|
sleep 10
|
||||||
|
|
||||||
|
echo "📋 Checking running containers:"
|
||||||
|
docker ps --format "table {{.Names}}\t{{.Status}}"
|
||||||
|
|
||||||
|
echo "✅ Test completed!"
|
||||||
Reference in New Issue
Block a user