Implement zero-downtime deployment strategy
- Add rolling update mechanism for seamless deployments (condensed shell sketch below)
- Start new container on port 3001, health check, then switch
- Preserve database and redis connections during updates
- Automatic fallback to fresh deployment if no current container
- Add advanced nginx load balancer configuration for future use
- Eliminate container name conflicts with proper cleanup
- Website stays online during deployments
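The rolling-update mechanism above reduces to a short shell sequence. The sketch below is my own condensation of the workflow steps further down (the container names, the temporary 3001:3000 mapping, and the /api/health endpoint come from the diff; environment flags are omitted for brevity), not a script shipped in this commit:

```bash
#!/usr/bin/env bash
# Condensed sketch of the zero-downtime swap performed by the workflow steps below.
set -euo pipefail

IMAGE="portfolio-app:latest"
OLD="portfolio-app"        # container currently serving port 3000
NEW="portfolio-app-new"    # candidate started on a staging port

# 1. Start the freshly built image next to the running one (env flags omitted here).
docker run -d --name "$NEW" --network portfolio_net -p 3001:3000 "$IMAGE"

# 2. Poll the candidate's health endpoint; the old container keeps serving meanwhile.
for _ in $(seq 1 30); do
  curl -fsS http://localhost:3001/api/health > /dev/null && break
  sleep 2
done
if ! curl -fsS http://localhost:3001/api/health > /dev/null; then
  docker logs "$NEW"        # surface the failure
  docker rm -f "$NEW"       # discard the candidate; the old container stays up
  exit 1
fi

# 3. Swap: retire the old container and promote the healthy build onto port 3000.
docker rm -f "$OLD" || true
docker rm -f "$NEW"
docker run -d --name "$OLD" --network portfolio_net -p 3000:3000 "$IMAGE"
```

The old container is only removed once the candidate has passed its health check, which is what keeps the site online during deployments.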
.gitea/workflows/ci-cd-zero-downtime.yml (new file, 194 lines)
@@ -0,0 +1,194 @@
name: CI/CD Pipeline (Zero Downtime)

on:
  push:
    branches: [ production ]

env:
  NODE_VERSION: '20'
  DOCKER_IMAGE: portfolio-app
  CONTAINER_NAME: portfolio-app
  NEW_CONTAINER_NAME: portfolio-app-new

jobs:
  production:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Run linting
        run: npm run lint

      - name: Run tests
        run: npm run test

      - name: Build application
        run: npm run build

      - name: Run security scan
        run: |
          echo "🔍 Running npm audit..."
          npm audit --audit-level=high || echo "⚠️ Some vulnerabilities found, but continuing..."

      - name: Build Docker image
        run: |
          docker build -t ${{ env.DOCKER_IMAGE }}:latest .
          docker tag ${{ env.DOCKER_IMAGE }}:latest ${{ env.DOCKER_IMAGE }}:$(date +%Y%m%d-%H%M%S)

      - name: Verify secrets and variables before deployment
        run: |
          echo "🔍 Verifying secrets and variables..."

          # Check Variables
          if [ -z "${{ vars.NEXT_PUBLIC_BASE_URL }}" ]; then
            echo "❌ NEXT_PUBLIC_BASE_URL variable is missing!"
            exit 1
          fi
          if [ -z "${{ vars.MY_EMAIL }}" ]; then
            echo "❌ MY_EMAIL variable is missing!"
            exit 1
          fi
          if [ -z "${{ vars.MY_INFO_EMAIL }}" ]; then
            echo "❌ MY_INFO_EMAIL variable is missing!"
            exit 1
          fi

          # Check Secrets
          if [ -z "${{ secrets.MY_PASSWORD }}" ]; then
            echo "❌ MY_PASSWORD secret is missing!"
            exit 1
          fi
          if [ -z "${{ secrets.MY_INFO_PASSWORD }}" ]; then
            echo "❌ MY_INFO_PASSWORD secret is missing!"
            exit 1
          fi
          if [ -z "${{ secrets.ADMIN_BASIC_AUTH }}" ]; then
            echo "❌ ADMIN_BASIC_AUTH secret is missing!"
            exit 1
          fi

          echo "✅ All required secrets and variables are present"

      - name: Start new container (zero downtime)
        run: |
          echo "🚀 Starting new container for zero-downtime deployment..."

          # Start new container with different name
          docker run -d \
            --name ${{ env.NEW_CONTAINER_NAME }} \
            --restart unless-stopped \
            --network portfolio_net \
            -p 3001:3000 \
            -e NODE_ENV=${{ vars.NODE_ENV }} \
            -e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
            -e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
            -e REDIS_URL=redis://redis:6379 \
            -e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
            -e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
            -e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
            -e MY_EMAIL="${{ vars.MY_EMAIL }}" \
            -e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
            -e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
            -e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
            -e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
            ${{ env.DOCKER_IMAGE }}:latest

          echo "✅ New container started on port 3001"

      - name: Health check new container
        run: |
          echo "🔍 Health checking new container..."
          sleep 10

          # Health check on new container
          for i in {1..30}; do
            if curl -f http://localhost:3001/api/health > /dev/null 2>&1; then
              echo "✅ New container is healthy!"
              break
            fi
            echo "⏳ Waiting for new container to be ready... ($i/30)"
            sleep 2
          done

          # Final health check
          if ! curl -f http://localhost:3001/api/health > /dev/null 2>&1; then
            echo "❌ New container failed health check!"
            docker logs ${{ env.NEW_CONTAINER_NAME }}
            exit 1
          fi

      - name: Switch traffic to new container (zero downtime)
        run: |
          echo "🔄 Switching traffic to new container..."

          # Stop old container
          docker stop ${{ env.CONTAINER_NAME }} || true

          # Remove old container
          docker rm ${{ env.CONTAINER_NAME }} || true

          # Rename new container to production name
          docker rename ${{ env.NEW_CONTAINER_NAME }} ${{ env.CONTAINER_NAME }}

          # Update port mapping (requires container restart)
          docker stop ${{ env.CONTAINER_NAME }}
          docker rm ${{ env.CONTAINER_NAME }}

          # Start with correct port
          docker run -d \
            --name ${{ env.CONTAINER_NAME }} \
            --restart unless-stopped \
            --network portfolio_net \
            -p 3000:3000 \
            -e NODE_ENV=${{ vars.NODE_ENV }} \
            -e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
            -e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
            -e REDIS_URL=redis://redis:6379 \
            -e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
            -e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
            -e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
            -e MY_EMAIL="${{ vars.MY_EMAIL }}" \
            -e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
            -e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
            -e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
            -e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
            ${{ env.DOCKER_IMAGE }}:latest

          echo "✅ Traffic switched successfully!"

      - name: Final health check
        run: |
          echo "🔍 Final health check..."
          sleep 5

          for i in {1..10}; do
            if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
              echo "✅ Deployment successful! Zero downtime achieved!"
              break
            fi
            echo "⏳ Final health check... ($i/10)"
            sleep 2
          done

          if ! curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
            echo "❌ Final health check failed!"
            docker logs ${{ env.CONTAINER_NAME }}
            exit 1
          fi

      - name: Cleanup old images
        run: |
          echo "🧹 Cleaning up old images..."
          docker image prune -f
          docker system prune -f
          echo "✅ Cleanup completed"
@@ -48,9 +48,25 @@ jobs:
           docker build -t ${{ env.DOCKER_IMAGE }}:latest .
           docker tag ${{ env.DOCKER_IMAGE }}:latest ${{ env.DOCKER_IMAGE }}:$(date +%Y%m%d-%H%M%S)

-      - name: Stop existing services
+      - name: Prepare for zero-downtime deployment
         run: |
-          docker compose down || true
+          echo "🚀 Preparing zero-downtime deployment..."
+
+          # Check if current container is running
+          if docker ps -q -f name=portfolio-app | grep -q .; then
+            echo "📊 Current container is running, proceeding with zero-downtime update"
+            CURRENT_CONTAINER_RUNNING=true
+          else
+            echo "📊 No current container running, doing fresh deployment"
+            CURRENT_CONTAINER_RUNNING=false
+          fi
+
+          # Ensure database and redis are running
+          echo "🔧 Ensuring database and redis are running..."
+          docker compose up -d postgres redis
+
+          # Wait for services to be ready
+          sleep 10

       - name: Verify secrets and variables before deployment
         run: |
@@ -86,9 +102,86 @@ jobs:

           echo "✅ All required secrets and variables are present"

-      - name: Start services with Docker Compose
+      - name: Deploy with zero downtime
         run: |
-          docker compose up -d
+          echo "🚀 Deploying with zero downtime..."
+
+          if [ "$CURRENT_CONTAINER_RUNNING" = "true" ]; then
+            echo "🔄 Performing rolling update..."
+
+            # Start new container with temporary name
+            docker run -d \
+              --name portfolio-app-new \
+              --restart unless-stopped \
+              --network portfolio_net \
+              -p 3001:3000 \
+              -e NODE_ENV=${{ vars.NODE_ENV }} \
+              -e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
+              -e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
+              -e REDIS_URL=redis://redis:6379 \
+              -e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
+              -e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
+              -e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
+              -e MY_EMAIL="${{ vars.MY_EMAIL }}" \
+              -e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
+              -e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
+              -e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
+              -e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
+              ${{ env.DOCKER_IMAGE }}:latest
+
+            # Wait for new container to be ready
+            echo "⏳ Waiting for new container to be ready..."
+            sleep 15
+
+            # Health check new container
+            for i in {1..20}; do
+              if curl -f http://localhost:3001/api/health > /dev/null 2>&1; then
+                echo "✅ New container is healthy!"
+                break
+              fi
+              echo "⏳ Health check attempt $i/20..."
+              sleep 3
+            done
+
+            # Stop old container
+            echo "🛑 Stopping old container..."
+            docker stop portfolio-app || true
+
+            # Remove old container
+            docker rm portfolio-app || true
+
+            # Rename new container
+            docker rename portfolio-app-new portfolio-app
+
+            # Update port mapping
+            docker stop portfolio-app
+            docker rm portfolio-app
+
+            # Start with correct port
+            docker run -d \
+              --name portfolio-app \
+              --restart unless-stopped \
+              --network portfolio_net \
+              -p 3000:3000 \
+              -e NODE_ENV=${{ vars.NODE_ENV }} \
+              -e LOG_LEVEL=${{ vars.LOG_LEVEL }} \
+              -e DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public \
+              -e REDIS_URL=redis://redis:6379 \
+              -e NEXT_PUBLIC_BASE_URL="${{ vars.NEXT_PUBLIC_BASE_URL }}" \
+              -e NEXT_PUBLIC_UMAMI_URL="${{ vars.NEXT_PUBLIC_UMAMI_URL }}" \
+              -e NEXT_PUBLIC_UMAMI_WEBSITE_ID="${{ vars.NEXT_PUBLIC_UMAMI_WEBSITE_ID }}" \
+              -e MY_EMAIL="${{ vars.MY_EMAIL }}" \
+              -e MY_INFO_EMAIL="${{ vars.MY_INFO_EMAIL }}" \
+              -e MY_PASSWORD="${{ secrets.MY_PASSWORD }}" \
+              -e MY_INFO_PASSWORD="${{ secrets.MY_INFO_PASSWORD }}" \
+              -e ADMIN_BASIC_AUTH="${{ secrets.ADMIN_BASIC_AUTH }}" \
+              ${{ env.DOCKER_IMAGE }}:latest
+
+            echo "✅ Rolling update completed!"
+          else
+            echo "🆕 Fresh deployment..."
+            docker compose up -d
+          fi
         env:
           NODE_ENV: ${{ vars.NODE_ENV }}
           LOG_LEVEL: ${{ vars.LOG_LEVEL }}
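The fresh-deployment fallback in the hunk above turns on whether a container named portfolio-app is already running. In isolation, the check behaves like this (a small illustrative snippet that reuses the exact command from the Prepare step):

```bash
# Prints the rolling-update path only when a running container matches the name filter;
# otherwise the workflow falls back to a plain `docker compose up -d`.
if docker ps -q -f name=portfolio-app | grep -q .; then
  echo "existing container found: perform rolling update"
else
  echo "no container running: perform fresh deployment"
fi
```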
docker-compose.zero-downtime.yml (new file, 135 lines)
@@ -0,0 +1,135 @@
# Zero-Downtime Deployment Configuration
# Uses nginx as load balancer for seamless updates

services:
  nginx:
    image: nginx:alpine
    container_name: portfolio-nginx
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx-zero-downtime.conf:/etc/nginx/nginx.conf:ro
    networks:
      - portfolio_net
    depends_on:
      - portfolio-app-1
      - portfolio-app-2
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/health"]
      interval: 10s
      timeout: 5s
      retries: 3

  portfolio-app-1:
    image: portfolio-app:latest
    container_name: portfolio-app-1
    restart: unless-stopped
    environment:
      - NODE_ENV=${NODE_ENV:-production}
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
      - REDIS_URL=redis://redis:6379
      - NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
      - NEXT_PUBLIC_UMAMI_URL=${NEXT_PUBLIC_UMAMI_URL}
      - NEXT_PUBLIC_UMAMI_WEBSITE_ID=${NEXT_PUBLIC_UMAMI_WEBSITE_ID}
      - MY_EMAIL=${MY_EMAIL}
      - MY_INFO_EMAIL=${MY_INFO_EMAIL}
      - MY_PASSWORD=${MY_PASSWORD}
      - MY_INFO_PASSWORD=${MY_INFO_PASSWORD}
      - ADMIN_BASIC_AUTH=${ADMIN_BASIC_AUTH}
    volumes:
      - portfolio_data:/app/.next/cache
    networks:
      - portfolio_net
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s

  portfolio-app-2:
    image: portfolio-app:latest
    container_name: portfolio-app-2
    restart: unless-stopped
    environment:
      - NODE_ENV=${NODE_ENV:-production}
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - DATABASE_URL=postgresql://portfolio_user:portfolio_pass@postgres:5432/portfolio_db?schema=public
      - REDIS_URL=redis://redis:6379
      - NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL}
      - NEXT_PUBLIC_UMAMI_URL=${NEXT_PUBLIC_UMAMI_URL}
      - NEXT_PUBLIC_UMAMI_WEBSITE_ID=${NEXT_PUBLIC_UMAMI_WEBSITE_ID}
      - MY_EMAIL=${MY_EMAIL}
      - MY_INFO_EMAIL=${MY_INFO_EMAIL}
      - MY_PASSWORD=${MY_PASSWORD}
      - MY_INFO_PASSWORD=${MY_INFO_PASSWORD}
      - ADMIN_BASIC_AUTH=${ADMIN_BASIC_AUTH}
    volumes:
      - portfolio_data:/app/.next/cache
    networks:
      - portfolio_net
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s

  postgres:
    image: postgres:16-alpine
    container_name: portfolio-postgres
    restart: unless-stopped
    environment:
      - POSTGRES_DB=portfolio_db
      - POSTGRES_USER=portfolio_user
      - POSTGRES_PASSWORD=portfolio_pass
    volumes:
      - postgres_data:/var/lib/postgresql/data
    networks:
      - portfolio_net
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U portfolio_user -d portfolio_db"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

  redis:
    image: redis:7-alpine
    container_name: portfolio-redis
    restart: unless-stopped
    volumes:
      - redis_data:/data
    networks:
      - portfolio_net
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

volumes:
  portfolio_data:
    driver: local
  postgres_data:
    driver: local
  redis_data:
    driver: local

networks:
  portfolio_net:
    driver: bridge
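This compose file is the "advanced nginx load balancer configuration for future use" from the commit message: two identical app replicas sit behind nginx, so either one can be recreated while the other keeps serving. One plausible way to roll them is shown below purely as an illustration; the --no-deps/--force-recreate flags and the health-status inspection are my assumptions, not commands from this commit:

```bash
#!/usr/bin/env bash
# Hypothetical one-at-a-time restart of the replicas defined in docker-compose.zero-downtime.yml.
set -euo pipefail

COMPOSE="docker compose -f docker-compose.zero-downtime.yml"

# Bring up (or reconcile) the whole stack: nginx, both replicas, postgres, redis.
$COMPOSE up -d

# Recreate one replica at a time so the nginx upstream always has a healthy member.
for app in portfolio-app-1 portfolio-app-2; do
  $COMPOSE up -d --no-deps --force-recreate "$app"
  # Wait for the service's own healthcheck (curl on /api/health) to pass again.
  until [ "$(docker inspect --format '{{.State.Health.Status}}' "$app")" = "healthy" ]; do
    sleep 5
  done
done
```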
nginx-zero-downtime.conf (new file, 62 lines)
@@ -0,0 +1,62 @@
events {
    worker_connections 1024;
}

http {
    upstream portfolio_backend {
        # Health check enabled upstream
        server portfolio-app-1:3000 max_fails=3 fail_timeout=30s;
        server portfolio-app-2:3000 max_fails=3 fail_timeout=30s;
    }

    # Health check endpoint
    server {
        listen 80;
        server_name _;

        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }
    }

    # Main server
    server {
        listen 80;
        server_name _;

        # Proxy settings
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Timeout settings
        proxy_connect_timeout 5s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;

        # Buffer settings
        proxy_buffering on;
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;

        # Main location
        location / {
            proxy_pass http://portfolio_backend;

            # Health check for upstream
            proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
            proxy_next_upstream_tries 2;
            proxy_next_upstream_timeout 10s;
        }

        # Static files caching
        location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
            proxy_pass http://portfolio_backend;
            expires 1y;
            add_header Cache-Control "public, immutable";
        }
    }
}
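Once the stack is up, a quick smoke test might look like this (assumed verification commands, not part of the commit; the /health endpoint is the one defined in the nginx config above):

```bash
# nginx's own health endpoint (returns the literal "healthy" body defined above).
curl -fsS http://localhost/health

# Service status as seen by compose; the app replicas should report "healthy".
docker compose -f docker-compose.zero-downtime.yml ps
```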