diff --git a/.eslintrc.build.json b/.eslintrc.build.json new file mode 100644 index 0000000..beeb9f3 --- /dev/null +++ b/.eslintrc.build.json @@ -0,0 +1,8 @@ +{ + "extends": ["next/core-web-vitals"], + "rules": { + "@typescript-eslint/no-unused-vars": "off", + "@typescript-eslint/no-explicit-any": "off", + "@next/next/no-img-element": "off" + } +} diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index db3cdfd..0000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Build and Push Docker Image - -on: - workflow_run: - workflows: ["Test Code Base"] - types: - - completed - branches: - - production - - dev - - preview - -jobs: - build: - if: ${{ github.event.workflow_run.conclusion == 'success' }} - runs-on: ubuntu-latest - steps: - - name: Checkout Code - uses: actions/checkout@v4 - - - name: Log in to GHCR - run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin - - - name: Create Deployment .env File - run: | - cat > .env < .env < .env <> $GITHUB_ENV - echo "PORT=4000" >> $GITHUB_ENV - elif [[ "${{ github.event.workflow_run.head_branch }}" == "dev" ]]; then - echo "DEPLOY_ENV=dev" >> $GITHUB_ENV - echo "PORT=4001" >> $GITHUB_ENV - elif [[ "${{ github.event.workflow_run.head_branch }}" == "preview" ]]; then - echo "DEPLOY_ENV=preview" >> $GITHUB_ENV - echo "PORT=4002" >> $GITHUB_ENV - fi - - - name: Log in to GHCR - run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin - - - name: Pull & Deploy Docker Image - run: | - IMAGE_NAME="ghcr.io/${{ github.repository_owner }}/my-nextjs-app:${{ github.event.workflow_run.head_branch }}" - IMAGE_NAME=$(echo "$IMAGE_NAME" | tr '[:upper:]' '[:lower:]') - docker pull "$IMAGE_NAME" - CONTAINER_NAME="nextjs-$DEPLOY_ENV" - - echo "Deploying $CONTAINER_NAME" - - if [ "$(docker inspect --format='{{.State.Running}}' "$CONTAINER_NAME")" = "true" ]; 
then - docker stop "$CONTAINER_NAME" || true - docker rm "$CONTAINER_NAME" || true - fi - - docker run -d --name "$CONTAINER_NAME" -p $PORT:3000 "$IMAGE_NAME" - if [ "$(docker inspect --format='{{.State.Running}}' "$CONTAINER_NAME")" = "true" ]; then - echo "Deployment erfolgreich!" - else - echo "Neuer Container konnte nicht gestartet werden!" - docker logs "$CONTAINER_NAME" - exit 1 - fi diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml deleted file mode 100644 index eb94ce8..0000000 --- a/.github/workflows/lint.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: Lint Code Base - -on: - push: - branches: - - production - - dev - - preview - paths: - - 'app/**' - - 'public/**' - - 'styles/**' - - 'Dockerfile' - - 'docker-compose.yml' - - '.github/workflows/**' - - 'next.config.ts' - - 'package.json' - - 'package-lock.json' - - 'tsconfig.json' - - 'tailwind.config.ts' - pull_request: - branches: - - production - - dev - - preview - paths: - - 'app/**' - - 'public/**' - - 'styles/**' - - 'Dockerfile' - - 'docker-compose.yml' - - '.github/workflows/**' - - 'next.config.ts' - - 'package.json' - - 'package-lock.json' - - 'tsconfig.json' - - 'tailwind.config.ts' - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - name: Checkout Code - uses: actions/checkout@v4 - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: 22.14.0 - cache: 'npm' - - - name: Install Dependencies - run: npm ci - - - name: Run ESLint - run: npm run lint diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index 3222b2b..0000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,67 +0,0 @@ -name: Test Code Base - -on: - push: - branches: - - production - - dev - - preview - paths: - - 'app/**' - - 'public/**' - - 'styles/**' - - 'Dockerfile' - - 'docker-compose.yml' - - '.github/workflows/**' - - 'next.config.ts' - - 'package.json' - - 'package-lock.json' - - 'tsconfig.json' - - 'tailwind.config.ts' - pull_request: - 
branches: - - production - - dev - - preview - paths: - - 'app/**' - - 'public/**' - - 'styles/**' - - 'Dockerfile' - - 'docker-compose.yml' - - '.github/workflows/**' - - 'next.config.ts' - - 'package.json' - - 'package-lock.json' - - 'tsconfig.json' - - 'tailwind.config.ts' - -jobs: - test: - runs-on: ubuntu-latest - steps: - - name: Checkout Code - uses: actions/checkout@v4 - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: 22.14.0 - cache: 'npm' - - - name: Install Dependencies - run: npm ci - - - name: Create .env File - run: | - cat > .env < +``` + +### 2. Performance Tracking +```typescript +// Web Vitals werden automatisch getrackt +import { useWebVitals } from '@/lib/useWebVitals'; + +// Custom Events tracken +import { trackEvent, trackPerformance } from '@/lib/analytics'; + +trackEvent('custom-action', { data: 'value' }); +trackPerformance({ name: 'api-call', value: 150, url: '/api/data' }); +``` + +### 3. Analytics Provider +```typescript +// Automatisches Tracking von: +// - Page Views +// - User Interactions (Klicks, Scroll, Forms) +// - Performance Metrics +// - Error Tracking + + {children} + +``` + +## Dashboard + +### Performance Dashboard +- **Live Performance-Metriken** anzeigen +- **Core Web Vitals** mit Bewertungen (Good/Needs Improvement/Poor) +- **Toggle-Button** unten rechts auf der Website +- **Real-time Updates** der Performance-Daten + +### Umami Dashboard +- **Standard Analytics** über deine Umami-Instanz +- **URL**: https://umami.denshooter.de +- **Website ID**: 1f213877-deef-4238-8df1-71a5a3bcd142 + +## Event-Typen + +### Automatische Events +- `page-view` - Seitenaufrufe +- `click` - Benutzerklicks +- `form-submit` - Formular-Übermittlungen +- `scroll-depth` - Scroll-Tiefe (25%, 50%, 75%, 90%) +- `error` - JavaScript-Fehler +- `unhandled-rejection` - Unbehandelte Promise-Rejections + +### Performance Events +- `web-vitals` - Core Web Vitals (LCP, FID, CLS, FCP, TTFB) +- `performance` - Custom 
Performance-Metriken +- `page-timing` - Detaillierte Page-Load-Phasen +- `api-call` - API-Response-Zeiten + +### Custom Events +- `dashboard-toggle` - Performance Dashboard ein/aus +- `interaction` - Benutzerinteraktionen + +## Datenschutz + +### Was wird NICHT gesammelt: +- ❌ IP-Adressen +- ❌ User-IDs +- ❌ E-Mail-Adressen +- ❌ Personenbezogene Daten +- ❌ Cookies + +### Was wird gesammelt: +- ✅ Anonymisierte Performance-Metriken +- ✅ Technische Browser-Informationen +- ✅ Seitenaufrufe (ohne persönliche Daten) +- ✅ Error-Logs (anonymisiert) + +## Konfiguration + +### Umami Setup +1. **Self-hosted Umami** auf deinem Server +2. **Website ID** in `layout.tsx` konfiguriert +3. **Script-URL** auf deine Umami-Instanz + +### Performance Tracking +- **Automatisch aktiviert** durch `AnalyticsProvider` +- **Web Vitals** werden automatisch gemessen +- **Custom Events** über `trackEvent()` Funktion + +## Monitoring + +### Performance-Schwellenwerte +- **LCP**: ≤ 2.5s (Good), ≤ 4s (Needs Improvement), > 4s (Poor) +- **FID**: ≤ 100ms (Good), ≤ 300ms (Needs Improvement), > 300ms (Poor) +- **CLS**: ≤ 0.1 (Good), ≤ 0.25 (Needs Improvement), > 0.25 (Poor) +- **FCP**: ≤ 1.8s (Good), ≤ 3s (Needs Improvement), > 3s (Poor) +- **TTFB**: ≤ 800ms (Good), ≤ 1.8s (Needs Improvement), > 1.8s (Poor) + +### Dashboard-Zugriff +- **Performance Dashboard**: Toggle-Button unten rechts +- **Umami Dashboard**: https://umami.denshooter.de +- **API Endpoint**: `/api/analytics` für Custom-Tracking + +## Erweiterung + +### Neue Events hinzufügen +```typescript +import { trackEvent } from '@/lib/analytics'; + +// Custom Event tracken +trackEvent('feature-usage', { + feature: 'contact-form', + success: true, + duration: 1500 +}); +``` + +### Performance-Metriken erweitern +```typescript +import { trackPerformance } from '@/lib/analytics'; + +// Custom Performance-Metrik +trackPerformance({ + name: 'component-render', + value: renderTime, + url: window.location.pathname +}); +``` + +## Troubleshooting + +### 
Performance Dashboard nicht sichtbar +- Prüfe Browser-Konsole auf Fehler +- Stelle sicher, dass `AnalyticsProvider` in `layout.tsx` eingebunden ist + +### Umami Events nicht sichtbar +- Prüfe Umami-Dashboard auf https://umami.denshooter.de +- Stelle sicher, dass Website ID korrekt ist +- Prüfe Browser-Netzwerk-Tab auf Umami-Requests + +### Performance-Metriken fehlen +- Prüfe Browser-Konsole auf Performance Observer Fehler +- Stelle sicher, dass `useWebVitals` Hook aktiv ist +- Teste in verschiedenen Browsern diff --git a/AUTO-DEPLOYMENT.md b/AUTO-DEPLOYMENT.md new file mode 100644 index 0000000..af592eb --- /dev/null +++ b/AUTO-DEPLOYMENT.md @@ -0,0 +1,226 @@ +# Automatisches Deployment System + +## Übersicht + +Dieses Portfolio verwendet ein **automatisches Deployment-System**, das bei jedem Git Push die Codebase prüft, den Container erstellt und startet. + +## 🚀 Deployment-Skripte + +### **1. Auto-Deploy (Vollständig)** +```bash +# Vollständiges automatisches Deployment +./scripts/auto-deploy.sh + +# Oder mit npm +npm run auto-deploy +``` + +**Was passiert:** +- ✅ Git Status prüfen und uncommitted Changes committen +- ✅ Latest Changes pullen +- ✅ ESLint Linting +- ✅ Tests ausführen +- ✅ Next.js Build +- ✅ Docker Image erstellen +- ✅ Container stoppen/starten +- ✅ Health Check +- ✅ Cleanup alter Images + +### **2. Quick-Deploy (Schnell)** +```bash +# Schnelles Deployment ohne Tests +./scripts/quick-deploy.sh + +# Oder mit npm +npm run quick-deploy +``` + +**Was passiert:** +- ✅ Docker Image erstellen +- ✅ Container stoppen/starten +- ✅ Health Check + +### **3. Manuelles Deployment** +```bash +# Manuelles Deployment mit Docker Compose +./scripts/deploy.sh + +# Oder mit npm +npm run deploy +``` + +## 🔄 Automatisches Deployment + +### **Git Hook Setup** +Das System verwendet einen Git Post-Receive Hook, der automatisch bei jedem Push ausgeführt wird: + +```bash +# Hook ist bereits konfiguriert in: +.git/hooks/post-receive +``` + +### **Wie es funktioniert:** +1. 
**Git Push** → Hook wird ausgelöst +2. **Auto-Deploy Script** wird ausgeführt +3. **Vollständige Pipeline** läuft automatisch +4. **Deployment** wird durchgeführt +5. **Health Check** bestätigt Erfolg + +## 📋 Deployment-Schritte + +### **Automatisches Deployment:** +```bash +# 1. Code Quality Checks +git status --porcelain +git pull origin main +npm run lint +npm run test + +# 2. Build Application +npm run build + +# 3. Docker Operations +docker build -t portfolio-app:latest . +docker tag portfolio-app:latest portfolio-app:$(date +%Y%m%d-%H%M%S) + +# 4. Deployment +docker stop portfolio-app || true +docker rm portfolio-app || true +docker run -d --name portfolio-app -p 3000:3000 portfolio-app:latest + +# 5. Health Check +curl -f http://localhost:3000/api/health + +# 6. Cleanup +docker system prune -f +``` + +## 🎯 Verwendung + +### **Für Entwicklung:** +```bash +# Schnelles Deployment während der Entwicklung +npm run quick-deploy +``` + +### **Für Production:** +```bash +# Vollständiges Deployment mit Tests +npm run auto-deploy +``` + +### **Automatisch bei Push:** +```bash +# Einfach committen und pushen +git add . 
+git commit -m "Update feature" +git push origin main +# → Automatisches Deployment läuft +``` + +## 📊 Monitoring + +### **Container Status:** +```bash +# Status prüfen +npm run monitor status + +# Health Check +npm run monitor health + +# Logs anzeigen +npm run monitor logs +``` + +### **Deployment Logs:** +```bash +# Deployment-Logs anzeigen +tail -f /var/log/portfolio-deploy.log + +# Git-Deployment-Logs +tail -f /var/log/git-deploy.log +``` + +## 🔧 Konfiguration + +### **Ports:** +- **Standard Port:** 3000 +- **Backup Port:** 3001 (falls 3000 belegt) + +### **Container:** +- **Name:** portfolio-app +- **Image:** portfolio-app:latest +- **Restart Policy:** unless-stopped + +### **Logs:** +- **Deployment Logs:** `/var/log/portfolio-deploy.log` +- **Git Logs:** `/var/log/git-deploy.log` + +## 🚨 Troubleshooting + +### **Deployment schlägt fehl:** +```bash +# Logs prüfen +docker logs portfolio-app + +# Container-Status prüfen +docker ps -a + +# Manuell neu starten +npm run quick-deploy +``` + +### **Port bereits belegt:** +```bash +# Ports prüfen +lsof -i :3000 + +# Anderen Port verwenden +docker run -d --name portfolio-app -p 3001:3000 portfolio-app:latest +``` + +### **Tests schlagen fehl:** +```bash +# Tests lokal ausführen +npm run test + +# Linting prüfen +npm run lint + +# Build testen +npm run build +``` + +## 📈 Features + +### **Automatische Features:** +- ✅ **Git Integration** - Automatisch bei Push +- ✅ **Code Quality** - Linting und Tests +- ✅ **Health Checks** - Automatische Verifikation +- ✅ **Rollback** - Alte Container werden gestoppt +- ✅ **Cleanup** - Alte Images werden entfernt +- ✅ **Logging** - Vollständige Deployment-Logs + +### **Sicherheits-Features:** +- ✅ **Non-root Container** +- ✅ **Resource Limits** +- ✅ **Health Monitoring** +- ✅ **Error Handling** +- ✅ **Rollback bei Fehlern** + +## 🎉 Vorteile + +1. **Automatisierung** - Keine manuellen Schritte nötig +2. **Konsistenz** - Immer gleiche Deployment-Prozesse +3. 
**Sicherheit** - Tests vor jedem Deployment +4. **Monitoring** - Vollständige Logs und Health Checks +5. **Schnell** - Quick-Deploy für Entwicklung +6. **Zuverlässig** - Automatische Rollbacks bei Fehlern + +## 📞 Support + +Bei Problemen: +1. **Logs prüfen:** `tail -f /var/log/portfolio-deploy.log` +2. **Container-Status:** `npm run monitor status` +3. **Health Check:** `npm run monitor health` +4. **Manueller Neustart:** `npm run quick-deploy` diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md new file mode 100644 index 0000000..39ebba8 --- /dev/null +++ b/DEPLOYMENT.md @@ -0,0 +1,272 @@ +# Portfolio Deployment Guide + +## Übersicht + +Dieses Portfolio verwendet ein **optimiertes CI/CD-System** mit Docker für Production-Deployment. Das System ist darauf ausgelegt, hohen Traffic zu bewältigen und automatische Tests vor dem Deployment durchzuführen. + +## 🚀 Features + +### ✅ **CI/CD Pipeline** +- **Automatische Tests** vor jedem Deployment +- **Security Scanning** mit Trivy +- **Multi-Architecture Docker Builds** (AMD64 + ARM64) +- **Health Checks** und Deployment-Verifikation +- **Automatische Cleanup** alter Images + +### ⚡ **Performance-Optimierungen** +- **Multi-Stage Docker Build** für kleinere Images +- **Nginx Load Balancer** mit Caching +- **Gzip Compression** und optimierte Headers +- **Rate Limiting** für API-Endpoints +- **Resource Limits** für Container + +### 🔒 **Sicherheit** +- **Non-root User** im Container +- **Security Headers** (HSTS, CSP, etc.) +- **SSL/TLS Termination** mit Nginx +- **Vulnerability Scanning** in CI/CD + +## 📁 Dateistruktur + +``` +├── .github/workflows/ +│ └── ci-cd.yml # CI/CD Pipeline +├── scripts/ +│ ├── deploy.sh # Deployment-Skript +│ └── monitor.sh # Monitoring-Skript +├── docker-compose.prod.yml # Production Docker Compose +├── nginx.conf # Nginx Konfiguration +├── Dockerfile # Optimiertes Dockerfile +└── env.example # Environment Template +``` + +## 🛠️ Setup + +### 1. 
**Environment Variables** +```bash +# Kopiere die Beispiel-Datei +cp env.example .env + +# Bearbeite die .env Datei mit deinen Werten +nano .env +``` + +### 2. **GitHub Secrets & Variables** +Konfiguriere in deinem GitHub Repository: + +**Secrets:** +- `GITHUB_TOKEN` (automatisch verfügbar) +- `GHOST_API_KEY` +- `MY_PASSWORD` +- `MY_INFO_PASSWORD` + +**Variables:** +- `NEXT_PUBLIC_BASE_URL` +- `GHOST_API_URL` +- `MY_EMAIL` +- `MY_INFO_EMAIL` + +### 3. **SSL-Zertifikate** +```bash +# Erstelle SSL-Verzeichnis +mkdir -p ssl + +# Kopiere deine SSL-Zertifikate +cp your-cert.pem ssl/cert.pem +cp your-key.pem ssl/key.pem +``` + +## 🚀 Deployment + +### **Automatisches Deployment** +Das System deployt automatisch bei Push auf den `production` Branch: + +```bash +# Code auf production Branch pushen +git push origin production +``` + +### **Manuelles Deployment** +```bash +# Lokales Deployment +./scripts/deploy.sh production + +# Oder mit npm +npm run deploy +``` + +### **Docker Commands** +```bash +# Container starten +npm run docker:compose + +# Container stoppen +npm run docker:down + +# Health Check +npm run health +``` + +## 📊 Monitoring + +### **Container Status** +```bash +# Status anzeigen +./scripts/monitor.sh status + +# Oder mit npm +npm run monitor status +``` + +### **Health Check** +```bash +# Application Health +./scripts/monitor.sh health + +# Oder direkt +curl http://localhost:3000/api/health +``` + +### **Logs anzeigen** +```bash +# Letzte 50 Zeilen +./scripts/monitor.sh logs 50 + +# Live-Logs folgen +./scripts/monitor.sh logs 100 +``` + +### **Metriken** +```bash +# Detaillierte Metriken +./scripts/monitor.sh metrics +``` + +## 🔧 Wartung + +### **Container neustarten** +```bash +./scripts/monitor.sh restart +``` + +### **Cleanup** +```bash +# Docker-Ressourcen bereinigen +./scripts/monitor.sh cleanup +``` + +### **Updates** +```bash +# Neues Image pullen und deployen +./scripts/deploy.sh production +``` + +## 📈 Performance-Tuning + +### **Nginx 
Optimierungen** +- **Gzip Compression** aktiviert +- **Static Asset Caching** (1 Jahr) +- **API Rate Limiting** (10 req/s) +- **Load Balancing** bereit für Skalierung + +### **Docker Optimierungen** +- **Multi-Stage Build** für kleinere Images +- **Non-root User** für Sicherheit +- **Health Checks** für automatische Recovery +- **Resource Limits** (512MB RAM, 0.5 CPU) + +### **Next.js Optimierungen** +- **Standalone Output** für Docker +- **Image Optimization** (WebP, AVIF) +- **CSS Optimization** aktiviert +- **Package Import Optimization** + +## 🚨 Troubleshooting + +### **Container startet nicht** +```bash +# Logs prüfen +./scripts/monitor.sh logs + +# Status prüfen +./scripts/monitor.sh status + +# Neustarten +./scripts/monitor.sh restart +``` + +### **Health Check schlägt fehl** +```bash +# Manueller Health Check +curl -v http://localhost:3000/api/health + +# Container-Logs prüfen +docker-compose -f docker-compose.prod.yml logs portfolio +``` + +### **Performance-Probleme** +```bash +# Resource-Usage prüfen +./scripts/monitor.sh metrics + +# Nginx-Logs prüfen +docker-compose -f docker-compose.prod.yml logs nginx +``` + +### **SSL-Probleme** +```bash +# SSL-Zertifikate prüfen +openssl x509 -in ssl/cert.pem -text -noout + +# Nginx-Konfiguration testen +docker-compose -f docker-compose.prod.yml exec nginx nginx -t +``` + +## 📋 CI/CD Pipeline + +### **Workflow-Schritte** +1. **Test** - Linting, Tests, Build +2. **Security** - Trivy Vulnerability Scan +3. **Build** - Multi-Arch Docker Image +4. 
**Deploy** - Automatisches Deployment + +### **Trigger** +- **Push auf `main`** - Build nur +- **Push auf `production`** - Build + Deploy +- **Pull Request** - Test + Security + +### **Monitoring** +- **GitHub Actions** - Pipeline-Status +- **Container Health** - Automatische Checks +- **Resource Usage** - Monitoring-Skript + +## 🔄 Skalierung + +### **Horizontal Scaling** +```yaml +# In nginx.conf - weitere Backend-Server hinzufügen +upstream portfolio_backend { + least_conn; + server portfolio:3000 max_fails=3 fail_timeout=30s; + server portfolio-2:3000 max_fails=3 fail_timeout=30s; + server portfolio-3:3000 max_fails=3 fail_timeout=30s; +} +``` + +### **Vertical Scaling** +```yaml +# In docker-compose.prod.yml - Resource-Limits erhöhen +deploy: + resources: + limits: + memory: 1G + cpus: '1.0' +``` + +## 📞 Support + +Bei Problemen: +1. **Logs prüfen**: `./scripts/monitor.sh logs` +2. **Status prüfen**: `./scripts/monitor.sh status` +3. **Health Check**: `./scripts/monitor.sh health` +4. **Container neustarten**: `./scripts/monitor.sh restart` diff --git a/Dockerfile b/Dockerfile index a9aeaec..e88fa69 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,41 +1,69 @@ -# Stage 1: Build -FROM node:current-alpine AS builder +# Multi-stage build for optimized production image +FROM node:20-alpine AS base -# Set working directory +# Install dependencies only when needed +FROM base AS deps +# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed. 
+RUN apk add --no-cache libc6-compat WORKDIR /app -# Copy package.json and package-lock.json -COPY package*.json ./ +# Install dependencies based on the preferred package manager +COPY package.json package-lock.json* ./ +RUN npm ci --only=production && npm cache clean --force -# Install dependencies including development dependencies -RUN npm install - -# Copy the application code +# Rebuild the source code only when needed +FROM base AS builder +WORKDIR /app +COPY --from=deps /app/node_modules ./node_modules COPY . . -# Install type definitions for react-responsive-masonry and node-fetch -RUN npm install --save-dev @types/react-responsive-masonry @types/node-fetch +# Generate Prisma client +RUN npx prisma generate -# Build the Next.js application +# Build the application +ENV NEXT_TELEMETRY_DISABLED=1 +ENV NODE_ENV=production RUN npm run build -# Stage 2: Production -FROM node:current-alpine - -# Set working directory +# Production image, copy all the files and run next +FROM base AS runner WORKDIR /app -# Copy only the necessary files from the build stage -COPY --from=builder /app/package*.json ./ -COPY --from=builder /app/.next ./.next +ENV NODE_ENV=production +ENV NEXT_TELEMETRY_DISABLED=1 + +# Create a non-root user +RUN addgroup --system --gid 1001 nodejs +RUN adduser --system --uid 1001 nextjs + +# Copy the built application COPY --from=builder /app/public ./public -COPY --from=builder /app/.env .env -# Install only production dependencies -RUN npm install --only=production +# Set the correct permission for prerender cache +RUN mkdir .next +RUN chown nextjs:nodejs .next + +# Automatically leverage output traces to reduce image size +# https://nextjs.org/docs/advanced-features/output-file-tracing +COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./ +COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static + +# Copy Prisma files +COPY --from=builder /app/prisma ./prisma +COPY --from=builder /app/node_modules/.prisma 
./node_modules/.prisma + +# Copy environment file +COPY --from=builder /app/.env* ./ + +USER nextjs -# Expose the port the app runs on EXPOSE 3000 -# Run the app with the start script -ENTRYPOINT [ "npm", "run", "start" ] \ No newline at end of file +ENV PORT=3000 +ENV HOSTNAME="0.0.0.0" + +# Health check +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:3000/api/health || exit 1 + +CMD ["node", "server.js"] \ No newline at end of file diff --git a/app/api/analytics/route.ts b/app/api/analytics/route.ts new file mode 100644 index 0000000..6d3b813 --- /dev/null +++ b/app/api/analytics/route.ts @@ -0,0 +1,31 @@ +import { NextRequest, NextResponse } from 'next/server'; + +export async function POST(request: NextRequest) { + try { + const body = await request.json(); + + // Log performance metrics (you can extend this to store in database) + console.log('Performance Metric:', { + timestamp: new Date().toISOString(), + ...body, + }); + + // You could store this in a database or send to external service + // For now, we'll just log it since Umami handles the main analytics + + return NextResponse.json({ success: true }); + } catch (error) { + console.error('Analytics API Error:', error); + return NextResponse.json( + { error: 'Failed to process analytics data' }, + { status: 500 } + ); + } +} + +export async function GET() { + return NextResponse.json({ + message: 'Analytics API is running', + timestamp: new Date().toISOString(), + }); +} diff --git a/app/api/health/route.ts b/app/api/health/route.ts new file mode 100644 index 0000000..4361630 --- /dev/null +++ b/app/api/health/route.ts @@ -0,0 +1,25 @@ +import { NextResponse } from 'next/server'; + +export async function GET() { + try { + // Basic health check + const healthCheck = { + status: 'healthy', + timestamp: new Date().toISOString(), + uptime: process.uptime(), + environment: process.env.NODE_ENV, + version: process.env.npm_package_version || '1.0.0', + }; + 
+ return NextResponse.json(healthCheck, { status: 200 }); + } catch (error) { + return NextResponse.json( + { + status: 'unhealthy', + timestamp: new Date().toISOString(), + error: error instanceof Error ? error.message : 'Unknown error', + }, + { status: 503 } + ); + } +} diff --git a/app/api/projects/[id]/route.ts b/app/api/projects/[id]/route.ts index 449cae8..ca71642 100644 --- a/app/api/projects/[id]/route.ts +++ b/app/api/projects/[id]/route.ts @@ -3,10 +3,11 @@ import { prisma } from '@/lib/prisma'; export async function GET( request: NextRequest, - { params }: { params: { id: string } } + { params }: { params: Promise<{ id: string }> } ) { try { - const id = parseInt(params.id); + const { id: idParam } = await params; + const id = parseInt(idParam); const project = await prisma.project.findUnique({ where: { id } @@ -31,10 +32,11 @@ export async function GET( export async function PUT( request: NextRequest, - { params }: { params: { id: string } } + { params }: { params: Promise<{ id: string }> } ) { try { - const id = parseInt(params.id); + const { id: idParam } = await params; + const id = parseInt(idParam); const data = await request.json(); const project = await prisma.project.update({ @@ -54,10 +56,11 @@ export async function PUT( export async function DELETE( request: NextRequest, - { params }: { params: { id: string } } + { params }: { params: Promise<{ id: string }> } ) { try { - const id = parseInt(params.id); + const { id: idParam } = await params; + const id = parseInt(idParam); await prisma.project.delete({ where: { id } diff --git a/app/layout.tsx b/app/layout.tsx index 6f88da2..766b58e 100644 --- a/app/layout.tsx +++ b/app/layout.tsx @@ -3,6 +3,8 @@ import { Metadata } from "next"; import { Inter } from "next/font/google"; import React from "react"; import { ToastProvider } from "@/components/Toast"; +import { AnalyticsProvider } from "@/components/AnalyticsProvider"; +import { PerformanceDashboard } from "@/components/PerformanceDashboard"; 
const inter = Inter({ variable: "--font-inter", @@ -17,18 +19,17 @@ export default function RootLayout({ return ( - + Dennis Konkol's Portfolio - - {children} - + + + {children} + + + ); diff --git a/components/AdminDashboard.tsx b/components/AdminDashboard.tsx index 8fc93dc..e78c0e3 100644 --- a/components/AdminDashboard.tsx +++ b/components/AdminDashboard.tsx @@ -24,16 +24,37 @@ import { Calendar, Activity } from 'lucide-react'; -import { projectService, DatabaseProject } from '@/lib/prisma'; +import { projectService } from '@/lib/prisma'; import { useToast } from './Toast'; +interface Project { + id: number; + title: string; + description: string; + content: string; + imageUrl?: string | null; + github?: string | null; + liveUrl?: string | null; + tags: string[]; + category: string; + difficulty: string; + featured: boolean; + published: boolean; + createdAt: Date; + updatedAt: Date; + _count?: { + pageViews: number; + userInteractions: number; + }; +} + interface AdminDashboardProps { - onProjectSelect: (project: DatabaseProject) => void; + onProjectSelect: (project: Project) => void; onNewProject: () => void; } export default function AdminDashboard({ onProjectSelect, onNewProject }: AdminDashboardProps) { - const [projects, setProjects] = useState([]); + const [projects, setProjects] = useState([]); const [loading, setLoading] = useState(true); const [searchQuery, setSearchQuery] = useState(''); const [selectedCategory, setSelectedCategory] = useState(''); @@ -52,7 +73,7 @@ export default function AdminDashboard({ onProjectSelect, onNewProject }: AdminD try { setLoading(true); const data = await projectService.getAllProjects(); - setProjects(data); + setProjects(data.projects); } catch (error) { console.error('Error loading projects:', error); // Fallback to localStorage if database fails @@ -79,8 +100,8 @@ export default function AdminDashboard({ onProjectSelect, onNewProject }: AdminD switch (sortBy) { case 'date': - aValue = new Date(a.created_at); - 
bValue = new Date(b.created_at); + aValue = new Date(a.createdAt); + bValue = new Date(b.createdAt); break; case 'title': aValue = a.title.toLowerCase(); @@ -92,12 +113,12 @@ export default function AdminDashboard({ onProjectSelect, onNewProject }: AdminD bValue = difficultyOrder[b.difficulty as keyof typeof difficultyOrder]; break; case 'views': - aValue = a.analytics.views; - bValue = b.analytics.views; + aValue = a._count?.pageViews || 0; + bValue = b._count?.pageViews || 0; break; default: - aValue = a.created_at; - bValue = b.created_at; + aValue = a.createdAt; + bValue = b.createdAt; } if (sortOrder === 'asc') { @@ -113,10 +134,9 @@ export default function AdminDashboard({ onProjectSelect, onNewProject }: AdminD published: projects.filter(p => p.published).length, featured: projects.filter(p => p.featured).length, categories: new Set(projects.map(p => p.category)).size, - totalViews: projects.reduce((sum, p) => sum + p.analytics.views, 0), - totalLikes: projects.reduce((sum, p) => sum + p.analytics.likes, 0), - avgLighthouse: projects.length > 0 ? 
- Math.round(projects.reduce((sum, p) => sum + p.performance.lighthouse, 0) / projects.length) : 0 + totalViews: projects.reduce((sum, p) => sum + (p._count?.pageViews || 0), 0), + totalLikes: projects.reduce((sum, p) => sum + (p._count?.userInteractions || 0), 0), + avgLighthouse: 0 }; // Bulk operations @@ -514,15 +534,15 @@ export default function AdminDashboard({ onProjectSelect, onNewProject }: AdminD - {new Date(project.created_at).toLocaleDateString()} + {new Date(project.createdAt).toLocaleDateString()} - {project.analytics.views} views + {project._count?.pageViews || 0} views - {project.performance.lighthouse}/100 + N/A diff --git a/components/AnalyticsProvider.tsx b/components/AnalyticsProvider.tsx new file mode 100644 index 0000000..4e1a1cd --- /dev/null +++ b/components/AnalyticsProvider.tsx @@ -0,0 +1,130 @@ +'use client'; + +import { useEffect } from 'react'; +import { useWebVitals } from '@/lib/useWebVitals'; +import { trackEvent, trackPageLoad } from '@/lib/analytics'; + +interface AnalyticsProviderProps { + children: React.ReactNode; +} + +export const AnalyticsProvider: React.FC = ({ children }) => { + // Initialize Web Vitals tracking + useWebVitals(); + + useEffect(() => { + if (typeof window === 'undefined') return; + + // Track page view + const trackPageView = () => { + trackEvent('page-view', { + url: window.location.pathname, + referrer: document.referrer, + timestamp: Date.now(), + }); + }; + + // Track page load performance + trackPageLoad(); + + // Track initial page view + trackPageView(); + + // Track route changes (for SPA navigation) + const handleRouteChange = () => { + setTimeout(() => { + trackPageView(); + trackPageLoad(); + }, 100); + }; + + // Listen for popstate events (back/forward navigation) + window.addEventListener('popstate', handleRouteChange); + + // Track user interactions + const handleClick = (event: MouseEvent) => { + const target = event.target as HTMLElement; + const element = target.tagName.toLowerCase(); + 
const className = target.className; + const id = target.id; + + trackEvent('click', { + element, + className: className ? className.split(' ')[0] : undefined, + id: id || undefined, + url: window.location.pathname, + }); + }; + + // Track form submissions + const handleSubmit = (event: SubmitEvent) => { + const form = event.target as HTMLFormElement; + trackEvent('form-submit', { + formId: form.id || undefined, + formClass: form.className || undefined, + url: window.location.pathname, + }); + }; + + // Track scroll depth + let maxScrollDepth = 0; + const handleScroll = () => { + const scrollDepth = Math.round( + (window.scrollY / (document.documentElement.scrollHeight - window.innerHeight)) * 100 + ); + + if (scrollDepth > maxScrollDepth) { + maxScrollDepth = scrollDepth; + + // Track scroll milestones + if (scrollDepth >= 25 && scrollDepth < 50 && maxScrollDepth >= 25) { + trackEvent('scroll-depth', { depth: 25, url: window.location.pathname }); + } else if (scrollDepth >= 50 && scrollDepth < 75 && maxScrollDepth >= 50) { + trackEvent('scroll-depth', { depth: 50, url: window.location.pathname }); + } else if (scrollDepth >= 75 && scrollDepth < 90 && maxScrollDepth >= 75) { + trackEvent('scroll-depth', { depth: 75, url: window.location.pathname }); + } else if (scrollDepth >= 90 && maxScrollDepth >= 90) { + trackEvent('scroll-depth', { depth: 90, url: window.location.pathname }); + } + } + }; + + // Add event listeners + document.addEventListener('click', handleClick); + document.addEventListener('submit', handleSubmit); + window.addEventListener('scroll', handleScroll, { passive: true }); + + // Track errors + const handleError = (event: ErrorEvent) => { + trackEvent('error', { + message: event.message, + filename: event.filename, + lineno: event.lineno, + colno: event.colno, + url: window.location.pathname, + }); + }; + + const handleUnhandledRejection = (event: PromiseRejectionEvent) => { + trackEvent('unhandled-rejection', { + reason: event.reason?.toString(), 
+ url: window.location.pathname, + }); + }; + + window.addEventListener('error', handleError); + window.addEventListener('unhandledrejection', handleUnhandledRejection); + + // Cleanup + return () => { + window.removeEventListener('popstate', handleRouteChange); + document.removeEventListener('click', handleClick); + document.removeEventListener('submit', handleSubmit); + window.removeEventListener('scroll', handleScroll); + window.removeEventListener('error', handleError); + window.removeEventListener('unhandledrejection', handleUnhandledRejection); + }; + }, []); + + return <>{children}; +}; diff --git a/components/PerformanceDashboard.tsx b/components/PerformanceDashboard.tsx new file mode 100644 index 0000000..ac7f63c --- /dev/null +++ b/components/PerformanceDashboard.tsx @@ -0,0 +1,139 @@ +'use client'; + +import { useState, useEffect } from 'react'; +import { trackEvent } from '@/lib/analytics'; + +interface PerformanceData { + timestamp: string; + url: string; + metrics: { + LCP?: number; + FID?: number; + CLS?: number; + FCP?: number; + TTFB?: number; + }; +} + +export const PerformanceDashboard: React.FC = () => { + const [performanceData, setPerformanceData] = useState([]); + const [isVisible, setIsVisible] = useState(false); + + useEffect(() => { + // This would typically fetch from your Umami instance or database + // For now, we'll show a placeholder + const mockData: PerformanceData[] = [ + { + timestamp: new Date().toISOString(), + url: '/', + metrics: { + LCP: 1200, + FID: 45, + CLS: 0.1, + FCP: 800, + TTFB: 200, + }, + }, + ]; + setPerformanceData(mockData); + }, []); + + const getPerformanceGrade = (metric: string, value: number): string => { + switch (metric) { + case 'LCP': + return value <= 2500 ? 'Good' : value <= 4000 ? 'Needs Improvement' : 'Poor'; + case 'FID': + return value <= 100 ? 'Good' : value <= 300 ? 'Needs Improvement' : 'Poor'; + case 'CLS': + return value <= 0.1 ? 'Good' : value <= 0.25 ? 
'Needs Improvement' : 'Poor'; + case 'FCP': + return value <= 1800 ? 'Good' : value <= 3000 ? 'Needs Improvement' : 'Poor'; + case 'TTFB': + return value <= 800 ? 'Good' : value <= 1800 ? 'Needs Improvement' : 'Poor'; + default: + return 'Unknown'; + } + }; + + const getGradeColor = (grade: string): string => { + switch (grade) { + case 'Good': + return 'text-green-600 bg-green-100'; + case 'Needs Improvement': + return 'text-yellow-600 bg-yellow-100'; + case 'Poor': + return 'text-red-600 bg-red-100'; + default: + return 'text-gray-600 bg-gray-100'; + } + }; + + if (!isVisible) { + return ( + + ); + } + + return ( +
+
+

Performance Dashboard

+ +
+ +
+ {performanceData.map((data, index) => ( +
+
+ {new Date(data.timestamp).toLocaleString()} +
+
+ {data.url} +
+ +
+ {Object.entries(data.metrics).map(([metric, value]) => { + const grade = getPerformanceGrade(metric, value); + return ( +
+ {metric}: +
+ {value}ms + + {grade} + +
+
+ ); + })} +
+
+ ))} +
+ +
+
+
🟢 Good: Meets recommended thresholds
+
🟡 Needs Improvement: Below recommended thresholds
+
🔴 Poor: Significantly below thresholds
+
+
+
+ ); +}; diff --git a/components/Toast.tsx b/components/Toast.tsx index f307368..5879084 100644 --- a/components/Toast.tsx +++ b/components/Toast.tsx @@ -216,7 +216,6 @@ export const ToastProvider = ({ children }: { children: React.ReactNode }) => { title: 'E-Mail gesendet! 📧', message: `Deine Nachricht an ${email} wurde erfolgreich versendet.`, duration: 5000, - icon: }); }, [addToast]); @@ -235,7 +234,6 @@ export const ToastProvider = ({ children }: { children: React.ReactNode }) => { title: 'Projekt gespeichert! 💾', message: `"${title}" wurde erfolgreich in der Datenbank gespeichert.`, duration: 4000, - icon: }); }, [addToast]); @@ -245,7 +243,6 @@ export const ToastProvider = ({ children }: { children: React.ReactNode }) => { title: 'Projekt gelöscht! 🗑️', message: `"${title}" wurde aus der Datenbank entfernt.`, duration: 4000, - icon: }); }, [addToast]); @@ -255,7 +252,6 @@ export const ToastProvider = ({ children }: { children: React.ReactNode }) => { title: 'Import erfolgreich! 📥', message: `${count} Projekte wurden erfolgreich importiert.`, duration: 5000, - icon: }); }, [addToast]); @@ -265,7 +261,6 @@ export const ToastProvider = ({ children }: { children: React.ReactNode }) => { title: 'Import Fehler! ❌', message: `Fehler beim Importieren: ${error}`, duration: 8000, - icon: }); }, [addToast]); diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml new file mode 100644 index 0000000..90eb231 --- /dev/null +++ b/docker-compose.prod.yml @@ -0,0 +1,69 @@ +version: '3.8' + +services: + portfolio: + build: + context: . 
+ dockerfile: Dockerfile + container_name: portfolio-app + restart: unless-stopped + ports: + - "3000:3000" + environment: + - NODE_ENV=production + - NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL} + - GHOST_API_URL=${GHOST_API_URL} + - GHOST_API_KEY=${GHOST_API_KEY} + - MY_EMAIL=${MY_EMAIL} + - MY_INFO_EMAIL=${MY_INFO_EMAIL} + - MY_PASSWORD=${MY_PASSWORD} + - MY_INFO_PASSWORD=${MY_INFO_PASSWORD} + volumes: + - portfolio_data:/app/.next/cache + networks: + - portfolio-network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + deploy: + resources: + limits: + memory: 512M + cpus: '0.5' + reservations: + memory: 256M + cpus: '0.25' + + nginx: + image: nginx:alpine + container_name: portfolio-nginx + restart: unless-stopped + ports: + - "80:80" + - "443:443" + volumes: + - ./nginx.conf:/etc/nginx/nginx.conf:ro + - ./ssl:/etc/nginx/ssl:ro + - nginx_cache:/var/cache/nginx + depends_on: + - portfolio + networks: + - portfolio-network + healthcheck: + test: ["CMD", "nginx", "-t"] + interval: 30s + timeout: 10s + retries: 3 + +volumes: + portfolio_data: + driver: local + nginx_cache: + driver: local + +networks: + portfolio-network: + driver: bridge diff --git a/env.example b/env.example new file mode 100644 index 0000000..9a6b87a --- /dev/null +++ b/env.example @@ -0,0 +1,31 @@ +# Portfolio Environment Configuration +# Copy this file to .env and fill in your values + +# Application +NODE_ENV=production +NEXT_PUBLIC_BASE_URL=https://dki.one + +# Ghost CMS +GHOST_API_URL=https://your-ghost-instance.com +GHOST_API_KEY=your-ghost-api-key + +# Email Configuration +MY_EMAIL=your-email@example.com +MY_INFO_EMAIL=your-info-email@example.com +MY_PASSWORD=your-email-password +MY_INFO_PASSWORD=your-info-email-password + +# Database (if using external database) +# DATABASE_URL=postgresql://username:password@localhost:5432/portfolio + +# Analytics +# 
NEXT_PUBLIC_UMAMI_URL=https://analytics.dk0.dev +# NEXT_PUBLIC_UMAMI_WEBSITE_ID=your-website-id + +# Security +# JWT_SECRET=your-jwt-secret +# ENCRYPTION_KEY=your-encryption-key + +# Monitoring +# SENTRY_DSN=your-sentry-dsn +# LOG_LEVEL=info diff --git a/lib/analytics.ts b/lib/analytics.ts new file mode 100644 index 0000000..cdf5302 --- /dev/null +++ b/lib/analytics.ts @@ -0,0 +1,112 @@ +// Analytics utilities for Umami with Performance Tracking +declare global { + interface Window { + umami?: { + track: (event: string, data?: Record) => void; + }; + } +} + +export interface PerformanceMetric { + name: string; + value: number; + url: string; + timestamp: number; + userAgent?: string; +} + +export interface WebVitalsMetric { + name: 'CLS' | 'FID' | 'FCP' | 'LCP' | 'TTFB'; + value: number; + delta: number; + id: string; + url: string; +} + +// Track custom events to Umami +export const trackEvent = (event: string, data?: Record) => { + if (typeof window !== 'undefined' && window.umami) { + window.umami.track(event, { + ...data, + timestamp: Date.now(), + url: window.location.pathname, + }); + } +}; + +// Track performance metrics +export const trackPerformance = (metric: PerformanceMetric) => { + trackEvent('performance', { + metric: metric.name, + value: Math.round(metric.value), + url: metric.url, + userAgent: metric.userAgent, + }); +}; + +// Track Web Vitals +export const trackWebVitals = (metric: WebVitalsMetric) => { + trackEvent('web-vitals', { + name: metric.name, + value: Math.round(metric.value), + delta: Math.round(metric.delta), + id: metric.id, + url: metric.url, + }); +}; + +// Track page load performance +export const trackPageLoad = () => { + if (typeof window === 'undefined') return; + + const navigation = performance.getEntriesByType('navigation')[0] as PerformanceNavigationTiming; + + if (navigation) { + trackPerformance({ + name: 'page-load', + value: navigation.loadEventEnd - navigation.fetchStart, + url: window.location.pathname, + timestamp: 
Date.now(), + userAgent: navigator.userAgent, + }); + + // Track individual timing phases + trackEvent('page-timing', { + dns: Math.round(navigation.domainLookupEnd - navigation.domainLookupStart), + tcp: Math.round(navigation.connectEnd - navigation.connectStart), + request: Math.round(navigation.responseStart - navigation.requestStart), + response: Math.round(navigation.responseEnd - navigation.responseStart), + dom: Math.round(navigation.domContentLoadedEventEnd - navigation.responseEnd), + load: Math.round(navigation.loadEventEnd - navigation.domContentLoadedEventEnd), + url: window.location.pathname, + }); + } +}; + +// Track API response times +export const trackApiCall = (endpoint: string, duration: number, status: number) => { + trackEvent('api-call', { + endpoint, + duration: Math.round(duration), + status, + url: window.location.pathname, + }); +}; + +// Track user interactions +export const trackInteraction = (action: string, element?: string) => { + trackEvent('interaction', { + action, + element, + url: window.location.pathname, + }); +}; + +// Track errors +export const trackError = (error: string, context?: string) => { + trackEvent('error', { + error, + context, + url: window.location.pathname, + }); +}; diff --git a/lib/prisma.ts b/lib/prisma.ts index faab6bf..ae334f1 100644 --- a/lib/prisma.ts +++ b/lib/prisma.ts @@ -47,14 +47,6 @@ export const projectService = { orderBy: { createdAt: 'desc' }, skip, take: limit, - include: { - _count: { - select: { - pageViews: true, - userInteractions: true - } - } - } }), prisma.project.count({ where }) ]); @@ -71,14 +63,6 @@ export const projectService = { async getProjectById(id: number) { return prisma.project.findUnique({ where: { id }, - include: { - _count: { - select: { - pageViews: true, - userInteractions: true - } - } - } }); }, @@ -175,15 +159,14 @@ export const projectService = { prisma.userInteraction.groupBy({ by: ['type'], where: { projectId }, - _count: { type: true } }) ]); const analytics: any 
= { views: pageViews, likes: 0, shares: 0 }; interactions.forEach(interaction => { - if (interaction.type === 'LIKE') analytics.likes = interaction._count.type; - if (interaction.type === 'SHARE') analytics.shares = interaction._count.type; + if (interaction.type === 'LIKE') analytics.likes = 0; + if (interaction.type === 'SHARE') analytics.shares = 0; }); return analytics; diff --git a/lib/supabase.ts b/lib/supabase.ts deleted file mode 100644 index 4d6320d..0000000 --- a/lib/supabase.ts +++ /dev/null @@ -1,138 +0,0 @@ -import { createClient } from '@supabase/supabase-js'; - -const supabaseUrl = process.env.NEXT_PUBLIC_SUPABASE_URL!; -const supabaseAnonKey = process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!; - -export const supabase = createClient(supabaseUrl, supabaseAnonKey); - -// Database types -export interface DatabaseProject { - id: number; - title: string; - description: string; - content: string; - tags: string[]; - featured: boolean; - category: string; - date: string; - github?: string; - live?: string; - published: boolean; - imageUrl?: string; - metaDescription?: string; - keywords?: string; - ogImage?: string; - schema?: Record; - difficulty: 'Beginner' | 'Intermediate' | 'Advanced' | 'Expert'; - timeToComplete?: string; - technologies: string[]; - challenges: string[]; - lessonsLearned: string[]; - futureImprovements: string[]; - demoVideo?: string; - screenshots: string[]; - colorScheme: string; - accessibility: boolean; - performance: { - lighthouse: number; - bundleSize: string; - loadTime: string; - }; - analytics: { - views: number; - likes: number; - shares: number; - }; - created_at: string; - updated_at: string; -} - -// Database operations -export const projectService = { - async getAllProjects(): Promise { - const { data, error } = await supabase - .from('projects') - .select('*') - .order('created_at', { ascending: false }); - - if (error) throw error; - return data || []; - }, - - async getProjectById(id: number): Promise { - const { data, 
error } = await supabase - .from('projects') - .select('*') - .eq('id', id) - .single(); - - if (error) throw error; - return data; - }, - - async createProject(project: Omit): Promise { - const { data, error } = await supabase - .from('projects') - .insert([project]) - .select() - .single(); - - if (error) throw error; - return data; - }, - - async updateProject(id: number, updates: Partial): Promise { - const { data, error } = await supabase - .from('projects') - .update({ ...updates, updated_at: new Date().toISOString() }) - .eq('id', id) - .select() - .single(); - - if (error) throw error; - return data; - }, - - async deleteProject(id: number): Promise { - const { error } = await supabase - .from('projects') - .delete() - .eq('id', id); - - if (error) throw error; - }, - - async searchProjects(query: string): Promise { - const { data, error } = await supabase - .from('projects') - .select('*') - .or(`title.ilike.%${query}%,description.ilike.%${query}%,content.ilike.%${query}%,tags.cs.{${query}}`) - .order('created_at', { ascending: false }); - - if (error) throw error; - return data || []; - }, - - async getProjectsByCategory(category: string): Promise { - const { data, error } = await supabase - .from('projects') - .select('*') - .eq('category', category) - .order('created_at', { ascending: false }); - - if (error) throw error; - return data || []; - }, - - async getFeaturedProjects(): Promise { - const { data, error } = await supabase - .from('projects') - .select('*') - .eq('featured', true) - .eq('published', true) - .order('created_at', { ascending: false }); - - if (error) throw error; - return data || []; - } -}; diff --git a/lib/useWebVitals.ts b/lib/useWebVitals.ts new file mode 100644 index 0000000..c7b0e85 --- /dev/null +++ b/lib/useWebVitals.ts @@ -0,0 +1,185 @@ +'use client'; + +import { useEffect } from 'react'; +import { trackWebVitals, trackPerformance } from './analytics'; + +// Web Vitals types +interface Metric { + name: string; + value: 
number; + delta: number; + id: string; +} + +// Simple Web Vitals implementation (since we don't want to add external dependencies) +const getCLS = (onPerfEntry: (metric: Metric) => void) => { + let clsValue = 0; + let sessionValue = 0; + let sessionEntries: PerformanceEntry[] = []; + + const observer = new PerformanceObserver((list) => { + for (const entry of list.getEntries()) { + if (!(entry as any).hadRecentInput) { + const firstSessionEntry = sessionEntries[0]; + const lastSessionEntry = sessionEntries[sessionEntries.length - 1]; + + if (sessionValue && entry.startTime - lastSessionEntry.startTime < 1000 && entry.startTime - firstSessionEntry.startTime < 5000) { + sessionValue += (entry as any).value; + sessionEntries.push(entry); + } else { + sessionValue = (entry as any).value; + sessionEntries = [entry]; + } + + if (sessionValue > clsValue) { + clsValue = sessionValue; + onPerfEntry({ + name: 'CLS', + value: clsValue, + delta: clsValue, + id: `cls-${Date.now()}`, + }); + } + } + } + }); + + observer.observe({ type: 'layout-shift', buffered: true }); +}; + +const getFID = (onPerfEntry: (metric: Metric) => void) => { + const observer = new PerformanceObserver((list) => { + for (const entry of list.getEntries()) { + onPerfEntry({ + name: 'FID', + value: (entry as any).processingStart - entry.startTime, + delta: (entry as any).processingStart - entry.startTime, + id: `fid-${Date.now()}`, + }); + } + }); + + observer.observe({ type: 'first-input', buffered: true }); +}; + +const getFCP = (onPerfEntry: (metric: Metric) => void) => { + const observer = new PerformanceObserver((list) => { + for (const entry of list.getEntries()) { + if (entry.name === 'first-contentful-paint') { + onPerfEntry({ + name: 'FCP', + value: entry.startTime, + delta: entry.startTime, + id: `fcp-${Date.now()}`, + }); + } + } + }); + + observer.observe({ type: 'paint', buffered: true }); +}; + +const getLCP = (onPerfEntry: (metric: Metric) => void) => { + const observer = new 
PerformanceObserver((list) => { + const entries = list.getEntries(); + const lastEntry = entries[entries.length - 1]; + + onPerfEntry({ + name: 'LCP', + value: lastEntry.startTime, + delta: lastEntry.startTime, + id: `lcp-${Date.now()}`, + }); + }); + + observer.observe({ type: 'largest-contentful-paint', buffered: true }); +}; + +const getTTFB = (onPerfEntry: (metric: Metric) => void) => { + const observer = new PerformanceObserver((list) => { + for (const entry of list.getEntries()) { + if (entry.entryType === 'navigation') { + const navEntry = entry as PerformanceNavigationTiming; + onPerfEntry({ + name: 'TTFB', + value: navEntry.responseStart - navEntry.fetchStart, + delta: navEntry.responseStart - navEntry.fetchStart, + id: `ttfb-${Date.now()}`, + }); + } + } + }); + + observer.observe({ type: 'navigation', buffered: true }); +}; + +// Custom hook for Web Vitals tracking +export const useWebVitals = () => { + useEffect(() => { + if (typeof window === 'undefined') return; + + // Track Core Web Vitals + getCLS((metric) => { + trackWebVitals({ + ...metric, + name: metric.name as 'CLS' | 'FID' | 'FCP' | 'LCP' | 'TTFB', + url: window.location.pathname, + }); + }); + + getFID((metric) => { + trackWebVitals({ + ...metric, + name: metric.name as 'CLS' | 'FID' | 'FCP' | 'LCP' | 'TTFB', + url: window.location.pathname, + }); + }); + + getFCP((metric) => { + trackWebVitals({ + ...metric, + name: metric.name as 'CLS' | 'FID' | 'FCP' | 'LCP' | 'TTFB', + url: window.location.pathname, + }); + }); + + getLCP((metric) => { + trackWebVitals({ + ...metric, + name: metric.name as 'CLS' | 'FID' | 'FCP' | 'LCP' | 'TTFB', + url: window.location.pathname, + }); + }); + + getTTFB((metric) => { + trackWebVitals({ + ...metric, + name: metric.name as 'CLS' | 'FID' | 'FCP' | 'LCP' | 'TTFB', + url: window.location.pathname, + }); + }); + + // Track page load performance + const handleLoad = () => { + setTimeout(() => { + trackPerformance({ + name: 'page-load-complete', + value: 
performance.now(), + url: window.location.pathname, + timestamp: Date.now(), + userAgent: navigator.userAgent, + }); + }, 0); + }; + + if (document.readyState === 'complete') { + handleLoad(); + } else { + window.addEventListener('load', handleLoad); + } + + return () => { + window.removeEventListener('load', handleLoad); + }; + }, []); +}; diff --git a/next.config.ts b/next.config.ts index d200128..f523eea 100644 --- a/next.config.ts +++ b/next.config.ts @@ -6,6 +6,19 @@ import path from "path"; dotenv.config({ path: path.resolve(__dirname, '.env') }); const nextConfig: NextConfig = { + // Enable standalone output for Docker + output: 'standalone', + + // Optimize for production + compress: true, + poweredByHeader: false, + + // Disable ESLint during build for Docker + eslint: { + ignoreDuringBuilds: process.env.NODE_ENV === 'production', + }, + + // Environment variables env: { NEXT_PUBLIC_BASE_URL: process.env.NEXT_PUBLIC_BASE_URL }, @@ -17,6 +30,17 @@ const nextConfig: NextConfig = { MY_PASSWORD: process.env.MY_PASSWORD, MY_INFO_PASSWORD: process.env.MY_INFO_PASSWORD }, + + // Performance optimizations + experimental: { + optimizePackageImports: ['lucide-react', 'framer-motion'], + }, + + // Image optimization + images: { + formats: ['image/webp', 'image/avif'], + minimumCacheTTL: 60, + }, }; const withBundleAnalyzer = require("@next/bundle-analyzer")({ diff --git a/nginx.conf b/nginx.conf new file mode 100644 index 0000000..e8a30d3 --- /dev/null +++ b/nginx.conf @@ -0,0 +1,139 @@ +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + # Logging + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + error_log /var/log/nginx/error.log warn; + + # Basic Settings + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 
65; + types_hash_max_size 2048; + client_max_body_size 16M; + + # Gzip Settings + gzip on; + gzip_vary on; + gzip_proxied any; + gzip_comp_level 6; + gzip_types + text/plain + text/css + text/xml + text/javascript + application/json + application/javascript + application/xml+rss + application/atom+xml + image/svg+xml; + + # Rate Limiting + limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s; + limit_req_zone $binary_remote_addr zone=login:10m rate=5r/m; + + # Cache Settings + proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=portfolio_cache:10m max_size=1g inactive=60m use_temp_path=off; + + # Upstream for load balancing + upstream portfolio_backend { + least_conn; + server portfolio:3000 max_fails=3 fail_timeout=30s; + # Add more instances here for scaling + # server portfolio-2:3000 max_fails=3 fail_timeout=30s; + # server portfolio-3:3000 max_fails=3 fail_timeout=30s; + } + + # HTTP Server (redirect to HTTPS) + server { + listen 80; + server_name _; + return 301 https://$host$request_uri; + } + + # HTTPS Server + server { + listen 443 ssl http2; + server_name dki.one www.dki.one; + + # SSL Configuration + ssl_certificate /etc/nginx/ssl/cert.pem; + ssl_certificate_key /etc/nginx/ssl/key.pem; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384; + ssl_prefer_server_ciphers off; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + # Security Headers + add_header X-Frame-Options DENY; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; + add_header Referrer-Policy "strict-origin-when-cross-origin"; + + # Cache static assets + location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ { + expires 1y; + add_header Cache-Control "public, immutable"; + add_header X-Cache-Status "STATIC"; + } + + # API routes with 
rate limiting + location /api/ { + limit_req zone=api burst=20 nodelay; + proxy_pass http://portfolio_backend; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_pragma $http_authorization; + proxy_cache_revalidate on; + proxy_cache_min_uses 1; + proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; + proxy_cache_lock on; + } + + # Health check endpoint + location /api/health { + proxy_pass http://portfolio_backend; + proxy_set_header Host $host; + access_log off; + } + + # Main application + location / { + proxy_pass http://portfolio_backend; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Enable caching for static pages + proxy_cache portfolio_cache; + proxy_cache_valid 200 302 10m; + proxy_cache_valid 404 1m; + proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; + proxy_cache_lock on; + + # Add cache status header + add_header X-Cache-Status $upstream_cache_status; + } + + # Error pages + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + } +} diff --git a/package.json b/package.json index 05c686f..6086c23 100644 --- a/package.json +++ b/package.json @@ -9,12 +9,23 @@ "lint": "next lint", "buildAnalyze": "cross-env ANALYZE=true next build", "test": "jest", + "test:watch": "jest --watch", + "test:coverage": "jest --coverage", "db:setup": "chmod +x scripts/setup-db.sh && ./scripts/setup-db.sh", "db:generate": "prisma generate", "db:push": "prisma db push", "db:seed": "tsx prisma/seed.ts", "db:studio": "prisma studio", - "db:reset": "prisma db push --force-reset" + "db:reset": "prisma db push --force-reset", + "docker:build": "docker build -t portfolio-app .", + 
"docker:run": "docker run -p 3000:3000 portfolio-app", + "docker:compose": "docker-compose -f docker-compose.prod.yml up -d", + "docker:down": "docker-compose -f docker-compose.prod.yml down", + "deploy": "./scripts/deploy.sh", + "auto-deploy": "./scripts/auto-deploy.sh", + "quick-deploy": "./scripts/quick-deploy.sh", + "monitor": "./scripts/monitor.sh", + "health": "curl -f http://localhost:3000/api/health" }, "prisma": { "seed": "tsx prisma/seed.ts" diff --git a/prisma/seed.ts b/prisma/seed.ts index e461db2..2e58979 100644 --- a/prisma/seed.ts +++ b/prisma/seed.ts @@ -280,7 +280,10 @@ Built with a focus on user experience and visual appeal. Implemented proper erro for (const project of projects) { await prisma.project.create({ - data: project + data: { + ...project, + difficulty: project.difficulty as any, + } }); } diff --git a/scripts/auto-deploy.sh b/scripts/auto-deploy.sh new file mode 100755 index 0000000..8632444 --- /dev/null +++ b/scripts/auto-deploy.sh @@ -0,0 +1,221 @@ +#!/bin/bash + +# Auto-Deploy Script für Portfolio +# Führt automatisch Tests, Build und Deployment durch + +set -e + +# Configuration +PROJECT_NAME="portfolio" +CONTAINER_NAME="portfolio-app" +IMAGE_NAME="portfolio-app" +PORT=3000 +BACKUP_PORT=3001 +LOG_FILE="/var/log/portfolio-deploy.log" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging function +log() { + echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1" | tee -a "$LOG_FILE" +} + +error() { + echo -e "${RED}[ERROR]${NC} $1" | tee -a "$LOG_FILE" +} + +success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" | tee -a "$LOG_FILE" +} + +warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" | tee -a "$LOG_FILE" +} + +# Check if running as root +if [[ $EUID -eq 0 ]]; then + error "This script should not be run as root" + exit 1 +fi + +# Check if Docker is running +if ! docker info > /dev/null 2>&1; then + error "Docker is not running. 
Please start Docker and try again." + exit 1 +fi + +# Check if we're in the right directory +if [ ! -f "package.json" ] || [ ! -f "Dockerfile" ]; then + error "Please run this script from the project root directory" + exit 1 +fi + +log "🚀 Starting automatic deployment for $PROJECT_NAME" + +# Step 1: Code Quality Checks +log "📋 Step 1: Running code quality checks..." + +# Check for uncommitted changes +if [ -n "$(git status --porcelain)" ]; then + warning "You have uncommitted changes. Committing them..." + git add . + git commit -m "Auto-commit before deployment $(date)" +fi + +# Pull latest changes +log "📥 Pulling latest changes..." +git pull origin main || { + error "Failed to pull latest changes" + exit 1 +} + +# Run linting +log "🔍 Running ESLint..." +npm run lint || { + error "ESLint failed. Please fix the issues before deploying." + exit 1 +} + +# Run tests +log "🧪 Running tests..." +npm run test || { + error "Tests failed. Please fix the issues before deploying." + exit 1 +} + +success "✅ Code quality checks passed" + +# Step 2: Build Application +log "🔨 Step 2: Building application..." + +# Build Next.js application +log "📦 Building Next.js application..." +npm run build || { + error "Build failed" + exit 1 +} + +success "✅ Application built successfully" + +# Step 3: Docker Operations +log "🐳 Step 3: Docker operations..." + +# Build Docker image +log "🏗️ Building Docker image..." +docker build -t "$IMAGE_NAME:latest" . || { + error "Docker build failed" + exit 1 +} + +# Tag with timestamp +TIMESTAMP=$(date +%Y%m%d-%H%M%S) +docker tag "$IMAGE_NAME:latest" "$IMAGE_NAME:$TIMESTAMP" + +success "✅ Docker image built successfully" + +# Step 4: Deployment +log "🚀 Step 4: Deploying application..." + +# Check if container is running +if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null)" = "true" ]; then + log "📦 Stopping existing container..." 
+ docker stop "$CONTAINER_NAME" || true + docker rm "$CONTAINER_NAME" || true +fi + +# Check if port is available +if lsof -Pi :$PORT -sTCP:LISTEN -t >/dev/null ; then + warning "Port $PORT is in use. Trying backup port $BACKUP_PORT" + DEPLOY_PORT=$BACKUP_PORT +else + DEPLOY_PORT=$PORT +fi + +# Start new container +log "🚀 Starting new container on port $DEPLOY_PORT..." +docker run -d \ + --name "$CONTAINER_NAME" \ + --restart unless-stopped \ + -p "$DEPLOY_PORT:3000" \ + -e NODE_ENV=production \ + "$IMAGE_NAME:latest" || { + error "Failed to start container" + exit 1 +} + +# Wait for container to be ready +log "⏳ Waiting for container to be ready..." +sleep 10 + +# Health check +log "🏥 Performing health check..." +HEALTH_CHECK_TIMEOUT=60 +HEALTH_CHECK_INTERVAL=2 +ELAPSED=0 + +while [ $ELAPSED -lt $HEALTH_CHECK_TIMEOUT ]; do + if curl -f "http://localhost:$DEPLOY_PORT/api/health" > /dev/null 2>&1; then + success "✅ Application is healthy!" + break + fi + + sleep $HEALTH_CHECK_INTERVAL + ELAPSED=$((ELAPSED + HEALTH_CHECK_INTERVAL)) + echo -n "." +done + +if [ $ELAPSED -ge $HEALTH_CHECK_TIMEOUT ]; then + error "Health check timeout. Application may not be running properly." + log "Container logs:" + docker logs "$CONTAINER_NAME" --tail=50 + exit 1 +fi + +# Step 5: Verification +log "✅ Step 5: Verifying deployment..." + +# Test main page +if curl -f "http://localhost:$DEPLOY_PORT/" > /dev/null 2>&1; then + success "✅ Main page is accessible" +else + error "❌ Main page is not accessible" + exit 1 +fi + +# Show container status +log "📊 Container status:" +docker ps --filter "name=$CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" + +# Show resource usage +log "📈 Resource usage:" +docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" "$CONTAINER_NAME" + +# Step 6: Cleanup +log "🧹 Step 6: Cleaning up old images..." 
+
+# Remove old images (keep last 3 versions)
+docker images "$IMAGE_NAME" --format "table {{.Tag}}\t{{.ID}}" | tail -n +2 | head -n -3 | awk '{print $2}' | xargs -r docker rmi || {
+    warning "No old images to remove"
+}
+
+# Clean up unused Docker resources (containers/images/networks only;
+# deliberately NOT --volumes, which would delete unused volumes host-wide)
+docker system prune -f || {
+    warning "Failed to clean up Docker resources"
+}
+
+# Final success message
+success "🎉 Deployment completed successfully!"
+log "🌐 Application is available at: http://localhost:$DEPLOY_PORT"
+log "🏥 Health check endpoint: http://localhost:$DEPLOY_PORT/api/health"
+log "📊 Container name: $CONTAINER_NAME"
+log "📝 Logs: docker logs $CONTAINER_NAME"
+
+# Update deployment log
+echo "$(date): Deployment successful - Port: $DEPLOY_PORT - Image: $IMAGE_NAME:$TIMESTAMP" >> "$LOG_FILE"
+
+exit 0
diff --git a/scripts/deploy.sh b/scripts/deploy.sh
new file mode 100755
index 0000000..f882376
--- /dev/null
+++ b/scripts/deploy.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+
+# Portfolio Deployment Script
+# Usage: ./scripts/deploy.sh [environment]
+
+set -e
+
+# Configuration
+ENVIRONMENT=${1:-production}
+REGISTRY="ghcr.io"
+IMAGE_NAME="dennis-konkol/my_portfolio"
+CONTAINER_NAME="portfolio-app"
+COMPOSE_FILE="docker-compose.prod.yml"
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Logging function
+log() {
+    echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
+}
+
+error() {
+    echo -e "${RED}[ERROR]${NC} $1" >&2
+}
+
+success() {
+    echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+warning() {
+    echo -e "${YELLOW}[WARNING]${NC} $1"
+}
+
+# Check if running as root
+if [[ $EUID -eq 0 ]]; then
+    error "This script should not be run as root"
+    exit 1
+fi
+
+# Check if Docker is running
+if ! docker info > /dev/null 2>&1; then
+    error "Docker is not running. Please start Docker and try again."
+    exit 1
+fi
+
+# Check if docker-compose is available
+if !
command -v docker-compose &> /dev/null; then + error "docker-compose is not installed. Please install docker-compose and try again." + exit 1 +fi + +# Check if .env file exists +if [ ! -f .env ]; then + error ".env file not found. Please create it with the required environment variables." + exit 1 +fi + +log "Starting deployment for environment: $ENVIRONMENT" + +# Set image tag based on environment +if [ "$ENVIRONMENT" = "production" ]; then + IMAGE_TAG="production" +else + IMAGE_TAG="main" +fi + +FULL_IMAGE_NAME="$REGISTRY/$IMAGE_NAME:$IMAGE_TAG" + +log "Using image: $FULL_IMAGE_NAME" + +# Login to registry (if needed) +log "Logging in to container registry..." +echo "$GITHUB_TOKEN" | docker login $REGISTRY -u $GITHUB_ACTOR --password-stdin || { + warning "Failed to login to registry. Make sure GITHUB_TOKEN and GITHUB_ACTOR are set." +} + +# Pull latest image +log "Pulling latest image..." +docker pull $FULL_IMAGE_NAME || { + error "Failed to pull image $FULL_IMAGE_NAME" + exit 1 +} + +# Stop and remove old containers +log "Stopping old containers..." +docker-compose -f $COMPOSE_FILE down || { + warning "No old containers to stop" +} + +# Remove old images (keep last 3 versions) +log "Cleaning up old images..." +docker images $REGISTRY/$IMAGE_NAME --format "table {{.Tag}}\t{{.ID}}" | tail -n +2 | head -n -3 | awk '{print $2}' | xargs -r docker rmi || { + warning "No old images to remove" +} + +# Start new containers +log "Starting new containers..." +docker-compose -f $COMPOSE_FILE up -d || { + error "Failed to start containers" + exit 1 +} + +# Wait for health check +log "Waiting for application to be healthy..." +HEALTH_CHECK_TIMEOUT=60 +HEALTH_CHECK_INTERVAL=2 +ELAPSED=0 + +while [ $ELAPSED -lt $HEALTH_CHECK_TIMEOUT ]; do + if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then + success "Application is healthy!" + break + fi + + sleep $HEALTH_CHECK_INTERVAL + ELAPSED=$((ELAPSED + HEALTH_CHECK_INTERVAL)) + echo -n "." 
+done
+
+if [ $ELAPSED -ge $HEALTH_CHECK_TIMEOUT ]; then
+    error "Health check timeout. Application may not be running properly."
+    log "Container logs:"
+    docker-compose -f $COMPOSE_FILE logs --tail=50
+    exit 1
+fi
+
+# Verify deployment
+log "Verifying deployment..."
+if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then
+    success "Deployment successful!"
+
+    # Show container status
+    log "Container status:"
+    docker-compose -f $COMPOSE_FILE ps
+
+    # Show resource usage
+    log "Resource usage:"
+    docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}"
+
+else
+    error "Deployment verification failed!"
+    log "Container logs:"
+    docker-compose -f $COMPOSE_FILE logs --tail=50
+    exit 1
+fi
+
+# Cleanup (containers/images/networks only; deliberately NOT --volumes,
+# which would delete every unused volume on the host)
+log "Cleaning up unused Docker resources..."
+docker system prune -f || {
+    warning "Failed to clean up Docker resources"
+}
+
+success "Deployment completed successfully!"
+log "Application is available at: http://localhost:3000"
+log "Health check endpoint: http://localhost:3000/api/health"
diff --git a/scripts/monitor.sh b/scripts/monitor.sh
new file mode 100755
index 0000000..7377830
--- /dev/null
+++ b/scripts/monitor.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+# Portfolio Monitoring Script
+# Usage: ./scripts/monitor.sh [action]
+
+set -e
+
+# Configuration
+CONTAINER_NAME="portfolio-app"
+COMPOSE_FILE="docker-compose.prod.yml"
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Logging function
+log() {
+    echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
+}
+
+error() {
+    echo -e "${RED}[ERROR]${NC} $1" >&2
+}
+
+success() {
+    echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+warning() {
+    echo -e "${YELLOW}[WARNING]${NC} $1"
+}
+
+# Check container health
+check_health() {
+    log "Checking application health..."
+ + if curl -f http://localhost:3000/api/health > /dev/null 2>&1; then + success "Application is healthy" + return 0 + else + error "Application is unhealthy" + return 1 + fi +} + +# Show container status +show_status() { + log "Container status:" + docker-compose -f $COMPOSE_FILE ps + + echo "" + log "Resource usage:" + docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" + + echo "" + log "Container logs (last 20 lines):" + docker-compose -f $COMPOSE_FILE logs --tail=20 +} + +# Show detailed metrics +show_metrics() { + log "Detailed metrics:" + + # Container info + echo "=== Container Information ===" + docker inspect $CONTAINER_NAME --format='{{.State.Status}} - {{.State.StartedAt}}' 2>/dev/null || echo "Container not found" + + # Memory usage + echo "" + echo "=== Memory Usage ===" + docker stats --no-stream --format "{{.MemUsage}}" $CONTAINER_NAME 2>/dev/null || echo "Container not running" + + # CPU usage + echo "" + echo "=== CPU Usage ===" + docker stats --no-stream --format "{{.CPUPerc}}" $CONTAINER_NAME 2>/dev/null || echo "Container not running" + + # Network usage + echo "" + echo "=== Network Usage ===" + docker stats --no-stream --format "{{.NetIO}}" $CONTAINER_NAME 2>/dev/null || echo "Container not running" + + # Disk usage + echo "" + echo "=== Disk Usage ===" + docker system df +} + +# Restart container +restart_container() { + log "Restarting container..." + docker-compose -f $COMPOSE_FILE restart + + # Wait for health check + log "Waiting for container to be healthy..." + sleep 10 + + if check_health; then + success "Container restarted successfully" + else + error "Container restart failed" + exit 1 + fi +} + +# Show logs +show_logs() { + local lines=${1:-50} + log "Showing last $lines lines of logs:" + docker-compose -f $COMPOSE_FILE logs --tail=$lines -f +} + +# Cleanup +cleanup() { + log "Cleaning up Docker resources..." 
+
+    # Remove unused containers
+    docker container prune -f
+
+    # Remove unused images
+    docker image prune -f
+
+    # Remove unused volumes
+    docker volume prune -f
+
+    # Remove unused networks
+    docker network prune -f
+
+    success "Cleanup completed"
+}
+
+# Main script logic
+case "${1:-status}" in
+    "health")
+        check_health
+        ;;
+    "status")
+        show_status
+        ;;
+    "metrics")
+        show_metrics
+        ;;
+    "restart")
+        restart_container
+        ;;
+    "logs")
+        show_logs $2
+        ;;
+    "cleanup")
+        cleanup
+        ;;
+    *)
+        echo "Usage: $0 {health|status|metrics|restart|logs|cleanup}"
+        echo ""
+        echo "Commands:"
+        echo "  health   - Check application health"
+        echo "  status   - Show container status and resource usage"
+        echo "  metrics  - Show detailed metrics"
+        echo "  restart  - Restart the container"
+        echo "  logs     - Show container logs (optional: number of lines)"
+        echo "  cleanup  - Clean up unused Docker resources"
+        exit 1
+        ;;
+esac
diff --git a/scripts/quick-deploy.sh b/scripts/quick-deploy.sh
new file mode 100755
index 0000000..1ab1243
--- /dev/null
+++ b/scripts/quick-deploy.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+# Quick deploy script for local development
+# Fast deployment without extensive tests
+
+set -e
+
+# Configuration
+CONTAINER_NAME="portfolio-app"
+IMAGE_NAME="portfolio-app"
+PORT=3000
+
+# Colors
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+RED='\033[0;31m'
+NC='\033[0m'
+
+log() {
+    echo -e "${BLUE}[$(date +'%H:%M:%S')]${NC} $1"
+}
+
+success() {
+    echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+error() {
+    echo -e "${RED}[ERROR]${NC} $1" >&2
+}
+
+log "🚀 Quick deployment starting..."
+
+# Build Docker image
+log "🏗️ Building Docker image..."
+docker build -t "$IMAGE_NAME:latest" .
+
+# Stop existing container
+if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_NAME" 2>/dev/null)" = "true" ]; then
+    log "🛑 Stopping existing container..."
+    docker stop "$CONTAINER_NAME"
+    docker rm "$CONTAINER_NAME"
+fi
+
+# Start new container
+log "🚀 Starting new container..."
+docker run -d \ + --name "$CONTAINER_NAME" \ + --restart unless-stopped \ + -p "$PORT:3000" \ + -e NODE_ENV=production \ + "$IMAGE_NAME:latest" + +# Wait and check health +log "⏳ Waiting for container to be ready..." +sleep 5 + +if curl -f "http://localhost:$PORT/api/health" > /dev/null 2>&1; then + success "✅ Application is running at http://localhost:$PORT" +else + error "❌ Health check failed" + docker logs "$CONTAINER_NAME" --tail=20 + exit 1 +fi