Containerization and Orchestration for Full-Stack Applications
Containerization has revolutionized application deployment and management. This guide covers Docker fundamentals, Kubernetes orchestration, and best practices for deploying full-stack applications at scale. 🐳
Docker Fundamentals
Multi-Stage Dockerfile for Node.js
# Frontend build stage
FROM node:18-alpine AS frontend-builder
WORKDIR /app/frontend
COPY frontend/package*.json ./
# Dev dependencies are needed for the build step, so do a full install here
RUN npm ci
COPY frontend/ ./
RUN npm run build
# Backend build stage
FROM node:18-alpine AS backend-builder
WORKDIR /app/backend
COPY backend/package*.json ./
RUN npm ci --only=production
COPY backend/ ./
# Production stage
FROM node:18-alpine AS production
WORKDIR /app
# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
adduser -S nodejs -u 1001
# Copy built applications from the build stages
COPY --from=backend-builder /app/backend ./backend
COPY --from=frontend-builder /app/frontend/dist ./frontend/dist
# Install production dependencies only
WORKDIR /app/backend
RUN npm ci --only=production && npm cache clean --force
# Switch to non-root user
USER nodejs
EXPOSE 3000
CMD ["node", "server.js"]
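To build and run the image locally as a quick smoke test (the tag and published port below are placeholders):
docker build -t myapp:local .
docker run --rm -p 3000:3000 myapp:local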
Docker Compose for Development
# docker-compose.yml
version: '3.8'
services:
frontend:
build:
context: ./frontend
dockerfile: Dockerfile.dev
ports:
- '3000:3000'
volumes:
- ./frontend:/app
- /app/node_modules
environment:
- REACT_APP_API_URL=http://localhost:8000
depends_on:
- backend
backend:
build:
context: ./backend
dockerfile: Dockerfile.dev
ports:
- '8000:8000'
volumes:
- ./backend:/app
- /app/node_modules
environment:
- NODE_ENV=development
- DATABASE_URL=postgresql://user:password@postgres:5432/myapp
- REDIS_URL=redis://redis:6379
depends_on:
- postgres
- redis
postgres:
image: postgres:15-alpine
environment:
- POSTGRES_DB=myapp
- POSTGRES_USER=user
- POSTGRES_PASSWORD=password
ports:
- '5432:5432'
volumes:
- postgres_data:/var/lib/postgresql/data
- ./init.sql:/docker-entrypoint-initdb.d/init.sql
redis:
image: redis:7-alpine
ports:
- '6379:6379'
volumes:
- redis_data:/data
nginx:
image: nginx:alpine
ports:
- '80:80'
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
depends_on:
- frontend
- backend
volumes:
postgres_data:
redis_data:
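With this file in place, the whole development stack starts with one command (run from the project root where docker-compose.yml lives):
# Build images and start every service in the foreground
docker compose up --build
# Stop and remove the containers (add -v to also drop the named volumes)
docker compose down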
Nginx Configuration
# nginx.conf
events {
worker_connections 1024;
}
http {
upstream backend {
server backend:8000;
}
upstream frontend {
server frontend:3000;
}
server {
listen 80;
# Frontend routes
location / {
proxy_pass http://frontend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
# API routes
location /api/ {
proxy_pass http://backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
# WebSocket support
location /socket.io/ {
proxy_pass http://backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
}
}
}
Kubernetes Deployment
Application Manifests
# namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: myapp
---
# configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: app-config
namespace: myapp
data:
NODE_ENV: 'production'
API_URL: 'https://api.myapp.com'
REDIS_HOST: 'redis-service'
POSTGRES_HOST: 'postgres-service'
---
# secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: app-secrets
namespace: myapp
type: Opaque
data:
DATABASE_PASSWORD: cGFzc3dvcmQxMjM= # base64 encoded
JWT_SECRET: bXlqd3RzZWNyZXQ=
REDIS_PASSWORD: cmVkaXNwYXNzd29yZA==
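Keep in mind that base64 is an encoding, not encryption, so real values should not be committed to version control. As an alternative to hand-encoding, the same Secret can be generated with kubectl (the literal values here are placeholders):
kubectl create secret generic app-secrets \
  --namespace myapp \
  --from-literal=DATABASE_PASSWORD='change-me' \
  --from-literal=JWT_SECRET='change-me' \
  --from-literal=REDIS_PASSWORD='change-me' \
  --dry-run=client -o yaml > secret.yaml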
Backend Deployment
# backend-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: backend
namespace: myapp
spec:
replicas: 3
selector:
matchLabels:
app: backend
template:
metadata:
labels:
app: backend
spec:
containers:
- name: backend
image: myapp/backend:latest
ports:
- containerPort: 8000
env:
- name: NODE_ENV
valueFrom:
configMapKeyRef:
name: app-config
key: NODE_ENV
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: app-secrets
key: DATABASE_PASSWORD
- name: POSTGRES_HOST
valueFrom:
configMapKeyRef:
name: app-config
key: POSTGRES_HOST
- name: DATABASE_URL
value: 'postgresql://user:$(DATABASE_PASSWORD)@$(POSTGRES_HOST):5432/myapp'
resources:
requests:
memory: '256Mi'
cpu: '250m'
limits:
memory: '512Mi'
cpu: '500m'
livenessProbe:
httpGet:
path: /health
port: 8000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 8000
initialDelaySeconds: 5
periodSeconds: 5
---
# backend-service.yaml
apiVersion: v1
kind: Service
metadata:
name: backend-service
namespace: myapp
spec:
selector:
app: backend
ports:
- port: 80
targetPort: 8000
type: ClusterIP
Frontend Deployment
# frontend-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend
namespace: myapp
spec:
replicas: 2
selector:
matchLabels:
app: frontend
template:
metadata:
labels:
app: frontend
spec:
containers:
- name: frontend
image: myapp/frontend:latest
ports:
- containerPort: 80
resources:
requests:
memory: '128Mi'
cpu: '100m'
limits:
memory: '256Mi'
cpu: '200m'
---
# frontend-service.yaml
apiVersion: v1
kind: Service
metadata:
name: frontend-service
namespace: myapp
spec:
selector:
app: frontend
ports:
- port: 80
targetPort: 80
type: ClusterIP
Database StatefulSet
# postgres-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
namespace: myapp
spec:
serviceName: postgres-service
replicas: 1
selector:
matchLabels:
app: postgres
template:
metadata:
labels:
app: postgres
spec:
containers:
- name: postgres
image: postgres:15-alpine
ports:
- containerPort: 5432
env:
- name: POSTGRES_DB
value: myapp
- name: POSTGRES_USER
value: user
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: app-secrets
key: DATABASE_PASSWORD
volumeMounts:
- name: postgres-storage
mountPath: /var/lib/postgresql/data
resources:
requests:
memory: '256Mi'
cpu: '250m'
limits:
memory: '1Gi'
cpu: '500m'
volumeClaimTemplates:
- metadata:
name: postgres-storage
spec:
accessModes: ['ReadWriteOnce']
resources:
requests:
storage: 10Gi
---
# postgres-service.yaml
apiVersion: v1
kind: Service
metadata:
name: postgres-service
namespace: myapp
spec:
selector:
app: postgres
ports:
- port: 5432
targetPort: 5432
type: ClusterIP
Ingress Configuration
# ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: app-ingress
namespace: myapp
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/limit-rpm: '100'
spec:
ingressClassName: nginx
tls:
- hosts:
- myapp.com
- api.myapp.com
secretName: app-tls
rules:
- host: myapp.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: frontend-service
port:
number: 80
- host: api.myapp.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: backend-service
port:
number: 80
Horizontal Pod Autoscaling
# hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: backend-hpa
namespace: myapp
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: backend
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 70
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 50
periodSeconds: 60
scaleUp:
stabilizationWindowSeconds: 60
policies:
- type: Percent
value: 100
periodSeconds: 15
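Resource-based autoscaling requires the metrics-server (or another metrics API provider) to be installed in the cluster. Once the manifest is applied, scaling activity can be watched with kubectl:
kubectl apply -f hpa.yaml
kubectl get hpa backend-hpa -n myapp --watch
kubectl describe hpa backend-hpa -n myapp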
Monitoring and Logging
Prometheus Monitoring
# monitoring.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: backend-metrics
namespace: myapp
spec:
selector:
matchLabels:
app: backend
endpoints:
- port: metrics
interval: 30s
path: /metrics
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: app-alerts
namespace: myapp
spec:
groups:
- name: app.rules
rules:
- alert: HighErrorRate
expr: rate(http_requests_total{status=~"5.."}[5m]) > 0.1
for: 5m
labels:
severity: warning
annotations:
summary: High error rate detected
description: 'Error rate is {{ $value }} errors per second'
- alert: HighMemoryUsage
expr: container_memory_usage_bytes / container_spec_memory_limit_bytes > 0.9
for: 5m
labels:
severity: critical
annotations:
summary: High memory usage
description: 'Memory usage is above 90%'
Centralized Logging
# logging.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kube-system
data:
fluent-bit.conf: |
[SERVICE]
Flush 1
Log_Level info
Daemon off
Parsers_File parsers.conf
[INPUT]
Name tail
Path /var/log/containers/*myapp*.log
Parser docker
Tag kube.*
Refresh_Interval 5
[FILTER]
Name kubernetes
Match kube.*
Kube_URL https://kubernetes.default.svc:443
Kube_CA_File /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
Kube_Token_File /var/run/secrets/kubernetes.io/serviceaccount/token
[OUTPUT]
Name es
Match *
Host elasticsearch.logging.svc.cluster.local
Port 9200
Index myapp-logs
CI/CD Pipeline
GitHub Actions Workflow
# .github/workflows/deploy.yml
name: Deploy to Kubernetes
on:
push:
branches: [main]
jobs:
build-and-deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Backend
uses: docker/build-push-action@v4
with:
context: ./backend
push: true
tags: ghcr.io/${{ github.repository }}/backend:${{ github.sha }}
- name: Build and push Frontend
uses: docker/build-push-action@v4
with:
context: ./frontend
push: true
tags: ghcr.io/${{ github.repository }}/frontend:${{ github.sha }}
- name: Configure kubectl
uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBE_CONFIG }}
- name: Deploy to Kubernetes
run: |
sed -i "s|myapp/backend:latest|ghcr.io/${{ github.repository }}/backend:${{ github.sha }}|g" k8s/backend-deployment.yaml
sed -i "s|myapp/frontend:latest|ghcr.io/${{ github.repository }}/frontend:${{ github.sha }}|g" k8s/frontend-deployment.yaml
kubectl apply -f k8s/
kubectl rollout status deployment/backend -n myapp
kubectl rollout status deployment/frontend -n myapp
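If you would rather not rewrite manifests with sed, a sketch of an alternative final step is kubectl set image, which patches the Deployments in place (same image names and kubeconfig assumptions as above):
kubectl set image deployment/backend backend=ghcr.io/${{ github.repository }}/backend:${{ github.sha }} -n myapp
kubectl set image deployment/frontend frontend=ghcr.io/${{ github.repository }}/frontend:${{ github.sha }} -n myapp
kubectl rollout status deployment/backend -n myapp
kubectl rollout status deployment/frontend -n myapp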
Helm Chart Structure
# Chart.yaml
apiVersion: v2
name: myapp
description: Full-stack application Helm chart
version: 0.1.0
appVersion: 1.0.0
# values.yaml
replicaCount:
backend: 3
frontend: 2
image:
backend:
repository: myapp/backend
tag: latest
pullPolicy: IfNotPresent
frontend:
repository: myapp/frontend
tag: latest
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
hosts:
- host: myapp.com
paths:
- path: /
pathType: Prefix
tls:
- secretName: app-tls
hosts:
- myapp.com
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 70
resources:
backend:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 250m
memory: 256Mi
frontend:
limits:
cpu: 200m
memory: 256Mi
requests:
cpu: 100m
memory: 128Mi
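Assuming the chart above lives in a local ./myapp directory, installing or upgrading a release is a single command, with per-environment overrides passed via --set or an extra values file:
helm upgrade --install myapp ./myapp \
  --namespace myapp --create-namespace \
  --set image.backend.tag=1.0.0 \
  --set image.frontend.tag=1.0.0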
Security Best Practices
Pod Security Standards
# security-policy.yaml (OpenShift SecurityContextConstraints; for vanilla Kubernetes, see the Pod Security Standards labels below)
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: myapp-scc
allowHostDirVolumePlugin: false
allowHostIPC: false
allowHostNetwork: false
allowHostPID: false
allowPrivilegedContainer: false
allowedCapabilities: []
defaultAddCapabilities: []
requiredDropCapabilities:
- ALL
runAsUser:
type: MustRunAsNonRoot
seLinuxContext:
type: MustRunAs
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- projected
- secret
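On clusters without OpenShift, the built-in Pod Security Standards admission controller provides a similar guardrail and is enabled per namespace with labels. A minimal sketch for the myapp namespace:
apiVersion: v1
kind: Namespace
metadata:
  name: myapp
  labels:
    pod-security.kubernetes.io/enforce: restricted
    pod-security.kubernetes.io/audit: restricted
    pod-security.kubernetes.io/warn: restricted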
Network Policies
# network-policy.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: myapp-network-policy
namespace: myapp
spec:
podSelector:
matchLabels:
app: backend
policyTypes:
- Ingress
- Egress
ingress:
- from:
- podSelector:
matchLabels:
app: frontend
- namespaceSelector:
matchLabels:
name: ingress-nginx
ports:
- protocol: TCP
port: 8000
egress:
- to:
- podSelector:
matchLabels:
app: postgres
ports:
- protocol: TCP
port: 5432
- to:
- podSelector:
matchLabels:
app: redis
ports:
- protocol: TCP
port: 6379
# Allow DNS lookups, otherwise the backend cannot resolve service names
- to:
- namespaceSelector: {}
ports:
- protocol: UDP
port: 53
- protocol: TCP
port: 53
Best Practices Summary
Container Optimization
- Multi-stage builds to reduce image size
- Non-root users for security
- Health checks for reliability (see the HEALTHCHECK sketch after this list)
- Resource limits to prevent resource exhaustion
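As a sketch of the health-check point above, the image itself can declare a check; the endpoint and timings here are assumptions to adapt to your app, and the port matches the EXPOSE in the multi-stage Dockerfile earlier:
# Add to the production stage of the Dockerfile
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
  CMD wget -qO- http://localhost:3000/health || exit 1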
Kubernetes Deployment
- Namespaces for isolation
- ConfigMaps/Secrets for configuration
- StatefulSets for stateful services
- HPA for automatic scaling
Monitoring & Security
- Prometheus metrics for monitoring
- Centralized logging with ELK/EFK stack
- Network policies for security
- RBAC for access control (illustrated after this list)
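RBAC is mentioned above but not shown elsewhere in this guide, so here is a minimal, illustrative Role and RoleBinding granting a hypothetical ci-deployer ServiceAccount the rights to manage Deployments in the myapp namespace:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: deployer
  namespace: myapp
rules:
  - apiGroups: ['apps']
    resources: ['deployments']
    verbs: ['get', 'list', 'watch', 'update', 'patch']
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: deployer-binding
  namespace: myapp
subjects:
  - kind: ServiceAccount
    name: ci-deployer
    namespace: myapp
roleRef:
  kind: Role
  name: deployer
  apiGroup: rbac.authorization.k8s.io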
CI/CD
- Automated testing before deployment
- Image scanning for vulnerabilities (example step after this list)
- Rolling updates for zero-downtime
- Rollback strategies for quick recovery
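For the image-scanning point, one widely used option is Trivy; a sketch of an extra workflow step to slot in after the build-and-push steps above (pin the action version to whatever your organization allows):
- name: Scan backend image for vulnerabilities
  uses: aquasecurity/trivy-action@master
  with:
    image-ref: ghcr.io/${{ github.repository }}/backend:${{ github.sha }}
    severity: CRITICAL,HIGH
    exit-code: '1'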
Conclusion
Containerization and orchestration provide powerful tools for deploying scalable applications. Key benefits include:
- Consistency: Same environment across development, staging, and production
- Scalability: Automatic scaling based on demand
- Reliability: Self-healing and fault tolerance
- Efficiency: Better resource utilization
Success requires understanding these patterns and implementing proper monitoring, security, and deployment practices. Start with simple deployments and gradually adopt more advanced patterns as your needs grow. 🐳