
Container Orchestration Guide

Learn how to deploy and orchestrate Connix agents at scale using containers. This guide covers Docker deployment, Kubernetes orchestration, and production best practices for containerized AI agent workloads.

Connix provides first-class support for containerized deployments, allowing you to:

  • Deploy agents in isolated container environments
  • Scale agent workloads horizontally across clusters
  • Integrate with existing Kubernetes infrastructure
  • Manage agent lifecycles with container orchestration tools
  • Monitor and observe agent performance in production
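
The health checks used throughout this guide (Docker HEALTHCHECKs and Kubernetes probes) assume the agent answers HTTP on port 3000, with /health for liveness and /ready for readiness. A minimal sketch of those endpoints using Express — the isAgentReady() helper is illustrative, not part of the Connix SDK:

import express from "express";

const app = express();

// Liveness: the process is up and able to serve requests.
app.get("/health", (_req, res) => {
  res.status(200).json({ status: "ok" });
});

// Readiness: dependencies (Connix API, queues, databases) are reachable.
app.get("/ready", async (_req, res) => {
  const ready = await isAgentReady();
  res.status(ready ? 200 : 503).json({ ready });
});

// Placeholder for your own dependency checks.
async function isAgentReady(): Promise<boolean> {
  return true;
}

app.listen(3000);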

Create a simple Dockerfile for your Connix agent:

FROM node:18-alpine
# curl is required by the HEALTHCHECK below; alpine images do not ship it
RUN apk add --no-cache curl
WORKDIR /app
# Copy package files and install production dependencies
COPY package*.json ./
RUN npm ci --omit=dev
# Copy source code
COPY . .
# Set environment variables (supply the real API key at runtime, never bake it into the image)
ENV NODE_ENV=production
ENV CONNIX_API_KEY=""
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
  CMD curl -f http://localhost:3000/health || exit 1
# Run the agent
EXPOSE 3000
CMD ["npm", "start"]
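
Because COPY . . pulls in the entire build context, add a .dockerignore next to the Dockerfile so local artifacts and secrets never end up in the image. A typical starting point:

node_modules
npm-debug.log
dist
.git
.env
logs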

For smaller, optimized production images, use a multi-stage build:

# Build stage
FROM node:18-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Production stage
FROM node:18-alpine AS production
# curl is required by the HEALTHCHECK below
RUN apk add --no-cache curl
WORKDIR /app
# Create non-root user
RUN addgroup -g 1001 -S nodejs
RUN adduser -S connix -u 1001
# Copy built application
COPY --from=builder --chown=connix:nodejs /app/dist ./dist
COPY --from=builder --chown=connix:nodejs /app/package*.json ./
RUN npm ci --omit=dev && npm cache clean --force
USER connix
EXPOSE 3000
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
  CMD curl -f http://localhost:3000/health || exit 1
CMD ["node", "dist/index.js"]

For local development and testing, use Docker Compose:

version: '3.8'

services:
  connix-agent:
    build: .
    ports:
      - "3000:3000"
    environment:
      - CONNIX_API_KEY=${CONNIX_API_KEY}
      - CONNIX_PROJECT_ID=${CONNIX_PROJECT_ID}
      - NODE_ENV=production
    volumes:
      - ./logs:/app/logs
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    restart: unless-stopped

  postgres:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: connix
      POSTGRES_USER: connix
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    ports:
      - "5432:5432"
    restart: unless-stopped

volumes:
  redis_data:
  postgres_data:
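
Docker Compose resolves the ${...} references above from the shell environment or from a .env file next to docker-compose.yml, for example (placeholder values):

CONNIX_API_KEY=your-api-key
CONNIX_PROJECT_ID=your-project-id
DB_PASSWORD=change-me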

Build and run the agent with Docker:

# Build the image
docker build -t connix-agent:latest .

# Run a single container
docker run -d \
  --name connix-agent \
  -p 3000:3000 \
  -e CONNIX_API_KEY=your-api-key \
  -e CONNIX_PROJECT_ID=your-project-id \
  connix-agent:latest

# Run with Docker Compose
docker-compose up -d

# View logs
docker logs connix-agent -f

# Scale with Docker Compose (first remove the fixed "3000:3000" host port mapping,
# otherwise the replicas will collide on port 3000)
docker-compose up -d --scale connix-agent=3

Create the Kubernetes manifests, starting with a Deployment:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: connix-agent
  namespace: connix
  labels:
    app: connix-agent
    version: v1.0.0
spec:
  replicas: 3
  selector:
    matchLabels:
      app: connix-agent
  template:
    metadata:
      labels:
        app: connix-agent
        version: v1.0.0
    spec:
      containers:
        - name: connix-agent
          image: connix-agent:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 3000
              name: http
          env:
            - name: CONNIX_API_KEY
              valueFrom:
                secretKeyRef:
                  name: connix-secrets
                  key: api-key
            - name: CONNIX_PROJECT_ID
              valueFrom:
                configMapKeyRef:
                  name: connix-config
                  key: project-id
            - name: NODE_ENV
              value: "production"
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 2
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
          volumeMounts:
            - name: tmp
              mountPath: /tmp
            - name: logs
              mountPath: /app/logs
      volumes:
        - name: tmp
          emptyDir: {}
        - name: logs
          emptyDir: {}
      securityContext:
        fsGroup: 1001
      restartPolicy: Always

Expose the deployment inside the cluster with a Service:

apiVersion: v1
kind: Service
metadata:
  name: connix-agent-service
  namespace: connix
  labels:
    app: connix-agent
spec:
  selector:
    app: connix-agent
  ports:
    - name: http
      port: 80
      targetPort: 3000
      protocol: TCP
  type: ClusterIP

Provide non-sensitive configuration through a ConfigMap and credentials through a Secret:

apiVersion: v1
kind: ConfigMap
metadata:
  name: connix-config
  namespace: connix
data:
  project-id: "your-project-id"
  log-level: "info"
  max-workers: "4"
---
apiVersion: v1
kind: Secret
metadata:
  name: connix-secrets
  namespace: connix
type: Opaque
stringData:
  api-key: "your-api-key"
  database-url: "postgresql://user:password@host:5432/connix"
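
Avoid committing real credentials to version control; the same Secret can be created directly from the command line instead (values shown are placeholders):

kubectl create secret generic connix-secrets \
  --namespace connix \
  --from-literal=api-key=your-api-key \
  --from-literal=database-url="postgresql://user:password@host:5432/connix"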

Scale automatically with a HorizontalPodAutoscaler:

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: connix-agent-hpa
  namespace: connix
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: connix-agent
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
        - type: Percent
          value: 50
          periodSeconds: 60
    scaleUp:
      stabilizationWindowSeconds: 60
      policies:
        - type: Percent
          value: 100
          periodSeconds: 30
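
Resource-based autoscaling only works when the cluster runs metrics-server (managed clusters often include it). If kubectl top pods fails, install it from the upstream manifest:

kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml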

Route external traffic with an Ingress (this example assumes the NGINX ingress controller and cert-manager are installed):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: connix-agent-ingress
  namespace: connix
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  tls:
    - hosts:
        - agents.yourdomain.com
      secretName: connix-agent-tls
  rules:
    - host: agents.yourdomain.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: connix-agent-service
                port:
                  number: 80

Deploy and manage the resources with kubectl:
# Create namespace
kubectl create namespace connix
# Apply configurations
kubectl apply -f k8s/
# Check deployment status
kubectl get pods -n connix
kubectl get services -n connix
kubectl get ingress -n connix
# View logs
kubectl logs -n connix deployment/connix-agent -f
# Scale deployment
kubectl scale deployment connix-agent --replicas=5 -n connix
# Update deployment
kubectl set image deployment/connix-agent connix-agent=connix-agent:v1.1.0 -n connix
# Monitor rollout
kubectl rollout status deployment/connix-agent -n connix

For repeatable, configurable installs, package the manifests as a Helm chart:

connix-agent/
├── Chart.yaml
├── values.yaml
├── templates/
│   ├── deployment.yaml
│   ├── service.yaml
│   ├── configmap.yaml
│   ├── secret.yaml
│   ├── hpa.yaml
│   ├── ingress.yaml
│   └── serviceaccount.yaml
└── charts/

Chart.yaml:

apiVersion: v2
name: connix-agent
description: A Helm chart for Connix AI Agent deployment
type: application
version: 1.0.0
appVersion: "1.0.0"
keywords:
  - ai
  - agent
  - connix
home: https://connix.io
sources:
  - https://github.com/connix-io/connix-agent
maintainers:
  - name: Connix Team
    email: engineering@connix.io

values.yaml:

# Default values for connix-agent
replicaCount: 3

image:
  repository: connix-agent
  pullPolicy: IfNotPresent
  tag: "latest"

nameOverride: ""
fullnameOverride: ""

serviceAccount:
  create: true
  annotations: {}
  name: ""

podAnnotations: {}

podSecurityContext:
  fsGroup: 1001

securityContext:
  capabilities:
    drop:
      - ALL
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1001

service:
  type: ClusterIP
  port: 80
  targetPort: 3000

ingress:
  enabled: false
  className: ""
  annotations: {}
  hosts:
    - host: agents.example.com
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []

resources:
  limits:
    cpu: 500m
    memory: 512Mi
  requests:
    cpu: 250m
    memory: 256Mi

autoscaling:
  enabled: true
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 70
  targetMemoryUtilizationPercentage: 80

nodeSelector: {}
tolerations: []
affinity: {}

# Connix-specific configuration
connix:
  projectId: ""
  apiKey: ""
  logLevel: "info"
  maxWorkers: 4

# External services
postgresql:
  enabled: false
  auth:
    postgresPassword: ""
    database: "connix"

redis:
  enabled: false
  auth:
    enabled: false
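
These values reach the cluster through the chart templates. An abridged, illustrative fragment of templates/deployment.yaml shows how the image and autoscaling values are consumed (the fullname helper lives in the conventional _helpers.tpl, omitted here):

# Abridged sketch -- not the complete template
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "connix-agent.fullname" . }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  template:
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}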

Install and manage the release:

# Install the Helm chart
helm install connix-agent ./connix-agent \
  --namespace connix \
  --create-namespace \
  --set connix.apiKey=your-api-key \
  --set connix.projectId=your-project-id

# Upgrade the deployment
helm upgrade connix-agent ./connix-agent \
  --namespace connix \
  --set image.tag=v1.1.0

# Roll back to revision 1
helm rollback connix-agent 1 --namespace connix

# Uninstall the release
helm uninstall connix-agent --namespace connix
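
Validate the chart locally before installing:

# Check the chart for structural problems
helm lint ./connix-agent
# Render the manifests without installing them, to review the output
helm template connix-agent ./connix-agent --namespace connix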

Harden the namespace and workloads with Pod Security Standards, network policies, and resource limits:

# Pod Security Standards
apiVersion: v1
kind: Namespace
metadata:
  name: connix
  labels:
    pod-security.kubernetes.io/enforce: restricted
    pod-security.kubernetes.io/audit: restricted
    pod-security.kubernetes.io/warn: restricted
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: connix-agent-netpol
  namespace: connix
spec:
  podSelector:
    matchLabels:
      app: connix-agent
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              name: ingress-nginx
      ports:
        - protocol: TCP
          port: 3000
  egress:
    - to: []
      ports:
        - protocol: TCP
          port: 443 # HTTPS
        - protocol: TCP
          port: 53 # DNS
        - protocol: UDP
          port: 53 # DNS
    - to:
        - namespaceSelector:
            matchLabels:
              name: connix
      ports:
        - protocol: TCP
          port: 5432 # PostgreSQL
        - protocol: TCP
          port: 6379 # Redis
---
# Resource Quotas
apiVersion: v1
kind: ResourceQuota
metadata:
  name: connix-quota
  namespace: connix
spec:
  hard:
    requests.cpu: "2"
    requests.memory: 4Gi
    limits.cpu: "4"
    limits.memory: 8Gi
    pods: "10"
---
# Limit Ranges
apiVersion: v1
kind: LimitRange
metadata:
  name: connix-limits
  namespace: connix
spec:
  limits:
    - default:
        cpu: 500m
        memory: 512Mi
      defaultRequest:
        cpu: 250m
        memory: 256Mi
      type: Container

Monitor the agents with Prometheus. If the Prometheus Operator is installed, a ServiceMonitor scrapes metrics automatically:

# ServiceMonitor for Prometheus
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: connix-agent-metrics
  namespace: connix
  labels:
    app: connix-agent
spec:
  selector:
    matchLabels:
      app: connix-agent
  endpoints:
    - port: metrics
      interval: 30s
      path: /metrics
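
Note that this ServiceMonitor scrapes a named port called metrics, which the Service defined earlier does not expose. Add a second port to that Service; the sketch below assumes the agent serves Prometheus metrics on port 9090, so adjust it to wherever your agent actually exposes /metrics:

ports:
  - name: http
    port: 80
    targetPort: 3000
    protocol: TCP
  - name: metrics
    port: 9090        # assumed metrics port; match your agent's configuration
    targetPort: 9090
    protocol: TCP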

For agents that need stable network identity and persistent storage, use a StatefulSet instead of a Deployment:

# StatefulSet for persistent agents
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: connix-agent-persistent
  namespace: connix
spec:
  serviceName: connix-agent-headless
  replicas: 3
  selector:
    matchLabels:
      app: connix-agent-persistent
  template:
    metadata:
      labels:
        app: connix-agent-persistent
    spec:
      containers:
        - name: connix-agent
          image: connix-agent:latest
          volumeMounts:
            - name: agent-data
              mountPath: /app/data
  volumeClaimTemplates:
    - metadata:
        name: agent-data
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: "fast-ssd"
        resources:
          requests:
            storage: 10Gi
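
The StatefulSet references a headless Service named connix-agent-headless, which is not defined above; a minimal sketch:

apiVersion: v1
kind: Service
metadata:
  name: connix-agent-headless
  namespace: connix
spec:
  clusterIP: None   # headless: gives each pod a stable DNS name
  selector:
    app: connix-agent-persistent
  ports:
    - name: http
      port: 3000
      targetPort: 3000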

Provision a cluster on Amazon EKS and deploy:

# Create EKS cluster
eksctl create cluster \
  --name connix-cluster \
  --region us-west-2 \
  --node-type m5.large \
  --nodes 3 \
  --nodes-min 1 \
  --nodes-max 10 \
  --managed

# Deploy to EKS
kubectl apply -f k8s/

On Google GKE:

# Create GKE cluster
gcloud container clusters create connix-cluster \
  --zone us-central1-c \
  --machine-type n1-standard-2 \
  --num-nodes 3 \
  --enable-autorepair \
  --enable-autoupgrade \
  --enable-autoscaling \
  --min-nodes 1 \
  --max-nodes 10

# Deploy to GKE
kubectl apply -f k8s/

On Azure AKS:

# Create AKS cluster
az aks create \
  --resource-group connix-rg \
  --name connix-cluster \
  --node-count 3 \
  --node-vm-size Standard_D2s_v3 \
  --enable-cluster-autoscaler \
  --min-count 1 \
  --max-count 10 \
  --generate-ssh-keys

# Deploy to AKS
kubectl apply -f k8s/

Automate builds and deployments with GitHub Actions (this example targets the EKS cluster created earlier):

name: Deploy to Kubernetes

on:
  push:
    branches: [main]
    tags: ['v*']

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2

      - name: Build and push Docker image
        # The id is required so the deploy step can read steps.build-and-push.outputs.image
        id: build-and-push
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: connix-agent
          IMAGE_TAG: ${{ github.sha }}
        run: |
          docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
          echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> $GITHUB_OUTPUT

      - name: Update kubeconfig
        run: |
          aws eks update-kubeconfig --name connix-cluster --region us-west-2

      - name: Deploy to Kubernetes
        run: |
          kubectl set image deployment/connix-agent \
            connix-agent=${{ steps.build-and-push.outputs.image }} \
            -n connix
          kubectl rollout status deployment/connix-agent -n connix

The equivalent GitLab CI pipeline; the build job runs Docker-in-Docker and logs in to the GitLab registry before pushing:

stages:
  - build
  - deploy

variables:
  DOCKER_IMAGE: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA

build:
  stage: build
  image: docker:latest
  services:
    - docker:dind
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    - docker build -t $DOCKER_IMAGE .
    - docker push $DOCKER_IMAGE

deploy:
  stage: deploy
  image: bitnami/kubectl:latest
  script:
    - kubectl config use-context $KUBE_CONTEXT
    - kubectl set image deployment/connix-agent connix-agent=$DOCKER_IMAGE -n connix
    - kubectl rollout status deployment/connix-agent -n connix
  only:
    - main
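
If a pipeline ships a bad image, recover with a standard rollback:

# Revert to the previous ReplicaSet revision
kubectl rollout undo deployment/connix-agent -n connix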

Pod Startup Failures

# Check pod events
kubectl describe pod <pod-name> -n connix
# View container logs
kubectl logs <pod-name> -c connix-agent -n connix
# Check resource constraints
kubectl top pods -n connix

Network Connectivity

# Test service connectivity
kubectl exec -it <pod-name> -n connix -- curl connix-agent-service
# Check network policies
kubectl get networkpolicies -n connix
# Verify DNS resolution
kubectl exec -it <pod-name> -n connix -- nslookup connix-agent-service

Performance Issues

# Monitor resource usage
kubectl top pods -n connix
kubectl top nodes
# Check HPA status
kubectl get hpa -n connix
kubectl describe hpa connix-agent-hpa -n connix

Debugging

# Port forward for local debugging
kubectl port-forward deployment/connix-agent 3000:3000 -n connix
# Execute into running container
kubectl exec -it deployment/connix-agent -n connix -- /bin/sh
# Copy files from container
kubectl cp connix/connix-agent-<pod-id>:/app/logs ./logs