
AlisChain Technical Implementation Guide

Local Development Environment

Docker Compose Setup

version: '3.8'

services:
  postgres:
    image: postgres:14
    environment:
      POSTGRES_USER: alischain
      POSTGRES_PASSWORD: local_dev
      POSTGRES_DB: alischain_dev
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./init-scripts:/docker-entrypoint-initdb.d
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U alischain"]
      interval: 10s
      timeout: 5s
      retries: 5

  redis:
    image: redis:6
    command: redis-server --appendonly yes --maxmemory 2gb --maxmemory-policy allkeys-lru
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  keycloak:
    image: quay.io/keycloak/keycloak:latest
    command: start-dev
    environment:
      KEYCLOAK_ADMIN: admin
      KEYCLOAK_ADMIN_PASSWORD: admin
      KC_DB: postgres
      KC_DB_URL_HOST: postgres
      KC_DB_URL_DATABASE: keycloak   # must be created by an init script; it is not created automatically
      KC_DB_USERNAME: alischain
      KC_DB_PASSWORD: local_dev
    ports:
      - "8080:8080"
    depends_on:
      postgres:
        condition: service_healthy

  prometheus:
    image: prom/prometheus:latest
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    ports:
      - "9090:9090"
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
      - '--web.enable-admin-api'   # required for the snapshot API used by the recovery script below

  grafana:
    image: grafana/grafana:latest
    environment:
      GF_SECURITY_ADMIN_PASSWORD: admin
    ports:
      - "3000:3000"
    volumes:
      - grafana_data:/var/lib/grafana
    depends_on:
      - prometheus

  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0
    environment:
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
    ports:
      - "9200:9200"
    volumes:
      - elasticsearch_data:/usr/share/elasticsearch/data

  kibana:
    image: docker.elastic.co/kibana/kibana:7.17.0
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch

volumes:
  postgres_data:
  redis_data:
  prometheus_data:
  grafana_data:
  elasticsearch_data:
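
With the file above saved as docker-compose.yml, the stack can be brought up and smoke-tested as follows (a sketch; service names match the Compose file, the --wait flag assumes Compose v2, and ./prometheus.yml plus ./init-scripts/ must exist on the host before the first start):

# Start the full stack and block until the defined health checks pass
docker compose up -d --wait

# Confirm PostgreSQL and Redis answer inside their containers
docker compose exec postgres pg_isready -U alischain
docker compose exec redis redis-cli ping

# Tail a single service while debugging startup issues
docker compose logs -f keycloak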

Service Configurations

PostgreSQL Configuration (postgresql.conf)

# Memory Configuration
shared_buffers = 2GB                  # 25% of RAM for dedicated server
work_mem = 20MB                       # Adjust based on complex query needs
maintenance_work_mem = 512MB          # For maintenance operations
effective_cache_size = 6GB            # Planner's estimate of total cache (~75% of RAM); not an allocation

# Checkpoint Configuration
checkpoint_completion_target = 0.9
checkpoint_timeout = 15min
max_wal_size = 2GB
min_wal_size = 1GB

# Query Planning
random_page_cost = 1.1               # Assuming SSD storage
effective_io_concurrency = 200       # Higher for SSD
default_statistics_target = 100

# Connection Settings
max_connections = 200
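
Whether the running server actually picked these values up can be checked from inside the container; a minimal sketch (container and role names follow the Compose file above):

# Show the effective values of the tuned settings
docker compose exec postgres psql -U alischain -d alischain_dev \
  -c "SHOW shared_buffers; SHOW work_mem; SHOW effective_cache_size;"

# work_mem and similar settings can be reloaded in place; shared_buffers
# requires a full server restart to change
docker compose exec postgres psql -U alischain -d alischain_dev \
  -c "SELECT pg_reload_conf();"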

Redis Configuration (redis.conf)

maxmemory 2gb
maxmemory-policy allkeys-lru
appendonly yes
appendfsync everysec
save 900 1
save 300 10
save 60 10000
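
The effective configuration can be confirmed against the running instance; a quick sketch (container name follows the Compose file above):

# Verify memory limit, eviction policy, and persistence settings
docker compose exec redis redis-cli CONFIG GET maxmemory
docker compose exec redis redis-cli CONFIG GET maxmemory-policy
docker compose exec redis redis-cli INFO persistence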

Keycloak Realm Configuration

{
  "realm": "alischain",
  "enabled": true,
  "sslRequired": "external",
  "registrationAllowed": true,
  "resetPasswordAllowed": true,
  "editUsernameAllowed": false,
  "bruteForceProtected": true,
  "permanentLockout": false,
  "maxFailureWaitSeconds": 900,
  "minimumQuickLoginWaitSeconds": 60,
  "waitIncrementSeconds": 60,
  "quickLoginCheckMilliSeconds": 1000,
  "maxDeltaTimeSeconds": 43200,
  "failureFactor": 3
}
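
Assuming the JSON above is saved as realm-alischain.json (a file name chosen here for illustration), it can be imported into the local Keycloak instance with the admin CLI; a hedged sketch using the admin credentials from the Compose file:

# Authenticate the admin CLI against the master realm
docker compose exec keycloak /opt/keycloak/bin/kcadm.sh config credentials \
  --server http://localhost:8080 --realm master --user admin --password admin

# Create the alischain realm from the JSON file (mounted or copied into the container)
docker compose exec keycloak /opt/keycloak/bin/kcadm.sh create realms \
  -f /tmp/realm-alischain.json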

Local Testing Scenarios

1. Load Testing Setup

Artillery Test Configuration (load-test.yml)

config:
  target: "http://localhost:8000"
  phases:
    - duration: 60
      arrivalRate: 5
      rampTo: 50
      name: "Warm up phase"
    - duration: 300
      arrivalRate: 50
      name: "Sustained load"
  defaults:
    headers:
      Content-Type: "application/json"
      Authorization: "Bearer {{token}}"

scenarios:
  - name: "Claim verification flow"
    flow:
      - post:
          url: "/api/v1/claims"
          json:
            text: "Test claim for verification"
            source: "https://example.com"
          capture:
            json: "$.id"
            as: "claimId"
      - think: 1
      - get:
          url: "/api/v1/claims/{{claimId}}"
      - think: 2
      - post:
          url: "/api/v1/claims/{{claimId}}/verify"
          json:
            verdict: "true"
            confidence: 0.95
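
A hedged sketch of running the test locally (Node.js is required; the report file name is arbitrary, and the {{token}} template variable must be supplied, e.g. from an environment-specific config or a processor script, since it is not defined in the file above):

# Install Artillery on demand and run the scenario, writing raw results to a JSON report
npx artillery run load-test.yml --output load-report.json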

2. Integration Testing

Python Test Suite (test_integration.py)

import pytest
import requests
import jwt
from datetime import datetime, timedelta

class TestAlisChainAPI:
    @pytest.fixture
    def auth_token(self):
        # Generate a test JWT; the signing secret must match the API's local
        # test configuration or requests will be rejected with 401
        payload = {
            'sub': 'test-user',
            'exp': datetime.utcnow() + timedelta(days=1)
        }
        return jwt.encode(payload, 'test-secret', algorithm='HS256')

    def test_claim_submission(self, auth_token):
        response = requests.post(
            'http://localhost:8000/api/v1/claims',
            headers={'Authorization': f'Bearer {auth_token}'},
            json={
                'text': 'Test claim',
                'source': 'https://example.com'
            }
        )
        assert response.status_code == 201
        assert 'id' in response.json()

    def test_verification_flow(self, auth_token):
        # Submit claim
        claim_response = requests.post(
            'http://localhost:8000/api/v1/claims',
            headers={'Authorization': f'Bearer {auth_token}'},
            json={
                'text': 'Test claim for verification',
                'source': 'https://example.com'
            }
        )
        assert claim_response.status_code == 201
        claim_id = claim_response.json()['id']

        # Submit verification
        verify_response = requests.post(
            f'http://localhost:8000/api/v1/claims/{claim_id}/verify',
            headers={'Authorization': f'Bearer {auth_token}'},
            json={
                'verdict': True,
                'confidence': 0.95
            }
        )
        assert verify_response.status_code == 200

        # Check final status
        status_response = requests.get(
            f'http://localhost:8000/api/v1/claims/{claim_id}',
            headers={'Authorization': f'Bearer {auth_token}'}
        )
        assert status_response.json()['status'] == 'verified'
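
A sketch for running this suite against the local stack (package names are standard PyPI distributions; the HS256 secret 'test-secret' is assumed to match the API's local test configuration):

# Install test dependencies and run the integration suite verbosely
pip install pytest requests pyjwt
pytest test_integration.py -v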

Detailed Migration Steps

Phase 1: Database Migration

  1. Prepare Source Database

    # Dump current database
    pg_dump -h source-host -U source-user -d source-db > dump.sql
    
    # Analyze dump size and content
    wc -l dump.sql
    grep -i "copy" dump.sql | wc -l
    
    # Create migration script
    cat > migrate.sh << 'EOF'
    #!/bin/bash
    set -e
    
    echo "Starting database migration..."
    
    # Stop application
    docker-compose down
    
    # Restore database
    psql -h target-host -U target-user -d target-db < dump.sql
    
    # Verify row counts
    psql -h target-host -U target-user -d target-db << 'EOSQL'
    SELECT schemaname, relname, n_live_tup 
    FROM pg_stat_user_tables 
    ORDER BY n_live_tup DESC;
    EOSQL
    
    echo "Migration completed"
    EOF
    
    chmod +x migrate.sh
    

  2. Verify Data Integrity

    -- Run on both source and target databases
    WITH TableCounts AS (
        SELECT schemaname, relname, n_live_tup
        FROM pg_stat_user_tables
    )
    SELECT 
        schemaname,
        relname,
        n_live_tup,
        pg_size_pretty(pg_total_relation_size(quote_ident(schemaname) || '.' || quote_ident(relname))) as total_size
    FROM TableCounts
    ORDER BY n_live_tup DESC;
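
To compare both sides mechanically rather than by eye, the same count query can be run against each host and the output diffed; a small sketch (hostnames are placeholders, and n_live_tup is a statistics-based estimate, so run ANALYZE first or use COUNT(*) where exact confirmation is needed):

# Dump sorted per-table row estimates from source and target, then diff them
psql -h source-host -U source-user -d source-db -At \
  -c "SELECT relname, n_live_tup FROM pg_stat_user_tables ORDER BY relname;" \
  > source_counts.txt
psql -h target-host -U target-user -d target-db -At \
  -c "SELECT relname, n_live_tup FROM pg_stat_user_tables ORDER BY relname;" \
  > target_counts.txt
diff source_counts.txt target_counts.txt && echo "Row-count estimates match"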
    

Phase 2: Service Migration

  1. Keycloak Migration

    # Export realm configuration (Quarkus distribution, matching the quay.io image
    # above; run while the server is stopped, since export boots its own instance)
    /opt/keycloak/bin/kc.sh export \
        --file export.json \
        --realm alischain

    # Import on new instance, overwriting an existing realm of the same name
    /opt/keycloak/bin/kc.sh import \
        --file export.json \
        --override true
    

  2. Redis Migration

    # Save current dataset to dump.rdb
    redis-cli SAVE

    # Copy dump.rdb to the new instance; stop the target Redis first, and disable
    # appendonly during the restore, otherwise the AOF (not the copied RDB file)
    # is what gets loaded on startup
    scp dump.rdb new-host:/var/lib/redis/

    # Start Redis on the new host, then verify the data
    redis-cli -h new-host INFO keyspace
    

Phase 3: Application Migration

  1. Code Deployment

    # Build new containers
    docker-compose build --no-cache

    # Roll out the new image with two app replicas; true zero downtime also
    # requires a load balancer or reverse proxy in front of the app containers
    docker-compose up -d --scale app=2 --no-deps app
    

  2. Configuration Updates

    # Update environment variables
    cat > .env << EOF
    DB_HOST=new-db-host
    REDIS_HOST=new-redis-host
    KEYCLOAK_URL=https://new-keycloak-host
    EOF
    
    # Apply configurations
    docker-compose up -d
    

Failure Recovery Procedures

1. Database Failures

Quick Recovery Script (db_recovery.sh)

#!/bin/bash
set -e

BACKUP_PATH="/path/to/backups"
# Adjust to your Compose project naming; Compose v2 uses hyphens (alischain-postgres-1)
POSTGRES_CONTAINER="alischain_postgres_1"

case "$1" in
  "backup")
    echo "Creating backup..."
    docker exec $POSTGRES_CONTAINER pg_dump -U alischain -d alischain_dev > \
      "$BACKUP_PATH/backup-$(date +%Y%m%d_%H%M%S).sql"
    ;;

  "restore")
    if [ -z "$2" ]; then
      echo "Please specify backup file"
      exit 1
    fi
    echo "Restoring from $2..."
    docker exec -i $POSTGRES_CONTAINER psql -U alischain -d alischain_dev < "$2"
    ;;

  "verify")
    echo "Verifying database integrity..."
    docker exec $POSTGRES_CONTAINER psql -U alischain -d alischain_dev \
      -c "SELECT schemaname, relname, n_live_tup FROM pg_stat_user_tables;"
    ;;

  *)
    echo "Usage: $0 {backup|restore|verify}"
    exit 1
    ;;
esac
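
Typical usage of the script, as a sketch (the backup path and file name are placeholders):

# Take a backup, confirm table counts, and rehearse a restore against the dev database
./db_recovery.sh backup
./db_recovery.sh verify
./db_recovery.sh restore /path/to/backups/backup-<timestamp>.sql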

2. Redis Failures

Redis Recovery Script (redis_recovery.sh)

#!/bin/bash
set -e

REDIS_CONTAINER="alischain_redis_1"
BACKUP_PATH="/path/to/redis/backups"

case "$1" in
  "backup")
    echo "Creating Redis backup..."
    docker exec $REDIS_CONTAINER redis-cli SAVE
    docker cp $REDIS_CONTAINER:/data/dump.rdb \
      "$BACKUP_PATH/dump-$(date +%Y%m%d_%H%M%S).rdb"
    ;;

  "restore")
    if [ -z "$2" ]; then
      echo "Please specify backup file"
      exit 1
    fi
    echo "Restoring Redis from $2..."
    docker cp "$2" $REDIS_CONTAINER:/data/dump.rdb
    docker restart $REDIS_CONTAINER
    ;;

  "verify")
    echo "Verifying Redis..."
    docker exec $REDIS_CONTAINER redis-cli INFO keyspace
    ;;

  *)
    echo "Usage: $0 {backup|restore|verify}"
    exit 1
    ;;
esac

3. Application Recovery

Application Health Check Script (health_check.sh)

#!/bin/bash
set -e

APP_URL="http://localhost:8000"
SLACK_WEBHOOK="https://hooks.slack.com/services/YOUR/WEBHOOK/URL"

check_health() {
  response=$(curl -s -w "%{http_code}" "$APP_URL/health")
  http_code=${response: -3}
  body=${response:0:${#response}-3}

  if [ "$http_code" -eq 200 ]; then
    echo "Health check passed"
    return 0
  else
    echo "Health check failed with code $http_code: $body"
    return 1
  fi
}

notify_slack() {
  curl -X POST -H 'Content-type: application/json' \
    --data "{\"text\":\"$1\"}" \
    "$SLACK_WEBHOOK"
}

if ! check_health; then
  notify_slack "🚨 Application health check failed!"

  echo "Attempting recovery..."
  docker-compose restart app

  sleep 30

  if check_health; then
    notify_slack "✅ Application recovered successfully"
  else
    notify_slack "❌ Recovery failed - manual intervention required"
    exit 1
  fi
fi
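
One simple way to run the check continuously is a loop (a cron entry or systemd timer is the more robust option in production; the install path is a placeholder):

# Re-run the health check every five minutes; '|| true' keeps the loop alive on failures
while true; do
  /opt/alischain/health_check.sh || true
  sleep 300
done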

4. Monitoring Recovery

Prometheus Recovery (prometheus_recovery.sh)

#!/bin/bash
set -e

PROMETHEUS_CONTAINER="alischain_prometheus_1"
BACKUP_PATH="/path/to/prometheus/backups"

case "$1" in
  "backup")
    echo "Creating Prometheus backup..."
    docker exec $PROMETHEUS_CONTAINER promtool tsdb snapshot \
      /prometheus $BACKUP_PATH
    ;;

  "restore")
    if [ -z "$2" ]; then
      echo "Please specify backup directory"
      exit 1
    fi
    echo "Restoring Prometheus data..."
    docker cp "$2" $PROMETHEUS_CONTAINER:/prometheus/data
    docker restart $PROMETHEUS_CONTAINER
    ;;

  "verify")
    echo "Verifying Prometheus..."
    curl -s localhost:9090/-/healthy
    ;;

  *)
    echo "Usage: $0 {backup|restore|verify}"
    exit 1
    ;;
esac
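
After a restore, it is worth confirming that historical blocks were actually loaded, not just that the process is up; a small sketch against the published port:

# Summarize the TSDB contents (series, chunks, label cardinality) after restart
curl -s localhost:9090/api/v1/status/tsdb

# Spot-check that a known metric still returns samples
curl -s 'localhost:9090/api/v1/query?query=up'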

Automated Testing Pipeline

GitHub Actions Workflow (.github/workflows/test.yml)

name: Test Suite

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:14
        env:
          POSTGRES_USER: test
          POSTGRES_PASSWORD: test
          POSTGRES_DB: test
        ports:
          - 5432:5432
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

      redis:
        image: redis:6
        ports:
          - 6379:6379
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
    - uses: actions/checkout@v4

    - name: Set up Python
      uses: actions/setup-python@v5
      with:
        python-version: '3.9'

    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install -r requirements.txt
        pip install pytest pytest-cov

    - name: Run tests
      run: |
        pytest --cov=./ --cov-report=xml

    - name: Upload coverage
      uses: codecov/codecov-action@v4
      with:
        files: ./coverage.xml
        fail_ci_if_error: true
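
The same job can be reproduced locally before pushing, either directly or via a tool such as nektos/act; the direct form is sketched below (it assumes the docker-compose services above are already running):

# Mirror the CI steps against the local Postgres and Redis containers
python -m pip install --upgrade pip
pip install -r requirements.txt pytest pytest-cov
pytest --cov=./ --cov-report=xml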

This guide covers the local environment configuration, testing scenarios, migration steps, and failure recovery procedures needed to run and operate AlisChain.


Last update: 2024-12-08