c77_secure_db/BEST_PRACTICES.md

20 KiB

c77_secure_db Best Practices Guide

This document provides comprehensive security best practices for implementing and maintaining c77_secure_db in production environments.

🛡️ Core Security Principles

1. Never Bypass Security Controls

-- ❌ NEVER DO THIS - Will be blocked but shows intent to bypass
INSERT INTO secure_table (name) VALUES ('data');
UPDATE secure_table SET value = 'changed' WHERE id = 1;
DELETE FROM secure_table WHERE id = 1;

-- ✅ ALWAYS DO THIS - Use secure operations
SELECT c77_secure_db_operation(jsonb_build_object(
    'schema_name', 'myapp',
    'table_name', 'secure_table',
    'operation', 'insert',
    'data', jsonb_build_object('name', 'data')
));

2. Validate All Inputs

-- ❌ BAD - No validation
SELECT c77_secure_db_operation(user_input::jsonb);

-- ✅ GOOD - Validate structure and content
DO $$
DECLARE
    -- Parse once; a malformed JSON literal raises here, before any validation runs
    v_input jsonb := user_input::jsonb;
BEGIN
    -- Reject payloads missing any of the mandatory routing fields
    IF NOT (v_input ? 'schema_name' AND v_input ? 'table_name' AND v_input ? 'operation') THEN
        RAISE EXCEPTION 'Missing required fields in operation data';
    END IF;

    -- Whitelist the operation type. NOT IN is the idiomatic form and does not
    -- rely on NOT/IN operator-precedence the way "NOT (x) IN (...)" does.
    IF (v_input->>'operation') NOT IN ('insert', 'update', 'upsert', 'delete', 'soft_delete') THEN
        RAISE EXCEPTION 'Invalid operation type';
    END IF;

    -- Proceed with validated input
    PERFORM c77_secure_db_operation(v_input);
END $$;

3. Implement Proper Error Handling

// ✅ Laravel Example - Comprehensive error handling
public function secureOperation(array $data): array
{
    try {
        $result = DB::selectOne(
            'SELECT c77_secure_db_operation(?) as result',
            [json_encode($data)]
        );
        
        $response = json_decode($result->result, true);
        
        if (!$response['success']) {
            // Log security-relevant failures
            Log::warning('Secure operation failed', [
                'operation' => $data['operation'],
                'table' => $data['table_name'],
                'error' => $response['error'],
                'operation_id' => $response['operation_id'] ?? null,
                'user_id' => auth()->id()
            ]);
            
            throw new SecureDbException($response['error']);
        }
        
        return $response;
        
    } catch (Exception $e) {
        // Never expose internal error details to users
        Log::error('Secure DB exception', [
            'error' => $e->getMessage(),
            'data' => $data,
            'user_id' => auth()->id()
        ]);
        
        throw new SecureDbException('Operation failed due to security constraints');
    }
}

🔐 Access Control Best Practices

1. Role-Based Permissions

-- Create application-specific roles
CREATE ROLE myapp_read_only;
CREATE ROLE myapp_operator; 
CREATE ROLE myapp_administrator;

-- Grant appropriate secure database roles
GRANT c77_secure_db_readonly TO myapp_read_only;
GRANT c77_secure_db_user TO myapp_operator;
GRANT c77_secure_db_admin TO myapp_administrator;

-- Assign users to the application roles above (grant the c77_secure_db_* roles
-- only to application roles, never directly to individual end users)
GRANT myapp_operator TO app_user;
GRANT myapp_read_only TO reporting_user;
GRANT myapp_administrator TO dba_user;

2. RBAC Integration

-- Define granular permissions
SELECT c77_rbac_grant_feature('data_entry_clerk', 'secure_db_insert');
SELECT c77_rbac_grant_feature('data_manager', 'secure_db_insert');
SELECT c77_rbac_grant_feature('data_manager', 'secure_db_update');
SELECT c77_rbac_grant_feature('supervisor', 'secure_db_delete');
SELECT c77_rbac_grant_feature('auditor', 'secure_db_admin');

-- Use scope-based access control
SELECT c77_rbac_assign_subject('emp_001', 'data_entry_clerk', 'department', 'sales');
SELECT c77_rbac_assign_subject('emp_002', 'data_manager', 'region', 'north_america');
SELECT c77_rbac_assign_subject('emp_003', 'supervisor', 'global', 'all');

-- Always set user context in applications
SET "c77_rbac.external_id" TO 'current_user_id';

3. Principle of Least Privilege

-- ❌ BAD - Overly broad permissions
GRANT c77_secure_db_admin TO app_user;

-- ✅ GOOD - Minimal necessary permissions
GRANT c77_secure_db_user TO app_user;

-- ❌ BAD - Global access for everyone
SELECT c77_rbac_assign_subject('user_123', 'admin', 'global', 'all');

-- ✅ GOOD - Scoped access
SELECT c77_rbac_assign_subject('user_123', 'operator', 'department', 'finance');

🔍 Data Integrity Best Practices

1. Regular Integrity Verification

-- Daily verification of critical tables
CREATE OR REPLACE FUNCTION daily_integrity_check()
RETURNS void LANGUAGE plpgsql AS $$
DECLARE
    -- Tables whose content hashes must be verified on every run
    v_tables text[] := ARRAY['users', 'transactions', 'audit_logs'];
    v_tbl    text;
    v_check  jsonb;
BEGIN
    FOREACH v_tbl IN ARRAY v_tables LOOP
        -- Compare stored content hashes against current row contents
        v_check := c77_secure_db_verify_content_hashes('myapp', v_tbl);

        -- Any mismatch is treated as possible tampering: abort loudly
        IF (v_check->>'mismatch_count')::integer > 0 THEN
            RAISE EXCEPTION 'CRITICAL: Data integrity violation detected in table %', v_tbl
                USING HINT = 'Immediate investigation required',
                      ERRCODE = 'data_corrupted';
        END IF;

        RAISE NOTICE 'Integrity check passed for table %: % records verified', 
            v_tbl, v_check->>'total_records';
    END LOOP;
END;
$$;

-- Schedule daily execution
SELECT cron.schedule('daily-integrity-check', '0 1 * * *', 'SELECT daily_integrity_check();');

2. Hash Exclusion Strategy

-- Exclude frequently changing metadata from hashes
COMMENT ON COLUMN myapp.users.content_hash IS 
'{"exclude_hash_columns": ["last_login", "login_count", "last_activity", "session_data"]}';

-- Include business-critical data in hashes
COMMENT ON COLUMN myapp.transactions.content_hash IS 
'{"exclude_hash_columns": ["created_at", "updated_at"]}';  -- Minimal exclusions

3. Tamper Detection Response

-- Automated response to tampering detection
CREATE OR REPLACE FUNCTION handle_tampering_detection(
    p_table_name text,
    p_record_id text,
    p_expected_hash text,
    p_actual_hash text
)
RETURNS void LANGUAGE plpgsql AS $$
DECLARE
    v_alert_payload jsonb;
BEGIN
    -- Persist the incident first so the record survives even if notify fails later
    INSERT INTO security_incidents (
        incident_type, table_name, record_id,
        expected_hash, actual_hash, detected_at, severity
    ) VALUES (
        'DATA_TAMPERING', p_table_name, p_record_id,
        p_expected_hash, p_actual_hash, now(), 'CRITICAL'
    );

    -- Broadcast an asynchronous alert on the security channel
    v_alert_payload := jsonb_build_object(
        'type', 'DATA_TAMPERING',
        'table', p_table_name,
        'record_id', p_record_id,
        'severity', 'CRITICAL'
    );
    PERFORM pg_notify('security_alert', v_alert_payload::text);
    
    -- Optional: Quarantine the record
    -- UPDATE myapp.table_name SET quarantined = true WHERE id = p_record_id;
END;
$$;

📊 Monitoring and Alerting Best Practices

1. Continuous Health Monitoring

-- Comprehensive health monitoring
CREATE OR REPLACE FUNCTION security_health_monitor()
RETURNS jsonb LANGUAGE plpgsql AS $$
DECLARE
    v_health jsonb;
    v_alerts jsonb[] := '{}';   -- accumulated alert objects
    v_error_rate numeric;
    v_token_count integer;
BEGIN
    -- Snapshot of extension health metrics
    SELECT c77_secure_db_health_check() INTO v_health;
    
    -- Alert when the 1-hour error rate exceeds the 5% threshold
    -- (a NULL/missing metric fails this comparison and raises no alert)
    v_error_rate := (v_health->>'error_rate_1h')::numeric;
    IF v_error_rate > 5 THEN
        v_alerts := v_alerts || jsonb_build_object(
            'type', 'HIGH_ERROR_RATE',
            'severity', 'WARNING',
            'value', v_error_rate,
            'threshold', 5,
            'message', 'Error rate exceeds acceptable threshold'
        );
    END IF;
    
    -- Alert when active tokens pile up (may mean token cleanup is failing)
    v_token_count := (v_health->>'active_tokens')::integer;
    IF v_token_count > 100 THEN
        v_alerts := v_alerts || jsonb_build_object(
            'type', 'TOKEN_BUILDUP',
            'severity', 'WARNING',
            'value', v_token_count,
            'threshold', 100,
            'message', 'Excessive active tokens may indicate issues'
        );
    END IF;
    
    RETURN jsonb_build_object(
        'health_status', v_health,
        'alerts', v_alerts,
        -- FIX: array_length() returns NULL for an empty array, which made
        -- alert_count NULL instead of 0 on a healthy system.
        'alert_count', coalesce(array_length(v_alerts, 1), 0),
        'timestamp', now()
    );
END;
$$;

2. Audit Log Analysis

-- Suspicious activity detection
-- Three anomaly detectors combined with UNION ALL. NOTE: the result's column
-- names come from the FIRST branch, so the third column is exposed to callers
-- as error_count even for the volume/off-hours branches.
CREATE OR REPLACE VIEW security_anomalies AS
-- Branch 1: users with more than 10 failed operations in the last 24 hours
SELECT 
    'High Error Rate User' as anomaly_type,
    user_name,
    count(*) as error_count,
    -- NOTE(review): the window SUM is computed after GROUP BY/HAVING, so this
    -- percentage is relative to other HIGH-error users only, not to all
    -- failures in the period — confirm that is the intended denominator.
    count(*) * 100.0 / SUM(count(*)) OVER() as error_percentage
FROM c77_secure_db_operation_audit 
WHERE created_at > now() - interval '24 hours'
  AND success = false
GROUP BY user_name
HAVING count(*) > 10

UNION ALL

-- Branch 2: burst detection — more than 100 operations by one user in an hour
SELECT 
    'Unusual Activity Volume' as anomaly_type,
    user_name,
    count(*) as operation_count,
    NULL as error_percentage
FROM c77_secure_db_operation_audit 
WHERE created_at > now() - interval '1 hour'
GROUP BY user_name
HAVING count(*) > 100

UNION ALL

-- Branch 3: more than 5 operations outside 08:00–18:00 (server-local hour of
-- created_at) in the last 24 hours
SELECT 
    'Off-Hours Activity' as anomaly_type,
    user_name,
    count(*) as operation_count,
    NULL as error_percentage
FROM c77_secure_db_operation_audit 
WHERE created_at > now() - interval '24 hours'
  AND EXTRACT(hour FROM created_at) NOT BETWEEN 8 AND 18
GROUP BY user_name
HAVING count(*) > 5;

-- Review anomalies regularly.
-- FIX: a UNION takes its output column names from the first branch, so the
-- count column is named error_count; "ORDER BY operation_count" raised
-- "column does not exist".
SELECT * FROM security_anomalies ORDER BY anomaly_type, error_count DESC;

3. Performance Monitoring

-- Performance baseline and alerting
CREATE OR REPLACE FUNCTION performance_monitor()
RETURNS jsonb LANGUAGE plpgsql AS $$
DECLARE
    v_slow_operations jsonb;   -- per-table summary of operations slower than 1s
    v_avg_time numeric;        -- mean execution time over the trailing hour
    v_p95_time numeric;        -- 95th-percentile execution time over the trailing hour
BEGIN
    -- Baseline latency metrics for the trailing hour
    SELECT 
        avg(execution_time_ms),
        percentile_cont(0.95) WITHIN GROUP (ORDER BY execution_time_ms)
    INTO v_avg_time, v_p95_time
    FROM c77_secure_db_operation_audit 
    WHERE created_at > now() - interval '1 hour'
      AND execution_time_ms IS NOT NULL;
    
    -- Identify slow operations (> 1 second), summarized per operation/table.
    -- FIX: the original nested avg()/count(*) inside jsonb_agg() with a
    -- GROUP BY, which PostgreSQL rejects ("aggregate function calls cannot
    -- be nested"). Group in a subquery first, then aggregate to JSON.
    SELECT jsonb_agg(
        jsonb_build_object(
            'operation_type', s.operation_type,
            'schema_name', s.schema_name,
            'table_name', s.table_name,
            'avg_time_ms', s.avg_time_ms,
            'operation_count', s.operation_count
        )
    ) INTO v_slow_operations
    FROM (
        SELECT operation_type,
               schema_name,
               table_name,
               avg(execution_time_ms) AS avg_time_ms,
               count(*) AS operation_count
        FROM c77_secure_db_operation_audit
        WHERE created_at > now() - interval '1 hour'
          AND execution_time_ms > 1000  -- Operations > 1 second
        GROUP BY operation_type, schema_name, table_name
    ) s;
    
    RETURN jsonb_build_object(
        'avg_execution_time_ms', v_avg_time,
        'p95_execution_time_ms', v_p95_time,
        'slow_operations', v_slow_operations,   -- NULL when nothing was slow
        'timestamp', now()
    );
END;
$$;

🔧 Maintenance Best Practices

1. Automated Maintenance Schedule

-- Comprehensive maintenance routine
-- Nightly housekeeping: token cleanup, audit-log retention, stats refresh,
-- and a final health snapshot. Returns a jsonb summary of what was done.
CREATE OR REPLACE FUNCTION automated_maintenance()
RETURNS jsonb LANGUAGE plpgsql AS $$
DECLARE
    v_tokens_cleaned integer;
    v_audit_archived integer;
    v_health jsonb;
BEGIN
    -- Clean expired tokens
    SELECT c77_secure_db_cleanup_expired_tokens() INTO v_tokens_cleaned;
    
    -- Purge audit rows older than 90 days.
    -- NOTE(review): despite the CTE name, rows are DELETED here, not archived —
    -- copy them to cold storage first if true archiving is required.
    WITH archived AS (
        DELETE FROM c77_secure_db_operation_audit 
        WHERE created_at < (now() - interval '90 days')
        RETURNING *
    )
    SELECT count(*) INTO v_audit_archived FROM archived;
    
    -- Refresh planner statistics on the extension's hot tables
    ANALYZE c77_secure_db_auth_tokens;
    ANALYZE c77_secure_db_operation_audit;
    ANALYZE c77_secure_db_secure_schemas;
    
    -- Post-maintenance health snapshot, included in the summary
    SELECT c77_secure_db_health_check() INTO v_health;
    
    RETURN jsonb_build_object(
        'tokens_cleaned', v_tokens_cleaned,
        'audit_records_archived', v_audit_archived,
        'health_status', v_health,
        'maintenance_completed_at', now()
    );
END;
$$;

-- Schedule maintenance
SELECT cron.schedule('secure-db-maintenance', '0 2 * * *', 'SELECT automated_maintenance();');

2. Backup and Recovery

#!/bin/bash
# secure-db-backup.sh - Backup script for secure database
#
# Creates a custom-format full dump plus a plain-SQL dump of the extension's
# metadata tables, verifies the full dump, and prunes backups older than 30 days.

# Fail fast on errors, unset variables, and pipeline failures
set -euo pipefail

# FIX: the original wrote backups to the current directory but pruned
# /backup/path, so nothing was ever cleaned up. Keep everything in one place.
BACKUP_DIR="/backup/path"

# Create timestamped backup
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="${BACKUP_DIR}/secure_db_backup_${TIMESTAMP}.sql"

# Full database backup (custom format: compressed, restorable with pg_restore)
pg_dump -Fc -h localhost -U postgres myapp_database > "${BACKUP_FILE}"

# Backup extension-specific data (plain SQL for easy inspection)
pg_dump -h localhost -U postgres \
    --table=c77_secure_db_operation_audit \
    --table=c77_secure_db_secure_schemas \
    myapp_database > "${BACKUP_DIR}/secure_db_metadata_${TIMESTAMP}.sql"

# Verify backup integrity (pg_restore --list only works on custom-format dumps)
if pg_restore --list "${BACKUP_FILE}" > /dev/null 2>&1; then
    echo "Backup ${BACKUP_FILE} created successfully"
else
    echo "ERROR: Backup verification failed"
    exit 1
fi

# Clean old backups (keep 30 days)
find "${BACKUP_DIR}" -name "secure_db_backup_*.sql" -mtime +30 -delete

3. Security Updates

-- Security update validation procedure
CREATE OR REPLACE FUNCTION validate_security_update()
RETURNS jsonb LANGUAGE plpgsql AS $$
DECLARE
    v_tests  jsonb;   -- aggregated results of the extension's test suite
    v_before jsonb;   -- health snapshot taken before the tests
    v_after  jsonb;   -- health snapshot taken after the tests
BEGIN
    -- Capture baseline health, run the full test suite, then re-check health
    v_before := c77_secure_db_health_check();
    v_tests  := c77_secure_db_run_all_tests();
    v_after  := c77_secure_db_health_check();

    -- Refuse to validate unless every test passed
    IF (v_tests->>'overall_status') != 'ALL_TESTS_PASSED' THEN
        RAISE EXCEPTION 'Security update validation failed: %', v_tests->>'overall_status';
    END IF;

    RETURN jsonb_build_object(
        'update_validated', true,
        'test_results', v_tests,
        'health_before', v_before,
        'health_after', v_after,
        'validation_timestamp', now()
    );
END;
$$;

🚨 Incident Response Best Practices

1. Security Incident Classification

-- Security incident severity levels
CREATE TYPE incident_severity AS ENUM ('LOW', 'MEDIUM', 'HIGH', 'CRITICAL');

-- Incident response procedures
CREATE OR REPLACE FUNCTION security_incident_response(
    p_incident_type text,
    p_severity incident_severity,
    p_details jsonb
)
RETURNS void LANGUAGE plpgsql AS $$
DECLARE
    v_payload text;
BEGIN
    -- Record the incident first so it is persisted even if notification fails
    INSERT INTO security_incidents (
        incident_type, severity, details, reported_at, status
    ) VALUES (
        p_incident_type, p_severity, p_details, now(), 'REPORTED'
    );

    -- Escalate by severity; LOW/MEDIUM incidents are logged only
    v_payload := jsonb_build_object(
        'type', p_incident_type,
        'details', p_details,
        'timestamp', now()
    )::text;

    IF p_severity = 'CRITICAL' THEN
        -- Immediate notification on the critical channel
        PERFORM pg_notify('critical_security_alert', v_payload);
    ELSIF p_severity = 'HIGH' THEN
        -- Priority notification on the high-severity channel
        PERFORM pg_notify('high_security_alert', v_payload);
    END IF;
END;
$$;

2. Forensic Data Preservation

-- Preserve forensic evidence
CREATE OR REPLACE FUNCTION preserve_forensic_evidence(
    p_table_name text,
    p_record_id text,
    p_incident_id uuid
)
RETURNS void LANGUAGE plpgsql AS $$
DECLARE
    v_record_data jsonb;
    v_auth_token uuid;
BEGIN
    -- Obtain a scoped authorization token so the secure layer permits the read
    v_auth_token := c77_secure_db_create_auth_token('forensic_preservation');
    PERFORM set_config('c77_secure_db.auth_token', v_auth_token::text, true);
    
    -- Capture the complete current state of the record.
    -- %I quoting guards against identifier injection via p_table_name.
    -- FIX: to_jsonb() yields jsonb directly; row_to_json() returned json and
    -- relied on an implicit conversion when assigned to the jsonb variable.
    -- NOTE(review): assumes the target table lives in schema 'myapp' and has
    -- an 'id' column comparable to text — confirm for all audited tables.
    EXECUTE format('SELECT to_jsonb(t) FROM %I.%I t WHERE id = $1', 'myapp', p_table_name)
    INTO v_record_data
    USING p_record_id;
    
    -- Store an immutable forensic copy linked to the incident
    INSERT INTO forensic_evidence (
        incident_id, table_name, record_id, record_data, preserved_at
    ) VALUES (
        p_incident_id, p_table_name, p_record_id, v_record_data, now()
    );
    
    -- Clear the token; set_config(..., true) is transaction-local anyway,
    -- so this is defense in depth rather than strictly required.
    PERFORM set_config('c77_secure_db.auth_token', '', true);
END;
$$;

📋 Compliance Best Practices

1. Audit Trail Requirements

-- Ensure comprehensive audit coverage
CREATE OR REPLACE FUNCTION audit_compliance_check()
RETURNS jsonb LANGUAGE plpgsql AS $$
DECLARE
    v_coverage jsonb;   -- per-table audit stats, keyed by table name
    v_gaps text[];      -- tables with no audit activity in the window
BEGIN
    -- Build a 24-hour audit coverage report for the critical tables
    WITH critical_tables AS (
        SELECT unnest(ARRAY['users', 'transactions', 'sensitive_data']) AS table_name
    ),
    audit_coverage AS (
        SELECT 
            ct.table_name,
            COUNT(a.id) as audit_count,
            MAX(a.created_at) as last_audit
        FROM critical_tables ct
        LEFT JOIN c77_secure_db_operation_audit a 
            ON ct.table_name = a.table_name
            AND a.created_at > now() - interval '24 hours'
        GROUP BY ct.table_name
    )
    SELECT jsonb_object_agg(table_name, 
        jsonb_build_object(
            'audit_count', audit_count,
            'last_audit', last_audit,
            'compliant', (audit_count > 0)
        )
    ) INTO v_coverage
    FROM audit_coverage;
    
    -- Collect names of non-compliant tables.
    -- FIX: jsonb_each() exposes columns (key, value); the original referenced
    -- a non-existent "table_name" column here, which raised
    -- "column does not exist" at runtime.
    SELECT array_agg(key)
    INTO v_gaps
    FROM jsonb_each(v_coverage) 
    WHERE NOT (value->>'compliant')::boolean;
    
    RETURN jsonb_build_object(
        'coverage_analysis', v_coverage,
        'compliance_gaps', v_gaps,
        -- array_agg over zero rows yields NULL, so "no gaps" maps to true
        'overall_compliant', (array_length(v_gaps, 1) IS NULL),
        'check_timestamp', now()
    );
END;
$$;

2. Data Retention Policies

-- Implement data retention policies
CREATE OR REPLACE FUNCTION apply_retention_policy()
RETURNS jsonb LANGUAGE plpgsql AS $$
DECLARE
    v_audit_purged integer;       -- audit rows deleted this run
    v_tokens_purged integer;      -- stale auth tokens deleted this run
    v_forensic_archived integer;  -- forensic rows flagged as archived this run
BEGIN
    -- Purge audit logs past the retention window (adjust per compliance requirements)
    DELETE FROM c77_secure_db_operation_audit
    WHERE created_at < (now() - interval '7 years');
    GET DIAGNOSTICS v_audit_purged = ROW_COUNT;

    -- Drop auth tokens older than a week
    DELETE FROM c77_secure_db_auth_tokens
    WHERE created_at < (now() - interval '7 days');
    GET DIAGNOSTICS v_tokens_purged = ROW_COUNT;

    -- Flag (do not delete) forensic evidence older than three years as archived
    UPDATE forensic_evidence
    SET archived = true
    WHERE preserved_at < (now() - interval '3 years')
      AND archived = false;
    GET DIAGNOSTICS v_forensic_archived = ROW_COUNT;

    RETURN jsonb_build_object(
        'audit_records_purged', v_audit_purged,
        'tokens_purged', v_tokens_purged,
        'forensic_records_archived', v_forensic_archived,
        'retention_policy_applied_at', now()
    );
END;
$$;

🎯 Implementation Checklist

Pre-Production Checklist

  • Security tests pass: SELECT c77_secure_db_run_all_tests()
  • RBAC permissions properly configured
  • Audit logging enabled and tested
  • Monitoring and alerting configured
  • Backup and recovery procedures tested
  • Incident response plan documented
  • Performance benchmarks established
  • Security training completed for team

Production Checklist

  • Daily integrity checks scheduled
  • Automated maintenance configured
  • Security monitoring active
  • Audit log retention policy implemented
  • Performance monitoring baseline established
  • Emergency procedures documented
  • Security incident response team identified
  • Compliance requirements validated

Ongoing Maintenance

  • Weekly hash verification
  • Monthly security audits
  • Quarterly security reviews
  • Annual security assessment
  • Regular staff security training
  • Security policy updates
  • Threat model reviews
  • Disaster recovery testing

Remember: Security is not a one-time implementation but an ongoing process requiring continuous attention, monitoring, and improvement.