GT AI OS Community Edition v2.0.33

Security hardening release addressing CodeQL and Dependabot alerts:

- Fix stack trace exposure in error responses
- Add SSRF protection with DNS resolution checking
- Implement proper URL hostname validation (replaces substring matching)
- Add centralized path sanitization to prevent path traversal
- Fix ReDoS vulnerability in email validation regex
- Improve HTML sanitization in validation utilities
- Fix capability wildcard matching in auth utilities
- Update glob dependency to address CVE
- Add CodeQL suppression comments for verified false positives

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
44 scripts/demo/setup-demo-data.sh (Executable file)
@@ -0,0 +1,44 @@
#!/bin/bash
# GT 2.0 - Setup Model Configurations
# Enables tenant model configs for fresh deployments

set -e

echo "🚀 GT 2.0 Model Configuration Setup"
echo "==================================="

# Give the databases a moment to come up before connecting
echo "⏳ Waiting for databases..."
sleep 5

# Setup model configs in control panel
echo "📦 Setting up model configurations..."

docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin << 'EOF'
-- Clear existing demo configs for tenant 1
DELETE FROM tenant_model_configs WHERE tenant_id = 1;

-- Insert your current model configs
INSERT INTO tenant_model_configs (tenant_id, model_id, is_enabled, rate_limits, usage_constraints, priority, created_at, updated_at) VALUES
(1, 'groq/compound', true, '{"max_requests_per_hour": 1000, "max_tokens_per_request": 4000, "concurrent_requests": 5, "max_cost_per_hour": 10.0, "requests_per_minute": 100, "tokens_per_minute": 100000, "max_concurrent": 10}', '{}', 5, NOW(), NOW()),
(1, 'llama-3.1-8b-instant', true, '{"max_requests_per_hour": 1000, "max_tokens_per_request": 4000, "concurrent_requests": 5, "max_cost_per_hour": 10.0, "requests_per_minute": 100, "tokens_per_minute": 100000, "max_concurrent": 10}', '{}', 5, NOW(), NOW()),
(1, 'moonshotai/kimi-k2-instruct-0905', true, '{"max_requests_per_hour": 1000, "max_tokens_per_request": 4000, "concurrent_requests": 5, "max_cost_per_hour": 10.0, "requests_per_minute": 100, "tokens_per_minute": 100000, "max_concurrent": 10}', '{}', 5, NOW(), NOW()),
(1, 'llama-3.3-70b-versatile', true, '{"max_requests_per_hour": 1000, "max_tokens_per_request": 4000, "concurrent_requests": 5, "max_cost_per_hour": 10.0, "requests_per_minute": 10000, "tokens_per_minute": 100000, "max_concurrent": 10}', '{}', 5, NOW(), NOW()),
(1, 'openai/gpt-oss-120b', true, '{"max_requests_per_hour": 1000, "max_tokens_per_request": 4000, "concurrent_requests": 5, "max_cost_per_hour": 10.0, "requests_per_minute": 10000, "tokens_per_minute": 100000, "max_concurrent": 10}', '{}', 5, NOW(), NOW())
ON CONFLICT (tenant_id, model_id) DO UPDATE SET
    is_enabled = EXCLUDED.is_enabled,
    rate_limits = EXCLUDED.rate_limits,
    updated_at = NOW();

SELECT COUNT(*) || ' model configs configured' FROM tenant_model_configs WHERE tenant_id = 1;
EOF

echo "  ✓ Model configs ready"

echo ""
echo "✅ Model configuration complete!"
echo ""
echo "Summary:"
echo "  - 5 tenant model configurations enabled"
echo ""
echo "Your environment is ready to use!"
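The fixed `sleep 5` above can race a slow database on first boot. A bounded readiness poll along these lines would be more robust — a sketch only, reusing the same container and the `pg_isready` probe that scripts/lib/health.sh already uses:

# Sketch: poll until the admin database accepts connections (max ~60s)
for _ in $(seq 1 30); do
    if docker exec gentwo-controlpanel-postgres pg_isready -U postgres -d gt2_admin &>/dev/null; then
        break
    fi
    sleep 2
done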
291 scripts/deploy.sh (Executable file)
@@ -0,0 +1,291 @@
#!/bin/bash
# GT 2.0 Unified Deployment Script
# Platform-agnostic deployment and update system
# Supports: ARM64 (Mac M2+), x86_64 (Ubuntu), DGX (Grace ARM + Blackwell GPU)

set -e

# Script directory for sourcing libraries
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Source library functions
source "$SCRIPT_DIR/lib/common.sh"
source "$SCRIPT_DIR/lib/platform.sh"
source "$SCRIPT_DIR/lib/docker.sh"
source "$SCRIPT_DIR/lib/migrations.sh"
source "$SCRIPT_DIR/lib/health.sh"
source "$SCRIPT_DIR/lib/secrets.sh"

# Default options
DRY_RUN=false
DEV_MODE=false
SKIP_MIGRATIONS=false
SKIP_PULL=false
SKIP_CLEANUP=false
FORCE=false
PLATFORM=""

# Display help
show_help() {
    cat <<EOF
GT 2.0 Unified Deployment Script

Usage: $0 [OPTIONS]

Options:
  --platform PLATFORM   Force specific platform (arm64, x86, dgx)
  --dev                 Enable development mode (rebuild locally with hot reload)
  --dry-run             Show commands without executing
  --skip-migrations     Skip database migration checks
  --skip-pull           Skip Docker image pull
  --skip-cleanup        Skip Docker cleanup (prune volumes/images/cache)
  --force               Skip confirmation prompts
  --help                Show this help message

Modes:
  Production (default)  Pulls pre-built images from GHCR, restarts services
  Development (--dev)   Rebuilds containers locally, enables hot reload

Examples:
  # Auto-detect platform and deploy (uses GHCR images)
  $0

  # Deploy with development mode (rebuilds locally)
  $0 --dev

  # Deploy on specific platform
  $0 --platform x86

  # Dry run to see what would happen
  $0 --dry-run

  # Force update without confirmation
  $0 --force

Platforms:
  arm64   Apple Silicon (M2+)
  x86     x86_64 Linux (Ubuntu)
  dgx     NVIDIA DGX (Grace ARM + Blackwell GPU)

Environment Variables:
  PLATFORM    Override platform detection
  DEV_MODE    Enable development mode (true/false)
  DRY_RUN     Dry run mode (true/false)
  IMAGE_TAG   Docker image tag to use (default: latest)
EOF
}

# Parse command line arguments
parse_args() {
    while [[ $# -gt 0 ]]; do
        case "$1" in
            --platform)
                PLATFORM="$2"
                shift 2
                ;;
            --dev)
                DEV_MODE=true
                shift
                ;;
            --dry-run)
                DRY_RUN=true
                shift
                ;;
            --skip-migrations)
                SKIP_MIGRATIONS=true
                shift
                ;;
            --skip-pull)
                SKIP_PULL=true
                shift
                ;;
            --skip-cleanup)
                SKIP_CLEANUP=true
                shift
                ;;
            --force)
                FORCE=true
                shift
                ;;
            --help|-h)
                show_help
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                show_help
                exit 1
                ;;
        esac
    done
}

# Main deployment function
main() {
    parse_args "$@"

    log_header "GT 2.0 Unified Deployment"

    # Check root directory
    check_root_directory

    # Detect platform if not specified
    if [ -z "$PLATFORM" ]; then
        PLATFORM=$(detect_platform)
    fi

    log_info "Platform: $PLATFORM"

    # Create bind mount directories (always safe, required for fresh installs)
    mkdir -p volumes/tenants/test/tablespaces
    mkdir -p volumes/tenants/test/files

    if [ "$DEV_MODE" = "true" ]; then
        log_info "Mode: Development (hot reload enabled)"
    else
        log_info "Mode: Production"
    fi

    # Show platform info
    get_platform_info "$PLATFORM"
    echo ""

    # Check platform prerequisites
    if ! check_platform_prerequisites "$PLATFORM"; then
        exit 1
    fi

    # Generate any missing secrets (for fresh installs)
    log_info "Checking and generating secrets..."
    generate_all_secrets ".env"
    echo ""

    # Check if deployment is running
    if check_deployment_running; then
        log_info "Current deployment status:"
        show_service_status
        echo ""

        # Ask for confirmation unless forced
        if [ "$FORCE" != "true" ]; then
            if ! confirm "Continue with update and restart?"; then
                log_info "Update cancelled"
                exit 0
            fi
        fi
    else
        log_info "No running deployment found - starting fresh deployment"
    fi

    # Git status checks
    if [ -d ".git" ]; then
        log_info "Git repository information:"
        echo "Current branch: $(git branch --show-current)"
        echo "Current commit: $(git rev-parse --short HEAD)"

        # Check for uncommitted changes
        if [ -n "$(git status --porcelain)" ]; then
            log_warning "Uncommitted changes detected"
            if [ "$FORCE" != "true" ]; then
                if ! confirm "Continue anyway?"; then
                    exit 0
                fi
            fi
        fi

        # Offer to pull latest
        if [ "$FORCE" != "true" ]; then
            if confirm "Pull latest from git?"; then
                log_info "Pulling latest changes..."
                git pull
            fi
        fi
    fi
    echo ""

    # Pull Docker images
    if [ "$SKIP_PULL" != "true" ]; then
        pull_images
        echo ""
    fi

    # Restart services
    if [ "$DEV_MODE" = "true" ]; then
        log_header "Rebuilding and Restarting Services (Dev Mode)"
    else
        log_header "Restarting Services with Pulled Images"
    fi

    # Remove all existing gentwo-* containers to prevent name conflicts
    # This handles cases where the project name changed (gt2 -> gt-20 or vice versa)
    cleanup_conflicting_containers

    # Database services must start first (migrations depend on them)
    DB_SERVICES=(
        "postgres"
        "tenant-postgres-primary"
    )

    # Application services (use pulled images in prod, rebuild in dev)
    APP_SERVICES=(
        "control-panel-backend"
        "control-panel-frontend"
        "tenant-backend"
        "tenant-app"
        "resource-cluster"
    )

    # Other infrastructure services
    INFRA_SERVICES=(
        "vllm-embeddings"
    )

    # Start database services first and wait for them to be healthy
    log_info "Starting database services..."
    for service in "${DB_SERVICES[@]}"; do
        restart_service "$service"
    done

    # Give the databases time to settle before running migrations
    log_info "Waiting for databases to be healthy..."
    wait_for_stability 15

    # Run database migrations (the migration helpers re-verify the databases are up)
    if [ "$SKIP_MIGRATIONS" != "true" ]; then
        if ! run_all_migrations; then
            log_error "Migrations failed - aborting deployment"
            exit 1
        fi
        echo ""
    fi

    # Restart/rebuild application services based on mode
    for service in "${APP_SERVICES[@]}"; do
        restart_app_service "$service"
    done

    # Restart other infrastructure services
    for service in "${INFRA_SERVICES[@]}"; do
        restart_service "$service"
    done

    # Wait for stability
    wait_for_stability 10

    # Health check
    if ! check_all_services_healthy; then
        log_error "Health check failed"
        exit 1
    fi

    # Clean up unused Docker resources
    if [ "$SKIP_CLEANUP" != "true" ]; then
        cleanup_docker_resources
    fi

    # Show final status
    show_access_points
}

# Run main function
main "$@"
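A typical first pass on a Linux host might look like the following (flags as defined in parse_args above; a dry run only prints the commands it would execute):

./scripts/deploy.sh --dry-run --platform x86   # preview the planned commands
./scripts/deploy.sh --platform x86 --force     # deploy without confirmation prompts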
61 scripts/lib/common.sh (Executable file)
@@ -0,0 +1,61 @@
#!/bin/bash
# GT 2.0 Common Library Functions
# Shared utilities for deployment scripts

# Color codes for output formatting
export RED='\033[0;31m'
export GREEN='\033[0;32m'
export YELLOW='\033[1;33m'
export BLUE='\033[0;34m'
export NC='\033[0m' # No Color

# Logging functions with timestamps
log_info() {
    echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')] ℹ️ $*${NC}"
}

log_success() {
    echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')] ✅ $*${NC}"
}

log_warning() {
    echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] ⚠️ $*${NC}"
}

log_error() {
    echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ❌ $*${NC}"
}

log_header() {
    echo ""
    echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
    echo -e "${BLUE}$*${NC}"
    echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
    echo ""
}

# Check if running from the GT-2.0 root directory
check_root_directory() {
    if [ ! -f "docker-compose.yml" ]; then
        log_error "docker-compose.yml not found"
        echo "Please run this script from the GT-2.0 root directory"
        exit 1
    fi
}

# Prompt for user confirmation
confirm() {
    local message="$1"
    read -p "$(echo -e "${YELLOW}${message} (y/N) ${NC}")" -n 1 -r
    echo
    [[ $REPLY =~ ^[Yy]$ ]]
}
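Because confirm() returns the exit status of the regex test, it composes directly with if; a minimal usage sketch (the prompt text is illustrative):

if confirm "Continue with update and restart?"; then
    log_info "Proceeding..."
fi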

# Check if deployment is running
check_deployment_running() {
    if ! docker ps --filter "name=gentwo-" --format "{{.Names}}" | grep -q "gentwo-"; then
        log_warning "No running deployment found"
        return 1
    fi
    return 0
}
330 scripts/lib/docker.sh (Executable file)
@@ -0,0 +1,330 @@
#!/bin/bash
# GT 2.0 Docker Compose Wrapper Functions
# Unified interface for platform-specific compose operations

# ==============================================
# VOLUME MIGRATION (DEPRECATED - Removed Dec 2025)
# This function has been removed because:
#   1. It could overwrite good data with stale data from old volumes
#   2. Docker Compose handles volumes naturally - let it manage them
#   3. Manual migration is safer for deployments with custom volume names
#
# For manual migration (if needed):
#   1. docker compose down
#   2. docker run --rm -v old_vol:/src -v new_vol:/dst alpine cp -a /src/. /dst/
#   3. docker compose up -d
# ==============================================

# migrate_volumes_if_needed() - REMOVED
# Function body removed to prevent accidental data loss

# ==============================================
# PROJECT MIGRATION (DEPRECATED - Removed Dec 2025)
# This function has been removed because:
#   1. It aggressively stops/removes all containers
#   2. Different project names don't cause issues if volumes persist
#   3. Docker Compose derives the project name from the directory naturally
#
# Containers from different project names can coexist. If you need
# to clean up old containers manually:
#   docker ps -a --format '{{.Names}}' | grep gentwo- | xargs docker rm -f
# ==============================================

# migrate_project_if_needed() - REMOVED
# Function body removed to prevent accidental container/data loss

# ==============================================
# CONTAINER CLEANUP
# Removes existing containers to prevent name conflicts during restart
# ==============================================

remove_existing_container() {
    local service="$1"

    # Get the container name from the compose config for this service
    local container_name=$(dc config --format json 2>/dev/null | jq -r ".services[\"$service\"].container_name // empty" 2>/dev/null)

    if [ -n "$container_name" ]; then
        # Check if the container exists (running or stopped)
        if docker ps -a --format '{{.Names}}' | grep -q "^${container_name}$"; then
            log_info "Removing existing container $container_name..."
            docker rm -f "$container_name" 2>/dev/null || true
        fi
    fi
}

# Remove ALL gentwo-* containers to handle project name conflicts
# This is needed when switching between project names (gt2 vs gt-20)
cleanup_conflicting_containers() {
    # Skip in dry-run mode
    if [ "$DRY_RUN" = "true" ]; then
        echo "[DRY RUN] Would remove all gentwo-* containers"
        return 0
    fi

    log_info "Checking for conflicting containers..."

    local containers=$(docker ps -a --format '{{.Names}}' | grep "^gentwo-" || true)

    if [ -n "$containers" ]; then
        log_info "Removing existing gentwo-* containers to prevent conflicts..."
        for container in $containers; do
            docker rm -f "$container" 2>/dev/null || true
        done
        log_success "Removed conflicting containers"
    fi
}

# ==============================================
# DOCKER COMPOSE WRAPPER
# ==============================================

# Execute docker compose with platform-specific files
# No explicit project name - Docker Compose derives it from the directory name
# This ensures existing volumes (gt-20_*, gt2_*, etc.) continue to be used
dc() {
    local platform="${PLATFORM:-$(detect_platform)}"
    local compose_files=$(get_compose_file "$platform" "$DEV_MODE")

    if [ "$DRY_RUN" = "true" ]; then
        echo "[DRY RUN] docker compose -f $compose_files $*"
        return 0
    fi

    # Pipe 'n' to auto-answer "no" to volume recreation prompts
    # This handles cases where bind mount paths don't match existing volumes
    yes n 2>/dev/null | docker compose -f $compose_files "$@"
}
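On a GPU-less x86 production host, for example, `dc up -d postgres` expands to roughly the following (illustrative; get_compose_file in platform.sh determines the exact file list):

yes n | docker compose -f docker-compose.yml -f docker-compose.x86.yml up -d postgres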

# Detect IMAGE_TAG from the current git branch if not already set
detect_image_tag() {
    # If IMAGE_TAG is already set, use it
    if [ -n "$IMAGE_TAG" ]; then
        log_info "Using IMAGE_TAG=$IMAGE_TAG (from environment)"
        return 0
    fi

    # Detect the current git branch
    local branch=$(git branch --show-current 2>/dev/null)

    case "$branch" in
        main|master)
            IMAGE_TAG="latest"
            ;;
        dev|develop)
            IMAGE_TAG="dev"
            ;;
        *)
            # Feature branches: sanitize the branch name for a Docker tag
            # Docker tags only allow [a-zA-Z0-9_.-], so replace / with -
            IMAGE_TAG="${branch//\//-}"
            ;;
    esac

    export IMAGE_TAG
    log_info "Auto-detected IMAGE_TAG=$IMAGE_TAG (branch: $branch)"
}
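The resulting mapping (the feature branch shown is hypothetical):

# main or master      -> IMAGE_TAG=latest
# dev or develop      -> IMAGE_TAG=dev
# feature/ssrf-guard  -> IMAGE_TAG=feature-ssrf-guard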

# Try to authenticate Docker with GHCR using the gh CLI (optional, for private repos)
# Returns 0 if auth succeeds, 1 if not available or it fails
try_ghcr_auth() {
    # Skip in dry-run mode
    if [ "$DRY_RUN" = "true" ]; then
        echo "[DRY RUN] Try GHCR authentication"
        return 0
    fi

    # Check if the gh CLI is available
    if ! command -v gh &>/dev/null; then
        log_info "gh CLI not installed - skipping GHCR auth"
        return 1
    fi

    # Check if gh is authenticated
    if ! gh auth status &>/dev/null; then
        log_info "gh CLI not authenticated - skipping GHCR auth"
        return 1
    fi

    # Get the GitHub username
    local gh_user=$(gh api user --jq '.login' 2>/dev/null)
    if [ -z "$gh_user" ]; then
        return 1
    fi

    # Get the token and authenticate Docker
    local gh_token=$(gh auth token 2>/dev/null)
    if [ -z "$gh_token" ]; then
        return 1
    fi

    if echo "$gh_token" | docker login ghcr.io -u "$gh_user" --password-stdin &>/dev/null; then
        log_success "Authenticated with GHCR as $gh_user"
        return 0
    fi

    return 1
}

# Pull images with a simplified auth flow:
#   1. Try pull without auth (works for public repos)
#   2. If auth error, try gh CLI auth and retry
#   3. If it still fails, fall back to local build
pull_images() {
    # Auto-detect the image tag from the git branch
    detect_image_tag

    log_info "Pulling Docker images (tag: $IMAGE_TAG)..."

    # Skip in dry-run mode
    if [ "$DRY_RUN" = "true" ]; then
        echo "[DRY RUN] docker compose pull"
        return 0
    fi

    # First attempt: try pull without auth (works for public repos)
    local pull_output
    pull_output=$(dc pull 2>&1) && {
        log_success "Successfully pulled images"
        return 0
    }

    # Check if it's an auth error (private repo)
    if echo "$pull_output" | grep -qi "unauthorized\|denied\|authentication required\|403"; then
        log_info "Registry requires authentication, attempting GHCR login..."

        # Try to authenticate with the gh CLI
        if try_ghcr_auth; then
            # Retry the pull after auth
            if dc pull 2>&1; then
                log_success "Successfully pulled images after authentication"
                return 0
            fi
        fi

        log_warning "Could not pull from registry - will build images locally"
        log_info "For faster deploys, install gh CLI and run: gh auth login"
        return 1
    fi

    # Check for rate limiting
    if echo "$pull_output" | grep -qi "rate limit\|too many requests"; then
        log_warning "Rate limited - continuing with existing images"
        return 0
    fi

    # Other error - log and continue
    log_warning "Pull failed: ${pull_output:0:200}"
    log_info "Continuing with existing or locally built images"
    return 1
}

# Restart application service (uses pulled images by default, --build in dev mode)
restart_app_service() {
    local service="$1"
    local build_flag=""

    # Only use --build in dev mode (to apply local code changes)
    # In production mode, use pre-pulled GHCR images
    if [ "$DEV_MODE" = "true" ]; then
        build_flag="--build"
        log_info "Rebuilding and restarting $service (dev mode)..."
    else
        log_info "Restarting $service with pulled image..."
    fi

    # In dry-run mode, just show the command that would be executed
    if [ "$DRY_RUN" = "true" ]; then
        dc up -d $build_flag "$service"
        return 0
    fi

    # Remove the existing container to prevent name conflicts
    remove_existing_container "$service"

    # Start/restart the service regardless of current state:
    # dc up -d handles both starting new and restarting existing containers.
    # Use --force-recreate to ensure the container uses the new image.
    dc up -d --force-recreate $build_flag "$service" || {
        log_warning "Service $service may not be defined in compose files, skipping"
        return 0
    }
    sleep 2
    return 0
}

# Legacy alias for backward compatibility
rebuild_service() {
    restart_app_service "$@"
}

# Restart service without rebuild
restart_service() {
    local service="$1"

    log_info "Restarting $service..."

    # In dry-run mode, just show the command
    if [ "$DRY_RUN" = "true" ]; then
        dc up -d "$service"
        return 0
    fi

    # Remove the existing container to prevent name conflicts
    remove_existing_container "$service"

    # Use dc up -d, which handles both starting and restarting.
    # Use --force-recreate to ensure the container is recreated cleanly.
    dc up -d --force-recreate "$service" || {
        log_warning "Service $service may not be defined in compose files, skipping"
        return 0
    }
    sleep 2
    return 0
}

# Check service health
check_service_health() {
    log_info "Checking service health..."

    local unhealthy=$(dc ps --format json | jq -r 'select(.Health == "unhealthy") | .Service' 2>/dev/null || true)

    if [ -n "$unhealthy" ]; then
        log_error "Unhealthy services detected: $unhealthy"
        echo "Check logs with: docker compose logs $unhealthy"
        return 1
    fi

    log_success "All services healthy"
    return 0
}

# Display service status
show_service_status() {
    log_info "Service Status:"
    dc ps --format "table {{.Service}}\t{{.Status}}"
}

# Clean up unused Docker resources after deployment
cleanup_docker_resources() {
    log_info "Cleaning up unused Docker resources..."

    if [ "$DRY_RUN" = "true" ]; then
        echo "[DRY RUN] docker image prune -f"
        echo "[DRY RUN] docker builder prune -f"
        return 0
    fi

    # NOTE: Volume prune removed - too risky; it can delete important data
    # if containers were stopped earlier in the deployment process

    # Remove dangling images (untagged, not used by any container)
    local images_removed=$(docker image prune -f 2>/dev/null | grep "Total reclaimed space" || echo "0B")

    # Remove build cache
    local cache_removed=$(docker builder prune -f 2>/dev/null | grep "Total reclaimed space" || echo "0B")

    log_success "Cleanup complete"
    log_info "  Images: $images_removed"
    log_info "  Build cache: $cache_removed"
}
73 scripts/lib/health.sh (Executable file)
@@ -0,0 +1,73 @@
#!/bin/bash
# GT 2.0 Health Check and Service Status Functions
# Verify service availability and display access points

# Wait for services to stabilize
wait_for_stability() {
    local wait_time="${1:-10}"
    log_info "Waiting for services to stabilize..."
    sleep "$wait_time"
}

# Check if all services are healthy
check_all_services_healthy() {
    check_service_health
}

# Display access points
show_access_points() {
    echo ""
    log_success "Deployment Complete!"
    echo ""
    echo "🌐 Access Points:"
    echo "  • Control Panel: http://localhost:3001"
    echo "  • Tenant App:    http://localhost:3002"
    echo ""
    echo "📊 Service Status:"
    show_service_status
    echo ""
    echo "📊 View Logs: docker compose logs -f"
    echo ""
}

# Comprehensive health check with detailed output
health_check_detailed() {
    log_header "Health Check"

    # Check PostgreSQL databases
    log_info "Checking PostgreSQL databases..."
    if docker exec gentwo-controlpanel-postgres pg_isready -U postgres -d gt2_admin &>/dev/null; then
        log_success "Admin database: healthy"
    else
        log_error "Admin database: unhealthy"
    fi

    if docker exec gentwo-tenant-postgres-primary pg_isready -U postgres -d gt2_tenants &>/dev/null; then
        log_success "Tenant database: healthy"
    else
        log_error "Tenant database: unhealthy"
    fi

    # Check backend services
    log_info "Checking backend services..."
    if curl -sf http://localhost:8001/health &>/dev/null; then
        log_success "Control Panel backend: healthy"
    else
        log_warning "Control Panel backend: not responding"
    fi

    if curl -sf http://localhost:8002/health &>/dev/null; then
        log_success "Tenant backend: healthy"
    else
        log_warning "Tenant backend: not responding"
    fi

    if curl -sf http://localhost:8004/health &>/dev/null; then
        log_success "Resource cluster: healthy"
    else
        log_warning "Resource cluster: not responding"
    fi

    # Check overall container health
    check_all_services_healthy
}
473 scripts/lib/migrations.sh (Executable file)
@@ -0,0 +1,473 @@
#!/bin/bash
# GT 2.0 Database Migration Functions
# Idempotent migration checks and execution for admin and tenant databases

# Check if the admin postgres container is running
check_admin_db_running() {
    docker ps --filter "name=gentwo-controlpanel-postgres" --filter "status=running" --format "{{.Names}}" | grep -q "gentwo-controlpanel-postgres"
}

# Check if the tenant postgres container is running
check_tenant_db_running() {
    docker ps --filter "name=gentwo-tenant-postgres-primary" --filter "status=running" --format "{{.Names}}" | grep -q "gentwo-tenant-postgres-primary"
}

# Wait for a container to be healthy (up to 60 seconds)
wait_for_container_healthy() {
    local container="$1"
    local max_wait=60
    local waited=0

    log_info "Waiting for $container to be healthy..."
    while [ $waited -lt $max_wait ]; do
        local status=$(docker inspect --format='{{.State.Health.Status}}' "$container" 2>/dev/null || echo "none")
        if [ "$status" = "healthy" ]; then
            log_success "$container is healthy"
            return 0
        fi
        # Also accept running containers without a healthcheck
        local running=$(docker inspect --format='{{.State.Running}}' "$container" 2>/dev/null || echo "false")
        if [ "$running" = "true" ] && [ "$status" = "none" ]; then
            sleep 5 # Give it a few seconds to initialize
            log_success "$container is running"
            return 0
        fi
        sleep 2
        waited=$((waited + 2))
    done
    log_error "$container failed to become healthy after ${max_wait}s"
    return 1
}

# Ensure the admin database is running
ensure_admin_db_running() {
    if check_admin_db_running; then
        return 0
    fi

    log_info "Starting admin database containers..."
    dc up -d postgres 2>/dev/null || {
        log_error "Failed to start admin database"
        return 1
    }

    wait_for_container_healthy "gentwo-controlpanel-postgres" || return 1
    return 0
}

# Ensure the tenant database is running
ensure_tenant_db_running() {
    if check_tenant_db_running; then
        return 0
    fi

    log_info "Starting tenant database containers..."
    dc up -d tenant-postgres-primary 2>/dev/null || {
        log_error "Failed to start tenant database"
        return 1
    }

    wait_for_container_healthy "gentwo-tenant-postgres-primary" || return 1
    return 0
}

# Run an admin database migration
run_admin_migration() {
    local migration_num="$1"
    local migration_file="$2"
    local check_func="$3"

    # Run the check function if provided
    if [ -n "$check_func" ] && type "$check_func" &>/dev/null; then
        if ! $check_func; then
            return 0 # Migration already applied
        fi
    fi

    log_info "Applying migration $migration_num..."

    if [ ! -f "$migration_file" ]; then
        log_error "Migration script not found: $migration_file"
        echo "Run: git pull"
        return 1
    fi

    if docker exec -i gentwo-controlpanel-postgres psql -U postgres -d gt2_admin < "$migration_file"; then
        log_success "Migration $migration_num applied successfully"
        return 0
    else
        log_error "Migration $migration_num failed"
        return 1
    fi
}

# Run a tenant database migration
run_tenant_migration() {
    local migration_num="$1"
    local migration_file="$2"
    local check_func="$3"

    # Run the check function if provided
    if [ -n "$check_func" ] && type "$check_func" &>/dev/null; then
        if ! $check_func; then
            return 0 # Migration already applied
        fi
    fi

    log_info "Applying migration $migration_num..."

    if [ ! -f "$migration_file" ]; then
        log_error "Migration script not found: $migration_file"
        echo "Run: git pull"
        return 1
    fi

    if docker exec -i gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants < "$migration_file"; then
        log_success "Migration $migration_num applied successfully"
        return 0
    else
        log_error "Migration $migration_num failed"
        return 1
    fi
}

# Admin migration checks
# Each check succeeds (returns 0) when its migration still needs to run
check_migration_006() {
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='tenants' AND column_name='frontend_url');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}
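The checks that follow all repeat the same psql probe against information_schema; a shared helper along these lines (a hypothetical refactor, not something this commit introduces) would let each check shrink to one line:

# Hypothetical helper: succeeds when a column exists in the admin database
admin_column_exists() {
    local table="$1" column="$2"
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='${table}' AND column_name='${column}');" 2>/dev/null || echo "false")
    [ "$exists" = "t" ]
}
# check_migration_006 would then reduce to: ! admin_column_exists tenants frontend_url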

check_migration_008() {
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='password_reset_rate_limits' AND column_name='ip_address');" 2>/dev/null || echo "false")
    [ "$exists" = "t" ]
}

check_migration_009() {
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='users' AND column_name='tfa_enabled');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_010() {
    local count=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT COUNT(*) FROM model_configs WHERE (context_window IS NULL OR max_tokens IS NULL) AND provider = 'groq';" 2>/dev/null || echo "error")
    [ "$count" != "0" ] && [ "$count" != "error" ] && [ -n "$count" ]
}

check_migration_011() {
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='public' AND table_name='system_versions');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_012() {
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='tenants' AND column_name='optics_enabled');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_013() {
    # Returns true (needs migration) if the old column exists
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='model_configs' AND column_name='cost_per_1k_input');" 2>/dev/null || echo "false")
    [ "$exists" = "t" ]
}

check_migration_014() {
    # Returns true (needs migration) if any Groq model has NULL or 0 pricing
    local count=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT COUNT(*) FROM model_configs WHERE provider = 'groq' AND (cost_per_million_input IS NULL OR cost_per_million_input = 0 OR cost_per_million_output IS NULL OR cost_per_million_output = 0);" 2>/dev/null || echo "0")
    [ "$count" != "0" ] && [ -n "$count" ]
}

check_migration_015() {
    # Returns true (needs migration) if pricing is outdated:
    # gpt-oss-120b still has the old pricing ($1.20) instead of the new ($0.15)
    local price=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT cost_per_million_input FROM model_configs WHERE model_id LIKE '%gpt-oss-120b%' LIMIT 1;" 2>/dev/null || echo "0")
    # Needs migration if the price is > 1.0 (old pricing was $1.20)
    [ "$(echo "$price > 1.0" | bc -l 2>/dev/null || echo "0")" = "1" ]
}

check_migration_016() {
    # Returns true (needs migration) if the is_compound column doesn't exist
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='model_configs' AND column_name='is_compound');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_017() {
    # Returns true (needs migration) if compound pricing is incorrect (> $0.50 input means old pricing)
    local price=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT cost_per_million_input FROM model_configs WHERE model_id LIKE '%compound%' AND model_id NOT LIKE '%mini%' LIMIT 1;" 2>/dev/null || echo "0")
    [ "$(echo "$price > 0.50" | bc -l 2>/dev/null || echo "0")" = "1" ]
}

check_migration_018() {
    # Returns true (needs migration) if the monthly_budget_cents column doesn't exist
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='tenants' AND column_name='monthly_budget_cents');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_019() {
    # Returns true (needs migration) if the embedding_usage_logs table doesn't exist
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='public' AND table_name='embedding_usage_logs');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_020() {
    # Returns true (needs migration) if:
    #   1. The GROQ_API_KEY env var exists and is not a placeholder
    #   2. AND the test-company tenant exists
    #   3. AND the groq key is NOT already in the database for test-company

    # Check if the GROQ_API_KEY env var exists
    local groq_key="${GROQ_API_KEY:-}"
    if [ -z "$groq_key" ] || [ "$groq_key" = "gsk_your_actual_groq_api_key_here" ] || [ "$groq_key" = "gsk_placeholder" ]; then
        # No valid env key to migrate
        return 1
    fi

    # Check if the test-company tenant exists and already has a groq key
    local has_key=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT 1 FROM tenants WHERE domain = 'test-company' AND api_keys IS NOT NULL AND api_keys->>'groq' IS NOT NULL AND api_keys->'groq'->>'key' IS NOT NULL);" 2>/dev/null || echo "false")

    # If the tenant already has a key, no migration is needed
    [ "$has_key" != "t" ]
}

check_migration_021() {
    # Returns true (needs migration) if NVIDIA models don't exist in model_configs
    local count=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT COUNT(*) FROM model_configs WHERE provider = 'nvidia';" 2>/dev/null || echo "0")
    [ "$count" = "0" ] || [ -z "$count" ]
}

check_migration_022() {
    # Returns true (needs migration) if the sessions table doesn't exist
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='public' AND table_name='sessions');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_023() {
    # Returns true (needs migration) if the model_configs.id UUID column doesn't exist
    # This migration adds a proper UUID primary key instead of using the model_id string
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='public' AND table_name='model_configs' AND column_name='id' AND data_type='uuid');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_024() {
    # Returns true (needs migration) if model_configs still has a unique constraint on model_id alone
    # (it should be unique on model_id + provider instead)
    local exists=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.table_constraints WHERE constraint_name='model_configs_model_id_unique' AND table_name='model_configs' AND table_schema='public');" 2>/dev/null || echo "false")
    [ "$exists" = "t" ]
}

check_migration_025() {
    # Returns true (needs migration) if the old nvidia model format exists (nvidia/meta-* prefix)
    local count=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT COUNT(*) FROM model_configs WHERE provider = 'nvidia' AND model_id LIKE 'nvidia/meta-%';" 2>/dev/null || echo "0")
    [ "$count" != "0" ] && [ -n "$count" ]
}

check_migration_026() {
    # Returns true (needs migration) if the old format exists (moonshot-ai with hyphen instead of moonshotai)
    local count=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT COUNT(*) FROM model_configs WHERE provider = 'nvidia' AND model_id LIKE 'moonshot-ai/%';" 2>/dev/null || echo "0")
    [ "$count" != "0" ] && [ -n "$count" ]
}

check_migration_027() {
    # Returns true (needs migration) if any tenant is missing NVIDIA model assignments
    # Counts tenants that don't have ALL active nvidia models assigned
    local nvidia_count=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT COUNT(*) FROM model_configs WHERE provider = 'nvidia' AND is_active = true;" 2>/dev/null || echo "0")

    if [ "$nvidia_count" = "0" ] || [ -z "$nvidia_count" ]; then
        return 1 # No nvidia models, nothing to assign
    fi

    # Check if any tenant is missing nvidia assignments
    local missing=$(docker exec gentwo-controlpanel-postgres psql -U postgres -d gt2_admin -tAc \
        "SELECT COUNT(*) FROM tenants t WHERE NOT EXISTS (
            SELECT 1 FROM tenant_model_configs tmc
            JOIN model_configs mc ON mc.id = tmc.model_config_id
            WHERE tmc.tenant_id = t.id AND mc.provider = 'nvidia'
        );" 2>/dev/null || echo "0")

    [ "$missing" != "0" ] && [ -n "$missing" ]
}

# Tenant migration checks
check_migration_T001() {
    local exists=$(docker exec gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='tenant_test_company' AND table_name='tenants');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_T002() {
    local exists=$(docker exec gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='tenant_test_company' AND table_name='team_memberships');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_T002B() {
    local exists=$(docker exec gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.columns WHERE table_schema='tenant_test_company' AND table_name='team_memberships' AND column_name='status');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_T003() {
    local exists=$(docker exec gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='tenant_test_company' AND table_name='team_resource_shares');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_T005() {
    local exists=$(docker exec gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants -tAc \
        "SET search_path TO tenant_test_company; SELECT EXISTS (SELECT 1 FROM pg_constraint WHERE conrelid = 'team_memberships'::regclass AND conname = 'check_team_permission' AND pg_get_constraintdef(oid) LIKE '%manager%');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_T006() {
    local exists=$(docker exec gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='tenant_test_company' AND table_name='auth_logs');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

check_migration_T009() {
    # Returns true (needs migration) if the categories table doesn't exist
    local exists=$(docker exec gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants -tAc \
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='tenant_test_company' AND table_name='categories');" 2>/dev/null || echo "false")
    [ "$exists" != "t" ]
}

# Run all admin migrations
run_admin_migrations() {
    log_header "Admin Database Migrations"

    # Ensure the admin database is running (start if needed)
    if ! ensure_admin_db_running; then
        log_error "Could not start admin database, skipping admin migrations"
        return 1
    fi

    run_admin_migration "006" "scripts/migrations/006_add_tenant_frontend_url.sql" "check_migration_006" || return 1
    run_admin_migration "008" "scripts/migrations/008_remove_ip_address_from_rate_limits.sql" "check_migration_008" || return 1
    run_admin_migration "009" "scripts/migrations/009_add_tfa_schema.sql" "check_migration_009" || return 1
    run_admin_migration "010" "scripts/migrations/010_update_model_context_windows.sql" "check_migration_010" || return 1
    run_admin_migration "011" "scripts/migrations/011_add_system_management_tables.sql" "check_migration_011" || return 1
    run_admin_migration "012" "scripts/migrations/012_add_optics_enabled.sql" "check_migration_012" || return 1
    run_admin_migration "013" "scripts/migrations/013_rename_cost_columns.sql" "check_migration_013" || return 1
    run_admin_migration "014" "scripts/migrations/014_backfill_groq_pricing.sql" "check_migration_014" || return 1
    run_admin_migration "015" "scripts/migrations/015_update_groq_pricing_dec_2025.sql" "check_migration_015" || return 1
    run_admin_migration "016" "scripts/migrations/016_add_is_compound_column.sql" "check_migration_016" || return 1
    run_admin_migration "017" "scripts/migrations/017_fix_compound_pricing.sql" "check_migration_017" || return 1
    run_admin_migration "018" "scripts/migrations/018_add_budget_storage_pricing.sql" "check_migration_018" || return 1
    run_admin_migration "019" "scripts/migrations/019_add_embedding_usage.sql" "check_migration_019" || return 1

    # Migration 020: Import GROQ_API_KEY from the environment into the database (Python script)
    # This is a one-time migration for existing installations
    if check_migration_020 2>/dev/null; then
        log_info "Applying migration 020 (API key migration)..."
        if [ -f "scripts/migrations/020_migrate_env_api_keys.py" ]; then
            # Run the Python migration script
            if python3 scripts/migrations/020_migrate_env_api_keys.py; then
                log_success "Migration 020 applied successfully"
            else
                log_warning "Migration 020 skipped or failed (this is OK for fresh installs)"
            fi
        else
            log_warning "Migration 020 script not found, skipping"
        fi
    fi

    # Migration 021: Add NVIDIA NIM models to model_configs (Issue #266)
    run_admin_migration "021" "scripts/migrations/021_add_nvidia_models.sql" "check_migration_021" || return 1

    # Migration 022: Add sessions table for OWASP/NIST compliant session management (Issue #264)
    run_admin_migration "022" "scripts/migrations/022_add_session_management.sql" "check_migration_022" || return 1

    # Migration 023: Add a UUID primary key to model_configs (fixes using the model_id string as PK)
    run_admin_migration "023" "scripts/migrations/023_add_uuid_primary_key_to_model_configs.sql" "check_migration_023" || return 1

    # Migration 024: Allow the same model_id with different providers
    run_admin_migration "024" "scripts/migrations/024_allow_same_model_id_different_providers.sql" "check_migration_024" || return 1

    # Migration 025: Fix NVIDIA model names to match the API format
    run_admin_migration "025" "scripts/migrations/025_fix_nvidia_model_names.sql" "check_migration_025" || return 1

    # Migration 026: Fix NVIDIA model_ids to the exact API format
    run_admin_migration "026" "scripts/migrations/026_fix_nvidia_model_ids_api_format.sql" "check_migration_026" || return 1

    # Migration 027: Ensure NVIDIA models are assigned to all tenants
    # This fixes partial 021 migrations where models were added but not assigned
    run_admin_migration "027" "scripts/migrations/027_assign_nvidia_models_to_tenants.sql" "check_migration_027" || return 1

    log_success "All admin migrations complete"
    return 0
}

# Run all tenant migrations
run_tenant_migrations() {
    log_header "Tenant Database Migrations"

    # Ensure the tenant database is running (start if needed)
    if ! ensure_tenant_db_running; then
        log_error "Could not start tenant database, skipping tenant migrations"
        return 1
    fi

    run_tenant_migration "T001" "scripts/postgresql/migrations/T001_rename_teams_to_tenants.sql" "check_migration_T001" || return 1
    run_tenant_migration "T002" "scripts/postgresql/migrations/T002_create_collaboration_teams.sql" "check_migration_T002" || return 1
    run_tenant_migration "T002B" "scripts/postgresql/migrations/T002B_add_invitation_status.sql" "check_migration_T002B" || return 1
    run_tenant_migration "T003" "scripts/postgresql/migrations/T003_team_resource_shares.sql" "check_migration_T003" || return 1

    # T004 is always run (idempotent - updates a trigger function)
    log_info "Applying migration T004 (update validate_resource_share)..."
    if [ -f "scripts/postgresql/migrations/T004_update_validate_resource_share.sql" ]; then
        docker exec -i gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants \
            < scripts/postgresql/migrations/T004_update_validate_resource_share.sql || return 1
        log_success "Migration T004 applied successfully"
    fi

    run_tenant_migration "T005" "scripts/postgresql/migrations/T005_team_observability.sql" "check_migration_T005" || return 1
    run_tenant_migration "T006" "scripts/postgresql/migrations/T006_auth_logs.sql" "check_migration_T006" || return 1

    # T007 is always run (idempotent - creates indexes if not exists)
    log_info "Applying migration T007 (query optimization indexes)..."
    if [ -f "scripts/postgresql/migrations/T007_optimize_queries.sql" ]; then
        docker exec -i gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants \
            < scripts/postgresql/migrations/T007_optimize_queries.sql || return 1
        log_success "Migration T007 applied successfully"
    fi

    # T008 is always run (idempotent - creates indexes if not exists)
    # Fixes GitHub Issue #173 - Database Optimizations
    log_info "Applying migration T008 (performance indexes for agents/datasets/teams)..."
    if [ -f "scripts/postgresql/migrations/T008_add_performance_indexes.sql" ]; then
        docker exec -i gentwo-tenant-postgres-primary psql -U postgres -d gt2_tenants \
            < scripts/postgresql/migrations/T008_add_performance_indexes.sql || return 1
        log_success "Migration T008 applied successfully"
    fi

    # T009 - Tenant-scoped agent categories (Issue #215)
    run_tenant_migration "T009" "scripts/postgresql/migrations/T009_tenant_scoped_categories.sql" "check_migration_T009" || return 1

    log_success "All tenant migrations complete"
    return 0
}

# Run all migrations
run_all_migrations() {
    run_admin_migrations || return 1
    run_tenant_migrations || return 1
    return 0
}
141 scripts/lib/platform.sh (Executable file)
@@ -0,0 +1,141 @@
#!/bin/bash
# GT 2.0 Platform Detection and Compose File Selection
# Handles ARM64, x86_64, and DGX platform differences

# Detect NVIDIA GPU and Container Toolkit availability
detect_nvidia_gpu() {
    # Check for the nvidia-smi command (indicates NVIDIA drivers are installed)
    if ! command -v nvidia-smi &> /dev/null; then
        return 1
    fi

    # Verify the GPU is accessible
    if ! nvidia-smi &> /dev/null; then
        return 1
    fi

    # Check the NVIDIA Container Toolkit is configured in Docker
    if ! docker info 2>/dev/null | grep -qi "nvidia"; then
        return 1
    fi

    return 0
}

# Detect platform architecture
detect_platform() {
    local arch=$(uname -m)
    local os=$(uname -s)

    # Check for a DGX-specific environment
    if [ -f "/etc/dgx-release" ] || [ -n "${DGX_PLATFORM}" ]; then
        echo "dgx"
        return 0
    fi

    # Detect architecture
    case "$arch" in
        aarch64|arm64)
            echo "arm64"
            ;;
        x86_64|amd64)
            echo "x86"
            ;;
        *)
            log_error "Unsupported architecture: $arch"
            exit 1
            ;;
    esac
}

# Get the compose files for a platform
get_compose_file() {
    local platform="${1:-$(detect_platform)}"
    local dev_mode="${2:-false}"
    local files=""

    case "$platform" in
        arm64)
            files="docker-compose.yml -f docker-compose.arm64.yml"
            ;;
        x86)
            files="docker-compose.yml -f docker-compose.x86.yml"
            # Add the GPU overlay if an NVIDIA GPU is detected
            if detect_nvidia_gpu; then
                files="$files -f docker-compose.x86-gpu.yml"
            fi
            ;;
        dgx)
            files="docker-compose.yml -f docker-compose.dgx.yml"
            ;;
        *)
            log_error "Unknown platform: $platform"
            exit 1
            ;;
    esac

    # Add the dev overlay if requested
    if [ "$dev_mode" = "true" ]; then
        files="$files -f docker-compose.dev.yml"
    fi

    echo "$files"
}
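The returned string is spliced directly after `docker compose -f` in dc(), which is why only the first filename lacks its own `-f`. Illustrative outputs on a GPU-less x86 host:

# get_compose_file x86 false -> docker-compose.yml -f docker-compose.x86.yml
# get_compose_file x86 true  -> docker-compose.yml -f docker-compose.x86.yml -f docker-compose.dev.yml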

# Get platform-specific settings
get_platform_info() {
    local platform="${1:-$(detect_platform)}"

    case "$platform" in
        arm64)
            echo "Platform: Apple Silicon (ARM64)"
            echo "Compose: docker-compose.yml + docker-compose.arm64.yml"
            echo "PgBouncer: pgbouncer/pgbouncer:latest"
            ;;
        x86)
            echo "Platform: x86_64 Linux"
            if detect_nvidia_gpu; then
                echo "Compose: docker-compose.yml + docker-compose.x86.yml + docker-compose.x86-gpu.yml"
                echo "GPU: NVIDIA (accelerated embeddings)"
            else
                echo "Compose: docker-compose.yml + docker-compose.x86.yml"
                echo "GPU: None (CPU mode)"
            fi
            echo "PgBouncer: pgbouncer/pgbouncer:latest"
            ;;
        dgx)
            echo "Platform: NVIDIA DGX (ARM64 Grace + Blackwell GPU)"
            echo "Compose: docker-compose.yml + docker-compose.dgx.yml"
            echo "PgBouncer: bitnamilegacy/pgbouncer:latest"
            ;;
    esac
}

# Check platform-specific prerequisites
check_platform_prerequisites() {
    local platform="${1:-$(detect_platform)}"

    case "$platform" in
        x86|dgx)
            # Check if the user is in the docker group
            if ! groups | grep -q '\bdocker\b'; then
                log_error "User $USER is not in the docker group"
                log_warning "Docker group membership is required on Linux"
                echo ""
                echo "Please run the following command:"
                echo -e "${BLUE}  sudo usermod -aG docker $USER${NC}"
                echo ""
                echo "Then either:"
                echo "  1. Log out and log back in (recommended)"
                echo "  2. Run: newgrp docker (temporary for this session)"
                return 1
            fi
            log_success "Docker group membership confirmed"
            ;;
        arm64)
            # macOS - no docker group check needed
            log_success "Platform prerequisites OK (macOS)"
            ;;
    esac
    return 0
}
|
||||
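
A minimal usage sketch for the platform helpers above, as an installer might call them. The docker compose invocation is illustrative, not taken from this file; note that get_compose_file deliberately emits a "-f"-separated list, so its expansion is left unquoted:

    # Illustrative only - assumes this library has been sourced
    platform=$(detect_platform)
    get_platform_info "$platform"
    check_platform_prerequisites "$platform" || exit 1
    compose_files=$(get_compose_file "$platform" false)
    docker compose -f $compose_files up -d   # unquoted on purpose: expands to "a.yml -f b.yml ..."
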
273 scripts/lib/secrets.sh Executable file
@@ -0,0 +1,273 @@
#!/bin/bash
# GT AI OS Secret Generation Library
# Centralized, idempotent secret generation for deployment scripts
#
# Usage: source scripts/lib/secrets.sh
#        generate_all_secrets    # Populates .env with missing secrets only

set -e

# Source common functions if available
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "$SCRIPT_DIR/common.sh" ]; then
    source "$SCRIPT_DIR/common.sh"
fi

# =============================================================================
# SECRET GENERATION FUNCTIONS
# =============================================================================

# Generate a random hex string (for JWT secrets, encryption keys)
# Usage: generate_secret_hex [length]
# Default length: 64 characters (32 bytes)
generate_secret_hex() {
    local length=${1:-64}
    openssl rand -hex $((length / 2))
}

# Generate a Fernet key (for TFA encryption, API key encryption)
# Fernet requires a URL-safe base64-encoded 32-byte key
generate_fernet_key() {
    python3 -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())" 2>/dev/null || \
        openssl rand -base64 32 | tr '+/' '-_'   # fallback: translate to URL-safe base64, as Fernet requires
}

# Generate a secure password (for database passwords)
# Usage: generate_password [length]
# Default length: 32 characters
generate_password() {
    local length=${1:-32}
    # Use alphanumeric + special chars, avoiding problematic shell chars
    # Note: '-' must be last in the tr set, or tr parses it as a range
    openssl rand -base64 48 | tr -dc 'a-zA-Z0-9!@#$%^&*()_+=-' | head -c "$length"
}

# Generate a simple alphanumeric password (for services that don't handle special chars well)
# Usage: generate_simple_password [length]
generate_simple_password() {
    local length=${1:-32}
    openssl rand -base64 48 | tr -dc 'a-zA-Z0-9' | head -c "$length"
}

# =============================================================================
# ENV FILE MANAGEMENT
# =============================================================================

# Get value from .env file
# Usage: get_env_value "KEY_NAME" ".env"
get_env_value() {
    local key="$1"
    local env_file="${2:-.env}"

    if [ -f "$env_file" ]; then
        grep "^${key}=" "$env_file" 2>/dev/null | cut -d'=' -f2- | head -1
    fi
}

# Set value in .env file (preserves existing, only sets if missing or empty)
# Usage: set_env_value "KEY_NAME" "value" ".env"
set_env_value() {
    local key="$1"
    local value="$2"
    local env_file="${3:-.env}"

    # Create file if it doesn't exist
    touch "$env_file"

    local existing=$(get_env_value "$key" "$env_file")

    if [ -z "$existing" ]; then
        # Key doesn't exist or is empty, add/update it
        if grep -q "^${key}=" "$env_file" 2>/dev/null; then
            # Key exists but is empty, update it
            if [[ "$OSTYPE" == "darwin"* ]]; then
                sed -i '' "s|^${key}=.*|${key}=${value}|" "$env_file"
            else
                sed -i "s|^${key}=.*|${key}=${value}|" "$env_file"
            fi
        else
            # Key doesn't exist, append it
            echo "${key}=${value}" >> "$env_file"
        fi
        return 0  # Secret was generated
    fi
    return 1  # Secret already exists
}

# =============================================================================
# MAIN SECRET GENERATION
# =============================================================================

# Generate all required secrets for GT AI OS
# This function is IDEMPOTENT - it only generates missing secrets
# Usage: generate_all_secrets [env_file]
generate_all_secrets() {
    local env_file="${1:-.env}"
    local generated_count=0

    echo "Checking and generating missing secrets..."

    # JWT and Authentication Secrets
    if set_env_value "JWT_SECRET" "$(generate_secret_hex 64)" "$env_file"; then
        echo "  Generated: JWT_SECRET"
        ((++generated_count))
    fi

    if set_env_value "CONTROL_PANEL_JWT_SECRET" "$(generate_secret_hex 64)" "$env_file"; then
        echo "  Generated: CONTROL_PANEL_JWT_SECRET"
        ((++generated_count))
    fi

    if set_env_value "RESOURCE_CLUSTER_SECRET_KEY" "$(generate_secret_hex 64)" "$env_file"; then
        echo "  Generated: RESOURCE_CLUSTER_SECRET_KEY"
        ((++generated_count))
    fi

    # Encryption Keys
    if set_env_value "TFA_ENCRYPTION_KEY" "$(generate_fernet_key)" "$env_file"; then
        echo "  Generated: TFA_ENCRYPTION_KEY"
        ((++generated_count))
    fi

    if set_env_value "API_KEY_ENCRYPTION_KEY" "$(generate_fernet_key)" "$env_file"; then
        echo "  Generated: API_KEY_ENCRYPTION_KEY"
        ((++generated_count))
    fi

    # Database Passwords (use simple passwords for PostgreSQL compatibility)
    if set_env_value "ADMIN_POSTGRES_PASSWORD" "$(generate_simple_password 32)" "$env_file"; then
        echo "  Generated: ADMIN_POSTGRES_PASSWORD"
        ((++generated_count))
    fi

    if set_env_value "TENANT_POSTGRES_PASSWORD" "$(generate_simple_password 32)" "$env_file"; then
        echo "  Generated: TENANT_POSTGRES_PASSWORD"
        ((++generated_count))
    fi

    # Sync TENANT_USER_PASSWORD with TENANT_POSTGRES_PASSWORD
    local tenant_pass=$(get_env_value "TENANT_POSTGRES_PASSWORD" "$env_file")
    if set_env_value "TENANT_USER_PASSWORD" "$tenant_pass" "$env_file"; then
        echo "  Set: TENANT_USER_PASSWORD (synced with TENANT_POSTGRES_PASSWORD)"
        ((++generated_count))
    fi

    if set_env_value "TENANT_REPLICATOR_PASSWORD" "$(generate_simple_password 32)" "$env_file"; then
        echo "  Generated: TENANT_REPLICATOR_PASSWORD"
        ((++generated_count))
    fi

    # Other Service Passwords
    if set_env_value "RABBITMQ_PASSWORD" "$(generate_simple_password 24)" "$env_file"; then
        echo "  Generated: RABBITMQ_PASSWORD"
        ((++generated_count))
    fi

    if [ $generated_count -eq 0 ]; then
        echo "  All secrets already present (no changes needed)"
    else
        echo "  Generated $generated_count new secret(s)"
    fi

    return 0
}

# Validate that all required secrets are present (non-empty)
# Usage: validate_secrets [env_file]
validate_secrets() {
    local env_file="${1:-.env}"
    local missing=0

    local required_secrets=(
        "JWT_SECRET"
        "CONTROL_PANEL_JWT_SECRET"
        "RESOURCE_CLUSTER_SECRET_KEY"
        "TFA_ENCRYPTION_KEY"
        "API_KEY_ENCRYPTION_KEY"
        "ADMIN_POSTGRES_PASSWORD"
        "TENANT_POSTGRES_PASSWORD"
        "TENANT_USER_PASSWORD"
        "RABBITMQ_PASSWORD"
    )

    echo "Validating required secrets..."

    for secret in "${required_secrets[@]}"; do
        local value=$(get_env_value "$secret" "$env_file")
        if [ -z "$value" ]; then
            echo "  MISSING: $secret"
            ((++missing))   # pre-increment keeps the exit status 0 under set -e
        fi
    done

    if [ $missing -gt 0 ]; then
        echo "  $missing required secret(s) missing!"
        return 1
    fi

    echo "  All required secrets present"
    return 0
}

# =============================================================================
# TEMPLATE CREATION
# =============================================================================

# Create a .env.template file with placeholder values
# Usage: create_env_template [output_file]
create_env_template() {
    local output_file="${1:-.env.template}"

    cat > "$output_file" << 'EOF'
# GT AI OS Environment Configuration
# Copy this file to .env and customize values
# Secrets are auto-generated on first install if not provided

# =============================================================================
# AUTHENTICATION (Auto-generated if empty)
# =============================================================================
JWT_SECRET=
CONTROL_PANEL_JWT_SECRET=
RESOURCE_CLUSTER_SECRET_KEY=

# =============================================================================
# ENCRYPTION KEYS (Auto-generated if empty)
# =============================================================================
PASSWORD_RESET_ENCRYPTION_KEY=
TFA_ENCRYPTION_KEY=
API_KEY_ENCRYPTION_KEY=

# =============================================================================
# DATABASE PASSWORDS (Auto-generated if empty)
# =============================================================================
ADMIN_POSTGRES_PASSWORD=
TENANT_POSTGRES_PASSWORD=
TENANT_USER_PASSWORD=
TENANT_REPLICATOR_PASSWORD=
RABBITMQ_PASSWORD=

# =============================================================================
# API KEYS (Configure via Control Panel UI after installation)
# =============================================================================
# Note: LLM API keys (Groq, OpenAI, Anthropic) are configured through
# the Control Panel UI, not environment variables.

# =============================================================================
# SMTP (Enterprise Edition Only - Password Reset)
# =============================================================================
# Set via environment variables or configure below
# SMTP_HOST=smtp-relay.brevo.com
# SMTP_PORT=587
# SMTP_USERNAME=
# SMTP_PASSWORD=
# SMTP_FROM_EMAIL=noreply@yourdomain.com
# SMTP_FROM_NAME=GT AI OS

# =============================================================================
# DEPLOYMENT
# =============================================================================
COMPOSE_PROJECT_NAME=gentwo
ENVIRONMENT=production
EOF

    echo "Created $output_file"
}
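
A short sketch of the intended install-time flow for this library, using only the functions defined above (paths as in the file's own usage comment; safe to re-run because generation is idempotent):

    # Illustrative only
    source scripts/lib/secrets.sh
    create_env_template               # writes .env.template with empty placeholders
    generate_all_secrets ".env"       # fills only missing or empty keys
    validate_secrets ".env" || exit 1
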
41 scripts/migrations/006_add_tenant_frontend_url.sql Normal file
@@ -0,0 +1,41 @@
-- Add frontend_url column to tenants table
-- Migration: 006_add_tenant_frontend_url
-- Date: October 6, 2025

BEGIN;

-- Add frontend_url column if it doesn't exist
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT FROM information_schema.columns
        WHERE table_schema = 'public'
          AND table_name = 'tenants'
          AND column_name = 'frontend_url'
    ) THEN
        ALTER TABLE tenants ADD COLUMN frontend_url VARCHAR(255);
        RAISE NOTICE 'Added frontend_url column to tenants table';
    ELSE
        RAISE NOTICE 'Column frontend_url already exists in tenants table';
    END IF;
END
$$;

-- Mark migration as applied in Alembic version table (if it exists)
DO $$
BEGIN
    IF EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'alembic_version') THEN
        INSERT INTO alembic_version (version_num)
        VALUES ('006_frontend_url')
        ON CONFLICT (version_num) DO NOTHING;
        RAISE NOTICE 'Marked migration in alembic_version table';
    ELSE
        RAISE NOTICE 'No alembic_version table found (skipping)';
    END IF;
END
$$;

COMMIT;

-- Verify column was added
\echo 'Migration 006_add_tenant_frontend_url completed successfully'
54 scripts/migrations/008_remove_ip_address_from_rate_limits.sql Normal file
@@ -0,0 +1,54 @@
-- Remove ip_address column from password_reset_rate_limits
-- Migration: 008_remove_ip_address_from_rate_limits
-- Date: October 7, 2025
-- Database: gt2_admin (Control Panel)
--
-- Description:
--   Removes the ip_address column that was incorrectly added by an Alembic auto-migration.
--   The application only uses email-based rate limiting, not IP-based.
--
-- Usage:
--   psql -U postgres -d gt2_admin -f 008_remove_ip_address_from_rate_limits.sql
--
-- OR via Docker:
--   docker exec -i gentwo-controlpanel-postgres psql -U postgres -d gt2_admin < 008_remove_ip_address_from_rate_limits.sql

BEGIN;

-- Remove ip_address column if it exists
DO $$
BEGIN
    IF EXISTS (
        SELECT FROM information_schema.columns
        WHERE table_schema = 'public'
          AND table_name = 'password_reset_rate_limits'
          AND column_name = 'ip_address'
    ) THEN
        ALTER TABLE password_reset_rate_limits DROP COLUMN ip_address CASCADE;
        RAISE NOTICE 'Removed ip_address column from password_reset_rate_limits';
    ELSE
        RAISE NOTICE 'Column ip_address does not exist, skipping';
    END IF;
END
$$;

-- Mark migration as applied in Alembic version table (if it exists)
DO $$
BEGIN
    IF EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'alembic_version') THEN
        INSERT INTO alembic_version (version_num)
        VALUES ('008_remove_ip')
        ON CONFLICT (version_num) DO NOTHING;
        RAISE NOTICE 'Marked migration in alembic_version table';
    ELSE
        RAISE NOTICE 'No alembic_version table found (skipping)';
    END IF;
END
$$;

COMMIT;

-- Verify table structure
\d password_reset_rate_limits

\echo 'Migration 008_remove_ip_address_from_rate_limits completed successfully'
42 scripts/migrations/009_add_tfa_schema.sql Normal file
@@ -0,0 +1,42 @@
-- Migration 009: Add Two-Factor Authentication Schema
-- Creates TFA fields in users table and supporting tables for rate limiting and token management

-- Add TFA fields to users table
ALTER TABLE users ADD COLUMN IF NOT EXISTS tfa_enabled BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE users ADD COLUMN IF NOT EXISTS tfa_secret TEXT;
ALTER TABLE users ADD COLUMN IF NOT EXISTS tfa_required BOOLEAN NOT NULL DEFAULT false;

-- Add indexes for query optimization
CREATE INDEX IF NOT EXISTS ix_users_tfa_enabled ON users(tfa_enabled);
CREATE INDEX IF NOT EXISTS ix_users_tfa_required ON users(tfa_required);

-- Create TFA verification rate limits table
CREATE TABLE IF NOT EXISTS tfa_verification_rate_limits (
    id SERIAL PRIMARY KEY,
    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    request_count INTEGER NOT NULL DEFAULT 1,
    window_start TIMESTAMP WITH TIME ZONE NOT NULL,
    window_end TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS ix_tfa_verification_rate_limits_user_id ON tfa_verification_rate_limits(user_id);
CREATE INDEX IF NOT EXISTS ix_tfa_verification_rate_limits_window_end ON tfa_verification_rate_limits(window_end);

-- Create used temp tokens table for replay prevention
CREATE TABLE IF NOT EXISTS used_temp_tokens (
    id SERIAL PRIMARY KEY,
    token_id VARCHAR(255) NOT NULL UNIQUE,
    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    user_email VARCHAR(255),
    tfa_configured BOOLEAN,
    qr_code_uri TEXT,
    manual_entry_key VARCHAR(255),
    temp_token TEXT,
    used_at TIMESTAMP WITH TIME ZONE,
    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);

CREATE UNIQUE INDEX IF NOT EXISTS ix_used_temp_tokens_token_id ON used_temp_tokens(token_id);
CREATE INDEX IF NOT EXISTS ix_used_temp_tokens_expires_at ON used_temp_tokens(expires_at);
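
A hedged sketch of how the replay-prevention table above is typically consumed; the application-side flow is an assumption, but it relies only on the UNIQUE(token_id) constraint defined in this migration:

    -- Illustrative: atomically consume a temp token. A second attempt with the
    -- same token_id hits the unique constraint and returns zero rows (replay detected).
    INSERT INTO used_temp_tokens (token_id, user_id, used_at, expires_at)
    VALUES ('example-token-id', 42, NOW(), NOW() + INTERVAL '10 minutes')
    ON CONFLICT (token_id) DO NOTHING
    RETURNING id;
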
167 scripts/migrations/010_update_model_context_windows.sql Normal file
@@ -0,0 +1,167 @@
-- Migration 010: Update Model Context Windows and Max Tokens
-- Ensures all models in model_configs have proper context_window and max_tokens set

-- Update models with missing context_window and max_tokens based on deployment configs
-- Reference: scripts/seed/groq-models.sql and actual Groq API specifications

DO $$
DECLARE
    updated_count INTEGER := 0;
BEGIN
    -- LLaMA 3.1 8B Instant
    UPDATE model_configs
    SET context_window = 131072,
        max_tokens = 131072,
        updated_at = NOW()
    WHERE model_id = 'llama-3.1-8b-instant'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for llama-3.1-8b-instant', updated_count;
    END IF;

    -- LLaMA 3.3 70B Versatile
    UPDATE model_configs
    SET context_window = 131072,
        max_tokens = 32768,
        updated_at = NOW()
    WHERE model_id = 'llama-3.3-70b-versatile'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for llama-3.3-70b-versatile', updated_count;
    END IF;

    -- Groq Compound
    UPDATE model_configs
    SET context_window = 131072,
        max_tokens = 8192,
        updated_at = NOW()
    WHERE model_id = 'groq/compound'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for groq/compound', updated_count;
    END IF;

    -- Groq Compound Mini
    UPDATE model_configs
    SET context_window = 131072,
        max_tokens = 8192,
        updated_at = NOW()
    WHERE model_id = 'groq/compound-mini'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for groq/compound-mini', updated_count;
    END IF;

    -- GPT OSS 120B
    UPDATE model_configs
    SET context_window = 131072,
        max_tokens = 65536,
        updated_at = NOW()
    WHERE model_id = 'openai/gpt-oss-120b'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for openai/gpt-oss-120b', updated_count;
    END IF;

    -- GPT OSS 20B
    UPDATE model_configs
    SET context_window = 131072,
        max_tokens = 65536,
        updated_at = NOW()
    WHERE model_id = 'openai/gpt-oss-20b'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for openai/gpt-oss-20b', updated_count;
    END IF;

    -- Meta LLaMA Guard 4 12B
    UPDATE model_configs
    SET context_window = 131072,
        max_tokens = 1024,
        updated_at = NOW()
    WHERE model_id = 'meta-llama/llama-guard-4-12b'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for meta-llama/llama-guard-4-12b', updated_count;
    END IF;

    -- Meta LLaMA 4 Maverick 17B
    UPDATE model_configs
    SET context_window = 131072,
        max_tokens = 8192,
        updated_at = NOW()
    WHERE model_id = 'meta-llama/llama-4-maverick-17b-128e-instruct'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for meta-llama/llama-4-maverick-17b-128e-instruct', updated_count;
    END IF;

    -- Moonshot AI Kimi K2 (checking for common variations)
    UPDATE model_configs
    SET context_window = 262144,
        max_tokens = 16384,
        updated_at = NOW()
    WHERE model_id IN ('moonshotai/kimi-k2-instruct-0905', 'kimi-k2-instruct-0905', 'moonshotai/kimi-k2')
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for moonshotai/kimi-k2-instruct-0905', updated_count;
    END IF;

    -- Whisper Large v3
    UPDATE model_configs
    SET context_window = 0,
        max_tokens = 0,
        updated_at = NOW()
    WHERE model_id = 'whisper-large-v3'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for whisper-large-v3', updated_count;
    END IF;

    -- Whisper Large v3 Turbo
    UPDATE model_configs
    SET context_window = 0,
        max_tokens = 0,
        updated_at = NOW()
    WHERE model_id = 'whisper-large-v3-turbo'
      AND (context_window IS NULL OR max_tokens IS NULL);

    GET DIAGNOSTICS updated_count = ROW_COUNT;
    IF updated_count > 0 THEN
        RAISE NOTICE 'Updated % records for whisper-large-v3-turbo', updated_count;
    END IF;

    RAISE NOTICE 'Migration 010 completed: Updated model context windows and max tokens';
END $$;

-- Display updated models
SELECT
    model_id,
    name,
    provider,
    model_type,
    context_window,
    max_tokens
FROM model_configs
WHERE provider = 'groq' OR model_id LIKE '%moonshot%' OR model_id LIKE '%kimi%'
ORDER BY model_id;
70 scripts/migrations/011_add_system_management_tables.sql Normal file
@@ -0,0 +1,70 @@
-- Migration 011: Add system management tables for version tracking, updates, and backups
-- Idempotent: Uses CREATE TABLE IF NOT EXISTS and exception handling for enums

-- Create enum types (safe to recreate)
DO $$ BEGIN
    CREATE TYPE updatestatus AS ENUM ('pending', 'in_progress', 'completed', 'failed', 'rolled_back');
EXCEPTION WHEN duplicate_object THEN NULL;
END $$;

DO $$ BEGIN
    CREATE TYPE backuptype AS ENUM ('manual', 'pre_update', 'scheduled');
EXCEPTION WHEN duplicate_object THEN NULL;
END $$;

-- System versions table - tracks installed system versions
CREATE TABLE IF NOT EXISTS system_versions (
    id SERIAL PRIMARY KEY,
    uuid VARCHAR(36) NOT NULL UNIQUE DEFAULT gen_random_uuid()::text,
    version VARCHAR(50) NOT NULL,
    installed_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    installed_by VARCHAR(255),
    is_current BOOLEAN NOT NULL DEFAULT true,
    release_notes TEXT,
    git_commit VARCHAR(40)
);
CREATE INDEX IF NOT EXISTS ix_system_versions_id ON system_versions(id);
CREATE INDEX IF NOT EXISTS ix_system_versions_version ON system_versions(version);

-- Update jobs table - tracks update execution
CREATE TABLE IF NOT EXISTS update_jobs (
    id SERIAL PRIMARY KEY,
    uuid VARCHAR(36) NOT NULL UNIQUE DEFAULT gen_random_uuid()::text,
    target_version VARCHAR(50) NOT NULL,
    status updatestatus NOT NULL DEFAULT 'pending',
    started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    completed_at TIMESTAMP WITH TIME ZONE,
    current_stage VARCHAR(100),
    logs JSONB NOT NULL DEFAULT '[]'::jsonb,
    error_message TEXT,
    backup_id INTEGER,
    started_by VARCHAR(255),
    rollback_reason TEXT
);
CREATE INDEX IF NOT EXISTS ix_update_jobs_id ON update_jobs(id);
CREATE INDEX IF NOT EXISTS ix_update_jobs_uuid ON update_jobs(uuid);
CREATE INDEX IF NOT EXISTS ix_update_jobs_status ON update_jobs(status);

-- Backup records table - tracks system backups
CREATE TABLE IF NOT EXISTS backup_records (
    id SERIAL PRIMARY KEY,
    uuid VARCHAR(36) NOT NULL UNIQUE DEFAULT gen_random_uuid()::text,
    backup_type backuptype NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    size_bytes BIGINT,
    location VARCHAR(500) NOT NULL,
    version VARCHAR(50),
    components JSONB NOT NULL DEFAULT '{}'::jsonb,
    checksum VARCHAR(64),
    created_by VARCHAR(255),
    description TEXT,
    is_valid BOOLEAN NOT NULL DEFAULT true,
    expires_at TIMESTAMP WITH TIME ZONE
);
CREATE INDEX IF NOT EXISTS ix_backup_records_id ON backup_records(id);
CREATE INDEX IF NOT EXISTS ix_backup_records_uuid ON backup_records(uuid);

-- Seed initial version (idempotent - only inserts if no current version exists)
INSERT INTO system_versions (uuid, version, installed_by, is_current)
SELECT 'initial-version-uuid', 'v2.0.31', 'system', true
WHERE NOT EXISTS (SELECT 1 FROM system_versions WHERE is_current = true);
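
An illustrative sketch of how an updater might record a completed upgrade against these tables; the version string and actor are examples only, and the actual updater logic is assumed, not defined here:

    -- Illustrative: flip the current-version pointer after a successful update
    BEGIN;
    UPDATE system_versions SET is_current = false WHERE is_current = true;
    INSERT INTO system_versions (version, installed_by, is_current)
    VALUES ('v2.0.33', 'update-job', true);
    COMMIT;
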
36 scripts/migrations/012_add_optics_enabled.sql Normal file
@@ -0,0 +1,36 @@
-- Migration 012 (T008): Add Optics cost tracking feature toggle for tenants
-- This enables the Optics tab in tenant observability for cost visibility

BEGIN;

-- Add optics_enabled column to tenants table in control panel database
-- This column controls whether the Optics cost tracking tab is visible for a tenant
ALTER TABLE public.tenants
    ADD COLUMN IF NOT EXISTS optics_enabled BOOLEAN DEFAULT FALSE;

-- Add comment for documentation
COMMENT ON COLUMN public.tenants.optics_enabled IS
    'Enable Optics cost tracking tab in tenant observability dashboard';

-- Update existing test tenant to have optics enabled for demo purposes
UPDATE public.tenants
SET optics_enabled = TRUE
WHERE domain = 'test-company';

COMMIT;

-- Log completion
DO $$
BEGIN
    RAISE NOTICE '=== T008 OPTICS FEATURE MIGRATION ===';
    RAISE NOTICE 'Added optics_enabled column to tenants table';
    RAISE NOTICE 'Default: FALSE (disabled)';
    RAISE NOTICE 'Test tenant (test-company): enabled';
    RAISE NOTICE '=====================================';
END $$;

-- Rollback (if needed):
-- BEGIN;
-- ALTER TABLE public.tenants DROP COLUMN IF EXISTS optics_enabled;
-- COMMIT;
15 scripts/migrations/013_rename_cost_columns.sql Normal file
@@ -0,0 +1,15 @@
-- Migration 013: Rename cost columns from per_1k to per_million
-- This is idempotent - only runs if old columns exist

DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM information_schema.columns
               WHERE table_name = 'model_configs'
                 AND column_name = 'cost_per_1k_input') THEN
        ALTER TABLE model_configs RENAME COLUMN cost_per_1k_input TO cost_per_million_input;
        ALTER TABLE model_configs RENAME COLUMN cost_per_1k_output TO cost_per_million_output;
        RAISE NOTICE 'Renamed cost columns from per_1k to per_million';
    ELSE
        RAISE NOTICE 'Cost columns already renamed or do not exist';
    END IF;
END $$;
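
With the columns now denominated per million tokens, request cost falls out directly. A hedged sketch (the token counts are made-up inputs, not read from a real usage table):

    -- Illustrative: dollar cost of one request against a given model
    SELECT model_id,
           (150000 / 1e6) * cost_per_million_input    -- 150k input tokens
         + (2000   / 1e6) * cost_per_million_output   -- 2k output tokens
           AS request_cost_usd
    FROM model_configs
    WHERE model_id = 'llama-3.3-70b-versatile';
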
108 scripts/migrations/014_backfill_groq_pricing.sql Normal file
@@ -0,0 +1,108 @@
-- Migration 014: Backfill missing Groq model pricing
-- Updates models with NULL or 0 pricing to use standard Groq rates
-- Prices sourced from https://groq.com/pricing (verified Dec 2, 2025)
-- Idempotent - only updates rows that need fixing

-- Groq Compound (estimated: includes underlying model + tool costs)
UPDATE model_configs
SET cost_per_million_input = 2.50, cost_per_million_output = 6.00, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%compound'
  AND model_id NOT LIKE '%mini%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- Groq Compound Mini (estimated: includes underlying model + tool costs)
UPDATE model_configs
SET cost_per_million_input = 1.00, cost_per_million_output = 2.50, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%compound-mini%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- LLaMA 3.1 8B Instant
UPDATE model_configs
SET cost_per_million_input = 0.05, cost_per_million_output = 0.08, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-3.1-8b-instant%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- LLaMA 3.3 70B Versatile
UPDATE model_configs
SET cost_per_million_input = 0.59, cost_per_million_output = 0.79, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-3.3-70b-versatile%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- Meta Llama 4 Maverick 17B (17Bx128E MoE)
UPDATE model_configs
SET cost_per_million_input = 0.20, cost_per_million_output = 0.60, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-4-maverick%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- Meta Llama 4 Scout 17B (17Bx16E MoE)
UPDATE model_configs
SET cost_per_million_input = 0.11, cost_per_million_output = 0.34, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-4-scout%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- LLaMA Guard 4 12B
UPDATE model_configs
SET cost_per_million_input = 0.20, cost_per_million_output = 0.20, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-guard%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- Moonshot AI Kimi K2 (1T params, 256k context)
UPDATE model_configs
SET cost_per_million_input = 1.00, cost_per_million_output = 3.00, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%kimi-k2%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- OpenAI GPT OSS 120B 128k
UPDATE model_configs
SET cost_per_million_input = 0.15, cost_per_million_output = 0.60, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%gpt-oss-120b%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- OpenAI GPT OSS 20B 128k
UPDATE model_configs
SET cost_per_million_input = 0.075, cost_per_million_output = 0.30, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%gpt-oss-20b%'
  AND model_id NOT LIKE '%safeguard%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- OpenAI GPT OSS Safeguard 20B
UPDATE model_configs
SET cost_per_million_input = 0.075, cost_per_million_output = 0.30, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%gpt-oss-safeguard%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- Qwen3 32B 131k
UPDATE model_configs
SET cost_per_million_input = 0.29, cost_per_million_output = 0.59, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%qwen3-32b%'
  AND (cost_per_million_input IS NULL OR cost_per_million_input = 0
       OR cost_per_million_output IS NULL OR cost_per_million_output = 0);

-- Report results
SELECT model_id, name, cost_per_million_input, cost_per_million_output
FROM model_configs
WHERE provider = 'groq'
ORDER BY model_id;
84 scripts/migrations/015_update_groq_pricing_dec_2025.sql Normal file
@@ -0,0 +1,84 @@
-- Migration 015: Update Groq model pricing to December 2025 rates
-- Source: https://groq.com/pricing (verified Dec 2, 2025)
-- This migration updates ALL pricing values (not just NULL/0)

-- GPT OSS 120B 128k: Was $1.20/$1.20, now $0.15/$0.60
UPDATE model_configs
SET cost_per_million_input = 0.15, cost_per_million_output = 0.60, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%gpt-oss-120b%'
  AND model_id NOT LIKE '%safeguard%';

-- GPT OSS 20B 128k: Was $0.30/$0.30, now $0.075/$0.30
UPDATE model_configs
SET cost_per_million_input = 0.075, cost_per_million_output = 0.30, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%gpt-oss-20b%'
  AND model_id NOT LIKE '%safeguard%';

-- GPT OSS Safeguard 20B: $0.075/$0.30
UPDATE model_configs
SET cost_per_million_input = 0.075, cost_per_million_output = 0.30, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%gpt-oss-safeguard%';

-- Llama 4 Maverick (17Bx128E): Was $0.15/$0.25, now $0.20/$0.60
UPDATE model_configs
SET cost_per_million_input = 0.20, cost_per_million_output = 0.60, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-4-maverick%';

-- Llama 4 Scout (17Bx16E): $0.11/$0.34 (new model)
UPDATE model_configs
SET cost_per_million_input = 0.11, cost_per_million_output = 0.34, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-4-scout%';

-- Kimi K2: Was $0.30/$0.50, now $1.00/$3.00
UPDATE model_configs
SET cost_per_million_input = 1.00, cost_per_million_output = 3.00, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%kimi-k2%';

-- Llama Guard 4 12B: $0.20/$0.20
UPDATE model_configs
SET cost_per_million_input = 0.20, cost_per_million_output = 0.20, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-guard%';

-- Groq Compound: Was $2.00/$2.00, now $2.50/$6.00 (estimated with tool costs)
UPDATE model_configs
SET cost_per_million_input = 2.50, cost_per_million_output = 6.00, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%compound%'
  AND model_id NOT LIKE '%mini%';

-- Groq Compound Mini: Was $0.80/$0.80, now $1.00/$2.50 (estimated with tool costs)
UPDATE model_configs
SET cost_per_million_input = 1.00, cost_per_million_output = 2.50, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%compound-mini%';

-- Qwen3 32B 131k: $0.29/$0.59 (new model)
UPDATE model_configs
SET cost_per_million_input = 0.29, cost_per_million_output = 0.59, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%qwen3-32b%';

-- LLaMA 3.1 8B Instant: $0.05/$0.08 (unchanged, ensure consistency)
UPDATE model_configs
SET cost_per_million_input = 0.05, cost_per_million_output = 0.08, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-3.1-8b-instant%';

-- LLaMA 3.3 70B Versatile: $0.59/$0.79 (unchanged, ensure consistency)
UPDATE model_configs
SET cost_per_million_input = 0.59, cost_per_million_output = 0.79, updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%llama-3.3-70b-versatile%';

-- Report updated pricing
SELECT model_id, name, cost_per_million_input AS input_per_1m, cost_per_million_output AS output_per_1m
FROM model_configs
WHERE provider = 'groq'
ORDER BY cost_per_million_input DESC, model_id;
24 scripts/migrations/016_add_is_compound_column.sql Normal file
@@ -0,0 +1,24 @@
-- Migration 016: Add is_compound column to model_configs
-- Required for Compound model pass-through pricing
-- Date: 2025-12-02

-- Add column if it doesn't exist
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'model_configs' AND column_name = 'is_compound'
    ) THEN
        ALTER TABLE public.model_configs
            ADD COLUMN is_compound BOOLEAN DEFAULT FALSE;
    END IF;
END $$;

-- Mark compound models
UPDATE public.model_configs
SET is_compound = true
WHERE model_id LIKE '%compound%'
  AND is_compound IS NOT TRUE;

-- Verify
SELECT model_id, is_compound FROM public.model_configs WHERE model_id LIKE '%compound%';
31 scripts/migrations/017_fix_compound_pricing.sql Normal file
@@ -0,0 +1,31 @@
-- Migration 017: Fix Compound model pricing with correct blended rates
-- Source: https://groq.com/pricing (Dec 2025) + actual API response analysis
--
-- Compound uses GPT-OSS-120B ($0.15/$0.60) + Llama 4 Scout ($0.11/$0.34)
-- Blended 50/50: ($0.15+$0.11)/2 = $0.13 input, ($0.60+$0.34)/2 = $0.47 output
--
-- Compound Mini uses GPT-OSS-120B ($0.15/$0.60) + Llama 3.3 70B ($0.59/$0.79)
-- Blended 50/50: ($0.15+$0.59)/2 = $0.37 input, ($0.60+$0.79)/2 = $0.695 output

-- Fix Compound pricing (was incorrectly set to $2.50/$6.00)
UPDATE model_configs
SET cost_per_million_input = 0.13,
    cost_per_million_output = 0.47,
    updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%compound%'
  AND model_id NOT LIKE '%mini%';

-- Fix Compound Mini pricing (was incorrectly set to $1.00/$2.50)
UPDATE model_configs
SET cost_per_million_input = 0.37,
    cost_per_million_output = 0.695,
    updated_at = NOW()
WHERE provider = 'groq'
  AND model_id LIKE '%compound-mini%';

-- Report updated pricing
SELECT model_id, name, cost_per_million_input AS input_per_1m, cost_per_million_output AS output_per_1m
FROM model_configs
WHERE provider = 'groq' AND model_id LIKE '%compound%'
ORDER BY model_id;
19 scripts/migrations/018_add_budget_storage_pricing.sql Normal file
@@ -0,0 +1,19 @@
-- Migration 018: Add budget and storage pricing fields to tenants
-- Supports #234 (Budget Limits), #218 (Storage Tier Pricing)
-- Updated: Removed warm tier, changed cold tier to allocation-based model

-- Budget fields
ALTER TABLE public.tenants ADD COLUMN IF NOT EXISTS monthly_budget_cents INTEGER DEFAULT NULL;
ALTER TABLE public.tenants ADD COLUMN IF NOT EXISTS budget_warning_threshold INTEGER DEFAULT 80;
ALTER TABLE public.tenants ADD COLUMN IF NOT EXISTS budget_critical_threshold INTEGER DEFAULT 90;
ALTER TABLE public.tenants ADD COLUMN IF NOT EXISTS budget_enforcement_enabled BOOLEAN DEFAULT true;

-- Hot tier storage pricing overrides (NULL = use system defaults)
-- Default: $0.15/GiB/month (in cents per MiB: ~0.0146 cents/MiB)
ALTER TABLE public.tenants ADD COLUMN IF NOT EXISTS storage_price_dataset_hot DECIMAL(10,4) DEFAULT NULL;
ALTER TABLE public.tenants ADD COLUMN IF NOT EXISTS storage_price_conversation_hot DECIMAL(10,4) DEFAULT NULL;

-- Cold tier: Allocation-based model
-- Monthly cost = allocated_tibs × price_per_tib
ALTER TABLE public.tenants ADD COLUMN IF NOT EXISTS cold_storage_allocated_tibs DECIMAL(10,4) DEFAULT NULL;
ALTER TABLE public.tenants ADD COLUMN IF NOT EXISTS cold_storage_price_per_tib DECIMAL(10,2) DEFAULT 10.00;
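
A hedged sketch of the allocation-based cold-tier charge described above (tenants.domain is used only for labeling; the billing-side rollup code is assumed, not part of this migration):

    -- Illustrative: monthly cold-storage charge = allocated TiB x price per TiB
    SELECT domain,
           cold_storage_allocated_tibs * cold_storage_price_per_tib AS cold_storage_usd_per_month
    FROM public.tenants
    WHERE cold_storage_allocated_tibs IS NOT NULL;
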
17 scripts/migrations/019_add_embedding_usage.sql Normal file
@@ -0,0 +1,17 @@
-- Migration 019: Add embedding usage tracking table
-- Supports #241 (Embedding Model Pricing)

CREATE TABLE IF NOT EXISTS public.embedding_usage_logs (
    id SERIAL PRIMARY KEY,
    tenant_id VARCHAR(100) NOT NULL,
    user_id VARCHAR(100) NOT NULL,
    tokens_used INTEGER NOT NULL,
    embedding_count INTEGER NOT NULL,
    model VARCHAR(100) DEFAULT 'BAAI/bge-m3',
    cost_cents DECIMAL(10,4) NOT NULL,
    request_id VARCHAR(100),
    timestamp TIMESTAMP DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_embedding_usage_tenant_timestamp
    ON public.embedding_usage_logs(tenant_id, timestamp);
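
A sketch of the rollup this table is built for; the query shape is an assumption about downstream reporting, but it is exactly what the (tenant_id, timestamp) index above serves:

    -- Illustrative: current-month embedding spend per tenant
    SELECT tenant_id,
           SUM(tokens_used)        AS tokens,
           SUM(cost_cents) / 100.0 AS cost_usd
    FROM public.embedding_usage_logs
    WHERE timestamp >= date_trunc('month', NOW())
    GROUP BY tenant_id;
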
224 scripts/migrations/020_migrate_env_api_keys.py Normal file
@@ -0,0 +1,224 @@
#!/usr/bin/env python3
"""
Migration 020: Import GROQ_API_KEY from environment to database

Migrates API keys from the .env file to encrypted database storage for the test-company tenant.
This is part of the move away from environment variables for API keys (#158, #219).

Idempotency: Checks if key already exists before importing
Target: test-company tenant only (as specified in requirements)

Usage:
    python scripts/migrations/020_migrate_env_api_keys.py

Environment variables required:
    - GROQ_API_KEY: The Groq API key to migrate (optional - skips if not set)
    - API_KEY_ENCRYPTION_KEY: Fernet encryption key (auto-generated if not set)
    - CONTROL_PANEL_DB_HOST: Database host (default: localhost)
    - CONTROL_PANEL_DB_PORT: Database port (default: 5432)
    - CONTROL_PANEL_DB_NAME: Database name (default: gt2_admin)
    - CONTROL_PANEL_DB_USER: Database user (default: postgres)
    - ADMIN_POSTGRES_PASSWORD: Database password
"""
import os
import sys
import json
import logging
from datetime import datetime

try:
    from cryptography.fernet import Fernet
    import psycopg2
except ImportError as e:
    print(f"Missing required package: {e}")
    print("Run: pip install cryptography psycopg2-binary")
    sys.exit(1)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Migration constants
TARGET_TENANT_DOMAIN = "test-company"
PROVIDER = "groq"
MIGRATION_ID = "020"


def get_db_connection():
    """Get database connection using environment variables or defaults"""
    try:
        conn = psycopg2.connect(
            host=os.getenv("CONTROL_PANEL_DB_HOST", "localhost"),
            port=os.getenv("CONTROL_PANEL_DB_PORT", "5432"),
            database=os.getenv("CONTROL_PANEL_DB_NAME", "gt2_admin"),
            user=os.getenv("CONTROL_PANEL_DB_USER", "postgres"),
            password=os.getenv("ADMIN_POSTGRES_PASSWORD", "dev_password_change_in_prod")
        )
        return conn
    except psycopg2.Error as e:
        logger.error(f"Database connection failed: {e}")
        raise


def get_encryption_key() -> str:
    """Get or generate Fernet encryption key"""
    key = os.getenv("API_KEY_ENCRYPTION_KEY")
    if not key:
        # Generate a new key - in production this should be persisted
        key = Fernet.generate_key().decode()
        logger.warning("Generated new API_KEY_ENCRYPTION_KEY - add to .env for persistence:")
        logger.warning(f"  API_KEY_ENCRYPTION_KEY={key}")
    return key


def check_env_key_exists() -> str | None:
    """Check if GROQ_API_KEY environment variable exists and is valid"""
    groq_key = os.getenv("GROQ_API_KEY")

    # Skip placeholder values
    placeholder_values = [
        "gsk_your_actual_groq_api_key_here",
        "gsk_placeholder",
        "",
        None
    ]

    if groq_key in placeholder_values:
        logger.info("GROQ_API_KEY not set or is placeholder - skipping migration")
        return None

    # Validate format
    if not groq_key.startswith("gsk_"):
        logger.warning("GROQ_API_KEY has invalid format (should start with 'gsk_')")
        return None

    return groq_key


def get_tenant_id(conn, domain: str) -> int | None:
    """Get tenant ID by domain"""
    with conn.cursor() as cur:
        cur.execute(
            "SELECT id FROM tenants WHERE domain = %s AND deleted_at IS NULL",
            (domain,)
        )
        row = cur.fetchone()
        return row[0] if row else None


def check_db_key_exists(conn, tenant_id: int) -> bool:
    """Check if Groq key already exists in database for tenant"""
    with conn.cursor() as cur:
        cur.execute(
            "SELECT api_keys FROM tenants WHERE id = %s",
            (tenant_id,)
        )
        row = cur.fetchone()
        if row and row[0]:
            api_keys = row[0] if isinstance(row[0], dict) else json.loads(row[0])
            if PROVIDER in api_keys and api_keys[PROVIDER].get("key"):
                return True
    return False


def migrate_api_key(conn, tenant_id: int, api_key: str, encryption_key: str) -> bool:
    """Encrypt and store API key in database"""
    try:
        cipher = Fernet(encryption_key.encode())
        encrypted_key = cipher.encrypt(api_key.encode()).decode()

        api_keys_data = {
            PROVIDER: {
                "key": encrypted_key,
                "secret": None,
                "enabled": True,
                "metadata": {
                    "migrated_from": "environment",
                    "migration_id": MIGRATION_ID,
                    "migration_date": datetime.utcnow().isoformat()
                },
                "updated_at": datetime.utcnow().isoformat(),
                "updated_by": f"migration-{MIGRATION_ID}"
            }
        }

        with conn.cursor() as cur:
            cur.execute(
                """
                UPDATE tenants
                SET api_keys = %s::jsonb,
                    api_key_encryption_version = 'v1',
                    updated_at = NOW()
                WHERE id = %s
                """,
                (json.dumps(api_keys_data), tenant_id)
            )
        conn.commit()
        return True

    except Exception as e:
        conn.rollback()
        logger.error(f"Failed to migrate API key: {e}")
        return False


def run_migration() -> bool:
    """Main migration logic"""
    logger.info(f"=== Migration {MIGRATION_ID}: Import GROQ_API_KEY from environment ===")

    # Step 1: Check if env var exists
    groq_key = check_env_key_exists()
    if not groq_key:
        logger.info("Migration skipped: No valid GROQ_API_KEY in environment")
        return True  # Not an error - just nothing to migrate

    logger.info(f"Found GROQ_API_KEY in environment (length: {len(groq_key)})")

    # Step 2: Connect to database
    try:
        conn = get_db_connection()
        logger.info("Connected to database")
    except Exception as e:
        logger.error(f"Failed to connect to database: {e}")
        return False

    try:
        # Step 3: Get tenant ID
        tenant_id = get_tenant_id(conn, TARGET_TENANT_DOMAIN)
        if not tenant_id:
            logger.warning(f"Tenant '{TARGET_TENANT_DOMAIN}' not found - skipping migration")
            logger.info("This is expected for fresh installs before tenant creation")
            return True

        logger.info(f"Found tenant '{TARGET_TENANT_DOMAIN}' with ID: {tenant_id}")

        # Step 4: Check if DB key already exists (idempotency)
        if check_db_key_exists(conn, tenant_id):
            logger.info("Migration already complete - Groq key exists in database")
            return True

        # Step 5: Get/generate encryption key
        encryption_key = get_encryption_key()

        # Step 6: Migrate the key
        logger.info(f"Migrating GROQ_API_KEY to database for tenant {tenant_id}...")
        if migrate_api_key(conn, tenant_id, groq_key, encryption_key):
            logger.info(f"=== Migration {MIGRATION_ID} completed successfully ===")
            logger.info("The GROQ_API_KEY env var can now be removed from docker-compose.yml")
            return True
        else:
            logger.error(f"Migration {MIGRATION_ID} failed")
            return False

    except Exception as e:
        logger.error(f"Migration failed with error: {e}")
        return False
    finally:
        conn.close()


if __name__ == "__main__":
    success = run_migration()
    sys.exit(0 if success else 1)
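
A hedged verification sketch to pair with the migration above: it reads the tenant row back and decrypts with the same Fernet key. verify_stored_key is hypothetical (not part of the script); the Fernet and cursor calls are standard cryptography/psycopg2 API:

    # Illustrative only - run with the same API_KEY_ENCRYPTION_KEY used to migrate
    import json, os
    from cryptography.fernet import Fernet

    def verify_stored_key(conn, tenant_id: int) -> bool:
        cipher = Fernet(os.environ["API_KEY_ENCRYPTION_KEY"].encode())
        with conn.cursor() as cur:
            cur.execute("SELECT api_keys FROM tenants WHERE id = %s", (tenant_id,))
            row = cur.fetchone()
        api_keys = row[0] if isinstance(row[0], dict) else json.loads(row[0])
        plaintext = cipher.decrypt(api_keys["groq"]["key"].encode()).decode()
        return plaintext.startswith("gsk_")  # format check only; never log the key
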
432 scripts/migrations/021_add_nvidia_models.sql Normal file
@@ -0,0 +1,432 @@
-- Migration: 021_add_nvidia_models.sql
-- Description: Add NVIDIA NIM models to model_configs table
-- Date: 2025-12-08
-- Issue: #266 - Add NVIDIA API endpoint support
-- Reference: https://build.nvidia.com/models

-- NVIDIA NIM Models (build.nvidia.com)
-- Pricing: Estimated based on third-party providers and model size (Dec 2025)
-- Models selected: SOTA reasoning, coding, and general-purpose LLMs

INSERT INTO model_configs (
    model_id, name, version, provider, model_type, endpoint,
    context_window, max_tokens, cost_per_million_input, cost_per_million_output,
    capabilities, is_active, description, created_at, updated_at,
    request_count, error_count, success_rate, avg_latency_ms, health_status
)
VALUES
-- ==========================================
-- NVIDIA Llama Nemotron Family (Flagship)
-- ==========================================

-- Llama 3.3 Nemotron Super 49B v1 - Latest flagship reasoning model
('nvidia/llama-3.3-nemotron-super-49b-v1', 'NVIDIA Llama 3.3 Nemotron Super 49B', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 131072, 8192, 0.5, 1.5,
 '{"streaming": true, "function_calling": true, "reasoning": true}', true,
 'NVIDIA flagship reasoning model - best accuracy/throughput on single GPU',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- Llama 3.1 Nemotron Ultra 253B - Maximum accuracy
('nvidia/llama-3.1-nemotron-ultra-253b-v1', 'NVIDIA Llama 3.1 Nemotron Ultra 253B', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 131072, 8192, 0.6, 1.8,
 '{"streaming": true, "function_calling": true, "reasoning": true}', true,
 'Maximum agentic accuracy for scientific reasoning, math, and coding',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- Nemotron Nano 8B - Edge/PC deployment
('nvidia/llama-3.1-nemotron-nano-8b-v1', 'NVIDIA Llama 3.1 Nemotron Nano 8B', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 131072, 8192, 0.02, 0.06,
 '{"streaming": true, "function_calling": true}', true,
 'Cost-effective model optimized for edge devices and low latency',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- ==========================================
-- Meta Llama 3.3 (via NVIDIA NIM)
-- ==========================================

-- Llama 3.3 70B Instruct - Latest Llama
('nvidia/meta-llama-3.3-70b-instruct', 'NVIDIA Meta Llama 3.3 70B Instruct', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 4096, 0.13, 0.4,
 '{"streaming": true, "function_calling": true}', true,
 'Latest Meta Llama 3.3 - excellent for instruction following',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- ==========================================
-- DeepSeek Models (via NVIDIA NIM)
-- ==========================================

-- DeepSeek V3 - Hybrid inference with Think/Non-Think modes
('nvidia/deepseek-ai-deepseek-v3', 'NVIDIA DeepSeek V3', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 8192, 0.5, 1.5,
 '{"streaming": true, "function_calling": true, "reasoning": true}', true,
 'Hybrid LLM with Think/Non-Think modes, 128K context, strong tool use',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- DeepSeek R1 - Enhanced reasoning
('nvidia/deepseek-ai-deepseek-r1', 'NVIDIA DeepSeek R1', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 8192, 0.6, 2.4,
 '{"streaming": true, "function_calling": true, "reasoning": true}', true,
 'Enhanced reasoning model - reduced hallucination, strong math/coding',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- ==========================================
-- Kimi K2 (Moonshot AI via NVIDIA NIM)
-- ==========================================

('nvidia/moonshot-ai-kimi-k2-instruct', 'NVIDIA Kimi K2 Instruct', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 8192, 0.4, 1.2,
 '{"streaming": true, "function_calling": true, "reasoning": true}', true,
 'Long context window with enhanced reasoning capabilities',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- ==========================================
-- Mistral Models (via NVIDIA NIM)
-- ==========================================

-- Mistral Large 3 - State-of-the-art MoE
('nvidia/mistralai-mistral-large-3-instruct', 'NVIDIA Mistral Large 3 Instruct', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 8192, 0.8, 2.4,
 '{"streaming": true, "function_calling": true}', true,
 'State-of-the-art general purpose MoE model',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- ==========================================
-- Qwen Models (via NVIDIA NIM)
-- ==========================================

-- Qwen 3 - Ultra-long context (131K with YaRN extension)
('nvidia/qwen-qwen3-235b-a22b-fp8-instruct', 'NVIDIA Qwen 3 235B Instruct', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 131072, 8192, 0.7, 2.1,
 '{"streaming": true, "function_calling": true}', true,
 'Ultra-long context AI with strong multilingual support',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- ==========================================
-- Meta Llama 3.1 (via NVIDIA NIM)
-- ==========================================

-- Llama 3.1 405B - Largest open model
('nvidia/meta-llama-3.1-405b-instruct', 'NVIDIA Meta Llama 3.1 405B Instruct', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 4096, 1.0, 3.0,
 '{"streaming": true, "function_calling": true}', true,
 'Largest open-source LLM - exceptional quality across all tasks',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- Llama 3.1 70B
('nvidia/meta-llama-3.1-70b-instruct', 'NVIDIA Meta Llama 3.1 70B Instruct', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 4096, 0.13, 0.4,
 '{"streaming": true, "function_calling": true}', true,
 'Excellent balance of quality and speed',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- Llama 3.1 8B - Fast and efficient
('nvidia/meta-llama-3.1-8b-instruct', 'NVIDIA Meta Llama 3.1 8B Instruct', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 4096, 0.02, 0.06,
 '{"streaming": true, "function_calling": true}', true,
 'Fast and cost-effective for simpler tasks',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- ==========================================
-- OpenAI GPT-OSS Models (via NVIDIA NIM)
-- Released August 2025 - Apache 2.0 License
-- ==========================================

-- GPT-OSS 120B via NVIDIA NIM - Production flagship, MoE architecture (117B params, 5.7B active)
('nvidia/openai-gpt-oss-120b', 'NVIDIA OpenAI GPT-OSS 120B', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 8192, 0.7, 2.1,
 '{"streaming": true, "function_calling": true, "reasoning": true, "tool_use": true}', true,
 'OpenAI flagship open model via NVIDIA NIM - production-grade reasoning, fits single H100 GPU',
 NOW(), NOW(), 0, 0, 100.0, 0, 'unknown'),

-- GPT-OSS 20B via NVIDIA NIM - Lightweight MoE for edge/local (21B params, 4B active)
('nvidia/openai-gpt-oss-20b', 'NVIDIA OpenAI GPT-OSS 20B', '1.0',
 'nvidia', 'llm', 'https://integrate.api.nvidia.com/v1/chat/completions',
 128000, 8192, 0.15, 0.45,
 '{"streaming": true, "function_calling": true, "reasoning": true, "tool_use": true}',
|
||||
true,
|
||||
'OpenAI lightweight open model via NVIDIA NIM - low latency, runs in 16GB VRAM',
|
||||
NOW(),
|
||||
NOW(),
|
||||
0,
|
||||
0,
|
||||
100.0,
|
||||
0,
|
||||
'unknown'
|
||||
)
|
||||
|
||||
ON CONFLICT (model_id) DO UPDATE SET
|
||||
name = EXCLUDED.name,
|
||||
version = EXCLUDED.version,
|
||||
provider = EXCLUDED.provider,
|
||||
endpoint = EXCLUDED.endpoint,
|
||||
context_window = EXCLUDED.context_window,
|
||||
max_tokens = EXCLUDED.max_tokens,
|
||||
cost_per_million_input = EXCLUDED.cost_per_million_input,
|
||||
cost_per_million_output = EXCLUDED.cost_per_million_output,
|
||||
capabilities = EXCLUDED.capabilities,
|
||||
is_active = EXCLUDED.is_active,
|
||||
description = EXCLUDED.description,
|
||||
updated_at = NOW();
|
||||
|
||||
-- Assign NVIDIA models to all existing tenants with 1000 RPM rate limits
|
||||
-- Note: model_config_id (UUID) is the foreign key, model_id kept for convenience
|
||||
INSERT INTO tenant_model_configs (tenant_id, model_config_id, model_id, is_enabled, priority, rate_limits, created_at, updated_at)
|
||||
SELECT
|
||||
t.id,
|
||||
m.id, -- UUID foreign key (auto-generated in model_configs)
|
||||
m.model_id, -- String identifier (kept for easier queries)
|
||||
true,
|
||||
5,
|
||||
'{"max_requests_per_hour": 1000, "max_tokens_per_request": 4000, "concurrent_requests": 5, "max_cost_per_hour": 10.0, "requests_per_minute": 1000, "tokens_per_minute": 100000, "max_concurrent": 10}'::json,
|
||||
NOW(),
|
||||
NOW()
|
||||
FROM tenants t
|
||||
CROSS JOIN model_configs m
|
||||
WHERE m.provider = 'nvidia'
|
||||
ON CONFLICT (tenant_id, model_config_id) DO UPDATE SET
|
||||
rate_limits = EXCLUDED.rate_limits;
|
||||
|
||||
-- Log migration completion
|
||||
DO $$
|
||||
BEGIN
|
||||
RAISE NOTICE 'Migration 021: Added NVIDIA NIM models (Nemotron, Llama 3.3, DeepSeek, Kimi K2, Mistral, Qwen, OpenAI GPT-OSS) to model_configs and assigned to tenants';
|
||||
END $$;
|
||||
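-- Verification sketch for the seed above (not part of the migration; it assumes
-- psql access to the control-panel database and that tenants exposes a name
-- column). Every tenant should report the same count of enabled NVIDIA models.
-- SELECT t.name, COUNT(*) AS nvidia_models
-- FROM tenant_model_configs tmc
-- JOIN model_configs mc ON mc.id = tmc.model_config_id
-- JOIN tenants t ON t.id = tmc.tenant_id
-- WHERE mc.provider = 'nvidia' AND tmc.is_enabled
-- GROUP BY t.name;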
238
scripts/migrations/022_add_session_management.sql
Normal file
@@ -0,0 +1,238 @@
-- Migration: 022_add_session_management.sql
-- Description: Server-side session tracking for OWASP/NIST compliance
-- Date: 2025-12-08
-- Issue: #264 - Session timeout warning not appearing
--
-- Timeout Configuration:
--   Idle Timeout: 4 hours (240 minutes) - covers meetings, lunch, context-switching
--   Absolute Timeout: 8 hours (maximum session lifetime) - full work day
--   Warning Threshold: 5 minutes before idle expiry

-- Active sessions table for server-side session tracking.
-- This is the authoritative source of truth for session validity,
-- not the JWT expiration time alone.
CREATE TABLE IF NOT EXISTS sessions (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    session_token_hash VARCHAR(64) NOT NULL UNIQUE, -- SHA-256 of session token (never store plaintext)

    -- Session timing (NIST SP 800-63B compliant)
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    last_activity_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    absolute_expires_at TIMESTAMP WITH TIME ZONE NOT NULL, -- 8 hours from creation

    -- Session metadata for security auditing
    ip_address VARCHAR(45), -- IPv6 compatible (max 45 chars)
    user_agent TEXT,
    tenant_id INTEGER REFERENCES tenants(id),

    -- Session state
    is_active BOOLEAN NOT NULL DEFAULT true,
    revoked_at TIMESTAMP WITH TIME ZONE,
    revoke_reason VARCHAR(50), -- 'logout', 'idle_timeout', 'absolute_timeout', 'admin_revoke', 'password_change', 'cleanup_stale'
    ended_at TIMESTAMP WITH TIME ZONE, -- When session ended (any reason: logout, timeout, etc.)
    app_type VARCHAR(20) NOT NULL DEFAULT 'control_panel' -- 'control_panel' or 'tenant_app'
);

-- Indexes for session lookup and cleanup
CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);
CREATE INDEX IF NOT EXISTS idx_sessions_token_hash ON sessions(session_token_hash);
CREATE INDEX IF NOT EXISTS idx_sessions_last_activity ON sessions(last_activity_at);
CREATE INDEX IF NOT EXISTS idx_sessions_absolute_expires ON sessions(absolute_expires_at);
CREATE INDEX IF NOT EXISTS idx_sessions_active ON sessions(is_active) WHERE is_active = true;
CREATE INDEX IF NOT EXISTS idx_sessions_tenant_id ON sessions(tenant_id);
CREATE INDEX IF NOT EXISTS idx_sessions_ended_at ON sessions(ended_at);
CREATE INDEX IF NOT EXISTS idx_sessions_app_type ON sessions(app_type);

-- Function to clean up expired sessions (run periodically via cron or scheduled task)
CREATE OR REPLACE FUNCTION cleanup_expired_sessions()
RETURNS INTEGER AS $$
DECLARE
    rows_affected INTEGER := 0;
    idle_rows INTEGER := 0;
    idle_timeout_minutes INTEGER := 240; -- 4 hours
    absolute_cutoff TIMESTAMP WITH TIME ZONE;
    idle_cutoff TIMESTAMP WITH TIME ZONE;
BEGIN
    absolute_cutoff := CURRENT_TIMESTAMP;
    idle_cutoff := CURRENT_TIMESTAMP - (idle_timeout_minutes * INTERVAL '1 minute');

    -- Mark sessions as inactive if absolute timeout exceeded
    UPDATE sessions
    SET is_active = false,
        revoked_at = CURRENT_TIMESTAMP,
        ended_at = CURRENT_TIMESTAMP,
        revoke_reason = 'absolute_timeout'
    WHERE is_active = true
      AND absolute_expires_at < absolute_cutoff;

    GET DIAGNOSTICS rows_affected = ROW_COUNT;

    -- Mark sessions as inactive if idle timeout exceeded
    UPDATE sessions
    SET is_active = false,
        revoked_at = CURRENT_TIMESTAMP,
        ended_at = CURRENT_TIMESTAMP,
        revoke_reason = 'idle_timeout'
    WHERE is_active = true
      AND last_activity_at < idle_cutoff;

    GET DIAGNOSTICS idle_rows = ROW_COUNT;
    rows_affected := rows_affected + idle_rows;

    RETURN rows_affected;
END;
$$ LANGUAGE plpgsql;

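-- Scheduling sketch (an assumption, not part of this migration: it presumes the
-- pg_cron extension is installed; GT 2.0 may instead drive cleanup from an
-- application-level scheduler as the comment above suggests).
-- SELECT cron.schedule('session-cleanup', '*/15 * * * *',
--                      $cron$SELECT cleanup_expired_sessions()$cron$);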
-- Function to get session status (for internal API validation)
CREATE OR REPLACE FUNCTION get_session_status(p_token_hash VARCHAR(64))
RETURNS TABLE (
    is_valid BOOLEAN,
    expiry_reason VARCHAR(50),
    seconds_until_idle_timeout INTEGER,
    seconds_until_absolute_timeout INTEGER,
    user_id INTEGER,
    tenant_id INTEGER
) AS $$
DECLARE
    v_session RECORD;
    v_idle_timeout_minutes INTEGER := 240; -- 4 hours
    v_warning_threshold_minutes INTEGER := 5;
    v_idle_expires_at TIMESTAMP WITH TIME ZONE;
    v_seconds_until_idle INTEGER;
    v_seconds_until_absolute INTEGER;
BEGIN
    -- Find the session
    SELECT s.* INTO v_session
    FROM sessions s
    WHERE s.session_token_hash = p_token_hash
      AND s.is_active = true;

    -- Session not found or inactive
    IF NOT FOUND THEN
        RETURN QUERY SELECT
            false::BOOLEAN,
            NULL::VARCHAR(50),
            NULL::INTEGER,
            NULL::INTEGER,
            NULL::INTEGER,
            NULL::INTEGER;
        RETURN;
    END IF;

    -- Calculate expiration times
    v_idle_expires_at := v_session.last_activity_at + (v_idle_timeout_minutes * INTERVAL '1 minute');

    -- Check absolute timeout first
    IF CURRENT_TIMESTAMP >= v_session.absolute_expires_at THEN
        -- Mark session as expired
        UPDATE sessions
        SET is_active = false,
            revoked_at = CURRENT_TIMESTAMP,
            ended_at = CURRENT_TIMESTAMP,
            revoke_reason = 'absolute_timeout'
        WHERE session_token_hash = p_token_hash;

        RETURN QUERY SELECT
            false::BOOLEAN,
            'absolute'::VARCHAR(50),
            NULL::INTEGER,
            NULL::INTEGER,
            v_session.user_id,
            v_session.tenant_id;
        RETURN;
    END IF;

    -- Check idle timeout
    IF CURRENT_TIMESTAMP >= v_idle_expires_at THEN
        -- Mark session as expired
        UPDATE sessions
        SET is_active = false,
            revoked_at = CURRENT_TIMESTAMP,
            ended_at = CURRENT_TIMESTAMP,
            revoke_reason = 'idle_timeout'
        WHERE session_token_hash = p_token_hash;

        RETURN QUERY SELECT
            false::BOOLEAN,
            'idle'::VARCHAR(50),
            NULL::INTEGER,
            NULL::INTEGER,
            v_session.user_id,
            v_session.tenant_id;
        RETURN;
    END IF;

    -- Session is valid - calculate remaining times
    v_seconds_until_idle := EXTRACT(EPOCH FROM (v_idle_expires_at - CURRENT_TIMESTAMP))::INTEGER;
    v_seconds_until_absolute := EXTRACT(EPOCH FROM (v_session.absolute_expires_at - CURRENT_TIMESTAMP))::INTEGER;

    RETURN QUERY SELECT
        true::BOOLEAN,
        NULL::VARCHAR(50),
        v_seconds_until_idle,
        v_seconds_until_absolute,
        v_session.user_id,
        v_session.tenant_id;
END;
$$ LANGUAGE plpgsql;

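-- Usage sketch (illustrative, not part of the migration): the API layer stores
-- only a SHA-256 of the bearer token, so validation looks the session up by
-- hash. digest() is from pgcrypto and the raw token value here is made up.
-- SELECT * FROM get_session_status(encode(digest('raw-session-token', 'sha256'), 'hex'));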
-- Function to update session activity (called on each authenticated request)
CREATE OR REPLACE FUNCTION update_session_activity(p_token_hash VARCHAR(64))
RETURNS BOOLEAN AS $$
DECLARE
    v_updated INTEGER;
BEGIN
    UPDATE sessions
    SET last_activity_at = CURRENT_TIMESTAMP
    WHERE session_token_hash = p_token_hash
      AND is_active = true;

    GET DIAGNOSTICS v_updated = ROW_COUNT;
    RETURN v_updated > 0;
END;
$$ LANGUAGE plpgsql;

-- Function to revoke a session
CREATE OR REPLACE FUNCTION revoke_session(p_token_hash VARCHAR(64), p_reason VARCHAR(50) DEFAULT 'logout')
RETURNS BOOLEAN AS $$
DECLARE
    v_updated INTEGER;
BEGIN
    UPDATE sessions
    SET is_active = false,
        revoked_at = CURRENT_TIMESTAMP,
        ended_at = CURRENT_TIMESTAMP,
        revoke_reason = p_reason
    WHERE session_token_hash = p_token_hash
      AND is_active = true;

    GET DIAGNOSTICS v_updated = ROW_COUNT;
    RETURN v_updated > 0;
END;
$$ LANGUAGE plpgsql;

-- Function to revoke all sessions for a user (e.g., on password change)
CREATE OR REPLACE FUNCTION revoke_all_user_sessions(p_user_id INTEGER, p_reason VARCHAR(50) DEFAULT 'password_change')
RETURNS INTEGER AS $$
DECLARE
    v_updated INTEGER;
BEGIN
    UPDATE sessions
    SET is_active = false,
        revoked_at = CURRENT_TIMESTAMP,
        ended_at = CURRENT_TIMESTAMP,
        revoke_reason = p_reason
    WHERE user_id = p_user_id
      AND is_active = true;

    GET DIAGNOSTICS v_updated = ROW_COUNT;
    RETURN v_updated;
END;
$$ LANGUAGE plpgsql;

-- Log migration completion
DO $$
BEGIN
    RAISE NOTICE 'Migration 022: Created sessions table and session management functions for OWASP/NIST compliance';
END $$;
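-- Usage sketch (illustrative): on a password change the auth service revokes
-- every live session for that account; the user id 42 is made up.
-- SELECT revoke_all_user_sessions(42, 'password_change');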
278
scripts/migrations/023_add_uuid_primary_key_to_model_configs.sql
Normal file
@@ -0,0 +1,278 @@
-- Migration: 023_add_uuid_primary_key_to_model_configs.sql
-- Description: Add UUID primary key to model_configs table instead of using model_id string
-- This fixes the database design issue where model_id (a human-readable string) was used as primary key
-- Author: Claude Code
-- Date: 2025-12-08

-- ============================================================================
-- STEP 1: Ensure uuid-ossp extension is available
-- ============================================================================
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- ============================================================================
-- STEP 2: Add new UUID 'id' column to model_configs
-- ============================================================================
DO $$
BEGIN
    -- Check if 'id' column already exists
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'model_configs' AND column_name = 'id' AND table_schema = 'public'
    ) THEN
        -- Add the new UUID column
        ALTER TABLE model_configs ADD COLUMN id UUID DEFAULT uuid_generate_v4();

        -- Populate UUIDs for all existing rows
        UPDATE model_configs SET id = uuid_generate_v4() WHERE id IS NULL;

        -- Make id NOT NULL
        ALTER TABLE model_configs ALTER COLUMN id SET NOT NULL;

        RAISE NOTICE 'Added id column to model_configs';
    ELSE
        RAISE NOTICE 'id column already exists in model_configs';
    END IF;
END $$;

-- ============================================================================
-- STEP 3: Add new UUID 'model_config_id' column to tenant_model_configs
-- ============================================================================
DO $$
BEGIN
    -- Check if 'model_config_id' column already exists
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'tenant_model_configs' AND column_name = 'model_config_id' AND table_schema = 'public'
    ) THEN
        -- Add the new UUID column
        ALTER TABLE tenant_model_configs ADD COLUMN model_config_id UUID;

        RAISE NOTICE 'Added model_config_id column to tenant_model_configs';
    ELSE
        RAISE NOTICE 'model_config_id column already exists in tenant_model_configs';
    END IF;
END $$;

-- ============================================================================
-- STEP 4: Populate model_config_id based on model_id mapping
-- ============================================================================
UPDATE tenant_model_configs tmc
SET model_config_id = mc.id
FROM model_configs mc
WHERE tmc.model_id = mc.model_id
  AND tmc.model_config_id IS NULL;

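-- Sanity-check sketch (illustrative): rows still NULL after this backfill have
-- a model_id with no matching model_configs entry and would make STEP 10 abort.
-- SELECT model_id, COUNT(*) FROM tenant_model_configs
-- WHERE model_config_id IS NULL GROUP BY model_id;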
-- ============================================================================
-- STEP 5: Drop the old foreign key constraint
-- ============================================================================
DO $$
BEGIN
    -- Drop foreign key if it exists
    IF EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE constraint_name = 'tenant_model_configs_model_id_fkey'
          AND table_name = 'tenant_model_configs'
          AND table_schema = 'public'
    ) THEN
        ALTER TABLE tenant_model_configs DROP CONSTRAINT tenant_model_configs_model_id_fkey;
        RAISE NOTICE 'Dropped old foreign key constraint tenant_model_configs_model_id_fkey';
    ELSE
        RAISE NOTICE 'Foreign key constraint tenant_model_configs_model_id_fkey does not exist';
    END IF;
END $$;

-- ============================================================================
-- STEP 6: Drop old unique constraint on (tenant_id, model_id)
-- ============================================================================
DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE constraint_name = 'unique_tenant_model'
          AND table_name = 'tenant_model_configs'
          AND table_schema = 'public'
    ) THEN
        ALTER TABLE tenant_model_configs DROP CONSTRAINT unique_tenant_model;
        RAISE NOTICE 'Dropped old unique constraint unique_tenant_model';
    ELSE
        RAISE NOTICE 'Unique constraint unique_tenant_model does not exist';
    END IF;
END $$;

-- ============================================================================
-- STEP 7: Drop the old primary key on model_configs.model_id
-- ============================================================================
DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE constraint_name = 'model_configs_pkey'
          AND constraint_type = 'PRIMARY KEY'
          AND table_name = 'model_configs'
          AND table_schema = 'public'
    ) THEN
        ALTER TABLE model_configs DROP CONSTRAINT model_configs_pkey;
        RAISE NOTICE 'Dropped old primary key model_configs_pkey';
    ELSE
        RAISE NOTICE 'Primary key model_configs_pkey does not exist';
    END IF;
END $$;

-- ============================================================================
-- STEP 8: Add new primary key on model_configs.id
-- ============================================================================
DO $$
BEGIN
    -- Check if primary key already exists on id column
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.table_constraints tc
        JOIN information_schema.key_column_usage kcu ON tc.constraint_name = kcu.constraint_name
        WHERE tc.table_name = 'model_configs'
          AND tc.constraint_type = 'PRIMARY KEY'
          AND kcu.column_name = 'id'
          AND tc.table_schema = 'public'
    ) THEN
        ALTER TABLE model_configs ADD CONSTRAINT model_configs_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Added new primary key on model_configs.id';
    ELSE
        RAISE NOTICE 'Primary key on model_configs.id already exists';
    END IF;
END $$;

-- ============================================================================
-- STEP 9: Add unique constraint on model_configs.model_id
-- ============================================================================
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE constraint_name = 'model_configs_model_id_unique'
          AND table_name = 'model_configs'
          AND table_schema = 'public'
    ) THEN
        ALTER TABLE model_configs ADD CONSTRAINT model_configs_model_id_unique UNIQUE (model_id);
        RAISE NOTICE 'Added unique constraint on model_configs.model_id';
    ELSE
        RAISE NOTICE 'Unique constraint on model_configs.model_id already exists';
    END IF;
END $$;

-- ============================================================================
-- STEP 10: Make model_config_id NOT NULL and add foreign key
-- ============================================================================
DO $$
BEGIN
    -- Make model_config_id NOT NULL (only if all values are populated)
    IF EXISTS (
        SELECT 1 FROM tenant_model_configs WHERE model_config_id IS NULL
    ) THEN
        RAISE EXCEPTION 'Cannot make model_config_id NOT NULL: some values are NULL. Run the UPDATE first.';
    END IF;

    -- Alter column to NOT NULL
    ALTER TABLE tenant_model_configs ALTER COLUMN model_config_id SET NOT NULL;
    RAISE NOTICE 'Set model_config_id to NOT NULL';
EXCEPTION
    WHEN others THEN
        RAISE NOTICE 'Could not set model_config_id to NOT NULL: %', SQLERRM;
END $$;

-- ============================================================================
-- STEP 11: Add foreign key from tenant_model_configs.model_config_id to model_configs.id
-- ============================================================================
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE constraint_name = 'tenant_model_configs_model_config_id_fkey'
          AND table_name = 'tenant_model_configs'
          AND table_schema = 'public'
    ) THEN
        ALTER TABLE tenant_model_configs
            ADD CONSTRAINT tenant_model_configs_model_config_id_fkey
            FOREIGN KEY (model_config_id) REFERENCES model_configs(id) ON DELETE CASCADE;
        RAISE NOTICE 'Added foreign key on tenant_model_configs.model_config_id';
    ELSE
        RAISE NOTICE 'Foreign key tenant_model_configs_model_config_id_fkey already exists';
    END IF;
END $$;

-- ============================================================================
-- STEP 12: Add new unique constraint on (tenant_id, model_config_id)
-- ============================================================================
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE constraint_name = 'unique_tenant_model_config'
          AND table_name = 'tenant_model_configs'
          AND table_schema = 'public'
    ) THEN
        ALTER TABLE tenant_model_configs
            ADD CONSTRAINT unique_tenant_model_config UNIQUE (tenant_id, model_config_id);
        RAISE NOTICE 'Added unique constraint unique_tenant_model_config';
    ELSE
        RAISE NOTICE 'Unique constraint unique_tenant_model_config already exists';
    END IF;
END $$;

-- ============================================================================
-- STEP 13: Add index on model_configs.model_id for fast lookups
-- ============================================================================
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_indexes
        WHERE tablename = 'model_configs'
          AND indexname = 'ix_model_configs_model_id'
          AND schemaname = 'public'
    ) THEN
        CREATE INDEX ix_model_configs_model_id ON model_configs(model_id);
        RAISE NOTICE 'Created index ix_model_configs_model_id';
    ELSE
        RAISE NOTICE 'Index ix_model_configs_model_id already exists';
    END IF;
END $$;

-- ============================================================================
-- STEP 14: Add index on tenant_model_configs.model_config_id
-- ============================================================================
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_indexes
        WHERE tablename = 'tenant_model_configs'
          AND indexname = 'ix_tenant_model_configs_model_config_id'
          AND schemaname = 'public'
    ) THEN
        CREATE INDEX ix_tenant_model_configs_model_config_id ON tenant_model_configs(model_config_id);
        RAISE NOTICE 'Created index ix_tenant_model_configs_model_config_id';
    ELSE
        RAISE NOTICE 'Index ix_tenant_model_configs_model_config_id already exists';
    END IF;
END $$;

-- ============================================================================
-- VERIFICATION: Show final schema
-- ============================================================================
SELECT 'model_configs schema:' AS info;
SELECT column_name, data_type, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = 'model_configs' AND table_schema = 'public'
ORDER BY ordinal_position;

SELECT 'tenant_model_configs schema:' AS info;
SELECT column_name, data_type, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = 'tenant_model_configs' AND table_schema = 'public'
ORDER BY ordinal_position;

SELECT 'model_configs constraints:' AS info;
SELECT constraint_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = 'model_configs' AND table_schema = 'public';

SELECT 'tenant_model_configs constraints:' AS info;
SELECT constraint_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = 'tenant_model_configs' AND table_schema = 'public';
51
scripts/migrations/024_allow_same_model_id_different_providers.sql
Normal file
@@ -0,0 +1,51 @@
-- Migration: 024_allow_same_model_id_different_providers.sql
-- Description: Allow same model_id with different providers
-- The unique constraint should be on (model_id, provider) not just model_id
-- This allows the same model to be registered from multiple providers (e.g., Groq and NVIDIA)
-- Author: Claude Code
-- Date: 2025-12-08

-- ============================================================================
-- STEP 1: Drop the unique constraint on model_id alone
-- ============================================================================
DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE constraint_name = 'model_configs_model_id_unique'
          AND table_name = 'model_configs'
          AND table_schema = 'public'
    ) THEN
        ALTER TABLE model_configs DROP CONSTRAINT model_configs_model_id_unique;
        RAISE NOTICE 'Dropped unique constraint model_configs_model_id_unique';
    ELSE
        RAISE NOTICE 'Constraint model_configs_model_id_unique does not exist';
    END IF;
END $$;

-- ============================================================================
-- STEP 2: Add new unique constraint on (model_id, provider)
-- ============================================================================
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE constraint_name = 'model_configs_model_id_provider_unique'
          AND table_name = 'model_configs'
          AND table_schema = 'public'
    ) THEN
        ALTER TABLE model_configs ADD CONSTRAINT model_configs_model_id_provider_unique UNIQUE (model_id, provider);
        RAISE NOTICE 'Added unique constraint on (model_id, provider)';
    ELSE
        RAISE NOTICE 'Constraint model_configs_model_id_provider_unique already exists';
    END IF;
END $$;

-- ============================================================================
-- VERIFICATION
-- ============================================================================
SELECT 'model_configs constraints after migration:' AS info;
SELECT constraint_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = 'model_configs' AND table_schema = 'public'
ORDER BY constraint_type, constraint_name;
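-- Illustrative effect of the new constraint (not part of the migration): after
-- 024, rows ('openai/gpt-oss-120b', 'groq') and ('openai/gpt-oss-120b', 'nvidia')
-- can coexist, while inserting a second ('openai/gpt-oss-120b', 'nvidia') row
-- still fails with a unique_violation.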
117
scripts/migrations/025_fix_nvidia_model_names.sql
Normal file
@@ -0,0 +1,117 @@
-- Migration 025: Fix NVIDIA model names to match API format
--
-- Problem: Model names stored with incorrect format (e.g., nvidia/meta-llama-3.1-8b-instruct)
-- Solution: Update to match NVIDIA NIM API expected format (e.g., meta/llama-3.1-8b-instruct)
--
-- NVIDIA NIM API model naming:
--   - Models from Meta: meta/llama-3.1-8b-instruct (NOT nvidia/meta-llama-*)
--   - Models from NVIDIA: nvidia/llama-3.1-nemotron-70b-instruct
--   - Models from Mistral: mistralai/mistral-large-3-instruct
--   - Models from DeepSeek: deepseek-ai/deepseek-v3
--   - Models from OpenAI-compatible: openai/gpt-oss-120b (already correct in groq provider)

-- Idempotency: Only update if old format exists
DO $$
BEGIN
    -- Fix Meta Llama models (remove nvidia/ prefix for meta models)
    UPDATE model_configs
    SET model_id = 'meta/llama-3.1-8b-instruct'
    WHERE model_id = 'nvidia/meta-llama-3.1-8b-instruct' AND provider = 'nvidia';

    UPDATE model_configs
    SET model_id = 'meta/llama-3.1-70b-instruct'
    WHERE model_id = 'nvidia/meta-llama-3.1-70b-instruct' AND provider = 'nvidia';

    UPDATE model_configs
    SET model_id = 'meta/llama-3.1-405b-instruct'
    WHERE model_id = 'nvidia/meta-llama-3.1-405b-instruct' AND provider = 'nvidia';

    UPDATE model_configs
    SET model_id = 'meta/llama-3.3-70b-instruct'
    WHERE model_id = 'nvidia/meta-llama-3.3-70b-instruct' AND provider = 'nvidia';

    -- Fix DeepSeek models
    UPDATE model_configs
    SET model_id = 'deepseek-ai/deepseek-v3'
    WHERE model_id = 'nvidia/deepseek-ai-deepseek-v3' AND provider = 'nvidia';

    UPDATE model_configs
    SET model_id = 'deepseek-ai/deepseek-r1'
    WHERE model_id = 'nvidia/deepseek-ai-deepseek-r1' AND provider = 'nvidia';

    -- Fix Mistral models
    UPDATE model_configs
    SET model_id = 'mistralai/mistral-large-3-instruct'
    WHERE model_id = 'nvidia/mistralai-mistral-large-3-instruct' AND provider = 'nvidia';

    -- Fix Moonshot/Kimi models
    UPDATE model_configs
    SET model_id = 'moonshot-ai/kimi-k2-instruct'
    WHERE model_id = 'nvidia/moonshot-ai-kimi-k2-instruct' AND provider = 'nvidia';

    -- Fix Qwen models
    UPDATE model_configs
    SET model_id = 'qwen/qwen3-235b-a22b-fp8-instruct'
    WHERE model_id = 'nvidia/qwen-qwen3-235b-a22b-fp8-instruct' AND provider = 'nvidia';

    -- Fix OpenAI-compatible models (for NVIDIA provider)
    UPDATE model_configs
    SET model_id = 'openai/gpt-oss-120b'
    WHERE model_id = 'nvidia/openai-gpt-oss-120b' AND provider = 'nvidia';

    UPDATE model_configs
    SET model_id = 'openai/gpt-oss-20b'
    WHERE model_id = 'nvidia/openai-gpt-oss-20b' AND provider = 'nvidia';

    -- Also update tenant_model_configs to match (if they reference old model_ids)
    UPDATE tenant_model_configs
    SET model_id = 'meta/llama-3.1-8b-instruct'
    WHERE model_id = 'nvidia/meta-llama-3.1-8b-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'meta/llama-3.1-70b-instruct'
    WHERE model_id = 'nvidia/meta-llama-3.1-70b-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'meta/llama-3.1-405b-instruct'
    WHERE model_id = 'nvidia/meta-llama-3.1-405b-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'meta/llama-3.3-70b-instruct'
    WHERE model_id = 'nvidia/meta-llama-3.3-70b-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'deepseek-ai/deepseek-v3'
    WHERE model_id = 'nvidia/deepseek-ai-deepseek-v3';

    UPDATE tenant_model_configs
    SET model_id = 'deepseek-ai/deepseek-r1'
    WHERE model_id = 'nvidia/deepseek-ai-deepseek-r1';

    UPDATE tenant_model_configs
    SET model_id = 'mistralai/mistral-large-3-instruct'
    WHERE model_id = 'nvidia/mistralai-mistral-large-3-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'moonshot-ai/kimi-k2-instruct'
    WHERE model_id = 'nvidia/moonshot-ai-kimi-k2-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'qwen/qwen3-235b-a22b-fp8-instruct'
    WHERE model_id = 'nvidia/qwen-qwen3-235b-a22b-fp8-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'openai/gpt-oss-120b'
    WHERE model_id = 'nvidia/openai-gpt-oss-120b';

    UPDATE tenant_model_configs
    SET model_id = 'openai/gpt-oss-20b'
    WHERE model_id = 'nvidia/openai-gpt-oss-20b';

    RAISE NOTICE 'Migration 025: Fixed NVIDIA model names to match API format';
END $$;

-- Log migration completion
INSERT INTO system_versions (version, component, description, applied_at)
VALUES ('025', 'model_configs', 'Fixed NVIDIA model names to match API format', NOW())
ON CONFLICT DO NOTHING;
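-- Post-migration check sketch (illustrative): once 025 has run, no NVIDIA rows
-- should keep the old concatenated Meta prefix.
-- SELECT model_id FROM model_configs
-- WHERE provider = 'nvidia' AND model_id LIKE 'nvidia/meta-llama-%';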
59
scripts/migrations/026_fix_nvidia_model_ids_api_format.sql
Normal file
@@ -0,0 +1,59 @@
-- Migration 026: Fix NVIDIA model_ids to exact NVIDIA NIM API format
--
-- Verified against docs.api.nvidia.com and build.nvidia.com (December 2025)
--
-- Issues found:
--   1. moonshot-ai/kimi-k2-instruct -> should be moonshotai/kimi-k2-instruct (no hyphen)
--   2. mistralai/mistral-large-3-instruct -> model doesn't exist, should be mistralai/mistral-large
--   3. deepseek-ai/deepseek-v3 -> model doesn't exist on NVIDIA, should be deepseek-ai/deepseek-v3.1
--   4. qwen/qwen3-235b-a22b-fp8-instruct -> should be qwen/qwen3-235b-a22b (no fp8-instruct suffix)
--
-- Note: These are the model_id strings passed to NVIDIA's API, not the names shown to users

DO $$
BEGIN
    -- Fix Kimi K2: moonshot-ai -> moonshotai (NVIDIA uses no hyphen)
    UPDATE model_configs
    SET model_id = 'moonshotai/kimi-k2-instruct'
    WHERE model_id = 'moonshot-ai/kimi-k2-instruct' AND provider = 'nvidia';

    -- Fix Mistral Large 3: Use the correct model name from NVIDIA
    -- The full name is mistralai/mistral-large or mistralai/mistral-large-3-675b-instruct-2512
    UPDATE model_configs
    SET model_id = 'mistralai/mistral-large'
    WHERE model_id = 'mistralai/mistral-large-3-instruct' AND provider = 'nvidia';

    -- Fix DeepSeek V3: NVIDIA only has v3.1, not plain v3
    UPDATE model_configs
    SET model_id = 'deepseek-ai/deepseek-v3.1'
    WHERE model_id = 'deepseek-ai/deepseek-v3' AND provider = 'nvidia';

    -- Fix Qwen 3 235B: Remove fp8-instruct suffix
    UPDATE model_configs
    SET model_id = 'qwen/qwen3-235b-a22b'
    WHERE model_id = 'qwen/qwen3-235b-a22b-fp8-instruct' AND provider = 'nvidia';

    -- Also update tenant_model_configs to match
    UPDATE tenant_model_configs
    SET model_id = 'moonshotai/kimi-k2-instruct'
    WHERE model_id = 'moonshot-ai/kimi-k2-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'mistralai/mistral-large'
    WHERE model_id = 'mistralai/mistral-large-3-instruct';

    UPDATE tenant_model_configs
    SET model_id = 'deepseek-ai/deepseek-v3.1'
    WHERE model_id = 'deepseek-ai/deepseek-v3';

    UPDATE tenant_model_configs
    SET model_id = 'qwen/qwen3-235b-a22b'
    WHERE model_id = 'qwen/qwen3-235b-a22b-fp8-instruct';

    RAISE NOTICE 'Migration 026: Fixed NVIDIA model_ids to match exact API format';
END $$;

-- Log migration completion
INSERT INTO system_versions (version, component, description, applied_at)
VALUES ('026', 'model_configs', 'Fixed NVIDIA model_ids to exact API format', NOW())
ON CONFLICT DO NOTHING;
35
scripts/migrations/027_assign_nvidia_models_to_tenants.sql
Normal file
@@ -0,0 +1,35 @@
-- Migration: 027_assign_nvidia_models_to_tenants.sql
-- Description: Ensure NVIDIA models are assigned to all tenants (fix for partial 021 migration)
-- Date: 2025-12-08
-- Issue: Deploy.sh updates add models but don't assign to existing tenants

-- Assign NVIDIA models to all existing tenants with 1000 RPM rate limits
-- This is idempotent - ON CONFLICT DO NOTHING means it won't duplicate
INSERT INTO tenant_model_configs (tenant_id, model_config_id, model_id, is_enabled, priority, rate_limits, created_at, updated_at)
SELECT
    t.id,
    m.id,       -- UUID foreign key (auto-generated in model_configs)
    m.model_id, -- String identifier (kept for easier queries)
    true,
    5,
    '{"max_requests_per_hour": 1000, "max_tokens_per_request": 4000, "concurrent_requests": 5, "max_cost_per_hour": 10.0, "requests_per_minute": 1000, "tokens_per_minute": 100000, "max_concurrent": 10}'::json,
    NOW(),
    NOW()
FROM tenants t
CROSS JOIN model_configs m
WHERE m.provider = 'nvidia'
  AND m.is_active = true
ON CONFLICT (tenant_id, model_config_id) DO NOTHING;

-- Log migration completion
DO $$
DECLARE
    assigned_count INTEGER;
BEGIN
    SELECT COUNT(*) INTO assigned_count
    FROM tenant_model_configs tmc
    JOIN model_configs mc ON mc.id = tmc.model_config_id
    WHERE mc.provider = 'nvidia';

    RAISE NOTICE 'Migration 027: Ensured NVIDIA models are assigned to all tenants (% total assignments)', assigned_count;
END $$;
44
scripts/postgresql/admin-entrypoint-wrapper.sh
Executable file
@@ -0,0 +1,44 @@
#!/bin/bash
# GT 2.0 Admin PostgreSQL Custom Entrypoint
# Ensures postgres user password is synced from environment variable on every startup

set -e

echo "🔧 GT 2.0 Admin PostgreSQL Startup..."

# Function to update postgres user password from environment variable
update_postgres_password() {
    echo "🔐 Syncing postgres user password from environment..."

    # Update postgres superuser password if POSTGRES_PASSWORD is set
    if [ -n "$POSTGRES_PASSWORD" ]; then
        psql -U postgres -d gt2_admin -c "ALTER USER postgres WITH PASSWORD '$POSTGRES_PASSWORD';" >/dev/null 2>&1 && \
            echo "✅ Updated postgres user password" || \
            echo "⚠️ Could not update postgres password (database may not be ready yet)"
    fi

    # Also update gt2_admin if it exists and ADMIN_USER_PASSWORD is set
    if [ -n "$ADMIN_USER_PASSWORD" ]; then
        psql -U postgres -d gt2_admin -c "ALTER USER gt2_admin WITH PASSWORD '$ADMIN_USER_PASSWORD';" >/dev/null 2>&1 && \
            echo "✅ Updated gt2_admin user password" || \
            echo "⚠️ Could not update gt2_admin password (user may not exist yet)"
    fi
}

# Function to configure after PostgreSQL starts
configure_after_start() {
    sleep 5  # Wait for PostgreSQL to fully start

    # Update passwords from environment variables if PostgreSQL is running
    if pg_isready -U postgres >/dev/null 2>&1; then
        update_postgres_password
    fi
}

# Configure after PostgreSQL starts (in background)
configure_after_start &

echo "🚀 Starting Admin PostgreSQL..."

# Call the original PostgreSQL entrypoint
exec docker-entrypoint.sh "$@"
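# Wiring sketch (an assumption, not taken from this commit): the wrapper would
# typically be mounted into the container and set as the compose entrypoint,
# with the stock image command passed through as arguments, e.g.:
#
#   entrypoint: ["/scripts/postgresql/admin-entrypoint-wrapper.sh"]
#   command: ["postgres"]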
26
scripts/postgresql/admin-extensions.sql
Normal file
@@ -0,0 +1,26 @@
-- GT 2.0 Admin Cluster Extensions Initialization
-- Installs basic extensions for admin/control panel databases
-- Does NOT include PGVector (not available in postgres:15-alpine image)

-- Enable logging
\set ON_ERROR_STOP on
\set ECHO all

-- NOTE: Removed \c gt2_admin - Docker entrypoint runs this script
-- against POSTGRES_DB (gt2_admin) automatically.

-- Basic extensions for admin database
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pg_stat_statements";
CREATE EXTENSION IF NOT EXISTS "pg_buffercache";
CREATE EXTENSION IF NOT EXISTS "pgcrypto";

-- Log completion
DO $$
BEGIN
    RAISE NOTICE '=== GT 2.0 ADMIN EXTENSIONS SETUP ===';
    RAISE NOTICE 'Extensions configured in admin database:';
    RAISE NOTICE '- gt2_admin: uuid-ossp, pg_stat_statements, pg_buffercache, pgcrypto';
    RAISE NOTICE 'Note: PGVector NOT installed (admin cluster uses standard PostgreSQL)';
    RAISE NOTICE '=====================================';
END $$;
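-- Verification sketch (illustrative): list what actually got installed.
-- SELECT extname, extversion FROM pg_extension ORDER BY extname;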
93
scripts/postgresql/docker-entrypoint-wrapper.sh
Executable file
@@ -0,0 +1,93 @@
#!/bin/bash
# GT 2.0 PostgreSQL Custom Entrypoint
# Ensures pg_hba.conf is configured on EVERY startup, not just initialization

set -e

echo "🔧 GT 2.0 PostgreSQL Startup - Configuring replication..."

# Function to configure pg_hba.conf
configure_pg_hba() {
    local pg_hba_path="/var/lib/postgresql/data/pg_hba.conf"

    if [ -f "$pg_hba_path" ]; then
        echo "📝 Configuring pg_hba.conf for replication..."

        # Remove the previously appended GT 2.0 block (the marker comment AND
        # its host lines) so repeated startups don't accumulate duplicates.
        # Deleting from the marker to end-of-file is safe here because this
        # script always appends the block as the tail of the file.
        sed -i '/# GT 2.0 Replication Configuration/,$d' "$pg_hba_path"

        # Add replication entries
        cat >> "$pg_hba_path" << 'EOF'

# GT 2.0 Replication Configuration
host replication replicator 172.16.0.0/12 md5
host replication replicator 172.20.0.0/16 md5
host replication replicator 172.18.0.0/16 md5
host replication replicator 10.0.0.0/8 md5
host all all 172.16.0.0/12 md5
host all all 172.20.0.0/16 md5
host all all 172.18.0.0/16 md5
host all all 10.0.0.0/8 md5
EOF

        echo "✅ pg_hba.conf configured for replication"
    else
        echo "⚠️ pg_hba.conf not found - will be created during initialization"
    fi
}

# If PostgreSQL data directory exists, configure it before starting
if [ -d /var/lib/postgresql/data ] && [ -f /var/lib/postgresql/data/PG_VERSION ]; then
    configure_pg_hba
fi

# Function to update user passwords from environment variables
update_user_passwords() {
    echo "🔐 Updating user passwords from environment variables..."

    # Update gt2_tenant_user password if TENANT_USER_PASSWORD is set
    if [ -n "$TENANT_USER_PASSWORD" ]; then
        psql -U postgres -d gt2_tenants -c "ALTER USER gt2_tenant_user WITH PASSWORD '$TENANT_USER_PASSWORD';" >/dev/null 2>&1 && \
            echo "✅ Updated gt2_tenant_user password" || \
            echo "⚠️ Could not update gt2_tenant_user password (user may not exist yet)"
    fi

    # Update replicator password if POSTGRES_REPLICATION_PASSWORD is set
    if [ -n "$POSTGRES_REPLICATION_PASSWORD" ]; then
        psql -U postgres -d gt2_tenants -c "ALTER USER replicator WITH PASSWORD '$POSTGRES_REPLICATION_PASSWORD';" >/dev/null 2>&1 && \
            echo "✅ Updated replicator password" || \
            echo "⚠️ Could not update replicator password (user may not exist yet)"
    fi
}

# Function to configure after PostgreSQL starts
configure_after_start() {
    sleep 5  # Wait for PostgreSQL to fully start
    configure_pg_hba

    # Reload configuration if PostgreSQL is running
    if pg_isready -U postgres >/dev/null 2>&1; then
        echo "🔄 Reloading PostgreSQL configuration..."
        psql -U postgres -c "SELECT pg_reload_conf();" >/dev/null 2>&1 || true

        # Update passwords from environment variables
        update_user_passwords
    fi
}

# Configure after PostgreSQL starts (in background)
configure_after_start &

echo "🚀 Starting PostgreSQL with GT 2.0 configuration..."

# Pre-create tablespace directories with proper ownership for Linux compatibility
# Required for x86/DGX deployments where bind mounts preserve host ownership
echo "📁 Preparing tablespace directories..."
mkdir -p /var/lib/postgresql/tablespaces/tenant_test
chown postgres:postgres /var/lib/postgresql/tablespaces/tenant_test
chmod 700 /var/lib/postgresql/tablespaces/tenant_test
echo "✅ Tablespace directories ready"

# Call the original PostgreSQL entrypoint
exec docker-entrypoint.sh "$@"
106
scripts/postgresql/migrations/T001_rename_teams_to_tenants.sql
Normal file
@@ -0,0 +1,106 @@
-- Migration T001: Rename 'teams' table to 'tenants' for semantic clarity
-- Date: November 6, 2025
--
-- RATIONALE:
-- The 'teams' table is misnamed - it stores TENANT metadata (one row per tenant),
-- not user collaboration teams. This rename eliminates confusion and frees up the
-- 'teams' name for actual user collaboration features.
--
-- IMPACT:
-- - Renames table: teams → tenants
-- - Renames all foreign key columns: team_id → tenant_id
-- - Updates all constraints and indexes
-- - NO DATA LOSS - purely structural rename
--
-- IDEMPOTENT: Can be run multiple times safely
-- ROLLBACK: See rollback script: T001_rollback.sql

-- Note: When run via docker exec, we're already connected to gt2_tenants
-- So we don't use \c command here

SET search_path TO tenant_test_company, public;

BEGIN;

-- Idempotency wrapper: Only run if migration hasn't been applied yet
DO $$
DECLARE
    teams_exists BOOLEAN;
    tenants_exists BOOLEAN;
BEGIN
    -- Check if old 'teams' table exists and new 'tenants' table doesn't
    SELECT EXISTS (
        SELECT FROM information_schema.tables
        WHERE table_schema = 'tenant_test_company'
          AND table_name = 'teams'
    ) INTO teams_exists;

    SELECT EXISTS (
        SELECT FROM information_schema.tables
        WHERE table_schema = 'tenant_test_company'
          AND table_name = 'tenants'
    ) INTO tenants_exists;

    IF teams_exists AND NOT tenants_exists THEN
        RAISE NOTICE 'Migration T001: Applying teams → tenants rename...';

        -- Step 1: Rename the table
        ALTER TABLE teams RENAME TO tenants;

        -- Step 2: Rename foreign key columns in all dependent tables
        ALTER TABLE users RENAME COLUMN team_id TO tenant_id;
        ALTER TABLE agents RENAME COLUMN team_id TO tenant_id;
        ALTER TABLE datasets RENAME COLUMN team_id TO tenant_id;
        ALTER TABLE conversations RENAME COLUMN team_id TO tenant_id;
        ALTER TABLE documents RENAME COLUMN team_id TO tenant_id;
        ALTER TABLE document_chunks RENAME COLUMN team_id TO tenant_id;

        -- Step 3: Rename foreign key constraints
        ALTER TABLE users RENAME CONSTRAINT users_team_id_fkey TO users_tenant_id_fkey;
        ALTER TABLE agents RENAME CONSTRAINT agents_team_id_fkey TO agents_tenant_id_fkey;
        ALTER TABLE datasets RENAME CONSTRAINT datasets_team_id_fkey TO datasets_tenant_id_fkey;
        ALTER TABLE conversations RENAME CONSTRAINT conversations_team_id_fkey TO conversations_tenant_id_fkey;
        ALTER TABLE documents RENAME CONSTRAINT documents_team_id_fkey TO documents_tenant_id_fkey;
        ALTER TABLE document_chunks RENAME CONSTRAINT document_chunks_team_id_fkey TO document_chunks_tenant_id_fkey;

        -- Step 4: Rename indexes
        ALTER INDEX IF EXISTS idx_teams_domain RENAME TO idx_tenants_domain;
        ALTER INDEX IF EXISTS idx_users_team_id RENAME TO idx_users_tenant_id;
        ALTER INDEX IF EXISTS idx_agents_team_id RENAME TO idx_agents_tenant_id;
        ALTER INDEX IF EXISTS idx_datasets_team_id RENAME TO idx_datasets_tenant_id;
        ALTER INDEX IF EXISTS idx_conversations_team_id RENAME TO idx_conversations_tenant_id;
        ALTER INDEX IF EXISTS idx_documents_team_id RENAME TO idx_documents_tenant_id;
        ALTER INDEX IF EXISTS idx_document_chunks_team_id RENAME TO idx_document_chunks_tenant_id;

        RAISE NOTICE '✅ Migration T001 applied successfully!';
        RAISE NOTICE '   - Table renamed: teams → tenants';
        RAISE NOTICE '   - Columns renamed: team_id → tenant_id (6 tables)';
        RAISE NOTICE '   - Constraints renamed: 6 foreign keys';
        RAISE NOTICE '   - Indexes renamed: 7 indexes';

    ELSIF NOT teams_exists AND tenants_exists THEN
        RAISE NOTICE '✅ Migration T001 already applied (tenants table exists, teams table renamed)';
    ELSIF teams_exists AND tenants_exists THEN
        RAISE WARNING '⚠️ Migration T001 in inconsistent state: both teams and tenants tables exist!';
        RAISE WARNING '   Manual intervention may be required.';
    ELSE
        RAISE WARNING '⚠️ Migration T001 cannot run: neither teams nor tenants table exists!';
        RAISE WARNING '   Check if schema is properly initialized.';
    END IF;
END $$;

COMMIT;

-- Verification query
DO $$
DECLARE
    tenant_count INTEGER;
    user_count INTEGER;
BEGIN
    SELECT COUNT(*) INTO tenant_count FROM tenants;
    SELECT COUNT(*) INTO user_count FROM users;

    RAISE NOTICE 'Migration T001 verification:';
    RAISE NOTICE '  Tenants: % rows', tenant_count;
    RAISE NOTICE '  Users: % rows', user_count;
END $$;
91
scripts/postgresql/migrations/T001_rollback.sql
Normal file
@@ -0,0 +1,91 @@
-- Rollback Migration T001: Rename 'tenants' table back to 'teams'
-- Date: November 6, 2025
--
-- This script reverses the T001_rename_teams_to_tenants.sql migration
-- Use only if you need to rollback the migration for any reason
--
-- NO DATA LOSS - purely structural rename back to original state
-- IDEMPOTENT: Can be run multiple times safely

SET search_path TO tenant_test_company, public;

BEGIN;

-- Idempotency wrapper: Only run if rollback hasn't been applied yet
DO $$
DECLARE
    teams_exists BOOLEAN;
    tenants_exists BOOLEAN;
BEGIN
    -- Check current state
    SELECT EXISTS (
        SELECT FROM information_schema.tables
        WHERE table_schema = 'tenant_test_company'
          AND table_name = 'teams'
    ) INTO teams_exists;

    SELECT EXISTS (
        SELECT FROM information_schema.tables
        WHERE table_schema = 'tenant_test_company'
          AND table_name = 'tenants'
    ) INTO tenants_exists;

    IF NOT teams_exists AND tenants_exists THEN
        RAISE NOTICE 'Rollback T001: Reverting tenants → teams rename...';

        -- Step 1: Rename the table back
        ALTER TABLE tenants RENAME TO teams;

        -- Step 2: Rename foreign key columns back
        ALTER TABLE users RENAME COLUMN tenant_id TO team_id;
        ALTER TABLE agents RENAME COLUMN tenant_id TO team_id;
        ALTER TABLE datasets RENAME COLUMN tenant_id TO team_id;
        ALTER TABLE conversations RENAME COLUMN tenant_id TO team_id;
        ALTER TABLE documents RENAME COLUMN tenant_id TO team_id;
        ALTER TABLE document_chunks RENAME COLUMN tenant_id TO team_id;

        -- Step 3: Rename foreign key constraints back
        ALTER TABLE users RENAME CONSTRAINT users_tenant_id_fkey TO users_team_id_fkey;
        ALTER TABLE agents RENAME CONSTRAINT agents_tenant_id_fkey TO agents_team_id_fkey;
        ALTER TABLE datasets RENAME CONSTRAINT datasets_tenant_id_fkey TO datasets_team_id_fkey;
        ALTER TABLE conversations RENAME CONSTRAINT conversations_tenant_id_fkey TO conversations_team_id_fkey;
        ALTER TABLE documents RENAME CONSTRAINT documents_tenant_id_fkey TO documents_team_id_fkey;
        ALTER TABLE document_chunks RENAME CONSTRAINT document_chunks_tenant_id_fkey TO document_chunks_team_id_fkey;

        -- Step 4: Rename indexes back
        ALTER INDEX IF EXISTS idx_tenants_domain RENAME TO idx_teams_domain;
        ALTER INDEX IF EXISTS idx_users_tenant_id RENAME TO idx_users_team_id;
        ALTER INDEX IF EXISTS idx_agents_tenant_id RENAME TO idx_agents_team_id;
        ALTER INDEX IF EXISTS idx_datasets_tenant_id RENAME TO idx_datasets_team_id;
        ALTER INDEX IF EXISTS idx_conversations_tenant_id RENAME TO idx_conversations_team_id;
        ALTER INDEX IF EXISTS idx_documents_tenant_id RENAME TO idx_documents_team_id;
        ALTER INDEX IF EXISTS idx_document_chunks_tenant_id RENAME TO idx_document_chunks_team_id;

        RAISE NOTICE '✅ Rollback T001 completed successfully!';
        RAISE NOTICE '   - Table renamed: tenants → teams';
        RAISE NOTICE '   - Columns renamed: tenant_id → team_id (6 tables)';
        RAISE NOTICE '   - Constraints renamed: 6 foreign keys';
        RAISE NOTICE '   - Indexes renamed: 7 indexes';

    ELSIF teams_exists AND NOT tenants_exists THEN
        RAISE NOTICE '✅ Rollback T001 already applied (teams table exists, tenants table not found)';
    ELSE
        RAISE WARNING '⚠️ Rollback T001 cannot determine state: teams=%, tenants=%', teams_exists, tenants_exists;
    END IF;
END $$;

COMMIT;

-- Verification
DO $$
DECLARE
    team_count INTEGER;
    user_count INTEGER;
BEGIN
    SELECT COUNT(*) INTO team_count FROM teams;
    SELECT COUNT(*) INTO user_count FROM users;

    RAISE NOTICE 'Rollback T001 verification:';
    RAISE NOTICE '  Teams: % rows', team_count;
    RAISE NOTICE '  Users: % rows', user_count;
END $$;
@@ -0,0 +1,34 @@
-- Migration: Add invitation status tracking to team_memberships
-- Created: 2025-01-07
-- Purpose: Enable team invitation accept/decline workflow

SET search_path TO tenant_test_company, public;

-- Add status tracking columns
ALTER TABLE team_memberships
    ADD COLUMN IF NOT EXISTS status VARCHAR(20) DEFAULT 'accepted'
    CHECK (status IN ('pending', 'accepted', 'declined'));

ALTER TABLE team_memberships
    ADD COLUMN IF NOT EXISTS invited_at TIMESTAMPTZ DEFAULT NOW();

ALTER TABLE team_memberships
    ADD COLUMN IF NOT EXISTS responded_at TIMESTAMPTZ;

-- Update existing memberships to 'accepted' status
-- This ensures backward compatibility with existing data
UPDATE team_memberships
SET status = 'accepted', invited_at = created_at
WHERE status IS NULL;

-- Create index for efficient pending invitation queries
CREATE INDEX IF NOT EXISTS idx_team_memberships_status
    ON team_memberships(user_id, status);

CREATE INDEX IF NOT EXISTS idx_team_memberships_team_status
    ON team_memberships(team_id, status);

-- Add comment for documentation
COMMENT ON COLUMN team_memberships.status IS 'Invitation status: pending (invited), accepted (active member), declined (rejected invitation)';
COMMENT ON COLUMN team_memberships.invited_at IS 'Timestamp when invitation was sent';
COMMENT ON COLUMN team_memberships.responded_at IS 'Timestamp when invitation was accepted or declined';
@@ -0,0 +1,216 @@
-- Migration T002: Create User Collaboration Teams Tables
-- Date: November 6, 2025
--
-- PURPOSE:
-- Creates tables for user collaboration teams (different from tenant metadata).
-- Users can create teams, invite members, and share agents/datasets with team members.
--
-- TABLES CREATED:
--   1. teams            - User collaboration teams (NOT tenant metadata)
--   2. team_memberships - Team members with two-tier permissions
--
-- PERMISSION MODEL:
--   Tier 1 (Team-level):     'read' (access resources) or 'share' (access + share own resources)
--   Tier 2 (Resource-level): Per-user permissions stored in JSONB {"agent:uuid": "read|edit"}
--
-- IDEMPOTENT: Can be run multiple times safely
-- DEPENDS ON: T001_rename_teams_to_tenants.sql (must run first)

-- Note: When run via docker exec, we're already connected to gt2_tenants

SET search_path TO tenant_test_company, public;

BEGIN;

-- Table 1: User Collaboration Teams
-- This is the NEW teams table for user collaboration (replaces old misnamed tenant table)
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT FROM information_schema.tables
        WHERE table_schema = 'tenant_test_company'
        AND table_name = 'teams'
    ) THEN
        CREATE TABLE teams (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            name VARCHAR(255) NOT NULL,
            description TEXT,
            tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,  -- Tenant isolation
            owner_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,     -- Team owner
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );

        RAISE NOTICE '✅ Created teams table for user collaboration';
    ELSE
        RAISE NOTICE '✅ Teams table already exists';
    END IF;
END $$;

-- Table 2: Team Memberships with Two-Tier Permissions
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT FROM information_schema.tables
        WHERE table_schema = 'tenant_test_company'
        AND table_name = 'team_memberships'
    ) THEN
        CREATE TABLE team_memberships (
            id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
            team_id UUID NOT NULL REFERENCES teams(id) ON DELETE CASCADE,
            user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,

            -- Tier 1: Team-level permission (set by team owner)
            team_permission VARCHAR(20) NOT NULL DEFAULT 'read'
                CHECK (team_permission IN ('read', 'share')),
            -- 'read'  = can access resources shared to this team
            -- 'share' = can access resources AND share own resources to this team

            -- Tier 2: Resource-level permissions (set by resource sharer when sharing)
            -- JSONB structure: {"agent:uuid": "read|edit", "dataset:uuid": "read|edit"}
            resource_permissions JSONB DEFAULT '{}',

            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            UNIQUE(team_id, user_id)  -- Prevent duplicate memberships
        );

        RAISE NOTICE '✅ Created team_memberships table';
    ELSE
        RAISE NOTICE '✅ Team_memberships table already exists';
    END IF;
END $$;
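
-- Illustrative example (not part of the migration; UUIDs are hypothetical):
-- a Tier 2 grant is a single key merged into the member's JSONB map. The
-- || operator preserves the member's other grants.
--
-- UPDATE team_memberships
-- SET resource_permissions = resource_permissions
--         || '{"agent:5f1c0000-0000-0000-0000-000000000001": "edit"}'::jsonb
-- WHERE team_id = '...' AND user_id = '...';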

-- Performance indexes
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT FROM pg_indexes
        WHERE schemaname = 'tenant_test_company'
        AND indexname = 'idx_teams_owner_id'
    ) THEN
        CREATE INDEX idx_teams_owner_id ON teams(owner_id);
        RAISE NOTICE '✅ Created index: idx_teams_owner_id';
    END IF;

    IF NOT EXISTS (
        SELECT FROM pg_indexes
        WHERE schemaname = 'tenant_test_company'
        AND indexname = 'idx_teams_tenant_id'
    ) THEN
        CREATE INDEX idx_teams_tenant_id ON teams(tenant_id);
        RAISE NOTICE '✅ Created index: idx_teams_tenant_id';
    END IF;

    IF NOT EXISTS (
        SELECT FROM pg_indexes
        WHERE schemaname = 'tenant_test_company'
        AND indexname = 'idx_team_memberships_team_id'
    ) THEN
        CREATE INDEX idx_team_memberships_team_id ON team_memberships(team_id);
        RAISE NOTICE '✅ Created index: idx_team_memberships_team_id';
    END IF;

    IF NOT EXISTS (
        SELECT FROM pg_indexes
        WHERE schemaname = 'tenant_test_company'
        AND indexname = 'idx_team_memberships_user_id'
    ) THEN
        CREATE INDEX idx_team_memberships_user_id ON team_memberships(user_id);
        RAISE NOTICE '✅ Created index: idx_team_memberships_user_id';
    END IF;

    IF NOT EXISTS (
        SELECT FROM pg_indexes
        WHERE schemaname = 'tenant_test_company'
        AND indexname = 'idx_team_memberships_resources'
    ) THEN
        CREATE INDEX idx_team_memberships_resources ON team_memberships USING gin(resource_permissions);
        RAISE NOTICE '✅ Created index: idx_team_memberships_resources';
    END IF;
END $$;

-- Function: Auto-unshare resources when user loses 'share' permission
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT FROM pg_proc p
        JOIN pg_namespace n ON p.pronamespace = n.oid
        WHERE n.nspname = 'tenant_test_company'
        AND p.proname = 'auto_unshare_on_permission_downgrade'
    ) THEN
        CREATE FUNCTION auto_unshare_on_permission_downgrade()
        RETURNS TRIGGER AS $func$
        BEGIN
            -- If team_permission changed from 'share' to 'read'
            IF OLD.team_permission = 'share' AND NEW.team_permission = 'read' THEN
                -- Clear all resource permissions for this user
                -- (they can no longer share resources, so remove what they shared)
                NEW.resource_permissions := '{}'::jsonb;

                RAISE NOTICE 'Auto-unshared all resources for user % in team % due to permission downgrade',
                    NEW.user_id, NEW.team_id;
            END IF;

            RETURN NEW;
        END;
        $func$ LANGUAGE plpgsql;

        RAISE NOTICE '✅ Created function: auto_unshare_on_permission_downgrade';
    ELSE
        RAISE NOTICE '✅ Function auto_unshare_on_permission_downgrade already exists';
    END IF;
END $$;

-- Trigger: Apply auto-unshare logic
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT FROM pg_trigger
        WHERE tgname = 'trigger_auto_unshare'
    ) THEN
        CREATE TRIGGER trigger_auto_unshare
            BEFORE UPDATE OF team_permission ON team_memberships
            FOR EACH ROW
            EXECUTE FUNCTION auto_unshare_on_permission_downgrade();

        RAISE NOTICE '✅ Created trigger: trigger_auto_unshare';
    ELSE
        RAISE NOTICE '✅ Trigger trigger_auto_unshare already exists';
    END IF;
END $$;
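
-- Illustrative example (not part of the migration): downgrading a member
-- fires trigger_auto_unshare, which wipes their Tier 2 grants in the same
-- statement.
--
-- UPDATE team_memberships
-- SET team_permission = 'read'
-- WHERE team_id = '...' AND user_id = '...';
-- -- resource_permissions is now '{}' for that row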

-- Grant permissions
DO $$
BEGIN
    GRANT SELECT, INSERT, UPDATE, DELETE ON teams TO gt2_tenant_user;
    GRANT SELECT, INSERT, UPDATE, DELETE ON team_memberships TO gt2_tenant_user;
    RAISE NOTICE '✅ Granted permissions to gt2_tenant_user';
EXCEPTION
    WHEN undefined_object THEN
        RAISE NOTICE '⚠️ Role gt2_tenant_user does not exist (ok for fresh installs)';
END $$;

COMMIT;

-- Final verification
DO $$
DECLARE
    teams_count INTEGER;
    memberships_count INTEGER;
BEGIN
    SELECT COUNT(*) INTO teams_count FROM teams;
    SELECT COUNT(*) INTO memberships_count FROM team_memberships;

    RAISE NOTICE '';
    RAISE NOTICE '========================================';
    RAISE NOTICE '✅ Migration T002 completed successfully!';
    RAISE NOTICE '========================================';
    RAISE NOTICE 'Tables created:';
    RAISE NOTICE '  - teams (user collaboration): % rows', teams_count;
    RAISE NOTICE '  - team_memberships: % rows', memberships_count;
    RAISE NOTICE 'Indexes: 5 created';
    RAISE NOTICE 'Functions: 1 created';
    RAISE NOTICE 'Triggers: 1 created';
    RAISE NOTICE '========================================';
END $$;
313
scripts/postgresql/migrations/T003_team_resource_shares.sql
Normal file
@@ -0,0 +1,313 @@
-- Migration T003: Team Resource Sharing System
-- Purpose: Enable multi-team resource sharing for agents and datasets
-- Dependencies: T002_create_collaboration_teams.sql
-- Author: GT 2.0 Development Team
-- Date: 2025-01-07

-- Set schema for tenant isolation
SET search_path TO tenant_test_company;

-- ============================================================================
-- SECTION 1: Junction Table for Many-to-Many Resource Sharing
-- ============================================================================

CREATE TABLE IF NOT EXISTS team_resource_shares (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    team_id UUID NOT NULL REFERENCES teams(id) ON DELETE CASCADE,
    resource_type VARCHAR(20) NOT NULL CHECK (resource_type IN ('agent', 'dataset')),
    resource_id UUID NOT NULL,
    shared_by UUID NOT NULL REFERENCES users(id),
    created_at TIMESTAMP DEFAULT NOW(),

    -- Ensure each resource can only be shared once per team
    UNIQUE(team_id, resource_type, resource_id)
);

COMMENT ON TABLE team_resource_shares IS 'Junction table for sharing agents/datasets with collaboration teams';
COMMENT ON COLUMN team_resource_shares.resource_type IS 'Type of resource: agent or dataset';
COMMENT ON COLUMN team_resource_shares.resource_id IS 'UUID of the agent or dataset being shared';
COMMENT ON COLUMN team_resource_shares.shared_by IS 'User who shared this resource with the team';

-- ============================================================================
-- SECTION 2: Performance Indexes
-- ============================================================================
-- IF NOT EXISTS keeps reruns safe, matching the table creation above

-- Index for finding all teams a resource is shared with
CREATE INDEX IF NOT EXISTS idx_trs_resource ON team_resource_shares(resource_type, resource_id);

-- Index for finding all resources shared with a team
CREATE INDEX IF NOT EXISTS idx_trs_team ON team_resource_shares(team_id);

-- Index for finding resources shared by a specific user
CREATE INDEX IF NOT EXISTS idx_trs_shared_by ON team_resource_shares(shared_by);

-- Composite index for common access checks
CREATE INDEX IF NOT EXISTS idx_trs_lookup ON team_resource_shares(team_id, resource_type, resource_id);

-- ============================================================================
-- SECTION 3: Helper View #1 - Individual User Resource Access
-- ============================================================================
-- Purpose: Flatten team memberships + resource shares for fast permission checks
-- Usage: Check if specific user has access to specific resource

CREATE OR REPLACE VIEW user_resource_access AS
SELECT
    tm.user_id,
    trs.resource_type,
    trs.resource_id,
    tm.resource_permissions->(trs.resource_type || ':' || trs.resource_id::text) as permission,
    tm.team_id,
    tm.team_permission,
    trs.shared_by,
    trs.created_at
FROM team_memberships tm
JOIN team_resource_shares trs ON tm.team_id = trs.team_id
WHERE tm.resource_permissions ? (trs.resource_type || ':' || trs.resource_id::text);

COMMENT ON VIEW user_resource_access IS 'Flattened view of user access to resources via team memberships';
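
-- Illustrative usage (hypothetical UUID): list a user's effective grants.
-- The JSONB ? operator in the view's WHERE clause tests key existence, so
-- only resources explicitly granted to the member appear.
--
-- SELECT resource_type, resource_id, permission
-- FROM user_resource_access
-- WHERE user_id = '9a0b0000-0000-0000-0000-000000000002';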

-- Note: Indexes on views are not supported in standard PostgreSQL
-- For performance, consider creating a materialized view if needed

-- ============================================================================
-- SECTION 4: Helper View #2 - Aggregated User Accessible Resources
-- ============================================================================
-- Purpose: Aggregate resources by user for efficient listing
-- Usage: Get all agents/datasets accessible to a user (for list views)

CREATE OR REPLACE VIEW user_accessible_resources AS
SELECT
    tm.user_id,
    trs.resource_type,
    trs.resource_id,
    -- Rank grants numerically so MAX() prefers edit > read > none; a plain
    -- MAX() over the text values would sort 'read' above 'edit'
    -- alphabetically and report the wrong "best" permission
    CASE MAX(CASE
        WHEN tm.resource_permissions->(trs.resource_type || ':' || trs.resource_id::text) = '"edit"'::jsonb
            THEN 2
        WHEN tm.resource_permissions->(trs.resource_type || ':' || trs.resource_id::text) = '"read"'::jsonb
            THEN 1
        ELSE 0
    END)
        WHEN 2 THEN 'edit'
        WHEN 1 THEN 'read'
        ELSE 'none'
    END as best_permission,
    COUNT(DISTINCT tm.team_id) as shared_in_teams,
    ARRAY_AGG(DISTINCT tm.team_id) as team_ids,
    MIN(trs.created_at) as first_shared_at
FROM team_memberships tm
JOIN team_resource_shares trs ON tm.team_id = trs.team_id
WHERE tm.resource_permissions ? (trs.resource_type || ':' || trs.resource_id::text)
GROUP BY tm.user_id, trs.resource_type, trs.resource_id;

COMMENT ON VIEW user_accessible_resources IS 'Aggregated view showing all resources accessible to each user with best permission level';

-- Note: Indexes on views are not supported in standard PostgreSQL
-- For performance, consider creating a materialized view if needed

-- ============================================================================
-- SECTION 5: Cascade Cleanup Trigger
-- ============================================================================
-- Purpose: When a resource is unshared from a team, clean up member permissions
-- Note: The ON DELETE CASCADE on team_resource_shares already handles team deletion

CREATE OR REPLACE FUNCTION cleanup_resource_permissions()
RETURNS TRIGGER AS $$
BEGIN
    -- Remove the resource permission key from all team members
    UPDATE team_memberships
    SET resource_permissions = resource_permissions - (OLD.resource_type || ':' || OLD.resource_id::text)
    WHERE team_id = OLD.team_id
      AND resource_permissions ? (OLD.resource_type || ':' || OLD.resource_id::text);

    RETURN OLD;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER trigger_cleanup_resource_permissions
    BEFORE DELETE ON team_resource_shares
    FOR EACH ROW
    EXECUTE FUNCTION cleanup_resource_permissions();

COMMENT ON FUNCTION cleanup_resource_permissions IS 'Removes resource permission entries from team members when resource is unshared';

-- ============================================================================
-- SECTION 6: Validation Function
-- ============================================================================
-- Purpose: Validate that a user has 'share' permission before sharing resources

CREATE OR REPLACE FUNCTION validate_resource_share()
RETURNS TRIGGER AS $$
DECLARE
    user_team_permission VARCHAR(20);
BEGIN
    -- Check if the user has 'share' permission on the team
    SELECT team_permission INTO user_team_permission
    FROM team_memberships
    WHERE team_id = NEW.team_id
      AND user_id = NEW.shared_by;

    IF user_team_permission IS NULL THEN
        RAISE EXCEPTION 'User % is not a member of team %', NEW.shared_by, NEW.team_id;
    END IF;

    IF user_team_permission != 'share' THEN
        RAISE EXCEPTION 'User % does not have share permission on team %', NEW.shared_by, NEW.team_id;
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER trigger_validate_resource_share
    BEFORE INSERT ON team_resource_shares
    FOR EACH ROW
    EXECUTE FUNCTION validate_resource_share();

COMMENT ON FUNCTION validate_resource_share IS 'Ensures only users with share permission can share resources to teams';

-- ============================================================================
-- SECTION 6B: Sync JSONB Permissions When Resource Shared
-- ============================================================================
-- Purpose: Automatically update team_memberships.resource_permissions when
-- a resource is shared to a team. This ensures database-level consistency.

CREATE OR REPLACE FUNCTION sync_resource_permissions_on_share()
RETURNS TRIGGER AS $$
BEGIN
    -- Note: This trigger is called AFTER validation, so we know the share is valid.
    -- The actual permission levels (read/edit) are set by the application layer;
    -- this function is currently a no-op placeholder. Sharing is a two-step process:
    --   1. This trigger slot: reserved hook for making the resource known to the team
    --   2. Application: set per-user permissions via update_member_resource_permission()
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Note: We're keeping this function simple for now. The application layer handles
-- per-user permission assignment. A future optimization could move all permission
-- logic into triggers, but that requires storing default permissions in team_resource_shares.

COMMENT ON FUNCTION sync_resource_permissions_on_share IS 'Placeholder for future JSONB sync automation';

-- ============================================================================
-- SECTION 7: Helper Functions for Application Layer
-- ============================================================================

-- Function to get all resources shared with a team
CREATE OR REPLACE FUNCTION get_team_resources(p_team_id UUID, p_resource_type VARCHAR DEFAULT NULL)
RETURNS TABLE (
    resource_id UUID,
    resource_type VARCHAR,
    shared_by UUID,
    created_at TIMESTAMP,
    member_count BIGINT
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        trs.resource_id,
        trs.resource_type,
        trs.shared_by,
        trs.created_at,
        COUNT(DISTINCT tm.user_id) as member_count
    FROM team_resource_shares trs
    JOIN team_memberships tm ON tm.team_id = trs.team_id
    WHERE trs.team_id = p_team_id
      AND (p_resource_type IS NULL OR trs.resource_type = p_resource_type)
      AND tm.resource_permissions ? (trs.resource_type || ':' || trs.resource_id::text)
    GROUP BY trs.resource_id, trs.resource_type, trs.shared_by, trs.created_at
    ORDER BY trs.created_at DESC;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION get_team_resources IS 'Get all resources shared with a team, optionally filtered by resource type';
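
-- Illustrative usage (hypothetical UUID): list every agent shared with a
-- team; passing NULL (the default) for the second argument returns agents
-- and datasets together.
--
-- SELECT * FROM get_team_resources('3c9d0000-0000-0000-0000-000000000003', 'agent');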

-- Function to check if a user has permission on a resource
CREATE OR REPLACE FUNCTION check_user_resource_permission(
    p_user_id UUID,
    p_resource_type VARCHAR,
    p_resource_id UUID,
    p_required_permission VARCHAR DEFAULT 'read'
)
RETURNS BOOLEAN AS $$
DECLARE
    user_permission VARCHAR;
BEGIN
    -- Get the user's permission from any team that has this resource,
    -- preferring the strongest grant; without the ORDER BY, LIMIT 1 could
    -- pick a weaker 'read' grant even when another team grants 'edit'
    SELECT (ura.permission::text)
    INTO user_permission
    FROM user_resource_access ura
    WHERE ura.user_id = p_user_id
      AND ura.resource_type = p_resource_type
      AND ura.resource_id = p_resource_id
    ORDER BY (ura.permission = '"edit"'::jsonb) DESC
    LIMIT 1;

    -- If no permission found, return false
    IF user_permission IS NULL THEN
        RETURN FALSE;
    END IF;

    -- Remove quotes from JSONB string value
    user_permission := TRIM(BOTH '"' FROM user_permission);

    -- Check permission level
    IF p_required_permission = 'read' THEN
        RETURN user_permission IN ('read', 'edit');
    ELSIF p_required_permission = 'edit' THEN
        RETURN user_permission = 'edit';
    ELSE
        RETURN FALSE;
    END IF;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION check_user_resource_permission IS 'Check if user has required permission (read/edit) on a resource';
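
-- Illustrative usage (hypothetical UUIDs): gate an edit operation in the
-- application layer. Returns TRUE only if some team grants 'edit'.
--
-- SELECT check_user_resource_permission(
--     '9a0b0000-0000-0000-0000-000000000002',  -- user
--     'agent',
--     '5f1c0000-0000-0000-0000-000000000001',  -- resource
--     'edit'
-- );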

-- ============================================================================
-- SECTION 8: Migration Data (if needed)
-- ============================================================================

-- If there are any existing agents/datasets with visibility='team',
-- they would need to be migrated here. Since this is a fresh feature,
-- no data migration is needed.

-- ============================================================================
-- SECTION 9: Grant Permissions
-- ============================================================================

-- Grant appropriate permissions to application roles
-- Note: Adjust role names based on your PostgreSQL setup

-- GRANT SELECT, INSERT, UPDATE, DELETE ON team_resource_shares TO gt2_tenant_user;
-- GRANT SELECT ON user_resource_access TO gt2_tenant_user;
-- GRANT SELECT ON user_accessible_resources TO gt2_tenant_user;
-- GRANT EXECUTE ON FUNCTION get_team_resources TO gt2_tenant_user;
-- GRANT EXECUTE ON FUNCTION check_user_resource_permission TO gt2_tenant_user;

-- ============================================================================
-- SECTION 10: Verification Queries
-- ============================================================================

-- Verify table was created
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'team_resource_shares') THEN
        RAISE NOTICE 'SUCCESS: team_resource_shares table created';
    ELSE
        RAISE EXCEPTION 'FAILURE: team_resource_shares table not found';
    END IF;

    IF EXISTS (SELECT 1 FROM information_schema.views WHERE table_name = 'user_resource_access') THEN
        RAISE NOTICE 'SUCCESS: user_resource_access view created';
    ELSE
        RAISE EXCEPTION 'FAILURE: user_resource_access view not found';
    END IF;

    IF EXISTS (SELECT 1 FROM information_schema.views WHERE table_name = 'user_accessible_resources') THEN
        RAISE NOTICE 'SUCCESS: user_accessible_resources view created';
    ELSE
        RAISE EXCEPTION 'FAILURE: user_accessible_resources view not found';
    END IF;

    RAISE NOTICE 'Migration T003 completed successfully!';
END $$;
@@ -0,0 +1,78 @@

-- Migration T004: Update validate_resource_share Trigger Function
-- Purpose: Allow team owners and admins to share resources without requiring team membership
-- Dependencies: T003_team_resource_shares.sql
-- Author: GT 2.0 Development Team
-- Date: 2025-01-07
--
-- Changes:
--   - Add team owner bypass check (owners don't need team membership)
--   - Add admin/developer role bypass check (admins can share to any team)
--   - Preserve original team membership + share permission check for regular users
--
-- This migration is idempotent via CREATE OR REPLACE FUNCTION

SET search_path TO tenant_test_company;

CREATE OR REPLACE FUNCTION validate_resource_share()
RETURNS trigger
LANGUAGE plpgsql
AS $$
DECLARE
    user_team_permission VARCHAR(20);
    is_team_owner BOOLEAN;
    user_role VARCHAR(50);
    user_tenant_id UUID;
    team_tenant_id UUID;
BEGIN
    -- Check if user is team owner
    SELECT (owner_id = NEW.shared_by), tenant_id INTO is_team_owner, team_tenant_id
    FROM teams
    WHERE id = NEW.team_id;

    -- Allow team owners to share
    IF is_team_owner THEN
        RETURN NEW;
    END IF;

    -- Check if user is admin/developer (bypass membership requirement)
    SELECT u.user_type, u.tenant_id INTO user_role, user_tenant_id
    FROM users u
    WHERE u.id = NEW.shared_by;

    -- Allow admins, developers, and super admins in the same tenant
    IF user_role IN ('admin', 'developer', 'super_admin') AND user_tenant_id = team_tenant_id THEN
        RETURN NEW;
    END IF;

    -- Check if the user has 'share' permission on the team
    SELECT team_permission INTO user_team_permission
    FROM team_memberships
    WHERE team_id = NEW.team_id
      AND user_id = NEW.shared_by;

    IF user_team_permission IS NULL THEN
        RAISE EXCEPTION 'User % is not a member of team %', NEW.shared_by, NEW.team_id;
    END IF;

    IF user_team_permission != 'share' THEN
        RAISE EXCEPTION 'User % does not have share permission on team %', NEW.shared_by, NEW.team_id;
    END IF;

    RETURN NEW;
END;
$$;
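
-- Illustrative behavior (hypothetical UUIDs): a team owner can now insert a
-- share without having a team_memberships row; the same INSERT with a
-- 'read'-only member as shared_by still raises an exception.
--
-- INSERT INTO team_resource_shares (team_id, resource_type, resource_id, shared_by)
-- VALUES ('3c9d0000-0000-0000-0000-000000000003', 'agent',
--         '5f1c0000-0000-0000-0000-000000000001', '<owner uuid>');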

-- Verification: Check that the function exists
DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM pg_proc p
        JOIN pg_namespace n ON p.pronamespace = n.oid
        WHERE n.nspname = 'tenant_test_company'
        AND p.proname = 'validate_resource_share'
    ) THEN
        RAISE NOTICE 'SUCCESS: T004 migration completed - validate_resource_share function updated';
    ELSE
        RAISE EXCEPTION 'FAILED: T004 migration - validate_resource_share function not found';
    END IF;
END $$;
214
scripts/postgresql/migrations/T005_team_observability.sql
Normal file
@@ -0,0 +1,214 @@
-- Migration T005: Team Observability System
-- Purpose: Add Observable member tracking for team-level activity monitoring
-- Dependencies: T003_team_resource_shares.sql
-- Author: GT 2.0 Development Team
-- Date: 2025-01-10

-- Set schema for tenant isolation
SET search_path TO tenant_test_company;

-- ============================================================================
-- SECTION 1: Add Observable Columns to team_memberships
-- ============================================================================

-- Add Observable status tracking columns
ALTER TABLE team_memberships
    ADD COLUMN IF NOT EXISTS is_observable BOOLEAN DEFAULT FALSE,
    ADD COLUMN IF NOT EXISTS observable_consent_status VARCHAR(20) DEFAULT 'none',
    ADD COLUMN IF NOT EXISTS observable_consent_at TIMESTAMPTZ;

-- Add constraint for observable_consent_status values
-- (drop first so the migration stays safe to re-run; ADD CONSTRAINT has no IF NOT EXISTS)
ALTER TABLE team_memberships DROP CONSTRAINT IF EXISTS check_observable_consent_status;
ALTER TABLE team_memberships
    ADD CONSTRAINT check_observable_consent_status
    CHECK (observable_consent_status IN ('none', 'pending', 'approved', 'revoked'));

COMMENT ON COLUMN team_memberships.is_observable IS 'Member consents to team managers viewing their activity';
COMMENT ON COLUMN team_memberships.observable_consent_status IS 'Consent workflow status: none, pending, approved, revoked';
COMMENT ON COLUMN team_memberships.observable_consent_at IS 'Timestamp when Observable status was approved';

-- ============================================================================
-- SECTION 2: Extend team_permission to Include Manager Role
-- ============================================================================

-- Drop existing constraint if it exists (handles both explicit and auto-generated names)
ALTER TABLE team_memberships DROP CONSTRAINT IF EXISTS check_team_permission;
ALTER TABLE team_memberships DROP CONSTRAINT IF EXISTS team_memberships_team_permission_check;

-- Add updated constraint with 'manager' role
ALTER TABLE team_memberships
    ADD CONSTRAINT check_team_permission
    CHECK (team_permission IN ('read', 'share', 'manager'));

COMMENT ON COLUMN team_memberships.team_permission IS
    'Team role: read=Member (view only), share=Contributor (can share resources), manager=Manager (can manage members + view Observable activity)';

-- ============================================================================
-- SECTION 3: Update Auto-Unshare Trigger for Manager Role
-- ============================================================================

-- Update trigger function to handle 'manager' role
CREATE OR REPLACE FUNCTION auto_unshare_on_permission_downgrade()
RETURNS TRIGGER AS $$
BEGIN
    -- Clear resource_permissions when downgrading from share/manager to read
    -- Manager and Contributor (share) can share resources
    -- Member (read) cannot share resources
    IF OLD.team_permission IN ('share', 'manager')
       AND NEW.team_permission = 'read' THEN
        NEW.resource_permissions := '{}'::jsonb;
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION auto_unshare_on_permission_downgrade IS
    'Clears resource_permissions when member is downgraded to read-only (Member role)';

-- ============================================================================
-- SECTION 4: Update Resource Share Validation for Manager Role
-- ============================================================================

-- Update validation function to allow managers to share
CREATE OR REPLACE FUNCTION validate_resource_share()
RETURNS TRIGGER AS $$
DECLARE
    user_team_permission VARCHAR(20);
    is_team_owner BOOLEAN;
    user_role VARCHAR(50);
BEGIN
    -- Get user's team permission
    SELECT team_permission INTO user_team_permission
    FROM team_memberships
    WHERE team_id = NEW.team_id
      AND user_id = NEW.shared_by;

    -- Check if user is the team owner
    SELECT EXISTS (
        SELECT 1 FROM teams
        WHERE id = NEW.team_id AND owner_id = NEW.shared_by
    ) INTO is_team_owner;

    -- Get user's system role for admin bypass
    SELECT role INTO user_role
    FROM users
    WHERE id = NEW.shared_by;

    -- Allow if: owner, or has share/manager permission, or is admin/developer
    IF is_team_owner THEN
        RETURN NEW;
    END IF;

    IF user_role IN ('admin', 'developer') THEN
        RETURN NEW;
    END IF;

    IF user_team_permission IS NULL THEN
        RAISE EXCEPTION 'User % is not a member of team %', NEW.shared_by, NEW.team_id;
    END IF;

    IF user_team_permission NOT IN ('share', 'manager') THEN
        RAISE EXCEPTION 'User % does not have permission to share resources (current permission: %)',
            NEW.shared_by, user_team_permission;
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION validate_resource_share IS
    'Validates that only owners, managers, contributors (share), or admins can share resources to teams';

-- ============================================================================
-- SECTION 5: Performance Indexes
-- ============================================================================

-- Index for finding Observable members (used for activity queries)
CREATE INDEX IF NOT EXISTS idx_team_memberships_observable
    ON team_memberships(team_id, is_observable, observable_consent_status)
    WHERE is_observable = true AND observable_consent_status = 'approved';

-- Index for finding members by role (for permission checks)
CREATE INDEX IF NOT EXISTS idx_team_memberships_permission
    ON team_memberships(team_id, team_permission);

COMMENT ON INDEX idx_team_memberships_observable IS
    'Optimizes queries for Observable member activity (partial index for approved Observable members only)';
COMMENT ON INDEX idx_team_memberships_permission IS
    'Optimizes role-based permission checks (finding managers, contributors, etc.)';

-- ============================================================================
-- SECTION 6: Helper Function - Get Observable Members
-- ============================================================================

CREATE OR REPLACE FUNCTION get_observable_members(p_team_id UUID)
RETURNS TABLE (
    user_id UUID,
    user_email TEXT,
    user_name TEXT,
    observable_since TIMESTAMPTZ
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        tm.user_id,
        u.email::text as user_email,
        u.full_name::text as user_name,
        tm.observable_consent_at
    FROM team_memberships tm
    JOIN users u ON tm.user_id = u.id
    WHERE tm.team_id = p_team_id
      AND tm.is_observable = true
      AND tm.observable_consent_status = 'approved'
      AND tm.status = 'accepted'
    ORDER BY tm.observable_consent_at DESC;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION get_observable_members IS
    'Returns list of Observable team members with approved consent status';
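
-- Illustrative usage (hypothetical UUID): feed a manager's activity view.
-- Only members who opted in (is_observable) and completed the consent
-- workflow ('approved') are returned.
--
-- SELECT user_email, observable_since
-- FROM get_observable_members('3c9d0000-0000-0000-0000-000000000003');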

-- ============================================================================
-- SECTION 7: Verification
-- ============================================================================

DO $$
DECLARE
    observable_count INTEGER;
BEGIN
    -- Verify Observable columns exist
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'team_memberships'
        AND column_name = 'is_observable'
    ) THEN
        RAISE EXCEPTION 'FAILURE: is_observable column not created';
    END IF;

    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'team_memberships'
        AND column_name = 'observable_consent_status'
    ) THEN
        RAISE EXCEPTION 'FAILURE: observable_consent_status column not created';
    END IF;

    -- Verify indexes
    IF NOT EXISTS (
        SELECT 1 FROM pg_indexes
        WHERE indexname = 'idx_team_memberships_observable'
    ) THEN
        RAISE EXCEPTION 'FAILURE: idx_team_memberships_observable index not created';
    END IF;

    -- Count Observable members (should be 0 initially)
    SELECT COUNT(*) INTO observable_count
    FROM team_memberships
    WHERE is_observable = true;

    RAISE NOTICE 'SUCCESS: Observable columns added (current Observable members: %)', observable_count;
    RAISE NOTICE 'SUCCESS: team_permission constraint updated to support manager role';
    RAISE NOTICE 'SUCCESS: Indexes created for Observable queries';
    RAISE NOTICE 'Migration T005 completed successfully!';
END $$;
60
scripts/postgresql/migrations/T006_auth_logs.sql
Normal file
@@ -0,0 +1,60 @@
-- Migration: T006_auth_logs
-- Description: Add authentication logging for user logins, logouts, and failed attempts
-- Date: 2025-11-17
-- Issue: #152

-- This migration creates the auth_logs table to track authentication events
-- for observability and security auditing purposes.

BEGIN;

-- Apply to existing tenant schemas
DO $$
DECLARE
    tenant_schema TEXT;
BEGIN
    FOR tenant_schema IN
        SELECT schema_name
        FROM information_schema.schemata
        WHERE schema_name LIKE 'tenant_%' AND schema_name != 'tenant_template'
    LOOP
        -- Create auth_logs table
        EXECUTE format('
            CREATE TABLE IF NOT EXISTS %I.auth_logs (
                id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
                user_id TEXT NOT NULL,
                email TEXT NOT NULL,
                event_type TEXT NOT NULL CHECK (event_type IN (''login'', ''logout'', ''failed_login'')),
                success BOOLEAN NOT NULL DEFAULT true,
                failure_reason TEXT,
                ip_address TEXT,
                user_agent TEXT,
                tenant_domain TEXT,
                created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
                metadata JSONB DEFAULT ''{}''::jsonb
            )', tenant_schema);

        -- Create indexes
        EXECUTE format('CREATE INDEX IF NOT EXISTS idx_auth_logs_user_id ON %I.auth_logs(user_id)', tenant_schema);
        EXECUTE format('CREATE INDEX IF NOT EXISTS idx_auth_logs_email ON %I.auth_logs(email)', tenant_schema);
        EXECUTE format('CREATE INDEX IF NOT EXISTS idx_auth_logs_event_type ON %I.auth_logs(event_type)', tenant_schema);
        EXECUTE format('CREATE INDEX IF NOT EXISTS idx_auth_logs_created_at ON %I.auth_logs(created_at DESC)', tenant_schema);
        EXECUTE format('CREATE INDEX IF NOT EXISTS idx_auth_logs_tenant_domain ON %I.auth_logs(tenant_domain)', tenant_schema);
        EXECUTE format('CREATE INDEX IF NOT EXISTS idx_auth_logs_event_created ON %I.auth_logs(event_type, created_at DESC)', tenant_schema);

        RAISE NOTICE 'Applied T006_auth_logs migration to schema: %', tenant_schema;
    END LOOP;
END $$;
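
-- Illustrative usage (hypothetical values): record a failed login attempt.
--
-- INSERT INTO tenant_test_company.auth_logs
--     (user_id, email, event_type, success, failure_reason, ip_address)
-- VALUES
--     ('u-123', 'user@example.com', 'failed_login', false,
--      'invalid password', '203.0.113.7');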

COMMIT;

-- Verification query
SELECT
    n.nspname AS schema_name,
    c.relname AS table_name,
    pg_size_pretty(pg_total_relation_size(c.oid)) AS total_size
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = 'auth_logs'
  AND n.nspname LIKE 'tenant_%'
ORDER BY n.nspname;
61
scripts/postgresql/migrations/T007_optimize_queries.sql
Normal file
@@ -0,0 +1,61 @@
-- T007_optimize_queries.sql
-- Phase 1 Performance Optimization: Composite Indexes
-- Creates composite indexes for common query patterns to improve performance
-- Estimated improvement: 60-80% faster conversation and message queries

BEGIN;

-- Apply to all existing tenant schemas
DO $$
DECLARE
    tenant_schema TEXT;
BEGIN
    FOR tenant_schema IN
        SELECT schema_name
        FROM information_schema.schemata
        WHERE schema_name LIKE 'tenant_%' AND schema_name != 'tenant_template'
    LOOP
        -- Composite index for message queries
        -- Optimizes: SELECT * FROM messages WHERE conversation_id = ? ORDER BY created_at
        -- Common in: conversation_service.get_messages() with pagination
        -- Impact: Covers both filter and sort in single index scan
        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_messages_conversation_created
            ON %I.messages
            USING btree (conversation_id, created_at ASC)
        ', tenant_schema);

        -- Composite index for conversation list queries
        -- Optimizes: SELECT * FROM conversations WHERE user_id = ? AND is_archived = false ORDER BY updated_at DESC
        -- Common in: conversation_service.list_conversations()
        -- Impact: Covers filter and sort, avoiding a separate sort step for conversation lists
        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_conversations_user_updated
            ON %I.conversations
            USING btree (user_id, is_archived, updated_at DESC)
        ', tenant_schema);

        RAISE NOTICE 'Applied T007 optimization indexes to schema: %', tenant_schema;
    END LOOP;
END $$;

COMMIT;
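
-- Illustrative check (not part of the migration; UUID is hypothetical):
-- confirm the planner uses the new index for the pagination query it targets.
-- The plan should show an Index Scan on idx_messages_conversation_created
-- with no separate Sort node.
--
-- EXPLAIN (ANALYZE, BUFFERS)
-- SELECT * FROM tenant_test_company.messages
-- WHERE conversation_id = '3c9d0000-0000-0000-0000-000000000003'
-- ORDER BY created_at
-- LIMIT 50;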

-- Performance Notes:
-- - Both indexes support common access patterns in the application
-- - No schema changes - purely additive optimization
-- - Safe to run multiple times (IF NOT EXISTS)
-- - Note: CONCURRENTLY cannot be used inside DO $$ blocks
--
-- Rollback (if needed):
-- DO $$
-- DECLARE tenant_schema TEXT;
-- BEGIN
--     FOR tenant_schema IN
--         SELECT schema_name FROM information_schema.schemata
--         WHERE schema_name LIKE 'tenant_%' AND schema_name != 'tenant_template'
--     LOOP
--         EXECUTE format('DROP INDEX IF EXISTS %I.idx_messages_conversation_created', tenant_schema);
--         EXECUTE format('DROP INDEX IF EXISTS %I.idx_conversations_user_updated', tenant_schema);
--     END LOOP;
-- END $$;
@@ -0,0 +1,73 @@
-- T008_add_performance_indexes.sql
-- Performance optimization: Add missing FK indexes for agents, datasets, and team shares
-- Fixes: GitHub Issue #173 - Database Optimizations
-- Impact: 60-80% faster API response times by eliminating full table scans

BEGIN;

-- Apply to all existing tenant schemas
DO $$
DECLARE
    tenant_schema TEXT;
BEGIN
    FOR tenant_schema IN
        SELECT schema_name
        FROM information_schema.schemata
        WHERE schema_name LIKE 'tenant_%' AND schema_name != 'tenant_template'
    LOOP
        -- Index for conversations.agent_id FK
        -- Optimizes: Queries filtering/joining conversations by agent
        -- Common in: agent_service.py aggregations, dashboard stats
        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_conversations_agent_id
            ON %I.conversations
            USING btree (agent_id)
        ', tenant_schema);

        -- Index for documents.dataset_id FK
        -- Optimizes: Queries filtering documents by dataset
        -- Common in: dataset_service.py stats, document counts per dataset
        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_documents_dataset_id
            ON %I.documents
            USING btree (dataset_id)
        ', tenant_schema);

        -- Composite index for team_resource_shares lookup
        -- Optimizes: get_resource_teams() queries by resource type and ID
        -- Fixes N+1: Enables batch lookups for agent/dataset team shares
        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_team_resource_shares_lookup
            ON %I.team_resource_shares
            USING btree (resource_type, resource_id)
        ', tenant_schema);

        RAISE NOTICE 'Applied T008 performance indexes to schema: %', tenant_schema;
    END LOOP;
END $$;

COMMIT;
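
-- Illustrative check (hypothetical UUIDs): the composite index lets the
-- batched lookup below replace per-resource N+1 queries with a single
-- index scan.
--
-- EXPLAIN
-- SELECT team_id, resource_id
-- FROM tenant_test_company.team_resource_shares
-- WHERE resource_type = 'agent'
--   AND resource_id IN ('5f1c0000-0000-0000-0000-000000000001',
--                       '7a2e0000-0000-0000-0000-000000000004');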

-- Performance Notes:
-- - idx_conversations_agent_id: Required for agent-to-conversation joins
-- - idx_documents_dataset_id: Required for dataset-to-document joins
-- - idx_team_resource_shares_lookup: Enables batch team share lookups
-- - All indexes are additive (IF NOT EXISTS) - safe to run multiple times
--
-- Expected impact at scale:
-- - 1,000 users: 50-100ms queries → 5-15ms
-- - 10,000 users: 500-1500ms queries → 20-80ms
--
-- Rollback (if needed):
-- DO $$
-- DECLARE tenant_schema TEXT;
-- BEGIN
--     FOR tenant_schema IN
--         SELECT schema_name FROM information_schema.schemata
--         WHERE schema_name LIKE 'tenant_%' AND schema_name != 'tenant_template'
--     LOOP
--         EXECUTE format('DROP INDEX IF EXISTS %I.idx_conversations_agent_id', tenant_schema);
--         EXECUTE format('DROP INDEX IF EXISTS %I.idx_documents_dataset_id', tenant_schema);
--         EXECUTE format('DROP INDEX IF EXISTS %I.idx_team_resource_shares_lookup', tenant_schema);
--     END LOOP;
-- END $$;
143
scripts/postgresql/migrations/T009_tenant_scoped_categories.sql
Normal file
@@ -0,0 +1,143 @@
-- T009_tenant_scoped_categories.sql
-- Tenant-Scoped Editable/Deletable Agent Categories
-- Issue: #215 - FR: Editable/Deletable Default Agent Categories
--
-- Changes:
--   1. Creates categories table in each tenant schema
--   2. Seeds default categories (General, Coding, Writing, etc.)
--   3. Migrates existing per-user custom categories to tenant-scoped
--
-- Rollback: See bottom of file

BEGIN;

-- Apply to all existing tenant schemas
DO $$
DECLARE
    tenant_schema TEXT;
BEGIN
    FOR tenant_schema IN
        SELECT schema_name
        FROM information_schema.schemata
        WHERE schema_name LIKE 'tenant_%' AND schema_name != 'tenant_template'
    LOOP
        -- Create categories table
        EXECUTE format('
            CREATE TABLE IF NOT EXISTS %I.categories (
                id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
                tenant_id UUID NOT NULL,
                name VARCHAR(100) NOT NULL,
                slug VARCHAR(100) NOT NULL,
                description TEXT,
                icon VARCHAR(10),
                is_default BOOLEAN DEFAULT FALSE,
                created_by UUID,
                sort_order INTEGER DEFAULT 0,
                is_deleted BOOLEAN DEFAULT FALSE,
                created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),

                CONSTRAINT fk_categories_tenant FOREIGN KEY (tenant_id)
                    REFERENCES %I.tenants(id) ON DELETE CASCADE,
                CONSTRAINT fk_categories_created_by FOREIGN KEY (created_by)
                    REFERENCES %I.users(id) ON DELETE SET NULL,
                CONSTRAINT uq_categories_tenant_slug UNIQUE (tenant_id, slug)
            )
        ', tenant_schema, tenant_schema, tenant_schema);

        -- Create indexes
        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_categories_tenant_id
            ON %I.categories(tenant_id)
        ', tenant_schema);

        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_categories_slug
            ON %I.categories(tenant_id, slug)
        ', tenant_schema);

        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_categories_created_by
            ON %I.categories(created_by)
        ', tenant_schema);

        EXECUTE format('
            CREATE INDEX IF NOT EXISTS idx_categories_is_deleted
            ON %I.categories(is_deleted) WHERE is_deleted = FALSE
        ', tenant_schema);

        -- Seed default categories for each tenant in this schema
        EXECUTE format('
            INSERT INTO %I.categories (tenant_id, name, slug, description, icon, is_default, sort_order)
            SELECT
                t.id,
                c.name,
                c.slug,
                c.description,
                c.icon,
                TRUE,
                c.sort_order
            FROM %I.tenants t
            CROSS JOIN (VALUES
                (''General'', ''general'', ''All-purpose agent for various tasks'', NULL, 10),
                (''Coding'', ''coding'', ''Programming and development assistance'', NULL, 20),
                (''Writing'', ''writing'', ''Content creation and editing'', NULL, 30),
                (''Analysis'', ''analysis'', ''Data analysis and insights'', NULL, 40),
                (''Creative'', ''creative'', ''Creative projects and brainstorming'', NULL, 50),
                (''Research'', ''research'', ''Research and fact-checking'', NULL, 60),
                (''Business'', ''business'', ''Business strategy and operations'', NULL, 70),
                (''Education'', ''education'', ''Teaching and learning assistance'', NULL, 80)
            ) AS c(name, slug, description, icon, sort_order)
            ON CONFLICT (tenant_id, slug) DO NOTHING
        ', tenant_schema, tenant_schema);

        -- Migrate existing per-user custom categories from users.preferences
        -- Custom categories are stored as: preferences->'custom_categories' = [{"name": "...", "description": "..."}, ...]
        EXECUTE format('
            INSERT INTO %I.categories (tenant_id, name, slug, description, created_by, is_default, sort_order)
            SELECT DISTINCT ON (u.tenant_id, lower(regexp_replace(cc.name, ''[^a-zA-Z0-9]+'', ''-'', ''g'')))
                u.tenant_id,
                cc.name,
                lower(regexp_replace(cc.name, ''[^a-zA-Z0-9]+'', ''-'', ''g'')),
                COALESCE(cc.description, ''Custom category''),
                u.id,
                FALSE,
                100 + ROW_NUMBER() OVER (PARTITION BY u.tenant_id ORDER BY cc.name)
            FROM %I.users u
            CROSS JOIN LATERAL jsonb_array_elements(
                COALESCE(u.preferences->''custom_categories'', ''[]''::jsonb)
            ) AS cc_json
            CROSS JOIN LATERAL (
                SELECT
                    cc_json->>''name'' AS name,
                    cc_json->>''description'' AS description
            ) AS cc
            WHERE cc.name IS NOT NULL AND cc.name != ''''
            ON CONFLICT (tenant_id, slug) DO NOTHING
        ', tenant_schema, tenant_schema);

        RAISE NOTICE 'Applied T009 categories table to schema: %', tenant_schema;
    END LOOP;
END $$;
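
-- Illustrative slug derivation (worked example, not part of the migration):
-- the lower(regexp_replace(...)) pipeline above collapses each run of
-- non-alphanumeric characters into a single hyphen.
--
-- SELECT lower(regexp_replace('Data Science & ML', '[^a-zA-Z0-9]+', '-', 'g'));
-- -- => 'data-science-ml'
--
-- Names that start or end with symbols keep a leading/trailing hyphen
-- ('(Beta)' => '-beta-'); the DISTINCT ON and ON CONFLICT guards above still
-- deduplicate such slugs per tenant.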

COMMIT;

-- Verification query (run manually):
-- SELECT schema_name,
--     (SELECT COUNT(*) FROM information_schema.tables
--      WHERE table_schema = s.schema_name AND table_name = 'categories') as has_categories_table
-- FROM information_schema.schemata s
-- WHERE schema_name LIKE 'tenant_%' AND schema_name != 'tenant_template';

-- Rollback (if needed):
-- DO $$
-- DECLARE tenant_schema TEXT;
-- BEGIN
--     FOR tenant_schema IN
--         SELECT schema_name FROM information_schema.schemata
--         WHERE schema_name LIKE 'tenant_%' AND schema_name != 'tenant_template'
--     LOOP
--         EXECUTE format('DROP TABLE IF EXISTS %I.categories CASCADE', tenant_schema);
--         RAISE NOTICE 'Dropped categories table from schema: %', tenant_schema;
--     END LOOP;
-- END $$;
88
scripts/postgresql/setup-tenant-tablespaces.sql
Normal file
@@ -0,0 +1,88 @@
-- GT 2.0 Tenant Tablespace Setup
-- Creates dedicated tablespaces for tenant data isolation on persistent volumes

-- Create tablespace directory if it doesn't exist (PostgreSQL will create it)
-- This tablespace will be on the dedicated tenant persistent volume
-- Note: CREATE TABLESPACE cannot be in a DO block or EXECUTE; it must be top-level SQL
-- Note: IF NOT EXISTS is not supported until PostgreSQL 16, so we use DROP IF EXISTS + CREATE

-- Drop and recreate to ensure clean state (safe for init scripts on a fresh DB)
DROP TABLESPACE IF EXISTS tenant_test_company_ts;
CREATE TABLESPACE tenant_test_company_ts LOCATION '/var/lib/postgresql/tablespaces/tenant_test';

-- Set default tablespace for tenant schema (PostgreSQL doesn't support ALTER SCHEMA SET default_tablespace)
-- Instead, we'll set the default for the database connection when needed

-- Move existing tenant tables to the dedicated tablespace
-- This ensures all tenant data is stored on the tenant-specific persistent volume

-- Move users table
ALTER TABLE tenant_test_company.users SET TABLESPACE tenant_test_company_ts;

-- Move teams table
ALTER TABLE tenant_test_company.teams SET TABLESPACE tenant_test_company_ts;

-- Move agents table
ALTER TABLE tenant_test_company.agents SET TABLESPACE tenant_test_company_ts;

-- Move conversations table
ALTER TABLE tenant_test_company.conversations SET TABLESPACE tenant_test_company_ts;

-- Move messages table
ALTER TABLE tenant_test_company.messages SET TABLESPACE tenant_test_company_ts;

-- Move documents table
ALTER TABLE tenant_test_company.documents SET TABLESPACE tenant_test_company_ts;

-- Move document_chunks table (contains PGVector embeddings)
ALTER TABLE tenant_test_company.document_chunks SET TABLESPACE tenant_test_company_ts;

-- Move datasets table
ALTER TABLE tenant_test_company.datasets SET TABLESPACE tenant_test_company_ts;

-- Move all indexes to the tenant tablespace as well
DO $$
DECLARE
    rec RECORD;
BEGIN
    FOR rec IN
        SELECT schemaname, indexname, tablename
        FROM pg_indexes
        WHERE schemaname = 'tenant_test_company'
    LOOP
        BEGIN
            EXECUTE format('ALTER INDEX %I.%I SET TABLESPACE tenant_test_company_ts',
                rec.schemaname, rec.indexname);
            RAISE NOTICE 'Moved index %.% to tenant tablespace', rec.schemaname, rec.indexname;
        EXCEPTION
            WHEN OTHERS THEN
                RAISE WARNING 'Failed to move index %.%: %', rec.schemaname, rec.indexname, SQLERRM;
        END;
    END LOOP;
END $$;

-- Grant permissions for the tablespace
GRANT CREATE ON TABLESPACE tenant_test_company_ts TO gt2_tenant_user;

-- Display tablespace information
SELECT
    spcname as tablespace_name,
    pg_tablespace_location(oid) as location,
    pg_size_pretty(pg_tablespace_size(spcname)) as size
FROM pg_tablespace
WHERE spcname LIKE 'tenant_%';

-- Display tenant table locations
SELECT
    schemaname,
    tablename,
    tablespace
FROM pg_tables
WHERE schemaname = 'tenant_test_company'
ORDER BY tablename;

-- Display completion notice
DO $$
BEGIN
    RAISE NOTICE 'Tenant tablespace setup completed for tenant_test_company';
END $$;
71
scripts/postgresql/tenant-extensions.sql
Normal file
@@ -0,0 +1,71 @@
-- GT 2.0 Tenant Cluster Extensions Initialization
-- Installs all extensions for tenant database including PGVector
-- Requires pgvector/pgvector:pg15 Docker image

-- Enable logging
\set ON_ERROR_STOP on
\set ECHO all

-- NOTE: Removed \c gt2_tenants - Docker entrypoint runs this script
-- against POSTGRES_DB (gt2_tenants) automatically.

-- Vector extension for embeddings (PGVector) - Required for RAG/embeddings
CREATE EXTENSION IF NOT EXISTS vector;
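
-- Illustrative usage (hypothetical table and dimension): once the extension
-- is loaded, vector columns and distance operators become available.
--
-- CREATE TABLE example_embeddings (id UUID PRIMARY KEY, embedding vector(3));
-- INSERT INTO example_embeddings VALUES (gen_random_uuid(), '[0.1, 0.2, 0.3]');
-- SELECT id FROM example_embeddings
-- ORDER BY embedding <-> '[0.1, 0.2, 0.25]' LIMIT 5;  -- L2 nearest neighbors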
|
||||
-- Full-text search support
|
||||
CREATE EXTENSION IF NOT EXISTS pg_trgm;
|
||||
CREATE EXTENSION IF NOT EXISTS unaccent;
|
||||
|
||||
-- Statistics and monitoring
|
||||
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
|
||||
CREATE EXTENSION IF NOT EXISTS pg_buffercache;
|
||||
|
||||
-- UUID generation
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||
|
||||
-- JSON support enhancements
|
||||
CREATE EXTENSION IF NOT EXISTS "btree_gin";
|
||||
CREATE EXTENSION IF NOT EXISTS "btree_gist";
|
||||
|
||||
-- Security extensions
|
||||
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
|
||||
|
||||
-- Verify critical extensions are loaded
|
||||
DO $$
|
||||
DECLARE
|
||||
ext_count INTEGER;
|
||||
BEGIN
|
||||
-- Check vector extension
|
||||
SELECT COUNT(*) INTO ext_count FROM pg_extension WHERE extname = 'vector';
|
||||
IF ext_count = 0 THEN
|
||||
RAISE EXCEPTION 'Vector extension not loaded - PGVector support required for embeddings';
|
||||
ELSE
|
||||
RAISE NOTICE 'Vector extension loaded successfully - PGVector enabled';
|
||||
END IF;
|
||||
|
||||
-- Check pg_trgm extension
|
||||
SELECT COUNT(*) INTO ext_count FROM pg_extension WHERE extname = 'pg_trgm';
|
||||
IF ext_count = 0 THEN
|
||||
RAISE EXCEPTION 'pg_trgm extension not loaded - Full-text search support required';
|
||||
ELSE
|
||||
RAISE NOTICE 'pg_trgm extension loaded successfully - Full-text search enabled';
|
||||
END IF;
|
||||
|
||||
-- Check pg_stat_statements extension
|
||||
SELECT COUNT(*) INTO ext_count FROM pg_extension WHERE extname = 'pg_stat_statements';
|
||||
IF ext_count = 0 THEN
|
||||
RAISE WARNING 'pg_stat_statements extension not loaded - Query monitoring limited';
|
||||
ELSE
|
||||
RAISE NOTICE 'pg_stat_statements extension loaded successfully - Query monitoring enabled';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Log completion
|
||||
DO $$
|
||||
BEGIN
|
||||
RAISE NOTICE '=== GT 2.0 TENANT EXTENSIONS SETUP ===';
|
||||
RAISE NOTICE 'Extensions configured in tenant database:';
|
||||
RAISE NOTICE '- gt2_tenants: PGVector + full-text search + monitoring + crypto';
|
||||
RAISE NOTICE 'All critical extensions verified and loaded';
|
||||
RAISE NOTICE '======================================';
|
||||
END $$;
|
||||
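
With the vector extension verified, tenant schemas can hold embedding columns and run similarity search. A minimal pgvector smoke-test sketch (table name and dimension are illustrative only, not part of the release):

CREATE TABLE IF NOT EXISTS embedding_smoke_test (
    id bigserial PRIMARY KEY,
    embedding vector(3)
);
INSERT INTO embedding_smoke_test (embedding) VALUES ('[1,2,3]'), ('[4,5,6]');
-- nearest neighbor by L2 distance via the <-> operator
SELECT id FROM embedding_smoke_test ORDER BY embedding <-> '[1,1,1]' LIMIT 1;
DROP TABLE embedding_smoke_test;
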
26  scripts/postgresql/unified/00-create-databases.sql  Normal file
@@ -0,0 +1,26 @@
-- GT 2.0 Admin Database Creation Script
-- Creates databases for admin/control panel cluster only
-- This MUST run first (00-prefix ensures execution order)

-- Enable logging
\set ON_ERROR_STOP on
\set ECHO all

-- Create gt2_admin database for control panel
SELECT 'CREATE DATABASE gt2_admin'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'gt2_admin')\gexec

-- Create gt2_control_panel database for control panel backend
SELECT 'CREATE DATABASE gt2_control_panel'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'gt2_control_panel')\gexec

-- Log database creation completion
DO $$
BEGIN
    RAISE NOTICE '=== GT 2.0 ADMIN DATABASE CREATION ===';
    RAISE NOTICE 'Databases created successfully:';
    RAISE NOTICE '- gt2_admin (control panel metadata)';
    RAISE NOTICE '- gt2_control_panel (control panel backend)';
    RAISE NOTICE 'Note: gt2_tenants created in tenant cluster separately';
    RAISE NOTICE '======================================';
END $$;
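
Aside: PostgreSQL has no CREATE DATABASE IF NOT EXISTS, so the script leans on psql's \gexec, which executes every row of the preceding result set as its own statement; when the database already exists the query returns zero rows and nothing runs. The same idempotent pattern works for any conditional DDL (the database name below is a placeholder):

SELECT 'CREATE DATABASE my_scratch_db'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'my_scratch_db')\gexec
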
20  scripts/postgresql/unified/00-create-tenant-database.sql  Normal file
@@ -0,0 +1,20 @@
-- GT 2.0 Tenant Database Creation Script
-- Creates database for tenant cluster only
-- This MUST run first (00-prefix ensures execution order)

-- Enable logging
\set ON_ERROR_STOP on
\set ECHO all

-- Create gt2_tenants database for tenant data storage
SELECT 'CREATE DATABASE gt2_tenants'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'gt2_tenants')\gexec

-- Log database creation completion
DO $$
BEGIN
    RAISE NOTICE '=== GT 2.0 TENANT DATABASE CREATION ===';
    RAISE NOTICE 'Database created successfully:';
    RAISE NOTICE '- gt2_tenants (tenant data storage with PGVector)';
    RAISE NOTICE '=======================================';
END $$;
33  scripts/postgresql/unified/01-create-admin-roles.sql  Normal file
@@ -0,0 +1,33 @@
-- GT 2.0 Admin Cluster Role Creation Script
-- Creates PostgreSQL roles for admin/control panel cluster
-- Runs in admin postgres container only

-- Enable logging
\set ON_ERROR_STOP on
\set ECHO all

-- Create admin user for control panel database
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'gt2_admin') THEN
        CREATE ROLE gt2_admin LOGIN PASSWORD 'dev_password_change_in_prod';
        RAISE NOTICE 'Created gt2_admin role for control panel access';
    ELSE
        RAISE NOTICE 'gt2_admin role already exists';
    END IF;
END $$;

-- Grant database connection permissions (only on databases that exist in admin container)
GRANT CONNECT ON DATABASE gt2_admin TO gt2_admin;
GRANT CONNECT ON DATABASE gt2_control_panel TO gt2_admin;

-- Log completion
DO $$
BEGIN
    RAISE NOTICE '=== GT 2.0 ADMIN CLUSTER ROLE CREATION ===';
    RAISE NOTICE 'Role created: gt2_admin';
    RAISE NOTICE 'Permissions granted on:';
    RAISE NOTICE ' - gt2_admin database';
    RAISE NOTICE ' - gt2_control_panel database';
    RAISE NOTICE '=========================================';
END $$;
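
Aside: CREATE ROLE has no IF NOT EXISTS clause, hence the pg_roles guard above. An equivalent sketch that instead catches the duplicate_object error, which also tolerates a concurrent-creation race (role name and password are placeholders):

DO $$
BEGIN
    CREATE ROLE example_role LOGIN PASSWORD 'changeme';
EXCEPTION
    WHEN duplicate_object THEN
        RAISE NOTICE 'example_role already exists, skipping';
END $$;
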
62  scripts/postgresql/unified/01-create-tenant-roles.sql  Normal file
@@ -0,0 +1,62 @@
-- GT 2.0 Tenant Cluster Role Creation Script
-- Creates PostgreSQL roles for tenant cluster (including replication)
-- Runs in tenant postgres container only

-- Enable logging
\set ON_ERROR_STOP on
\set ECHO all

-- Create replication user for High Availability
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'replicator') THEN
        CREATE ROLE replicator WITH REPLICATION PASSWORD 'tenant_replicator_dev_password' LOGIN;
        RAISE NOTICE 'Created replicator role for HA cluster';
    ELSE
        RAISE NOTICE 'Replicator role already exists';
    END IF;
END $$;

-- Create application user for tenant backend connections (legacy)
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'gt2_app') THEN
        CREATE ROLE gt2_app LOGIN PASSWORD 'gt2_app_password';
        RAISE NOTICE 'Created gt2_app role for tenant backend';
    ELSE
        RAISE NOTICE 'gt2_app role already exists';
    END IF;
END $$;

-- Create tenant user for tenant database operations (current)
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'gt2_tenant_user') THEN
        CREATE ROLE gt2_tenant_user LOGIN PASSWORD 'gt2_tenant_dev_password';
        RAISE NOTICE 'Created gt2_tenant_user role for tenant operations';
    ELSE
        RAISE NOTICE 'gt2_tenant_user role already exists';
    END IF;
END $$;

-- Set default search_path for the gt2_tenant_user role
-- This ensures new connections automatically use the tenant_test_company schema
ALTER ROLE gt2_tenant_user SET search_path TO tenant_test_company, public;

-- Grant database connection permissions (only on gt2_tenants, which exists in the tenant container)
GRANT CONNECT ON DATABASE gt2_tenants TO gt2_app;
GRANT CONNECT ON DATABASE gt2_tenants TO gt2_tenant_user;
GRANT CONNECT ON DATABASE gt2_tenants TO replicator;

-- Log completion
DO $$
BEGIN
    RAISE NOTICE '=== GT 2.0 TENANT CLUSTER ROLE CREATION ===';
    RAISE NOTICE 'Roles created:';
    RAISE NOTICE ' - replicator (for HA replication)';
    RAISE NOTICE ' - gt2_app (tenant backend - legacy)';
    RAISE NOTICE ' - gt2_tenant_user (tenant operations - current)';
    RAISE NOTICE 'Permissions granted on:';
    RAISE NOTICE ' - gt2_tenants database';
    RAISE NOTICE '==========================================';
END $$;
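
Note: ALTER ROLE ... SET search_path only affects sessions opened after the change; existing connections keep their previous setting. A quick check after reconnecting as gt2_tenant_user:

SHOW search_path;  -- expected: tenant_test_company, public
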
2962  scripts/postgresql/unified/01-init-control-panel-schema-complete.sql  Normal file
(File diff suppressed because it is too large)
98  scripts/postgresql/unified/02-init-extensions.sql  Normal file
@@ -0,0 +1,98 @@
-- GT 2.0 Unified Extensions Initialization
-- Ensures all required extensions are properly configured for all databases
-- Runs after user creation (02-prefix ensures execution order)

-- Enable logging (but don't stop on errors for database connections)
\set ECHO all

-- Connect to gt2_tenants database first for PGVector setup
\c gt2_tenants
\set ON_ERROR_STOP on

-- Vector extension for embeddings (PGVector) - Required for tenant database
CREATE EXTENSION IF NOT EXISTS vector;

-- Full-text search support
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS unaccent;

-- Statistics and monitoring
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
CREATE EXTENSION IF NOT EXISTS pg_buffercache;

-- UUID generation (gen_random_uuid() is built in since PostgreSQL 13, but install uuid-ossp to ensure availability)
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- JSON support enhancements
CREATE EXTENSION IF NOT EXISTS "btree_gin";
CREATE EXTENSION IF NOT EXISTS "btree_gist";

-- Security extensions
CREATE EXTENSION IF NOT EXISTS "pgcrypto";

-- Connect to control panel database and add required extensions (if it exists)
\set ON_ERROR_STOP off
\c gt2_control_panel
\set ON_ERROR_STOP on

-- Basic extensions for control panel
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pg_stat_statements";
CREATE EXTENSION IF NOT EXISTS "pgcrypto";

-- Connect to admin database and add required extensions (if it exists)
\set ON_ERROR_STOP off
\c gt2_admin
\set ON_ERROR_STOP on

-- Basic extensions for admin database
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pg_stat_statements";
CREATE EXTENSION IF NOT EXISTS "pgcrypto";

-- Switch back to tenant database for verification
\set ON_ERROR_STOP off
\c gt2_tenants
\set ON_ERROR_STOP on

-- Verify critical extensions are loaded
DO $$
DECLARE
    ext_count INTEGER;
BEGIN
    -- Check vector extension
    SELECT COUNT(*) INTO ext_count FROM pg_extension WHERE extname = 'vector';
    IF ext_count = 0 THEN
        RAISE EXCEPTION 'Vector extension not loaded - PGVector support required for embeddings';
    ELSE
        RAISE NOTICE 'Vector extension loaded successfully - PGVector enabled';
    END IF;

    -- Check pg_trgm extension
    SELECT COUNT(*) INTO ext_count FROM pg_extension WHERE extname = 'pg_trgm';
    IF ext_count = 0 THEN
        RAISE EXCEPTION 'pg_trgm extension not loaded - Full-text search support required';
    ELSE
        RAISE NOTICE 'pg_trgm extension loaded successfully - Full-text search enabled';
    END IF;

    -- Check pg_stat_statements extension
    SELECT COUNT(*) INTO ext_count FROM pg_extension WHERE extname = 'pg_stat_statements';
    IF ext_count = 0 THEN
        RAISE WARNING 'pg_stat_statements extension not loaded - Query monitoring limited';
    ELSE
        RAISE NOTICE 'pg_stat_statements extension loaded successfully - Query monitoring enabled';
    END IF;
END $$;

-- Log completion
DO $$
BEGIN
    RAISE NOTICE '=== GT 2.0 UNIFIED EXTENSIONS SETUP ===';
    RAISE NOTICE 'Extensions configured in all databases:';
    RAISE NOTICE '- gt2_tenants: PGVector + full-text + monitoring';
    RAISE NOTICE '- gt2_control_panel: Basic extensions + crypto';
    RAISE NOTICE '- gt2_admin: Basic extensions + crypto';
    RAISE NOTICE 'All critical extensions verified and loaded';
    RAISE NOTICE '=====================================';
END $$;
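
Note: CREATE EXTENSION pg_stat_statements installs only the SQL objects; collecting statistics additionally requires the module in shared_preload_libraries, which likely explains why the verification above demotes a missing pg_stat_statements to a WARNING rather than an EXCEPTION. A quick sketch that checks both halves:

SHOW shared_preload_libraries;            -- should list pg_stat_statements
SELECT count(*) FROM pg_stat_statements;  -- errors if the module is not preloaded
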
2431  scripts/postgresql/unified/04-init-tenant-schema-complete.sql  Normal file
(File diff suppressed because it is too large)
64  scripts/postgresql/unified/05-create-tenant-test-data.sql  Normal file
@@ -0,0 +1,64 @@
-- GT 2.0 Tenant Test Data Creation Script
-- Creates test tenant and gtadmin@test.com user in tenant database
-- Mirrors the control panel test data for user sync compatibility

-- Enable logging
\set ON_ERROR_STOP on
\set ECHO all

-- Create test tenant in tenant schema
INSERT INTO tenant_test_company.tenants (
    domain,
    name,
    created_at,
    updated_at
) VALUES (
    'test-company',
    'HW Workstation Test Deployment',
    NOW(),
    NOW()
) ON CONFLICT (domain) DO UPDATE SET
    name = EXCLUDED.name,
    updated_at = NOW();

-- Create test super admin user in tenant schema
-- Role mapping: super_admin from control panel → 'admin' in tenant database
-- This mirrors what sync_user_to_tenant_database() does in control-panel-backend
INSERT INTO tenant_test_company.users (
    email,
    username,
    full_name,
    tenant_id,
    role,
    created_at,
    updated_at
) VALUES (
    'gtadmin@test.com',
    'gtadmin',
    'GT Admin',
    (SELECT id FROM tenant_test_company.tenants WHERE domain = 'test-company' LIMIT 1),
    'admin',
    NOW(),
    NOW()
) ON CONFLICT (email, tenant_id) DO UPDATE SET
    username = EXCLUDED.username,
    full_name = EXCLUDED.full_name,
    role = EXCLUDED.role,
    updated_at = NOW();

-- Log completion
DO $$
DECLARE
    tenant_count INTEGER;
    user_count INTEGER;
BEGIN
    SELECT COUNT(*) INTO tenant_count FROM tenant_test_company.tenants WHERE domain = 'test-company';
    SELECT COUNT(*) INTO user_count FROM tenant_test_company.users WHERE email = 'gtadmin@test.com';

    RAISE NOTICE '=== GT 2.0 TENANT TEST DATA CREATION ===';
    RAISE NOTICE 'Test tenant created: % (domain: test-company)', tenant_count;
    RAISE NOTICE 'Test user created: % (email: gtadmin@test.com)', user_count;
    RAISE NOTICE 'User role: admin (mapped from super_admin)';
    RAISE NOTICE 'Note: User can now log into tenant app at localhost:3002';
    RAISE NOTICE '========================================';
END $$;
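
Note: an ON CONFLICT (email, tenant_id) clause only resolves against a matching unique constraint or index, so the tenant schema must already define one. A sketch of the index this upsert presumes (the index name is illustrative):

CREATE UNIQUE INDEX IF NOT EXISTS users_email_tenant_id_uq
    ON tenant_test_company.users (email, tenant_id);
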
245  scripts/postgresql/unified/05-create-test-data.sql  Normal file
@@ -0,0 +1,245 @@
-- GT 2.0 Test Data Creation Script
-- Creates test tenant and gtadmin@test.com user for development/testing
-- This is the ONLY place where the test user should be created

-- Enable logging
\set ON_ERROR_STOP on
\set ECHO all

-- Create test tenant
INSERT INTO public.tenants (
    uuid,
    name,
    domain,
    template,
    status,
    max_users,
    resource_limits,
    namespace,
    subdomain,
    optics_enabled,
    created_at,
    updated_at
) VALUES (
    'test-tenant-uuid-001',
    'GT AI OS',
    'test-company',
    'enterprise',
    'active',
    100,
    '{"cpu": "4000m", "memory": "8Gi", "storage": "50Gi"}',
    'gt-test',
    'test',
    false, -- Optics disabled by default (enable via Control Panel)
    NOW(),
    NOW()
) ON CONFLICT (domain) DO UPDATE SET
    name = EXCLUDED.name,
    template = EXCLUDED.template,
    status = EXCLUDED.status,
    max_users = EXCLUDED.max_users,
    resource_limits = EXCLUDED.resource_limits,
    namespace = EXCLUDED.namespace,
    subdomain = EXCLUDED.subdomain,
    optics_enabled = EXCLUDED.optics_enabled,
    updated_at = NOW();

-- Create test super admin user
-- Password: Test@123
-- Hash generated with: python -c "from passlib.context import CryptContext; print(CryptContext(schemes=['bcrypt']).hash('Test@123'))"
INSERT INTO public.users (
    uuid,
    email,
    full_name,
    hashed_password,
    user_type,
    tenant_id,
    capabilities,
    is_active,
    created_at,
    updated_at
) VALUES (
    'test-admin-uuid-001',
    'gtadmin@test.com',
    'GT Admin Test User',
    '$2b$12$otRZHfXz7GJUjA.ULeIc4ev612FSAK3tDcOYZdZCJ219j7WFNjFye',
    'super_admin',
    (SELECT id FROM public.tenants WHERE domain = 'test-company'),
    '[{"resource": "*", "actions": ["*"], "constraints": {}}]',
    true,
    NOW(),
    NOW()
) ON CONFLICT (email) DO UPDATE SET
    hashed_password = EXCLUDED.hashed_password,
    user_type = EXCLUDED.user_type,
    tenant_id = EXCLUDED.tenant_id,
    capabilities = EXCLUDED.capabilities,
    is_active = EXCLUDED.is_active,
    updated_at = NOW();

-- ===================================================================
-- MODEL CONFIGURATIONS
-- ===================================================================

-- Insert LLM model configurations
INSERT INTO public.model_configs (
    model_id, name, version, provider, model_type, endpoint,
    context_window, max_tokens, capabilities,
    cost_per_million_input, cost_per_million_output,
    is_active, health_status, request_count, error_count,
    success_rate, avg_latency_ms,
    tenant_restrictions, required_capabilities,
    created_at, updated_at
) VALUES
-- Groq Llama 3.1 8B Instant (fast, cheap)
('llama-3.1-8b-instant', 'Groq Llama 3.1 8b Instant', '1.0', 'groq', 'llm',
 'https://api.groq.com/openai/v1/chat/completions',
 131072, 131072,
 '{"reasoning": false, "function_calling": false, "vision": false, "audio": false, "streaming": false, "multilingual": false}'::json,
 0.05, 0.08, true, 'unknown', 0, 0, 100, 0,
 '{"global_access": true}'::json, '[]'::json,
 NOW(), NOW()),

-- Groq Compound AI Search (blended: GPT-OSS-120B + Llama 4 Scout)
('groq/compound', 'Groq Compound AI Search', '1.0', 'groq', 'llm',
 'https://api.groq.com/openai/v1/chat/completions',
 131072, 8192,
 '{"reasoning": false, "function_calling": false, "vision": false, "audio": false, "streaming": false, "multilingual": false}'::json,
 0.13, 0.47, true, 'unknown', 0, 0, 100, 0,
 '{"global_access": true}'::json, '[]'::json,
 NOW(), NOW()),

-- Groq OpenAI GPT OSS 120B (large OSS)
('openai/gpt-oss-120b', 'Groq Open AI GPT OSS 120b', '1.0', 'groq', 'llm',
 'https://api.groq.com/openai/v1/chat/completions',
 131072, 32000,
 '{"reasoning": false, "function_calling": false, "vision": false, "audio": false, "streaming": false, "multilingual": false}'::json,
 0.15, 0.60, true, 'unknown', 0, 0, 100, 0,
 '{"global_access": true}'::json, '[]'::json,
 NOW(), NOW()),

-- Groq OpenAI GPT OSS 20B (medium OSS)
('openai/gpt-oss-20b', 'Groq Open AI GPT OSS 20b', '1.0', 'groq', 'llm',
 'https://api.groq.com/openai/v1/chat/completions',
 131072, 65536,
 '{"reasoning": false, "function_calling": false, "vision": false, "audio": false, "streaming": false, "multilingual": false}'::json,
 0.075, 0.30, true, 'unknown', 0, 0, 100, 0,
 '{"global_access": true}'::json, '[]'::json,
 NOW(), NOW()),

-- Groq Meta Llama 4 Maverick 17B (17Bx128E MoE)
('meta-llama/llama-4-maverick-17b-128e-instruct', 'Groq Meta Llama 4 Maverick 17b 128 MOE Instruct', '1.0', 'groq', 'llm',
 'https://api.groq.com/openai/v1/chat/completions',
 131072, 8192,
 '{"reasoning": false, "function_calling": false, "vision": false, "audio": false, "streaming": false, "multilingual": false}'::json,
 0.20, 0.60, true, 'unknown', 0, 0, 100, 0,
 '{"global_access": true}'::json, '[]'::json,
 NOW(), NOW()),

-- Moonshot AI Kimi K2 (1T parameters, 256k context)
('moonshotai/kimi-k2-instruct-0905', 'Groq Moonshot AI Kimi K2 instruct 0905', '1.0', 'groq', 'llm',
 'https://api.groq.com/openai/v1/chat/completions',
 262144, 16384,
 '{"reasoning": false, "function_calling": false, "vision": false, "audio": false, "streaming": false, "multilingual": false}'::json,
 1.00, 3.00, true, 'unknown', 0, 0, 100, 0,
 '{"global_access": true}'::json, '[]'::json,
 NOW(), NOW()),

-- Groq Llama Guard 4 12B (safety/moderation model)
('meta-llama/llama-guard-4-12b', 'Groq Llama Guard 4 12B', '1.0', 'groq', 'llm',
 'https://api.groq.com/openai/v1/chat/completions',
 131072, 8192,
 '{"reasoning": false, "function_calling": false, "vision": false, "audio": false, "streaming": false, "multilingual": false}'::json,
 0.20, 0.20, true, 'unknown', 0, 0, 100, 0,
 '{"global_access": true}'::json, '[]'::json,
 NOW(), NOW()),

-- BGE-M3 Multilingual Embedding Model (embeddings, input only)
('BAAI/bge-m3', 'BGE-M3 Multilingual Embedding', '1.0', 'external', 'embedding',
 'http://gentwo-vllm-embeddings:8000/v1/embeddings',
 8192, 8193,
 '{"multilingual": true, "reasoning": false, "function_calling": false, "vision": false, "audio": false, "streaming": false}'::json,
 0.01, 0.00, true, 'unknown', 0, 0, 100, 0,
 '{"global_access": true}'::json, '[]'::json,
 NOW(), NOW())

ON CONFLICT (model_id) DO UPDATE SET
    name = EXCLUDED.name,
    version = EXCLUDED.version,
    provider = EXCLUDED.provider,
    model_type = EXCLUDED.model_type,
    endpoint = EXCLUDED.endpoint,
    context_window = EXCLUDED.context_window,
    max_tokens = EXCLUDED.max_tokens,
    capabilities = EXCLUDED.capabilities,
    cost_per_million_input = EXCLUDED.cost_per_million_input,
    cost_per_million_output = EXCLUDED.cost_per_million_output,
    is_active = EXCLUDED.is_active,
    tenant_restrictions = EXCLUDED.tenant_restrictions,
    required_capabilities = EXCLUDED.required_capabilities,
    updated_at = NOW();
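
-- Aside: a minimal readback of the catalog inserted above can be handy when
-- tuning prices (the cost columns are per million tokens); a sketch, not part
-- of the original script:
--   SELECT model_id, cost_per_million_input, cost_per_million_output
--   FROM public.model_configs ORDER BY model_id;
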
-- ===================================================================
-- TENANT MODEL ACCESS
-- ===================================================================

-- Enable all models for test tenant with 10,000 requests/min rate limit
INSERT INTO public.tenant_model_configs (
    tenant_id, model_id, is_enabled, tenant_capabilities,
    rate_limits, usage_constraints, priority,
    created_at, updated_at
) VALUES
((SELECT id FROM public.tenants WHERE domain = 'test-company'), 'llama-3.1-8b-instant', true, '{}'::json,
 '{"requests_per_minute": 10000}'::json, '{}'::json, 5, NOW(), NOW()),

((SELECT id FROM public.tenants WHERE domain = 'test-company'), 'groq/compound', true, '{}'::json,
 '{"requests_per_minute": 10000}'::json, '{}'::json, 5, NOW(), NOW()),

((SELECT id FROM public.tenants WHERE domain = 'test-company'), 'openai/gpt-oss-120b', true, '{}'::json,
 '{"requests_per_minute": 10000}'::json, '{}'::json, 5, NOW(), NOW()),

((SELECT id FROM public.tenants WHERE domain = 'test-company'), 'openai/gpt-oss-20b', true, '{}'::json,
 '{"requests_per_minute": 10000}'::json, '{}'::json, 5, NOW(), NOW()),

((SELECT id FROM public.tenants WHERE domain = 'test-company'), 'meta-llama/llama-4-maverick-17b-128e-instruct', true, '{}'::json,
 '{"requests_per_minute": 10000}'::json, '{}'::json, 5, NOW(), NOW()),

((SELECT id FROM public.tenants WHERE domain = 'test-company'), 'moonshotai/kimi-k2-instruct-0905', true, '{}'::json,
 '{"requests_per_minute": 10000}'::json, '{}'::json, 5, NOW(), NOW()),

((SELECT id FROM public.tenants WHERE domain = 'test-company'), 'meta-llama/llama-guard-4-12b', true, '{}'::json,
 '{"requests_per_minute": 10000}'::json, '{}'::json, 5, NOW(), NOW()),

((SELECT id FROM public.tenants WHERE domain = 'test-company'), 'BAAI/bge-m3', true, '{}'::json,
 '{"requests_per_minute": 10000}'::json, '{}'::json, 5, NOW(), NOW())

ON CONFLICT (tenant_id, model_id) DO UPDATE SET
    is_enabled = EXCLUDED.is_enabled,
    rate_limits = EXCLUDED.rate_limits,
    updated_at = NOW();

-- Log completion
DO $$
DECLARE
    tenant_count INTEGER;
    user_count INTEGER;
    model_count INTEGER;
    tenant_model_count INTEGER;
BEGIN
    SELECT COUNT(*) INTO tenant_count FROM public.tenants WHERE domain = 'test-company';
    SELECT COUNT(*) INTO user_count FROM public.users WHERE email = 'gtadmin@test.com';
    SELECT COUNT(*) INTO model_count FROM public.model_configs;
    SELECT COUNT(*) INTO tenant_model_count FROM public.tenant_model_configs
        WHERE tenant_id = (SELECT id FROM public.tenants WHERE domain = 'test-company');

    RAISE NOTICE '=== GT 2.0 TEST DATA CREATION ===';
    RAISE NOTICE 'Test tenant created: % (domain: test-company)', tenant_count;
    RAISE NOTICE 'Test user created: % (email: gtadmin@test.com)', user_count;
    RAISE NOTICE 'Login credentials:';
    RAISE NOTICE '  Email: gtadmin@test.com';
    RAISE NOTICE '  Password: Test@123';
    RAISE NOTICE '';
    RAISE NOTICE 'LLM Models configured: %', model_count;
    RAISE NOTICE 'Tenant model access enabled: %', tenant_model_count;
    RAISE NOTICE 'Rate limit: 10,000 requests/minute per model';
    RAISE NOTICE '====================================';
END $$;
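
For a quick post-run check, the per-tenant model enablement written above can be read back with a join; a sketch, assuming the schema implied by the inserts:

SELECT m.model_id,
       t.rate_limits ->> 'requests_per_minute' AS requests_per_minute
FROM public.tenant_model_configs t
JOIN public.model_configs m ON m.model_id = t.model_id
WHERE t.tenant_id = (SELECT id FROM public.tenants WHERE domain = 'test-company')
  AND t.is_enabled
ORDER BY m.model_id;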