# syntax=docker/dockerfile:1
# BGE-M3 embedding server image for ARM64, CPU-only inference.
FROM python:3.11-slim

# System toolchain + optimized BLAS/LAPACK stacks for ARM64 so native
# scientific wheels build/link against OpenBLAS. curl is required by the
# HEALTHCHECK below. --no-install-recommends keeps the layer minimal.
RUN apt-get update && apt-get install -y --no-install-recommends \
        curl \
        g++ \
        gcc \
        gfortran \
        libblas-dev \
        liblapack-dev \
        libopenblas-dev \
        pkg-config \
    && rm -rf /var/lib/apt/lists/*

# CPU-only PyTorch wheels — the /whl/cpu index avoids pulling multi-GB
# CUDA-enabled wheels that are useless on this ARM64 target.
RUN pip install --no-cache-dir \
        torch \
        torchaudio \
        torchvision \
        --index-url https://download.pytorch.org/whl/cpu

# Inference/serving stack.
# NOTE: version specifiers and extras MUST be quoted — an unquoted
# `transformers>=4.36.0` is parsed by /bin/sh as an output redirection
# to a file named `=4.36.0`, silently discarding the constraint.
RUN pip install --no-cache-dir \
        "transformers>=4.36.0" \
        accelerate \
        fastapi \
        numpy \
        onnxruntime \
        "optimum[onnxruntime]" \
        sentence-transformers \
        uvicorn

# Runtime tuning for ARM64 CPU inference, grouped into one ENV layer.
# CUDA_VISIBLE_DEVICES="" forces CPU execution even if a GPU is exposed.
# NOTE(review): CFLAGS/CXXFLAGS are set AFTER all pip installs, so they
# never influenced those builds; they are kept only in case something
# compiles native code at container runtime (gcc/g++ remain installed) —
# if build-time effect was intended, move them above the pip layers.
ENV OMP_NUM_THREADS=8 \
    MKL_NUM_THREADS=8 \
    BLIS_NUM_THREADS=8 \
    VECLIB_MAXIMUM_THREADS=8 \
    PYTORCH_NUM_THREADS=8 \
    PYTORCH_ENABLE_MPS_FALLBACK=1 \
    PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 \
    CUDA_VISIBLE_DEVICES="" \
    USE_ONNX_RUNTIME=true \
    CFLAGS="-march=armv8-a+simd+fp16 -O3" \
    CXXFLAGS="-march=armv8-a+simd+fp16 -O3"

WORKDIR /app

# Run as a dedicated non-root user (stable UID for runAsNonRoot checks).
# /app is its home so HF/torch model caches under ~/.cache stay writable.
RUN useradd --system --uid 10001 --home-dir /app appuser \
    && chown appuser:appuser /app

# Custom OpenAI-compatible BGE-M3 server entry point.
COPY --chown=appuser:appuser .deployment/docker/embedding_server.py /app/embedding_server.py

USER appuser

# Documentation only — publish with `docker run -p 8000:8000`.
EXPOSE 8000

# Long start-period accommodates first-boot model download/load; /health
# must be a cheap, side-effect-free endpoint of the embedding server.
HEALTHCHECK --interval=30s --timeout=30s --start-period=300s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

# Exec form: the server is PID 1 and receives SIGTERM from `docker stop`.
CMD ["python", "embedding_server.py"]