GT AI OS Community v2.0.33 - Add NVIDIA NIM and Nemotron agents
- Updated python_coding_microproject.csv to use NVIDIA NIM Kimi K2 - Updated kali_linux_shell_simulator.csv to use NVIDIA NIM Kimi K2 - Made more general-purpose (flexible targets, expanded tools) - Added nemotron-mini-agent.csv for fast local inference via Ollama - Added nemotron-agent.csv for advanced reasoning via Ollama - Added wiki page: Projects for NVIDIA NIMs and Nemotron
This commit is contained in:
29
docker-compose.arm64.yml
Normal file
29
docker-compose.arm64.yml
Normal file
@@ -0,0 +1,29 @@
---
# Docker Compose ARM64 Overlay
# Platform-specific overrides for Apple Silicon (M2+)
#
# Usage: docker compose -f docker-compose.yml -f docker-compose.arm64.yml up -d

services:
  # VLLM Embeddings Service - ARM64 CPU Optimized
  vllm-embeddings:
    platform: linux/arm64
    build:
      context: .
      dockerfile: .deployment/docker/Dockerfile.vllm-arm
    environment:
      # Embedding model served by this service
      - MODEL_NAME=BAAI/bge-m3
      # Thread caps for CPU inference on ARM64 (presumably tuned for an
      # 8-performance-core Apple Silicon part — TODO confirm against target HW)
      - OMP_NUM_THREADS=8
      - MKL_NUM_THREADS=8
      - PYTORCH_NUM_THREADS=8
      - PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0
      - PYTORCH_ENABLE_MPS_FALLBACK=1
      # Empty value hides all CUDA devices. NOTE(review): the original wrote
      # CUDA_VISIBLE_DEVICES="" — in list-form Compose env entries the quotes
      # are literal, so the value was the two-character string "". That happens
      # to hide GPUs too (no device id matches ""), but an empty value is the
      # documented way to express "no CUDA devices".
      - CUDA_VISIBLE_DEVICES=
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          memory: 3G
    labels:
      - "gt2.platform=arm64"
      - "gt2.architecture=apple-silicon"
Reference in New Issue
Block a user