mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-17 06:04:51 +00:00
Initial monorepo setup with npm workspaces and dual TypeScript configuration
- Set up npm workspaces for three packages: pi-tui, pi-agent, and pi (pods)
- Implemented dual TypeScript configuration:
  - Root tsconfig.json with path mappings for development and type checking
  - Package-specific tsconfig.build.json for clean production builds
- Configured lockstep versioning with sync script for inter-package dependencies
- Added comprehensive documentation for development and publishing workflows
- All packages at version 0.5.0 ready for npm publishing
This commit is contained in:
commit
a74c5da112
63 changed files with 14558 additions and 0 deletions
83
packages/pods/scripts/model_run.sh
Normal file
83
packages/pods/scripts/model_run.sh
Normal file
|
|
@ -0,0 +1,83 @@
|
|||
#!/usr/bin/env bash
#
# Model runner script - runs sequentially, killed by pi stop.
#
# The {{...}} placeholders below are substituted with real values by the
# pi CLI before this script is uploaded to the pod.

set -euo pipefail

# These values are replaced before upload by pi CLI
MODEL_ID="{{MODEL_ID}}"
NAME="{{NAME}}"
PORT="{{PORT}}"
VLLM_ARGS="{{VLLM_ARGS}}"
||||
|
||||
# Trap to ensure cleanup on exit and kill any child processes
|
||||
cleanup() {
|
||||
local exit_code=$?
|
||||
echo "Model runner exiting with code $exit_code"
|
||||
# Kill any child processes
|
||||
pkill -P $$ 2>/dev/null || true
|
||||
exit $exit_code
|
||||
}
|
||||
trap cleanup EXIT TERM INT
|
||||
|
||||
# Force colored output even when not a TTY
|
||||
export FORCE_COLOR=1
|
||||
export PYTHONUNBUFFERED=1
|
||||
export TERM=xterm-256color
|
||||
export RICH_FORCE_TERMINAL=1
|
||||
export CLICOLOR_FORCE=1
|
||||
|
||||
# Source virtual environment
|
||||
source /root/venv/bin/activate
|
||||
|
||||
echo "========================================="
|
||||
echo "Model Run: $NAME"
|
||||
echo "Model ID: $MODEL_ID"
|
||||
echo "Port: $PORT"
|
||||
if [ -n "$VLLM_ARGS" ]; then
|
||||
echo "vLLM Args: $VLLM_ARGS"
|
||||
fi
|
||||
echo "========================================="
|
||||
echo ""
|
||||
|
||||
# Download model (with color progress bars)
|
||||
echo "Downloading model (will skip if cached)..."
|
||||
HF_HUB_ENABLE_HF_TRANSFER=1 hf download "$MODEL_ID"
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "❌ ERROR: Failed to download model" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "✅ Model download complete"
|
||||
echo ""
|
||||
|
||||
# Build vLLM command.
# Kept as a string executed via `bash -c` so that any shell quoting the
# operator placed inside VLLM_ARGS is honored when the command runs.
# Fail fast with a clear message if the API key is missing (a bare
# `set -u` failure would be cryptic).
: "${PI_API_KEY:?PI_API_KEY must be set}"
VLLM_CMD="vllm serve '$MODEL_ID' --port $PORT --api-key '$PI_API_KEY'"
# Parallel copy of the command with the API key redacted, used only for
# logging — the original echoed the real key into the pod logs.
VLLM_CMD_LOG="vllm serve '$MODEL_ID' --port $PORT --api-key '***'"
if [ -n "$VLLM_ARGS" ]; then
  VLLM_CMD="$VLLM_CMD $VLLM_ARGS"
  VLLM_CMD_LOG="$VLLM_CMD_LOG $VLLM_ARGS"
fi

echo "Starting vLLM server..."
echo "Command: $VLLM_CMD_LOG"
echo "========================================="
echo ""

# Run vLLM in background so we can monitor it and capture its exit code.
echo "Starting vLLM process..."
bash -c "$VLLM_CMD" &
VLLM_PID=$!

# Monitor the vLLM process.
# BUG FIX: the original did `wait $VLLM_PID` followed by
# `VLLM_EXIT_CODE=$?`. Under `set -e`, a non-zero `wait` aborts the
# script immediately, so the error-reporting branch below could never
# run. Guarding the wait with `|| VLLM_EXIT_CODE=$?` captures the
# status without tripping `set -e`.
echo "Monitoring vLLM process (PID: $VLLM_PID)..."
VLLM_EXIT_CODE=0
wait "$VLLM_PID" || VLLM_EXIT_CODE=$?

if [ "$VLLM_EXIT_CODE" -ne 0 ]; then
  echo "❌ ERROR: vLLM exited with code $VLLM_EXIT_CODE" >&2
  # Make sure to exit the script command too
  kill -TERM $$ 2>/dev/null || true
  exit "$VLLM_EXIT_CODE"
fi

echo "✅ vLLM exited normally"
exit 0
|
||||
Loading…
Add table
Add a link
Reference in a new issue