Mirror of https://github.com/getcompanion-ai/co-mono.git, synced 2026-04-17 04:02:21 +00:00

Merge origin/main into feature/detect-vertex-ai

Commit eede2e1644

136 changed files with 5747 additions and 7517 deletions
@@ -6,6 +6,8 @@
+- Vertex AI dummy value for `getEnvApiKey()`: Returns `"<authenticated>"` when Application Default Credentials are configured (`~/.config/gcloud/application_default_credentials.json` exists) and both `GOOGLE_CLOUD_PROJECT` (or `GCLOUD_PROJECT`) and `GOOGLE_CLOUD_LOCATION` are set. This allows `streamSimple()` to work with Vertex AI without an explicit `apiKey` option. The ADC credentials file existence check is cached per-process to avoid repeated filesystem access.
+
 ## [0.35.0] - 2026-01-05
 
 ## [0.34.2] - 2026-01-04
 
 ## [0.34.1] - 2026-01-04
 
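The changelog entry above describes a three-part check: an ADC credentials file on disk, a project env var, and a location env var, with the file check cached per process. A minimal sketch of that detection logic; `hasAdcCredentials` and `getVertexEnvApiKey` are illustrative names, not the package's actual internals:

```typescript
import { existsSync } from "node:fs";
import { homedir } from "node:os";
import { join } from "node:path";

// Cache the ADC file existence check per process, as the entry describes,
// so repeated calls do not touch the filesystem every time.
let adcFileExists: boolean | undefined;

function hasAdcCredentials(): boolean {
	if (adcFileExists === undefined) {
		adcFileExists = existsSync(
			join(homedir(), ".config", "gcloud", "application_default_credentials.json"),
		);
	}
	return adcFileExists;
}

// Hypothetical sketch of the Vertex AI branch of getEnvApiKey(): the dummy
// value "<authenticated>" signals "use ADC" downstream; no real key exists.
function getVertexEnvApiKey(): string | undefined {
	const project = process.env.GOOGLE_CLOUD_PROJECT ?? process.env.GCLOUD_PROJECT;
	const location = process.env.GOOGLE_CLOUD_LOCATION;
	if (project && location && hasAdcCredentials()) return "<authenticated>";
	return undefined;
}
```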
@@ -1,6 +1,6 @@
 {
 	"name": "@mariozechner/pi-ai",
-	"version": "0.34.2",
+	"version": "0.35.0",
 	"description": "Unified LLM API with automatic model discovery and provider configuration",
 	"type": "module",
 	"main": "./dist/index.js",
@@ -424,13 +424,15 @@ describe("Context overflow error handling", () => {
 // Ollama (local)
 // =============================================================================
 
-// Check if ollama is installed
+// Check if ollama is installed and local LLM tests are enabled
 let ollamaInstalled = false;
-try {
-	execSync("which ollama", { stdio: "ignore" });
-	ollamaInstalled = true;
-} catch {
-	ollamaInstalled = false;
+if (!process.env.PI_NO_LOCAL_LLM) {
+	try {
+		execSync("which ollama", { stdio: "ignore" });
+		ollamaInstalled = true;
+	} catch {
+		ollamaInstalled = false;
+	}
 }
 
 describe.skipIf(!ollamaInstalled)("Ollama (local)", () => {
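The env-gated probe added here recurs almost verbatim in the LM Studio and Generate E2E hunks below. A sketch of how the pattern could be factored into one helper; `checkLocalTool` is a hypothetical name, not something this commit adds:

```typescript
import { execSync } from "node:child_process";

// Probe for a local tool, but short-circuit to false when local LLM tests
// are disabled via the PI_NO_LOCAL_LLM environment variable.
function checkLocalTool(probeCommand: string): boolean {
	if (process.env.PI_NO_LOCAL_LLM) return false;
	try {
		execSync(probeCommand, { stdio: "ignore" });
		return true;
	} catch {
		return false;
	}
}

// The two probes used in these test files:
const ollamaInstalled = checkLocalTool("which ollama");
const lmStudioRunning = checkLocalTool(
	"curl -s --max-time 1 http://localhost:1234/v1/models > /dev/null",
);
```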
@@ -514,15 +516,17 @@ describe("Context overflow error handling", () => {
 });
 
 // =============================================================================
-// LM Studio (local) - Skip if not running
+// LM Studio (local) - Skip if not running or local LLM tests disabled
 // =============================================================================
 
 let lmStudioRunning = false;
-try {
-	execSync("curl -s --max-time 1 http://localhost:1234/v1/models > /dev/null", { stdio: "ignore" });
-	lmStudioRunning = true;
-} catch {
-	lmStudioRunning = false;
+if (!process.env.PI_NO_LOCAL_LLM) {
+	try {
+		execSync("curl -s --max-time 1 http://localhost:1234/v1/models > /dev/null", { stdio: "ignore" });
+		lmStudioRunning = true;
+	} catch {
+		lmStudioRunning = false;
+	}
 }
 
 describe.skipIf(!lmStudioRunning)("LM Studio (local)", () => {
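Note that the guard checks `!process.env.PI_NO_LOCAL_LLM`, so any non-empty value (e.g. `PI_NO_LOCAL_LLM=1`) skips both the Ollama and LM Studio suites via `describe.skipIf`, even when those tools are installed and running locally.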
@@ -878,13 +878,15 @@ describe("Generate E2E Tests", () => {
 	});
 });
 
-// Check if ollama is installed
+// Check if ollama is installed and local LLM tests are enabled
 let ollamaInstalled = false;
-try {
-	execSync("which ollama", { stdio: "ignore" });
-	ollamaInstalled = true;
-} catch {
-	ollamaInstalled = false;
+if (!process.env.PI_NO_LOCAL_LLM) {
+	try {
+		execSync("which ollama", { stdio: "ignore" });
+		ollamaInstalled = true;
+	} catch {
+		ollamaInstalled = false;
+	}
 }
 
 describe.skipIf(!ollamaInstalled)("Ollama Provider (gpt-oss-20b via OpenAI Completions)", () => {