diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 42d826b7..1df21c42 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -47,7 +47,7 @@ jobs:
         run: npm ci
 
       - name: Build workspaces
-        run: npm run build --workspaces --if-present
+        run: npm run build
 
       - name: Check
         run: npm run --if-present check --workspace ${{ matrix.package }}
diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts
index 47738d6c..55e272f9 100644
--- a/packages/ai/src/models.generated.ts
+++ b/packages/ai/src/models.generated.ts
@@ -5204,22 +5204,22 @@ export const MODELS = {
 		contextWindow: 128000,
 		maxTokens: 16384,
 	} satisfies Model<"openai-completions">,
-	"meta-llama/llama-3.1-70b-instruct": {
-		id: "meta-llama/llama-3.1-70b-instruct",
-		name: "Meta: Llama 3.1 70B Instruct",
+	"meta-llama/llama-3.1-8b-instruct": {
+		id: "meta-llama/llama-3.1-8b-instruct",
+		name: "Meta: Llama 3.1 8B Instruct",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 0.39999999999999997,
-			output: 0.39999999999999997,
+			input: 0.02,
+			output: 0.03,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
 		contextWindow: 131072,
-		maxTokens: 4096,
+		maxTokens: 16384,
 	} satisfies Model<"openai-completions">,
 	"meta-llama/llama-3.1-405b-instruct": {
 		id: "meta-llama/llama-3.1-405b-instruct",
@@ -5238,22 +5238,22 @@ export const MODELS = {
 		contextWindow: 130815,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
-	"meta-llama/llama-3.1-8b-instruct": {
-		id: "meta-llama/llama-3.1-8b-instruct",
-		name: "Meta: Llama 3.1 8B Instruct",
+	"meta-llama/llama-3.1-70b-instruct": {
+		id: "meta-llama/llama-3.1-70b-instruct",
+		name: "Meta: Llama 3.1 70B Instruct",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 0.02,
-			output: 0.03,
+			input: 0.39999999999999997,
+			output: 0.39999999999999997,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
 		contextWindow: 131072,
-		maxTokens: 16384,
+		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
 	"mistralai/mistral-nemo": {
 		id: "mistralai/mistral-nemo",
@@ -5391,6 +5391,23 @@ export const MODELS = {
 		contextWindow: 128000,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
+	"openai/gpt-4o-2024-05-13": {
+		id: "openai/gpt-4o-2024-05-13",
+		name: "OpenAI: GPT-4o (2024-05-13)",
+		api: "openai-completions",
+		provider: "openrouter",
+		baseUrl: "https://openrouter.ai/api/v1",
+		reasoning: false,
+		input: ["text", "image"],
+		cost: {
+			input: 5,
+			output: 15,
+			cacheRead: 0,
+			cacheWrite: 0,
+		},
+		contextWindow: 128000,
+		maxTokens: 4096,
+	} satisfies Model<"openai-completions">,
 	"openai/gpt-4o": {
 		id: "openai/gpt-4o",
 		name: "OpenAI: GPT-4o",
@@ -5425,22 +5442,22 @@ export const MODELS = {
 		contextWindow: 128000,
 		maxTokens: 64000,
 	} satisfies Model<"openai-completions">,
-	"openai/gpt-4o-2024-05-13": {
-		id: "openai/gpt-4o-2024-05-13",
-		name: "OpenAI: GPT-4o (2024-05-13)",
+	"meta-llama/llama-3-70b-instruct": {
+		id: "meta-llama/llama-3-70b-instruct",
+		name: "Meta: Llama 3 70B Instruct",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
-		input: ["text", "image"],
+		input: ["text"],
 		cost: {
-			input: 5,
-			output: 15,
+			input: 0.3,
+			output: 0.39999999999999997,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
-		contextWindow: 128000,
-		maxTokens: 4096,
+		contextWindow: 8192,
+		maxTokens: 16384,
 	} satisfies Model<"openai-completions">,
 	"meta-llama/llama-3-8b-instruct": {
 		id: "meta-llama/llama-3-8b-instruct",
@@ -5459,23 +5476,6 @@ export const MODELS = {
 		contextWindow: 8192,
 		maxTokens: 16384,
 	} satisfies Model<"openai-completions">,
-	"meta-llama/llama-3-70b-instruct": {
-		id: "meta-llama/llama-3-70b-instruct",
-		name: "Meta: Llama 3 70B Instruct",
-		api: "openai-completions",
-		provider: "openrouter",
-		baseUrl: "https://openrouter.ai/api/v1",
-		reasoning: false,
-		input: ["text"],
-		cost: {
-			input: 0.3,
-			output: 0.39999999999999997,
-			cacheRead: 0,
-			cacheWrite: 0,
-		},
-		contextWindow: 8192,
-		maxTokens: 16384,
-	} satisfies Model<"openai-completions">,
 	"mistralai/mixtral-8x22b-instruct": {
 		id: "mistralai/mixtral-8x22b-instruct",
 		name: "Mistral: Mixtral 8x22B Instruct",
@@ -5561,23 +5561,6 @@ export const MODELS = {
 		contextWindow: 128000,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
-	"openai/gpt-4-turbo-preview": {
-		id: "openai/gpt-4-turbo-preview",
-		name: "OpenAI: GPT-4 Turbo Preview",
-		api: "openai-completions",
-		provider: "openrouter",
-		baseUrl: "https://openrouter.ai/api/v1",
-		reasoning: false,
-		input: ["text"],
-		cost: {
-			input: 10,
-			output: 30,
-			cacheRead: 0,
-			cacheWrite: 0,
-		},
-		contextWindow: 128000,
-		maxTokens: 4096,
-	} satisfies Model<"openai-completions">,
 	"openai/gpt-3.5-turbo-0613": {
 		id: "openai/gpt-3.5-turbo-0613",
 		name: "OpenAI: GPT-3.5 Turbo (older v0613)",
@@ -5595,21 +5578,21 @@ export const MODELS = {
 		contextWindow: 4095,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
-	"mistralai/mistral-tiny": {
-		id: "mistralai/mistral-tiny",
-		name: "Mistral Tiny",
+	"openai/gpt-4-turbo-preview": {
+		id: "openai/gpt-4-turbo-preview",
+		name: "OpenAI: GPT-4 Turbo Preview",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 0.25,
-			output: 0.25,
+			input: 10,
+			output: 30,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
-		contextWindow: 32768,
+		contextWindow: 128000,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
 	"mistralai/mistral-small": {
@@ -5629,6 +5612,23 @@ export const MODELS = {
 		contextWindow: 32768,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
+	"mistralai/mistral-tiny": {
+		id: "mistralai/mistral-tiny",
+		name: "Mistral Tiny",
+		api: "openai-completions",
+		provider: "openrouter",
+		baseUrl: "https://openrouter.ai/api/v1",
+		reasoning: false,
+		input: ["text"],
+		cost: {
+			input: 0.25,
+			output: 0.25,
+			cacheRead: 0,
+			cacheWrite: 0,
+		},
+		contextWindow: 32768,
+		maxTokens: 4096,
+	} satisfies Model<"openai-completions">,
 	"mistralai/mixtral-8x7b-instruct": {
 		id: "mistralai/mixtral-8x7b-instruct",
 		name: "Mistral: Mixtral 8x7B Instruct",
@@ -5680,21 +5680,21 @@ export const MODELS = {
 		contextWindow: 16385,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
-	"openai/gpt-3.5-turbo": {
-		id: "openai/gpt-3.5-turbo",
-		name: "OpenAI: GPT-3.5 Turbo",
+	"openai/gpt-4-0314": {
+		id: "openai/gpt-4-0314",
+		name: "OpenAI: GPT-4 (older v0314)",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 0.5,
-			output: 1.5,
+			input: 30,
+			output: 60,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
-		contextWindow: 16385,
+		contextWindow: 8191,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
 	"openai/gpt-4": {
@@ -5714,21 +5714,21 @@ export const MODELS = {
 		contextWindow: 8191,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
-	"openai/gpt-4-0314": {
-		id: "openai/gpt-4-0314",
-		name: "OpenAI: GPT-4 (older v0314)",
+	"openai/gpt-3.5-turbo": {
+		id: "openai/gpt-3.5-turbo",
+		name: "OpenAI: GPT-3.5 Turbo",
 		api: "openai-completions",
 		provider: "openrouter",
 		baseUrl: "https://openrouter.ai/api/v1",
 		reasoning: false,
 		input: ["text"],
 		cost: {
-			input: 30,
-			output: 60,
+			input: 0.5,
+			output: 1.5,
 			cacheRead: 0,
 			cacheWrite: 0,
 		},
-		contextWindow: 8191,
+		contextWindow: 16385,
 		maxTokens: 4096,
 	} satisfies Model<"openai-completions">,
 	"openrouter/auto": {