diff --git a/.husky/pre-commit b/.husky/pre-commit index 6b5d4fa9..a0951d87 100755 --- a/.husky/pre-commit +++ b/.husky/pre-commit @@ -12,7 +12,22 @@ echo "Running checks on staged files..." run_checks() { # shellcheck disable=SC2086 # intentionally preserving word splitting for file list - npx -y @biomejs/biome check --write --error-on-warnings $1 + CHECK_OUTPUT="" + CHECK_STATUS=0 + set +e + CHECK_OUTPUT="$(npx -y @biomejs/biome check --write --error-on-warnings $1 2>&1)" + CHECK_STATUS=$? + set -e + + if [ "$CHECK_STATUS" -ne 0 ]; then + if printf '%s\n' "$CHECK_OUTPUT" | grep -Fq "No files were processed in the specified paths."; then + return 0 + fi + echo "$CHECK_OUTPUT" + return "$CHECK_STATUS" + fi + + [ -n "$CHECK_OUTPUT" ] && echo "$CHECK_OUTPUT" } # Run Biome only when staged files include style targets diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index 4a4ef7bf..5bec5412 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -2304,6 +2304,23 @@ export const MODELS = { contextWindow: 272000, maxTokens: 128000, } satisfies Model<"azure-openai-responses">, + "gpt-5.4-pro": { + id: "gpt-5.4-pro", + name: "GPT-5.4 Pro", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 30, + output: 180, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, "o1": { id: "o1", name: "o1", @@ -2880,7 +2897,7 @@ export const MODELS = { } satisfies Model<"openai-responses">, "gpt-5.3-codex": { id: "gpt-5.3-codex", - name: "GPT-5.3 Codex", + name: "GPT-5.3-Codex", api: "openai-responses", provider: "github-copilot", baseUrl: "https://api.individual.githubcopilot.com", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, }, - contextWindow: 272000, + contextWindow: 400000, maxTokens: 128000, } 
satisfies Model<"openai-responses">, "grok-code-fast-1": { @@ -5454,6 +5471,23 @@ export const MODELS = { contextWindow: 272000, maxTokens: 128000, } satisfies Model<"openai-responses">, + "gpt-5.4-pro": { + id: "gpt-5.4-pro", + name: "GPT-5.4 Pro", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 30, + output: 180, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, "o1": { id: "o1", name: "o1", @@ -6172,6 +6206,23 @@ export const MODELS = { contextWindow: 272000, maxTokens: 128000, } satisfies Model<"openai-responses">, + "gpt-5.4-pro": { + id: "gpt-5.4-pro", + name: "GPT-5.4 Pro", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 30, + output: 180, + cacheRead: 30, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, "kimi-k2.5": { id: "kimi-k2.5", name: "Kimi K2.5",