This commit is contained in:
Harivansh Rathi 2026-03-05 16:45:36 -08:00
parent 18f723480f
commit a20a72cd2e
2 changed files with 69 additions and 3 deletions

View file

@@ -12,7 +12,22 @@ echo "Running checks on staged files..."
run_checks() {
	# Run Biome (with auto-fix) on the staged file list in "$1".
	# A real check failure fails the hook; Biome's "no files were
	# processed" exit (e.g. only non-style files staged) is tolerated.
	CHECK_OUTPUT=""
	CHECK_STATUS=0
	# Temporarily disable errexit so a non-zero Biome exit doesn't abort
	# the hook before we can inspect its output.
	set +e
	CHECK_OUTPUT="$(npx -y @biomejs/biome check --write --error-on-warnings "$1" 2>&1)"
	CHECK_STATUS=$?
	set -e
	if [ "$CHECK_STATUS" -ne 0 ]; then
		# Biome exits non-zero when none of the given paths match its
		# includes; that is not an error for this hook.
		if printf '%s\n' "$CHECK_OUTPUT" | grep -Fq "No files were processed in the specified paths."; then
			return 0
		fi
		echo "$CHECK_OUTPUT"
		return "$CHECK_STATUS"
	fi
	# Use an explicit if (not `[ ... ] && echo`) so the function returns 0
	# when the check succeeded but produced no output; a trailing failed
	# test would otherwise become the function's non-zero exit status.
	if [ -n "$CHECK_OUTPUT" ]; then
		echo "$CHECK_OUTPUT"
	fi
	return 0
}
# Run Biome only when staged files include style targets

View file

@@ -2304,6 +2304,23 @@ export const MODELS = {
contextWindow: 272000,
maxTokens: 128000,
} satisfies Model<"azure-openai-responses">,
"gpt-5.4-pro": {
id: "gpt-5.4-pro",
name: "GPT-5.4 Pro",
api: "azure-openai-responses",
provider: "azure-openai-responses",
baseUrl: "",
reasoning: true,
input: ["text", "image"],
cost: {
input: 30,
output: 180,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 1050000,
maxTokens: 128000,
} satisfies Model<"azure-openai-responses">,
"o1": {
id: "o1",
name: "o1",
@@ -2880,7 +2897,7 @@ export const MODELS = {
} satisfies Model<"openai-responses">,
"gpt-5.3-codex": {
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
name: "GPT-5.3-Codex",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
@@ -2893,7 +2910,7 @@ export const MODELS = {
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 272000,
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"grok-code-fast-1": {
@@ -5454,6 +5471,23 @@ export const MODELS = {
contextWindow: 272000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5.4-pro": {
id: "gpt-5.4-pro",
name: "GPT-5.4 Pro",
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 30,
output: 180,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 1050000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"o1": {
id: "o1",
name: "o1",
@@ -6172,6 +6206,23 @@ export const MODELS = {
contextWindow: 272000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5.4-pro": {
id: "gpt-5.4-pro",
name: "GPT-5.4 Pro",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 30,
output: 180,
cacheRead: 30,
cacheWrite: 0,
},
contextWindow: 1050000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"kimi-k2.5": {
id: "kimi-k2.5",
name: "Kimi K2.5",