feat(coding-agent): add OAuth authentication for Claude Pro/Max

- Add /login and /logout commands for OAuth flow
- OAuth tokens stored in ~/.pi/agent/oauth.json with 0600 permissions
- Auto-refresh tokens when expired (5min buffer)
- Priority: OAuth > ANTHROPIC_OAUTH_TOKEN env > ANTHROPIC_API_KEY env
- Fix model selector async loading and re-render
- Add bracketed paste support to Input component for long codes
- Update README.md with OAuth documentation
- Add implementation docs and testing checklist
This commit is contained in:
Mario Zechner 2025-11-18 17:33:33 +01:00
parent 387cc97bac
commit 587d7c39a4
17 changed files with 1632 additions and 76 deletions

View file

@@ -653,6 +653,23 @@ export const MODELS = {
contextWindow: 1000000,
maxTokens: 8192,
} satisfies Model<"google-generative-ai">,
"gemini-3-pro-preview": {
id: "gemini-3-pro-preview",
name: "Gemini 3 Pro Preview",
api: "google-generative-ai",
provider: "google",
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.25,
output: 10,
cacheRead: 0.31,
cacheWrite: 0,
},
contextWindow: 1048576,
maxTokens: 65536,
} satisfies Model<"google-generative-ai">,
},
openai: {
"gpt-4.1-nano": {
@@ -1906,6 +1923,23 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
},
openrouter: {
"google/gemini-3-pro-preview": {
id: "google/gemini-3-pro-preview",
name: "Gemini 3 Pro Preview",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 2,
output: 12,
cacheRead: 0.19999999999999998,
cacheWrite: 2.375,
},
contextWindow: 1048576,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"openrouter/sherlock-dash-alpha": {
id: "openrouter/sherlock-dash-alpha",
name: "Sherlock Dash Alpha",
@@ -1957,6 +1991,23 @@ export const MODELS = {
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
"openai/gpt-5.1-chat": {
id: "openai/gpt-5.1-chat",
name: "OpenAI: GPT-5.1 Chat",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-5.1-codex": {
id: "openai/gpt-5.1-codex",
name: "OpenAI: GPT-5.1-Codex",
@@ -2017,13 +2068,13 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.55,
output: 2.25,
input: 0.5,
output: 2.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 16384,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"amazon/nova-premier-v1": {
id: "amazon/nova-premier-v1",
@@ -2212,40 +2263,6 @@ export const MODELS = {
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
"inclusionai/ring-1t": {
id: "inclusionai/ring-1t",
name: "inclusionAI: Ring 1T",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.5700000000000001,
output: 2.2800000000000002,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"inclusionai/ling-1t": {
id: "inclusionai/ling-1t",
name: "inclusionAI: Ling-1T",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.5700000000000001,
output: 2.2800000000000002,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"openai/o3-deep-research": {
id: "openai/o3-deep-research",
name: "OpenAI: o3 Deep Research",
@@ -2476,13 +2493,13 @@ export const MODELS = {
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.22,
output: 0.88,
input: 0.21,
output: 1.9,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 4096,
contextWindow: 131072,
maxTokens: 32768,
} satisfies Model<"openai-completions">,
"qwen/qwen3-max": {
id: "qwen/qwen3-max",
@@ -2850,13 +2867,13 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.09,
output: 0.3,
input: 0.051,
output: 0.33999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 131072,
contextWindow: 32768,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"x-ai/grok-code-fast-1": {
id: "x-ai/grok-code-fast-1",
@@ -3734,8 +3751,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.39999999999999997,
output: 1.75,
input: 0.19999999999999998,
output: 4.5,
cacheRead: 0,
cacheWrite: 0,
},
@@ -4261,13 +4278,13 @@ export const MODELS = {
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.09,
output: 0.16,
input: 0.07,
output: 0.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 16384,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"qwen/qwq-32b": {
id: "qwen/qwq-32b",

View file

@@ -74,6 +74,7 @@ export interface ToolCall {
id: string;
name: string;
arguments: Record<string, any>;
thoughtSignature?: string; // Google-specific: opaque signature for reusing thought context
}
export interface Usage {