refactor: improve build ID generation with consistent timestamp format (#130)

refactor: improve build ID generation with consistent timestamp format

fix: lazy-start native opencode and simplify binary resolution
This commit is contained in:
NathanFlurry 2026-02-07 07:56:07 +00:00
parent 77f741ff62
commit 54d537fb23
No known key found for this signature in database
GPG key ID: 6A5F43A4F3241BCA
7 changed files with 257 additions and 95 deletions

View file

@ -71,7 +71,6 @@ sandbox-agent opencode [OPTIONS]
| `-H, --host <HOST>` | `127.0.0.1` | Host to bind to |
| `-p, --port <PORT>` | `2468` | Port to bind to |
| `--session-title <TITLE>` | - | Title for the OpenCode session |
| `--opencode-bin <PATH>` | - | Override `opencode` binary path |
```bash
sandbox-agent opencode --token "$TOKEN"
@ -79,7 +78,7 @@ sandbox-agent opencode --token "$TOKEN"
The daemon logs to a per-host log file under the sandbox-agent data directory (for example, `~/.local/share/sandbox-agent/daemon/daemon-127-0-0-1-2468.log`).
Uses the `opencode` binary (override with `OPENCODE_BIN`); if it is not found on `PATH`, sandbox-agent installs it automatically.
Existing installs are reused and missing binaries are installed automatically.
---

View file

@ -168,9 +168,7 @@ impl AgentManager {
if agent == AgentId::Mock {
return true;
}
self.binary_path(agent).exists()
|| find_in_path(agent.binary_name()).is_some()
|| default_install_dir().join(agent.binary_name()).exists()
self.binary_path(agent).exists() || find_in_path(agent.binary_name()).is_some()
}
pub fn binary_path(&self, agent: AgentId) -> PathBuf {
@ -641,10 +639,6 @@ impl AgentManager {
if let Some(path) = find_in_path(agent.binary_name()) {
return Ok(path);
}
let fallback = default_install_dir().join(agent.binary_name());
if fallback.exists() {
return Ok(fallback);
}
Err(AgentError::BinaryNotFound { agent })
}
}
@ -1193,12 +1187,6 @@ fn find_in_path(binary_name: &str) -> Option<PathBuf> {
None
}
/// Default directory for managed agent binaries.
///
/// Uses `<platform data dir>/sandbox-agent/bin` when the platform data
/// directory is known, otherwise falls back to `./.sandbox-agent/bin`
/// relative to the current working directory.
fn default_install_dir() -> PathBuf {
    match dirs::data_dir() {
        Some(data_dir) => data_dir.join("sandbox-agent").join("bin"),
        None => PathBuf::from(".").join(".sandbox-agent").join("bin"),
    }
}
fn download_bytes(url: &Url) -> Result<Vec<u8>, AgentError> {
let client = Client::builder().build()?;
let mut response = client.get(url.clone()).send()?;

View file

@ -99,26 +99,23 @@ fn generate_version(out_dir: &Path) {
fn generate_build_id(out_dir: &Path) {
use std::process::Command;
let build_id = Command::new("git")
let source_id = Command::new("git")
.args(["rev-parse", "--short", "HEAD"])
.output()
.ok()
.filter(|o| o.status.success())
.and_then(|o| String::from_utf8(o.stdout).ok())
.map(|s| s.trim().to_string())
.unwrap_or_else(|| {
// Fallback: use the package version + compile-time timestamp
let version = env::var("CARGO_PKG_VERSION").unwrap_or_default();
.unwrap_or_else(|| env::var("CARGO_PKG_VERSION").unwrap_or_default());
let timestamp = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_secs().to_string())
.unwrap_or_default();
format!("{version}-{timestamp}")
});
.map(|d| d.as_nanos().to_string())
.unwrap_or_else(|_| "0".to_string());
let build_id = format!("{source_id}-{timestamp}");
let out_file = out_dir.join("build_id.rs");
let contents = format!(
"/// Unique identifier for this build (git short hash or version-timestamp fallback).\n\
"/// Unique identifier for this build (source id + build timestamp).\n\
pub const BUILD_ID: &str = \"{}\";\n",
build_id
);

View file

@ -126,9 +126,6 @@ pub struct OpencodeArgs {
#[arg(long)]
session_title: Option<String>,
#[arg(long)]
opencode_bin: Option<PathBuf>,
}
impl Default for OpencodeArgs {
@ -137,7 +134,6 @@ impl Default for OpencodeArgs {
host: DEFAULT_HOST.to_string(),
port: DEFAULT_PORT,
session_title: None,
opencode_bin: None,
}
}
}
@ -606,7 +602,7 @@ fn run_opencode(cli: &CliConfig, args: &OpencodeArgs) -> Result<(), CliError> {
write_stdout_line(&format!("OpenCode session: {session_id}"))?;
let attach_url = format!("{base_url}/opencode");
let opencode_bin = resolve_opencode_bin(args.opencode_bin.as_ref())?;
let opencode_bin = resolve_opencode_bin()?;
let mut opencode_cmd = ProcessCommand::new(opencode_bin);
opencode_cmd
.arg("attach")
@ -844,50 +840,19 @@ fn create_opencode_session(
Ok(session_id.to_string())
}
fn resolve_opencode_bin(explicit: Option<&PathBuf>) -> Result<PathBuf, CliError> {
if let Some(path) = explicit {
return Ok(path.clone());
}
if let Ok(path) = std::env::var("OPENCODE_BIN") {
return Ok(PathBuf::from(path));
}
if let Some(path) = find_in_path("opencode") {
write_stderr_line(&format!(
"using opencode binary from PATH: {}",
path.display()
))?;
return Ok(path);
}
fn resolve_opencode_bin() -> Result<PathBuf, CliError> {
let manager = AgentManager::new(default_install_dir())
.map_err(|err| CliError::Server(err.to_string()))?;
match manager.resolve_binary(AgentId::Opencode) {
Ok(path) => Ok(path),
Err(_) => {
write_stderr_line("opencode not found; installing...")?;
let result = manager
.install(
match manager.install(
AgentId::Opencode,
InstallOptions {
reinstall: false,
version: None,
},
)
.map_err(|err| CliError::Server(err.to_string()))?;
Ok(result.path)
) {
Ok(result) => Ok(result.path),
Err(err) => Err(CliError::Server(err.to_string())),
}
}
}
/// Search every directory listed in the `PATH` environment variable for a
/// file named `binary_name`, returning the first match in `PATH` order.
///
/// Returns `None` when `PATH` is unset or no directory contains the file.
/// Only existence is checked, not whether the file is executable.
fn find_in_path(binary_name: &str) -> Option<PathBuf> {
    let path_var = std::env::var_os("PATH")?;
    std::env::split_paths(&path_var)
        .map(|dir| dir.join(binary_name))
        .find(|candidate| candidate.exists())
}
fn run_credentials(command: &CredentialsCommand) -> Result<(), CliError> {

View file

@ -10,7 +10,6 @@ use crate::cli::{CliConfig, CliError};
mod build_id {
include!(concat!(env!("OUT_DIR"), "/build_id.rs"));
}
pub use build_id::BUILD_ID;
const DAEMON_HEALTH_TIMEOUT: Duration = Duration::from_secs(30);
@ -446,7 +445,10 @@ pub fn ensure_running(
// Check build version
if !is_version_current(host, port) {
let old = read_daemon_version(host, port).unwrap_or_else(|| "unknown".to_string());
eprintln!("daemon outdated (build {old} -> {BUILD_ID}), restarting...");
eprintln!(
"daemon outdated (build {old} -> {}), restarting...",
BUILD_ID
);
stop(host, port)?;
return start(cli, host, port, token);
}

View file

@ -23,7 +23,7 @@ use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use tokio::sync::{broadcast, Mutex};
use tokio::time::interval;
use tracing::warn;
use tracing::{info, warn};
use utoipa::{IntoParams, OpenApi, ToSchema};
use crate::router::{
@ -656,21 +656,38 @@ fn default_agent_mode() -> &'static str {
}
async fn opencode_model_cache(state: &OpenCodeAppState) -> OpenCodeModelCache {
{
let cache = state.opencode.model_cache.lock().await;
if let Some(cache) = cache.as_ref() {
// Keep this lock for the full build to enforce singleflight behavior.
// Concurrent requests wait for the same in-flight build instead of
// spawning duplicate provider/model fetches.
let mut slot = state.opencode.model_cache.lock().await;
if let Some(cache) = slot.as_ref() {
info!(
entries = cache.entries.len(),
groups = cache.group_names.len(),
connected = cache.connected.len(),
"opencode model cache hit"
);
return cache.clone();
}
}
let started = std::time::Instant::now();
info!("opencode model cache miss; building cache");
let cache = build_opencode_model_cache(state).await;
let mut slot = state.opencode.model_cache.lock().await;
info!(
elapsed_ms = started.elapsed().as_millis() as u64,
entries = cache.entries.len(),
groups = cache.group_names.len(),
connected = cache.connected.len(),
"opencode model cache built"
);
*slot = Some(cache.clone());
cache
}
async fn build_opencode_model_cache(state: &OpenCodeAppState) -> OpenCodeModelCache {
let started = std::time::Instant::now();
// Check credentials upfront
let creds_started = std::time::Instant::now();
let credentials = match tokio::task::spawn_blocking(|| {
extract_all_credentials(&CredentialExtractionOptions::new())
})
@ -684,6 +701,10 @@ async fn build_opencode_model_cache(state: &OpenCodeAppState) -> OpenCodeModelCa
};
let has_anthropic = credentials.anthropic.is_some();
let has_openai = credentials.openai.is_some();
info!(
elapsed_ms = creds_started.elapsed().as_millis() as u64,
has_anthropic, has_openai, "opencode model cache credential scan complete"
);
let mut entries = Vec::new();
let mut model_lookup = HashMap::new();
@ -693,11 +714,38 @@ async fn build_opencode_model_cache(state: &OpenCodeAppState) -> OpenCodeModelCa
let mut group_names: HashMap<String, String> = HashMap::new();
let mut default_model: Option<String> = None;
for agent in available_agent_ids() {
let response = match state.inner.session_manager().agent_models(agent).await {
let agents = available_agent_ids();
let manager = state.inner.session_manager();
let fetches = agents.iter().copied().map(|agent| {
let manager = manager.clone();
async move {
let agent_started = std::time::Instant::now();
let response = manager.agent_models(agent).await;
(agent, agent_started.elapsed(), response)
}
});
let fetch_results = futures::future::join_all(fetches).await;
for (agent, elapsed, response) in fetch_results {
let response = match response {
Ok(response) => response,
Err(_) => continue,
Err(err) => {
warn!(
agent = agent.as_str(),
elapsed_ms = elapsed.as_millis() as u64,
?err,
"opencode model cache failed fetching agent models"
);
continue;
}
};
info!(
agent = agent.as_str(),
elapsed_ms = elapsed.as_millis() as u64,
model_count = response.models.len(),
has_default = response.default_model.is_some(),
"opencode model cache fetched agent models"
);
let first_model_id = response.models.first().map(|model| model.id.clone());
for model in response.models {
@ -805,7 +853,7 @@ async fn build_opencode_model_cache(state: &OpenCodeAppState) -> OpenCodeModelCa
}
}
OpenCodeModelCache {
let cache = OpenCodeModelCache {
entries,
model_lookup,
group_defaults,
@ -814,7 +862,17 @@ async fn build_opencode_model_cache(state: &OpenCodeAppState) -> OpenCodeModelCa
default_group,
default_model,
connected,
}
};
info!(
elapsed_ms = started.elapsed().as_millis() as u64,
entries = cache.entries.len(),
groups = cache.group_names.len(),
connected = cache.connected.len(),
default_group = cache.default_group.as_str(),
default_model = cache.default_model.as_str(),
"opencode model cache build complete"
);
cache
}
fn resolve_agent_from_model(
@ -1123,8 +1181,16 @@ async fn proxy_native_opencode(
headers: &HeaderMap,
body: Option<Value>,
) -> Option<Response> {
let Some(base_url) = state.opencode.proxy_base_url() else {
let base_url = if let Some(base_url) = state.opencode.proxy_base_url() {
base_url.to_string()
} else {
match state.inner.ensure_opencode_server().await {
Ok(base_url) => base_url,
Err(err) => {
warn!(path, ?err, "failed to lazily start native opencode server");
return None;
}
}
};
let mut request = state

View file

@ -31,7 +31,8 @@ use sandbox_agent_universal_agent_schema::{
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use tokio::sync::{broadcast, mpsc, oneshot, Mutex};
use tokio::sync::futures::OwnedNotified;
use tokio::sync::{broadcast, mpsc, oneshot, Mutex, Notify};
use tokio::time::sleep;
use tokio_stream::wrappers::BroadcastStream;
use tower_http::trace::TraceLayer;
@ -54,6 +55,7 @@ const MOCK_EVENT_DELAY_MS: u64 = 200;
static USER_MESSAGE_COUNTER: AtomicU64 = AtomicU64::new(1);
const ANTHROPIC_MODELS_URL: &str = "https://api.anthropic.com/v1/models?beta=true";
const ANTHROPIC_VERSION: &str = "2023-06-01";
const CODEX_MODEL_LIST_TIMEOUT_SECS: u64 = 10;
fn claude_fallback_models() -> AgentModelsResponse {
// Claude Code accepts model aliases: default, sonnet, opus, haiku
@ -146,6 +148,10 @@ impl AppState {
pub(crate) fn session_manager(&self) -> Arc<SessionManager> {
self.session_manager.clone()
}
pub(crate) async fn ensure_opencode_server(&self) -> Result<String, SandboxError> {
self.session_manager.ensure_opencode_server().await
}
}
#[derive(Debug, Clone)]
@ -922,6 +928,13 @@ pub(crate) struct SessionManager {
sessions: Mutex<Vec<SessionState>>,
server_manager: Arc<AgentServerManager>,
http_client: Client,
model_catalog: Mutex<ModelCatalogState>,
}
#[derive(Debug, Default)]
struct ModelCatalogState {
models: HashMap<AgentId, AgentModelsResponse>,
in_flight: HashMap<AgentId, Arc<Notify>>,
}
/// Shared Codex app-server process that handles multiple sessions via JSON-RPC.
@ -1642,6 +1655,7 @@ impl SessionManager {
sessions: Mutex::new(Vec::new()),
server_manager,
http_client: Client::new(),
model_catalog: Mutex::new(ModelCatalogState::default()),
}
}
@ -1830,6 +1844,49 @@ impl SessionManager {
/// Return the model list for `agent`, caching successful results and
/// collapsing concurrent fetches for the same agent into a single request
/// (singleflight).
///
/// Protocol, decided under the `model_catalog` lock each loop iteration:
/// - cached response present: return a clone of it;
/// - another task already fetching: wait on its `Notify`, then re-check;
/// - otherwise: register an in-flight `Notify`, fetch with the lock
///   released, store the result when cacheable, and wake all waiters.
///
/// Errors are returned to the builder's caller but never cached, so a
/// later call retries the fetch.
pub(crate) async fn agent_models(
    self: &Arc<Self>,
    agent: AgentId,
) -> Result<AgentModelsResponse, SandboxError> {
    // Outcome of inspecting the catalog; resolved after the lock is
    // dropped so the network fetch itself never holds `model_catalog`.
    enum Acquisition {
        Hit(AgentModelsResponse),
        Wait(OwnedNotified),
        Build(Arc<Notify>),
    }
    loop {
        let acquisition = {
            let mut catalog = self.model_catalog.lock().await;
            if let Some(response) = catalog.models.get(&agent) {
                Acquisition::Hit(response.clone())
            } else if let Some(notify) = catalog.in_flight.get(&agent) {
                // The future is created while the lock is still held, so a
                // `notify_waiters` sent after we release it is not missed.
                Acquisition::Wait(notify.clone().notified_owned())
            } else {
                let notify = Arc::new(Notify::new());
                catalog.in_flight.insert(agent, notify.clone());
                Acquisition::Build(notify)
            }
        };
        match acquisition {
            Acquisition::Hit(response) => return Ok(response),
            // After wake-up, loop again: either the result is cached now,
            // or the fetch failed and this task becomes the new builder.
            Acquisition::Wait(waiting) => waiting.await,
            Acquisition::Build(notify) => {
                let response = self.fetch_agent_models_uncached(agent).await;
                let mut catalog = self.model_catalog.lock().await;
                catalog.in_flight.remove(&agent);
                if let Ok(response_value) = &response {
                    if should_cache_agent_models(agent, response_value) {
                        catalog.models.insert(agent, response_value.clone());
                    }
                }
                notify.notify_waiters();
                return response;
            }
        }
    }
}
async fn fetch_agent_models_uncached(
self: &Arc<Self>,
agent: AgentId,
) -> Result<AgentModelsResponse, SandboxError> {
match agent {
AgentId::Claude => match self.fetch_claude_models().await {
@ -2243,8 +2300,7 @@ impl SessionManager {
.clone()
.unwrap_or_else(|| session_id.to_string());
let response_text = response.clone().unwrap_or_default();
let line =
claude_tool_result_line(&native_sid, question_id, &response_text, false);
let line = claude_tool_result_line(&native_sid, question_id, &response_text, false);
sender
.send(line)
.map_err(|_| SandboxError::InvalidRequest {
@ -3468,8 +3524,13 @@ impl SessionManager {
}
async fn fetch_claude_models(&self) -> Result<AgentModelsResponse, SandboxError> {
let started = Instant::now();
let credentials = self.extract_credentials().await?;
let Some(cred) = credentials.anthropic else {
tracing::info!(
elapsed_ms = started.elapsed().as_millis() as u64,
"claude model fetch skipped (no anthropic credentials)"
);
return Ok(AgentModelsResponse {
models: Vec::new(),
default_model: None,
@ -3492,6 +3553,7 @@ impl SessionManager {
if matches!(cred.auth_type, AuthType::Oauth) {
tracing::warn!(
status = %status,
elapsed_ms = started.elapsed().as_millis() as u64,
"Anthropic model list rejected OAuth credentials; using Claude OAuth fallback models"
);
return Ok(claude_fallback_models());
@ -3552,11 +3614,18 @@ impl SessionManager {
if models.is_empty() && matches!(cred.auth_type, AuthType::Oauth) {
tracing::warn!(
elapsed_ms = started.elapsed().as_millis() as u64,
"Anthropic model list was empty for OAuth credentials; using Claude OAuth fallback models"
);
return Ok(claude_fallback_models());
}
tracing::info!(
elapsed_ms = started.elapsed().as_millis() as u64,
model_count = models.len(),
has_default = default_model.is_some(),
"claude model fetch completed"
);
Ok(AgentModelsResponse {
models,
default_model,
@ -3564,14 +3633,21 @@ impl SessionManager {
}
async fn fetch_codex_models(self: &Arc<Self>) -> Result<AgentModelsResponse, SandboxError> {
let started = Instant::now();
let server = self.ensure_codex_server().await?;
tracing::info!(
elapsed_ms = started.elapsed().as_millis() as u64,
"codex model fetch server ready"
);
let mut models: Vec<AgentModelInfo> = Vec::new();
let mut default_model: Option<String> = None;
let mut seen = HashSet::new();
let mut cursor: Option<String> = None;
let mut pages: usize = 0;
loop {
let id = server.next_request_id();
let page_started = Instant::now();
let request = json!({
"jsonrpc": "2.0",
"id": id,
@ -3588,20 +3664,39 @@ impl SessionManager {
message: "failed to send model/list request".to_string(),
})?;
let result = tokio::time::timeout(Duration::from_secs(30), rx).await;
let result =
tokio::time::timeout(Duration::from_secs(CODEX_MODEL_LIST_TIMEOUT_SECS), rx).await;
let value = match result {
Ok(Ok(value)) => value,
Ok(Err(_)) => {
tracing::warn!(
elapsed_ms = started.elapsed().as_millis() as u64,
page = pages + 1,
"codex model/list request cancelled"
);
return Err(SandboxError::StreamError {
message: "model/list request cancelled".to_string(),
})
});
}
Err(_) => {
tracing::warn!(
elapsed_ms = started.elapsed().as_millis() as u64,
page = pages + 1,
timeout_secs = CODEX_MODEL_LIST_TIMEOUT_SECS,
"codex model/list request timed out"
);
return Err(SandboxError::StreamError {
message: "model/list request timed out".to_string(),
})
});
}
};
pages += 1;
tracing::info!(
page = pages,
elapsed_ms = page_started.elapsed().as_millis() as u64,
total_elapsed_ms = started.elapsed().as_millis() as u64,
"codex model/list page fetched"
);
let data = value
.get("data")
@ -3683,6 +3778,13 @@ impl SessionManager {
default_model = models.first().map(|model| model.id.clone());
}
tracing::info!(
elapsed_ms = started.elapsed().as_millis() as u64,
page_count = pages,
model_count = models.len(),
has_default = default_model.is_some(),
"codex model fetch completed"
);
Ok(AgentModelsResponse {
models,
default_model,
@ -3690,18 +3792,36 @@ impl SessionManager {
}
async fn fetch_opencode_models(&self) -> Result<AgentModelsResponse, SandboxError> {
let started = Instant::now();
let base_url = self.ensure_opencode_server().await?;
let endpoints = [
format!("{base_url}/config/providers"),
format!("{base_url}/provider"),
];
for url in endpoints {
let endpoint_started = Instant::now();
let response = self.http_client.get(&url).send().await;
let response = match response {
Ok(response) => response,
Err(_) => continue,
Err(err) => {
tracing::warn!(
url,
elapsed_ms = endpoint_started.elapsed().as_millis() as u64,
total_elapsed_ms = started.elapsed().as_millis() as u64,
?err,
"opencode model endpoint request failed"
);
continue;
}
};
if !response.status().is_success() {
tracing::warn!(
url,
status = %response.status(),
elapsed_ms = endpoint_started.elapsed().as_millis() as u64,
total_elapsed_ms = started.elapsed().as_millis() as u64,
"opencode model endpoint returned non-success status"
);
continue;
}
let value: Value = response
@ -3711,9 +3831,27 @@ impl SessionManager {
message: err.to_string(),
})?;
if let Some(models) = parse_opencode_models(&value) {
tracing::info!(
url,
elapsed_ms = endpoint_started.elapsed().as_millis() as u64,
total_elapsed_ms = started.elapsed().as_millis() as u64,
model_count = models.models.len(),
has_default = models.default_model.is_some(),
"opencode model fetch completed"
);
return Ok(models);
}
tracing::warn!(
url,
elapsed_ms = endpoint_started.elapsed().as_millis() as u64,
total_elapsed_ms = started.elapsed().as_millis() as u64,
"opencode model endpoint parse returned no models"
);
}
tracing::warn!(
elapsed_ms = started.elapsed().as_millis() as u64,
"opencode model fetch failed"
);
Err(SandboxError::StreamError {
message: "OpenCode models unavailable".to_string(),
})
@ -4917,6 +5055,13 @@ fn codex_fallback_models() -> AgentModelsResponse {
}
}
/// Decide whether a fetched model list should be stored in the catalog.
///
/// An empty OpenCode response is the one case that is never cached —
/// presumably so a later call can retry once the server has models to
/// report (TODO confirm intent with callers). Everything else is cached.
fn should_cache_agent_models(agent: AgentId, response: &AgentModelsResponse) -> bool {
    let empty_opencode_response =
        matches!(agent, AgentId::Opencode) && response.models.is_empty();
    !empty_opencode_response
}
fn amp_variants() -> Vec<String> {
vec!["medium", "high", "xhigh"]
.into_iter()