chore: fix bad merge

This commit is contained in:
Nathan Flurry 2026-02-11 07:52:48 -08:00
parent 1dd45908a3
commit 94353f7696
205 changed files with 19244 additions and 14866 deletions

View file

@ -0,0 +1,132 @@
use std::sync::Arc;
use std::time::Duration;
use axum::extract::State;
use axum::http::{header, HeaderMap, StatusCode};
use axum::response::sse::KeepAlive;
use axum::response::{IntoResponse, Response, Sse};
use axum::routing::{get, post};
use axum::{Json, Router};
use serde::Serialize;
use serde_json::{json, Value};
use crate::process::{AdapterError, AdapterRuntime, PostOutcome};
/// Response body for `GET /v1/health`.
#[derive(Debug, Serialize)]
struct HealthResponse {
    // Always `true`: the handler only ever constructs `ok: true`.
    ok: bool,
}
/// RFC 7807-style "problem details" payload used by error responses.
#[derive(Debug, Serialize)]
struct Problem {
    // Serialized as "type"; this adapter always sets "about:blank".
    r#type: &'static str,
    // Short machine-readable identifier, e.g. "timeout".
    title: &'static str,
    // HTTP status code mirrored into the body.
    status: u16,
    // Human-readable description of the failure.
    detail: String,
}
/// Build the HTTP router, wiring the health and RPC endpoints to the
/// shared adapter runtime (exposed to handlers via axum state).
pub fn build_router(runtime: Arc<AdapterRuntime>) -> Router {
    let rpc_routes = post(post_rpc).get(get_rpc).delete(delete_rpc);
    Router::new()
        .route("/v1/health", get(get_health))
        .route("/v1/rpc", rpc_routes)
        .with_state(runtime)
}
/// Liveness probe handler: unconditionally reports `{"ok": true}`.
async fn get_health() -> Json<HealthResponse> {
    let body = HealthResponse { ok: true };
    Json(body)
}
/// Accept one JSON-RPC message over POST and forward it to the agent.
///
/// Messages with an `id` block until the agent answers (200 + body);
/// notifications are written fire-and-forget and acknowledged with 202.
/// A non-JSON content type is rejected with 415 before the body is used.
async fn post_rpc(
    State(runtime): State<Arc<AdapterRuntime>>,
    headers: HeaderMap,
    Json(payload): Json<Value>,
) -> Response {
    // Guard clause: only declared-JSON bodies are forwarded.
    if !is_json_content_type(&headers) {
        return problem(
            StatusCode::UNSUPPORTED_MEDIA_TYPE,
            "unsupported_media_type",
            "content-type must be application/json",
        );
    }
    let outcome = runtime.post(payload).await;
    match outcome {
        Ok(PostOutcome::Response(value)) => (StatusCode::OK, Json(value)).into_response(),
        Ok(PostOutcome::Accepted) => StatusCode::ACCEPTED.into_response(),
        Err(err) => map_error(err),
    }
}
/// Open a Server-Sent Events stream of agent messages, optionally resuming
/// after the sequence number carried in the `Last-Event-ID` header.
async fn get_rpc(
    State(runtime): State<Arc<AdapterRuntime>>,
    headers: HeaderMap,
) -> impl IntoResponse {
    // A missing or non-numeric Last-Event-ID simply means "from the start".
    let resume_after = match headers.get("last-event-id") {
        Some(raw) => raw.to_str().ok().and_then(|text| text.parse::<u64>().ok()),
        None => None,
    };
    let stream = runtime.clone().sse_stream(resume_after).await;
    let keep_alive = KeepAlive::new().interval(Duration::from_secs(15));
    Sse::new(stream).keep_alive(keep_alive)
}
/// Handle DELETE on the RPC endpoint.
///
/// NOTE(review): intentionally a no-op — nothing visible here is torn down;
/// the handler just acknowledges with 204 No Content.
async fn delete_rpc() -> StatusCode {
    let status = StatusCode::NO_CONTENT;
    status
}
/// Return `true` when the request's `Content-Type` declares a JSON body.
///
/// Media types are case-insensitive (RFC 9110 §8.3.1), so this compares the
/// leading `application/json` token ignoring ASCII case; trailing parameters
/// such as `; charset=utf-8` are still accepted, matching the previous
/// `starts_with` behavior. A missing or non-ASCII header yields `false`.
fn is_json_content_type(headers: &HeaderMap) -> bool {
    headers
        .get(header::CONTENT_TYPE)
        .and_then(|value| value.to_str().ok())
        // `get(..16)` ("application/json".len() == 16) returns None for
        // shorter values and avoids panicking on a non-char boundary.
        .map(|value| {
            value
                .get(..16)
                .is_some_and(|prefix| prefix.eq_ignore_ascii_case("application/json"))
        })
        .unwrap_or(false)
}
/// Translate an [`AdapterError`] into an RFC 7807-style problem response.
///
/// Client-side faults map to 4xx; agent/subprocess faults map to 502/504.
fn map_error(err: AdapterError) -> Response {
    match err {
        AdapterError::InvalidEnvelope => problem(
            StatusCode::BAD_REQUEST,
            "invalid_envelope",
            "request body must be a JSON-RPC object",
        ),
        AdapterError::Serialize(ser) => problem(
            StatusCode::BAD_REQUEST,
            "serialize_failed",
            &format!("failed to serialize JSON payload: {ser}"),
        ),
        AdapterError::Timeout => problem(
            StatusCode::GATEWAY_TIMEOUT,
            "timeout",
            "timed out waiting for agent response",
        ),
        AdapterError::Spawn(spawn) => problem(
            StatusCode::BAD_GATEWAY,
            "spawn_failed",
            &format!("failed to start agent process: {spawn}"),
        ),
        AdapterError::Write(write) => problem(
            StatusCode::BAD_GATEWAY,
            "write_failed",
            &format!("failed writing to agent stdin: {write}"),
        ),
        AdapterError::MissingStdin | AdapterError::MissingStdout | AdapterError::MissingStderr => {
            problem(
                StatusCode::BAD_GATEWAY,
                "io_setup_failed",
                "agent subprocess pipes were not available",
            )
        }
    }
}
fn problem(status: StatusCode, title: &'static str, detail: &str) -> Response {
(
status,
Json(json!(Problem {
r#type: "about:blank",
title,
status: status.as_u16(),
detail: detail.to_string(),
})),
)
.into_response()
}

View file

@ -0,0 +1,50 @@
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use app::build_router;
use process::AdapterRuntime;
use registry::LaunchSpec;
pub mod app;
pub mod process;
pub mod registry;
/// Configuration consumed by [`run_server`].
#[derive(Debug, Clone)]
pub struct ServerConfig {
    /// Interface to bind, e.g. "127.0.0.1".
    pub host: String,
    /// TCP port to listen on.
    pub port: u16,
    /// Raw registry JSON blob describing how to launch the agent.
    pub registry_json: String,
    /// Agent id to select when the blob lists multiple agents.
    pub registry_agent_id: Option<String>,
    /// How long a JSON-RPC request may wait for the agent's response.
    pub rpc_timeout: Duration,
}
/// Resolve the launch spec from the registry blob, start the agent
/// subprocess, and serve HTTP until shutdown.
///
/// # Errors
/// Propagates registry-parsing failures, subprocess spawn failures, and
/// any server bind/serve error.
pub async fn run_server(
    config: ServerConfig,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let agent = config.registry_agent_id.as_deref();
    let launch = LaunchSpec::from_registry_blob(&config.registry_json, agent)?;
    let runtime = AdapterRuntime::start(launch, config.rpc_timeout).await?;
    run_server_with_runtime(config.host, config.port, Arc::new(runtime)).await
}
/// Bind `host:port` and serve the adapter router until Ctrl-C, then shut
/// the runtime (and its agent subprocess) down gracefully.
///
/// # Errors
/// Fails when the address cannot be parsed, the listener cannot bind, or
/// serving returns an error.
pub async fn run_server_with_runtime(
    host: String,
    port: u16,
    runtime: Arc<AdapterRuntime>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let addr: SocketAddr = format!("{host}:{port}").parse()?;
    let router = build_router(Arc::clone(&runtime));
    let listener = tokio::net::TcpListener::bind(addr).await?;
    tracing::info!(addr = %addr, "acp-http-adapter listening");
    // The shutdown future owns the runtime handle so it can stop the
    // subprocess after the signal fires.
    let shutdown = shutdown_signal(runtime);
    axum::serve(listener, router)
        .with_graceful_shutdown(shutdown)
        .await?;
    Ok(())
}
/// Resolve once Ctrl-C is observed, then stop the agent subprocess.
/// Whether or not the signal handler could be installed, the runtime is
/// shut down (matching the original's deliberately ignored result).
async fn shutdown_signal(runtime: Arc<AdapterRuntime>) {
    match tokio::signal::ctrl_c().await {
        Ok(()) | Err(_) => runtime.shutdown().await,
    }
}

View file

@ -0,0 +1,55 @@
use std::time::Duration;
use acp_http_adapter::{run_server, ServerConfig};
use clap::Parser;
// Command-line flags for the adapter binary. Field comments deliberately
// use `//` (not `///`): clap turns doc comments into --help text, which
// would change the program's runtime output.
#[derive(Debug, Parser)]
#[command(name = "acp-http-adapter")]
#[command(about = "Minimal ACP HTTP->stdio adapter", version)]
struct Cli {
    // Interface to bind the HTTP server on.
    #[arg(long, default_value = "127.0.0.1")]
    host: String,
    // TCP port to listen on.
    #[arg(long, default_value_t = 7591)]
    port: u16,
    // Registry JSON blob describing the agent launch target (required).
    #[arg(long)]
    registry_json: String,
    // Agent id to pick when the registry blob lists multiple agents.
    #[arg(long)]
    registry_agent_id: Option<String>,
    // Per-request timeout in milliseconds; `run` defaults to 120s if absent.
    #[arg(long)]
    rpc_timeout_ms: Option<u64>,
}
/// Entry point: delegate to `run` so any error is logged uniformly before
/// exiting with a non-zero status.
#[tokio::main]
async fn main() {
    match run().await {
        Ok(()) => {}
        Err(err) => {
            tracing::error!(error = %err, "acp-http-adapter failed");
            std::process::exit(1);
        }
    }
}
/// Initialize tracing, parse CLI flags, and launch the server.
///
/// # Errors
/// Propagates any failure from [`run_server`].
async fn run() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // RUST_LOG-style filter from the environment, defaulting to "info".
    let filter = tracing_subscriber::EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info"));
    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .compact()
        .init();
    let cli = Cli::parse();
    // --rpc-timeout-ms overrides the 120-second default.
    let rpc_timeout = match cli.rpc_timeout_ms {
        Some(ms) => Duration::from_millis(ms),
        None => Duration::from_secs(120),
    };
    let config = ServerConfig {
        host: cli.host,
        port: cli.port,
        registry_json: cli.registry_json,
        registry_agent_id: cli.registry_agent_id,
        rpc_timeout,
    };
    run_server(config).await
}

View file

@ -0,0 +1,567 @@
use std::collections::{HashMap, VecDeque};
use std::convert::Infallible;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use axum::response::sse::Event;
use futures::{stream, Stream, StreamExt};
use serde_json::{json, Value};
use thiserror::Error;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::process::{Child, ChildStdin, Command};
use tokio::sync::{broadcast, oneshot, Mutex};
use tokio_stream::wrappers::BroadcastStream;
use crate::registry::LaunchSpec;
/// Maximum number of stream messages retained for `Last-Event-ID` replay;
/// older entries are dropped from the front of the ring.
const RING_BUFFER_SIZE: usize = 1024;
/// Errors surfaced by [`AdapterRuntime`] operations.
#[derive(Debug, Error)]
pub enum AdapterError {
    /// The agent executable could not be started.
    #[error("failed to spawn subprocess: {0}")]
    Spawn(std::io::Error),
    /// `child.stdin` was `None` despite requesting a piped handle.
    #[error("failed to capture subprocess stdin")]
    MissingStdin,
    /// `child.stdout` was `None` despite requesting a piped handle.
    #[error("failed to capture subprocess stdout")]
    MissingStdout,
    /// `child.stderr` was `None` despite requesting a piped handle.
    #[error("failed to capture subprocess stderr")]
    MissingStderr,
    /// POST payload was not a JSON object.
    #[error("invalid json-rpc envelope")]
    InvalidEnvelope,
    /// Outgoing payload could not be serialized to bytes.
    #[error("failed to serialize json-rpc message: {0}")]
    Serialize(serde_json::Error),
    /// Writing or flushing the agent's stdin failed.
    #[error("failed to write subprocess stdin: {0}")]
    Write(std::io::Error),
    /// No response arrived within the configured timeout (also used when
    /// the response channel is dropped, e.g. after process exit).
    #[error("timeout waiting for response")]
    Timeout,
}
/// Result of forwarding a message via [`AdapterRuntime::post`].
#[derive(Debug)]
pub enum PostOutcome {
    /// The payload carried an `id`: this is the agent's matching response.
    Response(Value),
    /// The payload was a notification; it was written without waiting.
    Accepted,
}
/// One broadcast item: a JSON-RPC message plus its monotonically increasing
/// sequence number (used as the SSE event id for resume).
#[derive(Debug, Clone)]
struct StreamMessage {
    // 1-based stream position; doubles as the SSE `Last-Event-ID` value.
    sequence: u64,
    payload: Value,
}
/// Owns the agent subprocess and bridges HTTP requests to its stdio
/// JSON-RPC stream (requests matched by id; notifications broadcast).
#[derive(Debug)]
pub struct AdapterRuntime {
    // Serializes writes to the agent's stdin.
    stdin: Arc<Mutex<ChildStdin>>,
    // Child handle, shared with the exit watcher and `shutdown`.
    child: Arc<Mutex<Child>>,
    // In-flight requests keyed by `id_key`, resolved by the stdout loop.
    pending: Arc<Mutex<HashMap<String, oneshot::Sender<Value>>>>,
    // Live fan-out of stream messages to SSE subscribers.
    sender: broadcast::Sender<StreamMessage>,
    // Bounded replay buffer (RING_BUFFER_SIZE) for Last-Event-ID resume.
    ring: Arc<Mutex<VecDeque<StreamMessage>>>,
    // Source of monotonically increasing sequence numbers.
    sequence: Arc<AtomicU64>,
    // Timeout applied while `post` waits for a response.
    request_timeout: Duration,
    // Set on first `shutdown` call; makes shutdown idempotent.
    shutting_down: AtomicBool,
    // Spawn timestamp, used for age_ms diagnostics in logs.
    spawned_at: Instant,
    // Whether any stdout line has been observed yet (diagnostics only).
    first_stdout: Arc<AtomicBool>,
}
impl AdapterRuntime {
    /// Spawn the agent subprocess described by `launch` and start the
    /// background tasks pumping its stdout/stderr and watching for exit.
    ///
    /// # Errors
    /// [`AdapterError::Spawn`] when the process cannot be started, or a
    /// `Missing*` variant when a requested stdio pipe is absent.
    pub async fn start(
        launch: LaunchSpec,
        request_timeout: Duration,
    ) -> Result<Self, AdapterError> {
        let spawn_start = Instant::now();
        let mut command = Command::new(&launch.program);
        command
            .args(&launch.args)
            .stdin(std::process::Stdio::piped())
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::piped());
        // Extra environment from the registry entry, layered on top of the
        // inherited environment.
        for (key, value) in &launch.env {
            command.env(key, value);
        }
        tracing::info!(
            program = ?launch.program,
            args = ?launch.args,
            "spawning agent process"
        );
        let mut child = command.spawn().map_err(|err| {
            tracing::error!(
                program = ?launch.program,
                error = %err,
                "failed to spawn agent process"
            );
            AdapterError::Spawn(err)
        })?;
        let pid = child.id().unwrap_or(0);
        let spawn_elapsed = spawn_start.elapsed();
        tracing::info!(
            pid = pid,
            elapsed_ms = spawn_elapsed.as_millis() as u64,
            "agent process spawned"
        );
        // All three pipes were requested as Stdio::piped(), so these should
        // be present; each missing pipe maps to its own error variant.
        let stdin = child.stdin.take().ok_or(AdapterError::MissingStdin)?;
        let stdout = child.stdout.take().ok_or(AdapterError::MissingStdout)?;
        let stderr = child.stderr.take().ok_or(AdapterError::MissingStderr)?;
        // 512-slot broadcast channel; the initial receiver is dropped and
        // subscribers attach later via `subscribe`.
        let (sender, _rx) = broadcast::channel(512);
        let runtime = Self {
            stdin: Arc::new(Mutex::new(stdin)),
            child: Arc::new(Mutex::new(child)),
            pending: Arc::new(Mutex::new(HashMap::new())),
            sender,
            ring: Arc::new(Mutex::new(VecDeque::with_capacity(RING_BUFFER_SIZE))),
            sequence: Arc::new(AtomicU64::new(0)),
            request_timeout,
            shutting_down: AtomicBool::new(false),
            spawned_at: spawn_start,
            first_stdout: Arc::new(AtomicBool::new(false)),
        };
        runtime.spawn_stdout_loop(stdout);
        runtime.spawn_stderr_loop(stderr);
        runtime.spawn_exit_watcher();
        Ok(runtime)
    }

    /// Forward one JSON-RPC message to the agent.
    ///
    /// Payloads with both `method` and `id` are requests: a pending slot is
    /// registered and the call waits (bounded by `request_timeout`) for the
    /// stdout loop to deliver the matching response. Anything else is
    /// written fire-and-forget and reported as [`PostOutcome::Accepted`].
    ///
    /// # Errors
    /// `InvalidEnvelope` for non-object payloads; `Serialize`/`Write` for
    /// stdin failures; `Timeout` on deadline expiry or when the response
    /// channel is dropped (e.g. the agent exited).
    pub async fn post(&self, payload: Value) -> Result<PostOutcome, AdapterError> {
        if !payload.is_object() {
            return Err(AdapterError::InvalidEnvelope);
        }
        // Method name captured up front for logging on every path.
        let method: String = payload
            .get("method")
            .and_then(|v| v.as_str())
            .unwrap_or("<none>")
            .to_string();
        let has_method = payload.get("method").is_some();
        let id = payload.get("id");
        if has_method && id.is_some() {
            let id_value = id.expect("checked");
            let key = id_key(id_value);
            let (tx, rx) = oneshot::channel();
            // Read before the insert below, so this counts *other*
            // in-flight requests (logging only; not atomic with the insert).
            let pending_count = self.pending.lock().await.len();
            tracing::info!(
                method = %method,
                id = %key,
                pending_count = pending_count,
                "post: request → agent (awaiting response)"
            );
            self.pending.lock().await.insert(key.clone(), tx);
            let write_start = Instant::now();
            if let Err(err) = self.send_to_subprocess(&payload).await {
                tracing::error!(
                    method = %method,
                    id = %key,
                    error = %err,
                    "post: failed to write to agent stdin"
                );
                // Roll back the pending slot so the stdout loop never
                // resolves a request that was never written.
                self.pending.lock().await.remove(&key);
                return Err(err);
            }
            let write_ms = write_start.elapsed().as_millis() as u64;
            tracing::debug!(
                method = %method,
                id = %key,
                write_ms = write_ms,
                "post: stdin write complete, waiting for response"
            );
            let wait_start = Instant::now();
            match tokio::time::timeout(self.request_timeout, rx).await {
                Ok(Ok(response)) => {
                    let wait_ms = wait_start.elapsed().as_millis() as u64;
                    tracing::info!(
                        method = %method,
                        id = %key,
                        response_ms = wait_ms,
                        total_ms = write_ms + wait_ms,
                        "post: got response from agent"
                    );
                    Ok(PostOutcome::Response(response))
                }
                // Sender dropped without a value: the pending map was
                // cleared (shutdown) or the entry vanished; surfaced as a
                // Timeout to callers.
                Ok(Err(_)) => {
                    let wait_ms = wait_start.elapsed().as_millis() as u64;
                    tracing::error!(
                        method = %method,
                        id = %key,
                        wait_ms = wait_ms,
                        "post: response channel dropped (agent process may have exited)"
                    );
                    self.pending.lock().await.remove(&key);
                    Err(AdapterError::Timeout)
                }
                // Deadline expired with the request still outstanding.
                Err(_) => {
                    let pending_keys: Vec<String> =
                        self.pending.lock().await.keys().cloned().collect();
                    tracing::error!(
                        method = %method,
                        id = %key,
                        timeout_ms = self.request_timeout.as_millis() as u64,
                        age_ms = self.spawned_at.elapsed().as_millis() as u64,
                        pending_keys = ?pending_keys,
                        first_stdout_seen = self.first_stdout.load(Ordering::Relaxed),
                        "post: TIMEOUT waiting for agent response"
                    );
                    self.pending.lock().await.remove(&key);
                    Err(AdapterError::Timeout)
                }
            }
        } else {
            // Notification (or malformed request without method): write
            // without registering a pending slot.
            tracing::debug!(
                method = %method,
                "post: notification → agent (fire-and-forget)"
            );
            self.send_to_subprocess(&payload).await?;
            Ok(PostOutcome::Accepted)
        }
    }

    /// Snapshot the replay ring (entries with sequence > `last_event_id`,
    /// or everything buffered when `None`) and open a live broadcast
    /// receiver.
    ///
    /// NOTE(review): the ring lock is released before `subscribe()` runs, so
    /// a message published in that window appears in neither the snapshot
    /// nor the live receiver — confirm this gap is acceptable.
    async fn subscribe(
        &self,
        last_event_id: Option<u64>,
    ) -> (Vec<(u64, Value)>, broadcast::Receiver<StreamMessage>) {
        let replay = {
            let ring = self.ring.lock().await;
            ring.iter()
                .filter(|message| {
                    if let Some(last_event_id) = last_event_id {
                        message.sequence > last_event_id
                    } else {
                        true
                    }
                })
                .map(|message| (message.sequence, message.payload.clone()))
                .collect::<Vec<_>>()
        };
        (replay, self.sender.subscribe())
    }

    /// SSE event stream: buffered replay first, then live messages. Every
    /// event is named "message" with the sequence number as its id, so
    /// clients can resume via `Last-Event-ID`.
    pub async fn sse_stream(
        self: Arc<Self>,
        last_event_id: Option<u64>,
    ) -> impl Stream<Item = Result<Event, Infallible>> + Send + 'static {
        let (replay, rx) = self.subscribe(last_event_id).await;
        let replay_stream = stream::iter(replay.into_iter().map(|(sequence, payload)| {
            let event = Event::default()
                .event("message")
                .id(sequence.to_string())
                .data(payload.to_string());
            Ok(event)
        }));
        // Lagged broadcast errors are silently skipped (filter_map → None),
        // so slow consumers lose messages rather than ending the stream.
        let live_stream = BroadcastStream::new(rx).filter_map(|item| async move {
            match item {
                Ok(message) => {
                    let event = Event::default()
                        .event("message")
                        .id(message.sequence.to_string())
                        .data(message.payload.to_string());
                    Some(Ok(event))
                }
                Err(_) => None,
            }
        });
        replay_stream.chain(live_stream)
    }

    /// Stream of raw JSON-RPC `Value` payloads (without SSE framing).
    /// Useful for consumers that need to inspect the payload contents
    /// rather than forward them as SSE events. Same replay-then-live
    /// semantics as [`Self::sse_stream`], including skipping lagged items.
    pub async fn value_stream(
        self: Arc<Self>,
        last_event_id: Option<u64>,
    ) -> impl Stream<Item = Value> + Send + 'static {
        let (replay, rx) = self.subscribe(last_event_id).await;
        let replay_stream = stream::iter(replay.into_iter().map(|(_sequence, payload)| payload));
        let live_stream = BroadcastStream::new(rx).filter_map(|item| async move {
            match item {
                Ok(message) => Some(message.payload),
                Err(_) => None,
            }
        });
        replay_stream.chain(live_stream)
    }

    /// Idempotently stop the agent: drop all pending waiters (their oneshot
    /// senders are dropped, waking `post` callers with an error) and kill
    /// the process if it has not already exited.
    ///
    /// NOTE(review): `spawn_exit_watcher` holds the `child` lock across
    /// `wait().await`, so this `lock().await` may not resolve until the
    /// process exits on its own — verify `kill` is actually reachable here.
    pub async fn shutdown(&self) {
        // swap(true) returns the previous value; a second caller sees
        // `true` and returns immediately.
        if self.shutting_down.swap(true, Ordering::SeqCst) {
            return;
        }
        tracing::info!(
            age_ms = self.spawned_at.elapsed().as_millis() as u64,
            "shutting down agent process"
        );
        self.pending.lock().await.clear();
        let mut child = self.child.lock().await;
        match child.try_wait() {
            // Already exited: nothing to do.
            Ok(Some(_)) => {}
            // Still running: kill and reap; both results intentionally
            // ignored (best effort during shutdown).
            Ok(None) => {
                let _ = child.kill().await;
                let _ = child.wait().await;
            }
            Err(_) => {
                let _ = child.kill().await;
            }
        }
    }

    /// Spawn the task that reads the agent's stdout line by line, resolves
    /// pending requests, and broadcasts everything else (and responses) to
    /// stream subscribers.
    fn spawn_stdout_loop(&self, stdout: tokio::process::ChildStdout) {
        let pending = self.pending.clone();
        let sender = self.sender.clone();
        let ring = self.ring.clone();
        let sequence = self.sequence.clone();
        let spawned_at = self.spawned_at;
        let first_stdout = self.first_stdout.clone();
        tokio::spawn(async move {
            let mut lines = BufReader::new(stdout).lines();
            let mut line_count: u64 = 0;
            // Loop ends on EOF or read error (both fall out of the while).
            while let Ok(Some(line)) = lines.next_line().await {
                let trimmed = line.trim();
                if trimmed.is_empty() {
                    continue;
                }
                line_count += 1;
                // First-line latency is logged once for startup diagnostics.
                if !first_stdout.swap(true, Ordering::Relaxed) {
                    tracing::info!(
                        first_stdout_ms = spawned_at.elapsed().as_millis() as u64,
                        line_bytes = trimmed.len(),
                        "agent process: first stdout line received"
                    );
                }
                // Non-JSON lines are not dropped: they are wrapped in a
                // synthetic `_adapter/invalid_stdout` notification so
                // subscribers can observe them.
                let payload = match serde_json::from_str::<Value>(trimmed) {
                    Ok(payload) => payload,
                    Err(err) => {
                        tracing::warn!(
                            error = %err,
                            line_number = line_count,
                            // NOTE(review): `&trimmed[..200]` is a byte
                            // slice and can panic if byte 200 is not a
                            // UTF-8 char boundary — confirm.
                            raw = %if trimmed.len() > 200 {
                                format!("{}...", &trimmed[..200])
                            } else {
                                trimmed.to_string()
                            },
                            "agent stdout: invalid JSON"
                        );
                        json!({
                            "jsonrpc": "2.0",
                            "method": "_adapter/invalid_stdout",
                            "params": {
                                "error": err.to_string(),
                                "raw": trimmed,
                            }
                        })
                    }
                };
                // A message with an `id` and no `method` is a response to
                // one of our requests.
                let is_response = payload.get("id").is_some() && payload.get("method").is_none();
                if is_response {
                    let key = id_key(payload.get("id").expect("checked"));
                    let has_error = payload.get("error").is_some();
                    if let Some(tx) = pending.lock().await.remove(&key) {
                        tracing::debug!(
                            id = %key,
                            has_error = has_error,
                            age_ms = spawned_at.elapsed().as_millis() as u64,
                            "agent stdout: response matched to pending request"
                        );
                        let _ = tx.send(payload.clone());
                        // Also broadcast the response so SSE/notification subscribers
                        // see it in order after preceding notifications. This lets the
                        // SSE translation task detect turn completion after all
                        // session/update events have been processed.
                        let seq = sequence.fetch_add(1, Ordering::SeqCst) + 1;
                        let message = StreamMessage {
                            sequence: seq,
                            payload,
                        };
                        {
                            let mut guard = ring.lock().await;
                            guard.push_back(message.clone());
                            while guard.len() > RING_BUFFER_SIZE {
                                guard.pop_front();
                            }
                        }
                        let _ = sender.send(message);
                        continue;
                    } else {
                        // No waiter: fall through and broadcast it like a
                        // notification so it is not lost.
                        tracing::warn!(
                            id = %key,
                            has_error = has_error,
                            "agent stdout: response has no matching pending request (orphan)"
                        );
                    }
                }
                let method = payload
                    .get("method")
                    .and_then(|v| v.as_str())
                    .unwrap_or("<none>");
                tracing::debug!(
                    method = method,
                    line_number = line_count,
                    "agent stdout: notification/event → SSE broadcast"
                );
                // Sequence numbers start at 1 (fetch_add returns the old
                // value); push to the bounded ring, then broadcast.
                let seq = sequence.fetch_add(1, Ordering::SeqCst) + 1;
                let message = StreamMessage {
                    sequence: seq,
                    payload,
                };
                {
                    let mut guard = ring.lock().await;
                    guard.push_back(message.clone());
                    while guard.len() > RING_BUFFER_SIZE {
                        guard.pop_front();
                    }
                }
                let _ = sender.send(message);
            }
            tracing::info!(
                total_lines = line_count,
                age_ms = spawned_at.elapsed().as_millis() as u64,
                "agent stdout: stream ended"
            );
        });
    }

    /// Spawn the task that drains the agent's stderr, logging each line at
    /// info level (stderr is diagnostics only; nothing is parsed).
    fn spawn_stderr_loop(&self, stderr: tokio::process::ChildStderr) {
        let spawned_at = self.spawned_at;
        tokio::spawn(async move {
            let mut lines = BufReader::new(stderr).lines();
            let mut line_count: u64 = 0;
            while let Ok(Some(line)) = lines.next_line().await {
                line_count += 1;
                tracing::info!(
                    line_number = line_count,
                    age_ms = spawned_at.elapsed().as_millis() as u64,
                    "agent stderr: {}",
                    line
                );
            }
            tracing::debug!(
                total_lines = line_count,
                age_ms = spawned_at.elapsed().as_millis() as u64,
                "agent stderr: stream ended"
            );
        });
    }

    /// Spawn the task that waits for process exit and broadcasts a
    /// synthetic `_adapter/agent_exited` notification to subscribers.
    ///
    /// NOTE(review): this task holds the `child` mutex for the whole
    /// `wait().await`, which contends with `shutdown` (see note there).
    fn spawn_exit_watcher(&self) {
        let child = self.child.clone();
        let sender = self.sender.clone();
        let ring = self.ring.clone();
        let sequence = self.sequence.clone();
        let spawned_at = self.spawned_at;
        let pending = self.pending.clone();
        tokio::spawn(async move {
            let status = {
                let mut guard = child.lock().await;
                guard.wait().await.ok()
            };
            let age_ms = spawned_at.elapsed().as_millis() as u64;
            let pending_count = pending.lock().await.len();
            if let Some(status) = status {
                tracing::warn!(
                    success = status.success(),
                    code = status.code(),
                    age_ms = age_ms,
                    pending_requests = pending_count,
                    "agent process exited"
                );
                let payload = json!({
                    "jsonrpc": "2.0",
                    "method": "_adapter/agent_exited",
                    "params": {
                        "success": status.success(),
                        "code": status.code(),
                    }
                });
                // Same ring + broadcast publication as the stdout loop.
                let seq = sequence.fetch_add(1, Ordering::SeqCst) + 1;
                let message = StreamMessage {
                    sequence: seq,
                    payload,
                };
                {
                    let mut guard = ring.lock().await;
                    guard.push_back(message.clone());
                    while guard.len() > RING_BUFFER_SIZE {
                        guard.pop_front();
                    }
                }
                let _ = sender.send(message);
            } else {
                tracing::error!(
                    age_ms = age_ms,
                    pending_requests = pending_count,
                    "agent process: failed to get exit status"
                );
            }
        });
    }

    /// Serialize `payload` and write it to the agent's stdin as one
    /// newline-terminated line, flushing afterwards. Writes are serialized
    /// by the `stdin` mutex.
    ///
    /// # Errors
    /// `Serialize` when JSON encoding fails, `Write` for any stdin I/O
    /// failure (write, newline, or flush).
    async fn send_to_subprocess(&self, payload: &Value) -> Result<(), AdapterError> {
        let method = payload
            .get("method")
            .and_then(|v| v.as_str())
            .unwrap_or("<none>");
        let id = payload.get("id").map(|v| v.to_string()).unwrap_or_default();
        // NOTE(review): the payload is serialized twice — once here for the
        // byte count and once below for the actual write.
        tracing::debug!(
            method = method,
            id = %id,
            bytes = serde_json::to_vec(payload).map(|b| b.len()).unwrap_or(0),
            "stdin: writing message to agent"
        );
        let mut stdin = self.stdin.lock().await;
        let bytes = serde_json::to_vec(payload).map_err(AdapterError::Serialize)?;
        stdin.write_all(&bytes).await.map_err(|err| {
            tracing::error!(method = method, id = %id, error = %err, "stdin: write_all failed");
            AdapterError::Write(err)
        })?;
        stdin.write_all(b"\n").await.map_err(|err| {
            tracing::error!(method = method, id = %id, error = %err, "stdin: newline write failed");
            AdapterError::Write(err)
        })?;
        stdin.flush().await.map_err(|err| {
            tracing::error!(method = method, id = %id, error = %err, "stdin: flush failed");
            AdapterError::Write(err)
        })?;
        tracing::debug!(method = method, id = %id, "stdin: write+flush complete");
        Ok(())
    }
}
/// Canonical map key for a JSON-RPC `id`: its JSON text (e.g. `1`, `"abc"`).
/// Falls back to "null" should serialization ever fail.
fn id_key(value: &Value) -> String {
    match serde_json::to_string(value) {
        Ok(text) => text,
        Err(_) => String::from("null"),
    }
}

View file

@ -0,0 +1,143 @@
use std::collections::HashMap;
use std::path::PathBuf;
use serde::Deserialize;
use serde_json::Value;
use thiserror::Error;
/// Fully-resolved instructions for launching an agent subprocess.
#[derive(Debug, Clone)]
pub struct LaunchSpec {
    /// Executable to run (`npx`, or a platform-specific binary path).
    pub program: PathBuf,
    /// Arguments passed to the program.
    pub args: Vec<String>,
    /// Extra environment variables for the child process.
    pub env: HashMap<String, String>,
}
/// Errors produced while resolving a registry blob into a [`LaunchSpec`].
#[derive(Debug, Error)]
pub enum RegistryError {
    /// Blob was not valid JSON, or did not match the expected shape.
    #[error("invalid registry json: {0}")]
    InvalidJson(#[from] serde_json::Error),
    /// Blob matched none of the supported shapes (document/entry/distribution).
    #[error("unable to resolve registry entry from blob")]
    UnsupportedBlob,
    /// A multi-agent document needs `--registry-agent-id` to pick one.
    #[error("registry blob has agents[] but no --registry-agent-id was provided")]
    MissingAgentId,
    /// The requested agent id was absent from `agents[]`.
    #[error("agent '{0}' was not found in registry blob")]
    AgentNotFound(String),
    /// Distribution offered neither `npx` nor `binary` targets.
    #[error("registry entry has no supported launch target")]
    MissingLaunchTarget,
    /// Current OS/arch has no entry under `distribution.binary`.
    #[error("platform '{0}' is not present in distribution.binary")]
    UnsupportedPlatform(String),
}
impl LaunchSpec {
    /// Parse a registry JSON blob and resolve it into a concrete launch spec.
    ///
    /// # Errors
    /// Returns a [`RegistryError`] when the blob is malformed or no
    /// supported launch target can be resolved.
    pub fn from_registry_blob(blob: &str, agent_id: Option<&str>) -> Result<Self, RegistryError> {
        let value: Value = serde_json::from_str(blob)?;
        Self::from_registry_value(value, agent_id)
    }

    /// Accept three blob shapes, in order: a full registry document
    /// (`{"agents": [...]}`, which requires `agent_id`), a single agent
    /// entry (`{"distribution": ...}`), or a bare distribution
    /// (`{"npx": ...}` / `{"binary": ...}`).
    fn from_registry_value(value: Value, agent_id: Option<&str>) -> Result<Self, RegistryError> {
        if value.get("agents").is_some() {
            let doc: RegistryDocument = serde_json::from_value(value)?;
            let wanted = agent_id.ok_or(RegistryError::MissingAgentId)?;
            let agent = doc
                .agents
                .into_iter()
                .find(|a| a.id == wanted)
                .ok_or_else(|| RegistryError::AgentNotFound(wanted.to_string()))?;
            return Self::from_distribution(agent.distribution);
        }
        if value.get("distribution").is_some() {
            let entry: RegistryAgent = serde_json::from_value(value)?;
            return Self::from_distribution(entry.distribution);
        }
        if value.get("npx").is_some() || value.get("binary").is_some() {
            let distribution: RegistryDistribution = serde_json::from_value(value)?;
            return Self::from_distribution(distribution);
        }
        Err(RegistryError::UnsupportedBlob)
    }

    /// Pick a launch target from a distribution, preferring `npx` over
    /// platform-specific binaries.
    fn from_distribution(distribution: RegistryDistribution) -> Result<Self, RegistryError> {
        if let Some(npx) = distribution.npx {
            // `npx -y <package> <args...>`: -y avoids the install prompt.
            let mut args = vec!["-y".to_string(), npx.package];
            args.extend(npx.args);
            return Ok(Self {
                program: PathBuf::from("npx"),
                args,
                env: npx.env,
            });
        }
        if let Some(binary) = distribution.binary {
            // `ok_or_else` keeps the error string from being formatted on
            // the happy path (the original eagerly evaluated `format!`
            // inside `ok_or`).
            let platform = platform_key().ok_or_else(|| {
                RegistryError::UnsupportedPlatform(format!(
                    "{}/{}",
                    std::env::consts::OS,
                    std::env::consts::ARCH
                ))
            })?;
            let target = binary
                .get(platform)
                .ok_or_else(|| RegistryError::UnsupportedPlatform(platform.to_string()))?;
            return Ok(Self {
                program: PathBuf::from(&target.cmd),
                args: target.args.clone(),
                env: target.env.clone(),
            });
        }
        Err(RegistryError::MissingLaunchTarget)
    }
}
/// Map the compile-time (OS, arch) pair onto the registry's platform key,
/// e.g. `("macos", "aarch64")` → `"darwin-aarch64"`. Returns `None` for
/// combinations the registry format does not cover.
fn platform_key() -> Option<&'static str> {
    const TABLE: &[((&str, &str), &str)] = &[
        (("linux", "x86_64"), "linux-x86_64"),
        (("linux", "aarch64"), "linux-aarch64"),
        (("macos", "x86_64"), "darwin-x86_64"),
        (("macos", "aarch64"), "darwin-aarch64"),
        (("windows", "x86_64"), "windows-x86_64"),
        (("windows", "aarch64"), "windows-aarch64"),
    ];
    let current = (std::env::consts::OS, std::env::consts::ARCH);
    TABLE
        .iter()
        .find(|(pair, _)| *pair == current)
        .map(|(_, key)| *key)
}
/// Top-level registry document shape: `{"agents": [...]}`.
#[derive(Debug, Deserialize)]
struct RegistryDocument {
    agents: Vec<RegistryAgent>,
}
/// One agent entry in a registry document.
#[derive(Debug, Deserialize)]
struct RegistryAgent {
    /// Identifier matched against `--registry-agent-id`.
    ///
    /// The `#[allow(dead_code)]` this field previously carried was
    /// misleading: the field is read by `LaunchSpec::from_registry_value`.
    id: String,
    /// How to launch this agent.
    distribution: RegistryDistribution,
}
/// Launch targets offered by an agent; `LaunchSpec::from_distribution`
/// prefers `npx` over `binary` when both are present.
#[derive(Debug, Deserialize)]
struct RegistryDistribution {
    /// npm-based launch via `npx -y <package>`.
    #[serde(default)]
    npx: Option<RegistryNpx>,
    /// Native binaries keyed by platform (see `platform_key`).
    #[serde(default)]
    binary: Option<HashMap<String, RegistryBinaryTarget>>,
}
/// npm launch target: package name plus optional args and environment.
#[derive(Debug, Deserialize)]
struct RegistryNpx {
    package: String,
    #[serde(default)]
    args: Vec<String>,
    #[serde(default)]
    env: HashMap<String, String>,
}
/// Native binary launch target for one platform.
#[derive(Debug, Deserialize)]
struct RegistryBinaryTarget {
    // Accepted in the blob but unused by the visible resolution code.
    #[allow(dead_code)]
    archive: Option<String>,
    /// Command (path or name) to execute.
    cmd: String,
    #[serde(default)]
    args: Vec<String>,
    #[serde(default)]
    env: HashMap<String, String>,
}