diff --git a/.github/actionlint.yml b/.github/actionlint.yml
index 6bfbc27705..ad09545902 100644
--- a/.github/actionlint.yml
+++ b/.github/actionlint.yml
@@ -5,25 +5,25 @@ self-hosted-runner:
# GitHub-hosted Runners
- github-8vcpu-ubuntu-2404
- github-16vcpu-ubuntu-2404
+ - github-32vcpu-ubuntu-2404
+ - github-8vcpu-ubuntu-2204
+ - github-16vcpu-ubuntu-2204
+ - github-32vcpu-ubuntu-2204
+ - github-16vcpu-ubuntu-2204-arm
- windows-2025-16
- windows-2025-32
- windows-2025-64
- # Buildjet Ubuntu 20.04 - AMD x86_64
- - buildjet-2vcpu-ubuntu-2004
- - buildjet-4vcpu-ubuntu-2004
- - buildjet-8vcpu-ubuntu-2004
- - buildjet-16vcpu-ubuntu-2004
- - buildjet-32vcpu-ubuntu-2004
- # Buildjet Ubuntu 22.04 - AMD x86_64
- - buildjet-2vcpu-ubuntu-2204
- - buildjet-4vcpu-ubuntu-2204
- - buildjet-8vcpu-ubuntu-2204
- - buildjet-16vcpu-ubuntu-2204
- - buildjet-32vcpu-ubuntu-2204
- # Buildjet Ubuntu 22.04 - Graviton aarch64
- - buildjet-8vcpu-ubuntu-2204-arm
- - buildjet-16vcpu-ubuntu-2204-arm
- - buildjet-32vcpu-ubuntu-2204-arm
+ # Namespace Ubuntu 20.04 (Release builds)
+ - namespace-profile-16x32-ubuntu-2004
+ - namespace-profile-32x64-ubuntu-2004
+ - namespace-profile-16x32-ubuntu-2004-arm
+ - namespace-profile-32x64-ubuntu-2004-arm
+ # Namespace Ubuntu 22.04 (Everything else)
+ - namespace-profile-2x4-ubuntu-2204
+ - namespace-profile-4x8-ubuntu-2204
+ - namespace-profile-8x16-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
+ - namespace-profile-32x64-ubuntu-2204
# Self Hosted Runners
- self-mini-macos
- self-32vcpu-windows-2022
diff --git a/.github/actions/build_docs/action.yml b/.github/actions/build_docs/action.yml
index a7effad247..d2e62d5b22 100644
--- a/.github/actions/build_docs/action.yml
+++ b/.github/actions/build_docs/action.yml
@@ -13,7 +13,7 @@ runs:
uses: swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- cache-provider: "buildjet"
+ # cache-provider: "buildjet"
- name: Install Linux dependencies
shell: bash -euxo pipefail {0}
diff --git a/.github/workflows/bump_patch_version.yml b/.github/workflows/bump_patch_version.yml
index 8a48ff96f1..bfaf7a271b 100644
--- a/.github/workflows/bump_patch_version.yml
+++ b/.github/workflows/bump_patch_version.yml
@@ -16,7 +16,7 @@ jobs:
bump_patch_version:
if: github.repository_owner == 'zed-industries'
runs-on:
- - buildjet-16vcpu-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 43d305faae..84907351fe 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -137,7 +137,7 @@ jobs:
github.repository_owner == 'zed-industries' &&
needs.job_spec.outputs.run_tests == 'true'
runs-on:
- - buildjet-8vcpu-ubuntu-2204
+ - namespace-profile-8x16-ubuntu-2204
steps:
- name: Checkout repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
@@ -168,7 +168,7 @@ jobs:
needs: [job_spec]
if: github.repository_owner == 'zed-industries'
runs-on:
- - buildjet-8vcpu-ubuntu-2204
+ - namespace-profile-4x8-ubuntu-2204
steps:
- name: Checkout repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
@@ -221,7 +221,7 @@ jobs:
github.repository_owner == 'zed-industries' &&
(needs.job_spec.outputs.run_tests == 'true' || needs.job_spec.outputs.run_docs == 'true')
runs-on:
- - buildjet-8vcpu-ubuntu-2204
+ - namespace-profile-8x16-ubuntu-2204
steps:
- name: Checkout repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
@@ -328,7 +328,7 @@ jobs:
github.repository_owner == 'zed-industries' &&
needs.job_spec.outputs.run_tests == 'true'
runs-on:
- - buildjet-16vcpu-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
steps:
- name: Add Rust to the PATH
run: echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
@@ -342,7 +342,7 @@ jobs:
uses: swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- cache-provider: "buildjet"
+ # cache-provider: "buildjet"
- name: Install Linux dependencies
run: ./script/linux
@@ -380,7 +380,7 @@ jobs:
github.repository_owner == 'zed-industries' &&
needs.job_spec.outputs.run_tests == 'true'
runs-on:
- - buildjet-8vcpu-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
steps:
- name: Add Rust to the PATH
run: echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
@@ -394,7 +394,7 @@ jobs:
uses: swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- cache-provider: "buildjet"
+ # cache-provider: "buildjet"
- name: Install Clang & Mold
run: ./script/remote-server && ./script/install-mold 2.34.0
@@ -597,7 +597,7 @@ jobs:
timeout-minutes: 60
name: Linux x86_x64 release bundle
runs-on:
- - buildjet-16vcpu-ubuntu-2004 # ubuntu 20.04 for minimal glibc
+ - namespace-profile-16x32-ubuntu-2004 # ubuntu 20.04 for minimal glibc
if: |
startsWith(github.ref, 'refs/tags/v')
|| contains(github.event.pull_request.labels.*.name, 'run-bundling')
@@ -650,7 +650,7 @@ jobs:
timeout-minutes: 60
name: Linux arm64 release bundle
runs-on:
- - buildjet-32vcpu-ubuntu-2204-arm
+ - namespace-profile-32x64-ubuntu-2004-arm # ubuntu 20.04 for minimal glibc
if: |
startsWith(github.ref, 'refs/tags/v')
|| contains(github.event.pull_request.labels.*.name, 'run-bundling')
diff --git a/.github/workflows/deploy_cloudflare.yml b/.github/workflows/deploy_cloudflare.yml
index fe443d493e..df35d44ca9 100644
--- a/.github/workflows/deploy_cloudflare.yml
+++ b/.github/workflows/deploy_cloudflare.yml
@@ -9,7 +9,7 @@ jobs:
deploy-docs:
name: Deploy Docs
if: github.repository_owner == 'zed-industries'
- runs-on: buildjet-16vcpu-ubuntu-2204
+ runs-on: namespace-profile-16x32-ubuntu-2204
steps:
- name: Checkout repo
diff --git a/.github/workflows/deploy_collab.yml b/.github/workflows/deploy_collab.yml
index f7348a1069..ff2a3589e4 100644
--- a/.github/workflows/deploy_collab.yml
+++ b/.github/workflows/deploy_collab.yml
@@ -61,7 +61,7 @@ jobs:
- style
- tests
runs-on:
- - buildjet-16vcpu-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
steps:
- name: Install doctl
uses: digitalocean/action-doctl@v2
@@ -94,7 +94,7 @@ jobs:
needs:
- publish
runs-on:
- - buildjet-16vcpu-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
steps:
- name: Checkout repo
@@ -137,12 +137,14 @@ jobs:
export ZED_SERVICE_NAME=collab
export ZED_LOAD_BALANCER_SIZE_UNIT=$ZED_COLLAB_LOAD_BALANCER_SIZE_UNIT
+ export DATABASE_MAX_CONNECTIONS=850
envsubst < crates/collab/k8s/collab.template.yml | kubectl apply -f -
kubectl -n "$ZED_KUBE_NAMESPACE" rollout status deployment/$ZED_SERVICE_NAME --watch
echo "deployed ${ZED_SERVICE_NAME} to ${ZED_KUBE_NAMESPACE}"
export ZED_SERVICE_NAME=api
export ZED_LOAD_BALANCER_SIZE_UNIT=$ZED_API_LOAD_BALANCER_SIZE_UNIT
+ export DATABASE_MAX_CONNECTIONS=60
envsubst < crates/collab/k8s/collab.template.yml | kubectl apply -f -
kubectl -n "$ZED_KUBE_NAMESPACE" rollout status deployment/$ZED_SERVICE_NAME --watch
echo "deployed ${ZED_SERVICE_NAME} to ${ZED_KUBE_NAMESPACE}"
diff --git a/.github/workflows/eval.yml b/.github/workflows/eval.yml
index 2ad302a602..b5da9e7b7c 100644
--- a/.github/workflows/eval.yml
+++ b/.github/workflows/eval.yml
@@ -32,7 +32,7 @@ jobs:
github.repository_owner == 'zed-industries' &&
(github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'run-eval'))
runs-on:
- - buildjet-16vcpu-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
steps:
- name: Add Rust to the PATH
run: echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
@@ -46,7 +46,7 @@ jobs:
uses: swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- cache-provider: "buildjet"
+ # cache-provider: "buildjet"
- name: Install Linux dependencies
run: ./script/linux
diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml
index 6c3a97c163..e682ce5890 100644
--- a/.github/workflows/nix.yml
+++ b/.github/workflows/nix.yml
@@ -20,7 +20,7 @@ jobs:
matrix:
system:
- os: x86 Linux
- runner: buildjet-16vcpu-ubuntu-2204
+ runner: namespace-profile-16x32-ubuntu-2204
install_nix: true
- os: arm Mac
runner: [macOS, ARM64, test]
diff --git a/.github/workflows/randomized_tests.yml b/.github/workflows/randomized_tests.yml
index db4d44318e..de96c3df78 100644
--- a/.github/workflows/randomized_tests.yml
+++ b/.github/workflows/randomized_tests.yml
@@ -20,7 +20,7 @@ jobs:
name: Run randomized tests
if: github.repository_owner == 'zed-industries'
runs-on:
- - buildjet-16vcpu-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
steps:
- name: Install Node
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml
index c847149984..b3500a085b 100644
--- a/.github/workflows/release_nightly.yml
+++ b/.github/workflows/release_nightly.yml
@@ -128,7 +128,7 @@ jobs:
name: Create a Linux *.tar.gz bundle for x86
if: github.repository_owner == 'zed-industries'
runs-on:
- - buildjet-16vcpu-ubuntu-2004
+ - namespace-profile-16x32-ubuntu-2004 # ubuntu 20.04 for minimal glibc
needs: tests
steps:
- name: Checkout repo
@@ -168,7 +168,7 @@ jobs:
name: Create a Linux *.tar.gz bundle for ARM
if: github.repository_owner == 'zed-industries'
runs-on:
- - buildjet-32vcpu-ubuntu-2204-arm
+ - namespace-profile-32x64-ubuntu-2004-arm # ubuntu 20.04 for minimal glibc
needs: tests
steps:
- name: Checkout repo
diff --git a/.github/workflows/unit_evals.yml b/.github/workflows/unit_evals.yml
index cb4e39d151..2e03fb028f 100644
--- a/.github/workflows/unit_evals.yml
+++ b/.github/workflows/unit_evals.yml
@@ -23,7 +23,7 @@ jobs:
timeout-minutes: 60
name: Run unit evals
runs-on:
- - buildjet-16vcpu-ubuntu-2204
+ - namespace-profile-16x32-ubuntu-2204
steps:
- name: Add Rust to the PATH
run: echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
@@ -37,7 +37,7 @@ jobs:
uses: swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- cache-provider: "buildjet"
+ # cache-provider: "buildjet"
- name: Install Linux dependencies
run: ./script/linux
diff --git a/Cargo.lock b/Cargo.lock
index 4cf5a68f1d..6f434e8685 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17,6 +17,7 @@ dependencies = [
"indoc",
"itertools 0.14.0",
"language",
+ "language_model",
"markdown",
"parking_lot",
"project",
@@ -137,9 +138,9 @@ dependencies = [
[[package]]
name = "agent-client-protocol"
-version = "0.0.18"
+version = "0.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8e4c1dccb35e69d32566f0d11948d902f9942fc3f038821816c1150cf5925f4"
+checksum = "3fad72b7b8ee4331b3a4c8d43c107e982a4725564b4ee658ae5c4e79d2b486e8"
dependencies = [
"anyhow",
"futures 0.3.31",
@@ -150,6 +151,54 @@ dependencies = [
"serde_json",
]
+[[package]]
+name = "agent2"
+version = "0.1.0"
+dependencies = [
+ "acp_thread",
+ "agent-client-protocol",
+ "agent_servers",
+ "agent_settings",
+ "anyhow",
+ "assistant_tool",
+ "assistant_tools",
+ "client",
+ "clock",
+ "cloud_llm_client",
+ "collections",
+ "ctor",
+ "env_logger 0.11.8",
+ "fs",
+ "futures 0.3.31",
+ "gpui",
+ "gpui_tokio",
+ "handlebars 4.5.0",
+ "indoc",
+ "itertools 0.14.0",
+ "language",
+ "language_model",
+ "language_models",
+ "log",
+ "lsp",
+ "paths",
+ "pretty_assertions",
+ "project",
+ "prompt_store",
+ "reqwest_client",
+ "rust-embed",
+ "schemars",
+ "serde",
+ "serde_json",
+ "settings",
+ "smol",
+ "ui",
+ "util",
+ "uuid",
+ "watch",
+ "workspace-hack",
+ "worktree",
+]
+
[[package]]
name = "agent_servers"
version = "0.1.0"
@@ -214,6 +263,7 @@ dependencies = [
"acp_thread",
"agent",
"agent-client-protocol",
+ "agent2",
"agent_servers",
"agent_settings",
"ai_onboarding",
@@ -1371,7 +1421,7 @@ dependencies = [
"anyhow",
"arrayvec",
"log",
- "nom",
+ "nom 7.1.3",
"num-rational",
"v_frame",
]
@@ -2745,7 +2795,7 @@ version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
dependencies = [
- "nom",
+ "nom 7.1.3",
]
[[package]]
@@ -3031,17 +3081,22 @@ dependencies = [
"anyhow",
"cloud_api_types",
"futures 0.3.31",
+ "gpui",
+ "gpui_tokio",
"http_client",
"parking_lot",
"serde_json",
"workspace-hack",
+ "yawc",
]
[[package]]
name = "cloud_api_types"
version = "0.1.0"
dependencies = [
+ "anyhow",
"chrono",
+ "ciborium",
"cloud_llm_client",
"pretty_assertions",
"serde",
@@ -7457,9 +7512,9 @@ dependencies = [
[[package]]
name = "grid"
-version = "0.17.0"
+version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71b01d27060ad58be4663b9e4ac9e2d4806918e8876af8912afbddd1a91d5eaa"
+checksum = "12101ecc8225ea6d675bc70263074eab6169079621c2186fe0c66590b2df9681"
[[package]]
name = "group"
@@ -9078,6 +9133,7 @@ dependencies = [
"anyhow",
"base64 0.22.1",
"client",
+ "cloud_api_types",
"cloud_llm_client",
"collections",
"futures 0.3.31",
@@ -9208,6 +9264,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"async-compression",
+ "async-fs",
"async-tar",
"async-trait",
"chrono",
@@ -9239,9 +9296,11 @@ dependencies = [
"serde_json",
"serde_json_lenient",
"settings",
+ "sha2",
"smol",
"snippet_provider",
"task",
+ "tempfile",
"text",
"theme",
"toml 0.8.20",
@@ -10537,6 +10596,15 @@ dependencies = [
"minimal-lexical",
]
+[[package]]
+name = "nom"
+version = "8.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405"
+dependencies = [
+ "memchr",
+]
+
[[package]]
name = "noop_proc_macro"
version = "0.3.0"
@@ -12575,6 +12643,7 @@ dependencies = [
"editor",
"file_icons",
"git",
+ "git_ui",
"gpui",
"indexmap",
"language",
@@ -12588,6 +12657,7 @@ dependencies = [
"serde_json",
"settings",
"smallvec",
+ "telemetry",
"theme",
"ui",
"util",
@@ -15358,7 +15428,7 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790"
dependencies = [
- "nom",
+ "nom 7.1.3",
"unicode_categories",
]
@@ -16158,9 +16228,9 @@ dependencies = [
[[package]]
name = "taffy"
-version = "0.8.3"
+version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7aaef0ac998e6527d6d0d5582f7e43953bb17221ac75bb8eb2fcc2db3396db1c"
+checksum = "a13e5d13f79d558b5d353a98072ca8ca0e99da429467804de959aa8c83c9a004"
dependencies = [
"arrayvec",
"grid",
@@ -16561,9 +16631,8 @@ dependencies = [
[[package]]
name = "tiktoken-rs"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25563eeba904d770acf527e8b370fe9a5547bacd20ff84a0b6c3bc41288e5625"
+version = "0.8.0"
+source = "git+https://github.com/zed-industries/tiktoken-rs?rev=30c32a4522751699adeda0d5840c71c3b75ae73d#30c32a4522751699adeda0d5840c71c3b75ae73d"
dependencies = [
"anyhow",
"base64 0.22.1",
@@ -19934,7 +20003,7 @@ dependencies = [
"naga",
"nix 0.28.0",
"nix 0.29.0",
- "nom",
+ "nom 7.1.3",
"num-bigint",
"num-bigint-dig",
"num-integer",
@@ -20269,6 +20338,34 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
+[[package]]
+name = "yawc"
+version = "0.2.4"
+source = "git+https://github.com/deviant-forks/yawc?rev=1899688f3e69ace4545aceb97b2a13881cf26142#1899688f3e69ace4545aceb97b2a13881cf26142"
+dependencies = [
+ "base64 0.22.1",
+ "bytes 1.10.1",
+ "flate2",
+ "futures 0.3.31",
+ "http-body-util",
+ "hyper 1.6.0",
+ "hyper-util",
+ "js-sys",
+ "nom 8.0.0",
+ "pin-project",
+ "rand 0.8.5",
+ "sha1",
+ "thiserror 1.0.69",
+ "tokio",
+ "tokio-rustls 0.26.2",
+ "tokio-util",
+ "url",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+ "webpki-roots",
+]
+
[[package]]
name = "yazi"
version = "0.2.1"
@@ -20375,7 +20472,7 @@ dependencies = [
[[package]]
name = "zed"
-version = "0.199.0"
+version = "0.200.0"
dependencies = [
"activity_indicator",
"agent",
diff --git a/Cargo.toml b/Cargo.toml
index 733db92ce9..998e727602 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,6 +4,7 @@ members = [
"crates/acp_thread",
"crates/activity_indicator",
"crates/agent",
+ "crates/agent2",
"crates/agent_servers",
"crates/agent_settings",
"crates/agent_ui",
@@ -229,6 +230,7 @@ edition = "2024"
acp_thread = { path = "crates/acp_thread" }
agent = { path = "crates/agent" }
+agent2 = { path = "crates/agent2" }
activity_indicator = { path = "crates/activity_indicator" }
agent_ui = { path = "crates/agent_ui" }
agent_settings = { path = "crates/agent_settings" }
@@ -423,7 +425,7 @@ zlog_settings = { path = "crates/zlog_settings" }
#
agentic-coding-protocol = "0.0.10"
-agent-client-protocol = "0.0.18"
+agent-client-protocol = "0.0.23"
aho-corasick = "1.1"
alacritty_terminal = { git = "https://github.com/zed-industries/alacritty.git", branch = "add-hush-login-flag" }
any_vec = "0.14"
@@ -459,6 +461,7 @@ bytes = "1.0"
cargo_metadata = "0.19"
cargo_toml = "0.21"
chrono = { version = "0.4", features = ["serde"] }
+ciborium = "0.2"
circular-buffer = "1.0"
clap = { version = "4.4", features = ["derive"] }
cocoa = "0.26"
@@ -598,7 +601,7 @@ sysinfo = "0.31.0"
take-until = "0.2.0"
tempfile = "3.20.0"
thiserror = "2.0.12"
-tiktoken-rs = "0.7.0"
+tiktoken-rs = { git = "https://github.com/zed-industries/tiktoken-rs", rev = "30c32a4522751699adeda0d5840c71c3b75ae73d" }
time = { version = "0.3", features = [
"macros",
"parsing",
@@ -658,6 +661,9 @@ which = "6.0.0"
windows-core = "0.61"
wit-component = "0.221"
workspace-hack = "0.1.0"
+# We can switch back to the published version once https://github.com/infinitefield/yawc/pull/16 is merged and a new
+# version is released.
+yawc = { git = "https://github.com/deviant-forks/yawc", rev = "1899688f3e69ace4545aceb97b2a13881cf26142" }
zstd = "0.11"
[workspace.dependencies.async-stripe]
diff --git a/Procfile b/Procfile
index 5f1231b90a..b3f13f66a6 100644
--- a/Procfile
+++ b/Procfile
@@ -1,3 +1,4 @@
collab: RUST_LOG=${RUST_LOG:-info} cargo run --package=collab serve all
+cloud: cd ../cloud; cargo make dev
livekit: livekit-server --dev
blob_store: ./script/run-local-minio
diff --git a/assets/icons/file_icons/puppet.svg b/assets/icons/file_icons/puppet.svg
new file mode 100644
index 0000000000..cdf903bc62
--- /dev/null
+++ b/assets/icons/file_icons/puppet.svg
@@ -0,0 +1 @@
+
diff --git a/assets/icons/tool_bulb.svg b/assets/icons/tool_think.svg
similarity index 100%
rename from assets/icons/tool_bulb.svg
rename to assets/icons/tool_think.svg
diff --git a/assets/images/pro_trial_stamp.svg b/assets/images/pro_trial_stamp.svg
new file mode 100644
index 0000000000..a3f9095120
--- /dev/null
+++ b/assets/images/pro_trial_stamp.svg
@@ -0,0 +1 @@
+
diff --git a/assets/images/pro_user_stamp.svg b/assets/images/pro_user_stamp.svg
new file mode 100644
index 0000000000..d037a9e833
--- /dev/null
+++ b/assets/images/pro_user_stamp.svg
@@ -0,0 +1 @@
+
diff --git a/assets/keymaps/default-linux.json b/assets/keymaps/default-linux.json
index 81f5c695a2..c436b1a8fb 100644
--- a/assets/keymaps/default-linux.json
+++ b/assets/keymaps/default-linux.json
@@ -332,7 +332,9 @@
"enter": "agent::Chat",
"up": "agent::PreviousHistoryMessage",
"down": "agent::NextHistoryMessage",
- "shift-ctrl-r": "agent::OpenAgentDiff"
+ "shift-ctrl-r": "agent::OpenAgentDiff",
+ "ctrl-shift-y": "agent::KeepAll",
+ "ctrl-shift-n": "agent::RejectAll"
}
},
{
@@ -846,6 +848,7 @@
"ctrl-delete": ["project_panel::Delete", { "skip_prompt": false }],
"alt-ctrl-r": "project_panel::RevealInFileManager",
"ctrl-shift-enter": "project_panel::OpenWithSystem",
+ "alt-d": "project_panel::CompareMarkedFiles",
"shift-find": "project_panel::NewSearchInDirectory",
"ctrl-alt-shift-f": "project_panel::NewSearchInDirectory",
"shift-down": "menu::SelectNext",
@@ -1100,6 +1103,13 @@
"ctrl-enter": "menu::Confirm"
}
},
+ {
+ "context": "OnboardingAiConfigurationModal",
+ "use_key_equivalents": true,
+ "bindings": {
+ "escape": "menu::Cancel"
+ }
+ },
{
"context": "Diagnostics",
"use_key_equivalents": true,
@@ -1176,7 +1186,8 @@
"ctrl-1": "onboarding::ActivateBasicsPage",
"ctrl-2": "onboarding::ActivateEditingPage",
"ctrl-3": "onboarding::ActivateAISetupPage",
- "ctrl-escape": "onboarding::Finish"
+ "ctrl-escape": "onboarding::Finish",
+ "alt-tab": "onboarding::SignIn"
}
}
]
diff --git a/assets/keymaps/default-macos.json b/assets/keymaps/default-macos.json
index 69958fd1f8..960bac1479 100644
--- a/assets/keymaps/default-macos.json
+++ b/assets/keymaps/default-macos.json
@@ -384,7 +384,9 @@
"enter": "agent::Chat",
"up": "agent::PreviousHistoryMessage",
"down": "agent::NextHistoryMessage",
- "shift-ctrl-r": "agent::OpenAgentDiff"
+ "shift-ctrl-r": "agent::OpenAgentDiff",
+ "cmd-shift-y": "agent::KeepAll",
+ "cmd-shift-n": "agent::RejectAll"
}
},
{
@@ -905,6 +907,7 @@
"cmd-delete": ["project_panel::Delete", { "skip_prompt": false }],
"alt-cmd-r": "project_panel::RevealInFileManager",
"ctrl-shift-enter": "project_panel::OpenWithSystem",
+ "alt-d": "project_panel::CompareMarkedFiles",
"cmd-alt-backspace": ["project_panel::Delete", { "skip_prompt": false }],
"cmd-alt-shift-f": "project_panel::NewSearchInDirectory",
"shift-down": "menu::SelectNext",
@@ -1202,6 +1205,13 @@
"cmd-enter": "menu::Confirm"
}
},
+ {
+ "context": "OnboardingAiConfigurationModal",
+ "use_key_equivalents": true,
+ "bindings": {
+ "escape": "menu::Cancel"
+ }
+ },
{
"context": "Diagnostics",
"use_key_equivalents": true,
@@ -1278,7 +1288,8 @@
"cmd-1": "onboarding::ActivateBasicsPage",
"cmd-2": "onboarding::ActivateEditingPage",
"cmd-3": "onboarding::ActivateAISetupPage",
- "cmd-escape": "onboarding::Finish"
+ "cmd-escape": "onboarding::Finish",
+ "alt-tab": "onboarding::SignIn"
}
}
]
diff --git a/assets/keymaps/vim.json b/assets/keymaps/vim.json
index 3096ec40bb..3fca75b572 100644
--- a/assets/keymaps/vim.json
+++ b/assets/keymaps/vim.json
@@ -815,6 +815,7 @@
"ctrl-x": "project_panel::OpenSplitUp",
"x": "project_panel::RevealInFileManager",
"s": "project_panel::OpenWithSystem",
+ "z d": "project_panel::CompareMarkedFiles",
"] c": "project_panel::SelectNextGitEntry",
"[ c": "project_panel::SelectPrevGitEntry",
"] d": "project_panel::SelectNextDiagnostic",
diff --git a/assets/settings/default.json b/assets/settings/default.json
index 4734b5d118..9c579b858d 100644
--- a/assets/settings/default.json
+++ b/assets/settings/default.json
@@ -596,6 +596,8 @@
// when a corresponding project entry becomes active.
// Gitignored entries are never auto revealed.
"auto_reveal_entries": true,
+ // Whether the project panel should open on startup.
+ "starts_open": true,
// Whether to fold directories automatically and show compact folders
// (e.g. "a/b/c" ) when a directory has only one subdirectory inside.
"auto_fold_dirs": true,
@@ -1171,6 +1173,9 @@
// Sets a delay after which the inline blame information is shown.
// Delay is restarted with every cursor movement.
"delay_ms": 0,
+ // The amount of padding between the end of the source line and the start
+ // of the inline blame in units of em widths.
+ "padding": 7,
// Whether or not to display the git commit summary on the same line.
"show_commit_summary": false,
// The minimum column number to show the inline blame information at
@@ -1233,6 +1238,11 @@
// 2. hour24
"hour_format": "hour12"
},
+ // Status bar-related settings.
+ "status_bar": {
+ // Whether to show the active language button in the status bar.
+ "active_language_button": true
+ },
// Settings specific to the terminal
"terminal": {
// What shell to use when opening a terminal. May take 3 values:
diff --git a/crates/acp_thread/Cargo.toml b/crates/acp_thread/Cargo.toml
index 225597415c..1831c7e473 100644
--- a/crates/acp_thread/Cargo.toml
+++ b/crates/acp_thread/Cargo.toml
@@ -25,6 +25,7 @@ futures.workspace = true
gpui.workspace = true
itertools.workspace = true
language.workspace = true
+language_model.workspace = true
markdown.workspace = true
project.workspace = true
serde.workspace = true
diff --git a/crates/acp_thread/src/acp_thread.rs b/crates/acp_thread/src/acp_thread.rs
index 44190a4860..1df0e1def7 100644
--- a/crates/acp_thread/src/acp_thread.rs
+++ b/crates/acp_thread/src/acp_thread.rs
@@ -1,23 +1,23 @@
mod connection;
+mod diff;
+
pub use connection::*;
+pub use diff::*;
use agent_client_protocol as acp;
use anyhow::{Context as _, Result};
use assistant_tool::ActionLog;
-use buffer_diff::BufferDiff;
-use editor::{Bias, MultiBuffer, PathKey};
+use editor::Bias;
use futures::{FutureExt, channel::oneshot, future::BoxFuture};
use gpui::{AppContext, Context, Entity, EventEmitter, SharedString, Task};
use itertools::Itertools;
-use language::{
- Anchor, Buffer, BufferSnapshot, Capability, LanguageRegistry, OffsetRangeExt as _, Point,
- text_diff,
-};
+use language::{Anchor, Buffer, BufferSnapshot, LanguageRegistry, Point, text_diff};
use markdown::Markdown;
use project::{AgentLocation, Project};
use std::collections::HashMap;
use std::error::Error;
use std::fmt::Formatter;
+use std::process::ExitStatus;
use std::rc::Rc;
use std::{
fmt::Display,
@@ -139,7 +139,7 @@ impl AgentThreadEntry {
}
}
-    pub fn diffs(&self) -> impl Iterator<Item = &Diff> {
+    pub fn diffs(&self) -> impl Iterator<Item = &Entity<Diff>> {
if let AgentThreadEntry::ToolCall(call) = self {
itertools::Either::Left(call.diffs())
} else {
@@ -165,6 +165,7 @@ pub struct ToolCall {
pub status: ToolCallStatus,
     pub locations: Vec<acp::ToolCallLocation>,
     pub raw_input: Option<serde_json::Value>,
+    pub raw_output: Option<serde_json::Value>,
}
impl ToolCall {
@@ -193,10 +194,11 @@ impl ToolCall {
locations: tool_call.locations,
status,
raw_input: tool_call.raw_input,
+ raw_output: tool_call.raw_output,
}
}
- fn update(
+ fn update_fields(
&mut self,
fields: acp::ToolCallUpdateFields,
language_registry: Arc,
@@ -209,6 +211,7 @@ impl ToolCall {
content,
locations,
raw_input,
+ raw_output,
} = fields;
if let Some(kind) = kind {
@@ -220,7 +223,9 @@ impl ToolCall {
}
if let Some(title) = title {
- self.label = cx.new(|cx| Markdown::new_text(title.into(), cx));
+ self.label.update(cx, |label, cx| {
+ label.replace(title, cx);
+ });
}
if let Some(content) = content {
@@ -237,9 +242,13 @@ impl ToolCall {
if let Some(raw_input) = raw_input {
self.raw_input = Some(raw_input);
}
+
+ if let Some(raw_output) = raw_output {
+ self.raw_output = Some(raw_output);
+ }
}
-    pub fn diffs(&self) -> impl Iterator<Item = &Diff> {
+    pub fn diffs(&self) -> impl Iterator<Item = &Entity<Diff>> {
self.content.iter().filter_map(|content| match content {
ToolCallContent::ContentBlock { .. } => None,
ToolCallContent::Diff { diff } => Some(diff),
@@ -379,7 +388,7 @@ impl ContentBlock {
#[derive(Debug)]
pub enum ToolCallContent {
ContentBlock { content: ContentBlock },
- Diff { diff: Diff },
+    Diff { diff: Entity<Diff> },
}
impl ToolCallContent {
@@ -393,7 +402,7 @@ impl ToolCallContent {
content: ContentBlock::new(content, &language_registry, cx),
},
acp::ToolCallContent::Diff { diff } => Self::Diff {
- diff: Diff::from_acp(diff, language_registry, cx),
+ diff: cx.new(|cx| Diff::from_acp(diff, language_registry, cx)),
},
}
}
@@ -401,109 +410,44 @@ impl ToolCallContent {
pub fn to_markdown(&self, cx: &App) -> String {
match self {
Self::ContentBlock { content } => content.to_markdown(cx).to_string(),
- Self::Diff { diff } => diff.to_markdown(cx),
+ Self::Diff { diff } => diff.read(cx).to_markdown(cx),
}
}
}
-#[derive(Debug)]
-pub struct Diff {
- pub multibuffer: Entity,
- pub path: PathBuf,
- pub new_buffer: Entity,
- pub old_buffer: Entity,
- _task: Task>,
+#[derive(Debug, PartialEq)]
+pub enum ToolCallUpdate {
+ UpdateFields(acp::ToolCallUpdate),
+ UpdateDiff(ToolCallUpdateDiff),
}
-impl Diff {
- pub fn from_acp(
- diff: acp::Diff,
- language_registry: Arc,
- cx: &mut App,
- ) -> Self {
- let acp::Diff {
- path,
- old_text,
- new_text,
- } = diff;
-
- let multibuffer = cx.new(|_cx| MultiBuffer::without_headers(Capability::ReadOnly));
-
- let new_buffer = cx.new(|cx| Buffer::local(new_text, cx));
- let old_buffer = cx.new(|cx| Buffer::local(old_text.unwrap_or("".into()), cx));
- let new_buffer_snapshot = new_buffer.read(cx).text_snapshot();
- let old_buffer_snapshot = old_buffer.read(cx).snapshot();
- let buffer_diff = cx.new(|cx| BufferDiff::new(&new_buffer_snapshot, cx));
- let diff_task = buffer_diff.update(cx, |diff, cx| {
- diff.set_base_text(
- old_buffer_snapshot,
- Some(language_registry.clone()),
- new_buffer_snapshot,
- cx,
- )
- });
-
- let task = cx.spawn({
- let multibuffer = multibuffer.clone();
- let path = path.clone();
- let new_buffer = new_buffer.clone();
- async move |cx| {
- diff_task.await?;
-
- multibuffer
- .update(cx, |multibuffer, cx| {
- let hunk_ranges = {
- let buffer = new_buffer.read(cx);
- let diff = buffer_diff.read(cx);
- diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer, cx)
- .map(|diff_hunk| diff_hunk.buffer_range.to_point(&buffer))
- .collect::>()
- };
-
- multibuffer.set_excerpts_for_path(
- PathKey::for_buffer(&new_buffer, cx),
- new_buffer.clone(),
- hunk_ranges,
- editor::DEFAULT_MULTIBUFFER_CONTEXT,
- cx,
- );
- multibuffer.add_diff(buffer_diff.clone(), cx);
- })
- .log_err();
-
- if let Some(language) = language_registry
- .language_for_file_path(&path)
- .await
- .log_err()
- {
- new_buffer.update(cx, |buffer, cx| buffer.set_language(Some(language), cx))?;
- }
-
- anyhow::Ok(())
- }
- });
-
- Self {
- multibuffer,
- path,
- new_buffer,
- old_buffer,
- _task: task,
+impl ToolCallUpdate {
+ fn id(&self) -> &acp::ToolCallId {
+ match self {
+ Self::UpdateFields(update) => &update.id,
+ Self::UpdateDiff(diff) => &diff.id,
}
}
+}
- fn to_markdown(&self, cx: &App) -> String {
- let buffer_text = self
- .multibuffer
- .read(cx)
- .all_buffers()
- .iter()
- .map(|buffer| buffer.read(cx).text())
- .join("\n");
- format!("Diff: {}\n```\n{}\n```\n", self.path.display(), buffer_text)
+impl From for ToolCallUpdate {
+ fn from(update: acp::ToolCallUpdate) -> Self {
+ Self::UpdateFields(update)
}
}
+impl From for ToolCallUpdate {
+ fn from(diff: ToolCallUpdateDiff) -> Self {
+ Self::UpdateDiff(diff)
+ }
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ToolCallUpdateDiff {
+ pub id: acp::ToolCallId,
+    pub diff: Entity<Diff>,
+}
+
#[derive(Debug, Default)]
pub struct Plan {
pub entries: Vec,
@@ -556,7 +500,7 @@ pub struct PlanEntry {
impl PlanEntry {
pub fn from_acp(entry: acp::PlanEntry, cx: &mut App) -> Self {
Self {
- content: cx.new(|cx| Markdown::new_text(entry.content.into(), cx)),
+ content: cx.new(|cx| Markdown::new(entry.content.into(), None, None, cx)),
priority: entry.priority,
status: entry.status,
}
@@ -581,6 +525,7 @@ pub enum AcpThreadEvent {
ToolAuthorizationRequired,
Stopped,
Error,
+ ServerExited(ExitStatus),
}
impl EventEmitter for AcpThread {}
@@ -654,6 +599,10 @@ impl AcpThread {
&self.entries
}
+ pub fn session_id(&self) -> &acp::SessionId {
+ &self.session_id
+ }
+
pub fn status(&self) -> ThreadStatus {
if self.send_task.is_some() {
if self.waiting_for_tool_confirmation() {
@@ -794,15 +743,26 @@ impl AcpThread {
pub fn update_tool_call(
&mut self,
- update: acp::ToolCallUpdate,
+ update: impl Into,
cx: &mut Context,
) -> Result<()> {
+ let update = update.into();
let languages = self.project.read(cx).languages().clone();
let (ix, current_call) = self
- .tool_call_mut(&update.id)
+ .tool_call_mut(update.id())
.context("Tool call not found")?;
- current_call.update(update.fields, languages, cx);
+ match update {
+ ToolCallUpdate::UpdateFields(update) => {
+ current_call.update_fields(update.fields, languages, cx);
+ }
+ ToolCallUpdate::UpdateDiff(update) => {
+ current_call.content.clear();
+ current_call
+ .content
+ .push(ToolCallContent::Diff { diff: update.diff });
+ }
+ }
cx.emit(AcpThreadEvent::EntryUpdated(ix));
@@ -890,7 +850,7 @@ impl AcpThread {
});
}
- pub fn request_tool_call_permission(
+ pub fn request_tool_call_authorization(
&mut self,
tool_call: acp::ToolCall,
options: Vec,
@@ -965,13 +925,26 @@ impl AcpThread {
}
pub fn update_plan(&mut self, request: acp::Plan, cx: &mut Context) {
- self.plan = Plan {
- entries: request
- .entries
- .into_iter()
- .map(|entry| PlanEntry::from_acp(entry, cx))
- .collect(),
- };
+ let new_entries_len = request.entries.len();
+ let mut new_entries = request.entries.into_iter();
+
+ // Reuse existing markdown to prevent flickering
+ for (old, new) in self.plan.entries.iter_mut().zip(new_entries.by_ref()) {
+ let PlanEntry {
+ content,
+ priority,
+ status,
+ } = old;
+ content.update(cx, |old, cx| {
+ old.replace(new.content, cx);
+ });
+ *priority = new.priority;
+ *status = new.status;
+ }
+ for new in new_entries {
+ self.plan.entries.push(PlanEntry::from_acp(new, cx))
+ }
+ self.plan.entries.truncate(new_entries_len);
cx.notify();
}
@@ -1032,8 +1005,9 @@ impl AcpThread {
)
})?
.await;
+
tx.send(result).log_err();
- this.update(cx, |this, _cx| this.send_task.take())?;
+
anyhow::Ok(())
}
.await
@@ -1046,7 +1020,23 @@ impl AcpThread {
.log_err();
Err(e)?
}
- _ => {
+ result => {
+ let cancelled = matches!(
+ result,
+ Ok(Ok(acp::PromptResponse {
+ stop_reason: acp::StopReason::Cancelled
+ }))
+ );
+
+ // We only take the task if the current prompt wasn't cancelled.
+ //
+ // This prompt may have been cancelled because another one was sent
+ // while it was still generating. In these cases, dropping `send_task`
+ // would cause the next generation to be cancelled.
+ if !cancelled {
+ this.update(cx, |this, _cx| this.send_task.take()).ok();
+ }
+
this.update(cx, |_, cx| cx.emit(AcpThreadEvent::Stopped))
.log_err();
Ok(())
@@ -1229,6 +1219,10 @@ impl AcpThread {
pub fn to_markdown(&self, cx: &App) -> String {
self.entries.iter().map(|e| e.to_markdown(cx)).collect()
}
+
+ pub fn emit_server_exited(&mut self, status: ExitStatus, cx: &mut Context) {
+ cx.emit(AcpThreadEvent::ServerExited(status));
+ }
}
#[cfg(test)]
@@ -1371,6 +1365,9 @@ mod tests {
cx,
)
.unwrap();
+ })?;
+ Ok(acp::PromptResponse {
+ stop_reason: acp::StopReason::EndTurn,
})
}
.boxed_local()
@@ -1443,7 +1440,9 @@ mod tests {
.unwrap()
.await
.unwrap();
- Ok(())
+ Ok(acp::PromptResponse {
+ stop_reason: acp::StopReason::EndTurn,
+ })
}
.boxed_local()
},
@@ -1510,13 +1509,16 @@ mod tests {
content: vec![],
locations: vec![],
raw_input: None,
+ raw_output: None,
}),
cx,
)
})
.unwrap()
.unwrap();
- Ok(())
+ Ok(acp::PromptResponse {
+ stop_reason: acp::StopReason::EndTurn,
+ })
}
.boxed_local()
}
@@ -1620,13 +1622,16 @@ mod tests {
}],
locations: vec![],
raw_input: None,
+ raw_output: None,
}),
cx,
)
})
.unwrap()
.unwrap();
- Ok(())
+ Ok(acp::PromptResponse {
+ stop_reason: acp::StopReason::EndTurn,
+ })
}
.boxed_local()
}
@@ -1680,7 +1685,7 @@ mod tests {
acp::PromptRequest,
WeakEntity,
AsyncApp,
- ) -> LocalBoxFuture<'static, Result<()>>
+    ) -> LocalBoxFuture<'static, Result<acp::PromptResponse>>
+ 'static,
>,
>,
@@ -1707,7 +1712,7 @@ mod tests {
acp::PromptRequest,
WeakEntity,
AsyncApp,
- ) -> LocalBoxFuture<'static, Result<()>>
+    ) -> LocalBoxFuture<'static, Result<acp::PromptResponse>>
+ 'static,
) -> Self {
self.on_user_message.replace(Rc::new(handler));
@@ -1749,7 +1754,11 @@ mod tests {
}
}
-    fn prompt(&self, params: acp::PromptRequest, cx: &mut App) -> Task<Result<()>> {
+ fn prompt(
+ &self,
+ params: acp::PromptRequest,
+ cx: &mut App,
+    ) -> Task<Result<acp::PromptResponse>> {
let sessions = self.sessions.lock();
let thread = sessions.get(¶ms.session_id).unwrap();
if let Some(handler) = &self.on_user_message {
@@ -1757,7 +1766,9 @@ mod tests {
let thread = thread.clone();
cx.spawn(async move |cx| handler(params, thread, cx.clone()).await)
} else {
- Task::ready(Ok(()))
+ Task::ready(Ok(acp::PromptResponse {
+ stop_reason: acp::StopReason::EndTurn,
+ }))
}
}
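Aside on the hunk above: the new prompt flow resolves with an `acp::PromptResponse` carrying a `StopReason`, and `send_task` is now only cleared when the turn was not cancelled, so a newer prompt that pre-empted the old one keeps running. A minimal, dependency-free sketch of that decision; the types below are stand-ins, not the real `agent-client-protocol` or gpui ones:

```rust
// Simplified stand-ins for acp::StopReason / acp::PromptResponse and the
// thread's in-flight send task.
#[derive(Debug, PartialEq)]
enum StopReason {
    EndTurn,
    Cancelled,
}

struct PromptResponse {
    stop_reason: StopReason,
}

struct Thread {
    // Some(..) while a prompt is being processed.
    send_task: Option<&'static str>,
}

impl Thread {
    fn on_turn_finished(&mut self, result: &Result<PromptResponse, String>) {
        let cancelled = matches!(
            result,
            Ok(PromptResponse { stop_reason: StopReason::Cancelled })
        );
        // Only drop the in-flight task when the turn wasn't cancelled; a
        // cancelled turn usually means a newer prompt replaced it and is
        // still relying on `send_task`.
        if !cancelled {
            self.send_task.take();
        }
    }
}

fn main() {
    let mut thread = Thread { send_task: Some("turn #2") };
    thread.on_turn_finished(&Ok(PromptResponse { stop_reason: StopReason::Cancelled }));
    assert!(thread.send_task.is_some(), "newer turn keeps running");
    thread.on_turn_finished(&Ok(PromptResponse { stop_reason: StopReason::EndTurn }));
    assert!(thread.send_task.is_none(), "finished turn clears the task");
    println!("ok");
}
```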
diff --git a/crates/acp_thread/src/connection.rs b/crates/acp_thread/src/connection.rs
index 929500a67b..cf06563bee 100644
--- a/crates/acp_thread/src/connection.rs
+++ b/crates/acp_thread/src/connection.rs
@@ -1,13 +1,61 @@
-use std::{error::Error, fmt, path::Path, rc::Rc};
+use std::{error::Error, fmt, path::Path, rc::Rc, sync::Arc};
use agent_client_protocol::{self as acp};
use anyhow::Result;
use gpui::{AsyncApp, Entity, Task};
+use language_model::LanguageModel;
use project::Project;
use ui::App;
use crate::AcpThread;
+/// Trait for agents that support listing, selecting, and querying language models.
+///
+/// This is an optional capability; agents indicate support via [AgentConnection::model_selector].
+pub trait ModelSelector: 'static {
+ /// Lists all available language models for this agent.
+ ///
+ /// # Parameters
+ /// - `cx`: The GPUI app context for async operations and global access.
+ ///
+ /// # Returns
+ /// A task resolving to the list of models or an error (e.g., if no models are configured).
+    fn list_models(&self, cx: &mut AsyncApp) -> Task<Result<Vec<Arc<dyn LanguageModel>>>>;
+
+ /// Selects a model for a specific session (thread).
+ ///
+ /// This sets the default model for future interactions in the session.
+ /// If the session doesn't exist or the model is invalid, it returns an error.
+ ///
+ /// # Parameters
+ /// - `session_id`: The ID of the session (thread) to apply the model to.
+ /// - `model`: The model to select (should be one from [list_models]).
+ /// - `cx`: The GPUI app context.
+ ///
+ /// # Returns
+ /// A task resolving to `Ok(())` on success or an error.
+ fn select_model(
+ &self,
+ session_id: acp::SessionId,
+ model: Arc,
+ cx: &mut AsyncApp,
+ ) -> Task>;
+
+ /// Retrieves the currently selected model for a specific session (thread).
+ ///
+ /// # Parameters
+ /// - `session_id`: The ID of the session (thread) to query.
+ /// - `cx`: The GPUI app context.
+ ///
+ /// # Returns
+ /// A task resolving to the selected model (always set) or an error (e.g., session not found).
+ fn selected_model(
+ &self,
+ session_id: &acp::SessionId,
+ cx: &mut AsyncApp,
+    ) -> Task<Result<Arc<dyn LanguageModel>>>;
+}
+
pub trait AgentConnection {
fn new_thread(
self: Rc,
@@ -20,9 +68,18 @@ pub trait AgentConnection {
fn authenticate(&self, method: acp::AuthMethodId, cx: &mut App) -> Task>;
-    fn prompt(&self, params: acp::PromptRequest, cx: &mut App) -> Task<Result<()>>;
+    fn prompt(&self, params: acp::PromptRequest, cx: &mut App)
+        -> Task<Result<acp::PromptResponse>>;
fn cancel(&self, session_id: &acp::SessionId, cx: &mut App);
+
+ /// Returns this agent as an [Rc] if the model selection capability is supported.
+ ///
+ /// If the agent does not support model selection, returns [None].
+ /// This allows sharing the selector in UI components.
+    fn model_selector(&self) -> Option<Rc<dyn ModelSelector>> {
+ None // Default impl for agents that don't support it
+ }
}
#[derive(Debug)]
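`ModelSelector` is exposed as an optional capability: `AgentConnection::model_selector()` defaults to `None`, and callers check for support before offering model-selection UI. A rough, synchronous sketch of that calling pattern with stand-in types (no gpui tasks or async plumbing):

```rust
use std::rc::Rc;

// Stand-ins for the async, gpui-backed traits shown in the hunk above.
trait ModelSelector {
    fn list_models(&self) -> Vec<String>;
}

trait AgentConnection {
    // Optional capability: agents that can't switch models return None.
    fn model_selector(&self) -> Option<Rc<dyn ModelSelector>> {
        None
    }
}

struct FixedModelAgent;
impl AgentConnection for FixedModelAgent {}

struct MultiModelAgent;
impl ModelSelector for MultiModelAgent {
    fn list_models(&self) -> Vec<String> {
        vec!["model-a".into(), "model-b".into()]
    }
}
impl AgentConnection for MultiModelAgent {
    fn model_selector(&self) -> Option<Rc<dyn ModelSelector>> {
        Some(Rc::new(MultiModelAgent))
    }
}

fn model_picker_items(conn: &dyn AgentConnection) -> Vec<String> {
    // UI only shows a picker when the capability is present.
    match conn.model_selector() {
        Some(selector) => selector.list_models(),
        None => Vec::new(),
    }
}

fn main() {
    assert!(model_picker_items(&FixedModelAgent).is_empty());
    assert_eq!(model_picker_items(&MultiModelAgent).len(), 2);
}
```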
diff --git a/crates/acp_thread/src/diff.rs b/crates/acp_thread/src/diff.rs
new file mode 100644
index 0000000000..9cc6271360
--- /dev/null
+++ b/crates/acp_thread/src/diff.rs
@@ -0,0 +1,388 @@
+use agent_client_protocol as acp;
+use anyhow::Result;
+use buffer_diff::{BufferDiff, BufferDiffSnapshot};
+use editor::{MultiBuffer, PathKey};
+use gpui::{App, AppContext, AsyncApp, Context, Entity, Subscription, Task};
+use itertools::Itertools;
+use language::{
+ Anchor, Buffer, Capability, LanguageRegistry, OffsetRangeExt as _, Point, Rope, TextBuffer,
+};
+use std::{
+ cmp::Reverse,
+ ops::Range,
+ path::{Path, PathBuf},
+ sync::Arc,
+};
+use util::ResultExt;
+
+pub enum Diff {
+ Pending(PendingDiff),
+ Finalized(FinalizedDiff),
+}
+
+impl Diff {
+ pub fn from_acp(
+ diff: acp::Diff,
+ language_registry: Arc,
+ cx: &mut Context,
+ ) -> Self {
+ let acp::Diff {
+ path,
+ old_text,
+ new_text,
+ } = diff;
+
+ let multibuffer = cx.new(|_cx| MultiBuffer::without_headers(Capability::ReadOnly));
+
+ let new_buffer = cx.new(|cx| Buffer::local(new_text, cx));
+ let old_buffer = cx.new(|cx| Buffer::local(old_text.unwrap_or("".into()), cx));
+ let new_buffer_snapshot = new_buffer.read(cx).text_snapshot();
+ let buffer_diff = cx.new(|cx| BufferDiff::new(&new_buffer_snapshot, cx));
+
+ let task = cx.spawn({
+ let multibuffer = multibuffer.clone();
+ let path = path.clone();
+ async move |_, cx| {
+ let language = language_registry
+ .language_for_file_path(&path)
+ .await
+ .log_err();
+
+ new_buffer.update(cx, |buffer, cx| buffer.set_language(language.clone(), cx))?;
+
+ let old_buffer_snapshot = old_buffer.update(cx, |buffer, cx| {
+ buffer.set_language(language, cx);
+ buffer.snapshot()
+ })?;
+
+ buffer_diff
+ .update(cx, |diff, cx| {
+ diff.set_base_text(
+ old_buffer_snapshot,
+ Some(language_registry),
+ new_buffer_snapshot,
+ cx,
+ )
+ })?
+ .await?;
+
+ multibuffer
+ .update(cx, |multibuffer, cx| {
+ let hunk_ranges = {
+ let buffer = new_buffer.read(cx);
+ let diff = buffer_diff.read(cx);
+ diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer, cx)
+ .map(|diff_hunk| diff_hunk.buffer_range.to_point(&buffer))
+ .collect::>()
+ };
+
+ multibuffer.set_excerpts_for_path(
+ PathKey::for_buffer(&new_buffer, cx),
+ new_buffer.clone(),
+ hunk_ranges,
+ editor::DEFAULT_MULTIBUFFER_CONTEXT,
+ cx,
+ );
+ multibuffer.add_diff(buffer_diff, cx);
+ })
+ .log_err();
+
+ anyhow::Ok(())
+ }
+ });
+
+ Self::Finalized(FinalizedDiff {
+ multibuffer,
+ path,
+ _update_diff: task,
+ })
+ }
+
+ pub fn new(buffer: Entity, cx: &mut Context) -> Self {
+ let buffer_snapshot = buffer.read(cx).snapshot();
+ let base_text = buffer_snapshot.text();
+ let language_registry = buffer.read(cx).language_registry();
+ let text_snapshot = buffer.read(cx).text_snapshot();
+ let buffer_diff = cx.new(|cx| {
+ let mut diff = BufferDiff::new(&text_snapshot, cx);
+ let _ = diff.set_base_text(
+ buffer_snapshot.clone(),
+ language_registry,
+ text_snapshot,
+ cx,
+ );
+ diff
+ });
+
+ let multibuffer = cx.new(|cx| {
+ let mut multibuffer = MultiBuffer::without_headers(Capability::ReadOnly);
+ multibuffer.add_diff(buffer_diff.clone(), cx);
+ multibuffer
+ });
+
+ Self::Pending(PendingDiff {
+ multibuffer,
+ base_text: Arc::new(base_text),
+ _subscription: cx.observe(&buffer, |this, _, cx| {
+ if let Diff::Pending(diff) = this {
+ diff.update(cx);
+ }
+ }),
+ buffer,
+ diff: buffer_diff,
+ revealed_ranges: Vec::new(),
+ update_diff: Task::ready(Ok(())),
+ })
+ }
+
+ pub fn reveal_range(&mut self, range: Range, cx: &mut Context) {
+ if let Self::Pending(diff) = self {
+ diff.reveal_range(range, cx);
+ }
+ }
+
+ pub fn finalize(&mut self, cx: &mut Context) {
+ if let Self::Pending(diff) = self {
+ *self = Self::Finalized(diff.finalize(cx));
+ }
+ }
+
+ pub fn multibuffer(&self) -> &Entity {
+ match self {
+ Self::Pending(PendingDiff { multibuffer, .. }) => multibuffer,
+ Self::Finalized(FinalizedDiff { multibuffer, .. }) => multibuffer,
+ }
+ }
+
+ pub fn to_markdown(&self, cx: &App) -> String {
+ let buffer_text = self
+ .multibuffer()
+ .read(cx)
+ .all_buffers()
+ .iter()
+ .map(|buffer| buffer.read(cx).text())
+ .join("\n");
+ let path = match self {
+ Diff::Pending(PendingDiff { buffer, .. }) => {
+ buffer.read(cx).file().map(|file| file.path().as_ref())
+ }
+ Diff::Finalized(FinalizedDiff { path, .. }) => Some(path.as_path()),
+ };
+ format!(
+ "Diff: {}\n```\n{}\n```\n",
+ path.unwrap_or(Path::new("untitled")).display(),
+ buffer_text
+ )
+ }
+}
+
+pub struct PendingDiff {
+ multibuffer: Entity,
+ base_text: Arc,
+ buffer: Entity,
+ diff: Entity,
+ revealed_ranges: Vec>,
+ _subscription: Subscription,
+ update_diff: Task>,
+}
+
+impl PendingDiff {
+ pub fn update(&mut self, cx: &mut Context) {
+ let buffer = self.buffer.clone();
+ let buffer_diff = self.diff.clone();
+ let base_text = self.base_text.clone();
+ self.update_diff = cx.spawn(async move |diff, cx| {
+ let text_snapshot = buffer.read_with(cx, |buffer, _| buffer.text_snapshot())?;
+ let diff_snapshot = BufferDiff::update_diff(
+ buffer_diff.clone(),
+ text_snapshot.clone(),
+ Some(base_text),
+ false,
+ false,
+ None,
+ None,
+ cx,
+ )
+ .await?;
+ buffer_diff.update(cx, |diff, cx| {
+ diff.set_snapshot(diff_snapshot, &text_snapshot, cx)
+ })?;
+ diff.update(cx, |diff, cx| {
+ if let Diff::Pending(diff) = diff {
+ diff.update_visible_ranges(cx);
+ }
+ })
+ });
+ }
+
+ pub fn reveal_range(&mut self, range: Range, cx: &mut Context) {
+ self.revealed_ranges.push(range);
+ self.update_visible_ranges(cx);
+ }
+
+ fn finalize(&self, cx: &mut Context) -> FinalizedDiff {
+ let ranges = self.excerpt_ranges(cx);
+ let base_text = self.base_text.clone();
+ let language_registry = self.buffer.read(cx).language_registry().clone();
+
+ let path = self
+ .buffer
+ .read(cx)
+ .file()
+ .map(|file| file.path().as_ref())
+ .unwrap_or(Path::new("untitled"))
+ .into();
+
+ // Replace the buffer in the multibuffer with the snapshot
+ let buffer = cx.new(|cx| {
+ let language = self.buffer.read(cx).language().cloned();
+ let buffer = TextBuffer::new_normalized(
+ 0,
+ cx.entity_id().as_non_zero_u64().into(),
+ self.buffer.read(cx).line_ending(),
+ self.buffer.read(cx).as_rope().clone(),
+ );
+ let mut buffer = Buffer::build(buffer, None, Capability::ReadWrite);
+ buffer.set_language(language, cx);
+ buffer
+ });
+
+ let buffer_diff = cx.spawn({
+ let buffer = buffer.clone();
+ let language_registry = language_registry.clone();
+ async move |_this, cx| {
+ build_buffer_diff(base_text, &buffer, language_registry, cx).await
+ }
+ });
+
+ let update_diff = cx.spawn(async move |this, cx| {
+ let buffer_diff = buffer_diff.await?;
+ this.update(cx, |this, cx| {
+ this.multibuffer().update(cx, |multibuffer, cx| {
+ let path_key = PathKey::for_buffer(&buffer, cx);
+ multibuffer.clear(cx);
+ multibuffer.set_excerpts_for_path(
+ path_key,
+ buffer,
+ ranges,
+ editor::DEFAULT_MULTIBUFFER_CONTEXT,
+ cx,
+ );
+ multibuffer.add_diff(buffer_diff.clone(), cx);
+ });
+
+ cx.notify();
+ })
+ });
+
+ FinalizedDiff {
+ path,
+ multibuffer: self.multibuffer.clone(),
+ _update_diff: update_diff,
+ }
+ }
+
+ fn update_visible_ranges(&mut self, cx: &mut Context) {
+ let ranges = self.excerpt_ranges(cx);
+ self.multibuffer.update(cx, |multibuffer, cx| {
+ multibuffer.set_excerpts_for_path(
+ PathKey::for_buffer(&self.buffer, cx),
+ self.buffer.clone(),
+ ranges,
+ editor::DEFAULT_MULTIBUFFER_CONTEXT,
+ cx,
+ );
+ let end = multibuffer.len(cx);
+ Some(multibuffer.snapshot(cx).offset_to_point(end).row + 1)
+ });
+ cx.notify();
+ }
+
+ fn excerpt_ranges(&self, cx: &App) -> Vec> {
+ let buffer = self.buffer.read(cx);
+ let diff = self.diff.read(cx);
+ let mut ranges = diff
+ .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer, cx)
+ .map(|diff_hunk| diff_hunk.buffer_range.to_point(&buffer))
+ .collect::>();
+ ranges.extend(
+ self.revealed_ranges
+ .iter()
+ .map(|range| range.to_point(&buffer)),
+ );
+ ranges.sort_unstable_by_key(|range| (range.start, Reverse(range.end)));
+
+ // Merge adjacent ranges
+ let mut ranges = ranges.into_iter().peekable();
+ let mut merged_ranges = Vec::new();
+ while let Some(mut range) = ranges.next() {
+ while let Some(next_range) = ranges.peek() {
+ if range.end >= next_range.start {
+ range.end = range.end.max(next_range.end);
+ ranges.next();
+ } else {
+ break;
+ }
+ }
+
+ merged_ranges.push(range);
+ }
+ merged_ranges
+ }
+}
+
+pub struct FinalizedDiff {
+ path: PathBuf,
+ multibuffer: Entity,
+ _update_diff: Task>,
+}
+
+async fn build_buffer_diff(
+ old_text: Arc,
+ buffer: &Entity,
+ language_registry: Option>,
+ cx: &mut AsyncApp,
+) -> Result> {
+ let buffer = cx.update(|cx| buffer.read(cx).snapshot())?;
+
+ let old_text_rope = cx
+ .background_spawn({
+ let old_text = old_text.clone();
+ async move { Rope::from(old_text.as_str()) }
+ })
+ .await;
+ let base_buffer = cx
+ .update(|cx| {
+ Buffer::build_snapshot(
+ old_text_rope,
+ buffer.language().cloned(),
+ language_registry,
+ cx,
+ )
+ })?
+ .await;
+
+ let diff_snapshot = cx
+ .update(|cx| {
+ BufferDiffSnapshot::new_with_base_buffer(
+ buffer.text.clone(),
+ Some(old_text),
+ base_buffer,
+ cx,
+ )
+ })?
+ .await;
+
+ let secondary_diff = cx.new(|cx| {
+ let mut diff = BufferDiff::new(&buffer, cx);
+ diff.set_snapshot(diff_snapshot.clone(), &buffer, cx);
+ diff
+ })?;
+
+ cx.new(|cx| {
+ let mut diff = BufferDiff::new(&buffer.text, cx);
+ diff.set_snapshot(diff_snapshot, &buffer, cx);
+ diff.set_secondary_diff(secondary_diff);
+ diff
+ })
+}
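In the new diff.rs, `PendingDiff::excerpt_ranges` sorts hunk and revealed ranges by `(start, Reverse(end))` and then folds overlapping neighbors into a single excerpt. The same merge reduced to plain `Range<u32>` values so it can run standalone (a sketch of the algorithm only, not the multibuffer-backed version above):

```rust
use std::cmp::Reverse;
use std::ops::Range;

// Merge overlapping/touching ranges the way excerpt_ranges does: sort by
// start (longest first on ties), then absorb any neighbor that starts at or
// before the current range's end.
fn merge_ranges(mut ranges: Vec<Range<u32>>) -> Vec<Range<u32>> {
    ranges.sort_unstable_by_key(|range| (range.start, Reverse(range.end)));

    let mut ranges = ranges.into_iter().peekable();
    let mut merged = Vec::new();
    while let Some(mut range) = ranges.next() {
        while let Some(next) = ranges.peek() {
            if range.end >= next.start {
                range.end = range.end.max(next.end);
                ranges.next();
            } else {
                break;
            }
        }
        merged.push(range);
    }
    merged
}

fn main() {
    let merged = merge_ranges(vec![5..7, 0..3, 2..4, 10..12]);
    assert_eq!(merged, vec![0..4, 5..7, 10..12]);
    println!("{merged:?}");
}
```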
diff --git a/crates/agent/src/history_store.rs b/crates/agent/src/history_store.rs
index 89f75a72bd..eb39c3e454 100644
--- a/crates/agent/src/history_store.rs
+++ b/crates/agent/src/history_store.rs
@@ -212,7 +212,16 @@ impl HistoryStore {
fn load_recently_opened_entries(cx: &AsyncApp) -> Task>> {
cx.background_spawn(async move {
let path = paths::data_dir().join(NAVIGATION_HISTORY_PATH);
- let contents = smol::fs::read_to_string(path).await?;
+ let contents = match smol::fs::read_to_string(path).await {
+ Ok(it) => it,
+ Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
+ return Ok(Vec::new());
+ }
+ Err(e) => {
+ return Err(e)
+ .context("deserializing persisted agent panel navigation history");
+ }
+ };
let entries = serde_json::from_str::>(&contents)
.context("deserializing persisted agent panel navigation history")?
.into_iter()
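The history_store change above treats a missing navigation-history file as an empty history instead of an error. The same pattern using std::fs and a hypothetical path (sketch only; the real code uses smol::fs, anyhow context, and serde_json):

```rust
use std::io::ErrorKind;
use std::path::Path;

// Returns one entry per non-empty line; a missing file simply means "no history yet".
fn load_history(path: &Path) -> std::io::Result<Vec<String>> {
    let contents = match std::fs::read_to_string(path) {
        Ok(contents) => contents,
        // First run: the file has never been written, so there is nothing to load.
        Err(e) if e.kind() == ErrorKind::NotFound => return Ok(Vec::new()),
        // Any other I/O failure (permissions, etc.) is still surfaced to the caller.
        Err(e) => return Err(e),
    };
    Ok(contents
        .lines()
        .filter(|line| !line.is_empty())
        .map(String::from)
        .collect())
}

fn main() {
    let entries = load_history(Path::new("definitely-missing-history.json")).unwrap();
    assert!(entries.is_empty());
    println!("loaded {} entries", entries.len());
}
```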
diff --git a/crates/agent/src/thread.rs b/crates/agent/src/thread.rs
index 8558dd528d..048aa4245d 100644
--- a/crates/agent/src/thread.rs
+++ b/crates/agent/src/thread.rs
@@ -8,7 +8,7 @@ use crate::{
},
tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState},
};
-use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
+use agent_settings::{AgentProfileId, AgentSettings, CompletionMode, SUMMARIZE_THREAD_PROMPT};
use anyhow::{Result, anyhow};
use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolWorkingSet};
use chrono::{DateTime, Utc};
@@ -2112,12 +2112,10 @@ impl Thread {
return;
}
- let added_user_message = include_str!("./prompts/summarize_thread_prompt.txt");
-
let request = self.to_summarize_request(
&model.model,
CompletionIntent::ThreadSummarization,
- added_user_message.into(),
+ SUMMARIZE_THREAD_PROMPT.into(),
cx,
);
@@ -4047,8 +4045,8 @@ fn main() {{
});
cx.run_until_parked();
- fake_model.stream_last_completion_response("Brief");
- fake_model.stream_last_completion_response(" Introduction");
+ fake_model.send_last_completion_stream_text_chunk("Brief");
+ fake_model.send_last_completion_stream_text_chunk(" Introduction");
fake_model.end_last_completion_stream();
cx.run_until_parked();
@@ -4141,7 +4139,7 @@ fn main() {{
});
cx.run_until_parked();
- fake_model.stream_last_completion_response("A successful summary");
+ fake_model.send_last_completion_stream_text_chunk("A successful summary");
fake_model.end_last_completion_stream();
cx.run_until_parked();
@@ -4774,7 +4772,7 @@ fn main() {{
!pending.is_empty(),
"Should have a pending completion after retry"
);
- fake_model.stream_completion_response(&pending[0], "Success!");
+ fake_model.send_completion_stream_text_chunk(&pending[0], "Success!");
fake_model.end_completion_stream(&pending[0]);
cx.run_until_parked();
@@ -4942,7 +4940,7 @@ fn main() {{
// Check for pending completions and complete them
if let Some(pending) = inner_fake.pending_completions().first() {
- inner_fake.stream_completion_response(pending, "Success!");
+ inner_fake.send_completion_stream_text_chunk(pending, "Success!");
inner_fake.end_completion_stream(pending);
}
cx.run_until_parked();
@@ -5427,7 +5425,7 @@ fn main() {{
fn simulate_successful_response(fake_model: &FakeLanguageModel, cx: &mut TestAppContext) {
cx.run_until_parked();
- fake_model.stream_last_completion_response("Assistant response");
+ fake_model.send_last_completion_stream_text_chunk("Assistant response");
fake_model.end_last_completion_stream();
cx.run_until_parked();
}
diff --git a/crates/agent2/Cargo.toml b/crates/agent2/Cargo.toml
new file mode 100644
index 0000000000..3e19895a31
--- /dev/null
+++ b/crates/agent2/Cargo.toml
@@ -0,0 +1,64 @@
+[package]
+name = "agent2"
+version = "0.1.0"
+edition = "2021"
+license = "GPL-3.0-or-later"
+publish = false
+
+[lib]
+path = "src/agent2.rs"
+
+[lints]
+workspace = true
+
+[dependencies]
+acp_thread.workspace = true
+agent-client-protocol.workspace = true
+agent_servers.workspace = true
+agent_settings.workspace = true
+anyhow.workspace = true
+assistant_tool.workspace = true
+assistant_tools.workspace = true
+cloud_llm_client.workspace = true
+collections.workspace = true
+fs.workspace = true
+futures.workspace = true
+gpui.workspace = true
+handlebars = { workspace = true, features = ["rust-embed"] }
+indoc.workspace = true
+itertools.workspace = true
+language.workspace = true
+language_model.workspace = true
+language_models.workspace = true
+log.workspace = true
+paths.workspace = true
+project.workspace = true
+prompt_store.workspace = true
+rust-embed.workspace = true
+schemars.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+settings.workspace = true
+smol.workspace = true
+ui.workspace = true
+util.workspace = true
+uuid.workspace = true
+watch.workspace = true
+workspace-hack.workspace = true
+
+[dev-dependencies]
+ctor.workspace = true
+client = { workspace = true, "features" = ["test-support"] }
+clock = { workspace = true, "features" = ["test-support"] }
+env_logger.workspace = true
+fs = { workspace = true, "features" = ["test-support"] }
+gpui = { workspace = true, "features" = ["test-support"] }
+gpui_tokio.workspace = true
+language = { workspace = true, "features" = ["test-support"] }
+language_model = { workspace = true, "features" = ["test-support"] }
+lsp = { workspace = true, "features" = ["test-support"] }
+project = { workspace = true, "features" = ["test-support"] }
+reqwest_client.workspace = true
+settings = { workspace = true, "features" = ["test-support"] }
+worktree = { workspace = true, "features" = ["test-support"] }
+pretty_assertions.workspace = true
diff --git a/crates/agent2/LICENSE-GPL b/crates/agent2/LICENSE-GPL
new file mode 120000
index 0000000000..89e542f750
--- /dev/null
+++ b/crates/agent2/LICENSE-GPL
@@ -0,0 +1 @@
+../../LICENSE-GPL
\ No newline at end of file
diff --git a/crates/agent2/src/agent.rs b/crates/agent2/src/agent.rs
new file mode 100644
index 0000000000..df061cd5ed
--- /dev/null
+++ b/crates/agent2/src/agent.rs
@@ -0,0 +1,696 @@
+use crate::{templates::Templates, AgentResponseEvent, Thread};
+use crate::{EditFileTool, FindPathTool, ReadFileTool, ThinkingTool, ToolCallAuthorization};
+use acp_thread::ModelSelector;
+use agent_client_protocol as acp;
+use anyhow::{anyhow, Context as _, Result};
+use futures::{future, StreamExt};
+use gpui::{
+ App, AppContext, AsyncApp, Context, Entity, SharedString, Subscription, Task, WeakEntity,
+};
+use language_model::{LanguageModel, LanguageModelRegistry};
+use project::{Project, ProjectItem, ProjectPath, Worktree};
+use prompt_store::{
+ ProjectContext, PromptId, PromptStore, RulesFileContext, UserRulesContext, WorktreeContext,
+};
+use std::cell::RefCell;
+use std::collections::HashMap;
+use std::path::Path;
+use std::rc::Rc;
+use std::sync::Arc;
+use util::ResultExt;
+
+const RULES_FILE_NAMES: [&'static str; 9] = [
+ ".rules",
+ ".cursorrules",
+ ".windsurfrules",
+ ".clinerules",
+ ".github/copilot-instructions.md",
+ "CLAUDE.md",
+ "AGENT.md",
+ "AGENTS.md",
+ "GEMINI.md",
+];
+
+pub struct RulesLoadingError {
+ pub message: SharedString,
+}
+
+/// Holds both the internal Thread and the AcpThread for a session
+struct Session {
+ /// The internal thread that processes messages
+ thread: Entity,
+ /// The ACP thread that handles protocol communication
+ acp_thread: WeakEntity,
+ _subscription: Subscription,
+}
+
+pub struct NativeAgent {
+ /// Session ID -> Session mapping
+ sessions: HashMap,
+ /// Shared project context for all threads
+ project_context: Rc>,
+ project_context_needs_refresh: watch::Sender<()>,
+ _maintain_project_context: Task>,
+ /// Shared templates for all threads
+ templates: Arc,
+ project: Entity,
+ prompt_store: Option>,
+ _subscriptions: Vec,
+}
+
+impl NativeAgent {
+ pub async fn new(
+ project: Entity,
+ templates: Arc,
+ prompt_store: Option>,
+ cx: &mut AsyncApp,
+ ) -> Result> {
+ log::info!("Creating new NativeAgent");
+
+ let project_context = cx
+ .update(|cx| Self::build_project_context(&project, prompt_store.as_ref(), cx))?
+ .await;
+
+ cx.new(|cx| {
+ let mut subscriptions = vec![cx.subscribe(&project, Self::handle_project_event)];
+ if let Some(prompt_store) = prompt_store.as_ref() {
+ subscriptions.push(cx.subscribe(prompt_store, Self::handle_prompts_updated_event))
+ }
+
+ let (project_context_needs_refresh_tx, project_context_needs_refresh_rx) =
+ watch::channel(());
+ Self {
+ sessions: HashMap::new(),
+ project_context: Rc::new(RefCell::new(project_context)),
+ project_context_needs_refresh: project_context_needs_refresh_tx,
+ _maintain_project_context: cx.spawn(async move |this, cx| {
+ Self::maintain_project_context(this, project_context_needs_refresh_rx, cx).await
+ }),
+ templates,
+ project,
+ prompt_store,
+ _subscriptions: subscriptions,
+ }
+ })
+ }
+
+ async fn maintain_project_context(
+ this: WeakEntity,
+ mut needs_refresh: watch::Receiver<()>,
+ cx: &mut AsyncApp,
+ ) -> Result<()> {
+ while needs_refresh.changed().await.is_ok() {
+ let project_context = this
+ .update(cx, |this, cx| {
+ Self::build_project_context(&this.project, this.prompt_store.as_ref(), cx)
+ })?
+ .await;
+ this.update(cx, |this, _| this.project_context.replace(project_context))?;
+ }
+
+ Ok(())
+ }
+
+ fn build_project_context(
+ project: &Entity<Project>,
+ prompt_store: Option<&Entity<PromptStore>>,
+ cx: &mut App,
+ ) -> Task<ProjectContext> {
+ let worktrees = project.read(cx).visible_worktrees(cx).collect::<Vec<_>>();
+ let worktree_tasks = worktrees
+ .into_iter()
+ .map(|worktree| {
+ Self::load_worktree_info_for_system_prompt(worktree, project.clone(), cx)
+ })
+ .collect::<Vec<_>>();
+ let default_user_rules_task = if let Some(prompt_store) = prompt_store.as_ref() {
+ prompt_store.read_with(cx, |prompt_store, cx| {
+ let prompts = prompt_store.default_prompt_metadata();
+ let load_tasks = prompts.into_iter().map(|prompt_metadata| {
+ let contents = prompt_store.load(prompt_metadata.id, cx);
+ async move { (contents.await, prompt_metadata) }
+ });
+ cx.background_spawn(future::join_all(load_tasks))
+ })
+ } else {
+ Task::ready(vec![])
+ };
+
+ cx.spawn(async move |_cx| {
+ let (worktrees, default_user_rules) =
+ future::join(future::join_all(worktree_tasks), default_user_rules_task).await;
+
+ let worktrees = worktrees
+ .into_iter()
+ .map(|(worktree, _rules_error)| {
+ // TODO: show error message
+ // if let Some(rules_error) = rules_error {
+ // this.update(cx, |_, cx| cx.emit(rules_error)).ok();
+ // }
+ worktree
+ })
+ .collect::<Vec<_>>();
+
+ let default_user_rules = default_user_rules
+ .into_iter()
+ .flat_map(|(contents, prompt_metadata)| match contents {
+ Ok(contents) => Some(UserRulesContext {
+ uuid: match prompt_metadata.id {
+ PromptId::User { uuid } => uuid,
+ PromptId::EditWorkflow => return None,
+ },
+ title: prompt_metadata.title.map(|title| title.to_string()),
+ contents,
+ }),
+ Err(_err) => {
+ // TODO: show error message
+ // this.update(cx, |_, cx| {
+ // cx.emit(RulesLoadingError {
+ // message: format!("{err:?}").into(),
+ // });
+ // })
+ // .ok();
+ None
+ }
+ })
+ .collect::<Vec<_>>();
+
+ ProjectContext::new(worktrees, default_user_rules)
+ })
+ }
+
+ fn load_worktree_info_for_system_prompt(
+ worktree: Entity<Worktree>,
+ project: Entity<Project>,
+ cx: &mut App,
+ ) -> Task<(WorktreeContext, Option<RulesLoadingError>)> {
+ let tree = worktree.read(cx);
+ let root_name = tree.root_name().into();
+ let abs_path = tree.abs_path();
+
+ let mut context = WorktreeContext {
+ root_name,
+ abs_path,
+ rules_file: None,
+ };
+
+ let rules_task = Self::load_worktree_rules_file(worktree, project, cx);
+ let Some(rules_task) = rules_task else {
+ return Task::ready((context, None));
+ };
+
+ cx.spawn(async move |_| {
+ let (rules_file, rules_file_error) = match rules_task.await {
+ Ok(rules_file) => (Some(rules_file), None),
+ Err(err) => (
+ None,
+ Some(RulesLoadingError {
+ message: format!("{err}").into(),
+ }),
+ ),
+ };
+ context.rules_file = rules_file;
+ (context, rules_file_error)
+ })
+ }
+
+ fn load_worktree_rules_file(
+ worktree: Entity<Worktree>,
+ project: Entity<Project>,
+ cx: &mut App,
+ ) -> Option<Task<Result<RulesFileContext>>> {
+ let worktree = worktree.read(cx);
+ let worktree_id = worktree.id();
+ let selected_rules_file = RULES_FILE_NAMES
+ .into_iter()
+ .filter_map(|name| {
+ worktree
+ .entry_for_path(name)
+ .filter(|entry| entry.is_file())
+ .map(|entry| entry.path.clone())
+ })
+ .next();
+
+ // Note that Cline supports `.clinerules` being a directory, but that is not currently
+ // supported. This doesn't seem to occur often in GitHub repositories.
+ selected_rules_file.map(|path_in_worktree| {
+ let project_path = ProjectPath {
+ worktree_id,
+ path: path_in_worktree.clone(),
+ };
+ let buffer_task =
+ project.update(cx, |project, cx| project.open_buffer(project_path, cx));
+ let rope_task = cx.spawn(async move |cx| {
+ buffer_task.await?.read_with(cx, |buffer, cx| {
+ let project_entry_id = buffer.entry_id(cx).context("buffer has no file")?;
+ anyhow::Ok((project_entry_id, buffer.as_rope().clone()))
+ })?
+ });
+ // Build a string from the rope on a background thread.
+ cx.background_spawn(async move {
+ let (project_entry_id, rope) = rope_task.await?;
+ anyhow::Ok(RulesFileContext {
+ path_in_worktree,
+ text: rope.to_string().trim().to_string(),
+ project_entry_id: project_entry_id.to_usize(),
+ })
+ })
+ })
+ }
+
+ fn handle_project_event(
+ &mut self,
+ _project: Entity<Project>,
+ event: &project::Event,
+ _cx: &mut Context<Self>,
+ ) {
+ match event {
+ project::Event::WorktreeAdded(_) | project::Event::WorktreeRemoved(_) => {
+ self.project_context_needs_refresh.send(()).ok();
+ }
+ project::Event::WorktreeUpdatedEntries(_, items) => {
+ if items.iter().any(|(path, _, _)| {
+ RULES_FILE_NAMES
+ .iter()
+ .any(|name| path.as_ref() == Path::new(name))
+ }) {
+ self.project_context_needs_refresh.send(()).ok();
+ }
+ }
+ _ => {}
+ }
+ }
+
+ fn handle_prompts_updated_event(
+ &mut self,
+ _prompt_store: Entity<PromptStore>,
+ _event: &prompt_store::PromptsUpdatedEvent,
+ _cx: &mut Context<Self>,
+ ) {
+ self.project_context_needs_refresh.send(()).ok();
+ }
+}
+
+/// Wrapper struct that implements the AgentConnection trait
+#[derive(Clone)]
+pub struct NativeAgentConnection(pub Entity<NativeAgent>);
+
+impl ModelSelector for NativeAgentConnection {
+ fn list_models(&self, cx: &mut AsyncApp) -> Task<Result<Vec<Arc<dyn LanguageModel>>>> {
+ log::debug!("NativeAgentConnection::list_models called");
+ cx.spawn(async move |cx| {
+ cx.update(|cx| {
+ let registry = LanguageModelRegistry::read_global(cx);
+ let models = registry.available_models(cx).collect::<Vec<_>>();
+ log::info!("Found {} available models", models.len());
+ if models.is_empty() {
+ Err(anyhow::anyhow!("No models available"))
+ } else {
+ Ok(models)
+ }
+ })?
+ })
+ }
+
+ fn select_model(
+ &self,
+ session_id: acp::SessionId,
+ model: Arc<dyn LanguageModel>,
+ cx: &mut AsyncApp,
+ ) -> Task<Result<()>> {
+ log::info!(
+ "Setting model for session {}: {:?}",
+ session_id,
+ model.name()
+ );
+ let agent = self.0.clone();
+
+ cx.spawn(async move |cx| {
+ agent.update(cx, |agent, cx| {
+ if let Some(session) = agent.sessions.get(&session_id) {
+ session.thread.update(cx, |thread, _cx| {
+ thread.selected_model = model;
+ });
+ Ok(())
+ } else {
+ Err(anyhow!("Session not found"))
+ }
+ })?
+ })
+ }
+
+ fn selected_model(
+ &self,
+ session_id: &acp::SessionId,
+ cx: &mut AsyncApp,
+ ) -> Task<Result<Arc<dyn LanguageModel>>> {
+ let agent = self.0.clone();
+ let session_id = session_id.clone();
+ cx.spawn(async move |cx| {
+ let thread = agent
+ .read_with(cx, |agent, _| {
+ agent
+ .sessions
+ .get(&session_id)
+ .map(|session| session.thread.clone())
+ })?
+ .ok_or_else(|| anyhow::anyhow!("Session not found"))?;
+ let selected = thread.read_with(cx, |thread, _| thread.selected_model.clone())?;
+ Ok(selected)
+ })
+ }
+}
+
+impl acp_thread::AgentConnection for NativeAgentConnection {
+ fn new_thread(
+ self: Rc,
+ project: Entity<Project>,
+ cwd: &Path,
+ cx: &mut AsyncApp,
+ ) -> Task<Result<Entity<acp_thread::AcpThread>>> {
+ let agent = self.0.clone();
+ log::info!("Creating new thread for project at: {:?}", cwd);
+
+ cx.spawn(async move |cx| {
+ log::debug!("Starting thread creation in async context");
+
+ // Generate session ID
+ let session_id = acp::SessionId(uuid::Uuid::new_v4().to_string().into());
+ log::info!("Created session with ID: {}", session_id);
+
+ // Create AcpThread
+ let acp_thread = cx.update(|cx| {
+ cx.new(|cx| {
+ acp_thread::AcpThread::new("agent2", self.clone(), project.clone(), session_id.clone(), cx)
+ })
+ })?;
+ let action_log = cx.update(|cx| acp_thread.read(cx).action_log().clone())?;
+
+ // Create Thread
+ let thread = agent.update(
+ cx,
+ |agent, cx: &mut gpui::Context<NativeAgent>| -> Result<_> {
+ // Fetch default model from registry settings
+ let registry = LanguageModelRegistry::read_global(cx);
+
+ // Log available models for debugging
+ let available_count = registry.available_models(cx).count();
+ log::debug!("Total available models: {}", available_count);
+
+ let default_model = registry
+ .default_model()
+ .map(|configured| {
+ log::info!(
+ "Using configured default model: {:?} from provider: {:?}",
+ configured.model.name(),
+ configured.provider.name()
+ );
+ configured.model
+ })
+ .ok_or_else(|| {
+ log::warn!("No default model configured in settings");
+ anyhow!("No default model configured. Please configure a default model in settings.")
+ })?;
+
+ let thread = cx.new(|cx| {
+ let mut thread = Thread::new(project.clone(), agent.project_context.clone(), action_log.clone(), agent.templates.clone(), default_model);
+ thread.add_tool(ThinkingTool);
+ thread.add_tool(FindPathTool::new(project.clone()));
+ thread.add_tool(ReadFileTool::new(project.clone(), action_log));
+ thread.add_tool(EditFileTool::new(cx.entity()));
+ thread
+ });
+
+ Ok(thread)
+ },
+ )??;
+
+ // Store the session
+ agent.update(cx, |agent, cx| {
+ agent.sessions.insert(
+ session_id,
+ Session {
+ thread,
+ acp_thread: acp_thread.downgrade(),
+ _subscription: cx.observe_release(&acp_thread, |this, acp_thread, _cx| {
+ this.sessions.remove(acp_thread.session_id());
+ })
+ },
+ );
+ })?;
+
+ Ok(acp_thread)
+ })
+ }
+
+ fn auth_methods(&self) -> &[acp::AuthMethod] {
+ &[] // No auth for in-process
+ }
+
+ fn authenticate(&self, _method: acp::AuthMethodId, _cx: &mut App) -> Task> {
+ Task::ready(Ok(()))
+ }
+
+ fn model_selector(&self) -> Option<Rc<dyn ModelSelector>> {
+ Some(Rc::new(self.clone()) as Rc<dyn ModelSelector>)
+ }
+
+ fn prompt(
+ &self,
+ params: acp::PromptRequest,
+ cx: &mut App,
+ ) -> Task<Result<acp::PromptResponse>> {
+ let session_id = params.session_id.clone();
+ let agent = self.0.clone();
+ log::info!("Received prompt request for session: {}", session_id);
+ log::debug!("Prompt blocks count: {}", params.prompt.len());
+
+ cx.spawn(async move |cx| {
+ // Get session
+ let (thread, acp_thread) = agent
+ .update(cx, |agent, _| {
+ agent
+ .sessions
+ .get_mut(&session_id)
+ .map(|s| (s.thread.clone(), s.acp_thread.clone()))
+ })?
+ .ok_or_else(|| {
+ log::error!("Session not found: {}", session_id);
+ anyhow::anyhow!("Session not found")
+ })?;
+ log::debug!("Found session for: {}", session_id);
+
+ // Convert prompt to message
+ let message = convert_prompt_to_message(params.prompt);
+ log::info!("Converted prompt to message: {} chars", message.len());
+ log::debug!("Message content: {}", message);
+
+ // Get model using the ModelSelector capability (always available for agent2)
+ // Get the selected model from the thread directly
+ let model = thread.read_with(cx, |thread, _| thread.selected_model.clone())?;
+
+ // Send to thread
+ log::info!("Sending message to thread with model: {:?}", model.name());
+ let mut response_stream =
+ thread.update(cx, |thread, cx| thread.send(model, message, cx))?;
+
+ // Handle response stream and forward to session.acp_thread
+ while let Some(result) = response_stream.next().await {
+ match result {
+ Ok(event) => {
+ log::trace!("Received completion event: {:?}", event);
+
+ match event {
+ AgentResponseEvent::Text(text) => {
+ acp_thread.update(cx, |thread, cx| {
+ thread.push_assistant_content_block(
+ acp::ContentBlock::Text(acp::TextContent {
+ text,
+ annotations: None,
+ }),
+ false,
+ cx,
+ )
+ })?;
+ }
+ AgentResponseEvent::Thinking(text) => {
+ acp_thread.update(cx, |thread, cx| {
+ thread.push_assistant_content_block(
+ acp::ContentBlock::Text(acp::TextContent {
+ text,
+ annotations: None,
+ }),
+ true,
+ cx,
+ )
+ })?;
+ }
+ AgentResponseEvent::ToolCallAuthorization(ToolCallAuthorization {
+ tool_call,
+ options,
+ response,
+ }) => {
+ let recv = acp_thread.update(cx, |thread, cx| {
+ thread.request_tool_call_authorization(tool_call, options, cx)
+ })?;
+ cx.background_spawn(async move {
+ if let Some(option) = recv
+ .await
+ .context("authorization sender was dropped")
+ .log_err()
+ {
+ response
+ .send(option)
+ .map_err(|_| anyhow!("authorization receiver was dropped"))
+ .log_err();
+ }
+ })
+ .detach();
+ }
+ AgentResponseEvent::ToolCall(tool_call) => {
+ acp_thread.update(cx, |thread, cx| {
+ thread.upsert_tool_call(tool_call, cx)
+ })?;
+ }
+ AgentResponseEvent::ToolCallUpdate(update) => {
+ acp_thread.update(cx, |thread, cx| {
+ thread.update_tool_call(update, cx)
+ })??;
+ }
+ AgentResponseEvent::Stop(stop_reason) => {
+ log::debug!("Assistant message complete: {:?}", stop_reason);
+ return Ok(acp::PromptResponse { stop_reason });
+ }
+ }
+ }
+ Err(e) => {
+ log::error!("Error in model response stream: {:?}", e);
+ // TODO: Consider sending an error message to the UI
+ break;
+ }
+ }
+ }
+
+ log::info!("Response stream completed");
+ anyhow::Ok(acp::PromptResponse {
+ stop_reason: acp::StopReason::EndTurn,
+ })
+ })
+ }
+
+ fn cancel(&self, session_id: &acp::SessionId, cx: &mut App) {
+ log::info!("Cancelling on session: {}", session_id);
+ self.0.update(cx, |agent, cx| {
+ if let Some(session) = agent.sessions.get(session_id) {
+ session.thread.update(cx, |thread, _cx| thread.cancel());
+ }
+ });
+ }
+}
+
+/// Convert ACP content blocks to a message string
+fn convert_prompt_to_message(blocks: Vec<acp::ContentBlock>) -> String {
+ log::debug!("Converting {} content blocks to message", blocks.len());
+ let mut message = String::new();
+
+ for block in blocks {
+ match block {
+ acp::ContentBlock::Text(text) => {
+ log::trace!("Processing text block: {} chars", text.text.len());
+ message.push_str(&text.text);
+ }
+ acp::ContentBlock::ResourceLink(link) => {
+ log::trace!("Processing resource link: {}", link.uri);
+ message.push_str(&format!(" @{} ", link.uri));
+ }
+ acp::ContentBlock::Image(_) => {
+ log::trace!("Processing image block");
+ message.push_str(" [image] ");
+ }
+ acp::ContentBlock::Audio(_) => {
+ log::trace!("Processing audio block");
+ message.push_str(" [audio] ");
+ }
+ acp::ContentBlock::Resource(resource) => {
+ log::trace!("Processing resource block: {:?}", resource.resource);
+ message.push_str(&format!(" [resource: {:?}] ", resource.resource));
+ }
+ }
+ }
+
+ message
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use fs::FakeFs;
+ use gpui::TestAppContext;
+ use serde_json::json;
+ use settings::SettingsStore;
+
+ #[gpui::test]
+ async fn test_maintaining_project_context(cx: &mut TestAppContext) {
+ init_test(cx);
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree(
+ "/",
+ json!({
+ "a": {}
+ }),
+ )
+ .await;
+ let project = Project::test(fs.clone(), [], cx).await;
+ let agent = NativeAgent::new(project.clone(), Templates::new(), None, &mut cx.to_async())
+ .await
+ .unwrap();
+ agent.read_with(cx, |agent, _| {
+ assert_eq!(agent.project_context.borrow().worktrees, vec![])
+ });
+
+ let worktree = project
+ .update(cx, |project, cx| project.create_worktree("/a", true, cx))
+ .await
+ .unwrap();
+ cx.run_until_parked();
+ agent.read_with(cx, |agent, _| {
+ assert_eq!(
+ agent.project_context.borrow().worktrees,
+ vec![WorktreeContext {
+ root_name: "a".into(),
+ abs_path: Path::new("/a").into(),
+ rules_file: None
+ }]
+ )
+ });
+
+ // Creating `/a/.rules` updates the project context.
+ fs.insert_file("/a/.rules", Vec::new()).await;
+ cx.run_until_parked();
+ agent.read_with(cx, |agent, cx| {
+ let rules_entry = worktree.read(cx).entry_for_path(".rules").unwrap();
+ assert_eq!(
+ agent.project_context.borrow().worktrees,
+ vec![WorktreeContext {
+ root_name: "a".into(),
+ abs_path: Path::new("/a").into(),
+ rules_file: Some(RulesFileContext {
+ path_in_worktree: Path::new(".rules").into(),
+ text: "".into(),
+ project_entry_id: rules_entry.id.to_usize()
+ })
+ }]
+ )
+ });
+ }
+
+ fn init_test(cx: &mut TestAppContext) {
+ env_logger::try_init().ok();
+ cx.update(|cx| {
+ let settings_store = SettingsStore::test(cx);
+ cx.set_global(settings_store);
+ Project::init_settings(cx);
+ language::init(cx);
+ });
+ }
+}
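For orientation, here is a minimal sketch of what `convert_prompt_to_message` produces for a text-only prompt. It reuses only the `acp::TextContent` shape that appears in this file; treat it as an illustrative, test-style snippet rather than code from the patch.

```rust
use agent_client_protocol as acp;

// Text blocks are concatenated in order; resource links, images, audio, and
// embedded resources are rendered as inline placeholders instead.
let blocks = vec![
    acp::ContentBlock::Text(acp::TextContent {
        text: "Explain ".into(),
        annotations: None,
    }),
    acp::ContentBlock::Text(acp::TextContent {
        text: "this diff.".into(),
        annotations: None,
    }),
];
assert_eq!(convert_prompt_to_message(blocks), "Explain this diff.");
```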
diff --git a/crates/agent2/src/agent2.rs b/crates/agent2/src/agent2.rs
new file mode 100644
index 0000000000..f13cd1bd67
--- /dev/null
+++ b/crates/agent2/src/agent2.rs
@@ -0,0 +1,14 @@
+mod agent;
+mod native_agent_server;
+mod templates;
+mod thread;
+mod tools;
+
+#[cfg(test)]
+mod tests;
+
+pub use agent::*;
+pub use native_agent_server::NativeAgentServer;
+pub use templates::*;
+pub use thread::*;
+pub use tools::*;
diff --git a/crates/agent2/src/native_agent_server.rs b/crates/agent2/src/native_agent_server.rs
new file mode 100644
index 0000000000..dd0188b548
--- /dev/null
+++ b/crates/agent2/src/native_agent_server.rs
@@ -0,0 +1,60 @@
+use std::path::Path;
+use std::rc::Rc;
+
+use agent_servers::AgentServer;
+use anyhow::Result;
+use gpui::{App, Entity, Task};
+use project::Project;
+use prompt_store::PromptStore;
+
+use crate::{templates::Templates, NativeAgent, NativeAgentConnection};
+
+#[derive(Clone)]
+pub struct NativeAgentServer;
+
+impl AgentServer for NativeAgentServer {
+ fn name(&self) -> &'static str {
+ "Native Agent"
+ }
+
+ fn empty_state_headline(&self) -> &'static str {
+ "Native Agent"
+ }
+
+ fn empty_state_message(&self) -> &'static str {
+ "How can I help you today?"
+ }
+
+ fn logo(&self) -> ui::IconName {
+ // Using the ZedAssistant icon as it's the native built-in agent
+ ui::IconName::ZedAssistant
+ }
+
+ fn connect(
+ &self,
+ _root_dir: &Path,
+ project: &Entity<Project>,
+ cx: &mut App,
+ ) -> Task<Result<Rc<dyn acp_thread::AgentConnection>>> {
+ log::info!(
+ "NativeAgentServer::connect called for path: {:?}",
+ _root_dir
+ );
+ let project = project.clone();
+ let prompt_store = PromptStore::global(cx);
+ cx.spawn(async move |cx| {
+ log::debug!("Creating templates for native agent");
+ let templates = Templates::new();
+ let prompt_store = prompt_store.await?;
+
+ log::debug!("Creating native agent entity");
+ let agent = NativeAgent::new(project, templates, Some(prompt_store), cx).await?;
+
+ // Create the connection wrapper
+ let connection = NativeAgentConnection(agent);
+ log::info!("NativeAgentServer connection established successfully");
+
+ Ok(Rc::new(connection) as Rc<dyn acp_thread::AgentConnection>)
+ })
+ }
+}
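A hedged sketch of how a caller could wire `NativeAgentServer` up. It assumes the caller already has an `Entity<Project>` and a gpui `App` context in hand (both assumptions of this sketch), and mirrors the async-closure style used in `connect` above.

```rust
use std::path::Path;
use acp_thread::AgentConnection as _;
use agent_servers::AgentServer as _;

// Illustrative only: `project` and `cx` come from the surrounding application.
fn open_native_agent_thread(project: gpui::Entity<project::Project>, cx: &mut gpui::App) {
    let connect = NativeAgentServer.connect(Path::new("/"), &project, cx);
    cx.spawn(async move |cx| {
        let connection = connect.await?;
        // `new_thread` comes from the `acp_thread::AgentConnection` impl in agent.rs.
        let _acp_thread = connection.clone().new_thread(project, Path::new("/"), cx).await?;
        log::info!("native agent session opened");
        anyhow::Ok(())
    })
    .detach();
}
```

In the real application this wiring happens wherever the other `AgentServer` implementations are registered; the snippet only shows the call shape.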
diff --git a/crates/agent2/src/templates.rs b/crates/agent2/src/templates.rs
new file mode 100644
index 0000000000..a63f0ad206
--- /dev/null
+++ b/crates/agent2/src/templates.rs
@@ -0,0 +1,87 @@
+use anyhow::Result;
+use gpui::SharedString;
+use handlebars::Handlebars;
+use rust_embed::RustEmbed;
+use serde::Serialize;
+use std::sync::Arc;
+
+#[derive(RustEmbed)]
+#[folder = "src/templates"]
+#[include = "*.hbs"]
+struct Assets;
+
+pub struct Templates(Handlebars<'static>);
+
+impl Templates {
+ pub fn new() -> Arc {
+ let mut handlebars = Handlebars::new();
+ handlebars.set_strict_mode(true);
+ handlebars.register_helper("contains", Box::new(contains));
+ handlebars.register_embed_templates::<Assets>().unwrap();
+ Arc::new(Self(handlebars))
+ }
+}
+
+pub trait Template: Sized {
+ const TEMPLATE_NAME: &'static str;
+
+ fn render(&self, templates: &Templates) -> Result<String>
+ where
+ Self: Serialize + Sized,
+ {
+ Ok(templates.0.render(Self::TEMPLATE_NAME, self)?)
+ }
+}
+
+#[derive(Serialize)]
+pub struct SystemPromptTemplate<'a> {
+ #[serde(flatten)]
+ pub project: &'a prompt_store::ProjectContext,
+ pub available_tools: Vec<SharedString>,
+}
+
+impl Template for SystemPromptTemplate<'_> {
+ const TEMPLATE_NAME: &'static str = "system_prompt.hbs";
+}
+
+/// Handlebars helper for checking if an item is in a list
+fn contains(
+ h: &handlebars::Helper,
+ _: &handlebars::Handlebars,
+ _: &handlebars::Context,
+ _: &mut handlebars::RenderContext,
+ out: &mut dyn handlebars::Output,
+) -> handlebars::HelperResult {
+ let list = h
+ .param(0)
+ .and_then(|v| v.value().as_array())
+ .ok_or_else(|| {
+ handlebars::RenderError::new("contains: missing or invalid list parameter")
+ })?;
+ let query = h.param(1).map(|v| v.value()).ok_or_else(|| {
+ handlebars::RenderError::new("contains: missing or invalid query parameter")
+ })?;
+
+ if list.contains(query) {
+ out.write("true")?;
+ }
+
+ Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_system_prompt_template() {
+ let project = prompt_store::ProjectContext::default();
+ let template = SystemPromptTemplate {
+ project: &project,
+ available_tools: vec!["echo".into()],
+ };
+ let templates = Templates::new();
+ let rendered = template.render(&templates).unwrap();
+ assert!(rendered.contains("## Fixing Diagnostics"));
+ }
+}
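The `Template` trait above keeps adding new prompts mostly declarative: derive `Serialize`, point `TEMPLATE_NAME` at an embedded `.hbs` file, and call `render`. A hypothetical sketch (the `greeting.hbs` file and `GreetingTemplate` type are invented for illustration and are not part of this patch):

```rust
use serde::Serialize;

// Assumes a `src/templates/greeting.hbs` file containing `Hello, {{name}}!`
// were added alongside system_prompt.hbs so RustEmbed picks it up.
#[derive(Serialize)]
struct GreetingTemplate {
    name: String,
}

impl Template for GreetingTemplate {
    const TEMPLATE_NAME: &'static str = "greeting.hbs";
}

fn render_greeting(templates: &Templates) -> anyhow::Result<String> {
    GreetingTemplate { name: "Zed".into() }.render(templates)
}
```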
diff --git a/crates/agent2/src/templates/system_prompt.hbs b/crates/agent2/src/templates/system_prompt.hbs
new file mode 100644
index 0000000000..a9f67460d8
--- /dev/null
+++ b/crates/agent2/src/templates/system_prompt.hbs
@@ -0,0 +1,178 @@
+You are a highly skilled software engineer with extensive knowledge in many programming languages, frameworks, design patterns, and best practices.
+
+## Communication
+
+1. Be conversational but professional.
+2. Refer to the user in the second person and yourself in the first person.
+3. Format your responses in markdown. Use backticks to format file, directory, function, and class names.
+4. NEVER lie or make things up.
+5. Refrain from apologizing all the time when results are unexpected. Instead, just try your best to proceed or explain the circumstances to the user without apologizing.
+
+{{#if (gt (len available_tools) 0)}}
+## Tool Use
+
+1. Make sure to adhere to the tools schema.
+2. Provide every required argument.
+3. DO NOT use tools to access items that are already available in the context section.
+4. Use only the tools that are currently available.
+5. DO NOT use a tool that is not available just because it appears in the conversation. This means the user turned it off.
+6. NEVER run commands that don't terminate on their own such as web servers (like `npm run start`, `npm run dev`, `python -m http.server`, etc) or file watchers.
+7. Avoid HTML entity escaping - use plain characters instead.
+
+## Searching and Reading
+
+If you are unsure how to fulfill the user's request, gather more information with tool calls and/or clarifying questions.
+
+If appropriate, use tool calls to explore the current project, which contains the following root directories:
+
+{{#each worktrees}}
+- `{{abs_path}}`
+{{/each}}
+
+- Bias towards not asking the user for help if you can find the answer yourself.
+- When providing paths to tools, the path should always start with the name of a project root directory listed above.
+- Before you read or edit a file, you must first find the full path. DO NOT ever guess a file path!
+{{# if (contains available_tools 'grep') }}
+- When looking for symbols in the project, prefer the `grep` tool.
+- As you learn about the structure of the project, use that information to scope `grep` searches to targeted subtrees of the project.
+- The user might specify a partial file path. If you don't know the full path, use `find_path` (not `grep`) before you read the file.
+{{/if}}
+{{else}}
+You are being tasked with providing a response, but you have no ability to use tools or to read or write any aspect of the user's system (other than any context the user might have provided to you).
+
+As such, if you need the user to perform any actions for you, you must request them explicitly. Bias towards giving a response to the best of your ability, and then making requests for the user to take action (e.g. to give you more context) only optionally.
+
+The one exception to this is if the user references something you don't know about - for example, the name of a source code file, function, type, or other piece of code that you have no awareness of. In this case, you MUST NOT MAKE SOMETHING UP, or assume you know what that thing is or how it works. Instead, you must ask the user for clarification rather than giving a response.
+{{/if}}
+
+## Code Block Formatting
+
+Whenever you mention a code block, you MUST use ONLY the following format:
+```path/to/Something.blah#L123-456
+(code goes here)
+```
+The `#L123-456` means the line number range 123 through 456, and the path/to/Something.blah
+is a path in the project. (If there is no valid path in the project, then you can use
+/dev/null/path.extension for its path.) This is the ONLY valid way to format code blocks, because the Markdown parser
+does not understand the more common ```language syntax, or bare ``` blocks. It only
+understands this path-based syntax, and if the path is missing, then it will error and you will have to do it over again.
+Just to be really clear about this, if you ever find yourself writing three backticks followed by a language name, STOP!
+You have made a mistake. You can only ever put paths after triple backticks!
+
+Based on all the information I've gathered, here's a summary of how this system works:
+1. The README file is loaded into the system.
+2. The system finds the first two headers, including everything in between. In this case, that would be:
+```path/to/README.md#L8-12
+# First Header
+This is the info under the first header.
+## Sub-header
+```
+3. Then the system finds the last header in the README:
+```path/to/README.md#L27-29
+## Last Header
+This is the last header in the README.
+```
+4. Finally, it passes this information on to the next process.
+
+
+In Markdown, hash marks signify headings. For example:
+```/dev/null/example.md#L1-3
+# Level 1 heading
+## Level 2 heading
+### Level 3 heading
+```
+
+Here are examples of ways you must never render code blocks:
+
+In Markdown, hash marks signify headings. For example:
+```
+# Level 1 heading
+## Level 2 heading
+### Level 3 heading
+```
+
+This example is unacceptable because it does not include the path.
+
+In Markdown, hash marks signify headings. For example:
+```markdown
+# Level 1 heading
+## Level 2 heading
+### Level 3 heading
+```
+
+This example is unacceptable because it has the language instead of the path.
+
+In Markdown, hash marks signify headings. For example:
+ # Level 1 heading
+ ## Level 2 heading
+ ### Level 3 heading
+
+This example is unacceptable because it uses indentation to mark the code block
+instead of backticks with a path.
+
+In Markdown, hash marks signify headings. For example:
+```markdown
+/dev/null/example.md#L1-3
+# Level 1 heading
+## Level 2 heading
+### Level 3 heading
+```
+
+This example is unacceptable because the path is in the wrong place. The path must be directly after the opening backticks.
+
+{{#if (gt (len available_tools) 0)}}
+## Fixing Diagnostics
+
+1. Make 1-2 attempts at fixing diagnostics, then defer to the user.
+2. Never simplify code you've written just to solve diagnostics. Complete, mostly correct code is more valuable than perfect code that doesn't solve the problem.
+
+## Debugging
+
+When debugging, only make code changes if you are certain that you can solve the problem.
+Otherwise, follow debugging best practices:
+1. Address the root cause instead of the symptoms.
+2. Add descriptive logging statements and error messages to track variable and code state.
+3. Add test functions and statements to isolate the problem.
+
+{{/if}}
+## Calling External APIs
+
+1. Unless explicitly requested by the user, use the best suited external APIs and packages to solve the task. There is no need to ask the user for permission.
+2. When selecting which version of an API or package to use, choose one that is compatible with the user's dependency management file(s). If no such file exists or if the package is not present, use the latest version that is in your training data.
+3. If an external API requires an API Key, be sure to point this out to the user. Adhere to best security practices (e.g. DO NOT hardcode an API key in a place where it can be exposed)
+
+## System Information
+
+Operating System: {{os}}
+Default Shell: {{shell}}
+
+{{#if (or has_rules has_user_rules)}}
+## User's Custom Instructions
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability{{#if (gt (len available_tools) 0)}} without interfering with the tool use guidelines{{/if}}.
+
+{{#if has_rules}}
+There are project rules that apply to these root directories:
+{{#each worktrees}}
+{{#if rules_file}}
+`{{root_name}}/{{rules_file.path_in_worktree}}`:
+``````
+{{{rules_file.text}}}
+``````
+{{/if}}
+{{/each}}
+{{/if}}
+
+{{#if has_user_rules}}
+The user has specified the following rules that should be applied:
+{{#each user_rules}}
+
+{{#if title}}
+Rules title: {{title}}
+{{/if}}
+``````
+{{{contents}}}
+``````
+{{/each}}
+{{/if}}
+{{/if}}
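To see how `available_tools` gates the tool-specific guidance above, here is a hedged, test-style sketch using only the types from templates.rs; it assumes `ProjectContext::default()` renders cleanly, as the existing `test_system_prompt_template` test already demonstrates.

```rust
let templates = Templates::new();
let project = prompt_store::ProjectContext::default();

// With `grep` advertised, the grep-specific search guidance is included...
let with_grep = SystemPromptTemplate {
    project: &project,
    available_tools: vec!["grep".into()],
}
.render(&templates)
.unwrap();
assert!(with_grep.contains("prefer the `grep` tool"));

// ...and with no tools at all, the whole "Tool Use" section is skipped.
let without_tools = SystemPromptTemplate {
    project: &project,
    available_tools: vec![],
}
.render(&templates)
.unwrap();
assert!(!without_tools.contains("## Tool Use"));
```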
diff --git a/crates/agent2/src/tests/mod.rs b/crates/agent2/src/tests/mod.rs
new file mode 100644
index 0000000000..273da1dae5
--- /dev/null
+++ b/crates/agent2/src/tests/mod.rs
@@ -0,0 +1,846 @@
+use super::*;
+use acp_thread::AgentConnection;
+use agent_client_protocol::{self as acp};
+use anyhow::Result;
+use assistant_tool::ActionLog;
+use client::{Client, UserStore};
+use fs::FakeFs;
+use futures::channel::mpsc::UnboundedReceiver;
+use gpui::{http_client::FakeHttpClient, AppContext, Entity, Task, TestAppContext};
+use indoc::indoc;
+use language_model::{
+ fake_provider::FakeLanguageModel, LanguageModel, LanguageModelCompletionError,
+ LanguageModelCompletionEvent, LanguageModelId, LanguageModelRegistry, LanguageModelToolResult,
+ LanguageModelToolUse, MessageContent, Role, StopReason,
+};
+use project::Project;
+use prompt_store::ProjectContext;
+use reqwest_client::ReqwestClient;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+use smol::stream::StreamExt;
+use std::{cell::RefCell, path::Path, rc::Rc, sync::Arc, time::Duration};
+use util::path;
+
+mod test_tools;
+use test_tools::*;
+
+#[gpui::test]
+#[ignore = "can't run on CI yet"]
+async fn test_echo(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
+
+ let events = thread
+ .update(cx, |thread, cx| {
+ thread.send(model.clone(), "Testing: Reply with 'Hello'", cx)
+ })
+ .collect()
+ .await;
+ thread.update(cx, |thread, _cx| {
+ assert_eq!(
+ thread.messages().last().unwrap().content,
+ vec![MessageContent::Text("Hello".to_string())]
+ );
+ });
+ assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
+}
+
+#[gpui::test]
+#[ignore = "can't run on CI yet"]
+async fn test_thinking(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4Thinking).await;
+
+ let events = thread
+ .update(cx, |thread, cx| {
+ thread.send(
+ model.clone(),
+ indoc! {"
+ Testing:
+
+ Generate a thinking step where you just think the word 'Think',
+ and have your final answer be 'Hello'
+ "},
+ cx,
+ )
+ })
+ .collect()
+ .await;
+ thread.update(cx, |thread, _cx| {
+ assert_eq!(
+ thread.messages().last().unwrap().to_markdown(),
+ indoc! {"
+ ## assistant
+ Think
+ Hello
+ "}
+ )
+ });
+ assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
+}
+
+#[gpui::test]
+async fn test_system_prompt(cx: &mut TestAppContext) {
+ let ThreadTest {
+ model,
+ thread,
+ project_context,
+ ..
+ } = setup(cx, TestModel::Fake).await;
+ let fake_model = model.as_fake();
+
+ project_context.borrow_mut().shell = "test-shell".into();
+ thread.update(cx, |thread, _| thread.add_tool(EchoTool));
+ thread.update(cx, |thread, cx| thread.send(model.clone(), "abc", cx));
+ cx.run_until_parked();
+ let mut pending_completions = fake_model.pending_completions();
+ assert_eq!(
+ pending_completions.len(),
+ 1,
+ "unexpected pending completions: {:?}",
+ pending_completions
+ );
+
+ let pending_completion = pending_completions.pop().unwrap();
+ assert_eq!(pending_completion.messages[0].role, Role::System);
+
+ let system_message = &pending_completion.messages[0];
+ let system_prompt = system_message.content[0].to_str().unwrap();
+ assert!(
+ system_prompt.contains("test-shell"),
+ "unexpected system message: {:?}",
+ system_message
+ );
+ assert!(
+ system_prompt.contains("## Fixing Diagnostics"),
+ "unexpected system message: {:?}",
+ system_message
+ );
+}
+
+#[gpui::test]
+#[ignore = "can't run on CI yet"]
+async fn test_basic_tool_calls(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
+
+ // Test a tool call that's likely to complete *before* streaming stops.
+ let events = thread
+ .update(cx, |thread, cx| {
+ thread.add_tool(EchoTool);
+ thread.send(
+ model.clone(),
+ "Now test the echo tool with 'Hello'. Does it work? Say 'Yes' or 'No'.",
+ cx,
+ )
+ })
+ .collect()
+ .await;
+ assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
+
+ // Test a tool call that's likely to complete *after* streaming stops.
+ let events = thread
+ .update(cx, |thread, cx| {
+ thread.remove_tool(&AgentTool::name(&EchoTool));
+ thread.add_tool(DelayTool);
+ thread.send(
+ model.clone(),
+ "Now call the delay tool with 200ms. When the timer goes off, then you echo the output of the tool.",
+ cx,
+ )
+ })
+ .collect()
+ .await;
+ assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
+ thread.update(cx, |thread, _cx| {
+ assert!(thread
+ .messages()
+ .last()
+ .unwrap()
+ .content
+ .iter()
+ .any(|content| {
+ if let MessageContent::Text(text) = content {
+ text.contains("Ding")
+ } else {
+ false
+ }
+ }));
+ });
+}
+
+#[gpui::test]
+#[ignore = "can't run on CI yet"]
+async fn test_streaming_tool_calls(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
+
+ // Test a tool call that's likely to complete *before* streaming stops.
+ let mut events = thread.update(cx, |thread, cx| {
+ thread.add_tool(WordListTool);
+ thread.send(model.clone(), "Test the word_list tool.", cx)
+ });
+
+ let mut saw_partial_tool_use = false;
+ while let Some(event) = events.next().await {
+ if let Ok(AgentResponseEvent::ToolCall(tool_call)) = event {
+ thread.update(cx, |thread, _cx| {
+ // Look for a tool use in the thread's last message
+ let last_content = thread.messages().last().unwrap().content.last().unwrap();
+ if let MessageContent::ToolUse(last_tool_use) = last_content {
+ assert_eq!(last_tool_use.name.as_ref(), "word_list");
+ if tool_call.status == acp::ToolCallStatus::Pending {
+ if !last_tool_use.is_input_complete
+ && last_tool_use.input.get("g").is_none()
+ {
+ saw_partial_tool_use = true;
+ }
+ } else {
+ last_tool_use
+ .input
+ .get("a")
+ .expect("'a' has streamed because input is now complete");
+ last_tool_use
+ .input
+ .get("g")
+ .expect("'g' has streamed because input is now complete");
+ }
+ } else {
+ panic!("last content should be a tool use");
+ }
+ });
+ }
+ }
+
+ assert!(
+ saw_partial_tool_use,
+ "should see at least one partially streamed tool use in the history"
+ );
+}
+
+#[gpui::test]
+async fn test_tool_authorization(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
+ let fake_model = model.as_fake();
+
+ let mut events = thread.update(cx, |thread, cx| {
+ thread.add_tool(ToolRequiringPermission);
+ thread.send(model.clone(), "abc", cx)
+ });
+ cx.run_until_parked();
+ fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
+ LanguageModelToolUse {
+ id: "tool_id_1".into(),
+ name: ToolRequiringPermission.name().into(),
+ raw_input: "{}".into(),
+ input: json!({}),
+ is_input_complete: true,
+ },
+ ));
+ fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
+ LanguageModelToolUse {
+ id: "tool_id_2".into(),
+ name: ToolRequiringPermission.name().into(),
+ raw_input: "{}".into(),
+ input: json!({}),
+ is_input_complete: true,
+ },
+ ));
+ fake_model.end_last_completion_stream();
+ let tool_call_auth_1 = next_tool_call_authorization(&mut events).await;
+ let tool_call_auth_2 = next_tool_call_authorization(&mut events).await;
+
+ // Approve the first
+ tool_call_auth_1
+ .response
+ .send(tool_call_auth_1.options[1].id.clone())
+ .unwrap();
+ cx.run_until_parked();
+
+ // Reject the second
+ tool_call_auth_2
+ .response
+ .send(tool_call_auth_2.options[2].id.clone())
+ .unwrap();
+ cx.run_until_parked();
+
+ let completion = fake_model.pending_completions().pop().unwrap();
+ let message = completion.messages.last().unwrap();
+ assert_eq!(
+ message.content,
+ vec![
+ MessageContent::ToolResult(LanguageModelToolResult {
+ tool_use_id: tool_call_auth_1.tool_call.id.0.to_string().into(),
+ tool_name: ToolRequiringPermission.name().into(),
+ is_error: false,
+ content: "Allowed".into(),
+ output: Some("Allowed".into())
+ }),
+ MessageContent::ToolResult(LanguageModelToolResult {
+ tool_use_id: tool_call_auth_2.tool_call.id.0.to_string().into(),
+ tool_name: ToolRequiringPermission.name().into(),
+ is_error: true,
+ content: "Permission to run tool denied by user".into(),
+ output: None
+ })
+ ]
+ );
+}
+
+#[gpui::test]
+async fn test_tool_hallucination(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
+ let fake_model = model.as_fake();
+
+ let mut events = thread.update(cx, |thread, cx| thread.send(model.clone(), "abc", cx));
+ cx.run_until_parked();
+ fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
+ LanguageModelToolUse {
+ id: "tool_id_1".into(),
+ name: "nonexistent_tool".into(),
+ raw_input: "{}".into(),
+ input: json!({}),
+ is_input_complete: true,
+ },
+ ));
+ fake_model.end_last_completion_stream();
+
+ let tool_call = expect_tool_call(&mut events).await;
+ assert_eq!(tool_call.title, "nonexistent_tool");
+ assert_eq!(tool_call.status, acp::ToolCallStatus::Pending);
+ let update = expect_tool_call_update_fields(&mut events).await;
+ assert_eq!(update.fields.status, Some(acp::ToolCallStatus::Failed));
+}
+
+async fn expect_tool_call(
+ events: &mut UnboundedReceiver<Result<AgentResponseEvent, LanguageModelCompletionError>>,
+) -> acp::ToolCall {
+ let event = events
+ .next()
+ .await
+ .expect("no tool call authorization event received")
+ .unwrap();
+ match event {
+ AgentResponseEvent::ToolCall(tool_call) => return tool_call,
+ event => {
+ panic!("Unexpected event {event:?}");
+ }
+ }
+}
+
+async fn expect_tool_call_update_fields(
+ events: &mut UnboundedReceiver<Result<AgentResponseEvent, LanguageModelCompletionError>>,
+) -> acp::ToolCallUpdate {
+ let event = events
+ .next()
+ .await
+ .expect("no tool call authorization event received")
+ .unwrap();
+ match event {
+ AgentResponseEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(update)) => {
+ return update
+ }
+ event => {
+ panic!("Unexpected event {event:?}");
+ }
+ }
+}
+
+async fn next_tool_call_authorization(
+ events: &mut UnboundedReceiver<Result<AgentResponseEvent, LanguageModelCompletionError>>,
+) -> ToolCallAuthorization {
+ loop {
+ let event = events
+ .next()
+ .await
+ .expect("no tool call authorization event received")
+ .unwrap();
+ if let AgentResponseEvent::ToolCallAuthorization(tool_call_authorization) = event {
+ let permission_kinds = tool_call_authorization
+ .options
+ .iter()
+ .map(|o| o.kind)
+ .collect::<Vec<_>>();
+ assert_eq!(
+ permission_kinds,
+ vec![
+ acp::PermissionOptionKind::AllowAlways,
+ acp::PermissionOptionKind::AllowOnce,
+ acp::PermissionOptionKind::RejectOnce,
+ ]
+ );
+ return tool_call_authorization;
+ }
+ }
+}
+
+#[gpui::test]
+#[ignore = "can't run on CI yet"]
+async fn test_concurrent_tool_calls(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
+
+ // Test concurrent tool calls with different delay times
+ let events = thread
+ .update(cx, |thread, cx| {
+ thread.add_tool(DelayTool);
+ thread.send(
+ model.clone(),
+ "Call the delay tool twice in the same message. Once with 100ms. Once with 300ms. When both timers are complete, describe the outputs.",
+ cx,
+ )
+ })
+ .collect()
+ .await;
+
+ let stop_reasons = stop_events(events);
+ assert_eq!(stop_reasons, vec![acp::StopReason::EndTurn]);
+
+ thread.update(cx, |thread, _cx| {
+ let last_message = thread.messages().last().unwrap();
+ let text = last_message
+ .content
+ .iter()
+ .filter_map(|content| {
+ if let MessageContent::Text(text) = content {
+ Some(text.as_str())
+ } else {
+ None
+ }
+ })
+ .collect::<String>();
+
+ assert!(text.contains("Ding"));
+ });
+}
+
+#[gpui::test]
+#[ignore = "can't run on CI yet"]
+async fn test_cancellation(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
+
+ let mut events = thread.update(cx, |thread, cx| {
+ thread.add_tool(InfiniteTool);
+ thread.add_tool(EchoTool);
+ thread.send(
+ model.clone(),
+ "Call the echo tool and then call the infinite tool, then explain their output",
+ cx,
+ )
+ });
+
+ // Wait until both tools are called.
+ let mut expected_tools = vec!["Echo", "Infinite Tool"];
+ let mut echo_id = None;
+ let mut echo_completed = false;
+ while let Some(event) = events.next().await {
+ match event.unwrap() {
+ AgentResponseEvent::ToolCall(tool_call) => {
+ assert_eq!(tool_call.title, expected_tools.remove(0));
+ if tool_call.title == "Echo" {
+ echo_id = Some(tool_call.id);
+ }
+ }
+ AgentResponseEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
+ acp::ToolCallUpdate {
+ id,
+ fields:
+ acp::ToolCallUpdateFields {
+ status: Some(acp::ToolCallStatus::Completed),
+ ..
+ },
+ },
+ )) if Some(&id) == echo_id.as_ref() => {
+ echo_completed = true;
+ }
+ _ => {}
+ }
+
+ if expected_tools.is_empty() && echo_completed {
+ break;
+ }
+ }
+
+ // Cancel the current send and ensure that the event stream is closed, even
+ // if one of the tools is still running.
+ thread.update(cx, |thread, _cx| thread.cancel());
+ events.collect::<Vec<_>>().await;
+
+ // Ensure we can still send a new message after cancellation.
+ let events = thread
+ .update(cx, |thread, cx| {
+ thread.send(model.clone(), "Testing: reply with 'Hello' then stop.", cx)
+ })
+ .collect::<Vec<_>>()
+ .await;
+ thread.update(cx, |thread, _cx| {
+ assert_eq!(
+ thread.messages().last().unwrap().content,
+ vec![MessageContent::Text("Hello".to_string())]
+ );
+ });
+ assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
+}
+
+#[gpui::test]
+async fn test_refusal(cx: &mut TestAppContext) {
+ let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
+ let fake_model = model.as_fake();
+
+ let events = thread.update(cx, |thread, cx| thread.send(model.clone(), "Hello", cx));
+ cx.run_until_parked();
+ thread.read_with(cx, |thread, _| {
+ assert_eq!(
+ thread.to_markdown(),
+ indoc! {"
+ ## user
+ Hello
+ "}
+ );
+ });
+
+ fake_model.send_last_completion_stream_text_chunk("Hey!");
+ cx.run_until_parked();
+ thread.read_with(cx, |thread, _| {
+ assert_eq!(
+ thread.to_markdown(),
+ indoc! {"
+ ## user
+ Hello
+ ## assistant
+ Hey!
+ "}
+ );
+ });
+
+ // If the model refuses to continue, the thread should remove all the messages after the last user message.
+ fake_model
+ .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::Refusal));
+ let events = events.collect::>().await;
+ assert_eq!(stop_events(events), vec![acp::StopReason::Refusal]);
+ thread.read_with(cx, |thread, _| {
+ assert_eq!(thread.to_markdown(), "");
+ });
+}
+
+#[gpui::test]
+async fn test_agent_connection(cx: &mut TestAppContext) {
+ cx.update(settings::init);
+ let templates = Templates::new();
+
+ // Initialize language model system with test provider
+ cx.update(|cx| {
+ gpui_tokio::init(cx);
+ client::init_settings(cx);
+
+ let http_client = FakeHttpClient::with_404_response();
+ let clock = Arc::new(clock::FakeSystemClock::new());
+ let client = Client::new(clock, http_client, cx);
+ let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
+ language_model::init(client.clone(), cx);
+ language_models::init(user_store.clone(), client.clone(), cx);
+ Project::init_settings(cx);
+ LanguageModelRegistry::test(cx);
+ });
+ cx.executor().forbid_parking();
+
+ // Create a project for new_thread
+ let fake_fs = cx.update(|cx| fs::FakeFs::new(cx.background_executor().clone()));
+ fake_fs.insert_tree(path!("/test"), json!({})).await;
+ let project = Project::test(fake_fs, [Path::new("/test")], cx).await;
+ let cwd = Path::new("/test");
+
+ // Create agent and connection
+ let agent = NativeAgent::new(project.clone(), templates.clone(), None, &mut cx.to_async())
+ .await
+ .unwrap();
+ let connection = NativeAgentConnection(agent.clone());
+
+ // Test model_selector returns Some
+ let selector_opt = connection.model_selector();
+ assert!(
+ selector_opt.is_some(),
+ "agent2 should always support ModelSelector"
+ );
+ let selector = selector_opt.unwrap();
+
+ // Test list_models
+ let listed_models = cx
+ .update(|cx| {
+ let mut async_cx = cx.to_async();
+ selector.list_models(&mut async_cx)
+ })
+ .await
+ .expect("list_models should succeed");
+ assert!(!listed_models.is_empty(), "should have at least one model");
+ assert_eq!(listed_models[0].id().0, "fake");
+
+ // Create a thread using new_thread
+ let connection_rc = Rc::new(connection.clone());
+ let acp_thread = cx
+ .update(|cx| {
+ let mut async_cx = cx.to_async();
+ connection_rc.new_thread(project, cwd, &mut async_cx)
+ })
+ .await
+ .expect("new_thread should succeed");
+
+ // Get the session_id from the AcpThread
+ let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
+
+ // Test selected_model returns the default
+ let model = cx
+ .update(|cx| {
+ let mut async_cx = cx.to_async();
+ selector.selected_model(&session_id, &mut async_cx)
+ })
+ .await
+ .expect("selected_model should succeed");
+ let model = model.as_fake();
+ assert_eq!(model.id().0, "fake", "should return default model");
+
+ let request = acp_thread.update(cx, |thread, cx| thread.send(vec!["abc".into()], cx));
+ cx.run_until_parked();
+ model.send_last_completion_stream_text_chunk("def");
+ cx.run_until_parked();
+ acp_thread.read_with(cx, |thread, cx| {
+ assert_eq!(
+ thread.to_markdown(cx),
+ indoc! {"
+ ## User
+
+ abc
+
+ ## Assistant
+
+ def
+
+ "}
+ )
+ });
+
+ // Test cancel
+ cx.update(|cx| connection.cancel(&session_id, cx));
+ request.await.expect("prompt should fail gracefully");
+
+ // Ensure that dropping the ACP thread causes the native thread to be
+ // dropped as well.
+ cx.update(|_| drop(acp_thread));
+ let result = cx
+ .update(|cx| {
+ connection.prompt(
+ acp::PromptRequest {
+ session_id: session_id.clone(),
+ prompt: vec!["ghi".into()],
+ },
+ cx,
+ )
+ })
+ .await;
+ assert_eq!(
+ result.as_ref().unwrap_err().to_string(),
+ "Session not found",
+ "unexpected result: {:?}",
+ result
+ );
+}
+
+#[gpui::test]
+async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
+ let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
+ thread.update(cx, |thread, _cx| thread.add_tool(ThinkingTool));
+ let fake_model = model.as_fake();
+
+ let mut events = thread.update(cx, |thread, cx| thread.send(model.clone(), "Think", cx));
+ cx.run_until_parked();
+
+ // Simulate streaming partial input.
+ let input = json!({});
+ fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
+ LanguageModelToolUse {
+ id: "1".into(),
+ name: ThinkingTool.name().into(),
+ raw_input: input.to_string(),
+ input,
+ is_input_complete: false,
+ },
+ ));
+
+ // Input streaming completed
+ let input = json!({ "content": "Thinking hard!" });
+ fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
+ LanguageModelToolUse {
+ id: "1".into(),
+ name: "thinking".into(),
+ raw_input: input.to_string(),
+ input,
+ is_input_complete: true,
+ },
+ ));
+ fake_model.end_last_completion_stream();
+ cx.run_until_parked();
+
+ let tool_call = expect_tool_call(&mut events).await;
+ assert_eq!(
+ tool_call,
+ acp::ToolCall {
+ id: acp::ToolCallId("1".into()),
+ title: "Thinking".into(),
+ kind: acp::ToolKind::Think,
+ status: acp::ToolCallStatus::Pending,
+ content: vec![],
+ locations: vec![],
+ raw_input: Some(json!({})),
+ raw_output: None,
+ }
+ );
+ let update = expect_tool_call_update_fields(&mut events).await;
+ assert_eq!(
+ update,
+ acp::ToolCallUpdate {
+ id: acp::ToolCallId("1".into()),
+ fields: acp::ToolCallUpdateFields {
+ title: Some("Thinking".into()),
+ kind: Some(acp::ToolKind::Think),
+ raw_input: Some(json!({ "content": "Thinking hard!" })),
+ ..Default::default()
+ },
+ }
+ );
+ let update = expect_tool_call_update_fields(&mut events).await;
+ assert_eq!(
+ update,
+ acp::ToolCallUpdate {
+ id: acp::ToolCallId("1".into()),
+ fields: acp::ToolCallUpdateFields {
+ status: Some(acp::ToolCallStatus::InProgress),
+ ..Default::default()
+ },
+ }
+ );
+ let update = expect_tool_call_update_fields(&mut events).await;
+ assert_eq!(
+ update,
+ acp::ToolCallUpdate {
+ id: acp::ToolCallId("1".into()),
+ fields: acp::ToolCallUpdateFields {
+ content: Some(vec!["Thinking hard!".into()]),
+ ..Default::default()
+ },
+ }
+ );
+ let update = expect_tool_call_update_fields(&mut events).await;
+ assert_eq!(
+ update,
+ acp::ToolCallUpdate {
+ id: acp::ToolCallId("1".into()),
+ fields: acp::ToolCallUpdateFields {
+ status: Some(acp::ToolCallStatus::Completed),
+ ..Default::default()
+ },
+ }
+ );
+}
+
+/// Filters out the stop events for asserting against in tests
+fn stop_events(
+ result_events: Vec<Result<AgentResponseEvent, LanguageModelCompletionError>>,
+) -> Vec<acp::StopReason> {
+ result_events
+ .into_iter()
+ .filter_map(|event| match event.unwrap() {
+ AgentResponseEvent::Stop(stop_reason) => Some(stop_reason),
+ _ => None,
+ })
+ .collect()
+}
+
+struct ThreadTest {
+ model: Arc<dyn LanguageModel>,
+ thread: Entity<Thread>,
+ project_context: Rc<RefCell<ProjectContext>>,
+}
+
+enum TestModel {
+ Sonnet4,
+ Sonnet4Thinking,
+ Fake,
+}
+
+impl TestModel {
+ fn id(&self) -> LanguageModelId {
+ match self {
+ TestModel::Sonnet4 => LanguageModelId("claude-sonnet-4-latest".into()),
+ TestModel::Sonnet4Thinking => LanguageModelId("claude-sonnet-4-thinking-latest".into()),
+ TestModel::Fake => unreachable!(),
+ }
+ }
+}
+
+async fn setup(cx: &mut TestAppContext, model: TestModel) -> ThreadTest {
+ cx.executor().allow_parking();
+ cx.update(|cx| {
+ settings::init(cx);
+ Project::init_settings(cx);
+ });
+ let templates = Templates::new();
+
+ let fs = FakeFs::new(cx.background_executor.clone());
+ fs.insert_tree(path!("/test"), json!({})).await;
+ let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
+
+ let model = cx
+ .update(|cx| {
+ gpui_tokio::init(cx);
+ let http_client = ReqwestClient::user_agent("agent tests").unwrap();
+ cx.set_http_client(Arc::new(http_client));
+
+ client::init_settings(cx);
+ let client = Client::production(cx);
+ let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
+ language_model::init(client.clone(), cx);
+ language_models::init(user_store.clone(), client.clone(), cx);
+
+ if let TestModel::Fake = model {
+ Task::ready(Arc::new(FakeLanguageModel::default()) as Arc<_>)
+ } else {
+ let model_id = model.id();
+ let models = LanguageModelRegistry::read_global(cx);
+ let model = models
+ .available_models(cx)
+ .find(|model| model.id() == model_id)
+ .unwrap();
+
+ let provider = models.provider(&model.provider_id()).unwrap();
+ let authenticated = provider.authenticate(cx);
+
+ cx.spawn(async move |_cx| {
+ authenticated.await.unwrap();
+ model
+ })
+ }
+ })
+ .await;
+
+ let project_context = Rc::new(RefCell::new(ProjectContext::default()));
+ let action_log = cx.new(|_| ActionLog::new(project.clone()));
+ let thread = cx.new(|_| {
+ Thread::new(
+ project,
+ project_context.clone(),
+ action_log,
+ templates,
+ model.clone(),
+ )
+ });
+ ThreadTest {
+ model,
+ thread,
+ project_context,
+ }
+}
+
+#[cfg(test)]
+#[ctor::ctor]
+fn init_logger() {
+ if std::env::var("RUST_LOG").is_ok() {
+ env_logger::init();
+ }
+}
diff --git a/crates/agent2/src/tests/test_tools.rs b/crates/agent2/src/tests/test_tools.rs
new file mode 100644
index 0000000000..d06614f3fe
--- /dev/null
+++ b/crates/agent2/src/tests/test_tools.rs
@@ -0,0 +1,201 @@
+use super::*;
+use anyhow::Result;
+use gpui::{App, SharedString, Task};
+use std::future;
+
+/// A tool that echoes its input
+#[derive(JsonSchema, Serialize, Deserialize)]
+pub struct EchoToolInput {
+ /// The text to echo.
+ text: String,
+}
+
+pub struct EchoTool;
+
+impl AgentTool for EchoTool {
+ type Input = EchoToolInput;
+ type Output = String;
+
+ fn name(&self) -> SharedString {
+ "echo".into()
+ }
+
+ fn kind(&self) -> acp::ToolKind {
+ acp::ToolKind::Other
+ }
+
+ fn initial_title(&self, _input: Result<Self::Input>) -> SharedString {
+ "Echo".into()
+ }
+
+ fn run(
+ self: Arc,
+ input: Self::Input,
+ _event_stream: ToolCallEventStream,
+ _cx: &mut App,
+ ) -> Task<Result<Self::Output>> {
+ Task::ready(Ok(input.text))
+ }
+}
+
+/// A tool that waits for a specified delay
+#[derive(JsonSchema, Serialize, Deserialize)]
+pub struct DelayToolInput {
+ /// The delay in milliseconds.
+ ms: u64,
+}
+
+pub struct DelayTool;
+
+impl AgentTool for DelayTool {
+ type Input = DelayToolInput;
+ type Output = String;
+
+ fn name(&self) -> SharedString {
+ "delay".into()
+ }
+
+ fn initial_title(&self, input: Result<Self::Input>) -> SharedString {
+ if let Ok(input) = input {
+ format!("Delay {}ms", input.ms).into()
+ } else {
+ "Delay".into()
+ }
+ }
+
+ fn kind(&self) -> acp::ToolKind {
+ acp::ToolKind::Other
+ }
+
+ fn run(
+ self: Arc,
+ input: Self::Input,
+ _event_stream: ToolCallEventStream,
+ cx: &mut App,
+ ) -> Task<Result<Self::Output>>
+ where
+ Self: Sized,
+ {
+ cx.foreground_executor().spawn(async move {
+ smol::Timer::after(Duration::from_millis(input.ms)).await;
+ Ok("Ding".to_string())
+ })
+ }
+}
+
+#[derive(JsonSchema, Serialize, Deserialize)]
+pub struct ToolRequiringPermissionInput {}
+
+pub struct ToolRequiringPermission;
+
+impl AgentTool for ToolRequiringPermission {
+ type Input = ToolRequiringPermissionInput;
+ type Output = String;
+
+ fn name(&self) -> SharedString {
+ "tool_requiring_permission".into()
+ }
+
+ fn kind(&self) -> acp::ToolKind {
+ acp::ToolKind::Other
+ }
+
+ fn initial_title(&self, _input: Result<Self::Input>) -> SharedString {
+ "This tool requires permission".into()
+ }
+
+ fn run(
+ self: Arc,
+ _input: Self::Input,
+ event_stream: ToolCallEventStream,
+ cx: &mut App,
+ ) -> Task<Result<Self::Output>> {
+ let auth_check = event_stream.authorize("Authorize?".into());
+ cx.foreground_executor().spawn(async move {
+ auth_check.await?;
+ Ok("Allowed".to_string())
+ })
+ }
+}
+
+#[derive(JsonSchema, Serialize, Deserialize)]
+pub struct InfiniteToolInput {}
+
+pub struct InfiniteTool;
+
+impl AgentTool for InfiniteTool {
+ type Input = InfiniteToolInput;
+ type Output = String;
+
+ fn name(&self) -> SharedString {
+ "infinite".into()
+ }
+
+ fn kind(&self) -> acp::ToolKind {
+ acp::ToolKind::Other
+ }
+
+ fn initial_title(&self, _input: Result<Self::Input>) -> SharedString {
+ "Infinite Tool".into()
+ }
+
+ fn run(
+ self: Arc,
+ _input: Self::Input,
+ _event_stream: ToolCallEventStream,
+ cx: &mut App,
+ ) -> Task<Result<Self::Output>> {
+ cx.foreground_executor().spawn(async move {
+ future::pending::<()>().await;
+ unreachable!()
+ })
+ }
+}
+
+/// A tool that takes an object with a map from letters to random words starting with that letter.
+/// All fields are required! Pass a word for every letter!
+#[derive(JsonSchema, Serialize, Deserialize)]
+pub struct WordListInput {
+ /// Provide a random word that starts with A.
+ a: Option<String>,
+ /// Provide a random word that starts with B.
+ b: Option<String>,
+ /// Provide a random word that starts with C.
+ c: Option<String>,
+ /// Provide a random word that starts with D.
+ d: Option<String>,
+ /// Provide a random word that starts with E.
+ e: Option<String>,
+ /// Provide a random word that starts with F.
+ f: Option<String>,
+ /// Provide a random word that starts with G.
+ g: Option<String>,
+}
+
+pub struct WordListTool;
+
+impl AgentTool for WordListTool {
+ type Input = WordListInput;
+ type Output = String;
+
+ fn name(&self) -> SharedString {
+ "word_list".into()
+ }
+
+ fn kind(&self) -> acp::ToolKind {
+ acp::ToolKind::Other
+ }
+
+ fn initial_title(&self, _input: Result<Self::Input>) -> SharedString {
+ "List of random words".into()
+ }
+
+ fn run(
+ self: Arc,
+ _input: Self::Input,
+ _event_stream: ToolCallEventStream,
+ _cx: &mut App,
+ ) -> Task<Result<Self::Output>> {
+ Task::ready(Ok("ok".to_string()))
+ }
+}
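For reference, a hedged sketch of the completed arguments the model is expected to stream for `word_list` (this is the shape that `test_streaming_tool_calls` in tests/mod.rs watches arrive key by key; the concrete words are made up):

```rust
use serde_json::json;

// A fully streamed `word_list` call. While input is still streaming, later
// keys such as "g" are absent, which is exactly what the partial-tool-use
// assertion in tests/mod.rs relies on.
let complete_input = json!({
    "a": "apple",
    "b": "banana",
    "c": "cherry",
    "d": "date",
    "e": "ember",
    "f": "fig",
    "g": "grove",
});
let parsed: WordListInput = serde_json::from_value(complete_input).unwrap();
assert_eq!(parsed.g.as_deref(), Some("grove"));
```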
diff --git a/crates/agent2/src/thread.rs b/crates/agent2/src/thread.rs
new file mode 100644
index 0000000000..f664e0f5d2
--- /dev/null
+++ b/crates/agent2/src/thread.rs
@@ -0,0 +1,1026 @@
+use crate::{SystemPromptTemplate, Template, Templates};
+use acp_thread::Diff;
+use agent_client_protocol as acp;
+use anyhow::{anyhow, Context as _, Result};
+use assistant_tool::{adapt_schema_to_format, ActionLog};
+use cloud_llm_client::{CompletionIntent, CompletionMode};
+use collections::HashMap;
+use futures::{
+ channel::{mpsc, oneshot},
+ stream::FuturesUnordered,
+};
+use gpui::{App, Context, Entity, SharedString, Task};
+use language_model::{
+ LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
+ LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
+ LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
+ LanguageModelToolUse, LanguageModelToolUseId, MessageContent, Role, StopReason,
+};
+use log;
+use project::Project;
+use prompt_store::ProjectContext;
+use schemars::{JsonSchema, Schema};
+use serde::{Deserialize, Serialize};
+use smol::stream::StreamExt;
+use std::{cell::RefCell, collections::BTreeMap, fmt::Write, future::Future, rc::Rc, sync::Arc};
+use util::{markdown::MarkdownCodeBlock, ResultExt};
+
+#[derive(Debug, Clone)]
+pub struct AgentMessage {
+ pub role: Role,
+ pub content: Vec<MessageContent>,
+}
+
+impl AgentMessage {
+ pub fn to_markdown(&self) -> String {
+ let mut markdown = format!("## {}\n", self.role);
+
+ for content in &self.content {
+ match content {
+ MessageContent::Text(text) => {
+ markdown.push_str(text);
+ markdown.push('\n');
+ }
+ MessageContent::Thinking { text, .. } => {
+ markdown.push_str("");
+ markdown.push_str(text);
+ markdown.push_str("\n");
+ }
+ MessageContent::RedactedThinking(_) => markdown.push_str("\n"),
+ MessageContent::Image(_) => {
+ markdown.push_str("\n");
+ }
+ MessageContent::ToolUse(tool_use) => {
+ markdown.push_str(&format!(
+ "**Tool Use**: {} (ID: {})\n",
+ tool_use.name, tool_use.id
+ ));
+ markdown.push_str(&format!(
+ "{}\n",
+ MarkdownCodeBlock {
+ tag: "json",
+ text: &format!("{:#}", tool_use.input)
+ }
+ ));
+ }
+ MessageContent::ToolResult(tool_result) => {
+ markdown.push_str(&format!(
+ "**Tool Result**: {} (ID: {})\n\n",
+ tool_result.tool_name, tool_result.tool_use_id
+ ));
+ if tool_result.is_error {
+ markdown.push_str("**ERROR:**\n");
+ }
+
+ match &tool_result.content {
+ LanguageModelToolResultContent::Text(text) => {
+ writeln!(markdown, "{text}\n").ok();
+ }
+ LanguageModelToolResultContent::Image(_) => {
+ writeln!(markdown, "<image>\n").ok();
+ }
+ }
+
+ if let Some(output) = tool_result.output.as_ref() {
+ writeln!(
+ markdown,
+ "**Debug Output**:\n\n```json\n{}\n```\n",
+ serde_json::to_string_pretty(output).unwrap()
+ )
+ .unwrap();
+ }
+ }
+ }
+ }
+
+ markdown
+ }
+}
+
+#[derive(Debug)]
+pub enum AgentResponseEvent {
+ Text(String),
+ Thinking(String),
+ ToolCall(acp::ToolCall),
+ ToolCallUpdate(acp_thread::ToolCallUpdate),
+ ToolCallAuthorization(ToolCallAuthorization),
+ Stop(acp::StopReason),
+}
+
+#[derive(Debug)]
+pub struct ToolCallAuthorization {
+ pub tool_call: acp::ToolCall,
+ pub options: Vec<acp::PermissionOption>,
+ pub response: oneshot::Sender<acp::PermissionOptionId>,
+}
+
+pub struct Thread {
+ messages: Vec<AgentMessage>,
+ completion_mode: CompletionMode,
+ /// Holds the task that handles agent interaction until the end of the turn.
+ /// Survives across multiple requests as the model performs tool calls and
+ /// we run tools and report their results.
+ running_turn: Option<Task<()>>,
+ pending_tool_uses: HashMap<LanguageModelToolUseId, LanguageModelToolUse>,
+ tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
+ project_context: Rc<RefCell<ProjectContext>>,
+ templates: Arc<Templates>,
+ pub selected_model: Arc<dyn LanguageModel>,
+ project: Entity<Project>,
+ action_log: Entity<ActionLog>,
+}
+
+impl Thread {
+ pub fn new(
+ project: Entity<Project>,
+ project_context: Rc<RefCell<ProjectContext>>,
+ action_log: Entity<ActionLog>,
+ templates: Arc<Templates>,
+ default_model: Arc<dyn LanguageModel>,
+ ) -> Self {
+ Self {
+ messages: Vec::new(),
+ completion_mode: CompletionMode::Normal,
+ running_turn: None,
+ pending_tool_uses: HashMap::default(),
+ tools: BTreeMap::default(),
+ project_context,
+ templates,
+ selected_model: default_model,
+ project,
+ action_log,
+ }
+ }
+
+ pub fn project(&self) -> &Entity<Project> {
+ &self.project
+ }
+
+ pub fn action_log(&self) -> &Entity<ActionLog> {
+ &self.action_log
+ }
+
+ pub fn set_mode(&mut self, mode: CompletionMode) {
+ self.completion_mode = mode;
+ }
+
+ pub fn messages(&self) -> &[AgentMessage] {
+ &self.messages
+ }
+
+ pub fn add_tool(&mut self, tool: impl AgentTool) {
+ self.tools.insert(tool.name(), tool.erase());
+ }
+
+ pub fn remove_tool(&mut self, name: &str) -> bool {
+ self.tools.remove(name).is_some()
+ }
+
+ pub fn cancel(&mut self) {
+ self.running_turn.take();
+
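+ // Report every still-pending tool use as a canceled, error result so that
+ // each tool use in the transcript has a matching tool result on the next request.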
+ let tool_results = self
+ .pending_tool_uses
+ .drain()
+ .map(|(tool_use_id, tool_use)| {
+ MessageContent::ToolResult(LanguageModelToolResult {
+ tool_use_id,
+ tool_name: tool_use.name.clone(),
+ is_error: true,
+ content: LanguageModelToolResultContent::Text("Tool canceled by user".into()),
+ output: None,
+ })
+ })
+ .collect::<Vec<_>>();
+ self.last_user_message().content.extend(tool_results);
+ }
+
+ /// Sending a message results in the model streaming a response, which could include tool calls.
+ /// After requesting tool calls, the model stops and waits for the outstanding tool calls to be completed and their results sent.
+ /// The returned channel reports every stop the model makes before it errors or ends its turn.
+ pub fn send(
+ &mut self,
+ model: Arc<dyn LanguageModel>,
+ content: impl Into<MessageContent>,
+ cx: &mut Context<Self>,
+ ) -> mpsc::UnboundedReceiver<Result<AgentResponseEvent, LanguageModelCompletionError>> {
+ let content = content.into();
+ log::info!("Thread::send called with model: {:?}", model.name());
+ log::debug!("Thread::send content: {:?}", content);
+
+ cx.notify();
+ let (events_tx, events_rx) =
+ mpsc::unbounded::<Result<AgentResponseEvent, LanguageModelCompletionError>>();
+ let event_stream = AgentResponseEventStream(events_tx);
+
+ let user_message_ix = self.messages.len();
+ self.messages.push(AgentMessage {
+ role: Role::User,
+ content: vec![content],
+ });
+ log::info!("Total messages in thread: {}", self.messages.len());
+ self.running_turn = Some(cx.spawn(async move |thread, cx| {
+ log::info!("Starting agent turn execution");
+ let turn_result = async {
+ // Perform one request, then keep looping if the model makes tool calls.
+ let mut completion_intent = CompletionIntent::UserPrompt;
+ 'outer: loop {
+ log::debug!(
+ "Building completion request with intent: {:?}",
+ completion_intent
+ );
+ let request = thread.update(cx, |thread, cx| {
+ thread.build_completion_request(completion_intent, cx)
+ })?;
+
+ // println!(
+ // "request: {}",
+ // serde_json::to_string_pretty(&request).unwrap()
+ // );
+
+ // Stream events, appending to messages and collecting up tool uses.
+ log::info!("Calling model.stream_completion");
+ let mut events = model.stream_completion(request, cx).await?;
+ log::debug!("Stream completion started successfully");
+ let mut tool_uses = FuturesUnordered::new();
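+ // Tool calls run concurrently; FuturesUnordered yields each result as soon as it completes.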
+ while let Some(event) = events.next().await {
+ match event {
+ Ok(LanguageModelCompletionEvent::Stop(reason)) => {
+ event_stream.send_stop(reason);
+ if reason == StopReason::Refusal {
+ thread.update(cx, |thread, _cx| {
+ thread.messages.truncate(user_message_ix);
+ })?;
+ break 'outer;
+ }
+ }
+ Ok(event) => {
+ log::trace!("Received completion event: {:?}", event);
+ thread
+ .update(cx, |thread, cx| {
+ tool_uses.extend(thread.handle_streamed_completion_event(
+ event,
+ &event_stream,
+ cx,
+ ));
+ })
+ .ok();
+ }
+ Err(error) => {
+ log::error!("Error in completion stream: {:?}", error);
+ event_stream.send_error(error);
+ break;
+ }
+ }
+ }
+
+ // If there are no tool uses, the turn is done.
+ if tool_uses.is_empty() {
+ log::info!("No tool uses found, completing turn");
+ break;
+ }
+ log::info!("Found {} tool uses to execute", tool_uses.len());
+
+ // As tool results trickle in, insert them in the last user
+ // message so that they can be sent on the next tick of the
+ // agentic loop.
+ while let Some(tool_result) = tool_uses.next().await {
+ log::info!("Tool finished {:?}", tool_result);
+
+ event_stream.update_tool_call_fields(
+ &tool_result.tool_use_id,
+ acp::ToolCallUpdateFields {
+ status: Some(if tool_result.is_error {
+ acp::ToolCallStatus::Failed
+ } else {
+ acp::ToolCallStatus::Completed
+ }),
+ ..Default::default()
+ },
+ );
+ thread
+ .update(cx, |thread, _cx| {
+ thread.pending_tool_uses.remove(&tool_result.tool_use_id);
+ thread
+ .last_user_message()
+ .content
+ .push(MessageContent::ToolResult(tool_result));
+ })
+ .ok();
+ }
+
+ completion_intent = CompletionIntent::ToolResults;
+ }
+
+ Ok(())
+ }
+ .await;
+
+ if let Err(error) = turn_result {
+ log::error!("Turn execution failed: {:?}", error);
+ event_stream.send_error(error);
+ } else {
+ log::info!("Turn execution completed successfully");
+ }
+ }));
+ events_rx
+ }
+
+ pub fn build_system_message(&self) -> AgentMessage {
+ log::debug!("Building system message");
+ let prompt = SystemPromptTemplate {
+ project: &self.project_context.borrow(),
+ available_tools: self.tools.keys().cloned().collect(),
+ }
+ .render(&self.templates)
+ .context("failed to build system prompt")
+ .expect("Invalid template");
+ log::debug!("System message built");
+ AgentMessage {
+ role: Role::System,
+ content: vec![prompt.into()],
+ }
+ }
+
+ /// A helper method that's called on every streamed completion event.
+ /// Returns an optional tool result task; when it resolves, the main agentic
+ /// loop in `send` sends the result back to the model.
+ fn handle_streamed_completion_event(
+ &mut self,
+ event: LanguageModelCompletionEvent,
+ event_stream: &AgentResponseEventStream,
+ cx: &mut Context<Self>,
+ ) -> Option<Task<LanguageModelToolResult>> {
+ log::trace!("Handling streamed completion event: {:?}", event);
+ use LanguageModelCompletionEvent::*;
+
+ match event {
+ StartMessage { .. } => {
+ self.messages.push(AgentMessage {
+ role: Role::Assistant,
+ content: Vec::new(),
+ });
+ }
+ Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
+ Thinking { text, signature } => {
+ self.handle_thinking_event(text, signature, event_stream, cx)
+ }
+ RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
+ ToolUse(tool_use) => {
+ return self.handle_tool_use_event(tool_use, event_stream, cx);
+ }
+ ToolUseJsonParseError {
+ id,
+ tool_name,
+ raw_input,
+ json_parse_error,
+ } => {
+ return Some(Task::ready(self.handle_tool_use_json_parse_error_event(
+ id,
+ tool_name,
+ raw_input,
+ json_parse_error,
+ )));
+ }
+ UsageUpdate(_) | StatusUpdate(_) => {}
+ Stop(_) => unreachable!(),
+ }
+
+ None
+ }
+
+ fn handle_text_event(
+ &mut self,
+ new_text: String,
+ events_stream: &AgentResponseEventStream,
+ cx: &mut Context<Self>,
+ ) {
+ events_stream.send_text(&new_text);
+
+ let last_message = self.last_assistant_message();
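+ // Coalesce consecutive text chunks into a single content block instead of one per streamed event.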
+ if let Some(MessageContent::Text(text)) = last_message.content.last_mut() {
+ text.push_str(&new_text);
+ } else {
+ last_message.content.push(MessageContent::Text(new_text));
+ }
+
+ cx.notify();
+ }
+
+ fn handle_thinking_event(
+ &mut self,
+ new_text: String,
+ new_signature: Option<String>,
+ event_stream: &AgentResponseEventStream,
+ cx: &mut Context<Self>,
+ ) {
+ event_stream.send_thinking(&new_text);
+
+ let last_message = self.last_assistant_message();
+ if let Some(MessageContent::Thinking { text, signature }) = last_message.content.last_mut()
+ {
+ text.push_str(&new_text);
+ *signature = new_signature.or(signature.take());
+ } else {
+ last_message.content.push(MessageContent::Thinking {
+ text: new_text,
+ signature: new_signature,
+ });
+ }
+
+ cx.notify();
+ }
+
+ fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
+ let last_message = self.last_assistant_message();
+ last_message
+ .content
+ .push(MessageContent::RedactedThinking(data));
+ cx.notify();
+ }
+
+ fn handle_tool_use_event(
+ &mut self,
+ tool_use: LanguageModelToolUse,
+ event_stream: &AgentResponseEventStream,
+ cx: &mut Context<Self>,
+ ) -> Option<Task<LanguageModelToolResult>> {
+ cx.notify();
+
+ let tool = self.tools.get(tool_use.name.as_ref()).cloned();
+
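+ // Track the tool use so `cancel` can report it as canceled if the turn is interrupted.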
+ self.pending_tool_uses
+ .insert(tool_use.id.clone(), tool_use.clone());
+ let last_message = self.last_assistant_message();
+
+ // Ensure the last message ends in the current tool use
+ let push_new_tool_use = last_message.content.last_mut().map_or(true, |content| {
+ if let MessageContent::ToolUse(last_tool_use) = content {
+ if last_tool_use.id == tool_use.id {
+ *last_tool_use = tool_use.clone();
+ false
+ } else {
+ true
+ }
+ } else {
+ true
+ }
+ });
+
+ let mut title = SharedString::from(&tool_use.name);
+ let mut kind = acp::ToolKind::Other;
+ if let Some(tool) = tool.as_ref() {
+ title = tool.initial_title(tool_use.input.clone());
+ kind = tool.kind();
+ }
+
+ if push_new_tool_use {
+ event_stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
+ last_message
+ .content
+ .push(MessageContent::ToolUse(tool_use.clone()));
+ } else {
+ event_stream.update_tool_call_fields(
+ &tool_use.id,
+ acp::ToolCallUpdateFields {
+ title: Some(title.into()),
+ kind: Some(kind),
+ raw_input: Some(tool_use.input.clone()),
+ ..Default::default()
+ },
+ );
+ }
+
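+ // Tool input streams in incrementally; wait for the complete input before running the tool.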
+ if !tool_use.is_input_complete {
+ return None;
+ }
+
+ let Some(tool) = tool else {
+ let content = format!("No tool named {} exists", tool_use.name);
+ return Some(Task::ready(LanguageModelToolResult {
+ content: LanguageModelToolResultContent::Text(Arc::from(content)),
+ tool_use_id: tool_use.id,
+ tool_name: tool_use.name,
+ is_error: true,
+ output: None,
+ }));
+ };
+
+ let tool_event_stream =
+ ToolCallEventStream::new(&tool_use, tool.kind(), event_stream.clone());
+ tool_event_stream.update_fields(acp::ToolCallUpdateFields {
+ status: Some(acp::ToolCallStatus::InProgress),
+ ..Default::default()
+ });
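+ // Capture image support up front; the spawned task below can't borrow `self`.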
+ let supports_images = self.selected_model.supports_images();
+ let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
+ Some(cx.foreground_executor().spawn(async move {
+ let tool_result = tool_result.await.and_then(|output| {
+ if let LanguageModelToolResultContent::Image(_) = &output.llm_output {
+ if !supports_images {
+ return Err(anyhow!(
+ "Attempted to read an image, but this model doesn't support it.",
+ ));
+ }
+ }
+ Ok(output)
+ });
+
+ match tool_result {
+ Ok(output) => LanguageModelToolResult {
+ tool_use_id: tool_use.id,
+ tool_name: tool_use.name,
+ is_error: false,
+ content: output.llm_output,
+ output: Some(output.raw_output),
+ },
+ Err(error) => LanguageModelToolResult {
+ tool_use_id: tool_use.id,
+ tool_name: tool_use.name,
+ is_error: true,
+ content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
+ output: None,
+ },
+ }
+ }))
+ }
+
+ fn handle_tool_use_json_parse_error_event(
+ &mut self,
+ tool_use_id: LanguageModelToolUseId,
+ tool_name: Arc<str>,
+ raw_input: Arc<str>,
+ json_parse_error: String,
+ ) -> LanguageModelToolResult {
+ let tool_output = format!("Error parsing input JSON: {json_parse_error}");
+ LanguageModelToolResult {
+ tool_use_id,
+ tool_name,
+ is_error: true,
+ content: LanguageModelToolResultContent::Text(tool_output.into()),
+ output: Some(serde_json::Value::String(raw_input.to_string())),
+ }
+ }
+
+ /// Guarantees the last message is from the assistant and returns a mutable reference.
+ fn last_assistant_message(&mut self) -> &mut AgentMessage {
+ if self
+ .messages
+ .last()
+ .map_or(true, |m| m.role != Role::Assistant)
+ {
+ self.messages.push(AgentMessage {
+ role: Role::Assistant,
+ content: Vec::new(),
+ });
+ }
+ self.messages.last_mut().unwrap()
+ }
+
+ /// Guarantees the last message is from the user and returns a mutable reference.
+ fn last_user_message(&mut self) -> &mut AgentMessage {
+ if self.messages.last().map_or(true, |m| m.role != Role::User) {
+ self.messages.push(AgentMessage {
+ role: Role::User,
+ content: Vec::new(),
+ });
+ }
+ self.messages.last_mut().unwrap()
+ }
+
+ pub(crate) fn build_completion_request(
+ &self,
+ completion_intent: CompletionIntent,
+ cx: &mut App,
+ ) -> LanguageModelRequest {
+ log::debug!("Building completion request");
+ log::debug!("Completion intent: {:?}", completion_intent);
+ log::debug!("Completion mode: {:?}", self.completion_mode);
+
+ let messages = self.build_request_messages();
+ log::info!("Request will include {} messages", messages.len());
+
+ let tools: Vec<LanguageModelRequestTool> = self
+ .tools
+ .values()
+ .filter_map(|tool| {
+ let tool_name = tool.name().to_string();
+ log::trace!("Including tool: {}", tool_name);
+ Some(LanguageModelRequestTool {
+ name: tool_name,
+ description: tool.description(cx).to_string(),
+ input_schema: tool
+ .input_schema(self.selected_model.tool_input_format())
+ .log_err()?,
+ })
+ })
+ .collect();
+
+ log::info!("Request includes {} tools", tools.len());
+
+ let request = LanguageModelRequest {
+ thread_id: None,
+ prompt_id: None,
+ intent: Some(completion_intent),
+ mode: Some(self.completion_mode),
+ messages,
+ tools,
+ tool_choice: None,
+ stop: Vec::new(),
+ temperature: None,
+ thinking_allowed: true,
+ };
+
+ log::debug!("Completion request built successfully");
+ request
+ }
+
+ fn build_request_messages(&self) -> Vec<LanguageModelRequestMessage> {
+ log::trace!(
+ "Building request messages from {} thread messages",
+ self.messages.len()
+ );
+
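+ // The system prompt is rebuilt on every request so it reflects the current project context and tool set.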
+ let messages = Some(self.build_system_message())
+ .iter()
+ .chain(self.messages.iter())
+ .map(|message| {
+ log::trace!(
+ " - {} message with {} content items",
+ match message.role {
+ Role::System => "System",
+ Role::User => "User",
+ Role::Assistant => "Assistant",
+ },
+ message.content.len()
+ );
+ LanguageModelRequestMessage {
+ role: message.role,
+ content: message.content.clone(),
+ cache: false,
+ }
+ })
+ .collect();
+ messages
+ }
+
+ pub fn to_markdown(&self) -> String {
+ let mut markdown = String::new();
+ for message in &self.messages {
+ markdown.push_str(&message.to_markdown());
+ }
+ markdown
+ }
+}
+
+pub trait AgentTool
+where
+ Self: 'static + Sized,
+{
+ type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
+ type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
+
+ fn name(&self) -> SharedString;
+
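+ // By default, the description is taken from the doc comment on the input type,
+ // which schemars exposes as the schema's "description" field.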
+ fn description(&self, _cx: &mut App) -> SharedString {
+ let schema = schemars::schema_for!(Self::Input);
+ SharedString::new(
+ schema
+ .get("description")
+ .and_then(|description| description.as_str())
+ .unwrap_or_default(),
+ )
+ }
+
+ fn kind(&self) -> acp::ToolKind;
+
+ /// The initial tool title to display. Can be updated during the tool run.
+ fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString;
+
+ /// Returns the JSON schema that describes the tool's input.
+ fn input_schema(&self) -> Schema {
+ schemars::schema_for!(Self::Input)
+ }
+
+ /// Runs the tool with the provided input.
+ fn run(
+ self: Arc<Self>,
+ input: Self::Input,
+ event_stream: ToolCallEventStream,
+ cx: &mut App,
+ ) -> Task<Result<Self::Output>>;
+
+ fn erase(self) -> Arc<dyn AnyAgentTool> {
+ Arc::new(Erased(Arc::new(self)))
+ }
+}
+
+pub struct Erased<T>(T);
+
+pub struct AgentToolOutput {
+ llm_output: LanguageModelToolResultContent,
+ raw_output: serde_json::Value,
+}
+
+pub trait AnyAgentTool {
+ fn name(&self) -> SharedString;
+ fn description(&self, cx: &mut App) -> SharedString;
+ fn kind(&self) -> acp::ToolKind;
+ fn initial_title(&self, input: serde_json::Value) -> SharedString;
+ fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
+ fn run(
+ self: Arc<Self>,
+ input: serde_json::Value,
+ event_stream: ToolCallEventStream,
+ cx: &mut App,
+ ) -> Task<Result<AgentToolOutput>>;
+}
+
+impl<T> AnyAgentTool for Erased<Arc<T>>
+where
+ T: AgentTool,
+{
+ fn name(&self) -> SharedString {
+ self.0.name()
+ }
+
+ fn description(&self, cx: &mut App) -> SharedString {
+ self.0.description(cx)
+ }
+
+ fn kind(&self) -> agent_client_protocol::ToolKind {
+ self.0.kind()
+ }
+
+ fn initial_title(&self, input: serde_json::Value) -> SharedString {
+ let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
+ self.0.initial_title(parsed_input)
+ }
+
+ fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
+ let mut json = serde_json::to_value(self.0.input_schema())?;
+ adapt_schema_to_format(&mut json, format)?;
+ Ok(json)
+ }
+
+ fn run(
+ self: Arc<Self>,
+ input: serde_json::Value,
+ event_stream: ToolCallEventStream,
+ cx: &mut App,
+ ) -> Task<Result<AgentToolOutput>> {
+ cx.spawn(async move |cx| {
+ let input = serde_json::from_value(input)?;
+ let output = cx
+ .update(|cx| self.0.clone().run(input, event_stream, cx))?
+ .await?;
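+ // Keep the typed output's JSON form alongside the LLM-facing content so it can be inspected later.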
+ let raw_output = serde_json::to_value(&output)?;
+ Ok(AgentToolOutput {
+ llm_output: output.into(),
+ raw_output,
+ })
+ })
+ }
+}
+
+#[derive(Clone)]
+struct AgentResponseEventStream(
+ mpsc::UnboundedSender<Result<AgentResponseEvent, LanguageModelCompletionError>>,
+);
+
+impl AgentResponseEventStream {
+ fn send_text(&self, text: &str) {
+ self.0
+ .unbounded_send(Ok(AgentResponseEvent::Text(text.to_string())))
+ .ok();
+ }
+
+ fn send_thinking(&self, text: &str) {
+ self.0
+ .unbounded_send(Ok(AgentResponseEvent::Thinking(text.to_string())))
+ .ok();
+ }
+
+ fn authorize_tool_call(
+ &self,
+ id: &LanguageModelToolUseId,
+ title: String,
+ kind: acp::ToolKind,
+ input: serde_json::Value,
+ ) -> impl use<> + Future