Reuse conversation cache when streaming edits (#30245)
Release Notes:

- Improved latency when the agent applies edits.
parent 032022e37b
commit 9f6809a28d
50 changed files with 847 additions and 21557 deletions
@@ -4,7 +4,7 @@ use anyhow::{Result, anyhow, bail};
 use assistant_tool::{ActionLog, Tool, ToolResult, ToolSource};
 use context_server::{ContextServerId, types};
 use gpui::{AnyWindowHandle, App, Entity, Task};
-use language_model::{LanguageModel, LanguageModelRequestMessage, LanguageModelToolSchemaFormat};
+use language_model::{LanguageModel, LanguageModelRequest, LanguageModelToolSchemaFormat};
 use project::{Project, context_server_store::ContextServerStore};
 use ui::IconName;
@@ -72,7 +72,7 @@ impl Tool for ContextServerTool {
     fn run(
         self: Arc<Self>,
         input: serde_json::Value,
-        _messages: &[LanguageModelRequestMessage],
+        _request: Arc<LanguageModelRequest>,
         _project: Entity<Project>,
         _action_log: Entity<ActionLog>,
         _model: Arc<dyn LanguageModel>,
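The shape of the change is easiest to see outside the trait: rather than handing each consumer its own copy of the message list, the request is built once and shared behind an Arc, so the tool call and the edit-streaming path operate on the same conversation and its cached state can be reused instead of rebuilt. Below is a minimal, self-contained sketch of that sharing pattern; the Request, run_tool, and stream_edits names are hypothetical stand-ins for illustration, not Zed's API.

use std::sync::Arc;

// Hypothetical stand-in for a conversation request; not Zed's LanguageModelRequest.
struct Request {
    messages: Vec<String>,
}

// New shape: consumers share one Arc'd request, so the conversation
// (and any cache keyed to it) is built once and reused.
fn run_tool(request: Arc<Request>) {
    println!("tool sees {} messages", request.messages.len());
}

fn stream_edits(request: Arc<Request>) {
    println!("edit stream reuses the same {} messages", request.messages.len());
}

fn main() {
    let request = Arc::new(Request {
        messages: vec!["system prompt".into(), "user: apply this edit".into()],
    });

    // Cloning the Arc copies a pointer, not the conversation.
    run_tool(Arc::clone(&request));
    stream_edits(request);
}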