agent: Add "max mode" toggle (#29549)

This PR adds a "max mode" toggle to the Agent panel, for models that
support it.

Only visible to users who have the `new-billing` feature flag enabled.

Icon is just a placeholder.

Release Notes:

- N/A
This commit is contained in:
Marshall Bowers 2025-04-28 12:50:47 -04:00 committed by GitHub
parent e3c987e2fb
commit ce93961fe0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
14 changed files with 87 additions and 1 deletion

View file

@ -1293,6 +1293,7 @@ impl ActiveThread {
let request = language_model::LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![request_message],
tools: vec![],
stop: vec![],

View file

@ -460,6 +460,7 @@ impl CodegenAlternative {
LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
tools: Vec::new(),
stop: Vec::new(),
temperature: None,

View file

@ -11,6 +11,7 @@ use editor::{
ContextMenuOptions, ContextMenuPlacement, Editor, EditorElement, EditorEvent, EditorMode,
EditorStyle, MultiBuffer,
};
use feature_flags::{FeatureFlagAppExt, NewBillingFeatureFlag};
use file_icons::FileIcons;
use fs::Fs;
use futures::future::Shared;
@ -33,6 +34,7 @@ use theme::ThemeSettings;
use ui::{Disclosure, KeyBinding, PopoverMenuHandle, Tooltip, prelude::*};
use util::ResultExt as _;
use workspace::Workspace;
use zed_llm_client::CompletionMode;
use crate::assistant_model_selector::AssistantModelSelector;
use crate::context_picker::{ContextPicker, ContextPickerCompletionProvider};
@ -50,7 +52,6 @@ pub struct MessageEditor {
thread: Entity<Thread>,
incompatible_tools_state: Entity<IncompatibleToolsState>,
editor: Entity<Editor>,
#[allow(dead_code)]
workspace: WeakEntity<Workspace>,
project: Entity<Project>,
context_store: Entity<ContextStore>,
@ -419,6 +420,37 @@ impl MessageEditor {
}
}
/// Renders the "max mode" toggle button, or `None` when it should be hidden
/// (flag disabled, no default model, or the model lacks max-mode support).
fn render_max_mode_toggle(&self, cx: &mut Context<Self>) -> Option<AnyElement> {
    // Max mode is gated behind the new-billing feature flag.
    if !cx.has_flag::<NewBillingFeatureFlag>() {
        return None;
    }

    let default = LanguageModelRegistry::read_global(cx).default_model()?;
    let model = default.model.clone();
    if !model.supports_max_mode() {
        return None;
    }

    let current_mode = self.thread.read(cx).completion_mode();
    let is_max = current_mode == Some(CompletionMode::Max);

    let button = IconButton::new("max-mode", IconName::SquarePlus)
        .icon_size(IconSize::Small)
        .toggle_state(is_max)
        .tooltip(Tooltip::text("Max Mode"))
        .on_click(cx.listener(move |this, _event, _window, cx| {
            // Toggle relative to the mode captured at render time.
            let next_mode = match current_mode {
                Some(CompletionMode::Max) => Some(CompletionMode::Normal),
                Some(CompletionMode::Normal) | None => Some(CompletionMode::Max),
            };
            this.thread.update(cx, |thread, _cx| {
                thread.set_completion_mode(next_mode);
            });
        }));

    Some(button.into_any_element())
}
fn render_editor(
&self,
font_size: Rems,
@ -579,6 +611,7 @@ impl MessageEditor {
}),
)
})
.children(self.render_max_mode_toggle(cx))
.child(self.model_selector.clone())
.map({
let focus_handle = focus_handle.clone();
@ -1100,6 +1133,7 @@ impl MessageEditor {
let request = language_model::LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![request_message],
tools: vec![],
stop: vec![],

View file

@ -276,6 +276,7 @@ impl TerminalInlineAssistant {
LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![request_message],
tools: Vec::new(),
stop: Vec::new(),

View file

@ -34,6 +34,7 @@ use settings::Settings;
use thiserror::Error;
use util::{ResultExt as _, TryFutureExt as _, post_inc};
use uuid::Uuid;
use zed_llm_client::CompletionMode;
use crate::context::{AgentContext, ContextLoadResult, LoadedContext};
use crate::thread_store::{
@ -290,6 +291,7 @@ pub struct Thread {
summary: Option<SharedString>,
pending_summary: Task<Option<()>>,
detailed_summary_state: DetailedSummaryState,
completion_mode: Option<CompletionMode>,
messages: Vec<Message>,
next_message_id: MessageId,
last_prompt_id: PromptId,
@ -339,6 +341,7 @@ impl Thread {
summary: None,
pending_summary: Task::ready(None),
detailed_summary_state: DetailedSummaryState::NotGenerated,
completion_mode: None,
messages: Vec::new(),
next_message_id: MessageId(0),
last_prompt_id: PromptId::new(),
@ -394,6 +397,7 @@ impl Thread {
summary: Some(serialized.summary),
pending_summary: Task::ready(None),
detailed_summary_state: serialized.detailed_summary_state,
completion_mode: None,
messages: serialized
.messages
.into_iter()
@ -518,6 +522,14 @@ impl Thread {
}
}
/// Returns the completion mode currently set on this thread, if any.
pub fn completion_mode(&self) -> Option<CompletionMode> {
    self.completion_mode
}
/// Sets the completion mode for this thread (e.g. toggling "max mode").
/// Only applied to requests when the model supports max mode; otherwise
/// the request is sent with `mode: None`.
pub fn set_completion_mode(&mut self, mode: Option<CompletionMode>) {
    self.completion_mode = mode;
}
pub fn message(&self, id: MessageId) -> Option<&Message> {
let index = self
.messages
@ -930,6 +942,12 @@ impl Thread {
self.remaining_turns -= 1;
let mut request = self.to_completion_request(cx);
request.mode = if model.supports_max_mode() {
self.completion_mode
} else {
None
};
if model.supports_tools() {
request.tools = self
.tools()
@ -967,6 +985,7 @@ impl Thread {
let mut request = LanguageModelRequest {
thread_id: Some(self.id.to_string()),
prompt_id: Some(self.last_prompt_id.to_string()),
mode: None,
messages: vec![],
tools: Vec::new(),
stop: Vec::new(),
@ -1063,6 +1082,7 @@ impl Thread {
let mut request = LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![],
tools: Vec::new(),
stop: Vec::new(),

View file

@ -2981,6 +2981,7 @@ impl CodegenAlternative {
Ok(LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages,
tools: Vec::new(),
stop: Vec::new(),

View file

@ -294,6 +294,7 @@ impl TerminalInlineAssistant {
Ok(LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages,
tools: Vec::new(),
stop: Vec::new(),

View file

@ -2559,6 +2559,7 @@ impl AssistantContext {
let mut completion_request = LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: Vec::new(),
tools: Vec::new(),
stop: Vec::new(),

View file

@ -573,6 +573,7 @@ impl ExampleInstance {
let request = LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![LanguageModelRequestMessage {
role: Role::User,
content: vec![MessageContent::Text(to_prompt(assertion.description))],

View file

@ -1746,6 +1746,7 @@ impl GitPanel {
let request = LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![LanguageModelRequestMessage {
role: Role::User,
content: vec![content.into()],

View file

@ -240,6 +240,26 @@ pub trait LanguageModel: Send + Sync {
/// Whether this model supports tools.
fn supports_tools(&self) -> bool;
/// Returns whether this model supports "max mode".
///
/// Only models served through the Zed cloud provider are eligible, and
/// only those in the capability allowlist below.
fn supports_max_mode(&self) -> bool {
    // Max mode is a cloud-only feature; other providers never qualify.
    if self.provider_id().0 != ZED_CLOUD_PROVIDER_ID {
        return false;
    }

    const MAX_MODE_CAPABLE_MODELS: &[CloudModel] = &[
        CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
        CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
    ];

    // Membership test against the allowlist by model id.
    MAX_MODE_CAPABLE_MODELS
        .iter()
        .any(|model| self.id().0 == model.id())
}
fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
LanguageModelToolSchemaFormat::JsonSchema
}

View file

@ -11,6 +11,7 @@ use gpui::{
use image::{DynamicImage, ImageDecoder, codecs::png::PngEncoder, imageops::resize};
use serde::{Deserialize, Serialize};
use util::ResultExt;
use zed_llm_client::CompletionMode;
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub struct LanguageModelImage {
@ -252,6 +253,7 @@ pub struct LanguageModelRequestTool {
pub struct LanguageModelRequest {
pub thread_id: Option<String>,
pub prompt_id: Option<String>,
pub mode: Option<CompletionMode>,
pub messages: Vec<LanguageModelRequestMessage>,
pub tools: Vec<LanguageModelRequestTool>,
pub stop: Vec<String>,

View file

@ -922,6 +922,7 @@ impl RulesLibrary {
LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![LanguageModelRequestMessage {
role: Role::System,
content: vec![body.to_string().into()],

View file

@ -559,6 +559,7 @@ impl SummaryIndex {
let request = LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![LanguageModelRequestMessage {
role: Role::User,
content: vec![prompt.into()],