Sketch in assistant edit button (#19705)

Add an edit button to the assistant. This is totally hacked in for now,
just to see how it feels when rendered simply in the UI.

![CleanShot 2024-10-24 at 16 26 14@2x](https://github.com/user-attachments/assets/e630d078-78b7-42d7-93f1-cf61c00bd20e)

cc @as-cii @danilo-leal 

Release Notes:

- N/A

---------

Co-authored-by: Danilo Leal <67129314+danilo-leal@users.noreply.github.com>
Co-authored-by: Richard Feldman <oss@rtfeldman.com>
Nathan Sobo 2024-10-29 11:21:10 -06:00 (committed by GitHub)
parent 759d136fe6
commit cfa20ff221
8 changed files with 155 additions and 35 deletions

```diff
@@ -66,6 +66,14 @@ impl ContextId {
     }
 }
 
+#[derive(Clone, Copy, Debug)]
+pub enum RequestType {
+    /// Request a normal chat response from the model.
+    Chat,
+    /// Add a preamble to the message, which tells the model to return a structured response that suggests edits.
+    SuggestEdits,
+}
+
 #[derive(Clone, Debug)]
 pub enum ContextOperation {
     InsertMessage {
```
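For a feel of how the new enum gets used, here is a minimal, self-contained sketch (not Zed's actual API) of branching on the two variants; `preamble_for` is a hypothetical stand-in for the workflow prompt that `PromptBuilder` generates:

```rust
#[derive(Clone, Copy, Debug)]
enum RequestType {
    Chat,
    SuggestEdits,
}

// Hypothetical stand-in for PromptBuilder::generate_workflow_prompt.
fn preamble_for(request_type: RequestType) -> Option<&'static str> {
    match request_type {
        // A plain chat turn needs no extra instructions.
        RequestType::Chat => None,
        // Edit suggestions need the structured-response preamble.
        RequestType::SuggestEdits => Some("Respond with structured edit suggestions."),
    }
}

fn main() {
    assert!(preamble_for(RequestType::Chat).is_none());
    assert!(preamble_for(RequestType::SuggestEdits).is_some());
}
```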
```diff
@@ -1028,7 +1036,7 @@ impl Context {
     }
 
     pub(crate) fn count_remaining_tokens(&mut self, cx: &mut ModelContext<Self>) {
-        let request = self.to_completion_request(cx);
+        let request = self.to_completion_request(RequestType::SuggestEdits, cx); // Conservatively assume SuggestEdits, since it takes more tokens.
         let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
             return;
         };
```
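The `SuggestEdits` choice here is deliberate: since that variant appends a preamble, counting tokens against it yields a remaining-token figure that is a safe lower bound for either request type. A sketch of that accounting, with hypothetical names and token counts:

```rust
// Counting against the larger SuggestEdits request (chat + preamble)
// means the reported remaining budget never overstates what's left.
fn remaining_tokens(max_tokens: usize, chat_tokens: usize, preamble_tokens: usize) -> usize {
    max_tokens.saturating_sub(chat_tokens + preamble_tokens)
}

fn main() {
    // With a 1000-token budget, a 900-token conversation, and a 150-token
    // preamble, the conservative count reports 0 remaining rather than 100.
    assert_eq!(remaining_tokens(1000, 900, 150), 0);
}
```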
```diff
@@ -1171,7 +1179,7 @@ impl Context {
         }
 
         let request = {
-            let mut req = self.to_completion_request(cx);
+            let mut req = self.to_completion_request(RequestType::Chat, cx);
             // Skip the last message because it's likely to change and
             // therefore would be a waste to cache.
             req.messages.pop();
@@ -1859,7 +1867,11 @@ impl Context {
         })
     }
 
-    pub fn assist(&mut self, cx: &mut ModelContext<Self>) -> Option<MessageAnchor> {
+    pub fn assist(
+        &mut self,
+        request_type: RequestType,
+        cx: &mut ModelContext<Self>,
+    ) -> Option<MessageAnchor> {
         let model_registry = LanguageModelRegistry::read_global(cx);
         let provider = model_registry.active_provider()?;
         let model = model_registry.active_model()?;
@@ -1872,7 +1884,7 @@ impl Context {
         // Compute which messages to cache, including the last one.
         self.mark_cache_anchors(&model.cache_configuration(), false, cx);
 
-        let mut request = self.to_completion_request(cx);
+        let mut request = self.to_completion_request(request_type, cx);
 
         if cx.has_flag::<ToolUseFeatureFlag>() {
             let tool_registry = ToolRegistry::global(cx);
```
```diff
@@ -2074,7 +2086,11 @@ impl Context {
         Some(user_message)
     }
 
-    pub fn to_completion_request(&self, cx: &AppContext) -> LanguageModelRequest {
+    pub fn to_completion_request(
+        &self,
+        request_type: RequestType,
+        cx: &AppContext,
+    ) -> LanguageModelRequest {
         let buffer = self.buffer.read(cx);
 
         let mut contents = self.contents(cx).peekable();
@@ -2163,6 +2179,25 @@ impl Context {
             completion_request.messages.push(request_message);
         }
 
+        if let RequestType::SuggestEdits = request_type {
+            if let Ok(preamble) = self.prompt_builder.generate_workflow_prompt() {
+                let last_elem_index = completion_request.messages.len();
+
+                completion_request
+                    .messages
+                    .push(LanguageModelRequestMessage {
+                        role: Role::User,
+                        content: vec![MessageContent::Text(preamble)],
+                        cache: false,
+                    });
+
+                // The preamble message should be sent right before the last actual user message.
+                completion_request
+                    .messages
+                    .swap(last_elem_index, last_elem_index.saturating_sub(1));
+            }
+        }
+
         completion_request
     }
```
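The push-then-swap in this hunk is easy to misread, so here is a self-contained sketch of the same ordering trick, with plain strings standing in for `LanguageModelRequestMessage`:

```rust
fn insert_before_last(mut messages: Vec<String>, preamble: String) -> Vec<String> {
    let last_elem_index = messages.len();
    messages.push(preamble);
    // After the push, the preamble sits at last_elem_index; swapping it with
    // its predecessor places it immediately before the final user message.
    // saturating_sub keeps the swap in bounds if the list started out empty.
    messages.swap(last_elem_index, last_elem_index.saturating_sub(1));
    messages
}

fn main() {
    let messages = vec!["system prompt".to_string(), "user: fix this".to_string()];
    let reordered = insert_before_last(messages, "edit preamble".to_string());
    assert_eq!(reordered, ["system prompt", "edit preamble", "user: fix this"]);
}
```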
```diff
@@ -2477,7 +2512,7 @@ impl Context {
             return;
         }
 
-        let mut request = self.to_completion_request(cx);
+        let mut request = self.to_completion_request(RequestType::Chat, cx);
 
         request.messages.push(LanguageModelRequestMessage {
             role: Role::User,
             content: vec![
```