New revision of the Assistant Panel (#10870)
This is a crate-only addition of a new version of the AssistantPanel. We'll be putting this behind a feature flag while we iron out the new experience.

Release Notes:

- N/A

---------

Co-authored-by: Nathan Sobo <nathan@zed.dev>
Co-authored-by: Antonio Scandurra <me@as-cii.com>
Co-authored-by: Conrad Irwin <conrad@zed.dev>
Co-authored-by: Marshall Bowers <elliott.codes@gmail.com>
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
Co-authored-by: Nate Butler <nate@zed.dev>
Co-authored-by: Nate Butler <iamnbutler@gmail.com>
Co-authored-by: Max Brunsfeld <maxbrunsfeld@gmail.com>
Co-authored-by: Max <max@zed.dev>
parent e0c83a1d32
commit 68a1ad89bb
55 changed files with 2989 additions and 262 deletions
@@ -5,7 +5,8 @@
     "maxbrunsfeld",
     "iamnbutler",
     "mikayla-maki",
-    "JosephTLyons"
+    "JosephTLyons",
+    "rgbkrk"
   ],
   "channels": ["zed"],
   "number_of_users": 100
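For orientation: this first hunk edits a JSON seed/config file. Judging only from the keys visible in the hunk, the overall document shape is presumably something like the following (the name of the enclosing array key is not visible in the hunk and is a guess):

{
  "admins": [
    "maxbrunsfeld",
    "iamnbutler",
    "mikayla-maki",
    "JosephTLyons",
    "rgbkrk"
  ],
  "channels": ["zed"],
  "number_of_users": 100
}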
@@ -1,5 +1,6 @@
-use anyhow::{anyhow, Result};
+use anyhow::{anyhow, Context as _, Result};
 use rpc::proto;
+use util::ResultExt as _;
 
 pub fn language_model_request_to_open_ai(
     request: proto::CompleteWithLanguageModel,
@@ -9,24 +10,83 @@ pub fn language_model_request_to_open_ai(
         messages: request
             .messages
             .into_iter()
-            .map(|message| {
+            .map(|message: proto::LanguageModelRequestMessage| {
                 let role = proto::LanguageModelRole::from_i32(message.role)
                     .ok_or_else(|| anyhow!("invalid role {}", message.role))?;
-                Ok(open_ai::RequestMessage {
-                    role: match role {
-                        proto::LanguageModelRole::LanguageModelUser => open_ai::Role::User,
-                        proto::LanguageModelRole::LanguageModelAssistant => {
-                            open_ai::Role::Assistant
-                        }
-                        proto::LanguageModelRole::LanguageModelSystem => open_ai::Role::System,
-                    },
-                    content: message.content,
-                })
+
+                let openai_message = match role {
+                    proto::LanguageModelRole::LanguageModelUser => open_ai::RequestMessage::User {
+                        content: message.content,
+                    },
+                    proto::LanguageModelRole::LanguageModelAssistant => {
+                        open_ai::RequestMessage::Assistant {
+                            content: Some(message.content),
+                            tool_calls: message
+                                .tool_calls
+                                .into_iter()
+                                .filter_map(|call| {
+                                    Some(open_ai::ToolCall {
+                                        id: call.id,
+                                        content: match call.variant? {
+                                            proto::tool_call::Variant::Function(f) => {
+                                                open_ai::ToolCallContent::Function {
+                                                    function: open_ai::FunctionContent {
+                                                        name: f.name,
+                                                        arguments: f.arguments,
+                                                    },
+                                                }
+                                            }
+                                        },
+                                    })
+                                })
+                                .collect(),
+                        }
+                    }
+                    proto::LanguageModelRole::LanguageModelSystem => {
+                        open_ai::RequestMessage::System {
+                            content: message.content,
+                        }
+                    }
+                    proto::LanguageModelRole::LanguageModelTool => open_ai::RequestMessage::Tool {
+                        tool_call_id: message
+                            .tool_call_id
+                            .ok_or_else(|| anyhow!("tool message is missing tool call id"))?,
+                        content: message.content,
+                    },
+                };
+
+                Ok(openai_message)
             })
             .collect::<Result<Vec<open_ai::RequestMessage>>>()?,
         stream: true,
         stop: request.stop,
         temperature: request.temperature,
+        tools: request
+            .tools
+            .into_iter()
+            .filter_map(|tool| {
+                Some(match tool.variant? {
+                    proto::chat_completion_tool::Variant::Function(f) => {
+                        open_ai::ToolDefinition::Function {
+                            function: open_ai::FunctionDefinition {
+                                name: f.name,
+                                description: f.description,
+                                parameters: if let Some(params) = &f.parameters {
+                                    Some(
+                                        serde_json::from_str(params)
+                                            .context("failed to deserialize tool parameters")
+                                            .log_err()?,
+                                    )
+                                } else {
+                                    None
+                                },
+                            },
+                        }
+                    }
+                })
+            })
+            .collect(),
+        tool_choice: request.tool_choice,
     })
 }
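Note the shape change this hunk encodes: open_ai::RequestMessage goes from a struct with a role field to an enum with one variant per role, so that only assistant messages carry tool calls and only tool messages carry a tool_call_id. A minimal sketch of the enum shape this conversion targets, inferred purely from the usage above (the real definitions live in the open_ai crate and may differ in detail):

// Sketch inferred from usage; not the crate's actual source.
pub enum RequestMessage {
    User { content: String },
    Assistant {
        content: Option<String>,
        tool_calls: Vec<ToolCall>,
    },
    System { content: String },
    Tool {
        content: String,
        tool_call_id: String,
    },
}

pub struct ToolCall {
    pub id: String,
    pub content: ToolCallContent,
}

pub enum ToolCallContent {
    Function { function: FunctionContent },
}

pub struct FunctionContent {
    pub name: String,      // function the model wants to call
    pub arguments: String, // JSON-encoded arguments, streamed as text
}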
@@ -58,6 +118,9 @@ pub fn language_model_request_message_to_google_ai(
             proto::LanguageModelRole::LanguageModelUser => google_ai::Role::User,
             proto::LanguageModelRole::LanguageModelAssistant => google_ai::Role::Model,
             proto::LanguageModelRole::LanguageModelSystem => google_ai::Role::User,
+            proto::LanguageModelRole::LanguageModelTool => {
+                Err(anyhow!("we don't handle tool calls with google ai yet"))?
+            }
         },
     })
 }
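The new LanguageModelTool arm relies on the Err(...)? idiom: inside a match arm that must evaluate to a value, Err(...)? propagates the error out of the enclosing function instead of producing one. A standalone sketch of the idiom; this function and the role numbers are hypothetical, not taken from the diff:

use anyhow::{anyhow, Result};

// `Err(e)?` never yields a value, so it type-checks in any arm while
// early-returning the error from `to_google_role`.
fn to_google_role(role: i32) -> Result<&'static str> {
    Ok(match role {
        0 => "user",
        1 => "model",
        2 => "user", // system prompts are folded into user turns
        3 => Err(anyhow!("we don't handle tool calls with google ai yet"))?,
        other => Err(anyhow!("invalid role {other}"))?,
    })
}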
@@ -775,9 +775,7 @@ impl Server {
             Box::new(move |envelope, session| {
                 let envelope = envelope.into_any().downcast::<TypedEnvelope<M>>().unwrap();
                 let received_at = envelope.received_at;
-                tracing::info!(
-                    "message received"
-                );
+                tracing::info!("message received");
                 let start_time = Instant::now();
                 let future = (handler)(*envelope, session);
                 async move {
@@ -786,12 +784,24 @@ impl Server {
                     let processing_duration_ms = start_time.elapsed().as_micros() as f64 / 1000.0;
                     let queue_duration_ms = total_duration_ms - processing_duration_ms;
                     let payload_type = M::NAME;
+
                     match result {
                         Err(error) => {
-                            // todo!(), why isn't this logged inside the span?
-                            tracing::error!(%error, total_duration_ms, processing_duration_ms, queue_duration_ms, payload_type, "error handling message")
+                            tracing::error!(
+                                ?error,
+                                total_duration_ms,
+                                processing_duration_ms,
+                                queue_duration_ms,
+                                payload_type,
+                                "error handling message"
+                            )
                         }
-                        Ok(()) => tracing::info!(total_duration_ms, processing_duration_ms, queue_duration_ms, "finished handling message"),
+                        Ok(()) => tracing::info!(
+                            total_duration_ms,
+                            processing_duration_ms,
+                            queue_duration_ms,
+                            "finished handling message"
+                        ),
                     }
                 }
                 .boxed()
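Besides reflowing the log calls, this hunk switches the error field from %error to ?error. In tracing macros, % captures a field with its Display impl and ? with its Debug impl; for an anyhow::Error, the Debug form includes the whole context chain. A minimal sketch, assuming the tracing and tracing-subscriber crates:

use anyhow::anyhow;

fn main() {
    tracing_subscriber::fmt().init();

    let error = anyhow!("connection reset").context("handling message");
    let total_duration_ms = 12.5;

    // `%error` records the field via Display (outermost message only);
    // `?error` records it via Debug, which for anyhow includes the chain.
    tracing::error!(%error, total_duration_ms, "display form");
    tracing::error!(?error, total_duration_ms, "debug form");
}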
@@ -4098,7 +4108,7 @@ async fn complete_with_open_ai(
         crate::ai::language_model_request_to_open_ai(request)?,
     )
     .await
-    .context("open_ai::stream_completion request failed")?;
+    .context("open_ai::stream_completion request failed within collab")?;
 
     while let Some(event) = completion_stream.next().await {
         let event = event?;
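The only change here is a more specific .context(...) message, so the error's origin (collab, as opposed to a client-side call) is visible in logs. For reference, a self-contained sketch of how anyhow's context layering behaves; the path and the messages are illustrative:

use anyhow::{Context, Result};

fn read_config(path: &str) -> Result<String> {
    std::fs::read_to_string(path)
        .with_context(|| format!("failed to read {path}"))
        .context("loading collab configuration failed")
}

fn main() {
    if let Err(error) = read_config("/nonexistent/config.json") {
        // `{:?}` prints the outermost message first, then each underlying
        // cause, ending with the original io::Error.
        eprintln!("{error:?}");
    }
}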
@@ -4113,8 +4123,32 @@ async fn complete_with_open_ai(
                         open_ai::Role::User => LanguageModelRole::LanguageModelUser,
                         open_ai::Role::Assistant => LanguageModelRole::LanguageModelAssistant,
                         open_ai::Role::System => LanguageModelRole::LanguageModelSystem,
+                        open_ai::Role::Tool => LanguageModelRole::LanguageModelTool,
                     } as i32),
                     content: choice.delta.content,
+                    tool_calls: choice
+                        .delta
+                        .tool_calls
+                        .into_iter()
+                        .map(|delta| proto::ToolCallDelta {
+                            index: delta.index as u32,
+                            id: delta.id,
+                            variant: match delta.function {
+                                Some(function) => {
+                                    let name = function.name;
+                                    let arguments = function.arguments;
+
+                                    Some(proto::tool_call_delta::Variant::Function(
+                                        proto::tool_call_delta::FunctionCallDelta {
+                                            name,
+                                            arguments,
+                                        },
+                                    ))
+                                }
+                                None => None,
+                            },
+                        })
+                        .collect(),
                 }),
                 finish_reason: choice.finish_reason,
             })
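Tool-call deltas stream in fragments: each chunk carries an index identifying which call it extends, plus optional id, name, and arguments pieces. A hedged sketch of how a consumer might fold them back together; every name here is hypothetical, not the actual client code:

#[derive(Default, Debug)]
struct PendingToolCall {
    id: Option<String>,
    name: String,
    arguments: String,
}

fn apply_delta(
    calls: &mut Vec<PendingToolCall>,
    index: usize,
    id: Option<String>,
    name: Option<String>,
    arguments: Option<String>,
) {
    // Grow the list on demand: a delta may introduce a new index at any time.
    if calls.len() <= index {
        calls.resize_with(index + 1, PendingToolCall::default);
    }
    let call = &mut calls[index];
    if id.is_some() {
        call.id = id; // the id typically arrives once, in the first fragment
    }
    if let Some(name) = name {
        call.name.push_str(&name);
    }
    if let Some(arguments) = arguments {
        // Argument JSON is streamed as raw text; concatenate until done.
        call.arguments.push_str(&arguments);
    }
}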
@@ -4165,6 +4199,8 @@ async fn complete_with_google_ai(
                     })
                     .collect(),
                 ),
+                // Tool calls are not supported for Google
+                tool_calls: Vec::new(),
             }),
             finish_reason: candidate.finish_reason.map(|reason| reason.to_string()),
         })
@@ -4187,24 +4223,28 @@ async fn complete_with_anthropic(
     let messages = request
         .messages
         .into_iter()
-        .filter_map(|message| match message.role() {
-            LanguageModelRole::LanguageModelUser => Some(anthropic::RequestMessage {
-                role: anthropic::Role::User,
-                content: message.content,
-            }),
-            LanguageModelRole::LanguageModelAssistant => Some(anthropic::RequestMessage {
-                role: anthropic::Role::Assistant,
-                content: message.content,
-            }),
-            // Anthropic's API breaks system instructions out as a separate field rather
-            // than having a system message role.
-            LanguageModelRole::LanguageModelSystem => {
-                if !system_message.is_empty() {
-                    system_message.push_str("\n\n");
-                }
-                system_message.push_str(&message.content);
+        .filter_map(|message| {
+            match message.role() {
+                LanguageModelRole::LanguageModelUser => Some(anthropic::RequestMessage {
+                    role: anthropic::Role::User,
+                    content: message.content,
+                }),
+                LanguageModelRole::LanguageModelAssistant => Some(anthropic::RequestMessage {
+                    role: anthropic::Role::Assistant,
+                    content: message.content,
+                }),
+                // Anthropic's API breaks system instructions out as a separate field rather
+                // than having a system message role.
+                LanguageModelRole::LanguageModelSystem => {
+                    if !system_message.is_empty() {
+                        system_message.push_str("\n\n");
+                    }
+                    system_message.push_str(&message.content);
 
-                None
+                    None
+                }
+                // We don't yet support tool calls for Anthropic
+                LanguageModelRole::LanguageModelTool => None,
             }
         })
         .collect();
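The refactor wraps the match in a block and adds the LanguageModelTool arm; the pattern itself is unchanged: filter_map drops system messages from the conversation while appending their text to a separate buffer, because Anthropic takes the system prompt as a dedicated request field rather than a message role. A simplified, self-contained sketch of that pattern, with (bool, String) standing in for the proto message type:

fn split_system(messages: Vec<(bool, String)>) -> (String, Vec<String>) {
    let mut system_message = String::new();
    let conversation = messages
        .into_iter()
        .filter_map(|(is_system, content)| {
            if is_system {
                // Accumulate into the side buffer and drop from the list.
                if !system_message.is_empty() {
                    system_message.push_str("\n\n");
                }
                system_message.push_str(&content);
                None
            } else {
                Some(content)
            }
        })
        .collect();
    (system_message, conversation)
}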
@@ -4248,6 +4288,7 @@ async fn complete_with_anthropic(
                         delta: Some(proto::LanguageModelResponseMessage {
                             role: Some(current_role as i32),
                             content: Some(text),
+                            tool_calls: Vec::new(),
                         }),
                         finish_reason: None,
                     }],
@@ -4264,6 +4305,7 @@ async fn complete_with_anthropic(
                         delta: Some(proto::LanguageModelResponseMessage {
                             role: Some(current_role as i32),
                             content: Some(text),
+                            tool_calls: Vec::new(),
                         }),
                         finish_reason: None,
                     }],