agent: Attach thread ID and prompt ID to telemetry events (#29069)

This PR attaches the thread ID and the new prompt ID to telemetry events
for completions in the Agent panel.
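
In short, `LanguageModelRequest` gains two optional identifiers that are forwarded to the LLM service and attached to completion telemetry. A minimal sketch of the resulting shape (field names from this diff; the surrounding bindings are illustrative only):

```rust
// Agent-panel threads populate both IDs; unrelated call sites (inline
// assist, commit-message generation, summarization) pass None for each.
let request = LanguageModelRequest {
    thread_id: Some(thread_id.to_string()), // which conversation
    prompt_id: Some(prompt_id.to_string()), // which user submission
    ..Default::default()
};
```

Since `LanguageModelRequest` derives `Default`, the struct-update syntax above fills in the remaining fields.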

Release Notes:

- N/A

---------

Co-authored-by: Mikayla Maki <mikayla.c.maki@gmail.com>
Marshall Bowers, 2025-04-18 16:41:02 -04:00, committed by GitHub
parent 73a767fc45
commit 7abe2c9c31
17 changed files with 85 additions and 9 deletions


@@ -1212,6 +1212,8 @@ impl ActiveThread {
         }
         let request = language_model::LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages: vec![LanguageModelRequestMessage {
                 role: language_model::Role::User,
                 content: vec![content.into()],
@@ -1277,6 +1279,7 @@ impl ActiveThread {
         }
         self.thread.update(cx, |thread, cx| {
+            thread.advance_prompt_id();
             thread.send_to_model(model.model, RequestKind::Chat, cx)
         });
         cx.notify();


@@ -425,6 +425,8 @@ impl CodegenAlternative {
         request_message.content.push(prompt.into());
         Ok(LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             tools: Vec::new(),
             stop: Vec::new(),
             temperature: None,


@@ -330,6 +330,7 @@ impl MessageEditor {
         // Send to model after summaries are done
         thread
             .update(cx, |thread, cx| {
+                thread.advance_prompt_id();
                 thread.send_to_model(model, request_kind, cx);
             })
             .log_err();
@@ -1013,6 +1014,8 @@ impl MessageEditor {
         }
         let request = language_model::LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages: vec![LanguageModelRequestMessage {
                 role: language_model::Role::User,
                 content: vec![content.into()],


@@ -261,6 +261,8 @@ impl TerminalInlineAssistant {
         request_message.content.push(prompt.into());
         Ok(LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages: vec![request_message],
             tools: Vec::new(),
             stop: Vec::new(),


@@ -70,6 +70,24 @@ impl From<&str> for ThreadId {
     }
 }
 
+/// The ID of the user prompt that initiated a request.
+///
+/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
+pub struct PromptId(Arc<str>);
+
+impl PromptId {
+    pub fn new() -> Self {
+        Self(Uuid::new_v4().to_string().into())
+    }
+}
+
+impl std::fmt::Display for PromptId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
 pub struct MessageId(pub(crate) usize);
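
Since `PromptId` wraps a freshly generated UUID in an `Arc<str>`, two instances are always distinct and `Display` yields the raw UUID string. An illustrative check (not part of the diff):

```rust
let a = PromptId::new();
let b = PromptId::new();
assert_ne!(a, b); // every call mints a fresh v4 UUID
assert_eq!(a.to_string().len(), 36); // hyphenated UUID form via Display
```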
@@ -274,6 +292,7 @@ pub struct Thread {
     detailed_summary_state: DetailedSummaryState,
     messages: Vec<Message>,
     next_message_id: MessageId,
+    last_prompt_id: PromptId,
     context: BTreeMap<ContextId, AssistantContext>,
     context_by_message: HashMap<MessageId, Vec<ContextId>>,
     project_context: SharedProjectContext,
@@ -320,6 +339,7 @@ impl Thread {
             detailed_summary_state: DetailedSummaryState::NotGenerated,
             messages: Vec::new(),
             next_message_id: MessageId(0),
+            last_prompt_id: PromptId::new(),
             context: BTreeMap::default(),
             context_by_message: HashMap::default(),
             project_context: system_prompt,
@@ -393,6 +413,7 @@ impl Thread {
                 })
                 .collect(),
             next_message_id,
+            last_prompt_id: PromptId::new(),
             context: BTreeMap::default(),
             context_by_message: HashMap::default(),
             project_context,
@@ -432,6 +453,10 @@ impl Thread {
         self.updated_at = Utc::now();
     }
 
+    pub fn advance_prompt_id(&mut self) {
+        self.last_prompt_id = PromptId::new();
+    }
+
     pub fn summary(&self) -> Option<SharedString> {
         self.summary.clone()
     }
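
Judging by the call sites earlier in this diff (an inference, not a documented contract), `advance_prompt_id` runs once per user submission, immediately before `send_to_model`, so every completion spawned by that submission, including tool-use follow-ups, reports the same `prompt_id`. Roughly:

```rust
// Hypothetical flow; the private field is read directly for illustration.
thread.advance_prompt_id(); // user hits Enter: mint a new ID
let first = thread.last_prompt_id.clone();
// ...the model streams a response, calls a tool, and a follow-up
// completion is sent; both completions carry `first`...
thread.advance_prompt_id(); // next user submission
assert_ne!(first, thread.last_prompt_id);
```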
@@ -942,6 +967,8 @@ impl Thread {
         cx: &mut Context<Self>,
     ) -> LanguageModelRequest {
         let mut request = LanguageModelRequest {
+            thread_id: Some(self.id.to_string()),
+            prompt_id: Some(self.last_prompt_id.to_string()),
             messages: vec![],
             tools: Vec::new(),
             stop: Vec::new(),
@@ -1083,6 +1110,7 @@ impl Thread {
         cx: &mut Context<Self>,
     ) {
         let pending_completion_id = post_inc(&mut self.completion_count);
+        let prompt_id = self.last_prompt_id.clone();
         let task = cx.spawn(async move |thread, cx| {
             let stream_completion_future = model.stream_completion_with_usage(request, &cx);
             let initial_token_usage =
@@ -1273,6 +1301,7 @@ impl Thread {
                 telemetry::event!(
                     "Assistant Thread Completion",
                     thread_id = thread.id().to_string(),
+                    prompt_id = prompt_id,
                     model = model.telemetry_id(),
                     model_provider = model.provider_id().to_string(),
                     input_tokens = usage.input_tokens,


@@ -2978,6 +2978,8 @@ impl CodegenAlternative {
         });
         Ok(LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages,
             tools: Vec::new(),
             stop: Vec::new(),


@@ -292,6 +292,8 @@ impl TerminalInlineAssistant {
         });
         Ok(LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages,
             tools: Vec::new(),
             stop: Vec::new(),


@@ -2555,6 +2555,8 @@ impl AssistantContext {
         }
         let mut completion_request = LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages: Vec::new(),
             tools: Vec::new(),
             stop: Vec::new(),


@@ -502,6 +502,8 @@ impl Example {
         )?;
         let request = LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages: vec![LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec![MessageContent::Text(prompt)],


@@ -1744,6 +1744,8 @@ impl GitPanel {
         const PROMPT: &str = include_str!("commit_message_prompt.txt");
         let request = LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages: vec![LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec![content.into()],


@@ -238,6 +238,8 @@ pub struct LanguageModelRequestTool {
 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
 pub struct LanguageModelRequest {
+    pub thread_id: Option<String>,
+    pub prompt_id: Option<String>,
     pub messages: Vec<LanguageModelRequestMessage>,
     pub tools: Vec<LanguageModelRequestTool>,
     pub stop: Vec<String>,


@@ -719,7 +719,7 @@ impl LanguageModel for CloudLanguageModel {
     fn stream_completion_with_usage(
         &self,
-        request: LanguageModelRequest,
+        mut request: LanguageModelRequest,
         _cx: &AsyncApp,
     ) -> BoxFuture<
         'static,
@@ -728,6 +728,8 @@ impl LanguageModel for CloudLanguageModel {
             Option<RequestUsage>,
         )>,
     > {
+        let thread_id = request.thread_id.take();
+        let prompt_id = request.prompt_id.take();
         match &self.model {
             CloudModel::Anthropic(model) => {
                 let request = into_anthropic(
@@ -744,6 +746,8 @@ impl LanguageModel for CloudLanguageModel {
                     client.clone(),
                     llm_api_token,
                     CompletionBody {
+                        thread_id,
+                        prompt_id,
                         provider: zed_llm_client::LanguageModelProvider::Anthropic,
                         model: request.model.clone(),
                         provider_request: serde_json::to_value(&request)?,
@@ -788,6 +792,8 @@ impl LanguageModel for CloudLanguageModel {
                     client.clone(),
                     llm_api_token,
                     CompletionBody {
+                        thread_id,
+                        prompt_id,
                         provider: zed_llm_client::LanguageModelProvider::OpenAi,
                         model: request.model.clone(),
                         provider_request: serde_json::to_value(&request)?,
@@ -816,6 +822,8 @@ impl LanguageModel for CloudLanguageModel {
                     client.clone(),
                     llm_api_token,
                     CompletionBody {
+                        thread_id,
+                        prompt_id,
                         provider: zed_llm_client::LanguageModelProvider::Google,
                         model: request.model.clone(),
                         provider_request: serde_json::to_value(&request)?,
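
Why `mut request` plus `Option::take`: the IDs are moved out of the request before `serde_json::to_value(&request)` builds the provider payload, so the values end up on `CompletionBody` itself rather than inside the provider-specific payload. A standalone sketch of the `take` semantics being relied on:

```rust
let mut prompt_id: Option<String> = Some("prompt-uuid".into());
let taken = prompt_id.take(); // moves the value out...
assert_eq!(taken.as_deref(), Some("prompt-uuid"));
assert!(prompt_id.is_none()); // ...leaving None behind in the request
```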


@@ -924,6 +924,8 @@ impl PromptLibrary {
             .update(|_, cx| {
                 model.count_tokens(
                     LanguageModelRequest {
+                        thread_id: None,
+                        prompt_id: None,
                         messages: vec![LanguageModelRequestMessage {
                             role: Role::System,
                             content: vec![body.to_string().into()],


@@ -557,6 +557,8 @@ impl SummaryIndex {
         );
         let request = LanguageModelRequest {
+            thread_id: None,
+            prompt_id: None,
             messages: vec![LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec![prompt.into()],