telemetry: Add language_name and model_provider (#18640)

This PR adds a bit more metadata for assistant logging.

Release Notes:

- Assistant: Added `language_name` and `model_provider` fields to
telemetry events.

---------

Co-authored-by: Marshall Bowers <elliott.codes@gmail.com>
Co-authored-by: Max <max@zed.dev>
This commit is contained in:
Boris Cherny 2024-10-04 11:37:27 -07:00 committed by GitHub
parent dfe1e43832
commit 01ad22683d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 83 additions and 29 deletions

1
Cargo.lock generated
View file

@@ -11497,6 +11497,7 @@ dependencies = [
name = "telemetry_events" name = "telemetry_events"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"language",
"semantic_version", "semantic_version",
"serde", "serde",
] ]

View file

@@ -2133,13 +2133,20 @@ impl Context {
}); });
if let Some(telemetry) = this.telemetry.as_ref() { if let Some(telemetry) = this.telemetry.as_ref() {
let language_name = this
.buffer
.read(cx)
.language()
.map(|language| language.name());
telemetry.report_assistant_event(AssistantEvent { telemetry.report_assistant_event(AssistantEvent {
conversation_id: Some(this.id.0.clone()), conversation_id: Some(this.id.0.clone()),
kind: AssistantKind::Panel, kind: AssistantKind::Panel,
phase: AssistantPhase::Response, phase: AssistantPhase::Response,
model: model.telemetry_id(), model: model.telemetry_id(),
model_provider: model.provider_id().to_string(),
response_latency, response_latency,
error_message, error_message,
language_name,
}); });
} }

View file

@@ -210,18 +210,6 @@ impl InlineAssistant {
initial_prompt: Option<String>, initial_prompt: Option<String>,
cx: &mut WindowContext, cx: &mut WindowContext,
) { ) {
if let Some(telemetry) = self.telemetry.as_ref() {
if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
telemetry.report_assistant_event(AssistantEvent {
conversation_id: None,
kind: AssistantKind::Inline,
phase: AssistantPhase::Invoked,
model: model.telemetry_id(),
response_latency: None,
error_message: None,
});
}
}
let snapshot = editor.read(cx).buffer().read(cx).snapshot(cx); let snapshot = editor.read(cx).buffer().read(cx).snapshot(cx);
let mut selections = Vec::<Selection<Point>>::new(); let mut selections = Vec::<Selection<Point>>::new();
@@ -268,6 +256,21 @@ impl InlineAssistant {
text_anchor: buffer.anchor_after(buffer_range.end), text_anchor: buffer.anchor_after(buffer_range.end),
}; };
codegen_ranges.push(start..end); codegen_ranges.push(start..end);
if let Some(telemetry) = self.telemetry.as_ref() {
if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
telemetry.report_assistant_event(AssistantEvent {
conversation_id: None,
kind: AssistantKind::Inline,
phase: AssistantPhase::Invoked,
model: model.telemetry_id(),
model_provider: model.provider_id().to_string(),
response_latency: None,
error_message: None,
language_name: buffer.language().map(|language| language.name()),
});
}
}
} }
let assist_group_id = self.next_assist_group_id.post_inc(); let assist_group_id = self.next_assist_group_id.post_inc();
@@ -762,23 +765,34 @@ impl InlineAssistant {
} }
pub fn finish_assist(&mut self, assist_id: InlineAssistId, undo: bool, cx: &mut WindowContext) { pub fn finish_assist(&mut self, assist_id: InlineAssistId, undo: bool, cx: &mut WindowContext) {
if let Some(telemetry) = self.telemetry.as_ref() {
if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
telemetry.report_assistant_event(AssistantEvent {
conversation_id: None,
kind: AssistantKind::Inline,
phase: if undo {
AssistantPhase::Rejected
} else {
AssistantPhase::Accepted
},
model: model.telemetry_id(),
response_latency: None,
error_message: None,
});
}
}
if let Some(assist) = self.assists.get(&assist_id) { if let Some(assist) = self.assists.get(&assist_id) {
if let Some(telemetry) = self.telemetry.as_ref() {
if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
let language_name = assist.editor.upgrade().and_then(|editor| {
let multibuffer = editor.read(cx).buffer().read(cx);
let ranges = multibuffer.range_to_buffer_ranges(assist.range.clone(), cx);
ranges
.first()
.and_then(|(buffer, _, _)| buffer.read(cx).language())
.map(|language| language.name())
});
telemetry.report_assistant_event(AssistantEvent {
conversation_id: None,
kind: AssistantKind::Inline,
phase: if undo {
AssistantPhase::Rejected
} else {
AssistantPhase::Accepted
},
model: model.telemetry_id(),
model_provider: model.provider_id().to_string(),
response_latency: None,
error_message: None,
language_name,
});
}
}
let assist_group_id = assist.group_id; let assist_group_id = assist.group_id;
if self.assist_groups[&assist_group_id].linked { if self.assist_groups[&assist_group_id].linked {
for assist_id in self.unlink_assist_group(assist_group_id, cx) { for assist_id in self.unlink_assist_group(assist_group_id, cx) {
@@ -2707,6 +2721,7 @@ impl CodegenAlternative {
self.edit_position = Some(self.range.start.bias_right(&self.snapshot)); self.edit_position = Some(self.range.start.bias_right(&self.snapshot));
let telemetry_id = model.telemetry_id(); let telemetry_id = model.telemetry_id();
let provider_id = model.provider_id();
let chunks: LocalBoxFuture<Result<BoxStream<Result<String>>>> = let chunks: LocalBoxFuture<Result<BoxStream<Result<String>>>> =
if user_prompt.trim().to_lowercase() == "delete" { if user_prompt.trim().to_lowercase() == "delete" {
async { Ok(stream::empty().boxed()) }.boxed_local() async { Ok(stream::empty().boxed()) }.boxed_local()
@@ -2717,7 +2732,7 @@ impl CodegenAlternative {
.spawn(|_, cx| async move { model.stream_completion_text(request, &cx).await }); .spawn(|_, cx| async move { model.stream_completion_text(request, &cx).await });
async move { Ok(chunks.await?.boxed()) }.boxed_local() async move { Ok(chunks.await?.boxed()) }.boxed_local()
}; };
self.handle_stream(telemetry_id, chunks, cx); self.handle_stream(telemetry_id, provider_id.to_string(), chunks, cx);
Ok(()) Ok(())
} }
@@ -2781,6 +2796,7 @@ impl CodegenAlternative {
pub fn handle_stream( pub fn handle_stream(
&mut self, &mut self,
model_telemetry_id: String, model_telemetry_id: String,
model_provider_id: String,
stream: impl 'static + Future<Output = Result<BoxStream<'static, Result<String>>>>, stream: impl 'static + Future<Output = Result<BoxStream<'static, Result<String>>>>,
cx: &mut ModelContext<Self>, cx: &mut ModelContext<Self>,
) { ) {
@@ -2811,6 +2827,15 @@ impl CodegenAlternative {
} }
let telemetry = self.telemetry.clone(); let telemetry = self.telemetry.clone();
let language_name = {
let multibuffer = self.buffer.read(cx);
let ranges = multibuffer.range_to_buffer_ranges(self.range.clone(), cx);
ranges
.first()
.and_then(|(buffer, _, _)| buffer.read(cx).language())
.map(|language| language.name())
};
self.diff = Diff::default(); self.diff = Diff::default();
self.status = CodegenStatus::Pending; self.status = CodegenStatus::Pending;
let mut edit_start = self.range.start.to_offset(&snapshot); let mut edit_start = self.range.start.to_offset(&snapshot);
@@ -2926,8 +2951,10 @@ impl CodegenAlternative {
kind: AssistantKind::Inline, kind: AssistantKind::Inline,
phase: AssistantPhase::Response, phase: AssistantPhase::Response,
model: model_telemetry_id, model: model_telemetry_id,
model_provider: model_provider_id.to_string(),
response_latency, response_latency,
error_message, error_message,
language_name,
}); });
} }
@@ -3540,6 +3567,7 @@ mod tests {
let (chunks_tx, chunks_rx) = mpsc::unbounded(); let (chunks_tx, chunks_rx) = mpsc::unbounded();
codegen.update(cx, |codegen, cx| { codegen.update(cx, |codegen, cx| {
codegen.handle_stream( codegen.handle_stream(
String::new(),
String::new(), String::new(),
future::ready(Ok(chunks_rx.map(Ok).boxed())), future::ready(Ok(chunks_rx.map(Ok).boxed())),
cx, cx,
@@ -3611,6 +3639,7 @@ mod tests {
let (chunks_tx, chunks_rx) = mpsc::unbounded(); let (chunks_tx, chunks_rx) = mpsc::unbounded();
codegen.update(cx, |codegen, cx| { codegen.update(cx, |codegen, cx| {
codegen.handle_stream( codegen.handle_stream(
String::new(),
String::new(), String::new(),
future::ready(Ok(chunks_rx.map(Ok).boxed())), future::ready(Ok(chunks_rx.map(Ok).boxed())),
cx, cx,
@@ -3685,6 +3714,7 @@ mod tests {
let (chunks_tx, chunks_rx) = mpsc::unbounded(); let (chunks_tx, chunks_rx) = mpsc::unbounded();
codegen.update(cx, |codegen, cx| { codegen.update(cx, |codegen, cx| {
codegen.handle_stream( codegen.handle_stream(
String::new(),
String::new(), String::new(),
future::ready(Ok(chunks_rx.map(Ok).boxed())), future::ready(Ok(chunks_rx.map(Ok).boxed())),
cx, cx,
@@ -3758,6 +3788,7 @@ mod tests {
let (chunks_tx, chunks_rx) = mpsc::unbounded(); let (chunks_tx, chunks_rx) = mpsc::unbounded();
codegen.update(cx, |codegen, cx| { codegen.update(cx, |codegen, cx| {
codegen.handle_stream( codegen.handle_stream(
String::new(),
String::new(), String::new(),
future::ready(Ok(chunks_rx.map(Ok).boxed())), future::ready(Ok(chunks_rx.map(Ok).boxed())),
cx, cx,
@@ -3821,6 +3852,7 @@ mod tests {
let (chunks_tx, chunks_rx) = mpsc::unbounded(); let (chunks_tx, chunks_rx) = mpsc::unbounded();
codegen.update(cx, |codegen, cx| { codegen.update(cx, |codegen, cx| {
codegen.handle_stream( codegen.handle_stream(
String::new(),
String::new(), String::new(),
future::ready(Ok(chunks_rx.map(Ok).boxed())), future::ready(Ok(chunks_rx.map(Ok).boxed())),
cx, cx,

View file

@@ -1040,6 +1040,7 @@ impl Codegen {
self.transaction = Some(TerminalTransaction::start(self.terminal.clone())); self.transaction = Some(TerminalTransaction::start(self.terminal.clone()));
self.generation = cx.spawn(|this, mut cx| async move { self.generation = cx.spawn(|this, mut cx| async move {
let model_telemetry_id = model.telemetry_id(); let model_telemetry_id = model.telemetry_id();
let model_provider_id = model.provider_id();
let response = model.stream_completion_text(prompt, &cx).await; let response = model.stream_completion_text(prompt, &cx).await;
let generate = async { let generate = async {
let (mut hunks_tx, mut hunks_rx) = mpsc::channel(1); let (mut hunks_tx, mut hunks_rx) = mpsc::channel(1);
@@ -1069,8 +1070,10 @@ impl Codegen {
kind: AssistantKind::Inline, kind: AssistantKind::Inline,
phase: AssistantPhase::Response, phase: AssistantPhase::Response,
model: model_telemetry_id, model: model_telemetry_id,
model_provider: model_provider_id.to_string(),
response_latency, response_latency,
error_message, error_message,
language_name: None,
}); });
} }

View file

@@ -22,6 +22,7 @@ pub use request::*;
pub use role::*; pub use role::*;
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::fmt;
use std::{future::Future, sync::Arc}; use std::{future::Future, sync::Arc};
use ui::IconName; use ui::IconName;
@@ -231,6 +232,12 @@ pub struct LanguageModelProviderId(pub SharedString);
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)] #[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString); pub struct LanguageModelProviderName(pub SharedString);
/// Renders the provider id as its inner string for telemetry and UI use.
impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate directly to the inner SharedString's Display impl,
        // which is exactly what `write!(f, "{}", self.0)` would do.
        fmt::Display::fmt(&self.0, f)
    }
}
impl From<String> for LanguageModelId { impl From<String> for LanguageModelId {
fn from(value: String) -> Self { fn from(value: String) -> Self {
Self(SharedString::from(value)) Self(SharedString::from(value))

View file

@@ -12,5 +12,6 @@ workspace = true
path = "src/telemetry_events.rs" path = "src/telemetry_events.rs"
[dependencies] [dependencies]
language.workspace = true
semantic_version.workspace = true semantic_version.workspace = true
serde.workspace = true serde.workspace = true

View file

@@ -1,5 +1,6 @@
//! See [Telemetry in Zed](https://zed.dev/docs/telemetry) for additional information. //! See [Telemetry in Zed](https://zed.dev/docs/telemetry) for additional information.
use language::LanguageName;
use semantic_version::SemanticVersion; use semantic_version::SemanticVersion;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{fmt::Display, sync::Arc, time::Duration}; use std::{fmt::Display, sync::Arc, time::Duration};
@@ -153,8 +154,10 @@ pub struct AssistantEvent {
pub phase: AssistantPhase, pub phase: AssistantPhase,
/// Name of the AI model used (gpt-4o, claude-3-5-sonnet, etc) /// Name of the AI model used (gpt-4o, claude-3-5-sonnet, etc)
pub model: String, pub model: String,
pub model_provider: String,
pub response_latency: Option<Duration>, pub response_latency: Option<Duration>,
pub error_message: Option<String>, pub error_message: Option<String>,
pub language_name: Option<LanguageName>,
} }
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]