cx.background_executor().spawn(...) -> cx.background_spawn(...) (#25103)

Done automatically with:

> ast-grep -p '$A.background_executor().spawn($B)' -r '$A.background_spawn($B)' --update-all --globs "\!crates/gpui"

Followed by:

* `cargo fmt`
* Removing some unexpected trailing whitespace.
* Manually adding imports of `gpui::{AppContext as _}`, which provides `background_spawn`.
* Adding `AppContext as _` to existing imports of `AppContext` (a sketch of the resulting call shape follows this list).
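Every hunk in the diff is that same mechanical change. As a point of reference, here is a minimal, self-contained sketch of how the two spellings relate; the executor, task, and trait below are simplified stand-ins rather than gpui's real definitions, and the forwarding body of `background_spawn` is an assumption based on the call sites in this diff:

```rust
use std::future::Future;
use std::marker::PhantomData;

// Stand-ins for gpui's Task and BackgroundExecutor (hypothetical, simplified).
struct Task<R>(PhantomData<R>);

struct BackgroundExecutor;

impl BackgroundExecutor {
    fn spawn<R>(&self, _future: impl Future<Output = R> + Send + 'static) -> Task<R> {
        Task(PhantomData) // the real executor schedules the future on a thread pool
    }
}

// Why `use gpui::{AppContext as _}` is needed: `background_spawn` lives on the
// `AppContext` trait, so the trait must be in scope for the method to resolve.
trait AppContext {
    fn background_executor(&self) -> &BackgroundExecutor;

    // Assumed shape: a provided method that forwards to the executor.
    fn background_spawn<R: Send + 'static>(
        &self,
        future: impl Future<Output = R> + Send + 'static,
    ) -> Task<R> {
        self.background_executor().spawn(future)
    }
}

struct App(BackgroundExecutor);

impl AppContext for App {
    fn background_executor(&self) -> &BackgroundExecutor {
        &self.0
    }
}

fn demo(cx: &App) {
    // Before this commit:
    let _before: Task<u32> = cx.background_executor().spawn(async { 2 + 2 });
    // After this commit (what the ast-grep rule rewrites it to):
    let _after: Task<u32> = cx.background_spawn(async { 2 + 2 });
}

fn main() {
    demo(&App(BackgroundExecutor));
}
```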

Release Notes:

- N/A
Michael Sloan · 2025-02-18 13:30:33 -07:00 · committed by GitHub
commit b1872e3afd · parent f606b0641e
120 changed files with 1146 additions and 1267 deletions


```diff
@@ -252,54 +252,53 @@ pub fn count_anthropic_tokens(
     request: LanguageModelRequest,
     cx: &App,
 ) -> BoxFuture<'static, Result<usize>> {
-    cx.background_executor()
-        .spawn(async move {
-            let messages = request.messages;
-            let mut tokens_from_images = 0;
-            let mut string_messages = Vec::with_capacity(messages.len());
+    cx.background_spawn(async move {
+        let messages = request.messages;
+        let mut tokens_from_images = 0;
+        let mut string_messages = Vec::with_capacity(messages.len());

-            for message in messages {
-                use language_model::MessageContent;
+        for message in messages {
+            use language_model::MessageContent;

-                let mut string_contents = String::new();
+            let mut string_contents = String::new();

-                for content in message.content {
-                    match content {
-                        MessageContent::Text(text) => {
-                            string_contents.push_str(&text);
-                        }
-                        MessageContent::Image(image) => {
-                            tokens_from_images += image.estimate_tokens();
-                        }
-                        MessageContent::ToolUse(_tool_use) => {
-                            // TODO: Estimate token usage from tool uses.
-                        }
-                        MessageContent::ToolResult(tool_result) => {
-                            string_contents.push_str(&tool_result.content);
-                        }
-                    }
-                }
+            for content in message.content {
+                match content {
+                    MessageContent::Text(text) => {
+                        string_contents.push_str(&text);
+                    }
+                    MessageContent::Image(image) => {
+                        tokens_from_images += image.estimate_tokens();
+                    }
+                    MessageContent::ToolUse(_tool_use) => {
+                        // TODO: Estimate token usage from tool uses.
+                    }
+                    MessageContent::ToolResult(tool_result) => {
+                        string_contents.push_str(&tool_result.content);
+                    }
+                }
+            }

-                if !string_contents.is_empty() {
-                    string_messages.push(tiktoken_rs::ChatCompletionRequestMessage {
-                        role: match message.role {
-                            Role::User => "user".into(),
-                            Role::Assistant => "assistant".into(),
-                            Role::System => "system".into(),
-                        },
-                        content: Some(string_contents),
-                        name: None,
-                        function_call: None,
-                    });
-                }
-            }
+            if !string_contents.is_empty() {
+                string_messages.push(tiktoken_rs::ChatCompletionRequestMessage {
+                    role: match message.role {
+                        Role::User => "user".into(),
+                        Role::Assistant => "assistant".into(),
+                        Role::System => "system".into(),
+                    },
+                    content: Some(string_contents),
+                    name: None,
+                    function_call: None,
+                });
+            }
+        }

-            // Tiktoken doesn't yet support these models, so we manually use the
-            // same tokenizer as GPT-4.
-            tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
-                .map(|tokens| tokens + tokens_from_images)
-        })
-        .boxed()
+        // Tiktoken doesn't yet support these models, so we manually use the
+        // same tokenizer as GPT-4.
+        tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
+            .map(|tokens| tokens + tokens_from_images)
+    })
+    .boxed()
 }

 impl AnthropicModel {
```
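A note on the shape of these functions rather than the change itself: they return `BoxFuture<'static, Result<usize>>`, a type-erased future from the `futures` crate, which is why each one ends in `.boxed()` (from `futures::FutureExt`). A small runnable illustration, with a made-up `fake_count` standing in for the real token counters:

```rust
use futures::{executor::block_on, future::BoxFuture, FutureExt};

// Hypothetical stand-in for functions like count_anthropic_tokens: build a
// future, then erase its concrete type with `.boxed()`.
fn fake_count(text: String) -> BoxFuture<'static, Result<usize, String>> {
    async move { Ok(text.split_whitespace().count()) }.boxed()
}

fn main() {
    // Callers only see the boxed future; a blocking executor suffices here.
    let tokens = block_on(fake_count("one two three".into())).unwrap();
    assert_eq!(tokens, 3);
}
```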


```diff
@@ -3,7 +3,8 @@ use collections::BTreeMap;
 use editor::{Editor, EditorElement, EditorStyle};
 use futures::{future::BoxFuture, stream::BoxStream, FutureExt, StreamExt};
 use gpui::{
-    AnyView, AppContext, AsyncApp, Entity, FontStyle, Subscription, Task, TextStyle, WhiteSpace,
+    AnyView, AppContext as _, AsyncApp, Entity, FontStyle, Subscription, Task, TextStyle,
+    WhiteSpace,
 };
 use http_client::HttpClient;
 use language_model::{
@@ -269,26 +270,25 @@ impl LanguageModel for DeepSeekLanguageModel {
         request: LanguageModelRequest,
         cx: &App,
     ) -> BoxFuture<'static, Result<usize>> {
-        cx.background_executor()
-            .spawn(async move {
-                let messages = request
-                    .messages
-                    .into_iter()
-                    .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
-                        role: match message.role {
-                            Role::User => "user".into(),
-                            Role::Assistant => "assistant".into(),
-                            Role::System => "system".into(),
-                        },
-                        content: Some(message.string_contents()),
-                        name: None,
-                        function_call: None,
-                    })
-                    .collect::<Vec<_>>();
+        cx.background_spawn(async move {
+            let messages = request
+                .messages
+                .into_iter()
+                .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
+                    role: match message.role {
+                        Role::User => "user".into(),
+                        Role::Assistant => "assistant".into(),
+                        Role::System => "system".into(),
+                    },
+                    content: Some(message.string_contents()),
+                    name: None,
+                    function_call: None,
+                })
+                .collect::<Vec<_>>();

-                tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
-            })
-            .boxed()
+            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+        })
+        .boxed()
     }

     fn stream_completion(
```
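The import hunk above is the manual fix-up the commit message mentions: `AppContext` becomes `AppContext as _`, which keeps the trait's methods in scope without binding the name. A self-contained illustration of the idiom, with made-up trait and type names:

```rust
mod traits {
    pub trait Describe {
        // Provided method, analogous to AppContext::background_spawn.
        fn describe(&self) -> &'static str {
            "described"
        }
    }
}

// `as _` imports the trait anonymously: its methods resolve on implementing
// types, but the name `Describe` is not bound in this scope.
use traits::Describe as _;

struct Widget;

impl traits::Describe for Widget {}

fn main() {
    // Works because the trait is in scope via the anonymous import.
    println!("{}", Widget.describe());
}
```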


```diff
@@ -330,28 +330,27 @@ pub fn count_google_tokens(
 ) -> BoxFuture<'static, Result<usize>> {
     // We couldn't use the GoogleLanguageModelProvider to count tokens because the github copilot doesn't have the access to google_ai directly.
     // So we have to use tokenizer from tiktoken_rs to count tokens.
-    cx.background_executor()
-        .spawn(async move {
-            let messages = request
-                .messages
-                .into_iter()
-                .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
-                    role: match message.role {
-                        Role::User => "user".into(),
-                        Role::Assistant => "assistant".into(),
-                        Role::System => "system".into(),
-                    },
-                    content: Some(message.string_contents()),
-                    name: None,
-                    function_call: None,
-                })
-                .collect::<Vec<_>>();
+    cx.background_spawn(async move {
+        let messages = request
+            .messages
+            .into_iter()
+            .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
+                role: match message.role {
+                    Role::User => "user".into(),
+                    Role::Assistant => "assistant".into(),
+                    Role::System => "system".into(),
+                },
+                content: Some(message.string_contents()),
+                name: None,
+                function_call: None,
+            })
+            .collect::<Vec<_>>();

-            // Tiktoken doesn't yet support these models, so we manually use the
-            // same tokenizer as GPT-4.
-            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
-        })
-        .boxed()
+        // Tiktoken doesn't yet support these models, so we manually use the
+        // same tokenizer as GPT-4.
+        tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+    })
+    .boxed()
 }

 struct ConfigurationView {
```


```diff
@@ -281,26 +281,25 @@ impl LanguageModel for MistralLanguageModel {
         request: LanguageModelRequest,
         cx: &App,
     ) -> BoxFuture<'static, Result<usize>> {
-        cx.background_executor()
-            .spawn(async move {
-                let messages = request
-                    .messages
-                    .into_iter()
-                    .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
-                        role: match message.role {
-                            Role::User => "user".into(),
-                            Role::Assistant => "assistant".into(),
-                            Role::System => "system".into(),
-                        },
-                        content: Some(message.string_contents()),
-                        name: None,
-                        function_call: None,
-                    })
-                    .collect::<Vec<_>>();
+        cx.background_spawn(async move {
+            let messages = request
+                .messages
+                .into_iter()
+                .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
+                    role: match message.role {
+                        Role::User => "user".into(),
+                        Role::Assistant => "assistant".into(),
+                        Role::System => "system".into(),
+                    },
+                    content: Some(message.string_contents()),
+                    name: None,
+                    function_call: None,
+                })
+                .collect::<Vec<_>>();

-                tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
-            })
-            .boxed()
+            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+        })
+        .boxed()
     }

     fn stream_completion(
```


```diff
@@ -343,34 +343,31 @@ pub fn count_open_ai_tokens(
     model: open_ai::Model,
     cx: &App,
 ) -> BoxFuture<'static, Result<usize>> {
-    cx.background_executor()
-        .spawn(async move {
-            let messages = request
-                .messages
-                .into_iter()
-                .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
-                    role: match message.role {
-                        Role::User => "user".into(),
-                        Role::Assistant => "assistant".into(),
-                        Role::System => "system".into(),
-                    },
-                    content: Some(message.string_contents()),
-                    name: None,
-                    function_call: None,
-                })
-                .collect::<Vec<_>>();
+    cx.background_spawn(async move {
+        let messages = request
+            .messages
+            .into_iter()
+            .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
+                role: match message.role {
+                    Role::User => "user".into(),
+                    Role::Assistant => "assistant".into(),
+                    Role::System => "system".into(),
+                },
+                content: Some(message.string_contents()),
+                name: None,
+                function_call: None,
+            })
+            .collect::<Vec<_>>();

-            match model {
-                open_ai::Model::Custom { .. }
-                | open_ai::Model::O1Mini
-                | open_ai::Model::O1
-                | open_ai::Model::O3Mini => {
-                    tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
-                }
-                _ => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
-            }
-        })
-        .boxed()
+        match model {
+            open_ai::Model::Custom { .. }
+            | open_ai::Model::O1Mini
+            | open_ai::Model::O1
+            | open_ai::Model::O3Mini => tiktoken_rs::num_tokens_from_messages("gpt-4", &messages),
+            _ => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
+        }
+    })
+    .boxed()
 }

 struct ConfigurationView {
```
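One recurring piece of logic in these hunks is worth calling out: tiktoken_rs does not know every model these providers expose, so the code falls back to GPT-4's tokenizer for an approximate count (only the OpenAI hunk uses the model's own tokenizer, and only for models tiktoken_rs supports directly). A hedged usage sketch of that fallback; the message contents here are made up, and the crate is `tiktoken-rs`:

```rust
use tiktoken_rs::ChatCompletionRequestMessage;

fn main() -> anyhow::Result<()> {
    let messages = vec![ChatCompletionRequestMessage {
        role: "user".into(),
        content: Some("How many tokens is this?".into()),
        name: None,
        function_call: None,
    }];

    // Passing "gpt-4" selects that model's tokenizer (cl100k_base), the same
    // fallback the providers above use for models tiktoken_rs doesn't support.
    let tokens = tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)?;
    println!("{tokens} tokens");
    Ok(())
}
```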