Fix missed renames in #22632 (#23688)

Fix a bug where a GPUI macro still used `ModelContext`
Rename `AsyncAppContext` -> `AsyncApp`
Rename `update_model`, `read_model`, `insert_model`, and `reserve_model` to
`update_entity`, `read_entity`, `insert_entity`, and `reserve_entity`

Release Notes:

- N/A
Mikayla Maki 2025-01-26 15:37:34 -08:00 committed by GitHub
parent 83141d07e9
commit a6b1514246
118 changed files with 708 additions and 757 deletions
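
The change is mechanical; for reference, here is a minimal sketch of the rename at a typical call site. The `MyState` type and `read_key` helper are hypothetical, invented for illustration; only the `AsyncApp` and `read_entity` names come from this commit:

```rust
use anyhow::Result;
use gpui::{AsyncApp, Entity};

// Hypothetical entity state, standing in for the providers' `State` below.
struct MyState {
    api_key: Option<String>,
}

// Before this commit: `cx: &AsyncAppContext` and `cx.read_model(...)`.
// After: the same shape with the new names. `read_entity` still returns a
// `Result`, because the async context can outlive the app.
fn read_key(state: &Entity<MyState>, cx: &AsyncApp) -> Result<Option<String>> {
    cx.read_entity(state, |state, _cx| state.api_key.clone())
}
```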


@@ -6,8 +6,7 @@ use editor::{Editor, EditorElement, EditorStyle};
 use futures::Stream;
 use futures::{future::BoxFuture, stream::BoxStream, FutureExt, StreamExt, TryStreamExt as _};
 use gpui::{
-    AnyView, App, AsyncAppContext, Context, Entity, FontStyle, Subscription, Task, TextStyle,
-    WhiteSpace,
+    AnyView, App, AsyncApp, Context, Entity, FontStyle, Subscription, Task, TextStyle, WhiteSpace,
 };
 use http_client::HttpClient;
 use language_model::{
@@ -307,12 +306,12 @@ impl AnthropicModel {
     fn stream_completion(
         &self,
         request: anthropic::Request,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<anthropic::Event, AnthropicError>>>>
     {
         let http_client = self.http_client.clone();

-        let Ok((api_key, api_url)) = cx.read_model(&self.state, |state, cx| {
+        let Ok((api_key, api_url)) = cx.read_entity(&self.state, |state, cx| {
             let settings = &AllLanguageModelSettings::get_global(cx).anthropic;
             (state.api_key.clone(), settings.api_url.clone())
         }) else {
@@ -373,7 +372,7 @@ impl LanguageModel for AnthropicModel {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
         let request = request.into_anthropic(
             self.model.id().into(),
@@ -404,7 +403,7 @@ impl LanguageModel for AnthropicModel {
         tool_name: String,
         tool_description: String,
         input_schema: serde_json::Value,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
         let mut request = request.into_anthropic(
             self.model.tool_model_id().into(),


@@ -12,7 +12,7 @@ use futures::{
     TryStreamExt as _,
 };
 use gpui::{
-    AnyElement, AnyView, App, AsyncAppContext, Context, Entity, EventEmitter, Global, ReadGlobal,
+    AnyElement, AnyView, App, AsyncApp, Context, Entity, EventEmitter, Global, ReadGlobal,
     Subscription, Task,
 };
 use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
@@ -131,7 +131,7 @@ impl RefreshLlmTokenListener {
     async fn handle_refresh_llm_token(
         this: Entity<Self>,
         _: TypedEnvelope<proto::RefreshLlmToken>,
-        mut cx: AsyncAppContext,
+        mut cx: AsyncApp,
     ) -> Result<()> {
         this.update(&mut cx, |_this, cx| cx.emit(RefreshLlmTokenEvent))
     }
@@ -621,7 +621,7 @@ impl LanguageModel for CloudLanguageModel {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        _cx: &AsyncAppContext,
+        _cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
         match &self.model {
             CloudModel::Anthropic(model) => {
@@ -716,7 +716,7 @@ impl LanguageModel for CloudLanguageModel {
         tool_name: String,
         tool_description: String,
         input_schema: serde_json::Value,
-        _cx: &AsyncAppContext,
+        _cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
         let client = self.client.clone();
         let llm_api_token = self.llm_api_token.clone();


@@ -11,8 +11,8 @@ use futures::future::BoxFuture;
 use futures::stream::BoxStream;
 use futures::{FutureExt, StreamExt};
 use gpui::{
-    percentage, svg, Animation, AnimationExt, AnyView, App, AsyncAppContext, Entity, Render,
-    Subscription, Task, Transformation,
+    percentage, svg, Animation, AnimationExt, AnyView, App, AsyncApp, Entity, Render, Subscription,
+    Task, Transformation,
 };
 use language_model::{
     LanguageModel, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
@@ -190,7 +190,7 @@ impl LanguageModel for CopilotChatLanguageModel {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
         if let Some(message) = request.messages.last() {
             if message.contents_empty() {
@@ -266,7 +266,7 @@ impl LanguageModel for CopilotChatLanguageModel {
         _name: String,
         _description: String,
         _schema: serde_json::Value,
-        _cx: &AsyncAppContext,
+        _cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
         future::ready(Err(anyhow!("not implemented"))).boxed()
     }


@@ -4,8 +4,7 @@ use editor::{Editor, EditorElement, EditorStyle};
 use futures::{future::BoxFuture, FutureExt, StreamExt};
 use google_ai::stream_generate_content;
 use gpui::{
-    AnyView, App, AsyncAppContext, Context, Entity, FontStyle, Subscription, Task, TextStyle,
-    WhiteSpace,
+    AnyView, App, AsyncApp, Context, Entity, FontStyle, Subscription, Task, TextStyle, WhiteSpace,
 };
 use http_client::HttpClient;
 use language_model::LanguageModelCompletionEvent;
@@ -282,7 +281,7 @@ impl LanguageModel for GoogleLanguageModel {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<
         'static,
         Result<futures::stream::BoxStream<'static, Result<LanguageModelCompletionEvent>>>,
@@ -290,7 +289,7 @@ impl LanguageModel for GoogleLanguageModel {
         let request = request.into_google(self.model.id().to_string());
         let http_client = self.http_client.clone();

-        let Ok((api_key, api_url)) = cx.read_model(&self.state, |state, cx| {
+        let Ok((api_key, api_url)) = cx.read_entity(&self.state, |state, cx| {
             let settings = &AllLanguageModelSettings::get_global(cx).google;
             (state.api_key.clone(), settings.api_url.clone())
         }) else {
@@ -319,7 +318,7 @@ impl LanguageModel for GoogleLanguageModel {
         _name: String,
         _description: String,
         _schema: serde_json::Value,
-        _cx: &AsyncAppContext,
+        _cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
         future::ready(Err(anyhow!("not implemented"))).boxed()
     }


@@ -1,6 +1,6 @@
 use anyhow::{anyhow, Result};
 use futures::{future::BoxFuture, stream::BoxStream, FutureExt, StreamExt};
-use gpui::{AnyView, App, AsyncAppContext, Context, Subscription, Task};
+use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
 use http_client::HttpClient;
 use language_model::LanguageModelCompletionEvent;
 use language_model::{
@@ -295,7 +295,7 @@ impl LanguageModel for LmStudioLanguageModel {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
         let request = self.to_lmstudio_request(request);

@@ -362,7 +362,7 @@ impl LanguageModel for LmStudioLanguageModel {
         _tool_name: String,
         _tool_description: String,
         _schema: serde_json::Value,
-        _cx: &AsyncAppContext,
+        _cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
         async move { Ok(futures::stream::empty().boxed()) }.boxed()
     }


@@ -1,6 +1,6 @@
 use anyhow::{anyhow, bail, Result};
 use futures::{future::BoxFuture, stream::BoxStream, FutureExt, StreamExt};
-use gpui::{AnyView, App, AsyncAppContext, Context, Subscription, Task};
+use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
 use http_client::HttpClient;
 use language_model::LanguageModelCompletionEvent;
 use language_model::{
@@ -263,7 +263,7 @@ impl OllamaLanguageModel {
     fn request_completion(
         &self,
         request: ChatRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<ChatResponseDelta>> {
         let http_client = self.http_client.clone();

@@ -323,7 +323,7 @@ impl LanguageModel for OllamaLanguageModel {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
         let request = self.to_ollama_request(request);

@@ -370,7 +370,7 @@ impl LanguageModel for OllamaLanguageModel {
         tool_name: String,
         tool_description: String,
         schema: serde_json::Value,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
         use ollama::{OllamaFunctionTool, OllamaTool};
         let function = OllamaFunctionTool {


@@ -3,8 +3,7 @@ use collections::BTreeMap;
 use editor::{Editor, EditorElement, EditorStyle};
 use futures::{future::BoxFuture, FutureExt, StreamExt};
 use gpui::{
-    AnyView, App, AsyncAppContext, Context, Entity, FontStyle, Subscription, Task, TextStyle,
-    WhiteSpace,
+    AnyView, App, AsyncApp, Context, Entity, FontStyle, Subscription, Task, TextStyle, WhiteSpace,
 };
 use http_client::HttpClient;
 use language_model::{
@@ -224,11 +223,11 @@ impl OpenAiLanguageModel {
     fn stream_completion(
         &self,
         request: open_ai::Request,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
     {
         let http_client = self.http_client.clone();
-        let Ok((api_key, api_url)) = cx.read_model(&self.state, |state, cx| {
+        let Ok((api_key, api_url)) = cx.read_entity(&self.state, |state, cx| {
             let settings = &AllLanguageModelSettings::get_global(cx).openai;
             (state.api_key.clone(), settings.api_url.clone())
         }) else {
@@ -286,7 +285,7 @@ impl LanguageModel for OpenAiLanguageModel {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<
         'static,
         Result<futures::stream::BoxStream<'static, Result<LanguageModelCompletionEvent>>>,
@@ -307,7 +306,7 @@ impl LanguageModel for OpenAiLanguageModel {
         tool_name: String,
         tool_description: String,
         schema: serde_json::Value,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
         let mut request = request.into_open_ai(self.model.id().into(), self.max_output_tokens());
         request.tool_choice = Some(ToolChoice::Other(ToolDefinition::Function {
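
The hunks above only exercise `read_entity`; the companion renames (`update_entity`, `insert_entity`, `reserve_entity`) follow the same pattern. A hedged sketch of `update_entity`, assuming its post-rename signature mirrors the old `update_model` (the `Counter` type and `bump` helper are hypothetical):

```rust
use anyhow::Result;
use gpui::{AsyncApp, Entity};

// Hypothetical entity, purely for illustration.
struct Counter(usize);

// Formerly `update_model`: mutate an entity's state from an async context.
// Like `read_entity`, this returns a `Result` because the app may be gone.
fn bump(counter: &Entity<Counter>, cx: &mut AsyncApp) -> Result<usize> {
    cx.update_entity(counter, |counter, _cx| {
        counter.0 += 1;
        counter.0
    })
}
```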