Fix missed renames in #22632 (#23688)

- Fix a bug where a GPUI macro still used `ModelContext`
- Rename `AsyncAppContext` -> `AsyncApp`
- Rename `update_model`, `read_model`, `insert_model`, and `reserve_model` to `update_entity`, `read_entity`, `insert_entity`, and `reserve_entity`

Release Notes:

- N/A
Mikayla Maki 2025-01-26 15:37:34 -08:00 committed by GitHub
parent 83141d07e9
commit a6b1514246
118 changed files with 708 additions and 757 deletions
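
A rough caller-side sketch of what these renames amount to (a minimal sketch, not taken from this diff: `Counter` and `bump` are hypothetical, and the exact `AsyncApp::update_entity` signature is assumed from the old `update_model` shape):

use gpui::{AsyncApp, Entity};

struct Counter {
    value: usize,
}

// Previously this would take `cx: &mut AsyncAppContext` and call `cx.update_model(...)`.
fn bump(counter: &Entity<Counter>, cx: &mut AsyncApp) {
    // Assumed: the call returns a Result, since the app may already have been dropped.
    let _ = cx.update_entity(counter, |counter, _cx| counter.value += 1);
}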


@@ -4,7 +4,7 @@ use crate::{
     LanguageModelProviderState, LanguageModelRequest,
 };
 use futures::{channel::mpsc, future::BoxFuture, stream::BoxStream, FutureExt, StreamExt};
-use gpui::{AnyView, App, AsyncAppContext, Entity, Task, Window};
+use gpui::{AnyView, App, AsyncApp, Entity, Task, Window};
 use http_client::Result;
 use parking_lot::Mutex;
 use serde::Serialize;
@@ -164,7 +164,7 @@ impl LanguageModel for FakeLanguageModel {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        _: &AsyncAppContext,
+        _: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
         let (tx, rx) = mpsc::unbounded();
         self.current_completion_txs.lock().push((request, tx));
@@ -182,7 +182,7 @@ impl LanguageModel for FakeLanguageModel {
         name: String,
         description: String,
         schema: serde_json::Value,
-        _cx: &AsyncAppContext,
+        _cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
         let (tx, rx) = mpsc::unbounded();
         let tool_call = ToolUseRequest {
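
A hedged sketch of exercising the fake provider's updated signature, e.g. from a test helper (assumptions: `LanguageModelRequest` implements `Default` and `Result` is the `http_client::Result` imported above; only `stream_completion(request, &AsyncApp)` itself comes from the diff):

use futures::StreamExt;

// Hypothetical helper: pull the first streamed event out of the fake model.
async fn first_event(
    model: &FakeLanguageModel,
    cx: &AsyncApp,
) -> Option<Result<LanguageModelCompletionEvent>> {
    let request = LanguageModelRequest::default(); // assumed Default impl
    let mut events = model.stream_completion(request, cx).await.ok()?;
    events.next().await
}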


@@ -10,7 +10,7 @@ pub mod fake_provider;
 use anyhow::Result;
 use futures::FutureExt;
 use futures::{future::BoxFuture, stream::BoxStream, StreamExt, TryStreamExt as _};
-use gpui::{AnyElement, AnyView, App, AsyncAppContext, SharedString, Task, Window};
+use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
 pub use model::*;
 use proto::Plan;
 pub use rate_limiter::*;
@@ -136,13 +136,13 @@ pub trait LanguageModel: Send + Sync {
     fn stream_completion(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>>;
     fn stream_completion_text(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
         let events = self.stream_completion(request, cx);
@@ -186,7 +186,7 @@ pub trait LanguageModel: Send + Sync {
         name: String,
         description: String,
         schema: serde_json::Value,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;
     fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
@@ -203,7 +203,7 @@ impl dyn LanguageModel {
     pub fn use_tool<T: LanguageModelTool>(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> impl 'static + Future<Output = Result<T>> {
         let schema = schemars::schema_for!(T);
         let schema_json = serde_json::to_value(&schema).unwrap();
@@ -218,7 +218,7 @@ impl dyn LanguageModel {
     pub fn use_tool_stream<T: LanguageModelTool>(
         &self,
         request: LanguageModelRequest,
-        cx: &AsyncAppContext,
+        cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
         let schema = schemars::schema_for!(T);
         let schema_json = serde_json::to_value(&schema).unwrap();
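
Lastly, a small sketch of leaning on `use_tool` with the renamed context type; only the `use_tool::<T>(request, cx)` call, its `&AsyncApp` parameter, and the `schemars::schema_for!` detail come from the hunks above, while the wrapper itself is hypothetical:

// Hypothetical convenience wrapper around `<dyn LanguageModel>::use_tool`.
async fn call_tool<T: LanguageModelTool>(
    model: &dyn LanguageModel,
    request: LanguageModelRequest,
    cx: &AsyncApp,
) -> Result<T> {
    // use_tool builds a JSON schema for T via schemars (see the -203 hunk above)
    // and resolves to the deserialized tool output.
    model.use_tool::<T>(request, cx).await
}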