diff --git a/crates/assistant2/src/assistant_panel.rs b/crates/assistant2/src/assistant_panel.rs
index bcf85a6948..31264186bd 100644
--- a/crates/assistant2/src/assistant_panel.rs
+++ b/crates/assistant2/src/assistant_panel.rs
@@ -2835,7 +2835,7 @@ impl InlineAssistant {
                 )
             })?;

-            if answer.next().await == Some(0) {
+            if answer.await? == 0 {
                 this.update(&mut cx, |this, _| {
                     this.semantic_permissioned = Some(true);
                 })?;
@@ -2875,7 +2875,7 @@ impl InlineAssistant {
             // This has to be updated to accomodate for semantic_permissions
             if semantic_permissioned.await.unwrap_or(false) {
                 semantic_index
-                    .update(&mut cx, |index, cx| index.index_project(project, cx))
+                    .update(&mut cx, |index, cx| index.index_project(project, cx))?
                     .await
             } else {
                 Err(anyhow!("project is not permissioned for semantic indexing"))
@@ -3453,7 +3453,7 @@ fn report_assistant_event(
         .default_open_ai_model
         .clone();

-    let telemetry_settings = TelemetrySettings::get_global(cx);
+    let telemetry_settings = TelemetrySettings::get_global(cx).clone();

    telemetry.report_assistant_event(
        telemetry_settings,
diff --git a/crates/assistant2/src/codegen.rs b/crates/assistant2/src/codegen.rs
index 9696c629ac..98b43719f3 100644
--- a/crates/assistant2/src/codegen.rs
+++ b/crates/assistant2/src/codegen.rs
@@ -109,13 +109,13 @@ impl Codegen {
             .unwrap_or_else(|| snapshot.indent_size_for_line(selection_start.row));

         let response = self.provider.complete(prompt);
-        self.generation = cx.spawn_weak(|this, mut cx| {
+        self.generation = cx.spawn(|this, mut cx| {
             async move {
                 let generate = async {
                     let mut edit_start = range.start.to_offset(&snapshot);

                     let (mut hunks_tx, mut hunks_rx) = mpsc::channel(1);
-                    let diff = cx.background().spawn(async move {
+                    let diff = cx.background_executor().spawn(async move {
                         let chunks = strip_invalid_spans_from_codeblock(response.await?);
                         futures::pin_mut!(chunks);
                         let mut diff = StreamingDiff::new(selected_text.to_string());
@@ -181,7 +181,7 @@ impl Codegen {
                     });

                     while let Some(hunks) = hunks_rx.next().await {
-                        let this = if let Some(this) = this.upgrade(&cx) {
+                        let this = if let Some(this) = this.upgrade() {
                             this
                         } else {
                             break;
@@ -251,17 +251,16 @@ impl Codegen {
                 };

                 let result = generate.await;
-                if let Some(this) = this.upgrade(&cx) {
-                    this.update(&mut cx, |this, cx| {
-                        this.last_equal_ranges.clear();
-                        this.idle = true;
-                        if let Err(error) = result {
-                            this.error = Some(error);
-                        }
-                        cx.emit(Event::Finished);
-                        cx.notify();
-                    });
-                }
+                this.update(&mut cx, |this, cx| {
+                    this.last_equal_ranges.clear();
+                    this.idle = true;
+                    if let Err(error) = result {
+                        this.error = Some(error);
+                    }
+                    cx.emit(Event::Finished);
+                    cx.notify();
+                })
+                .ok();
             }
         });
         self.error.take();
@@ -370,7 +369,7 @@ mod tests {
     use super::*;
     use ai::test::FakeCompletionProvider;
     use futures::stream::{self};
-    use gpui::TestAppContext;
+    use gpui::{Context, TestAppContext};
     use indoc::indoc;
     use language::{language_settings, tree_sitter_rust, Buffer, Language, LanguageConfig, Point};
     use rand::prelude::*;
@@ -390,7 +389,7 @@ mod tests {

     #[gpui::test(iterations = 10)]
     async fn test_transform_autoindent(cx: &mut TestAppContext, mut rng: StdRng) {
-        cx.set_global(cx.read(SettingsStore::test));
+        cx.set_global(cx.update(SettingsStore::test));
         cx.update(language_settings::init);

         let text = indoc! {"
@@ -402,14 +401,14 @@ mod tests {
             }
         "};
         let buffer =
-            cx.add_model(|cx| Buffer::new(0, 0, text).with_language(Arc::new(rust_lang()), cx));
-        let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx));
+            cx.build_model(|cx| Buffer::new(0, 0, text).with_language(Arc::new(rust_lang()), cx));
+        let buffer = cx.build_model(|cx| MultiBuffer::singleton(buffer, cx));
         let range = buffer.read_with(cx, |buffer, cx| {
             let snapshot = buffer.snapshot(cx);
             snapshot.anchor_before(Point::new(1, 0))..snapshot.anchor_after(Point::new(4, 5))
         });
         let provider = Arc::new(FakeCompletionProvider::new());
-        let codegen = cx.add_model(|cx| {
+        let codegen = cx.build_model(|cx| {
             Codegen::new(
                 buffer.clone(),
                 CodegenKind::Transform { range },
@@ -459,7 +458,7 @@ mod tests {
         cx: &mut TestAppContext,
         mut rng: StdRng,
     ) {
-        cx.set_global(cx.read(SettingsStore::test));
+        cx.set_global(cx.update(SettingsStore::test));
         cx.update(language_settings::init);

         let text = indoc! {"
@@ -468,14 +467,14 @@ mod tests {
             }
         "};
         let buffer =
-            cx.add_model(|cx| Buffer::new(0, 0, text).with_language(Arc::new(rust_lang()), cx));
-        let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx));
+            cx.build_model(|cx| Buffer::new(0, 0, text).with_language(Arc::new(rust_lang()), cx));
+        let buffer = cx.build_model(|cx| MultiBuffer::singleton(buffer, cx));
         let position = buffer.read_with(cx, |buffer, cx| {
             let snapshot = buffer.snapshot(cx);
             snapshot.anchor_before(Point::new(1, 6))
         });
         let provider = Arc::new(FakeCompletionProvider::new());
-        let codegen = cx.add_model(|cx| {
+        let codegen = cx.build_model(|cx| {
             Codegen::new(
                 buffer.clone(),
                 CodegenKind::Generate { position },
@@ -524,7 +523,7 @@ mod tests {
         cx: &mut TestAppContext,
         mut rng: StdRng,
     ) {
-        cx.set_global(cx.read(SettingsStore::test));
+        cx.set_global(cx.update(SettingsStore::test));
         cx.update(language_settings::init);

         let text = concat!(
             "    le\n", //
             "}\n" //
         );
         let buffer =
-            cx.add_model(|cx| Buffer::new(0, 0, text).with_language(Arc::new(rust_lang()), cx));
-        let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx));
+            cx.build_model(|cx| Buffer::new(0, 0, text).with_language(Arc::new(rust_lang()), cx));
+        let buffer = cx.build_model(|cx| MultiBuffer::singleton(buffer, cx));
         let position = buffer.read_with(cx, |buffer, cx| {
             let snapshot = buffer.snapshot(cx);
             snapshot.anchor_before(Point::new(1, 2))
         });
         let provider = Arc::new(FakeCompletionProvider::new());
-        let codegen = cx.add_model(|cx| {
+        let codegen = cx.build_model(|cx| {
             Codegen::new(
                 buffer.clone(),
                 CodegenKind::Generate { position },
diff --git a/crates/assistant2/src/prompts.rs b/crates/assistant2/src/prompts.rs
index b678c6fe3b..ac3b175e64 100644
--- a/crates/assistant2/src/prompts.rs
+++ b/crates/assistant2/src/prompts.rs
@@ -176,7 +176,7 @@ pub(crate) mod tests {
     use super::*;
     use std::sync::Arc;

-    use gpui::AppContext;
+    use gpui::{AppContext, Context};
     use indoc::indoc;
     use language::{language_settings, tree_sitter_rust, Buffer, Language, LanguageConfig, Point};
     use settings::SettingsStore;
@@ -253,7 +253,7 @@ pub(crate) mod tests {
             }
         "};
         let buffer =
-            cx.add_model(|cx| Buffer::new(0, 0, text).with_language(Arc::new(rust_lang()), cx));
+            cx.build_model(|cx| Buffer::new(0, 0, text).with_language(Arc::new(rust_lang()), cx));
         let snapshot = buffer.read(cx).snapshot();

         assert_eq!(
diff --git a/crates/search2/src/buffer_search.rs b/crates/search2/src/buffer_search.rs
index b3d6006113..b9fa36ef34 100644
--- a/crates/search2/src/buffer_search.rs
+++ b/crates/search2/src/buffer_search.rs
@@ -10,9 +10,9 @@ use collections::HashMap;
 use editor::{Editor, EditorMode};
 use futures::channel::oneshot;
 use gpui::{
-    actions, div, red, Action, AppContext, Div, EventEmitter, InteractiveElement as _, IntoElement,
-    ParentElement as _, Render, Styled, Subscription, Task, View, ViewContext, VisualContext as _,
-    WeakView, WindowContext,
+    actions, div, red, Action, AppContext, Div, EventEmitter, FocusableView,
+    InteractiveElement as _, IntoElement, ParentElement as _, Render, Styled, Subscription, Task,
+    View, ViewContext, VisualContext as _, WeakView, WindowContext,
 };
 use project::search::SearchQuery;
 use serde::Deserialize;
@@ -251,6 +251,12 @@ impl Render for BufferSearchBar {
     }
 }

+impl FocusableView for BufferSearchBar {
+    fn focus_handle(&self, cx: &AppContext) -> gpui::FocusHandle {
+        self.query_editor.focus_handle(cx)
+    }
+}
+
 impl ToolbarItemView for BufferSearchBar {
     fn set_active_pane_item(
         &mut self,
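
Note on the gpui2 patterns in the hunks above: the codegen.rs changes drop `spawn_weak`/`upgrade(&cx)` in favor of `cx.spawn`, which hands the task a weak handle whose `update` returns a `Result`; background work moves from `cx.background()` to `cx.background_executor()`; and models are built with `build_model`, which is why the tests gain `gpui::Context` imports. The sketch below restates that pattern in one place. It is a minimal sketch assuming the same gpui2 API the diff already exercises; the `Counter` type and the `schedule_increment`/`build_counter` names are hypothetical illustrations, not part of this change.

use gpui::{AppContext, Context, Model, ModelContext, Task};

struct Counter {
    count: usize,
    _task: Option<Task<()>>,
}

impl Counter {
    // Hypothetical example: spawn a task that does work off the main thread,
    // then writes the result back through the weak handle `cx.spawn` provides.
    fn schedule_increment(&mut self, cx: &mut ModelContext<Self>) {
        self._task = Some(cx.spawn(|this, mut cx| async move {
            // Heavy work runs on the background executor, mirroring the hunk
            // that switches cx.background() to cx.background_executor().
            let delta = cx.background_executor().spawn(async move { 1usize }).await;

            // The weak handle's update fails if the model has been dropped;
            // `.ok()` stands in for the old explicit `upgrade(&cx)` check.
            this.update(&mut cx, |this, cx| {
                this.count += delta;
                cx.notify();
            })
            .ok();
        }));
    }
}

// Models are now constructed through build_model (from the Context trait)
// rather than the old add_model.
fn build_counter(cx: &mut AppContext) -> Model<Counter> {
    cx.build_model(|_cx| Counter {
        count: 0,
        _task: None,
    })
}

Keeping the returned `Task` in a field (as the diff does with `self.generation`) matters because dropping a gpui `Task` cancels the work.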