diff --git a/crates/inline_completion_button/src/inline_completion_button.rs b/crates/inline_completion_button/src/inline_completion_button.rs
index 29f0f8edb4..4a8ce1ef1b 100644
--- a/crates/inline_completion_button/src/inline_completion_button.rs
+++ b/crates/inline_completion_button/src/inline_completion_button.rs
@@ -906,8 +906,12 @@ impl InlineCompletionButton {
             })
         } else {
             menu.separator()
-                .entry("No Models Available", None, |_window, _cx| {
-                    // Display only
+                .header("No Models Configured")
+                .entry("Configure Models", None, {
+                    let fs = fs.clone();
+                    move |window, cx| {
+                        Self::open_ollama_settings(fs.clone(), window, cx);
+                    }
                 })
         };
 
@@ -916,7 +920,7 @@ impl InlineCompletionButton {
         })
     }
 
-    /// Opens Zed settings and navigates directly to the Ollama API URL configuration.
+    /// Opens Zed settings and navigates directly to the Ollama models configuration.
     /// Uses improved regex patterns to locate the exact setting in the JSON structure.
     fn open_ollama_settings(_fs: Arc<dyn Fs>, window: &mut Window, cx: &mut App) {
         if let Some(workspace) = window.root::<Workspace>().flatten() {
@@ -938,24 +942,25 @@ impl InlineCompletionButton {
                 .update_in(cx, |item, window, cx| {
                     let text = item.buffer().read(cx).snapshot(cx).text();
 
-                    // Look for language_models.ollama.api_url setting with precise pattern
+                    // Look for language_models.ollama section with precise pattern
                     // This matches the full nested structure to avoid false matches
-                    let api_url_pattern = r#""language_models"\s*:\s*\{[\s\S]*?"ollama"\s*:\s*\{[\s\S]*?"api_url"\s*:\s*"([^"]*)"#;
-                    let regex = regex::Regex::new(api_url_pattern).unwrap();
+                    let ollama_pattern = r#""language_models"\s*:\s*\{[\s\S]*?"ollama"\s*:\s*\{[\s\S]*?"available_models"\s*:\s*\[\s*\]"#;
+                    let regex = regex::Regex::new(ollama_pattern).unwrap();
 
                     if let Some(captures) = regex.captures(&text) {
-                        let _full_match = captures.get(0).unwrap();
-                        let value_capture = captures.get(1).unwrap();
+                        let full_match = captures.get(0).unwrap();
 
-                        // Select the API URL value (excluding quotes)
+                        // Position cursor after the opening bracket of available_models array
+                        let bracket_pos = full_match.as_str().rfind('[').unwrap();
+                        let cursor_pos = full_match.start() + bracket_pos + 1;
+
+                        // Place cursor inside the available_models array
                         item.change_selections(
                             SelectionEffects::scroll(Autoscroll::newest()),
                             window,
                             cx,
                             |selections| {
-                                selections.select_ranges(vec![
-                                    value_capture.start()..value_capture.end(),
-                                ]);
+                                selections.select_ranges(vec![cursor_pos..cursor_pos]);
                             },
                         );
                         return Ok::<(), anyhow::Error>(());
@@ -1475,6 +1480,27 @@ mod tests {
         });
     }
 
+    #[gpui::test]
+    async fn test_ollama_no_models_configured(cx: &mut TestAppContext) {
+        cx.update(|cx| {
+            let store = SettingsStore::test(cx);
+            cx.set_global(store);
+            AllLanguageModelSettings::register(cx);
+            language_model::LanguageModelRegistry::test(cx);
+
+            // Test menu behavior when no models are configured
+            let settings = AllLanguageModelSettings::get_global(cx);
+            let ollama_settings = &settings.ollama;
+
+            // Verify that available_models is empty by default
+            assert!(ollama_settings.available_models.is_empty());
+
+            // This simulates the condition that would trigger the "Configure Models" menu
+            let should_show_configure = ollama_settings.available_models.is_empty();
+            assert!(should_show_configure);
+        });
+    }
+
     #[gpui::test]
     async fn test_ollama_eager_subtle_options_visibility(cx: &mut TestAppContext) {
         cx.update(|cx| {
diff --git a/crates/zed/src/zed/inline_completion_registry.rs b/crates/zed/src/zed/inline_completion_registry.rs
index 71829b58b0..7d07e3d386 100644
--- a/crates/zed/src/zed/inline_completion_registry.rs
+++ b/crates/zed/src/zed/inline_completion_registry.rs
@@ -333,24 +333,27 @@ fn assign_edit_prediction_provider(
         }
         EditPredictionProvider::Ollama => {
             let settings = &AllLanguageModelSettings::get_global(cx).ollama;
-            let api_url = settings.api_url.clone();
-            // Use first available model or default to a FIM-capable model
-            // NOTE: codellama:7b and deepseek-coder:latest do NOT support FIM
-            // Use qwen2.5-coder:3b or starcoder2:latest instead
-            let model = settings
-                .available_models
-                .first()
-                .map(|m| m.name.clone())
-                .unwrap_or_else(|| "qwen2.5-coder:3b".to_string());
+            // Only create provider if models are configured
+            // Note: Only FIM-capable models work with inline completion:
+            // ✓ Supported: qwen2.5-coder:*, starcoder2:*, codeqwen:*
+            // ✗ Not supported: codellama:*, deepseek-coder:*, llama3:*
+            if let Some(first_model) = settings.available_models.first() {
+                let api_url = settings.api_url.clone();
+                let model = first_model.name.clone();
 
-            // Get API key from environment variable only (credentials would require async handling)
-            let api_key = std::env::var("OLLAMA_API_KEY").ok();
+                // Get API key from environment variable only (credentials would require async handling)
+                let api_key = std::env::var("OLLAMA_API_KEY").ok();
 
-            let provider = cx.new(|_| {
-                OllamaCompletionProvider::new(client.http_client(), api_url, model, api_key)
-            });
-            editor.set_edit_prediction_provider(Some(provider), window, cx);
+                let provider = cx.new(|_| {
+                    OllamaCompletionProvider::new(client.http_client(), api_url, model, api_key)
+                });
+                editor.set_edit_prediction_provider(Some(provider), window, cx);
+            } else {
+                // No models configured - don't create a provider
+                // User will see "Configure Models" option in the completion menu
+                editor.set_edit_prediction_provider::<OllamaCompletionProvider>(None, window, cx);
+            }
         }
     }
 }
 