Ollama Provider for Assistant (#12902)

Closes #4424.

A few design decisions that may need some rethinking or later PRs:

* Other providers have a check for authentication. I use this
opportunity to fetch the models, which doubles as a way of finding out
whether the Ollama server is running (see the first sketch below).
* Ollama has _no_ API for getting the max tokens per model.
* Ollama has _no_ API for getting the current token count
(https://github.com/ollama/ollama/issues/1716), so it has to be
estimated client-side.
* Ollama does allow setting `num_ctx`, so I've defaulted it to 4096. It
can be overridden in settings. The second sketch below shows how both
of these fit into a request.
* Ollama models are slow to start inference on first use because the
model has to be loaded into memory; subsequent requests are faster.
There's no UI affordance yet to show that the model is being loaded.
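
For illustration, the check boils down to hitting Ollama's
`GET /api/tags` endpoint, which lists the locally installed models and
only answers when the server is up. A minimal standalone sketch, using
`reqwest` and `serde` in place of Zed's `HttpClient` (so the names here
are illustrative, not the provider's actual code):

```rust
use serde::Deserialize;

/// Subset of the response from Ollama's `GET /api/tags` endpoint,
/// which lists the models installed locally.
#[derive(Deserialize)]
struct LocalModelsResponse {
    models: Vec<LocalModel>,
}

#[derive(Deserialize)]
struct LocalModel {
    name: String,
}

/// Fetch the locally installed models. A successful response doubles
/// as proof that the Ollama server is running, so this can stand in
/// for the authentication check other providers perform.
async fn fetch_models(api_url: &str) -> anyhow::Result<Vec<String>> {
    let response = reqwest::get(format!("{api_url}/api/tags")).await?;
    anyhow::ensure!(
        response.status().is_success(),
        "Ollama server responded with {}",
        response.status()
    );
    let parsed: LocalModelsResponse = response.json().await?;
    Ok(parsed.models.into_iter().map(|m| m.name).collect())
}
```

In the provider this runs as part of `authenticate`, so a failed fetch
surfaces the same way a missing API key would for the other providers.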
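And a sketch of the two workarounds above: a chars-per-token heuristic
for counting (the divisor of 4 is an assumption, not anything Ollama
reports) and threading `num_ctx` through the `options` field of an
`/api/chat` request, which is the knob Ollama does expose:

```rust
use serde_json::json;

/// Ollama has no token-counting API, so approximate: ~4 characters
/// per token is a rough rule of thumb for English text, not an exact
/// tokenizer.
fn estimate_tokens(messages: &[String]) -> usize {
    messages.iter().map(|m| m.chars().count()).sum::<usize>() / 4
}

/// Ollama also has no API to query a model's maximum context length,
/// but `num_ctx` can be set per request. The 4096 default comes from
/// settings and is passed through here.
fn chat_request_body(model: &str, prompt: &str, num_ctx: u32) -> serde_json::Value {
    json!({
        "model": model,
        "messages": [{ "role": "user", "content": prompt }],
        "options": { "num_ctx": num_ctx },
        "stream": true
    })
}
```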

Release Notes:

- Added an Ollama Provider for the assistant. If you have
[Ollama](https://ollama.com/) running locally on your machine, you can
enable it in your settings under:

```jsonc
"assistant": {
    "version": "1",
    "provider": {
      "name": "ollama",
      // Recommended setting to allow for model startup
      "low_speed_timeout_in_seconds": 30,
    }
}
```

Chat as usual:

<img width="1840" alt="image"
src="https://github.com/zed-industries/zed/assets/836375/4e0af266-4c4f-4d9e-9d74-1a91f76a12fe">

Interact with any model from the [Ollama
Library](https://ollama.com/library)

<img width="587" alt="image"
src="https://github.com/zed-industries/zed/assets/836375/87433ac6-bf87-4a99-89e1-96a93bf8de8a">

Open up the terminal to download new models via `ollama pull`:


![image](https://github.com/zed-industries/zed/assets/836375/af7ec411-76bf-41c7-ba81-64bbaeea98a8)

@@ -2,12 +2,14 @@ mod anthropic;
mod cloud;
#[cfg(test)]
mod fake;
mod ollama;
mod open_ai;
pub use anthropic::*;
pub use cloud::*;
#[cfg(test)]
pub use fake::*;
pub use ollama::*;
pub use open_ai::*;
use crate::{
@@ -50,6 +52,17 @@ pub fn init(client: Arc<Client>, cx: &mut AppContext) {
low_speed_timeout_in_seconds.map(Duration::from_secs),
settings_version,
)),
AssistantProvider::Ollama {
model,
api_url,
low_speed_timeout_in_seconds,
} => CompletionProvider::Ollama(OllamaCompletionProvider::new(
model.clone(),
api_url.clone(),
client.http_client(),
low_speed_timeout_in_seconds.map(Duration::from_secs),
settings_version,
)),
};
cx.set_global(provider);
@@ -87,6 +100,23 @@ pub fn init(client: Arc<Client>, cx: &mut AppContext) {
settings_version,
);
}
(
CompletionProvider::Ollama(provider),
AssistantProvider::Ollama {
model,
api_url,
low_speed_timeout_in_seconds,
},
) => {
provider.update(
model.clone(),
api_url.clone(),
low_speed_timeout_in_seconds.map(Duration::from_secs),
settings_version,
);
}
(CompletionProvider::Cloud(provider), AssistantProvider::ZedDotDev { model }) => {
provider.update(model.clone(), settings_version);
}
@@ -130,6 +160,22 @@ pub fn init(client: Arc<Client>, cx: &mut AppContext) {
settings_version,
));
}
(
_,
AssistantProvider::Ollama {
model,
api_url,
low_speed_timeout_in_seconds,
},
) => {
*provider = CompletionProvider::Ollama(OllamaCompletionProvider::new(
model.clone(),
api_url.clone(),
client.http_client(),
low_speed_timeout_in_seconds.map(Duration::from_secs),
settings_version,
));
}
}
})
})
@@ -142,6 +188,7 @@ pub enum CompletionProvider {
Cloud(CloudCompletionProvider),
#[cfg(test)]
Fake(FakeCompletionProvider),
Ollama(OllamaCompletionProvider),
}
impl gpui::Global for CompletionProvider {}
@@ -165,6 +212,10 @@ impl CompletionProvider {
.available_models()
.map(LanguageModel::Cloud)
.collect(),
CompletionProvider::Ollama(provider) => provider
.available_models()
.map(|model| LanguageModel::Ollama(model.clone()))
.collect(),
#[cfg(test)]
CompletionProvider::Fake(_) => unimplemented!(),
}
@@ -175,6 +226,7 @@ impl CompletionProvider {
CompletionProvider::OpenAi(provider) => provider.settings_version(),
CompletionProvider::Anthropic(provider) => provider.settings_version(),
CompletionProvider::Cloud(provider) => provider.settings_version(),
CompletionProvider::Ollama(provider) => provider.settings_version(),
#[cfg(test)]
CompletionProvider::Fake(_) => unimplemented!(),
}
@@ -185,6 +237,7 @@ impl CompletionProvider {
CompletionProvider::OpenAi(provider) => provider.is_authenticated(),
CompletionProvider::Anthropic(provider) => provider.is_authenticated(),
CompletionProvider::Cloud(provider) => provider.is_authenticated(),
CompletionProvider::Ollama(provider) => provider.is_authenticated(),
#[cfg(test)]
CompletionProvider::Fake(_) => true,
}
@@ -195,6 +248,7 @@ impl CompletionProvider {
CompletionProvider::OpenAi(provider) => provider.authenticate(cx),
CompletionProvider::Anthropic(provider) => provider.authenticate(cx),
CompletionProvider::Cloud(provider) => provider.authenticate(cx),
CompletionProvider::Ollama(provider) => provider.authenticate(cx),
#[cfg(test)]
CompletionProvider::Fake(_) => Task::ready(Ok(())),
}
@@ -205,6 +259,7 @@ impl CompletionProvider {
CompletionProvider::OpenAi(provider) => provider.authentication_prompt(cx),
CompletionProvider::Anthropic(provider) => provider.authentication_prompt(cx),
CompletionProvider::Cloud(provider) => provider.authentication_prompt(cx),
CompletionProvider::Ollama(provider) => provider.authentication_prompt(cx),
#[cfg(test)]
CompletionProvider::Fake(_) => unimplemented!(),
}
@@ -215,6 +270,7 @@ impl CompletionProvider {
CompletionProvider::OpenAi(provider) => provider.reset_credentials(cx),
CompletionProvider::Anthropic(provider) => provider.reset_credentials(cx),
CompletionProvider::Cloud(_) => Task::ready(Ok(())),
CompletionProvider::Ollama(provider) => provider.reset_credentials(cx),
#[cfg(test)]
CompletionProvider::Fake(_) => Task::ready(Ok(())),
}
@@ -225,6 +281,7 @@ impl CompletionProvider {
CompletionProvider::OpenAi(provider) => LanguageModel::OpenAi(provider.model()),
CompletionProvider::Anthropic(provider) => LanguageModel::Anthropic(provider.model()),
CompletionProvider::Cloud(provider) => LanguageModel::Cloud(provider.model()),
CompletionProvider::Ollama(provider) => LanguageModel::Ollama(provider.model()),
#[cfg(test)]
CompletionProvider::Fake(_) => LanguageModel::default(),
}
@@ -239,6 +296,7 @@ impl CompletionProvider {
CompletionProvider::OpenAi(provider) => provider.count_tokens(request, cx),
CompletionProvider::Anthropic(provider) => provider.count_tokens(request, cx),
CompletionProvider::Cloud(provider) => provider.count_tokens(request, cx),
CompletionProvider::Ollama(provider) => provider.count_tokens(request, cx),
#[cfg(test)]
CompletionProvider::Fake(_) => futures::FutureExt::boxed(futures::future::ready(Ok(0))),
}
@@ -252,6 +310,7 @@ impl CompletionProvider {
CompletionProvider::OpenAi(provider) => provider.complete(request),
CompletionProvider::Anthropic(provider) => provider.complete(request),
CompletionProvider::Cloud(provider) => provider.complete(request),
CompletionProvider::Ollama(provider) => provider.complete(request),
#[cfg(test)]
CompletionProvider::Fake(provider) => provider.complete(),
}