language_models: Add image support for Ollama vision models (#31883)
Ollama supports vision models that can process input images; this PR adds that support to Zed's Ollama provider. I have tested this with gemma3:4b (a screenshot of it working is attached on the PR).

Release Notes:

- Add image support for [Ollama vision models](https://ollama.com/search?c=vision)
parent a60bea8a3d
commit 59686f1f44

3 changed files with 136 additions and 26 deletions
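For context on what the provider has to emit: Ollama's `/api/chat` endpoint accepts an optional `images` array of base64-encoded image data on each message (per the public Ollama API docs). Below is a minimal, self-contained sketch of that wire format. The struct names are stand-ins, not Zed's actual `ollama` crate types:

```rust
use serde::Serialize;

// Stand-ins mirroring Ollama's /api/chat request shape; the field names
// follow Ollama's public API docs, not Zed's internal `ollama` crate.
#[derive(Serialize)]
struct WireMessage {
    role: String,
    content: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    images: Option<Vec<String>>, // base64-encoded image bytes
}

#[derive(Serialize)]
struct WireRequest {
    model: String,
    messages: Vec<WireMessage>,
    stream: bool,
}

fn main() {
    let request = WireRequest {
        model: "gemma3:4b".into(),
        messages: vec![WireMessage {
            role: "user".into(),
            content: "What is in this image?".into(),
            images: Some(vec!["iVBORw0KGgoAAAANSUhEUg...".into()]), // truncated base64
        }],
        stream: true,
    };
    println!("{}", serde_json::to_string_pretty(&request).unwrap());
}
```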
@@ -4,14 +4,11 @@ use futures::{Stream, TryFutureExt, stream};
 use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
 use http_client::HttpClient;
 use language_model::{
-    AuthenticateError, LanguageModelCompletionError, LanguageModelCompletionEvent,
+    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
+    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
+    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
     LanguageModelRequestTool, LanguageModelToolChoice, LanguageModelToolUse,
-    LanguageModelToolUseId, MessageContent, StopReason,
-};
-use language_model::{
-    LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
-    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
-    LanguageModelRequest, RateLimiter, Role,
+    LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
 };
 use ollama::{
     ChatMessage, ChatOptions, ChatRequest, ChatResponseDelta, KeepAlive, OllamaFunctionTool,
@@ -54,6 +51,8 @@ pub struct AvailableModel {
     pub keep_alive: Option<KeepAlive>,
     /// Whether the model supports tools
    pub supports_tools: Option<bool>,
+    /// Whether the model supports vision
+    pub supports_images: Option<bool>,
     /// Whether to enable think mode
     pub supports_thinking: Option<bool>,
 }
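The new field keeps the `Option<bool>` convention of its siblings: `None` means the user did not specify, and the use sites below fall back to `false` via `unwrap_or(false)`. A self-contained illustration of that tri-state; only the fields visible in this diff are mirrored, Zed's real struct has more:

```rust
#[derive(Default)]
struct AvailableModel {
    supports_tools: Option<bool>,
    supports_images: Option<bool>,
    supports_thinking: Option<bool>,
}

fn main() {
    // A user explicitly opting a custom model into vision:
    let pinned = AvailableModel {
        supports_images: Some(true),
        ..Default::default()
    };
    // Left unset, the provider treats the model as non-vision.
    let unset = AvailableModel::default();
    assert!(pinned.supports_images.unwrap_or(false));
    assert!(!unset.supports_images.unwrap_or(false));
}
```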
@@ -101,6 +100,7 @@ impl State {
                         None,
                         None,
                         Some(capabilities.supports_tools()),
+                        Some(capabilities.supports_vision()),
                         Some(capabilities.supports_thinking()),
                     );
                     Ok(ollama_model)
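The `capabilities` value above comes from Ollama's model metadata: recent Ollama servers report a capability list (for example `["completion", "vision"]`) per model via `POST /api/show`. A hedged sketch of what `supports_vision()` plausibly checks; the real helper lives in Zed's `ollama` crate and may differ in detail:

```rust
// Stand-in for the relevant slice of an /api/show response.
struct ModelCapabilities {
    capabilities: Vec<String>,
}

impl ModelCapabilities {
    fn supports_vision(&self) -> bool {
        // "vision" is the capability string Ollama reports for image-capable models.
        self.capabilities.iter().any(|c| c == "vision")
    }
}

fn main() {
    let gemma = ModelCapabilities {
        capabilities: vec!["completion".into(), "vision".into()],
    };
    assert!(gemma.supports_vision());
}
```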
@@ -222,6 +222,7 @@ impl LanguageModelProvider for OllamaLanguageModelProvider {
                         max_tokens: model.max_tokens,
                         keep_alive: model.keep_alive.clone(),
                         supports_tools: model.supports_tools,
+                        supports_vision: model.supports_images,
                         supports_thinking: model.supports_thinking,
                     },
                 );
@@ -277,30 +278,59 @@ pub struct OllamaLanguageModel {
 
 impl OllamaLanguageModel {
     fn to_ollama_request(&self, request: LanguageModelRequest) -> ChatRequest {
+        let supports_vision = self.model.supports_vision.unwrap_or(false);
+
         ChatRequest {
             model: self.model.name.clone(),
             messages: request
                 .messages
                 .into_iter()
-                .map(|msg| match msg.role {
-                    Role::User => ChatMessage::User {
-                        content: msg.string_contents(),
-                    },
-                    Role::Assistant => {
-                        let content = msg.string_contents();
-                        let thinking = msg.content.into_iter().find_map(|content| match content {
-                            MessageContent::Thinking { text, .. } if !text.is_empty() => Some(text),
-                            _ => None,
-                        });
-                        ChatMessage::Assistant {
-                            content,
-                            tool_calls: None,
-                            thinking,
-                        }
-                    }
-                    Role::System => ChatMessage::System {
-                        content: msg.string_contents(),
-                    },
+                .map(|msg| {
+                    let images = if supports_vision {
+                        msg.content
+                            .iter()
+                            .filter_map(|content| match content {
+                                MessageContent::Image(image) => Some(image.source.to_string()),
+                                _ => None,
+                            })
+                            .collect::<Vec<String>>()
+                    } else {
+                        vec![]
+                    };
+
+                    match msg.role {
+                        Role::User => ChatMessage::User {
+                            content: msg.string_contents(),
+                            images: if images.is_empty() {
+                                None
+                            } else {
+                                Some(images)
+                            },
+                        },
+                        Role::Assistant => {
+                            let content = msg.string_contents();
+                            let thinking =
+                                msg.content.into_iter().find_map(|content| match content {
+                                    MessageContent::Thinking { text, .. } if !text.is_empty() => {
+                                        Some(text)
+                                    }
+                                    _ => None,
+                                });
+                            ChatMessage::Assistant {
+                                content,
+                                tool_calls: None,
+                                images: if images.is_empty() {
+                                    None
+                                } else {
+                                    Some(images)
+                                },
+                                thinking,
+                            }
+                        }
+                        Role::System => ChatMessage::System {
+                            content: msg.string_contents(),
+                        },
+                    }
                 })
                 .collect(),
             keep_alive: self.model.keep_alive.clone().unwrap_or_default(),
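The heart of the change: each outgoing message's content is scanned for `MessageContent::Image` parts, and the collected base64 strings are attached to the `ChatMessage`, but only when the model is marked vision-capable and only when the list is non-empty, so non-vision requests serialize exactly as before. A self-contained rehearsal of that logic with stand-in types (in Zed, `image.source.to_string()` yields the image's base64 payload):

```rust
// Stand-in for Zed's MessageContent; a plain String replaces LanguageModelImage.
enum MessageContent {
    Text(String),
    Image(String), // base64 image data
}

fn collect_images(content: &[MessageContent], supports_vision: bool) -> Option<Vec<String>> {
    let images: Vec<String> = if supports_vision {
        content
            .iter()
            .filter_map(|part| match part {
                MessageContent::Image(data) => Some(data.clone()),
                _ => None,
            })
            .collect()
    } else {
        Vec::new()
    };
    // Mirror the diff: send None rather than an empty array.
    if images.is_empty() { None } else { Some(images) }
}

fn main() {
    let msg = vec![
        MessageContent::Text("What is in this image?".into()),
        MessageContent::Image("iVBORw0KGgo...".into()),
    ];
    assert_eq!(collect_images(&msg, true).map(|v| v.len()), Some(1));
    assert!(collect_images(&msg, false).is_none()); // vision off: field omitted
}
```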
@@ -339,7 +369,7 @@ impl LanguageModel for OllamaLanguageModel {
     }
 
     fn supports_images(&self) -> bool {
-        false
+        self.model.supports_vision.unwrap_or(false)
     }
 
     fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
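With `supports_images` now answering truthfully instead of always `false`, callers can gate image attachments per model rather than hard-disabling them for the whole provider. The call site below is hypothetical; only the `supports_images` method name is taken from this diff:

```rust
// Hypothetical caller; minimal stand-ins for the trait and attachment types.
trait LanguageModel {
    fn supports_images(&self) -> bool;
}

struct Attachment(Vec<u8>);

fn attach_if_supported(model: &dyn LanguageModel, attachments: &mut Vec<Attachment>, img: Attachment) {
    if model.supports_images() {
        attachments.push(img);
    }
    // Otherwise drop (or surface a warning) instead of sending bytes the
    // server would ignore.
}

struct Dummy(bool);
impl LanguageModel for Dummy {
    fn supports_images(&self) -> bool {
        self.0
    }
}

fn main() {
    let mut attachments = Vec::new();
    attach_if_supported(&Dummy(true), &mut attachments, Attachment(vec![0u8]));
    assert_eq!(attachments.len(), 1);
}
```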
@@ -437,7 +467,7 @@ fn map_to_language_model_completion_events(
                     let mut events = Vec::new();
 
                     match delta.message {
-                        ChatMessage::User { content } => {
+                        ChatMessage::User { content, images: _ } => {
                             events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                         }
                         ChatMessage::System { content } => {
@@ -446,6 +476,7 @@ fn map_to_language_model_completion_events(
                         ChatMessage::Assistant {
                             content,
                             tool_calls,
+                            images: _,
                             thinking,
                         } => {
                             if let Some(text) = thinking {
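Nothing changes on the response path beyond pattern exhaustiveness: Ollama does not send images back in chat deltas, so the new field is acknowledged with `images: _` and dropped. The idiom in isolation:

```rust
// Destructuring with `images: _` names the field without binding it, so the
// pattern stays valid (and the intent stays explicit) after the variant grew.
enum ChatMessage {
    User { content: String, images: Option<Vec<String>> },
}

fn text_of(message: ChatMessage) -> String {
    match message {
        ChatMessage::User { content, images: _ } => content,
    }
}

fn main() {
    let m = ChatMessage::User { content: "hi".into(), images: None };
    assert_eq!(text_of(m), "hi");
}
```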