Remove unused load_model method from LanguageModelProvider (#32070)

Removes the load_model trait method and its implementations in Ollama
and LM Studio providers, along with associated preload_model functions
and unused imports.

Release Notes:

- N/A
This commit is contained in:
Ben Brandt 2025-06-04 16:07:01 +02:00 committed by GitHub
parent 04716a0e4a
commit 4304521655
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 4 additions and 84 deletions

View file

@ -3,7 +3,7 @@ use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::B
use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest, http};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::{sync::Arc, time::Duration};
use std::time::Duration;
pub const OLLAMA_API_URL: &str = "http://localhost:11434";
@ -357,36 +357,6 @@ pub async fn show_model(client: &dyn HttpClient, api_url: &str, model: &str) ->
Ok(details)
}
/// Sends an empty request to Ollama to trigger loading the model
pub async fn preload_model(client: Arc<dyn HttpClient>, api_url: &str, model: &str) -> Result<()> {
    // A generate request with no prompt makes Ollama load the model and keep
    // it resident for the "keep_alive" window without generating any output.
    let payload = serde_json::json!({
        "model": model,
        "keep_alive": "15m",
    })
    .to_string();

    let request = HttpRequest::builder()
        .method(Method::POST)
        .uri(format!("{api_url}/api/generate"))
        .header("Content-Type", "application/json")
        .body(AsyncBody::from(payload))?;

    let mut response = client.send(request).await?;
    let status = response.status();

    // Surface the response body on failure so the caller can see what Ollama reported.
    if !status.is_success() {
        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;
        anyhow::bail!(
            "Failed to connect to Ollama API: {} {}",
            status,
            body,
        );
    }

    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;