open_ai: Send prompt_cache_key to improve caching (#36065)

Release Notes:

- N/A

Co-authored-by: Michael Sloan <mgsloan@gmail.com>
This commit is contained in:
Oleksiy Syvokon 2025-08-12 21:51:23 +03:00
parent de4cf6e423
commit 6c90e882cc
2 changed files with 3 additions and 0 deletions

View file

@@ -455,6 +455,7 @@ pub fn into_open_ai(
} else {
None
},
prompt_cache_key: request.thread_id,
tools: request
.tools
.into_iter()

View file

@@ -244,6 +244,8 @@ pub struct Request {
pub parallel_tool_calls: Option<bool>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub tools: Vec<ToolDefinition>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub prompt_cache_key: Option<String>,
}
#[derive(Debug, Serialize, Deserialize)]