Accept wrapped text content from LLM providers (#31048)

Some providers occasionally send `{ "type": "text", "text": ... }` instead
of a plain string for text content. We now accept that wrapped form instead of returning an error.

Release Notes:

- N/A
This commit is contained in:
Richard Feldman 2025-05-20 16:50:02 -04:00 committed by GitHub
parent 89700c3682
commit 4bb04cef9d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 72 additions and 26 deletions

View file

@@ -13,7 +13,7 @@ use language_model::{
LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
RateLimiter, Role, StopReason,
RateLimiter, Role, StopReason, WrappedTextContent,
};
use open_ai::{ImageUrl, Model, ResponseStreamEvent, stream_completion};
use schemars::JsonSchema;
@@ -407,7 +407,11 @@ pub fn into_open_ai(
}
MessageContent::ToolResult(tool_result) => {
let content = match &tool_result.content {
LanguageModelToolResultContent::Text(text) => {
LanguageModelToolResultContent::Text(text)
| LanguageModelToolResultContent::WrappedText(WrappedTextContent {
text,
..
}) => {
vec![open_ai::MessagePart::Text {
text: text.to_string(),
}]