language_models: Add thinking support to LM Studio provider (#32337)

It works similarly to DeepSeek: the thinking is returned as
`reasoning_content`, and we don't have to send the `reasoning_content` back
in the request.
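
To make that flow concrete, here is a minimal sketch (not the actual provider code) of a DeepSeek-style streaming delta carrying thinking in a separate `reasoning_content` field, and how it splits into thinking vs. text output. The `Delta` struct and sample chunk are illustrative stand-ins; the real types are in the diff below.

```rust
// Minimal sketch, assuming a DeepSeek-style streaming delta. `Delta` and the
// sample chunk are illustrative only, not the provider's real types.
use serde::Deserialize;

#[derive(Deserialize)]
struct Delta {
    content: Option<String>,
    reasoning_content: Option<String>,
}

fn main() {
    let chunk = r#"{"content": "4", "reasoning_content": "2 + 2 = 4"}"#;
    let delta: Delta = serde_json::from_str(chunk).unwrap();

    // Thinking is surfaced as its own event; only `content` is ever echoed
    // back to the model in later requests.
    if let Some(thinking) = delta.reasoning_content {
        println!("thinking: {thinking}");
    }
    if let Some(text) = delta.content {
        println!("text: {text}");
    }
}
```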

This is an experimental feature which can be enabled from settings like
this:
<img width="1381" alt="Screenshot 2025-06-08 at 4 26 06 PM"
src="https://github.com/user-attachments/assets/d2f60f3c-0f93-45fc-bae2-4ded42981820"
/>

Here is how it looks in use (tested with
`deepseek/deepseek-r1-0528-qwen3-8b`):

<img width="528" alt="Screenshot 2025-06-08 at 5 12 33 PM"
src="https://github.com/user-attachments/assets/f7716f52-5417-4f14-82b8-e853de054f63"
/>


Release Notes:

- Add thinking support to LM Studio provider
Umesh Yadav 2025-06-09 15:25:34 +05:30 committed by GitHub
parent c75ad2fd11
commit 4ac7935589
2 changed files with 18 additions and 9 deletions

@@ -250,15 +250,15 @@ impl LmStudioLanguageModel {
         for message in request.messages {
             for content in message.content {
                 match content {
-                    MessageContent::Text(text) | MessageContent::Thinking { text, .. } => messages
-                        .push(match message.role {
-                            Role::User => ChatMessage::User { content: text },
-                            Role::Assistant => ChatMessage::Assistant {
-                                content: Some(text),
-                                tool_calls: Vec::new(),
-                            },
-                            Role::System => ChatMessage::System { content: text },
-                        }),
+                    MessageContent::Text(text) => messages.push(match message.role {
+                        Role::User => ChatMessage::User { content: text },
+                        Role::Assistant => ChatMessage::Assistant {
+                            content: Some(text),
+                            tool_calls: Vec::new(),
+                        },
+                        Role::System => ChatMessage::System { content: text },
+                    }),
+                    MessageContent::Thinking { .. } => {}
                     MessageContent::RedactedThinking(_) => {}
                     MessageContent::Image(_) => {}
                     MessageContent::ToolUse(tool_use) => {
@@ -471,6 +471,13 @@ impl LmStudioEventMapper {
             events.push(Ok(LanguageModelCompletionEvent::Text(content)));
         }
 
+        if let Some(reasoning_content) = choice.delta.reasoning_content {
+            events.push(Ok(LanguageModelCompletionEvent::Thinking {
+                text: reasoning_content,
+                signature: None,
+            }));
+        }
+
         if let Some(tool_calls) = choice.delta.tool_calls {
             for tool_call in tool_calls {
                 let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

@@ -277,6 +277,8 @@ pub struct ResponseMessageDelta {
     pub role: Option<Role>,
     pub content: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub reasoning_content: Option<String>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub tool_calls: Option<Vec<ToolCallChunk>>,
 }
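
For what the new serde attributes buy us, here is a hedged sketch with a trimmed-down struct (the real `ResponseMessageDelta` has more fields such as `role` and `tool_calls`): chunks from servers that never emit `reasoning_content` still deserialize, and the absent field is dropped again on serialization.

```rust
// Sketch only: a simplified stand-in for ResponseMessageDelta, showing the
// effect of `#[serde(default, skip_serializing_if = "Option::is_none")]`.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct ResponseMessageDelta {
    pub content: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reasoning_content: Option<String>,
}

fn main() {
    // A chunk without the field still parses.
    let plain: ResponseMessageDelta = serde_json::from_str(r#"{"content":"hi"}"#).unwrap();
    assert!(plain.reasoning_content.is_none());

    // And the absent field is omitted again when serializing back out.
    assert_eq!(serde_json::to_string(&plain).unwrap(), r#"{"content":"hi"}"#);
}
```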