fix(agent): infer finish_reason when the LLM provider does not set finish_reason correctly
Some LLM providers set finish_reason to “stop” even when the model is calling a function. See also https://community.openai.com/t/function-call-with-finish-reason-of-stop/437226/21 .
This commit is contained in:
parent
9e8ec72bd5
commit
78f2dda8bf
1 changed file with 9 additions and 1 deletion
|
@ -609,7 +609,15 @@ impl OpenAiEventMapper {
|
|||
}
|
||||
}
|
||||
|
||||
match choice.finish_reason.as_deref() {
|
||||
let mut effective_finish_reason = choice.finish_reason.as_deref();
|
||||
|
||||
if effective_finish_reason == Some("stop") && !self.tool_calls_by_index.is_empty()
|
||||
{
|
||||
log::warn!("finish_reason is 'stop' but tool calls are present; inferring 'tool_calls'");
|
||||
effective_finish_reason = Some("tool_calls");
|
||||
}
|
||||
|
||||
match effective_finish_reason {
|
||||
Some("stop") => {
|
||||
events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue