decrease temperature for inline assist on code content
parent e45491d2f8
commit 71fb23f769
3 changed files with 23 additions and 1 deletion
@@ -53,6 +53,8 @@ pub struct OpenAIRequest {
     pub model: String,
     pub messages: Vec<RequestMessage>,
     pub stream: bool,
+    pub stop: Vec<String>,
+    pub temperature: f32,
 }
 
 #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
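With these two fields added, a request that derives `Serialize` (as the existing derives on the surrounding types suggest) carries the stop sequences and sampling temperature in its JSON body. A minimal, standalone sketch of that serialization, with `messages` omitted for brevity and all values illustrative rather than taken from the commit:

use serde::Serialize;

// Trimmed-down stand-in for OpenAIRequest; `messages` is omitted so the
// sketch stays self-contained. Field names mirror the diff above.
#[derive(Serialize)]
struct RequestSketch {
    model: String,
    stream: bool,
    stop: Vec<String>,
    temperature: f32,
}

fn main() {
    let request = RequestSketch {
        model: "gpt-4".into(),
        stream: true,
        stop: vec!["|END|>".into()],
        temperature: 0.5,
    };
    // Prints: {"model":"gpt-4","stream":true,"stop":["|END|>"],"temperature":0.5}
    println!("{}", serde_json::to_string(&request).unwrap());
}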
@@ -78,7 +78,7 @@ impl PromptTemplate for GenerateInlineContent {
         match file_type {
             PromptFileType::Code => {
-                writeln!(prompt, "Always wrap your code in a Markdown block.").unwrap();
+                // writeln!(prompt, "Always wrap your code in a Markdown block.").unwrap();
             }
             _ => {}
         }
 
@@ -661,6 +661,19 @@ impl AssistantPanel {
             None
         };
 
+        // Higher temperature increases the randomness of model outputs.
+        // If Markdown or no language is known, increase the randomness for more creative output.
+        // If code, decrease the temperature to get more deterministic outputs.
+        let temperature = if let Some(language) = language_name.clone() {
+            if language.to_string() != "Markdown".to_string() {
+                0.5
+            } else {
+                1.0
+            }
+        } else {
+            1.0
+        };
+
         let user_prompt = user_prompt.to_string();
 
         let snippets = if retrieve_context {
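The selection logic above branches on the buffer's language name. For illustration, an equivalent standalone sketch of the same rule; the helper name and the `Option<&str>` signature are assumptions for the sketch, not part of the commit:

// Known non-Markdown language => treat the buffer as code and lower the
// temperature; Markdown or no detected language => keep default randomness.
fn inline_assist_temperature(language_name: Option<&str>) -> f32 {
    match language_name {
        Some(language) if language != "Markdown" => 0.5,
        _ => 1.0,
    }
}

fn main() {
    assert_eq!(inline_assist_temperature(Some("Rust")), 0.5);
    assert_eq!(inline_assist_temperature(Some("Markdown")), 1.0);
    assert_eq!(inline_assist_temperature(None), 1.0);
}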
@@ -731,10 +744,13 @@ impl AssistantPanel {
             role: Role::User,
             content: prompt,
         });
 
         let request = OpenAIRequest {
             model: model.full_name().into(),
             messages,
             stream: true,
+            stop: vec!["|END|>".to_string()],
+            temperature,
         };
         codegen.update(&mut cx, |codegen, cx| codegen.start(request, cx));
         anyhow::Ok(())
@@ -1727,6 +1743,8 @@ impl Conversation {
                 .map(|message| message.to_open_ai_message(self.buffer.read(cx)))
                 .collect(),
             stream: true,
+            stop: vec![],
+            temperature: 1.0,
         };
 
         let stream = stream_completion(api_key, cx.background().clone(), request);
@@ -2011,6 +2029,8 @@ impl Conversation {
             model: self.model.full_name().to_string(),
             messages: messages.collect(),
             stream: true,
+            stop: vec![],
+            temperature: 1.0,
         };
 
         let stream = stream_completion(api_key, cx.background().clone(), request);
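Note that only the inline-assist path varies the temperature: the two Conversation request sites above pin temperature: 1.0 and send no stop sequences, so full-panel chat keeps the model's default sampling behavior while inline edits on code become more deterministic.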