Fix typos detected by crate-ci/typos
parent df67917768
commit 57400e9687
71 changed files with 133 additions and 133 deletions
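All changes below are mechanical spelling fixes (seperator → separator, langauge → language). A pass like this can typically be reproduced with the `typos` binary from the typos-cli crate; the exact invocation is an assumption, since the commit does not record it:

    # sketch of a typical typos-cli run (not recorded in this commit)
    $ cargo install typos-cli   # installs the `typos` binary
    $ typos                     # report misspellings in the working tree
    $ typos --write-changes     # apply the suggested fixes in place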
@@ -81,8 +81,8 @@ impl PromptChain {
     pub fn generate(&self, truncate: bool) -> anyhow::Result<(String, usize)> {
         // Argsort based on Prompt Priority
-        let seperator = "\n";
-        let seperator_tokens = self.args.model.count_tokens(seperator)?;
+        let separator = "\n";
+        let separator_tokens = self.args.model.count_tokens(separator)?;
         let mut sorted_indices = (0..self.templates.len()).collect::<Vec<_>>();
         sorted_indices.sort_by_key(|&i| Reverse(&self.templates[i].0));

@@ -104,7 +104,7 @@ impl PromptChain {
             prompts[idx] = template_prompt;

             if let Some(remaining_tokens) = tokens_outstanding {
-                let new_tokens = prompt_token_count + seperator_tokens;
+                let new_tokens = prompt_token_count + separator_tokens;
                 tokens_outstanding = if remaining_tokens > new_tokens {
                     Some(remaining_tokens - new_tokens)
                 } else {
@@ -117,9 +117,9 @@ impl PromptChain {
         prompts.retain(|x| x != "");

-        let full_prompt = prompts.join(seperator);
+        let full_prompt = prompts.join(separator);
         let total_token_count = self.args.model.count_tokens(&full_prompt)?;
-        anyhow::Ok((prompts.join(seperator), total_token_count))
+        anyhow::Ok((prompts.join(separator), total_token_count))
     }
 }

@@ -68,7 +68,7 @@ impl PromptTemplate for RepositoryContext {
         let mut prompt = String::new();

         let mut remaining_tokens = max_token_length.clone();
-        let seperator_token_length = args.model.count_tokens("\n")?;
+        let separator_token_length = args.model.count_tokens("\n")?;
         for snippet in &args.snippets {
             let mut snippet_prompt = template.to_string();
             let content = snippet.to_string();
@@ -79,9 +79,9 @@ impl PromptTemplate for RepositoryContext {
             if let Some(tokens_left) = remaining_tokens {
                 if tokens_left >= token_count {
                     writeln!(prompt, "{snippet_prompt}").unwrap();
-                    remaining_tokens = if tokens_left >= (token_count + seperator_token_length)
+                    remaining_tokens = if tokens_left >= (token_count + separator_token_length)
                     {
-                        Some(tokens_left - token_count - seperator_token_length)
+                        Some(tokens_left - token_count - separator_token_length)
                     } else {
                         Some(0)
                     };

@@ -273,7 +273,7 @@ impl CompletionProvider for OpenAICompletionProvider {
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
         // Currently the CompletionRequest for OpenAI, includes a 'model' parameter
         // This means that the model is determined by the CompletionRequest and not the CompletionProvider,
-        // which is currently model based, due to the langauge model.
+        // which is currently model based, due to the language model.
         // At some point in the future we should rectify this.
        let credential = self.credential.read().clone();
        let request = stream_completion(credential, self.executor.clone(), prompt);