WIP
This commit is contained in: parent 0de5a444d3, commit 394e87d17c
5 changed files with 76 additions and 75 deletions
Cargo.lock (generated, 15 lines changed)

@@ -100,16 +100,13 @@ name = "ai"
 version = "0.1.0"
 dependencies = [
  "anyhow",
- "async-stream",
+ "assets",
  "editor",
  "futures 0.3.28",
  "gpui",
- "indoc",
  "isahc",
- "pulldown-cmark",
  "serde",
  "serde_json",
- "unindent",
  "util",
 ]
 
@@ -2651,15 +2648,6 @@ dependencies = [
  "version_check",
 ]
 
-[[package]]
-name = "getopts"
-version = "0.2.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
-dependencies = [
- "unicode-width",
-]
-
 [[package]]
 name = "getrandom"
 version = "0.1.16"

@@ -5098,7 +5086,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63"
 dependencies = [
  "bitflags",
- "getopts",
  "memchr",
  "unicase",
 ]
assets/contexts/system.zmd (new file, 33 lines)

@@ -0,0 +1,33 @@
+You are an AI language model embedded in a code editor named Zed, authored by Zed Industries.
+The input you are currently processing was produced by a special \"model mention\" in a document that is open in the editor.
+A model mention is indicated via a leading / on a line.
+The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text.
+In this sentence, the word ->->example<-<- is selected.
+Respond to any selected model mention.
+
+Wrap your responses in > < as follows.
+/ What do you think?
+> I think that's a great idea. <
+
+For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines.
+>
+I think that's a great idea
+<
+
+If the selected mention is not at the end of the document, briefly summarize the context.
+> Key ideas of generative programming:
+* Managing context
+* Managing length
+* Context distillation
+- Shrink a context's size without loss of meaning.
+* Fine-grained version control
+* Portals to other contexts
+* Distillation policies
+* Budgets
+<
+
+*Only* respond to a mention if either
+a) The mention is at the end of the document.
+b) The user's selection intersects the mention.
+
+If no response is appropriate based on these conditions, respond with ><.
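The new prompt file defines a small response protocol: answers are wrapped in > and < (on their own lines for multi-line output), and a bare >< means "nothing to respond to." The following is a minimal, self-contained sketch of how editor-side code might strip that wrapper from a model reply; the function name and behavior are illustrative assumptions, not code from this commit.

// Illustrative only: unwrap a "> ... <" response per the convention in system.zmd.
fn unwrap_model_response(raw: &str) -> Option<String> {
    let trimmed = raw.trim();
    // The prompt reserves a bare "><" for "no response is appropriate".
    if trimmed == "><" {
        return None;
    }
    let inner = trimmed.strip_prefix('>')?.strip_suffix('<')?;
    Some(inner.trim().to_string())
}

fn main() {
    assert_eq!(
        unwrap_model_response("> I think that's a great idea. <").as_deref(),
        Some("I think that's a great idea.")
    );
    assert_eq!(unwrap_model_response("><"), None);
}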
crates/ai/Cargo.toml

@@ -9,6 +9,7 @@ path = "src/ai.rs"
 doctest = false
 
 [dependencies]
+assets = { path = "../assets"}
 editor = { path = "../editor" }
 gpui = { path = "../gpui" }
 util = { path = "../util" }

@@ -16,12 +17,8 @@ util = { path = "../util" }
 serde.workspace = true
 serde_json.workspace = true
 anyhow.workspace = true
-indoc.workspace = true
-pulldown-cmark = "0.9.2"
 futures.workspace = true
 isahc.workspace = true
-unindent.workspace = true
-async-stream = "0.3.5"
 
 [dev-dependencies]
 editor = { path = "../editor", features = ["test-support"] }
@@ -7,5 +7,3 @@ Mention a language model with / at the start of any line, like this:
 > To mention a language model, simply include a forward slash (/) at the start of a line, followed by the mention of the model. For example:
 
 /gpt-4
-
-So you should not respond to the above mentions.
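This context file relies on the same convention as the system prompt: a line beginning with / names a model, e.g. /gpt-4. As a rough illustration (an assumption, not part of this commit), locating mention lines in a document could look like this:

// Illustrative only: collect (line_index, line) pairs for model mentions,
// i.e. lines that start with '/', per the convention described above.
fn mention_lines(document: &str) -> Vec<(usize, &str)> {
    document
        .lines()
        .enumerate()
        .filter(|(_, line)| line.starts_with('/'))
        .collect()
}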
crates/ai/src/ai.rs

@@ -1,13 +1,14 @@
 use anyhow::{anyhow, Result};
+use assets::Assets;
 use editor::Editor;
 use futures::AsyncBufReadExt;
 use futures::{io::BufReader, AsyncReadExt, Stream, StreamExt};
 use gpui::executor::Background;
 use gpui::{actions, AppContext, Task, ViewContext};
-use indoc::indoc;
 use isahc::prelude::*;
 use isahc::{http::StatusCode, Request};
 use serde::{Deserialize, Serialize};
+use std::fs;
 use std::{io, sync::Arc};
 use util::ResultExt;
 
@@ -91,42 +92,6 @@ fn assist(
 ) -> Option<Task<Result<()>>> {
     let api_key = std::env::var("OPENAI_API_KEY").log_err()?;
 
-    const SYSTEM_MESSAGE: &'static str = indoc! {r#"
-        You are an AI language model embedded in a code editor named Zed, authored by Zed Industries.
-        The input you are currently processing was produced by a special \"model mention\" in a document that is open in the editor.
-        A model mention is indicated via a leading / on a line.
-        The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text.
-        In this sentence, the word ->->example<-<- is selected.
-        Respond to any selected model mention.
-
-        Wrap your responses in > < as follows.
-        / What do you think?
-        > I think that's a great idea. <
-
-        For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines.
-        >
-        I think that's a great idea
-        <
-
-        If the selected mention is not at the end of the document, briefly summarize the context.
-        > Key ideas of generative programming:
-        * Managing context
-        * Managing length
-        * Context distillation
-        - Shrink a context's size without loss of meaning.
-        * Fine-grained version control
-        * Portals to other contexts
-        * Distillation policies
-        * Budgets
-        <
-
-        *Only* respond to a mention if either
-        a) The mention is at the end of the document.
-        b) The user's selection intersects the mention.
-
-        If no response is appropriate based on these conditions, respond with ><.
-    "#};
-
     let selections = editor.selections.all(cx);
     let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| {
         // Insert ->-> <-<- around selected text as described in the system prompt above.
@@ -158,26 +123,47 @@ fn assist(
         (user_message, insertion_site)
     });
 
-    let stream = stream_completion(
-        api_key,
-        cx.background_executor().clone(),
-        OpenAIRequest {
-            model: "gpt-4".to_string(),
-            messages: vec![
-                RequestMessage {
-                    role: Role::System,
-                    content: SYSTEM_MESSAGE.to_string(),
-                },
-                RequestMessage {
-                    role: Role::User,
-                    content: user_message,
-                },
-            ],
-            stream: false,
-        },
-    );
     let buffer = editor.buffer().clone();
+    let executor = cx.background_executor().clone();
     Some(cx.spawn(|_, mut cx| async move {
+        // TODO: We should have a get_string method on assets. This is repeated elsewhere.
+        let content = Assets::get("contexts/system.zmd").unwrap();
+        let mut system_message = std::str::from_utf8(content.data.as_ref())
+            .unwrap()
+            .to_string();
+
+        if let Ok(custom_system_message_path) = std::env::var("ZED_ASSISTANT_SYSTEM_PROMPT_PATH") {
+            system_message
+                .push_str("\n\nAlso consider the following user-defined system prompt:\n\n");
+            // TODO: Replace this with our file system trait object.
+            // What if you could bind dependencies on an action when you bind it?
+            dbg!("reading from {:?}", &custom_system_message_path);
+            system_message.push_str(
+                &cx.background()
+                    .spawn(async move { fs::read_to_string(custom_system_message_path) })
+                    .await?,
+            );
+        }
+
+        let stream = stream_completion(
+            api_key,
+            executor,
+            OpenAIRequest {
+                model: "gpt-4".to_string(),
+                messages: vec![
+                    RequestMessage {
+                        role: Role::System,
+                        content: system_message.to_string(),
+                    },
+                    RequestMessage {
+                        role: Role::User,
+                        content: user_message,
+                    },
+                ],
+                stream: false,
+            },
+        );
+
         let mut messages = stream.await?;
         while let Some(message) = messages.next().await {
             let mut message = message?;
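The spawned task above now assembles the system message in two steps: load the embedded contexts/system.zmd asset, then append an optional user-defined prompt read from the file named by ZED_ASSISTANT_SYSTEM_PROMPT_PATH. Below is a minimal, dependency-free sketch of that assembly logic under stated assumptions; the function name is illustrative, and the real code in this diff performs the file read on the background executor rather than inline.

use std::fs;

// Illustrative only: mirror the message-assembly logic from the diff above.
fn build_system_message(embedded_prompt: &str) -> std::io::Result<String> {
    let mut system_message = embedded_prompt.to_string();
    if let Ok(custom_path) = std::env::var("ZED_ASSISTANT_SYSTEM_PROMPT_PATH") {
        // Matches the added behavior in assist(): append a user-defined prompt when the env var is set.
        system_message.push_str("\n\nAlso consider the following user-defined system prompt:\n\n");
        system_message.push_str(&fs::read_to_string(custom_path)?);
    }
    Ok(system_message)
}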