Merge branch 'main' into zed2-project-test

Co-authored-by: Marshall <marshall@zed.dev>
Max Brunsfeld 2023-10-31 11:50:56 -07:00
commit 291d35f337
607 changed files with 52667 additions and 14124 deletions


@ -20,9 +20,7 @@ jobs:
id: get-content
with:
stringToTruncate: |
📣 Zed ${{ github.event.release.tag_name }} was just released!
Restart your Zed or head to ${{ steps.get-release-url.outputs.URL }} to grab it.
📣 Zed [${{ github.event.release.tag_name }}](${{ steps.get-release-url.outputs.URL }}) was just released!
${{ github.event.release.body }}
maxLength: 2000

Cargo.lock generated

@ -91,6 +91,7 @@ dependencies = [
"futures 0.3.28",
"gpui",
"isahc",
"language",
"lazy_static",
"log",
"matrixmultiply",
@ -103,7 +104,34 @@ dependencies = [
"rusqlite",
"serde",
"serde_json",
"tiktoken-rs 0.5.4",
"tiktoken-rs",
"util",
]
[[package]]
name = "ai2"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bincode",
"futures 0.3.28",
"gpui2",
"isahc",
"language2",
"lazy_static",
"log",
"matrixmultiply",
"ordered-float 2.10.0",
"parking_lot 0.11.2",
"parse_duration",
"postage",
"rand 0.8.5",
"regex",
"rusqlite",
"serde",
"serde_json",
"tiktoken-rs",
"util",
]
@ -309,6 +337,7 @@ dependencies = [
"language",
"log",
"menu",
"multi_buffer",
"ordered-float 2.10.0",
"parking_lot 0.11.2",
"project",
@ -316,12 +345,13 @@ dependencies = [
"regex",
"schemars",
"search",
"semantic_index",
"serde",
"serde_json",
"settings",
"smol",
"theme",
"tiktoken-rs 0.4.5",
"tiktoken-rs",
"util",
"uuid 1.4.1",
"workspace",
@ -1573,7 +1603,7 @@ dependencies = [
[[package]]
name = "collab"
version = "0.24.0"
version = "0.27.0"
dependencies = [
"anyhow",
"async-trait",
@ -1609,6 +1639,7 @@ dependencies = [
"lsp",
"nanoid",
"node_runtime",
"notifications",
"parking_lot 0.11.2",
"pretty_assertions",
"project",
@ -1664,20 +1695,26 @@ dependencies = [
"fuzzy",
"gpui",
"language",
"lazy_static",
"log",
"menu",
"notifications",
"picker",
"postage",
"pretty_assertions",
"project",
"recent_projects",
"rich_text",
"rpc",
"schemars",
"serde",
"serde_derive",
"settings",
"smallvec",
"theme",
"theme_selector",
"time",
"tree-sitter-markdown",
"util",
"vcs_menu",
"workspace",
@ -1731,6 +1768,7 @@ dependencies = [
"theme",
"util",
"workspace",
"zed-actions",
]
[[package]]
@ -1810,6 +1848,7 @@ dependencies = [
"log",
"lsp",
"node_runtime",
"parking_lot 0.11.2",
"rpc",
"serde",
"serde_derive",
@ -2556,11 +2595,11 @@ dependencies = [
"lazy_static",
"log",
"lsp",
"multi_buffer",
"ordered-float 2.10.0",
"parking_lot 0.11.2",
"postage",
"project",
"pulldown-cmark",
"rand 0.8.5",
"rich_text",
"rpc",
@ -4159,6 +4198,24 @@ dependencies = [
"workspace",
]
[[package]]
name = "journal2"
version = "0.1.0"
dependencies = [
"anyhow",
"chrono",
"dirs 4.0.0",
"editor",
"gpui2",
"log",
"schemars",
"serde",
"settings2",
"shellexpand",
"util",
"workspace",
]
[[package]]
name = "jpeg-decoder"
version = "0.1.22"
@ -4244,6 +4301,7 @@ dependencies = [
"lsp",
"parking_lot 0.11.2",
"postage",
"pulldown-cmark",
"rand 0.8.5",
"regex",
"rpc",
@ -4764,6 +4822,13 @@ dependencies = [
"gpui",
]
[[package]]
name = "menu2"
version = "0.1.0"
dependencies = [
"gpui2",
]
[[package]]
name = "metal"
version = "0.21.0"
@ -4921,6 +4986,55 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389"
[[package]]
name = "multi_buffer"
version = "0.1.0"
dependencies = [
"aho-corasick",
"anyhow",
"client",
"clock",
"collections",
"context_menu",
"convert_case 0.6.0",
"copilot",
"ctor",
"env_logger 0.9.3",
"futures 0.3.28",
"git",
"gpui",
"indoc",
"itertools 0.10.5",
"language",
"lazy_static",
"log",
"lsp",
"ordered-float 2.10.0",
"parking_lot 0.11.2",
"postage",
"project",
"pulldown-cmark",
"rand 0.8.5",
"rich_text",
"schemars",
"serde",
"serde_derive",
"settings",
"smallvec",
"smol",
"snippet",
"sum_tree",
"text",
"theme",
"tree-sitter",
"tree-sitter-html",
"tree-sitter-rust",
"tree-sitter-typescript",
"unindent",
"util",
"workspace",
]
[[package]]
name = "multimap"
version = "0.8.3"
@ -5070,6 +5184,26 @@ dependencies = [
"minimal-lexical",
]
[[package]]
name = "notifications"
version = "0.1.0"
dependencies = [
"anyhow",
"channel",
"client",
"clock",
"collections",
"db",
"feature_flags",
"gpui",
"rpc",
"settings",
"sum_tree",
"text",
"time",
"util",
]
[[package]]
name = "ntapi"
version = "0.3.7"
@ -5886,6 +6020,7 @@ dependencies = [
"log",
"lsp",
"node_runtime",
"parking_lot 0.11.2",
"serde",
"serde_derive",
"serde_json",
@ -6831,8 +6966,10 @@ dependencies = [
"rsa 0.4.0",
"serde",
"serde_derive",
"serde_json",
"smol",
"smol-timeout",
"strum",
"tempdir",
"tracing",
"util",
@ -7407,7 +7544,7 @@ dependencies = [
"smol",
"tempdir",
"theme",
"tiktoken-rs 0.5.4",
"tiktoken-rs",
"tree-sitter",
"tree-sitter-cpp",
"tree-sitter-elixir",
@ -7421,7 +7558,6 @@ dependencies = [
"unindent",
"util",
"workspace",
"zed",
]
[[package]]
@ -8638,6 +8774,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"clap 4.4.4",
"convert_case 0.6.0",
"gpui2",
"log",
"rust-embed",
@ -8713,21 +8850,6 @@ dependencies = [
"weezl",
]
[[package]]
name = "tiktoken-rs"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52aacc1cff93ba9d5f198c62c49c77fa0355025c729eed3326beaf7f33bc8614"
dependencies = [
"anyhow",
"base64 0.21.4",
"bstr",
"fancy-regex",
"lazy_static",
"parking_lot 0.12.1",
"rustc-hash",
]
[[package]]
name = "tiktoken-rs"
version = "0.5.4"
@ -9148,8 +9270,8 @@ dependencies = [
[[package]]
name = "tree-sitter-bash"
version = "0.19.0"
source = "git+https://github.com/tree-sitter/tree-sitter-bash?rev=1b0321ee85701d5036c334a6f04761cdc672e64c#1b0321ee85701d5036c334a6f04761cdc672e64c"
version = "0.20.4"
source = "git+https://github.com/tree-sitter/tree-sitter-bash?rev=7331995b19b8f8aba2d5e26deb51d2195c18bc94#7331995b19b8f8aba2d5e26deb51d2195c18bc94"
dependencies = [
"cc",
"tree-sitter",
@ -9388,6 +9510,15 @@ dependencies = [
"tree-sitter",
]
[[package]]
name = "tree-sitter-vue"
version = "0.0.1"
source = "git+https://github.com/zed-industries/tree-sitter-vue?rev=95b2890#95b28908d90e928c308866f7631e73ef6e1d4b5f"
dependencies = [
"cc",
"tree-sitter",
]
[[package]]
name = "tree-sitter-yaml"
version = "0.0.1"
@ -9469,10 +9600,8 @@ dependencies = [
"itertools 0.11.0",
"rand 0.8.5",
"serde",
"settings",
"smallvec",
"strum",
"theme",
"theme2",
]
@ -9714,6 +9843,7 @@ name = "vcs_menu"
version = "0.1.0"
dependencies = [
"anyhow",
"fs",
"fuzzy",
"gpui",
"picker",
@ -10658,9 +10788,10 @@ dependencies = [
[[package]]
name = "zed"
version = "0.109.0"
version = "0.111.0"
dependencies = [
"activity_indicator",
"ai",
"anyhow",
"assistant",
"async-compression",
@ -10712,6 +10843,7 @@ dependencies = [
"log",
"lsp",
"node_runtime",
"notifications",
"num_cpus",
"outline",
"parking_lot 0.11.2",
@ -10773,6 +10905,7 @@ dependencies = [
"tree-sitter-svelte",
"tree-sitter-toml",
"tree-sitter-typescript",
"tree-sitter-vue",
"tree-sitter-yaml",
"unindent",
"url",
@ -10790,12 +10923,14 @@ name = "zed-actions"
version = "0.1.0"
dependencies = [
"gpui",
"serde",
]
[[package]]
name = "zed2"
version = "0.109.0"
dependencies = [
"ai2",
"anyhow",
"async-compression",
"async-recursion 0.3.2",
@ -10811,7 +10946,7 @@ dependencies = [
"ctor",
"db2",
"env_logger 0.9.3",
"feature_flags",
"feature_flags2",
"fs2",
"fsevent",
"futures 0.3.28",
@ -10822,12 +10957,13 @@ dependencies = [
"indexmap 1.9.3",
"install_cli",
"isahc",
"journal2",
"language2",
"language_tools",
"lazy_static",
"libc",
"log",
"lsp",
"lsp2",
"node_runtime",
"num_cpus",
"parking_lot 0.11.2",
@ -10880,6 +11016,7 @@ dependencies = [
"tree-sitter-svelte",
"tree-sitter-toml",
"tree-sitter-typescript",
"tree-sitter-vue",
"tree-sitter-yaml",
"unindent",
"url",


@ -48,6 +48,7 @@ members = [
"crates/install_cli",
"crates/install_cli2",
"crates/journal",
"crates/journal2",
"crates/language",
"crates/language2",
"crates/language_selector",
@ -58,7 +59,10 @@ members = [
"crates/lsp2",
"crates/media",
"crates/menu",
"crates/menu2",
"crates/multi_buffer",
"crates/node_runtime",
"crates/notifications",
"crates/outline",
"crates/picker",
"crates/plugin",
@ -133,6 +137,7 @@ serde_derive = { version = "1.0", features = ["deserialize_in_place"] }
serde_json = { version = "1.0", features = ["preserve_order", "raw_value"] }
smallvec = { version = "1.6", features = ["union"] }
smol = { version = "1.2" }
strum = { version = "0.25.0", features = ["derive"] }
sysinfo = "0.29.10"
tempdir = { version = "0.3.7" }
thiserror = { version = "1.0.29" }
@ -144,7 +149,7 @@ pretty_assertions = "1.3.0"
git2 = { version = "0.15", default-features = false}
uuid = { version = "1.1.2", features = ["v4"] }
tree-sitter-bash = { git = "https://github.com/tree-sitter/tree-sitter-bash", rev = "1b0321ee85701d5036c334a6f04761cdc672e64c" }
tree-sitter-bash = { git = "https://github.com/tree-sitter/tree-sitter-bash", rev = "7331995b19b8f8aba2d5e26deb51d2195c18bc94" }
tree-sitter-c = "0.20.1"
tree-sitter-cpp = { git = "https://github.com/tree-sitter/tree-sitter-cpp", rev="f44509141e7e483323d2ec178f2d2e6c0fc041c1" }
tree-sitter-css = { git = "https://github.com/tree-sitter/tree-sitter-css", rev = "769203d0f9abe1a9a691ac2b9fe4bb4397a73c51" }
@ -170,7 +175,7 @@ tree-sitter-yaml = { git = "https://github.com/zed-industries/tree-sitter-yaml",
tree-sitter-lua = "0.0.14"
tree-sitter-nix = { git = "https://github.com/nix-community/tree-sitter-nix", rev = "66e3e9ce9180ae08fc57372061006ef83f0abde7" }
tree-sitter-nu = { git = "https://github.com/nushell/tree-sitter-nu", rev = "786689b0562b9799ce53e824cb45a1a2a04dc673"}
tree-sitter-vue = {git = "https://github.com/zed-industries/tree-sitter-vue", rev = "95b2890"}
[patch.crates-io]
tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "35a6052fbcafc5e5fc0f9415b8652be7dcaf7222" }
async-task = { git = "https://github.com/zed-industries/async-task", rev = "341b57d6de98cdfd7b418567b8de2022ca993a6e" }


@ -1,4 +1,4 @@
web: cd ../zed.dev && PORT=3000 npm run dev
collab: cd crates/collab && RUST_LOG=${RUST_LOG:-collab=info} cargo run serve
collab: cd crates/collab && RUST_LOG=${RUST_LOG:-warn,collab=info} cargo run serve
livekit: livekit-server --dev
postgrest: postgrest crates/collab/admin_api.conf

assets/icons/bell.svg Normal file

@ -0,0 +1,8 @@
<svg width="15" height="15" viewBox="0 0 15 15" fill="none" xmlns="http://www.w3.org/2000/svg">
<path
fill-rule="evenodd"
clip-rule="evenodd"
d="M8.60124 1.25086C8.60124 1.75459 8.26278 2.17927 7.80087 2.30989C10.1459 2.4647 12 4.41582 12 6.79999V10.25C12 11.0563 12.0329 11.7074 12.7236 12.0528C12.931 12.1565 13.0399 12.3892 12.9866 12.6149C12.9333 12.8406 12.7319 13 12.5 13H8.16144C8.36904 13.1832 8.49997 13.4513 8.49997 13.75C8.49997 14.3023 8.05226 14.75 7.49997 14.75C6.94769 14.75 6.49997 14.3023 6.49997 13.75C6.49997 13.4513 6.63091 13.1832 6.83851 13H2.49999C2.2681 13 2.06664 12.8406 2.01336 12.6149C1.96009 12.3892 2.06897 12.1565 2.27638 12.0528C2.96708 11.7074 2.99999 11.0563 2.99999 10.25V6.79999C2.99999 4.41537 4.85481 2.46396 7.20042 2.3098C6.73867 2.17908 6.40036 1.75448 6.40036 1.25086C6.40036 0.643104 6.89304 0.150421 7.5008 0.150421C8.10855 0.150421 8.60124 0.643104 8.60124 1.25086ZM7.49999 3.29999C5.56699 3.29999 3.99999 4.86699 3.99999 6.79999V10.25L4.00002 10.3009C4.0005 10.7463 4.00121 11.4084 3.69929 12H11.3007C10.9988 11.4084 10.9995 10.7463 11 10.3009L11 10.25V6.79999C11 4.86699 9.43299 3.29999 7.49999 3.29999Z"
fill="currentColor"
/>
</svg>


assets/icons/link.svg Normal file

@ -0,0 +1,3 @@
<svg width="15" height="15" viewBox="0 0 15 15" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M8.51192 3.00541C9.18827 2.54594 10.0434 2.53694 10.6788 2.95419C10.823 3.04893 10.9771 3.1993 11.389 3.61119C11.8009 4.02307 11.9513 4.17714 12.046 4.32141C12.4632 4.95675 12.4542 5.81192 11.9948 6.48827C11.8899 6.64264 11.7276 6.80811 11.3006 7.23511L10.6819 7.85383C10.4866 8.04909 10.4866 8.36567 10.6819 8.56093C10.8772 8.7562 11.1937 8.7562 11.389 8.56093L12.0077 7.94221L12.0507 7.89929C12.4203 7.52976 12.6568 7.2933 12.822 7.0502C13.4972 6.05623 13.5321 4.76252 12.8819 3.77248C12.7233 3.53102 12.4922 3.30001 12.1408 2.94871L12.0961 2.90408L12.0515 2.85942C11.7002 2.508 11.4692 2.27689 11.2277 2.11832C10.2377 1.46813 8.94396 1.50299 7.94999 2.17822C7.70689 2.34336 7.47042 2.57991 7.10088 2.94955L7.05797 2.99247L6.43926 3.61119C6.24399 3.80645 6.24399 4.12303 6.43926 4.31829C6.63452 4.51355 6.9511 4.51355 7.14636 4.31829L7.76508 3.69957C8.19208 3.27257 8.35755 3.11027 8.51192 3.00541ZM4.31794 7.14672C4.5132 6.95146 4.5132 6.63487 4.31794 6.43961C4.12267 6.24435 3.80609 6.24435 3.61083 6.43961L2.99211 7.05833L2.9492 7.10124C2.57955 7.47077 2.34301 7.70724 2.17786 7.95035C1.50263 8.94432 1.46778 10.238 2.11797 11.2281C2.27654 11.4695 2.50764 11.7005 2.85908 12.0518L2.90372 12.0965L2.94835 12.1411C3.29965 12.4925 3.53066 12.7237 3.77212 12.8822C4.76217 13.5324 6.05587 13.4976 7.04984 12.8223C7.29294 12.6572 7.52941 12.4206 7.89894 12.051L7.89895 12.051L7.94186 12.0081L8.56058 11.3894C8.75584 11.1941 8.75584 10.8775 8.56058 10.6823C8.36531 10.487 8.04873 10.487 7.85347 10.6823L7.23475 11.301C6.80775 11.728 6.64228 11.8903 6.48792 11.9951C5.81156 12.4546 4.9564 12.4636 4.32105 12.0464C4.17679 11.9516 4.02272 11.8012 3.61083 11.3894C3.19894 10.9775 3.04858 10.8234 2.95383 10.6791C2.53659 10.0438 2.54558 9.18863 3.00505 8.51227C3.10991 8.35791 3.27222 8.19244 3.69922 7.76544L4.31794 7.14672ZM9.6217 6.08558C9.81696 5.89032 9.81696 5.57373 9.6217 5.37847C9.42644 5.18321 9.10986 5.18321 8.91459 5.37847L5.37906 8.91401C5.1838 9.10927 5.1838 9.42585 5.37906 9.62111C5.57432 9.81637 5.8909 9.81637 6.08617 9.62111L9.6217 6.08558Z" fill="black"/>
</svg>


assets/icons/public.svg Normal file

@ -0,0 +1,3 @@
<svg width="15" height="15" viewBox="0 0 15 15" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M3.74393 2.00204C3.41963 1.97524 3.13502 2.37572 3.10823 2.70001C3.08143 3.0243 3.32558 3.47321 3.64986 3.50001C7.99878 3.85934 11.1406 7.00122 11.5 11.3501C11.5267 11.6744 11.9756 12.0269 12.3 12C12.6243 11.9733 13.0247 11.5804 12.998 11.2561C12.5912 6.33295 8.66704 2.40882 3.74393 2.00204ZM2.9 6.00001C2.96411 5.68099 3.33084 5.29361 3.64986 5.35772C6.66377 5.96341 9.03654 8.33618 9.64223 11.3501C9.70634 11.6691 9.319 12.0359 8.99999 12.1C8.68097 12.1641 8.06411 11.819 7.99999 11.5C7.48788 8.95167 6.0483 7.51213 3.49999 7.00001C3.18097 6.9359 2.8359 6.31902 2.9 6.00001ZM2 9.20001C2.0641 8.88099 2.38635 8.65788 2.70537 8.722C4.50255 9.08317 5.91684 10.4975 6.27801 12.2946C6.34212 12.6137 6.13547 12.9242 5.81646 12.9883C5.49744 13.0525 4.86411 12.819 4.8 12.5C4.53239 11.1683 3.83158 10.4676 2.5 10.2C2.18098 10.1359 1.93588 9.51902 2 9.20001Z" fill="black"/>
</svg>


assets/icons/update.svg Normal file

@ -0,0 +1,8 @@
<svg width="15" height="15" viewBox="0 0 15 15" fill="none" xmlns="http://www.w3.org/2000/svg">
<path
fill-rule="evenodd"
clip-rule="evenodd"
d="M1.90321 7.29677C1.90321 10.341 4.11041 12.4147 6.58893 12.8439C6.87255 12.893 7.06266 13.1627 7.01355 13.4464C6.96444 13.73 6.69471 13.9201 6.41109 13.871C3.49942 13.3668 0.86084 10.9127 0.86084 7.29677C0.860839 5.76009 1.55996 4.55245 2.37639 3.63377C2.96124 2.97568 3.63034 2.44135 4.16846 2.03202L2.53205 2.03202C2.25591 2.03202 2.03205 1.80816 2.03205 1.53202C2.03205 1.25588 2.25591 1.03202 2.53205 1.03202L5.53205 1.03202C5.80819 1.03202 6.03205 1.25588 6.03205 1.53202L6.03205 4.53202C6.03205 4.80816 5.80819 5.03202 5.53205 5.03202C5.25591 5.03202 5.03205 4.80816 5.03205 4.53202L5.03205 2.68645L5.03054 2.68759L5.03045 2.68766L5.03044 2.68767L5.03043 2.68767C4.45896 3.11868 3.76059 3.64538 3.15554 4.3262C2.44102 5.13021 1.90321 6.10154 1.90321 7.29677ZM13.0109 7.70321C13.0109 4.69115 10.8505 2.6296 8.40384 2.17029C8.12093 2.11718 7.93465 1.84479 7.98776 1.56188C8.04087 1.27898 8.31326 1.0927 8.59616 1.14581C11.4704 1.68541 14.0532 4.12605 14.0532 7.70321C14.0532 9.23988 13.3541 10.4475 12.5377 11.3662C11.9528 12.0243 11.2837 12.5586 10.7456 12.968L12.3821 12.968C12.6582 12.968 12.8821 13.1918 12.8821 13.468C12.8821 13.7441 12.6582 13.968 12.3821 13.968L9.38205 13.968C9.10591 13.968 8.88205 13.7441 8.88205 13.468L8.88205 10.468C8.88205 10.1918 9.10591 9.96796 9.38205 9.96796C9.65819 9.96796 9.88205 10.1918 9.88205 10.468L9.88205 12.3135L9.88362 12.3123C10.4551 11.8813 11.1535 11.3546 11.7585 10.6738C12.4731 9.86976 13.0109 8.89844 13.0109 7.70321Z"
fill="currentColor"
/>
</svg>



@ -370,42 +370,15 @@
{
"context": "Pane",
"bindings": {
"ctrl-1": [
"pane::ActivateItem",
0
],
"ctrl-2": [
"pane::ActivateItem",
1
],
"ctrl-3": [
"pane::ActivateItem",
2
],
"ctrl-4": [
"pane::ActivateItem",
3
],
"ctrl-5": [
"pane::ActivateItem",
4
],
"ctrl-6": [
"pane::ActivateItem",
5
],
"ctrl-7": [
"pane::ActivateItem",
6
],
"ctrl-8": [
"pane::ActivateItem",
7
],
"ctrl-9": [
"pane::ActivateItem",
8
],
"ctrl-1": ["pane::ActivateItem", 0],
"ctrl-2": ["pane::ActivateItem", 1],
"ctrl-3": ["pane::ActivateItem", 2],
"ctrl-4": ["pane::ActivateItem", 3],
"ctrl-5": ["pane::ActivateItem", 4],
"ctrl-6": ["pane::ActivateItem", 5],
"ctrl-7": ["pane::ActivateItem", 6],
"ctrl-8": ["pane::ActivateItem", 7],
"ctrl-9": ["pane::ActivateItem", 8],
"ctrl-0": "pane::ActivateLastItem",
"ctrl--": "pane::GoBack",
"ctrl-_": "pane::GoForward",
@ -416,42 +389,15 @@
{
"context": "Workspace",
"bindings": {
"cmd-1": [
"workspace::ActivatePane",
0
],
"cmd-2": [
"workspace::ActivatePane",
1
],
"cmd-3": [
"workspace::ActivatePane",
2
],
"cmd-4": [
"workspace::ActivatePane",
3
],
"cmd-5": [
"workspace::ActivatePane",
4
],
"cmd-6": [
"workspace::ActivatePane",
5
],
"cmd-7": [
"workspace::ActivatePane",
6
],
"cmd-8": [
"workspace::ActivatePane",
7
],
"cmd-9": [
"workspace::ActivatePane",
8
],
"cmd-1": ["workspace::ActivatePane", 0],
"cmd-2": ["workspace::ActivatePane", 1],
"cmd-3": ["workspace::ActivatePane", 2],
"cmd-4": ["workspace::ActivatePane", 3],
"cmd-5": ["workspace::ActivatePane", 4],
"cmd-6": ["workspace::ActivatePane", 5],
"cmd-7": ["workspace::ActivatePane", 6],
"cmd-8": ["workspace::ActivatePane", 7],
"cmd-9": ["workspace::ActivatePane", 8],
"cmd-b": "workspace::ToggleLeftDock",
"cmd-r": "workspace::ToggleRightDock",
"cmd-j": "workspace::ToggleBottomDock",
@ -494,38 +440,14 @@
},
{
"bindings": {
"cmd-k cmd-left": [
"workspace::ActivatePaneInDirection",
"Left"
],
"cmd-k cmd-right": [
"workspace::ActivatePaneInDirection",
"Right"
],
"cmd-k cmd-up": [
"workspace::ActivatePaneInDirection",
"Up"
],
"cmd-k cmd-down": [
"workspace::ActivatePaneInDirection",
"Down"
],
"cmd-k shift-left": [
"workspace::SwapPaneInDirection",
"Left"
],
"cmd-k shift-right": [
"workspace::SwapPaneInDirection",
"Right"
],
"cmd-k shift-up": [
"workspace::SwapPaneInDirection",
"Up"
],
"cmd-k shift-down": [
"workspace::SwapPaneInDirection",
"Down"
]
"cmd-k cmd-left": ["workspace::ActivatePaneInDirection", "Left"],
"cmd-k cmd-right": ["workspace::ActivatePaneInDirection", "Right"],
"cmd-k cmd-up": ["workspace::ActivatePaneInDirection", "Up"],
"cmd-k cmd-down": ["workspace::ActivatePaneInDirection", "Down"],
"cmd-k shift-left": ["workspace::SwapPaneInDirection", "Left"],
"cmd-k shift-right": ["workspace::SwapPaneInDirection", "Right"],
"cmd-k shift-up": ["workspace::SwapPaneInDirection", "Up"],
"cmd-k shift-down": ["workspace::SwapPaneInDirection", "Down"]
}
},
// Bindings from Atom
@ -627,14 +549,6 @@
"space": "collab_panel::InsertSpace"
}
},
{
"context": "(CollabPanel && not_editing) > Editor",
"bindings": {
"cmd-c": "collab_panel::StartLinkChannel",
"cmd-x": "collab_panel::StartMoveChannel",
"cmd-v": "collab_panel::MoveOrLinkToSelected"
}
},
{
"context": "ChannelModal",
"bindings": {
@ -655,57 +569,21 @@
"cmd-v": "terminal::Paste",
"cmd-k": "terminal::Clear",
// Some nice conveniences
"cmd-backspace": [
"terminal::SendText",
"\u0015"
],
"cmd-right": [
"terminal::SendText",
"\u0005"
],
"cmd-left": [
"terminal::SendText",
"\u0001"
],
"cmd-backspace": ["terminal::SendText", "\u0015"],
"cmd-right": ["terminal::SendText", "\u0005"],
"cmd-left": ["terminal::SendText", "\u0001"],
// Terminal.app compatibility
"alt-left": [
"terminal::SendText",
"\u001bb"
],
"alt-right": [
"terminal::SendText",
"\u001bf"
],
"alt-left": ["terminal::SendText", "\u001bb"],
"alt-right": ["terminal::SendText", "\u001bf"],
// There are conflicting bindings for these keys in the global context.
// These bindings override them; remove at your own risk:
"up": [
"terminal::SendKeystroke",
"up"
],
"pageup": [
"terminal::SendKeystroke",
"pageup"
],
"down": [
"terminal::SendKeystroke",
"down"
],
"pagedown": [
"terminal::SendKeystroke",
"pagedown"
],
"escape": [
"terminal::SendKeystroke",
"escape"
],
"enter": [
"terminal::SendKeystroke",
"enter"
],
"ctrl-c": [
"terminal::SendKeystroke",
"ctrl-c"
]
"up": ["terminal::SendKeystroke", "up"],
"pageup": ["terminal::SendKeystroke", "pageup"],
"down": ["terminal::SendKeystroke", "down"],
"pagedown": ["terminal::SendKeystroke", "pagedown"],
"escape": ["terminal::SendKeystroke", "escape"],
"enter": ["terminal::SendKeystroke", "enter"],
"ctrl-c": ["terminal::SendKeystroke", "ctrl-c"]
}
}
]


@ -39,6 +39,7 @@
"w": "vim::NextWordStart",
"{": "vim::StartOfParagraph",
"}": "vim::EndOfParagraph",
"|": "vim::GoToColumn",
"shift-w": [
"vim::NextWordStart",
{
@ -97,14 +98,8 @@
"ctrl-o": "pane::GoBack",
"ctrl-i": "pane::GoForward",
"ctrl-]": "editor::GoToDefinition",
"escape": [
"vim::SwitchMode",
"Normal"
],
"ctrl+[": [
"vim::SwitchMode",
"Normal"
],
"escape": ["vim::SwitchMode", "Normal"],
"ctrl+[": ["vim::SwitchMode", "Normal"],
"v": "vim::ToggleVisual",
"shift-v": "vim::ToggleVisualLine",
"ctrl-v": "vim::ToggleVisualBlock",
@ -233,123 +228,36 @@
}
],
// Count support
"1": [
"vim::Number",
1
],
"2": [
"vim::Number",
2
],
"3": [
"vim::Number",
3
],
"4": [
"vim::Number",
4
],
"5": [
"vim::Number",
5
],
"6": [
"vim::Number",
6
],
"7": [
"vim::Number",
7
],
"8": [
"vim::Number",
8
],
"9": [
"vim::Number",
9
],
"1": ["vim::Number", 1],
"2": ["vim::Number", 2],
"3": ["vim::Number", 3],
"4": ["vim::Number", 4],
"5": ["vim::Number", 5],
"6": ["vim::Number", 6],
"7": ["vim::Number", 7],
"8": ["vim::Number", 8],
"9": ["vim::Number", 9],
// window related commands (ctrl-w X)
"ctrl-w left": [
"workspace::ActivatePaneInDirection",
"Left"
],
"ctrl-w right": [
"workspace::ActivatePaneInDirection",
"Right"
],
"ctrl-w up": [
"workspace::ActivatePaneInDirection",
"Up"
],
"ctrl-w down": [
"workspace::ActivatePaneInDirection",
"Down"
],
"ctrl-w h": [
"workspace::ActivatePaneInDirection",
"Left"
],
"ctrl-w l": [
"workspace::ActivatePaneInDirection",
"Right"
],
"ctrl-w k": [
"workspace::ActivatePaneInDirection",
"Up"
],
"ctrl-w j": [
"workspace::ActivatePaneInDirection",
"Down"
],
"ctrl-w ctrl-h": [
"workspace::ActivatePaneInDirection",
"Left"
],
"ctrl-w ctrl-l": [
"workspace::ActivatePaneInDirection",
"Right"
],
"ctrl-w ctrl-k": [
"workspace::ActivatePaneInDirection",
"Up"
],
"ctrl-w ctrl-j": [
"workspace::ActivatePaneInDirection",
"Down"
],
"ctrl-w shift-left": [
"workspace::SwapPaneInDirection",
"Left"
],
"ctrl-w shift-right": [
"workspace::SwapPaneInDirection",
"Right"
],
"ctrl-w shift-up": [
"workspace::SwapPaneInDirection",
"Up"
],
"ctrl-w shift-down": [
"workspace::SwapPaneInDirection",
"Down"
],
"ctrl-w shift-h": [
"workspace::SwapPaneInDirection",
"Left"
],
"ctrl-w shift-l": [
"workspace::SwapPaneInDirection",
"Right"
],
"ctrl-w shift-k": [
"workspace::SwapPaneInDirection",
"Up"
],
"ctrl-w shift-j": [
"workspace::SwapPaneInDirection",
"Down"
],
"ctrl-w left": ["workspace::ActivatePaneInDirection", "Left"],
"ctrl-w right": ["workspace::ActivatePaneInDirection", "Right"],
"ctrl-w up": ["workspace::ActivatePaneInDirection", "Up"],
"ctrl-w down": ["workspace::ActivatePaneInDirection", "Down"],
"ctrl-w h": ["workspace::ActivatePaneInDirection", "Left"],
"ctrl-w l": ["workspace::ActivatePaneInDirection", "Right"],
"ctrl-w k": ["workspace::ActivatePaneInDirection", "Up"],
"ctrl-w j": ["workspace::ActivatePaneInDirection", "Down"],
"ctrl-w ctrl-h": ["workspace::ActivatePaneInDirection", "Left"],
"ctrl-w ctrl-l": ["workspace::ActivatePaneInDirection", "Right"],
"ctrl-w ctrl-k": ["workspace::ActivatePaneInDirection", "Up"],
"ctrl-w ctrl-j": ["workspace::ActivatePaneInDirection", "Down"],
"ctrl-w shift-left": ["workspace::SwapPaneInDirection", "Left"],
"ctrl-w shift-right": ["workspace::SwapPaneInDirection", "Right"],
"ctrl-w shift-up": ["workspace::SwapPaneInDirection", "Up"],
"ctrl-w shift-down": ["workspace::SwapPaneInDirection", "Down"],
"ctrl-w shift-h": ["workspace::SwapPaneInDirection", "Left"],
"ctrl-w shift-l": ["workspace::SwapPaneInDirection", "Right"],
"ctrl-w shift-k": ["workspace::SwapPaneInDirection", "Up"],
"ctrl-w shift-j": ["workspace::SwapPaneInDirection", "Down"],
"ctrl-w g t": "pane::ActivateNextItem",
"ctrl-w ctrl-g t": "pane::ActivateNextItem",
"ctrl-w g shift-t": "pane::ActivatePrevItem",
@ -371,14 +279,8 @@
"ctrl-w ctrl-q": "pane::CloseAllItems",
"ctrl-w o": "workspace::CloseInactiveTabsAndPanes",
"ctrl-w ctrl-o": "workspace::CloseInactiveTabsAndPanes",
"ctrl-w n": [
"workspace::NewFileInDirection",
"Up"
],
"ctrl-w ctrl-n": [
"workspace::NewFileInDirection",
"Up"
]
"ctrl-w n": ["workspace::NewFileInDirection", "Up"],
"ctrl-w ctrl-n": ["workspace::NewFileInDirection", "Up"]
}
},
{
@ -393,21 +295,12 @@
"context": "Editor && vim_mode == normal && vim_operator == none && !VimWaiting",
"bindings": {
".": "vim::Repeat",
"c": [
"vim::PushOperator",
"Change"
],
"c": ["vim::PushOperator", "Change"],
"shift-c": "vim::ChangeToEndOfLine",
"d": [
"vim::PushOperator",
"Delete"
],
"d": ["vim::PushOperator", "Delete"],
"shift-d": "vim::DeleteToEndOfLine",
"shift-j": "vim::JoinLines",
"y": [
"vim::PushOperator",
"Yank"
],
"y": ["vim::PushOperator", "Yank"],
"shift-y": "vim::YankLine",
"i": "vim::InsertBefore",
"shift-i": "vim::InsertFirstNonWhitespace",
@ -443,10 +336,7 @@
"backwards": true
}
],
"r": [
"vim::PushOperator",
"Replace"
],
"r": ["vim::PushOperator", "Replace"],
"s": "vim::Substitute",
"shift-s": "vim::SubstituteLine",
"> >": "editor::Indent",
@ -458,10 +348,7 @@
{
"context": "Editor && VimCount",
"bindings": {
"0": [
"vim::Number",
0
]
"0": ["vim::Number", 0]
}
},
{
@ -497,12 +384,15 @@
"'": "vim::Quotes",
"`": "vim::BackQuotes",
"\"": "vim::DoubleQuotes",
"|": "vim::VerticalBars",
"(": "vim::Parentheses",
")": "vim::Parentheses",
"b": "vim::Parentheses",
"[": "vim::SquareBrackets",
"]": "vim::SquareBrackets",
"{": "vim::CurlyBrackets",
"}": "vim::CurlyBrackets",
"shift-b": "vim::CurlyBrackets",
"<": "vim::AngleBrackets",
">": "vim::AngleBrackets"
}
@ -548,22 +438,10 @@
"shift-i": "vim::InsertBefore",
"shift-a": "vim::InsertAfter",
"shift-j": "vim::JoinLines",
"r": [
"vim::PushOperator",
"Replace"
],
"ctrl-c": [
"vim::SwitchMode",
"Normal"
],
"escape": [
"vim::SwitchMode",
"Normal"
],
"ctrl+[": [
"vim::SwitchMode",
"Normal"
],
"r": ["vim::PushOperator", "Replace"],
"ctrl-c": ["vim::SwitchMode", "Normal"],
"escape": ["vim::SwitchMode", "Normal"],
"ctrl+[": ["vim::SwitchMode", "Normal"],
">": "editor::Indent",
"<": "editor::Outdent",
"i": [
@ -602,14 +480,8 @@
"bindings": {
"tab": "vim::Tab",
"enter": "vim::Enter",
"escape": [
"vim::SwitchMode",
"Normal"
],
"ctrl+[": [
"vim::SwitchMode",
"Normal"
]
"escape": ["vim::SwitchMode", "Normal"],
"ctrl+[": ["vim::SwitchMode", "Normal"]
}
},
{


@ -50,6 +50,9 @@
// Whether to pop the completions menu while typing in an editor without
// explicitly requesting it.
"show_completions_on_input": true,
// Whether to display documentation inline and alongside items in the
// completions menu.
"show_completion_documentation": true,
// Whether to show wrap guides in the editor. Setting this to true will
// show a guide at the 'preferred_line_length' value if softwrap is set to
// 'preferred_line_length', and will show any additional guides as specified
@ -139,6 +142,14 @@
// Default width of the channels panel.
"default_width": 240
},
"notification_panel": {
// Whether to show the collaboration panel button in the status bar.
"button": true,
// Where to dock channels panel. Can be 'left' or 'right'.
"dock": "right",
// Default width of the channels panel.
"default_width": 380
},
"assistant": {
// Whether to show the assistant panel button in the status bar.
"button": true,

crates/Cargo.toml Normal file

@ -0,0 +1,38 @@
[package]
name = "ai"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/ai.rs"
doctest = false
[features]
test-support = []
[dependencies]
gpui = { path = "../gpui" }
util = { path = "../util" }
language = { path = "../language" }
async-trait.workspace = true
anyhow.workspace = true
futures.workspace = true
lazy_static.workspace = true
ordered-float.workspace = true
parking_lot.workspace = true
isahc.workspace = true
regex.workspace = true
serde.workspace = true
serde_json.workspace = true
postage.workspace = true
rand.workspace = true
log.workspace = true
parse_duration = "2.1.1"
tiktoken-rs = "0.5.0"
matrixmultiply = "0.3.7"
rusqlite = { version = "0.29.0", features = ["blob", "array", "modern_sqlite"] }
bincode = "1.3.3"
[dev-dependencies]
gpui = { path = "../gpui", features = ["test-support"] }


@ -8,9 +8,13 @@ publish = false
path = "src/ai.rs"
doctest = false
[features]
test-support = []
[dependencies]
gpui = { path = "../gpui" }
util = { path = "../util" }
language = { path = "../language" }
async-trait.workspace = true
anyhow.workspace = true
futures.workspace = true


@ -1,2 +1,8 @@
pub mod auth;
pub mod completion;
pub mod embedding;
pub mod models;
pub mod prompts;
pub mod providers;
#[cfg(any(test, feature = "test-support"))]
pub mod test;

crates/ai/src/auth.rs Normal file

@ -0,0 +1,15 @@
use gpui::AppContext;
#[derive(Clone, Debug)]
pub enum ProviderCredential {
Credentials { api_key: String },
NoCredentials,
NotNeeded,
}
pub trait CredentialProvider: Send + Sync {
fn has_credentials(&self) -> bool;
fn retrieve_credentials(&self, cx: &AppContext) -> ProviderCredential;
fn save_credentials(&self, cx: &AppContext, credential: ProviderCredential);
fn delete_credentials(&self, cx: &AppContext);
}
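
For illustration, a minimal sketch of a type that could satisfy this trait, assuming credentials are read from an environment variable (the struct and variable name are hypothetical, not part of this commit):

use std::env;

use gpui::AppContext;

use crate::auth::{CredentialProvider, ProviderCredential};

// Hypothetical provider whose API key comes from the environment.
struct EnvCredentialProvider;

impl CredentialProvider for EnvCredentialProvider {
    fn has_credentials(&self) -> bool {
        env::var("OPENAI_API_KEY").is_ok()
    }

    fn retrieve_credentials(&self, _cx: &AppContext) -> ProviderCredential {
        match env::var("OPENAI_API_KEY") {
            Ok(api_key) => ProviderCredential::Credentials { api_key },
            Err(_) => ProviderCredential::NoCredentials,
        }
    }

    fn save_credentials(&self, _cx: &AppContext, _credential: ProviderCredential) {
        // Nothing to persist for an environment-backed provider.
    }

    fn delete_credentials(&self, _cx: &AppContext) {
        // Nothing to delete; the variable is managed outside the app.
    }
}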


@ -1,212 +1,23 @@
use anyhow::{anyhow, Result};
use futures::{
future::BoxFuture, io::BufReader, stream::BoxStream, AsyncBufReadExt, AsyncReadExt, FutureExt,
Stream, StreamExt,
};
use gpui::executor::Background;
use isahc::{http::StatusCode, Request, RequestExt};
use serde::{Deserialize, Serialize};
use std::{
fmt::{self, Display},
io,
sync::Arc,
};
use anyhow::Result;
use futures::{future::BoxFuture, stream::BoxStream};
pub const OPENAI_API_URL: &'static str = "https://api.openai.com/v1";
use crate::{auth::CredentialProvider, models::LanguageModel};
#[derive(Clone, Copy, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
User,
Assistant,
System,
pub trait CompletionRequest: Send + Sync {
fn data(&self) -> serde_json::Result<String>;
}
impl Role {
pub fn cycle(&mut self) {
*self = match self {
Role::User => Role::Assistant,
Role::Assistant => Role::System,
Role::System => Role::User,
}
}
}
impl Display for Role {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Role::User => write!(f, "User"),
Role::Assistant => write!(f, "Assistant"),
Role::System => write!(f, "System"),
}
}
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct RequestMessage {
pub role: Role,
pub content: String,
}
#[derive(Debug, Default, Serialize)]
pub struct OpenAIRequest {
pub model: String,
pub messages: Vec<RequestMessage>,
pub stream: bool,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct ResponseMessage {
pub role: Option<Role>,
pub content: Option<String>,
}
#[derive(Deserialize, Debug)]
pub struct OpenAIUsage {
pub prompt_tokens: u32,
pub completion_tokens: u32,
pub total_tokens: u32,
}
#[derive(Deserialize, Debug)]
pub struct ChatChoiceDelta {
pub index: u32,
pub delta: ResponseMessage,
pub finish_reason: Option<String>,
}
#[derive(Deserialize, Debug)]
pub struct OpenAIResponseStreamEvent {
pub id: Option<String>,
pub object: String,
pub created: u32,
pub model: String,
pub choices: Vec<ChatChoiceDelta>,
pub usage: Option<OpenAIUsage>,
}
pub async fn stream_completion(
api_key: String,
executor: Arc<Background>,
mut request: OpenAIRequest,
) -> Result<impl Stream<Item = Result<OpenAIResponseStreamEvent>>> {
request.stream = true;
let (tx, rx) = futures::channel::mpsc::unbounded::<Result<OpenAIResponseStreamEvent>>();
let json_data = serde_json::to_string(&request)?;
let mut response = Request::post(format!("{OPENAI_API_URL}/chat/completions"))
.header("Content-Type", "application/json")
.header("Authorization", format!("Bearer {}", api_key))
.body(json_data)?
.send_async()
.await?;
let status = response.status();
if status == StatusCode::OK {
executor
.spawn(async move {
let mut lines = BufReader::new(response.body_mut()).lines();
fn parse_line(
line: Result<String, io::Error>,
) -> Result<Option<OpenAIResponseStreamEvent>> {
if let Some(data) = line?.strip_prefix("data: ") {
let event = serde_json::from_str(&data)?;
Ok(Some(event))
} else {
Ok(None)
}
}
while let Some(line) = lines.next().await {
if let Some(event) = parse_line(line).transpose() {
let done = event.as_ref().map_or(false, |event| {
event
.choices
.last()
.map_or(false, |choice| choice.finish_reason.is_some())
});
if tx.unbounded_send(event).is_err() {
break;
}
if done {
break;
}
}
}
anyhow::Ok(())
})
.detach();
Ok(rx)
} else {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
#[derive(Deserialize)]
struct OpenAIResponse {
error: OpenAIError,
}
#[derive(Deserialize)]
struct OpenAIError {
message: String,
}
match serde_json::from_str::<OpenAIResponse>(&body) {
Ok(response) if !response.error.message.is_empty() => Err(anyhow!(
"Failed to connect to OpenAI API: {}",
response.error.message,
)),
_ => Err(anyhow!(
"Failed to connect to OpenAI API: {} {}",
response.status(),
body,
)),
}
}
}
pub trait CompletionProvider {
pub trait CompletionProvider: CredentialProvider {
fn base_model(&self) -> Box<dyn LanguageModel>;
fn complete(
&self,
prompt: OpenAIRequest,
prompt: Box<dyn CompletionRequest>,
) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;
fn box_clone(&self) -> Box<dyn CompletionProvider>;
}
pub struct OpenAICompletionProvider {
api_key: String,
executor: Arc<Background>,
}
impl OpenAICompletionProvider {
pub fn new(api_key: String, executor: Arc<Background>) -> Self {
Self { api_key, executor }
}
}
impl CompletionProvider for OpenAICompletionProvider {
fn complete(
&self,
prompt: OpenAIRequest,
) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
let request = stream_completion(self.api_key.clone(), self.executor.clone(), prompt);
async move {
let response = request.await?;
let stream = response
.filter_map(|response| async move {
match response {
Ok(mut response) => Some(Ok(response.choices.pop()?.delta.content?)),
Err(error) => Some(Err(error)),
}
})
.boxed();
Ok(stream)
}
.boxed()
impl Clone for Box<dyn CompletionProvider> {
fn clone(&self) -> Box<dyn CompletionProvider> {
self.box_clone()
}
}
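
For illustration, a minimal sketch of a CompletionRequest implementor: a serde-serializable request body whose data() is just its JSON encoding. The struct and its fields are hypothetical; the concrete OpenAI request types removed above presumably now live under the new providers module.

use serde::Serialize;

use crate::completion::CompletionRequest;

// Hypothetical request body; field names are illustrative only.
#[derive(Serialize)]
struct SimpleCompletionRequest {
    model: String,
    prompt: String,
    stream: bool,
}

impl CompletionRequest for SimpleCompletionRequest {
    fn data(&self) -> serde_json::Result<String> {
        serde_json::to_string(self)
    }
}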


@ -1,30 +1,13 @@
use anyhow::{anyhow, Result};
use std::time::Instant;
use anyhow::Result;
use async_trait::async_trait;
use futures::AsyncReadExt;
use gpui::executor::Background;
use gpui::serde_json;
use isahc::http::StatusCode;
use isahc::prelude::Configurable;
use isahc::{AsyncBody, Response};
use lazy_static::lazy_static;
use ordered_float::OrderedFloat;
use parking_lot::Mutex;
use parse_duration::parse;
use postage::watch;
use rusqlite::types::{FromSql, FromSqlResult, ToSqlOutput, ValueRef};
use rusqlite::ToSql;
use serde::{Deserialize, Serialize};
use std::env;
use std::ops::Add;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tiktoken_rs::{cl100k_base, CoreBPE};
use util::http::{HttpClient, Request};
lazy_static! {
static ref OPENAI_API_KEY: Option<String> = env::var("OPENAI_API_KEY").ok();
static ref OPENAI_BPE_TOKENIZER: CoreBPE = cl100k_base().unwrap();
}
use crate::auth::CredentialProvider;
use crate::models::LanguageModel;
#[derive(Debug, PartialEq, Clone)]
pub struct Embedding(pub Vec<f32>);
@ -85,295 +68,14 @@ impl Embedding {
}
}
// impl FromSql for Embedding {
// fn column_result(value: ValueRef) -> FromSqlResult<Self> {
// let bytes = value.as_blob()?;
// let embedding: Result<Vec<f32>, Box<bincode::ErrorKind>> = bincode::deserialize(bytes);
// if embedding.is_err() {
// return Err(rusqlite::types::FromSqlError::Other(embedding.unwrap_err()));
// }
// Ok(Embedding(embedding.unwrap()))
// }
// }
// impl ToSql for Embedding {
// fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> {
// let bytes = bincode::serialize(&self.0)
// .map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))?;
// Ok(ToSqlOutput::Owned(rusqlite::types::Value::Blob(bytes)))
// }
// }
#[derive(Clone)]
pub struct OpenAIEmbeddings {
pub client: Arc<dyn HttpClient>,
pub executor: Arc<Background>,
rate_limit_count_rx: watch::Receiver<Option<Instant>>,
rate_limit_count_tx: Arc<Mutex<watch::Sender<Option<Instant>>>>,
}
#[derive(Serialize)]
struct OpenAIEmbeddingRequest<'a> {
model: &'static str,
input: Vec<&'a str>,
}
#[derive(Deserialize)]
struct OpenAIEmbeddingResponse {
data: Vec<OpenAIEmbedding>,
usage: OpenAIEmbeddingUsage,
}
#[derive(Debug, Deserialize)]
struct OpenAIEmbedding {
embedding: Vec<f32>,
index: usize,
object: String,
}
#[derive(Deserialize)]
struct OpenAIEmbeddingUsage {
prompt_tokens: usize,
total_tokens: usize,
}
#[async_trait]
pub trait EmbeddingProvider: Sync + Send {
fn is_authenticated(&self) -> bool;
pub trait EmbeddingProvider: CredentialProvider {
fn base_model(&self) -> Box<dyn LanguageModel>;
async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>>;
fn max_tokens_per_batch(&self) -> usize;
fn truncate(&self, span: &str) -> (String, usize);
fn rate_limit_expiration(&self) -> Option<Instant>;
}
pub struct DummyEmbeddings {}
#[async_trait]
impl EmbeddingProvider for DummyEmbeddings {
fn is_authenticated(&self) -> bool {
true
}
fn rate_limit_expiration(&self) -> Option<Instant> {
None
}
async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>> {
// 1536 is the OpenAI embedding dimension for the ada models,
// which we will likely be starting with.
let dummy_vec = Embedding::from(vec![0.32 as f32; 1536]);
return Ok(vec![dummy_vec; spans.len()]);
}
fn max_tokens_per_batch(&self) -> usize {
OPENAI_INPUT_LIMIT
}
fn truncate(&self, span: &str) -> (String, usize) {
let mut tokens = OPENAI_BPE_TOKENIZER.encode_with_special_tokens(span);
let token_count = tokens.len();
let output = if token_count > OPENAI_INPUT_LIMIT {
tokens.truncate(OPENAI_INPUT_LIMIT);
let new_input = OPENAI_BPE_TOKENIZER.decode(tokens.clone());
new_input.ok().unwrap_or_else(|| span.to_string())
} else {
span.to_string()
};
(output, tokens.len())
}
}
const OPENAI_INPUT_LIMIT: usize = 8190;
impl OpenAIEmbeddings {
pub fn new(client: Arc<dyn HttpClient>, executor: Arc<Background>) -> Self {
let (rate_limit_count_tx, rate_limit_count_rx) = watch::channel_with(None);
let rate_limit_count_tx = Arc::new(Mutex::new(rate_limit_count_tx));
OpenAIEmbeddings {
client,
executor,
rate_limit_count_rx,
rate_limit_count_tx,
}
}
fn resolve_rate_limit(&self) {
let reset_time = *self.rate_limit_count_tx.lock().borrow();
if let Some(reset_time) = reset_time {
if Instant::now() >= reset_time {
*self.rate_limit_count_tx.lock().borrow_mut() = None
}
}
log::trace!(
"resolving reset time: {:?}",
*self.rate_limit_count_tx.lock().borrow()
);
}
fn update_reset_time(&self, reset_time: Instant) {
let original_time = *self.rate_limit_count_tx.lock().borrow();
let updated_time = if let Some(original_time) = original_time {
if reset_time < original_time {
Some(reset_time)
} else {
Some(original_time)
}
} else {
Some(reset_time)
};
log::trace!("updating rate limit time: {:?}", updated_time);
*self.rate_limit_count_tx.lock().borrow_mut() = updated_time;
}
async fn send_request(
&self,
api_key: &str,
spans: Vec<&str>,
request_timeout: u64,
) -> Result<Response<AsyncBody>> {
let request = Request::post("https://api.openai.com/v1/embeddings")
.redirect_policy(isahc::config::RedirectPolicy::Follow)
.timeout(Duration::from_secs(request_timeout))
.header("Content-Type", "application/json")
.header("Authorization", format!("Bearer {}", api_key))
.body(
serde_json::to_string(&OpenAIEmbeddingRequest {
input: spans.clone(),
model: "text-embedding-ada-002",
})
.unwrap()
.into(),
)?;
Ok(self.client.send(request).await?)
}
}
#[async_trait]
impl EmbeddingProvider for OpenAIEmbeddings {
fn is_authenticated(&self) -> bool {
OPENAI_API_KEY.as_ref().is_some()
}
fn max_tokens_per_batch(&self) -> usize {
50000
}
fn rate_limit_expiration(&self) -> Option<Instant> {
*self.rate_limit_count_rx.borrow()
}
fn truncate(&self, span: &str) -> (String, usize) {
let mut tokens = OPENAI_BPE_TOKENIZER.encode_with_special_tokens(span);
let output = if tokens.len() > OPENAI_INPUT_LIMIT {
tokens.truncate(OPENAI_INPUT_LIMIT);
OPENAI_BPE_TOKENIZER
.decode(tokens.clone())
.ok()
.unwrap_or_else(|| span.to_string())
} else {
span.to_string()
};
(output, tokens.len())
}
async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>> {
const BACKOFF_SECONDS: [usize; 4] = [3, 5, 15, 45];
const MAX_RETRIES: usize = 4;
let api_key = OPENAI_API_KEY
.as_ref()
.ok_or_else(|| anyhow!("no api key"))?;
let mut request_number = 0;
let mut rate_limiting = false;
let mut request_timeout: u64 = 15;
let mut response: Response<AsyncBody>;
while request_number < MAX_RETRIES {
response = self
.send_request(
api_key,
spans.iter().map(|x| &**x).collect(),
request_timeout,
)
.await?;
request_number += 1;
match response.status() {
StatusCode::REQUEST_TIMEOUT => {
request_timeout += 5;
}
StatusCode::OK => {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
let response: OpenAIEmbeddingResponse = serde_json::from_str(&body)?;
log::trace!(
"openai embedding completed. tokens: {:?}",
response.usage.total_tokens
);
// If we complete a request successfully that was previously rate_limited
// resolve the rate limit
if rate_limiting {
self.resolve_rate_limit()
}
return Ok(response
.data
.into_iter()
.map(|embedding| Embedding::from(embedding.embedding))
.collect());
}
StatusCode::TOO_MANY_REQUESTS => {
rate_limiting = true;
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
let delay_duration = {
let delay = Duration::from_secs(BACKOFF_SECONDS[request_number - 1] as u64);
if let Some(time_to_reset) =
response.headers().get("x-ratelimit-reset-tokens")
{
if let Ok(time_str) = time_to_reset.to_str() {
parse(time_str).unwrap_or(delay)
} else {
delay
}
} else {
delay
}
};
// If we've previously rate limited, increment the duration but not the count
let reset_time = Instant::now().add(delay_duration);
self.update_reset_time(reset_time);
log::trace!(
"openai rate limiting: waiting {:?} until lifted",
&delay_duration
);
self.executor.timer(delay_duration).await;
}
_ => {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
return Err(anyhow!(
"open ai bad request: {:?} {:?}",
&response.status(),
body
));
}
}
}
Err(anyhow!("openai max retries"))
}
}
#[cfg(test)]
mod tests {
use super::*;

crates/ai/src/models.rs Normal file

@ -0,0 +1,16 @@
pub enum TruncationDirection {
Start,
End,
}
pub trait LanguageModel {
fn name(&self) -> String;
fn count_tokens(&self, content: &str) -> anyhow::Result<usize>;
fn truncate(
&self,
content: &str,
length: usize,
direction: TruncationDirection,
) -> anyhow::Result<String>;
fn capacity(&self) -> anyhow::Result<usize>;
}
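
For illustration, a rough sketch of a trivial LanguageModel that treats every character as one token, in the spirit of the FakeLanguageModel used by the tests below (the type shown here is hypothetical, not part of this commit):

use crate::models::{LanguageModel, TruncationDirection};

// Hypothetical model: one character counts as one token.
struct CharacterCountModel {
    capacity: usize,
}

impl LanguageModel for CharacterCountModel {
    fn name(&self) -> String {
        "character-count".to_string()
    }

    fn count_tokens(&self, content: &str) -> anyhow::Result<usize> {
        Ok(content.chars().count())
    }

    fn truncate(
        &self,
        content: &str,
        length: usize,
        direction: TruncationDirection,
    ) -> anyhow::Result<String> {
        let chars: Vec<char> = content.chars().collect();
        if chars.len() <= length {
            return Ok(content.to_string());
        }
        Ok(match direction {
            TruncationDirection::Start => chars[chars.len() - length..].iter().collect(),
            TruncationDirection::End => chars[..length].iter().collect(),
        })
    }

    fn capacity(&self) -> anyhow::Result<usize> {
        Ok(self.capacity)
    }
}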


@ -0,0 +1,330 @@
use std::cmp::Reverse;
use std::ops::Range;
use std::sync::Arc;
use language::BufferSnapshot;
use util::ResultExt;
use crate::models::LanguageModel;
use crate::prompts::repository_context::PromptCodeSnippet;
pub(crate) enum PromptFileType {
Text,
Code,
}
// TODO: Set this up to manage defaults well
pub struct PromptArguments {
pub model: Arc<dyn LanguageModel>,
pub user_prompt: Option<String>,
pub language_name: Option<String>,
pub project_name: Option<String>,
pub snippets: Vec<PromptCodeSnippet>,
pub reserved_tokens: usize,
pub buffer: Option<BufferSnapshot>,
pub selected_range: Option<Range<usize>>,
}
impl PromptArguments {
pub(crate) fn get_file_type(&self) -> PromptFileType {
if self
.language_name
.as_ref()
.and_then(|name| Some(!["Markdown", "Plain Text"].contains(&name.as_str())))
.unwrap_or(true)
{
PromptFileType::Code
} else {
PromptFileType::Text
}
}
}
pub trait PromptTemplate {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)>;
}
#[repr(i8)]
#[derive(PartialEq, Eq, Ord)]
pub enum PromptPriority {
Mandatory, // Ignores truncation
Ordered { order: usize }, // Truncates based on priority
}
impl PartialOrd for PromptPriority {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
match (self, other) {
(Self::Mandatory, Self::Mandatory) => Some(std::cmp::Ordering::Equal),
(Self::Mandatory, Self::Ordered { .. }) => Some(std::cmp::Ordering::Greater),
(Self::Ordered { .. }, Self::Mandatory) => Some(std::cmp::Ordering::Less),
(Self::Ordered { order: a }, Self::Ordered { order: b }) => b.partial_cmp(a),
}
}
}
pub struct PromptChain {
args: PromptArguments,
templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)>,
}
impl PromptChain {
pub fn new(
args: PromptArguments,
templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)>,
) -> Self {
PromptChain { args, templates }
}
pub fn generate(&self, truncate: bool) -> anyhow::Result<(String, usize)> {
// Argsort based on Prompt Priority
let seperator = "\n";
let seperator_tokens = self.args.model.count_tokens(seperator)?;
let mut sorted_indices = (0..self.templates.len()).collect::<Vec<_>>();
sorted_indices.sort_by_key(|&i| Reverse(&self.templates[i].0));
// If Truncate
let mut tokens_outstanding = if truncate {
Some(self.args.model.capacity()? - self.args.reserved_tokens)
} else {
None
};
let mut prompts = vec!["".to_string(); sorted_indices.len()];
for idx in sorted_indices {
let (_, template) = &self.templates[idx];
if let Some((template_prompt, prompt_token_count)) =
template.generate(&self.args, tokens_outstanding).log_err()
{
if template_prompt != "" {
prompts[idx] = template_prompt;
if let Some(remaining_tokens) = tokens_outstanding {
let new_tokens = prompt_token_count + seperator_tokens;
tokens_outstanding = if remaining_tokens > new_tokens {
Some(remaining_tokens - new_tokens)
} else {
Some(0)
};
}
}
}
}
prompts.retain(|x| x != "");
let full_prompt = prompts.join(seperator);
let total_token_count = self.args.model.count_tokens(&full_prompt)?;
anyhow::Ok((prompts.join(seperator), total_token_count))
}
}
#[cfg(test)]
pub(crate) mod tests {
use crate::models::TruncationDirection;
use crate::test::FakeLanguageModel;
use super::*;
#[test]
pub fn test_prompt_chain() {
struct TestPromptTemplate {}
impl PromptTemplate for TestPromptTemplate {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
let mut content = "This is a test prompt template".to_string();
let mut token_count = args.model.count_tokens(&content)?;
if let Some(max_token_length) = max_token_length {
if token_count > max_token_length {
content = args.model.truncate(
&content,
max_token_length,
TruncationDirection::End,
)?;
token_count = max_token_length;
}
}
anyhow::Ok((content, token_count))
}
}
struct TestLowPriorityTemplate {}
impl PromptTemplate for TestLowPriorityTemplate {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
let mut content = "This is a low priority test prompt template".to_string();
let mut token_count = args.model.count_tokens(&content)?;
if let Some(max_token_length) = max_token_length {
if token_count > max_token_length {
content = args.model.truncate(
&content,
max_token_length,
TruncationDirection::End,
)?;
token_count = max_token_length;
}
}
anyhow::Ok((content, token_count))
}
}
let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity: 100 });
let args = PromptArguments {
model: model.clone(),
language_name: None,
project_name: None,
snippets: Vec::new(),
reserved_tokens: 0,
buffer: None,
selected_range: None,
user_prompt: None,
};
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(
PromptPriority::Ordered { order: 0 },
Box::new(TestPromptTemplate {}),
),
(
PromptPriority::Ordered { order: 1 },
Box::new(TestLowPriorityTemplate {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, token_count) = chain.generate(false).unwrap();
assert_eq!(
prompt,
"This is a test prompt template\nThis is a low priority test prompt template"
.to_string()
);
assert_eq!(model.count_tokens(&prompt).unwrap(), token_count);
// Testing with Truncation Off
// Should ignore capacity and return all prompts
let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity: 20 });
let args = PromptArguments {
model: model.clone(),
language_name: None,
project_name: None,
snippets: Vec::new(),
reserved_tokens: 0,
buffer: None,
selected_range: None,
user_prompt: None,
};
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(
PromptPriority::Ordered { order: 0 },
Box::new(TestPromptTemplate {}),
),
(
PromptPriority::Ordered { order: 1 },
Box::new(TestLowPriorityTemplate {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, token_count) = chain.generate(false).unwrap();
assert_eq!(
prompt,
"This is a test prompt template\nThis is a low priority test prompt template"
.to_string()
);
assert_eq!(model.count_tokens(&prompt).unwrap(), token_count);
// Testing with Truncation On
// Should truncate the prompts to fit within the model's capacity
let capacity = 20;
let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity });
let args = PromptArguments {
model: model.clone(),
language_name: None,
project_name: None,
snippets: Vec::new(),
reserved_tokens: 0,
buffer: None,
selected_range: None,
user_prompt: None,
};
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(
PromptPriority::Ordered { order: 0 },
Box::new(TestPromptTemplate {}),
),
(
PromptPriority::Ordered { order: 1 },
Box::new(TestLowPriorityTemplate {}),
),
(
PromptPriority::Ordered { order: 2 },
Box::new(TestLowPriorityTemplate {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, token_count) = chain.generate(true).unwrap();
assert_eq!(prompt, "This is a test promp".to_string());
assert_eq!(token_count, capacity);
// Change Ordering of Prompts Based on Priority
let capacity = 120;
let reserved_tokens = 10;
let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity });
let args = PromptArguments {
model: model.clone(),
language_name: None,
project_name: None,
snippets: Vec::new(),
reserved_tokens,
buffer: None,
selected_range: None,
user_prompt: None,
};
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(
PromptPriority::Mandatory,
Box::new(TestLowPriorityTemplate {}),
),
(
PromptPriority::Ordered { order: 0 },
Box::new(TestPromptTemplate {}),
),
(
PromptPriority::Ordered { order: 1 },
Box::new(TestLowPriorityTemplate {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, token_count) = chain.generate(true).unwrap();
assert_eq!(
prompt,
"This is a low priority test prompt template\nThis is a test prompt template\nThis is a low priority test prompt "
.to_string()
);
assert_eq!(token_count, capacity - reserved_tokens);
}
}


@ -0,0 +1,164 @@
use anyhow::anyhow;
use language::BufferSnapshot;
use language::ToOffset;
use crate::models::LanguageModel;
use crate::models::TruncationDirection;
use crate::prompts::base::PromptArguments;
use crate::prompts::base::PromptTemplate;
use std::fmt::Write;
use std::ops::Range;
use std::sync::Arc;
fn retrieve_context(
buffer: &BufferSnapshot,
selected_range: &Option<Range<usize>>,
model: Arc<dyn LanguageModel>,
max_token_count: Option<usize>,
) -> anyhow::Result<(String, usize, bool)> {
let mut prompt = String::new();
let mut truncated = false;
if let Some(selected_range) = selected_range {
let start = selected_range.start.to_offset(buffer);
let end = selected_range.end.to_offset(buffer);
let start_window = buffer.text_for_range(0..start).collect::<String>();
let mut selected_window = String::new();
if start == end {
write!(selected_window, "<|START|>").unwrap();
} else {
write!(selected_window, "<|START|").unwrap();
}
write!(
selected_window,
"{}",
buffer.text_for_range(start..end).collect::<String>()
)
.unwrap();
if start != end {
write!(selected_window, "|END|>").unwrap();
}
let end_window = buffer.text_for_range(end..buffer.len()).collect::<String>();
if let Some(max_token_count) = max_token_count {
let selected_tokens = model.count_tokens(&selected_window)?;
if selected_tokens > max_token_count {
return Err(anyhow!(
"selected range is greater than model context window, truncation not possible"
));
};
let mut remaining_tokens = max_token_count - selected_tokens;
let start_window_tokens = model.count_tokens(&start_window)?;
let end_window_tokens = model.count_tokens(&end_window)?;
let outside_tokens = start_window_tokens + end_window_tokens;
if outside_tokens > remaining_tokens {
let (start_goal_tokens, end_goal_tokens) =
if start_window_tokens < end_window_tokens {
let start_goal_tokens = (remaining_tokens / 2).min(start_window_tokens);
remaining_tokens -= start_goal_tokens;
let end_goal_tokens = remaining_tokens.min(end_window_tokens);
(start_goal_tokens, end_goal_tokens)
} else {
let end_goal_tokens = (remaining_tokens / 2).min(end_window_tokens);
remaining_tokens -= end_goal_tokens;
let start_goal_tokens = remaining_tokens.min(start_window_tokens);
(start_goal_tokens, end_goal_tokens)
};
let truncated_start_window =
model.truncate(&start_window, start_goal_tokens, TruncationDirection::Start)?;
let truncated_end_window =
model.truncate(&end_window, end_goal_tokens, TruncationDirection::End)?;
writeln!(
prompt,
"{truncated_start_window}{selected_window}{truncated_end_window}"
)
.unwrap();
truncated = true;
} else {
writeln!(prompt, "{start_window}{selected_window}{end_window}").unwrap();
}
} else {
// If we don't have a selected range, include the entire file.
writeln!(prompt, "{}", &buffer.text()).unwrap();
// Dumb truncation strategy
if let Some(max_token_count) = max_token_count {
if model.count_tokens(&prompt)? > max_token_count {
truncated = true;
prompt = model.truncate(&prompt, max_token_count, TruncationDirection::End)?;
}
}
}
}
let token_count = model.count_tokens(&prompt)?;
anyhow::Ok((prompt, token_count, truncated))
}
pub struct FileContext {}
impl PromptTemplate for FileContext {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
if let Some(buffer) = &args.buffer {
let mut prompt = String::new();
// Add Initial Preamble
// TODO: Do we want to add the path in here?
writeln!(
prompt,
"The file you are currently working on has the following content:"
)
.unwrap();
let language_name = args
.language_name
.clone()
.unwrap_or("".to_string())
.to_lowercase();
let (context, _, truncated) = retrieve_context(
buffer,
&args.selected_range,
args.model.clone(),
max_token_length,
)?;
writeln!(prompt, "```{language_name}\n{context}\n```").unwrap();
if truncated {
writeln!(prompt, "Note the content has been truncated and only represents a portion of the file.").unwrap();
}
if let Some(selected_range) = &args.selected_range {
let start = selected_range.start.to_offset(buffer);
let end = selected_range.end.to_offset(buffer);
if start == end {
writeln!(prompt, "In particular, the user's cursor is currently on the '<|START|>' span in the above content, with no text selected.").unwrap();
} else {
writeln!(prompt, "In particular, the user has selected a section of the text between the '<|START|' and '|END|>' spans.").unwrap();
}
}
// Really dumb truncation strategy
if let Some(max_tokens) = max_token_length {
prompt = args
.model
.truncate(&prompt, max_tokens, TruncationDirection::End)?;
}
let token_count = args.model.count_tokens(&prompt)?;
anyhow::Ok((prompt, token_count))
} else {
Err(anyhow!("no buffer provided to retrieve file context from"))
}
}
}


@ -0,0 +1,99 @@
use crate::prompts::base::{PromptArguments, PromptFileType, PromptTemplate};
use anyhow::anyhow;
use std::fmt::Write;
pub fn capitalize(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
pub struct GenerateInlineContent {}
impl PromptTemplate for GenerateInlineContent {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
let Some(user_prompt) = &args.user_prompt else {
return Err(anyhow!("user prompt not provided"));
};
let file_type = args.get_file_type();
let content_type = match &file_type {
PromptFileType::Code => "code",
PromptFileType::Text => "text",
};
let mut prompt = String::new();
if let Some(selected_range) = &args.selected_range {
if selected_range.start == selected_range.end {
writeln!(
prompt,
"Assume the cursor is located where the `<|START|>` span is."
)
.unwrap();
writeln!(
prompt,
"{} can't be replaced, so assume your answer will be inserted at the cursor.",
capitalize(content_type)
)
.unwrap();
writeln!(
prompt,
"Generate {content_type} based on the users prompt: {user_prompt}",
)
.unwrap();
} else {
writeln!(prompt, "Modify the user's selected {content_type} based upon the users prompt: '{user_prompt}'").unwrap();
writeln!(prompt, "You must reply with only the adjusted {content_type} (within the '<|START|' and '|END|>' spans) not the entire file.").unwrap();
writeln!(prompt, "Double check that you only return code and not the '<|START|' and '|END|'> spans").unwrap();
}
} else {
writeln!(
prompt,
"Generate {content_type} based on the users prompt: {user_prompt}"
)
.unwrap();
}
if let Some(language_name) = &args.language_name {
writeln!(
prompt,
"Your answer MUST always and only be valid {}.",
language_name
)
.unwrap();
}
writeln!(prompt, "Never make remarks about the output.").unwrap();
writeln!(
prompt,
"Do not return anything else, except the generated {content_type}."
)
.unwrap();
match file_type {
PromptFileType::Code => {
// writeln!(prompt, "Always wrap your code in a Markdown block.").unwrap();
}
_ => {}
}
// Really dumb truncation strategy
if let Some(max_tokens) = max_token_length {
prompt = args.model.truncate(
&prompt,
max_tokens,
crate::models::TruncationDirection::End,
)?;
}
let token_count = args.model.count_tokens(&prompt)?;
anyhow::Ok((prompt, token_count))
}
}
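// Editor's note: the sketch below is not part of the original change. It is a
// hedged usage example that drives this template with the character-counting
// FakeLanguageModel from this crate's test support; the prompt text and
// argument values are illustrative placeholders only.
#[cfg(test)]
mod generate_inline_content_sketch {
    use super::*;
    use crate::models::LanguageModel;
    use crate::test::FakeLanguageModel;
    use std::sync::Arc;

    #[test]
    fn builds_a_generation_prompt_without_truncation() {
        let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity: 1000 });
        let args = PromptArguments {
            model: model.clone(),
            user_prompt: Some("add a doc comment to this function".to_string()),
            language_name: Some("Rust".to_string()),
            project_name: None,
            snippets: Vec::new(),
            reserved_tokens: 0,
            buffer: None,
            selected_range: None,
        };
        // With no max token length, the prompt is returned untruncated along
        // with its token count (here, simply a character count).
        let (prompt, token_count) = GenerateInlineContent {}.generate(&args, None).unwrap();
        assert!(prompt.contains("valid Rust"));
        assert_eq!(token_count, model.count_tokens(&prompt).unwrap());
    }
}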

View file

@ -0,0 +1,5 @@
pub mod base;
pub mod file_context;
pub mod generate;
pub mod preamble;
pub mod repository_context;

View file

@ -0,0 +1,52 @@
use crate::prompts::base::{PromptArguments, PromptFileType, PromptTemplate};
use std::fmt::Write;
pub struct EngineerPreamble {}
impl PromptTemplate for EngineerPreamble {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
let mut prompts = Vec::new();
match args.get_file_type() {
PromptFileType::Code => {
prompts.push(format!(
"You are an expert {}engineer.",
args.language_name.clone().unwrap_or("".to_string()) + " "
));
}
PromptFileType::Text => {
prompts.push("You are an expert engineer.".to_string());
}
}
if let Some(project_name) = args.project_name.clone() {
prompts.push(format!(
"You are currently working inside the '{project_name}' project in code editor Zed."
));
}
if let Some(mut remaining_tokens) = max_token_length {
let mut prompt = String::new();
let mut total_count = 0;
for prompt_piece in prompts {
let prompt_token_count =
args.model.count_tokens(&prompt_piece)? + args.model.count_tokens("\n")?;
if remaining_tokens > prompt_token_count {
writeln!(prompt, "{prompt_piece}").unwrap();
remaining_tokens -= prompt_token_count;
total_count += prompt_token_count;
}
}
anyhow::Ok((prompt, total_count))
} else {
let prompt = prompts.join("\n");
let token_count = args.model.count_tokens(&prompt)?;
anyhow::Ok((prompt, token_count))
}
}
}
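// Editor's note: this is an added, hedged sketch rather than part of the
// original change. It exercises the preamble with the FakeLanguageModel from
// this crate's test support; the language and project names are placeholders.
#[cfg(test)]
mod engineer_preamble_sketch {
    use super::*;
    use crate::test::FakeLanguageModel;
    use std::sync::Arc;

    #[test]
    fn generates_untruncated_preamble() {
        let args = PromptArguments {
            model: Arc::new(FakeLanguageModel { capacity: 200 }),
            user_prompt: None,
            language_name: Some("Rust".to_string()),
            project_name: Some("zed".to_string()),
            snippets: Vec::new(),
            reserved_tokens: 0,
            buffer: None,
            selected_range: None,
        };
        // With no token limit, both preamble sentences are emitted joined by a
        // newline, and the token count is the fake model's character count.
        let (prompt, token_count) = EngineerPreamble {}.generate(&args, None).unwrap();
        assert!(prompt.starts_with("You are an expert Rust engineer."));
        assert_eq!(token_count, prompt.chars().count());
    }
}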

View file

@ -0,0 +1,94 @@
use crate::prompts::base::{PromptArguments, PromptTemplate};
use std::fmt::Write;
use std::{ops::Range, path::PathBuf};
use gpui::{AsyncAppContext, ModelHandle};
use language::{Anchor, Buffer};
#[derive(Clone)]
pub struct PromptCodeSnippet {
path: Option<PathBuf>,
language_name: Option<String>,
content: String,
}
impl PromptCodeSnippet {
pub fn new(buffer: ModelHandle<Buffer>, range: Range<Anchor>, cx: &AsyncAppContext) -> Self {
let (content, language_name, file_path) = buffer.read_with(cx, |buffer, _| {
let snapshot = buffer.snapshot();
let content = snapshot.text_for_range(range.clone()).collect::<String>();
let language_name = buffer
.language()
.and_then(|language| Some(language.name().to_string().to_lowercase()));
let file_path = buffer
.file()
.and_then(|file| Some(file.path().to_path_buf()));
(content, language_name, file_path)
});
PromptCodeSnippet {
path: file_path,
language_name,
content,
}
}
}
impl ToString for PromptCodeSnippet {
fn to_string(&self) -> String {
let path = self
.path
.as_ref()
.and_then(|path| Some(path.to_string_lossy().to_string()))
.unwrap_or("".to_string());
let language_name = self.language_name.clone().unwrap_or("".to_string());
let content = self.content.clone();
format!("The below code snippet may be relevant from file: {path}\n```{language_name}\n{content}\n```")
}
}
pub struct RepositoryContext {}
impl PromptTemplate for RepositoryContext {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
const MAXIMUM_SNIPPET_TOKEN_COUNT: usize = 500;
let template = "You are working inside a large repository; here are a few code snippets that may be useful.";
let mut prompt = String::new();
let mut remaining_tokens = max_token_length.clone();
let seperator_token_length = args.model.count_tokens("\n")?;
for snippet in &args.snippets {
let mut snippet_prompt = template.to_string();
let content = snippet.to_string();
writeln!(snippet_prompt, "{content}").unwrap();
let token_count = args.model.count_tokens(&snippet_prompt)?;
if token_count <= MAXIMUM_SNIPPET_TOKEN_COUNT {
if let Some(tokens_left) = remaining_tokens {
if tokens_left >= token_count {
writeln!(prompt, "{snippet_prompt}").unwrap();
remaining_tokens = if tokens_left >= (token_count + seperator_token_length)
{
Some(tokens_left - token_count - seperator_token_length)
} else {
Some(0)
};
}
} else {
writeln!(prompt, "{snippet_prompt}").unwrap();
}
}
}
let total_token_count = args.model.count_tokens(&prompt)?;
anyhow::Ok((prompt, total_token_count))
}
}

View file

@ -0,0 +1 @@
pub mod open_ai;

View file

@ -0,0 +1,298 @@
use anyhow::{anyhow, Result};
use futures::{
future::BoxFuture, io::BufReader, stream::BoxStream, AsyncBufReadExt, AsyncReadExt, FutureExt,
Stream, StreamExt,
};
use gpui::{executor::Background, AppContext};
use isahc::{http::StatusCode, Request, RequestExt};
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::{
env,
fmt::{self, Display},
io,
sync::Arc,
};
use util::ResultExt;
use crate::{
auth::{CredentialProvider, ProviderCredential},
completion::{CompletionProvider, CompletionRequest},
models::LanguageModel,
};
use crate::providers::open_ai::{OpenAILanguageModel, OPENAI_API_URL};
#[derive(Clone, Copy, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
User,
Assistant,
System,
}
impl Role {
pub fn cycle(&mut self) {
*self = match self {
Role::User => Role::Assistant,
Role::Assistant => Role::System,
Role::System => Role::User,
}
}
}
impl Display for Role {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Role::User => write!(f, "User"),
Role::Assistant => write!(f, "Assistant"),
Role::System => write!(f, "System"),
}
}
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct RequestMessage {
pub role: Role,
pub content: String,
}
#[derive(Debug, Default, Serialize)]
pub struct OpenAIRequest {
pub model: String,
pub messages: Vec<RequestMessage>,
pub stream: bool,
pub stop: Vec<String>,
pub temperature: f32,
}
impl CompletionRequest for OpenAIRequest {
fn data(&self) -> serde_json::Result<String> {
serde_json::to_string(self)
}
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct ResponseMessage {
pub role: Option<Role>,
pub content: Option<String>,
}
#[derive(Deserialize, Debug)]
pub struct OpenAIUsage {
pub prompt_tokens: u32,
pub completion_tokens: u32,
pub total_tokens: u32,
}
#[derive(Deserialize, Debug)]
pub struct ChatChoiceDelta {
pub index: u32,
pub delta: ResponseMessage,
pub finish_reason: Option<String>,
}
#[derive(Deserialize, Debug)]
pub struct OpenAIResponseStreamEvent {
pub id: Option<String>,
pub object: String,
pub created: u32,
pub model: String,
pub choices: Vec<ChatChoiceDelta>,
pub usage: Option<OpenAIUsage>,
}
pub async fn stream_completion(
credential: ProviderCredential,
executor: Arc<Background>,
request: Box<dyn CompletionRequest>,
) -> Result<impl Stream<Item = Result<OpenAIResponseStreamEvent>>> {
let api_key = match credential {
ProviderCredential::Credentials { api_key } => api_key,
_ => {
return Err(anyhow!("no credentials provider for completion"));
}
};
let (tx, rx) = futures::channel::mpsc::unbounded::<Result<OpenAIResponseStreamEvent>>();
let json_data = request.data()?;
let mut response = Request::post(format!("{OPENAI_API_URL}/chat/completions"))
.header("Content-Type", "application/json")
.header("Authorization", format!("Bearer {}", api_key))
.body(json_data)?
.send_async()
.await?;
let status = response.status();
if status == StatusCode::OK {
executor
.spawn(async move {
let mut lines = BufReader::new(response.body_mut()).lines();
fn parse_line(
line: Result<String, io::Error>,
) -> Result<Option<OpenAIResponseStreamEvent>> {
if let Some(data) = line?.strip_prefix("data: ") {
let event = serde_json::from_str(&data)?;
Ok(Some(event))
} else {
Ok(None)
}
}
while let Some(line) = lines.next().await {
if let Some(event) = parse_line(line).transpose() {
let done = event.as_ref().map_or(false, |event| {
event
.choices
.last()
.map_or(false, |choice| choice.finish_reason.is_some())
});
if tx.unbounded_send(event).is_err() {
break;
}
if done {
break;
}
}
}
anyhow::Ok(())
})
.detach();
Ok(rx)
} else {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
#[derive(Deserialize)]
struct OpenAIResponse {
error: OpenAIError,
}
#[derive(Deserialize)]
struct OpenAIError {
message: String,
}
match serde_json::from_str::<OpenAIResponse>(&body) {
Ok(response) if !response.error.message.is_empty() => Err(anyhow!(
"Failed to connect to OpenAI API: {}",
response.error.message,
)),
_ => Err(anyhow!(
"Failed to connect to OpenAI API: {} {}",
response.status(),
body,
)),
}
}
}
#[derive(Clone)]
pub struct OpenAICompletionProvider {
model: OpenAILanguageModel,
credential: Arc<RwLock<ProviderCredential>>,
executor: Arc<Background>,
}
impl OpenAICompletionProvider {
pub fn new(model_name: &str, executor: Arc<Background>) -> Self {
let model = OpenAILanguageModel::load(model_name);
let credential = Arc::new(RwLock::new(ProviderCredential::NoCredentials));
Self {
model,
credential,
executor,
}
}
}
impl CredentialProvider for OpenAICompletionProvider {
fn has_credentials(&self) -> bool {
match *self.credential.read() {
ProviderCredential::Credentials { .. } => true,
_ => false,
}
}
fn retrieve_credentials(&self, cx: &AppContext) -> ProviderCredential {
let mut credential = self.credential.write();
match *credential {
ProviderCredential::Credentials { .. } => {
return credential.clone();
}
_ => {
if let Ok(api_key) = env::var("OPENAI_API_KEY") {
*credential = ProviderCredential::Credentials { api_key };
} else if let Some((_, api_key)) = cx
.platform()
.read_credentials(OPENAI_API_URL)
.log_err()
.flatten()
{
if let Some(api_key) = String::from_utf8(api_key).log_err() {
*credential = ProviderCredential::Credentials { api_key };
}
} else {
};
}
}
credential.clone()
}
fn save_credentials(&self, cx: &AppContext, credential: ProviderCredential) {
match credential.clone() {
ProviderCredential::Credentials { api_key } => {
cx.platform()
.write_credentials(OPENAI_API_URL, "Bearer", api_key.as_bytes())
.log_err();
}
_ => {}
}
*self.credential.write() = credential;
}
fn delete_credentials(&self, cx: &AppContext) {
cx.platform().delete_credentials(OPENAI_API_URL).log_err();
*self.credential.write() = ProviderCredential::NoCredentials;
}
}
impl CompletionProvider for OpenAICompletionProvider {
fn base_model(&self) -> Box<dyn LanguageModel> {
let model: Box<dyn LanguageModel> = Box::new(self.model.clone());
model
}
fn complete(
&self,
prompt: Box<dyn CompletionRequest>,
) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
// Currently the CompletionRequest for OpenAI includes a 'model' parameter.
// This means that the model is determined by the CompletionRequest and not by the
// CompletionProvider, which is currently model-based due to the language model.
// At some point in the future we should rectify this.
let credential = self.credential.read().clone();
let request = stream_completion(credential, self.executor.clone(), prompt);
async move {
let response = request.await?;
let stream = response
.filter_map(|response| async move {
match response {
Ok(mut response) => Some(Ok(response.choices.pop()?.delta.content?)),
Err(error) => Some(Err(error)),
}
})
.boxed();
Ok(stream)
}
.boxed()
}
fn box_clone(&self) -> Box<dyn CompletionProvider> {
Box::new((*self).clone())
}
}
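// Editor's note: added sketch, not part of the original change. It shows how a
// chat request built from the types above serializes through
// `CompletionRequest::data`; the model name and message content are placeholders.
#[cfg(test)]
mod openai_request_sketch {
    use super::*;

    #[test]
    fn serializes_request_body_as_json() {
        let request = OpenAIRequest {
            model: "gpt-3.5-turbo".to_string(),
            messages: vec![RequestMessage {
                role: Role::User,
                content: "Hello!".to_string(),
            }],
            stream: true,
            stop: Vec::new(),
            temperature: 1.0,
        };
        // `Role` serializes in lowercase, so the JSON body contains "user".
        let body = request.data().unwrap();
        assert!(body.contains("\"role\":\"user\""));
    }
}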

View file

@ -0,0 +1,306 @@
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use futures::AsyncReadExt;
use gpui::executor::Background;
use gpui::{serde_json, AppContext};
use isahc::http::StatusCode;
use isahc::prelude::Configurable;
use isahc::{AsyncBody, Response};
use lazy_static::lazy_static;
use parking_lot::{Mutex, RwLock};
use parse_duration::parse;
use postage::watch;
use serde::{Deserialize, Serialize};
use std::env;
use std::ops::Add;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tiktoken_rs::{cl100k_base, CoreBPE};
use util::http::{HttpClient, Request};
use util::ResultExt;
use crate::auth::{CredentialProvider, ProviderCredential};
use crate::embedding::{Embedding, EmbeddingProvider};
use crate::models::LanguageModel;
use crate::providers::open_ai::OpenAILanguageModel;
use crate::providers::open_ai::OPENAI_API_URL;
lazy_static! {
static ref OPENAI_BPE_TOKENIZER: CoreBPE = cl100k_base().unwrap();
}
#[derive(Clone)]
pub struct OpenAIEmbeddingProvider {
model: OpenAILanguageModel,
credential: Arc<RwLock<ProviderCredential>>,
pub client: Arc<dyn HttpClient>,
pub executor: Arc<Background>,
rate_limit_count_rx: watch::Receiver<Option<Instant>>,
rate_limit_count_tx: Arc<Mutex<watch::Sender<Option<Instant>>>>,
}
#[derive(Serialize)]
struct OpenAIEmbeddingRequest<'a> {
model: &'static str,
input: Vec<&'a str>,
}
#[derive(Deserialize)]
struct OpenAIEmbeddingResponse {
data: Vec<OpenAIEmbedding>,
usage: OpenAIEmbeddingUsage,
}
#[derive(Debug, Deserialize)]
struct OpenAIEmbedding {
embedding: Vec<f32>,
index: usize,
object: String,
}
#[derive(Deserialize)]
struct OpenAIEmbeddingUsage {
prompt_tokens: usize,
total_tokens: usize,
}
impl OpenAIEmbeddingProvider {
pub fn new(client: Arc<dyn HttpClient>, executor: Arc<Background>) -> Self {
let (rate_limit_count_tx, rate_limit_count_rx) = watch::channel_with(None);
let rate_limit_count_tx = Arc::new(Mutex::new(rate_limit_count_tx));
let model = OpenAILanguageModel::load("text-embedding-ada-002");
let credential = Arc::new(RwLock::new(ProviderCredential::NoCredentials));
OpenAIEmbeddingProvider {
model,
credential,
client,
executor,
rate_limit_count_rx,
rate_limit_count_tx,
}
}
fn get_api_key(&self) -> Result<String> {
match self.credential.read().clone() {
ProviderCredential::Credentials { api_key } => Ok(api_key),
_ => Err(anyhow!("api credentials not provided")),
}
}
fn resolve_rate_limit(&self) {
let reset_time = *self.rate_limit_count_tx.lock().borrow();
if let Some(reset_time) = reset_time {
if Instant::now() >= reset_time {
*self.rate_limit_count_tx.lock().borrow_mut() = None
}
}
log::trace!(
"resolving reset time: {:?}",
*self.rate_limit_count_tx.lock().borrow()
);
}
fn update_reset_time(&self, reset_time: Instant) {
let original_time = *self.rate_limit_count_tx.lock().borrow();
let updated_time = if let Some(original_time) = original_time {
if reset_time < original_time {
Some(reset_time)
} else {
Some(original_time)
}
} else {
Some(reset_time)
};
log::trace!("updating rate limit time: {:?}", updated_time);
*self.rate_limit_count_tx.lock().borrow_mut() = updated_time;
}
async fn send_request(
&self,
api_key: &str,
spans: Vec<&str>,
request_timeout: u64,
) -> Result<Response<AsyncBody>> {
let request = Request::post("https://api.openai.com/v1/embeddings")
.redirect_policy(isahc::config::RedirectPolicy::Follow)
.timeout(Duration::from_secs(request_timeout))
.header("Content-Type", "application/json")
.header("Authorization", format!("Bearer {}", api_key))
.body(
serde_json::to_string(&OpenAIEmbeddingRequest {
input: spans.clone(),
model: "text-embedding-ada-002",
})
.unwrap()
.into(),
)?;
Ok(self.client.send(request).await?)
}
}
impl CredentialProvider for OpenAIEmbeddingProvider {
fn has_credentials(&self) -> bool {
match *self.credential.read() {
ProviderCredential::Credentials { .. } => true,
_ => false,
}
}
fn retrieve_credentials(&self, cx: &AppContext) -> ProviderCredential {
let mut credential = self.credential.write();
match *credential {
ProviderCredential::Credentials { .. } => {
return credential.clone();
}
_ => {
if let Ok(api_key) = env::var("OPENAI_API_KEY") {
*credential = ProviderCredential::Credentials { api_key };
} else if let Some((_, api_key)) = cx
.platform()
.read_credentials(OPENAI_API_URL)
.log_err()
.flatten()
{
if let Some(api_key) = String::from_utf8(api_key).log_err() {
*credential = ProviderCredential::Credentials { api_key };
}
} else {
};
}
}
credential.clone()
}
fn save_credentials(&self, cx: &AppContext, credential: ProviderCredential) {
match credential.clone() {
ProviderCredential::Credentials { api_key } => {
cx.platform()
.write_credentials(OPENAI_API_URL, "Bearer", api_key.as_bytes())
.log_err();
}
_ => {}
}
*self.credential.write() = credential;
}
fn delete_credentials(&self, cx: &AppContext) {
cx.platform().delete_credentials(OPENAI_API_URL).log_err();
*self.credential.write() = ProviderCredential::NoCredentials;
}
}
#[async_trait]
impl EmbeddingProvider for OpenAIEmbeddingProvider {
fn base_model(&self) -> Box<dyn LanguageModel> {
let model: Box<dyn LanguageModel> = Box::new(self.model.clone());
model
}
fn max_tokens_per_batch(&self) -> usize {
50000
}
fn rate_limit_expiration(&self) -> Option<Instant> {
*self.rate_limit_count_rx.borrow()
}
async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>> {
const BACKOFF_SECONDS: [usize; 4] = [3, 5, 15, 45];
const MAX_RETRIES: usize = 4;
let api_key = self.get_api_key()?;
let mut request_number = 0;
let mut rate_limiting = false;
let mut request_timeout: u64 = 15;
let mut response: Response<AsyncBody>;
while request_number < MAX_RETRIES {
response = self
.send_request(
&api_key,
spans.iter().map(|x| &**x).collect(),
request_timeout,
)
.await?;
request_number += 1;
match response.status() {
StatusCode::REQUEST_TIMEOUT => {
request_timeout += 5;
}
StatusCode::OK => {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
let response: OpenAIEmbeddingResponse = serde_json::from_str(&body)?;
log::trace!(
"openai embedding completed. tokens: {:?}",
response.usage.total_tokens
);
// If a request that was previously rate-limited completes successfully,
// resolve the rate limit.
if rate_limiting {
self.resolve_rate_limit()
}
return Ok(response
.data
.into_iter()
.map(|embedding| Embedding::from(embedding.embedding))
.collect());
}
StatusCode::TOO_MANY_REQUESTS => {
rate_limiting = true;
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
let delay_duration = {
let delay = Duration::from_secs(BACKOFF_SECONDS[request_number - 1] as u64);
if let Some(time_to_reset) =
response.headers().get("x-ratelimit-reset-tokens")
{
if let Ok(time_str) = time_to_reset.to_str() {
parse(time_str).unwrap_or(delay)
} else {
delay
}
} else {
delay
}
};
// If we've previously rate limited, increment the duration but not the count
let reset_time = Instant::now().add(delay_duration);
self.update_reset_time(reset_time);
log::trace!(
"openai rate limiting: waiting {:?} until lifted",
&delay_duration
);
self.executor.timer(delay_duration).await;
}
_ => {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
return Err(anyhow!(
"open ai bad request: {:?} {:?}",
&response.status(),
body
));
}
}
}
Err(anyhow!("openai max retries"))
}
}

View file

@ -0,0 +1,9 @@
pub mod completion;
pub mod embedding;
pub mod model;
pub use completion::*;
pub use embedding::*;
pub use model::OpenAILanguageModel;
pub const OPENAI_API_URL: &'static str = "https://api.openai.com/v1";

View file

@ -0,0 +1,57 @@
use anyhow::anyhow;
use tiktoken_rs::CoreBPE;
use util::ResultExt;
use crate::models::{LanguageModel, TruncationDirection};
#[derive(Clone)]
pub struct OpenAILanguageModel {
name: String,
bpe: Option<CoreBPE>,
}
impl OpenAILanguageModel {
pub fn load(model_name: &str) -> Self {
let bpe = tiktoken_rs::get_bpe_from_model(model_name).log_err();
OpenAILanguageModel {
name: model_name.to_string(),
bpe,
}
}
}
impl LanguageModel for OpenAILanguageModel {
fn name(&self) -> String {
self.name.clone()
}
fn count_tokens(&self, content: &str) -> anyhow::Result<usize> {
if let Some(bpe) = &self.bpe {
anyhow::Ok(bpe.encode_with_special_tokens(content).len())
} else {
Err(anyhow!("bpe for open ai model was not retrieved"))
}
}
fn truncate(
&self,
content: &str,
length: usize,
direction: TruncationDirection,
) -> anyhow::Result<String> {
if let Some(bpe) = &self.bpe {
let tokens = bpe.encode_with_special_tokens(content);
if tokens.len() > length {
match direction {
TruncationDirection::End => bpe.decode(tokens[..length].to_vec()),
TruncationDirection::Start => bpe.decode(tokens[length..].to_vec()),
}
} else {
bpe.decode(tokens)
}
} else {
Err(anyhow!("bpe for open ai model was not retrieved"))
}
}
fn capacity(&self) -> anyhow::Result<usize> {
anyhow::Ok(tiktoken_rs::model::get_context_size(&self.name))
}
}
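// Editor's note: added, hedged sketch (not part of the original change) showing
// one way this model could be used to clamp a prompt to a token budget. The
// model name is a placeholder, and no exact token counts are assumed.
#[allow(dead_code)]
fn clamp_prompt_to_budget(prompt: &str, budget: usize) -> anyhow::Result<String> {
    let model = OpenAILanguageModel::load("gpt-3.5-turbo");
    if model.count_tokens(prompt)? <= budget {
        // Already within budget; keep the prompt as-is.
        Ok(prompt.to_string())
    } else {
        // Keep the beginning of the prompt and drop tokens from the end.
        model.truncate(prompt, budget, TruncationDirection::End)
    }
}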

View file

@ -0,0 +1,11 @@
pub trait LanguageModel {
fn name(&self) -> String;
fn count_tokens(&self, content: &str) -> anyhow::Result<usize>;
fn truncate(
&self,
content: &str,
length: usize,
direction: TruncationDirection,
) -> anyhow::Result<String>;
fn capacity(&self) -> anyhow::Result<usize>;
}

191
crates/ai/src/test.rs Normal file
View file

@ -0,0 +1,191 @@
use std::{
sync::atomic::{self, AtomicUsize, Ordering},
time::Instant,
};
use async_trait::async_trait;
use futures::{channel::mpsc, future::BoxFuture, stream::BoxStream, FutureExt, StreamExt};
use gpui::AppContext;
use parking_lot::Mutex;
use crate::{
auth::{CredentialProvider, ProviderCredential},
completion::{CompletionProvider, CompletionRequest},
embedding::{Embedding, EmbeddingProvider},
models::{LanguageModel, TruncationDirection},
};
#[derive(Clone)]
pub struct FakeLanguageModel {
pub capacity: usize,
}
impl LanguageModel for FakeLanguageModel {
fn name(&self) -> String {
"dummy".to_string()
}
fn count_tokens(&self, content: &str) -> anyhow::Result<usize> {
anyhow::Ok(content.chars().collect::<Vec<char>>().len())
}
fn truncate(
&self,
content: &str,
length: usize,
direction: TruncationDirection,
) -> anyhow::Result<String> {
println!("TRYING TO TRUNCATE: {:?}", length.clone());
if length > self.count_tokens(content)? {
println!("NOT TRUNCATING");
return anyhow::Ok(content.to_string());
}
anyhow::Ok(match direction {
TruncationDirection::End => content.chars().collect::<Vec<char>>()[..length]
.into_iter()
.collect::<String>(),
TruncationDirection::Start => content.chars().collect::<Vec<char>>()[length..]
.into_iter()
.collect::<String>(),
})
}
fn capacity(&self) -> anyhow::Result<usize> {
anyhow::Ok(self.capacity)
}
}
pub struct FakeEmbeddingProvider {
pub embedding_count: AtomicUsize,
}
impl Clone for FakeEmbeddingProvider {
fn clone(&self) -> Self {
FakeEmbeddingProvider {
embedding_count: AtomicUsize::new(self.embedding_count.load(Ordering::SeqCst)),
}
}
}
impl Default for FakeEmbeddingProvider {
fn default() -> Self {
FakeEmbeddingProvider {
embedding_count: AtomicUsize::default(),
}
}
}
impl FakeEmbeddingProvider {
pub fn embedding_count(&self) -> usize {
self.embedding_count.load(atomic::Ordering::SeqCst)
}
pub fn embed_sync(&self, span: &str) -> Embedding {
let mut result = vec![1.0; 26];
for letter in span.chars() {
let letter = letter.to_ascii_lowercase();
if letter as u32 >= 'a' as u32 {
let ix = (letter as u32) - ('a' as u32);
if ix < 26 {
result[ix as usize] += 1.0;
}
}
}
let norm = result.iter().map(|x| x * x).sum::<f32>().sqrt();
for x in &mut result {
*x /= norm;
}
result.into()
}
}
impl CredentialProvider for FakeEmbeddingProvider {
fn has_credentials(&self) -> bool {
true
}
fn retrieve_credentials(&self, _cx: &AppContext) -> ProviderCredential {
ProviderCredential::NotNeeded
}
fn save_credentials(&self, _cx: &AppContext, _credential: ProviderCredential) {}
fn delete_credentials(&self, _cx: &AppContext) {}
}
#[async_trait]
impl EmbeddingProvider for FakeEmbeddingProvider {
fn base_model(&self) -> Box<dyn LanguageModel> {
Box::new(FakeLanguageModel { capacity: 1000 })
}
fn max_tokens_per_batch(&self) -> usize {
1000
}
fn rate_limit_expiration(&self) -> Option<Instant> {
None
}
async fn embed_batch(&self, spans: Vec<String>) -> anyhow::Result<Vec<Embedding>> {
self.embedding_count
.fetch_add(spans.len(), atomic::Ordering::SeqCst);
anyhow::Ok(spans.iter().map(|span| self.embed_sync(span)).collect())
}
}
pub struct FakeCompletionProvider {
last_completion_tx: Mutex<Option<mpsc::Sender<String>>>,
}
impl Clone for FakeCompletionProvider {
fn clone(&self) -> Self {
Self {
last_completion_tx: Mutex::new(None),
}
}
}
impl FakeCompletionProvider {
pub fn new() -> Self {
Self {
last_completion_tx: Mutex::new(None),
}
}
pub fn send_completion(&self, completion: impl Into<String>) {
let mut tx = self.last_completion_tx.lock();
tx.as_mut().unwrap().try_send(completion.into()).unwrap();
}
pub fn finish_completion(&self) {
self.last_completion_tx.lock().take().unwrap();
}
}
impl CredentialProvider for FakeCompletionProvider {
fn has_credentials(&self) -> bool {
true
}
fn retrieve_credentials(&self, _cx: &AppContext) -> ProviderCredential {
ProviderCredential::NotNeeded
}
fn save_credentials(&self, _cx: &AppContext, _credential: ProviderCredential) {}
fn delete_credentials(&self, _cx: &AppContext) {}
}
impl CompletionProvider for FakeCompletionProvider {
fn base_model(&self) -> Box<dyn LanguageModel> {
let model: Box<dyn LanguageModel> = Box::new(FakeLanguageModel { capacity: 8190 });
model
}
fn complete(
&self,
_prompt: Box<dyn CompletionRequest>,
) -> BoxFuture<'static, anyhow::Result<BoxStream<'static, anyhow::Result<String>>>> {
let (tx, rx) = mpsc::channel(1);
*self.last_completion_tx.lock() = Some(tx);
async move { Ok(rx.map(|rx| Ok(rx)).boxed()) }.boxed()
}
fn box_clone(&self) -> Box<dyn CompletionProvider> {
Box::new((*self).clone())
}
}
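// Editor's note: added, hedged sketch (not part of the original change)
// demonstrating the character-based token accounting of FakeLanguageModel:
// `count_tokens` counts characters, `truncate` with `End` keeps the first
// `length` characters, and `truncate` with `Start` drops the first `length`
// characters.
#[cfg(test)]
mod fake_language_model_sketch {
    use super::*;

    #[test]
    fn truncates_by_character_count() {
        let model = FakeLanguageModel { capacity: 100 };
        assert_eq!(model.count_tokens("abcdefghij").unwrap(), 10);
        assert_eq!(
            model
                .truncate("abcdefghij", 4, TruncationDirection::End)
                .unwrap(),
            "abcd"
        );
        assert_eq!(
            model
                .truncate("abcdefghij", 4, TruncationDirection::Start)
                .unwrap(),
            "efghij"
        );
    }
}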

38
crates/ai2/Cargo.toml Normal file
View file

@ -0,0 +1,38 @@
[package]
name = "ai2"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/ai2.rs"
doctest = false
[features]
test-support = []
[dependencies]
gpui2 = { path = "../gpui2" }
util = { path = "../util" }
language2 = { path = "../language2" }
async-trait.workspace = true
anyhow.workspace = true
futures.workspace = true
lazy_static.workspace = true
ordered-float.workspace = true
parking_lot.workspace = true
isahc.workspace = true
regex.workspace = true
serde.workspace = true
serde_json.workspace = true
postage.workspace = true
rand.workspace = true
log.workspace = true
parse_duration = "2.1.1"
tiktoken-rs = "0.5.0"
matrixmultiply = "0.3.7"
rusqlite = { version = "0.29.0", features = ["blob", "array", "modern_sqlite"] }
bincode = "1.3.3"
[dev-dependencies]
gpui2 = { path = "../gpui2", features = ["test-support"] }

8
crates/ai2/src/ai2.rs Normal file
View file

@ -0,0 +1,8 @@
pub mod auth;
pub mod completion;
pub mod embedding;
pub mod models;
pub mod prompts;
pub mod providers;
#[cfg(any(test, feature = "test-support"))]
pub mod test;

17
crates/ai2/src/auth.rs Normal file
View file

@ -0,0 +1,17 @@
use async_trait::async_trait;
use gpui2::AppContext;
#[derive(Clone, Debug)]
pub enum ProviderCredential {
Credentials { api_key: String },
NoCredentials,
NotNeeded,
}
#[async_trait]
pub trait CredentialProvider: Send + Sync {
fn has_credentials(&self) -> bool;
async fn retrieve_credentials(&self, cx: &mut AppContext) -> ProviderCredential;
async fn save_credentials(&self, cx: &mut AppContext, credential: ProviderCredential);
async fn delete_credentials(&self, cx: &mut AppContext);
}

View file

@ -0,0 +1,23 @@
use anyhow::Result;
use futures::{future::BoxFuture, stream::BoxStream};
use crate::{auth::CredentialProvider, models::LanguageModel};
pub trait CompletionRequest: Send + Sync {
fn data(&self) -> serde_json::Result<String>;
}
pub trait CompletionProvider: CredentialProvider {
fn base_model(&self) -> Box<dyn LanguageModel>;
fn complete(
&self,
prompt: Box<dyn CompletionRequest>,
) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;
fn box_clone(&self) -> Box<dyn CompletionProvider>;
}
impl Clone for Box<dyn CompletionProvider> {
fn clone(&self) -> Box<dyn CompletionProvider> {
self.box_clone()
}
}

123
crates/ai2/src/embedding.rs Normal file
View file

@ -0,0 +1,123 @@
use std::time::Instant;
use anyhow::Result;
use async_trait::async_trait;
use ordered_float::OrderedFloat;
use rusqlite::types::{FromSql, FromSqlResult, ToSqlOutput, ValueRef};
use rusqlite::ToSql;
use crate::auth::CredentialProvider;
use crate::models::LanguageModel;
#[derive(Debug, PartialEq, Clone)]
pub struct Embedding(pub Vec<f32>);
// This is needed for semantic index functionality.
// Unfortunately it has to live wherever the "Embedding" struct is created.
// Keeping it here, though, introduces a 'rusqlite' dependency into the AI crate,
// which is less than ideal.
impl FromSql for Embedding {
fn column_result(value: ValueRef) -> FromSqlResult<Self> {
let bytes = value.as_blob()?;
let embedding: Result<Vec<f32>, Box<bincode::ErrorKind>> = bincode::deserialize(bytes);
if embedding.is_err() {
return Err(rusqlite::types::FromSqlError::Other(embedding.unwrap_err()));
}
Ok(Embedding(embedding.unwrap()))
}
}
impl ToSql for Embedding {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> {
let bytes = bincode::serialize(&self.0)
.map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))?;
Ok(ToSqlOutput::Owned(rusqlite::types::Value::Blob(bytes)))
}
}
impl From<Vec<f32>> for Embedding {
fn from(value: Vec<f32>) -> Self {
Embedding(value)
}
}
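// Editor's note: added, hedged sketch (not part of the original change). It
// illustrates the storage format used by the FromSql/ToSql impls above: the
// embedding vector is written to SQLite as a bincode-encoded blob.
#[cfg(test)]
mod embedding_blob_sketch {
    use super::*;

    #[test]
    fn round_trips_through_bincode() {
        let embedding = Embedding::from(vec![0.5_f32, 0.25, 0.125]);
        let blob = bincode::serialize(&embedding.0).unwrap();
        let restored: Vec<f32> = bincode::deserialize(&blob).unwrap();
        assert_eq!(Embedding::from(restored), embedding);
    }
}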
impl Embedding {
pub fn similarity(&self, other: &Self) -> OrderedFloat<f32> {
let len = self.0.len();
assert_eq!(len, other.0.len());
let mut result = 0.0;
unsafe {
matrixmultiply::sgemm(
1,
len,
1,
1.0,
self.0.as_ptr(),
len as isize,
1,
other.0.as_ptr(),
1,
len as isize,
0.0,
&mut result as *mut f32,
1,
1,
);
}
OrderedFloat(result)
}
}
#[async_trait]
pub trait EmbeddingProvider: CredentialProvider {
fn base_model(&self) -> Box<dyn LanguageModel>;
async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>>;
fn max_tokens_per_batch(&self) -> usize;
fn rate_limit_expiration(&self) -> Option<Instant>;
}
#[cfg(test)]
mod tests {
use super::*;
use rand::prelude::*;
#[gpui2::test]
fn test_similarity(mut rng: StdRng) {
assert_eq!(
Embedding::from(vec![1., 0., 0., 0., 0.])
.similarity(&Embedding::from(vec![0., 1., 0., 0., 0.])),
0.
);
assert_eq!(
Embedding::from(vec![2., 0., 0., 0., 0.])
.similarity(&Embedding::from(vec![3., 1., 0., 0., 0.])),
6.
);
for _ in 0..100 {
let size = 1536;
let mut a = vec![0.; size];
let mut b = vec![0.; size];
for (a, b) in a.iter_mut().zip(b.iter_mut()) {
*a = rng.gen();
*b = rng.gen();
}
let a = Embedding::from(a);
let b = Embedding::from(b);
assert_eq!(
round_to_decimals(a.similarity(&b), 1),
round_to_decimals(reference_dot(&a.0, &b.0), 1)
);
}
fn round_to_decimals(n: OrderedFloat<f32>, decimal_places: i32) -> f32 {
let factor = (10.0 as f32).powi(decimal_places);
(n * factor).round() / factor
}
fn reference_dot(a: &[f32], b: &[f32]) -> OrderedFloat<f32> {
OrderedFloat(a.iter().zip(b.iter()).map(|(a, b)| a * b).sum())
}
}
}

16
crates/ai2/src/models.rs Normal file
View file

@ -0,0 +1,16 @@
pub enum TruncationDirection {
Start,
End,
}
pub trait LanguageModel {
fn name(&self) -> String;
fn count_tokens(&self, content: &str) -> anyhow::Result<usize>;
fn truncate(
&self,
content: &str,
length: usize,
direction: TruncationDirection,
) -> anyhow::Result<String>;
fn capacity(&self) -> anyhow::Result<usize>;
}

View file

@ -0,0 +1,330 @@
use std::cmp::Reverse;
use std::ops::Range;
use std::sync::Arc;
use language2::BufferSnapshot;
use util::ResultExt;
use crate::models::LanguageModel;
use crate::prompts::repository_context::PromptCodeSnippet;
pub(crate) enum PromptFileType {
Text,
Code,
}
// TODO: Set this up to handle defaults well
pub struct PromptArguments {
pub model: Arc<dyn LanguageModel>,
pub user_prompt: Option<String>,
pub language_name: Option<String>,
pub project_name: Option<String>,
pub snippets: Vec<PromptCodeSnippet>,
pub reserved_tokens: usize,
pub buffer: Option<BufferSnapshot>,
pub selected_range: Option<Range<usize>>,
}
impl PromptArguments {
pub(crate) fn get_file_type(&self) -> PromptFileType {
if self
.language_name
.as_ref()
.and_then(|name| Some(!["Markdown", "Plain Text"].contains(&name.as_str())))
.unwrap_or(true)
{
PromptFileType::Code
} else {
PromptFileType::Text
}
}
}
pub trait PromptTemplate {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)>;
}
#[repr(i8)]
#[derive(PartialEq, Eq, Ord)]
pub enum PromptPriority {
Mandatory, // Ignores truncation
Ordered { order: usize }, // Truncates based on priority
}
impl PartialOrd for PromptPriority {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
match (self, other) {
(Self::Mandatory, Self::Mandatory) => Some(std::cmp::Ordering::Equal),
(Self::Mandatory, Self::Ordered { .. }) => Some(std::cmp::Ordering::Greater),
(Self::Ordered { .. }, Self::Mandatory) => Some(std::cmp::Ordering::Less),
(Self::Ordered { order: a }, Self::Ordered { order: b }) => b.partial_cmp(a),
}
}
}
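// Editor's note: added, hedged sketch (not part of the original change). Per
// the PartialOrd impl above, a mandatory priority compares greater than any
// ordered one, and a smaller `order` value compares greater than a larger one;
// PromptChain sorts template indices by this priority (wrapped in Reverse)
// before handing out the token budget.
#[cfg(test)]
mod prompt_priority_sketch {
    use super::*;

    #[test]
    fn mandatory_outranks_ordered() {
        assert!(PromptPriority::Mandatory > PromptPriority::Ordered { order: 0 });
        assert!(PromptPriority::Ordered { order: 0 } > PromptPriority::Ordered { order: 1 });
    }
}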
pub struct PromptChain {
args: PromptArguments,
templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)>,
}
impl PromptChain {
pub fn new(
args: PromptArguments,
templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)>,
) -> Self {
PromptChain { args, templates }
}
pub fn generate(&self, truncate: bool) -> anyhow::Result<(String, usize)> {
// Argsort based on Prompt Priority
let seperator = "\n";
let seperator_tokens = self.args.model.count_tokens(seperator)?;
let mut sorted_indices = (0..self.templates.len()).collect::<Vec<_>>();
sorted_indices.sort_by_key(|&i| Reverse(&self.templates[i].0));
// If truncating, start the token budget at the model capacity minus reserved tokens
let mut tokens_outstanding = if truncate {
Some(self.args.model.capacity()? - self.args.reserved_tokens)
} else {
None
};
let mut prompts = vec!["".to_string(); sorted_indices.len()];
for idx in sorted_indices {
let (_, template) = &self.templates[idx];
if let Some((template_prompt, prompt_token_count)) =
template.generate(&self.args, tokens_outstanding).log_err()
{
if template_prompt != "" {
prompts[idx] = template_prompt;
if let Some(remaining_tokens) = tokens_outstanding {
let new_tokens = prompt_token_count + seperator_tokens;
tokens_outstanding = if remaining_tokens > new_tokens {
Some(remaining_tokens - new_tokens)
} else {
Some(0)
};
}
}
}
}
prompts.retain(|x| x != "");
let full_prompt = prompts.join(seperator);
let total_token_count = self.args.model.count_tokens(&full_prompt)?;
anyhow::Ok((prompts.join(seperator), total_token_count))
}
}
#[cfg(test)]
pub(crate) mod tests {
use crate::models::TruncationDirection;
use crate::test::FakeLanguageModel;
use super::*;
#[test]
pub fn test_prompt_chain() {
struct TestPromptTemplate {}
impl PromptTemplate for TestPromptTemplate {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
let mut content = "This is a test prompt template".to_string();
let mut token_count = args.model.count_tokens(&content)?;
if let Some(max_token_length) = max_token_length {
if token_count > max_token_length {
content = args.model.truncate(
&content,
max_token_length,
TruncationDirection::End,
)?;
token_count = max_token_length;
}
}
anyhow::Ok((content, token_count))
}
}
struct TestLowPriorityTemplate {}
impl PromptTemplate for TestLowPriorityTemplate {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
let mut content = "This is a low priority test prompt template".to_string();
let mut token_count = args.model.count_tokens(&content)?;
if let Some(max_token_length) = max_token_length {
if token_count > max_token_length {
content = args.model.truncate(
&content,
max_token_length,
TruncationDirection::End,
)?;
token_count = max_token_length;
}
}
anyhow::Ok((content, token_count))
}
}
let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity: 100 });
let args = PromptArguments {
model: model.clone(),
language_name: None,
project_name: None,
snippets: Vec::new(),
reserved_tokens: 0,
buffer: None,
selected_range: None,
user_prompt: None,
};
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(
PromptPriority::Ordered { order: 0 },
Box::new(TestPromptTemplate {}),
),
(
PromptPriority::Ordered { order: 1 },
Box::new(TestLowPriorityTemplate {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, token_count) = chain.generate(false).unwrap();
assert_eq!(
prompt,
"This is a test prompt template\nThis is a low priority test prompt template"
.to_string()
);
assert_eq!(model.count_tokens(&prompt).unwrap(), token_count);
// Testing with Truncation Off
// Should ignore capacity and return all prompts
let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity: 20 });
let args = PromptArguments {
model: model.clone(),
language_name: None,
project_name: None,
snippets: Vec::new(),
reserved_tokens: 0,
buffer: None,
selected_range: None,
user_prompt: None,
};
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(
PromptPriority::Ordered { order: 0 },
Box::new(TestPromptTemplate {}),
),
(
PromptPriority::Ordered { order: 1 },
Box::new(TestLowPriorityTemplate {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, token_count) = chain.generate(false).unwrap();
assert_eq!(
prompt,
"This is a test prompt template\nThis is a low priority test prompt template"
.to_string()
);
assert_eq!(model.count_tokens(&prompt).unwrap(), token_count);
// Testing with truncation on
// Should truncate the prompts to fit within the model's capacity
let capacity = 20;
let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity });
let args = PromptArguments {
model: model.clone(),
language_name: None,
project_name: None,
snippets: Vec::new(),
reserved_tokens: 0,
buffer: None,
selected_range: None,
user_prompt: None,
};
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(
PromptPriority::Ordered { order: 0 },
Box::new(TestPromptTemplate {}),
),
(
PromptPriority::Ordered { order: 1 },
Box::new(TestLowPriorityTemplate {}),
),
(
PromptPriority::Ordered { order: 2 },
Box::new(TestLowPriorityTemplate {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, token_count) = chain.generate(true).unwrap();
assert_eq!(prompt, "This is a test promp".to_string());
assert_eq!(token_count, capacity);
// Change Ordering of Prompts Based on Priority
let capacity = 120;
let reserved_tokens = 10;
let model: Arc<dyn LanguageModel> = Arc::new(FakeLanguageModel { capacity });
let args = PromptArguments {
model: model.clone(),
language_name: None,
project_name: None,
snippets: Vec::new(),
reserved_tokens,
buffer: None,
selected_range: None,
user_prompt: None,
};
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(
PromptPriority::Mandatory,
Box::new(TestLowPriorityTemplate {}),
),
(
PromptPriority::Ordered { order: 0 },
Box::new(TestPromptTemplate {}),
),
(
PromptPriority::Ordered { order: 1 },
Box::new(TestLowPriorityTemplate {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, token_count) = chain.generate(true).unwrap();
assert_eq!(
prompt,
"This is a low priority test prompt template\nThis is a test prompt template\nThis is a low priority test prompt "
.to_string()
);
assert_eq!(token_count, capacity - reserved_tokens);
}
}

View file

@ -0,0 +1,164 @@
use anyhow::anyhow;
use language2::BufferSnapshot;
use language2::ToOffset;
use crate::models::LanguageModel;
use crate::models::TruncationDirection;
use crate::prompts::base::PromptArguments;
use crate::prompts::base::PromptTemplate;
use std::fmt::Write;
use std::ops::Range;
use std::sync::Arc;
fn retrieve_context(
buffer: &BufferSnapshot,
selected_range: &Option<Range<usize>>,
model: Arc<dyn LanguageModel>,
max_token_count: Option<usize>,
) -> anyhow::Result<(String, usize, bool)> {
let mut prompt = String::new();
let mut truncated = false;
if let Some(selected_range) = selected_range {
let start = selected_range.start.to_offset(buffer);
let end = selected_range.end.to_offset(buffer);
let start_window = buffer.text_for_range(0..start).collect::<String>();
let mut selected_window = String::new();
if start == end {
write!(selected_window, "<|START|>").unwrap();
} else {
write!(selected_window, "<|START|").unwrap();
}
write!(
selected_window,
"{}",
buffer.text_for_range(start..end).collect::<String>()
)
.unwrap();
if start != end {
write!(selected_window, "|END|>").unwrap();
}
let end_window = buffer.text_for_range(end..buffer.len()).collect::<String>();
if let Some(max_token_count) = max_token_count {
let selected_tokens = model.count_tokens(&selected_window)?;
if selected_tokens > max_token_count {
return Err(anyhow!(
"selected range is greater than model context window, truncation not possible"
));
};
let mut remaining_tokens = max_token_count - selected_tokens;
let start_window_tokens = model.count_tokens(&start_window)?;
let end_window_tokens = model.count_tokens(&end_window)?;
let outside_tokens = start_window_tokens + end_window_tokens;
if outside_tokens > remaining_tokens {
let (start_goal_tokens, end_goal_tokens) =
if start_window_tokens < end_window_tokens {
let start_goal_tokens = (remaining_tokens / 2).min(start_window_tokens);
remaining_tokens -= start_goal_tokens;
let end_goal_tokens = remaining_tokens.min(end_window_tokens);
(start_goal_tokens, end_goal_tokens)
} else {
let end_goal_tokens = (remaining_tokens / 2).min(end_window_tokens);
remaining_tokens -= end_goal_tokens;
let start_goal_tokens = remaining_tokens.min(start_window_tokens);
(start_goal_tokens, end_goal_tokens)
};
let truncated_start_window =
model.truncate(&start_window, start_goal_tokens, TruncationDirection::Start)?;
let truncated_end_window =
model.truncate(&end_window, end_goal_tokens, TruncationDirection::End)?;
writeln!(
prompt,
"{truncated_start_window}{selected_window}{truncated_end_window}"
)
.unwrap();
truncated = true;
} else {
writeln!(prompt, "{start_window}{selected_window}{end_window}").unwrap();
}
} else {
// If we don't have a selected range, include the entire file.
writeln!(prompt, "{}", &buffer.text()).unwrap();
// Dumb truncation strategy
if let Some(max_token_count) = max_token_count {
if model.count_tokens(&prompt)? > max_token_count {
truncated = true;
prompt = model.truncate(&prompt, max_token_count, TruncationDirection::End)?;
}
}
}
}
let token_count = model.count_tokens(&prompt)?;
anyhow::Ok((prompt, token_count, truncated))
}
pub struct FileContext {}
impl PromptTemplate for FileContext {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
if let Some(buffer) = &args.buffer {
let mut prompt = String::new();
// Add Initial Preamble
// TODO: Do we want to add the path in here?
writeln!(
prompt,
"The file you are currently working on has the following content:"
)
.unwrap();
let language_name = args
.language_name
.clone()
.unwrap_or("".to_string())
.to_lowercase();
let (context, _, truncated) = retrieve_context(
buffer,
&args.selected_range,
args.model.clone(),
max_token_length,
)?;
writeln!(prompt, "```{language_name}\n{context}\n```").unwrap();
if truncated {
writeln!(prompt, "Note the content has been truncated and only represents a portion of the file.").unwrap();
}
if let Some(selected_range) = &args.selected_range {
let start = selected_range.start.to_offset(buffer);
let end = selected_range.end.to_offset(buffer);
if start == end {
writeln!(prompt, "In particular, the user's cursor is currently on the '<|START|>' span in the above content, with no text selected.").unwrap();
} else {
writeln!(prompt, "In particular, the user has selected a section of the text between the '<|START|' and '|END|>' spans.").unwrap();
}
}
// Really dumb truncation strategy
if let Some(max_tokens) = max_token_length {
prompt = args
.model
.truncate(&prompt, max_tokens, TruncationDirection::End)?;
}
let token_count = args.model.count_tokens(&prompt)?;
anyhow::Ok((prompt, token_count))
} else {
Err(anyhow!("no buffer provided to retrieve file context from"))
}
}
}

View file

@ -0,0 +1,99 @@
use crate::prompts::base::{PromptArguments, PromptFileType, PromptTemplate};
use anyhow::anyhow;
use std::fmt::Write;
pub fn capitalize(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
pub struct GenerateInlineContent {}
impl PromptTemplate for GenerateInlineContent {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
let Some(user_prompt) = &args.user_prompt else {
return Err(anyhow!("user prompt not provided"));
};
let file_type = args.get_file_type();
let content_type = match &file_type {
PromptFileType::Code => "code",
PromptFileType::Text => "text",
};
let mut prompt = String::new();
if let Some(selected_range) = &args.selected_range {
if selected_range.start == selected_range.end {
writeln!(
prompt,
"Assume the cursor is located where the `<|START|>` span is."
)
.unwrap();
writeln!(
prompt,
"{} can't be replaced, so assume your answer will be inserted at the cursor.",
capitalize(content_type)
)
.unwrap();
writeln!(
prompt,
"Generate {content_type} based on the users prompt: {user_prompt}",
)
.unwrap();
} else {
writeln!(prompt, "Modify the user's selected {content_type} based upon the users prompt: '{user_prompt}'").unwrap();
writeln!(prompt, "You must reply with only the adjusted {content_type} (within the '<|START|' and '|END|>' spans) not the entire file.").unwrap();
writeln!(prompt, "Double check that you only return code and not the '<|START|' and '|END|'> spans").unwrap();
}
} else {
writeln!(
prompt,
"Generate {content_type} based on the users prompt: {user_prompt}"
)
.unwrap();
}
if let Some(language_name) = &args.language_name {
writeln!(
prompt,
"Your answer MUST always and only be valid {}.",
language_name
)
.unwrap();
}
writeln!(prompt, "Never make remarks about the output.").unwrap();
writeln!(
prompt,
"Do not return anything else, except the generated {content_type}."
)
.unwrap();
match file_type {
PromptFileType::Code => {
// writeln!(prompt, "Always wrap your code in a Markdown block.").unwrap();
}
_ => {}
}
// Really dumb truncation strategy
if let Some(max_tokens) = max_token_length {
prompt = args.model.truncate(
&prompt,
max_tokens,
crate::models::TruncationDirection::End,
)?;
}
let token_count = args.model.count_tokens(&prompt)?;
anyhow::Ok((prompt, token_count))
}
}

View file

@ -0,0 +1,5 @@
pub mod base;
pub mod file_context;
pub mod generate;
pub mod preamble;
pub mod repository_context;

View file

@ -0,0 +1,52 @@
use crate::prompts::base::{PromptArguments, PromptFileType, PromptTemplate};
use std::fmt::Write;
pub struct EngineerPreamble {}
impl PromptTemplate for EngineerPreamble {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
let mut prompts = Vec::new();
match args.get_file_type() {
PromptFileType::Code => {
prompts.push(format!(
"You are an expert {}engineer.",
args.language_name.clone().unwrap_or("".to_string()) + " "
));
}
PromptFileType::Text => {
prompts.push("You are an expert engineer.".to_string());
}
}
if let Some(project_name) = args.project_name.clone() {
prompts.push(format!(
"You are currently working inside the '{project_name}' project in code editor Zed."
));
}
if let Some(mut remaining_tokens) = max_token_length {
let mut prompt = String::new();
let mut total_count = 0;
for prompt_piece in prompts {
let prompt_token_count =
args.model.count_tokens(&prompt_piece)? + args.model.count_tokens("\n")?;
if remaining_tokens > prompt_token_count {
writeln!(prompt, "{prompt_piece}").unwrap();
remaining_tokens -= prompt_token_count;
total_count += prompt_token_count;
}
}
anyhow::Ok((prompt, total_count))
} else {
let prompt = prompts.join("\n");
let token_count = args.model.count_tokens(&prompt)?;
anyhow::Ok((prompt, token_count))
}
}
}

View file

@ -0,0 +1,98 @@
use crate::prompts::base::{PromptArguments, PromptTemplate};
use std::fmt::Write;
use std::{ops::Range, path::PathBuf};
use gpui2::{AsyncAppContext, Model};
use language2::{Anchor, Buffer};
#[derive(Clone)]
pub struct PromptCodeSnippet {
path: Option<PathBuf>,
language_name: Option<String>,
content: String,
}
impl PromptCodeSnippet {
pub fn new(
buffer: Model<Buffer>,
range: Range<Anchor>,
cx: &mut AsyncAppContext,
) -> anyhow::Result<Self> {
let (content, language_name, file_path) = buffer.update(cx, |buffer, _| {
let snapshot = buffer.snapshot();
let content = snapshot.text_for_range(range.clone()).collect::<String>();
let language_name = buffer
.language()
.and_then(|language| Some(language.name().to_string().to_lowercase()));
let file_path = buffer
.file()
.and_then(|file| Some(file.path().to_path_buf()));
(content, language_name, file_path)
})?;
anyhow::Ok(PromptCodeSnippet {
path: file_path,
language_name,
content,
})
}
}
impl ToString for PromptCodeSnippet {
fn to_string(&self) -> String {
let path = self
.path
.as_ref()
.and_then(|path| Some(path.to_string_lossy().to_string()))
.unwrap_or("".to_string());
let language_name = self.language_name.clone().unwrap_or("".to_string());
let content = self.content.clone();
format!("The below code snippet may be relevant from file: {path}\n```{language_name}\n{content}\n```")
}
}
pub struct RepositoryContext {}
impl PromptTemplate for RepositoryContext {
fn generate(
&self,
args: &PromptArguments,
max_token_length: Option<usize>,
) -> anyhow::Result<(String, usize)> {
const MAXIMUM_SNIPPET_TOKEN_COUNT: usize = 500;
let template = "You are working inside a large repository; here are a few code snippets that may be useful.";
let mut prompt = String::new();
let mut remaining_tokens = max_token_length.clone();
let seperator_token_length = args.model.count_tokens("\n")?;
for snippet in &args.snippets {
let mut snippet_prompt = template.to_string();
let content = snippet.to_string();
writeln!(snippet_prompt, "{content}").unwrap();
let token_count = args.model.count_tokens(&snippet_prompt)?;
if token_count <= MAXIMUM_SNIPPET_TOKEN_COUNT {
if let Some(tokens_left) = remaining_tokens {
if tokens_left >= token_count {
writeln!(prompt, "{snippet_prompt}").unwrap();
remaining_tokens = if tokens_left >= (token_count + seperator_token_length)
{
Some(tokens_left - token_count - seperator_token_length)
} else {
Some(0)
};
}
} else {
writeln!(prompt, "{snippet_prompt}").unwrap();
}
}
}
let total_token_count = args.model.count_tokens(&prompt)?;
anyhow::Ok((prompt, total_token_count))
}
}

View file

@ -0,0 +1 @@
pub mod open_ai;

View file

@ -0,0 +1,306 @@
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use futures::{
future::BoxFuture, io::BufReader, stream::BoxStream, AsyncBufReadExt, AsyncReadExt, FutureExt,
Stream, StreamExt,
};
use gpui2::{AppContext, Executor};
use isahc::{http::StatusCode, Request, RequestExt};
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::{
env,
fmt::{self, Display},
io,
sync::Arc,
};
use util::ResultExt;
use crate::{
auth::{CredentialProvider, ProviderCredential},
completion::{CompletionProvider, CompletionRequest},
models::LanguageModel,
};
use crate::providers::open_ai::{OpenAILanguageModel, OPENAI_API_URL};
#[derive(Clone, Copy, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
User,
Assistant,
System,
}
impl Role {
pub fn cycle(&mut self) {
*self = match self {
Role::User => Role::Assistant,
Role::Assistant => Role::System,
Role::System => Role::User,
}
}
}
impl Display for Role {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Role::User => write!(f, "User"),
Role::Assistant => write!(f, "Assistant"),
Role::System => write!(f, "System"),
}
}
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct RequestMessage {
pub role: Role,
pub content: String,
}
#[derive(Debug, Default, Serialize)]
pub struct OpenAIRequest {
pub model: String,
pub messages: Vec<RequestMessage>,
pub stream: bool,
pub stop: Vec<String>,
pub temperature: f32,
}
impl CompletionRequest for OpenAIRequest {
fn data(&self) -> serde_json::Result<String> {
serde_json::to_string(self)
}
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct ResponseMessage {
pub role: Option<Role>,
pub content: Option<String>,
}
#[derive(Deserialize, Debug)]
pub struct OpenAIUsage {
pub prompt_tokens: u32,
pub completion_tokens: u32,
pub total_tokens: u32,
}
#[derive(Deserialize, Debug)]
pub struct ChatChoiceDelta {
pub index: u32,
pub delta: ResponseMessage,
pub finish_reason: Option<String>,
}
#[derive(Deserialize, Debug)]
pub struct OpenAIResponseStreamEvent {
pub id: Option<String>,
pub object: String,
pub created: u32,
pub model: String,
pub choices: Vec<ChatChoiceDelta>,
pub usage: Option<OpenAIUsage>,
}
pub async fn stream_completion(
credential: ProviderCredential,
executor: Arc<Executor>,
request: Box<dyn CompletionRequest>,
) -> Result<impl Stream<Item = Result<OpenAIResponseStreamEvent>>> {
let api_key = match credential {
ProviderCredential::Credentials { api_key } => api_key,
_ => {
return Err(anyhow!("no credentials provider for completion"));
}
};
let (tx, rx) = futures::channel::mpsc::unbounded::<Result<OpenAIResponseStreamEvent>>();
let json_data = request.data()?;
let mut response = Request::post(format!("{OPENAI_API_URL}/chat/completions"))
.header("Content-Type", "application/json")
.header("Authorization", format!("Bearer {}", api_key))
.body(json_data)?
.send_async()
.await?;
let status = response.status();
if status == StatusCode::OK {
executor
.spawn(async move {
let mut lines = BufReader::new(response.body_mut()).lines();
fn parse_line(
line: Result<String, io::Error>,
) -> Result<Option<OpenAIResponseStreamEvent>> {
if let Some(data) = line?.strip_prefix("data: ") {
let event = serde_json::from_str(&data)?;
Ok(Some(event))
} else {
Ok(None)
}
}
while let Some(line) = lines.next().await {
if let Some(event) = parse_line(line).transpose() {
let done = event.as_ref().map_or(false, |event| {
event
.choices
.last()
.map_or(false, |choice| choice.finish_reason.is_some())
});
if tx.unbounded_send(event).is_err() {
break;
}
if done {
break;
}
}
}
anyhow::Ok(())
})
.detach();
Ok(rx)
} else {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
#[derive(Deserialize)]
struct OpenAIResponse {
error: OpenAIError,
}
#[derive(Deserialize)]
struct OpenAIError {
message: String,
}
match serde_json::from_str::<OpenAIResponse>(&body) {
Ok(response) if !response.error.message.is_empty() => Err(anyhow!(
"Failed to connect to OpenAI API: {}",
response.error.message,
)),
_ => Err(anyhow!(
"Failed to connect to OpenAI API: {} {}",
response.status(),
body,
)),
}
}
}
#[derive(Clone)]
pub struct OpenAICompletionProvider {
model: OpenAILanguageModel,
credential: Arc<RwLock<ProviderCredential>>,
executor: Arc<Executor>,
}
impl OpenAICompletionProvider {
pub fn new(model_name: &str, executor: Arc<Executor>) -> Self {
let model = OpenAILanguageModel::load(model_name);
let credential = Arc::new(RwLock::new(ProviderCredential::NoCredentials));
Self {
model,
credential,
executor,
}
}
}
#[async_trait]
impl CredentialProvider for OpenAICompletionProvider {
fn has_credentials(&self) -> bool {
match *self.credential.read() {
ProviderCredential::Credentials { .. } => true,
_ => false,
}
}
async fn retrieve_credentials(&self, cx: &mut AppContext) -> ProviderCredential {
let existing_credential = self.credential.read().clone();
let retrieved_credential = cx
.run_on_main(move |cx| match existing_credential {
ProviderCredential::Credentials { .. } => {
return existing_credential.clone();
}
_ => {
if let Some(api_key) = env::var("OPENAI_API_KEY").log_err() {
return ProviderCredential::Credentials { api_key };
}
if let Some(Some((_, api_key))) = cx.read_credentials(OPENAI_API_URL).log_err()
{
if let Some(api_key) = String::from_utf8(api_key).log_err() {
return ProviderCredential::Credentials { api_key };
} else {
return ProviderCredential::NoCredentials;
}
} else {
return ProviderCredential::NoCredentials;
}
}
})
.await;
*self.credential.write() = retrieved_credential.clone();
retrieved_credential
}
async fn save_credentials(&self, cx: &mut AppContext, credential: ProviderCredential) {
*self.credential.write() = credential.clone();
let credential = credential.clone();
cx.run_on_main(move |cx| match credential {
ProviderCredential::Credentials { api_key } => {
cx.write_credentials(OPENAI_API_URL, "Bearer", api_key.as_bytes())
.log_err();
}
_ => {}
})
.await;
}
async fn delete_credentials(&self, cx: &mut AppContext) {
cx.run_on_main(move |cx| cx.delete_credentials(OPENAI_API_URL).log_err())
.await;
*self.credential.write() = ProviderCredential::NoCredentials;
}
}
impl CompletionProvider for OpenAICompletionProvider {
fn base_model(&self) -> Box<dyn LanguageModel> {
let model: Box<dyn LanguageModel> = Box::new(self.model.clone());
model
}
fn complete(
&self,
prompt: Box<dyn CompletionRequest>,
) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
// Currently the CompletionRequest for OpenAI includes a 'model' parameter.
// This means the model is determined by the CompletionRequest and not by the
// CompletionProvider, even though the provider itself is model-based via its
// language model. At some point in the future we should rectify this.
let credential = self.credential.read().clone();
let request = stream_completion(credential, self.executor.clone(), prompt);
async move {
let response = request.await?;
let stream = response
.filter_map(|response| async move {
match response {
Ok(mut response) => Some(Ok(response.choices.pop()?.delta.content?)),
Err(error) => Some(Err(error)),
}
})
.boxed();
Ok(stream)
}
.boxed()
}
fn box_clone(&self) -> Box<dyn CompletionProvider> {
Box::new((*self).clone())
}
}
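
As the comment in `complete` notes, the model name travels with the request rather than the provider. A minimal caller sketch, assuming a simple serializable request type (`SketchRequest`, its fields, and the "gpt-4" model name are illustrative, not part of this change):

#[derive(serde::Serialize)]
struct SketchRequest {
    model: String,
    prompt: String,
}

impl CompletionRequest for SketchRequest {
    fn data(&self) -> serde_json::Result<String> {
        serde_json::to_string(self)
    }
}

async fn sketch(provider: OpenAICompletionProvider) -> anyhow::Result<()> {
    use futures::StreamExt;
    // The provider streams back chunks of text as they arrive from the API.
    let request = Box::new(SketchRequest {
        model: "gpt-4".into(),
        prompt: "Say hello".into(),
    });
    let mut stream = provider.complete(request).await?;
    while let Some(chunk) = stream.next().await {
        print!("{}", chunk?);
    }
    Ok(())
}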

View file

@ -0,0 +1,313 @@
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use futures::AsyncReadExt;
use gpui2::Executor;
use gpui2::{serde_json, AppContext};
use isahc::http::StatusCode;
use isahc::prelude::Configurable;
use isahc::{AsyncBody, Response};
use lazy_static::lazy_static;
use parking_lot::{Mutex, RwLock};
use parse_duration::parse;
use postage::watch;
use serde::{Deserialize, Serialize};
use std::env;
use std::ops::Add;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tiktoken_rs::{cl100k_base, CoreBPE};
use util::http::{HttpClient, Request};
use util::ResultExt;
use crate::auth::{CredentialProvider, ProviderCredential};
use crate::embedding::{Embedding, EmbeddingProvider};
use crate::models::LanguageModel;
use crate::providers::open_ai::OpenAILanguageModel;
use crate::providers::open_ai::OPENAI_API_URL;
lazy_static! {
static ref OPENAI_BPE_TOKENIZER: CoreBPE = cl100k_base().unwrap();
}
#[derive(Clone)]
pub struct OpenAIEmbeddingProvider {
model: OpenAILanguageModel,
credential: Arc<RwLock<ProviderCredential>>,
pub client: Arc<dyn HttpClient>,
pub executor: Arc<Executor>,
rate_limit_count_rx: watch::Receiver<Option<Instant>>,
rate_limit_count_tx: Arc<Mutex<watch::Sender<Option<Instant>>>>,
}
#[derive(Serialize)]
struct OpenAIEmbeddingRequest<'a> {
model: &'static str,
input: Vec<&'a str>,
}
#[derive(Deserialize)]
struct OpenAIEmbeddingResponse {
data: Vec<OpenAIEmbedding>,
usage: OpenAIEmbeddingUsage,
}
#[derive(Debug, Deserialize)]
struct OpenAIEmbedding {
embedding: Vec<f32>,
index: usize,
object: String,
}
#[derive(Deserialize)]
struct OpenAIEmbeddingUsage {
prompt_tokens: usize,
total_tokens: usize,
}
impl OpenAIEmbeddingProvider {
pub fn new(client: Arc<dyn HttpClient>, executor: Arc<Executor>) -> Self {
let (rate_limit_count_tx, rate_limit_count_rx) = watch::channel_with(None);
let rate_limit_count_tx = Arc::new(Mutex::new(rate_limit_count_tx));
let model = OpenAILanguageModel::load("text-embedding-ada-002");
let credential = Arc::new(RwLock::new(ProviderCredential::NoCredentials));
OpenAIEmbeddingProvider {
model,
credential,
client,
executor,
rate_limit_count_rx,
rate_limit_count_tx,
}
}
fn get_api_key(&self) -> Result<String> {
match self.credential.read().clone() {
ProviderCredential::Credentials { api_key } => Ok(api_key),
_ => Err(anyhow!("api credentials not provided")),
}
}
fn resolve_rate_limit(&self) {
let reset_time = *self.rate_limit_count_tx.lock().borrow();
if let Some(reset_time) = reset_time {
if Instant::now() >= reset_time {
*self.rate_limit_count_tx.lock().borrow_mut() = None
}
}
log::trace!(
"resolving reset time: {:?}",
*self.rate_limit_count_tx.lock().borrow()
);
}
fn update_reset_time(&self, reset_time: Instant) {
let original_time = *self.rate_limit_count_tx.lock().borrow();
let updated_time = if let Some(original_time) = original_time {
if reset_time < original_time {
Some(reset_time)
} else {
Some(original_time)
}
} else {
Some(reset_time)
};
log::trace!("updating rate limit time: {:?}", updated_time);
*self.rate_limit_count_tx.lock().borrow_mut() = updated_time;
}
async fn send_request(
&self,
api_key: &str,
spans: Vec<&str>,
request_timeout: u64,
) -> Result<Response<AsyncBody>> {
let request = Request::post("https://api.openai.com/v1/embeddings")
.redirect_policy(isahc::config::RedirectPolicy::Follow)
.timeout(Duration::from_secs(request_timeout))
.header("Content-Type", "application/json")
.header("Authorization", format!("Bearer {}", api_key))
.body(
serde_json::to_string(&OpenAIEmbeddingRequest {
input: spans.clone(),
model: "text-embedding-ada-002",
})
.unwrap()
.into(),
)?;
Ok(self.client.send(request).await?)
}
}
#[async_trait]
impl CredentialProvider for OpenAIEmbeddingProvider {
fn has_credentials(&self) -> bool {
match *self.credential.read() {
ProviderCredential::Credentials { .. } => true,
_ => false,
}
}
async fn retrieve_credentials(&self, cx: &mut AppContext) -> ProviderCredential {
let existing_credential = self.credential.read().clone();
let retrieved_credential = cx
.run_on_main(move |cx| match existing_credential {
ProviderCredential::Credentials { .. } => {
return existing_credential.clone();
}
_ => {
if let Some(api_key) = env::var("OPENAI_API_KEY").log_err() {
return ProviderCredential::Credentials { api_key };
}
if let Some(Some((_, api_key))) = cx.read_credentials(OPENAI_API_URL).log_err()
{
if let Some(api_key) = String::from_utf8(api_key).log_err() {
return ProviderCredential::Credentials { api_key };
} else {
return ProviderCredential::NoCredentials;
}
} else {
return ProviderCredential::NoCredentials;
}
}
})
.await;
*self.credential.write() = retrieved_credential.clone();
retrieved_credential
}
async fn save_credentials(&self, cx: &mut AppContext, credential: ProviderCredential) {
*self.credential.write() = credential.clone();
let credential = credential.clone();
cx.run_on_main(move |cx| match credential {
ProviderCredential::Credentials { api_key } => {
cx.write_credentials(OPENAI_API_URL, "Bearer", api_key.as_bytes())
.log_err();
}
_ => {}
})
.await;
}
async fn delete_credentials(&self, cx: &mut AppContext) {
cx.run_on_main(move |cx| cx.delete_credentials(OPENAI_API_URL).log_err())
.await;
*self.credential.write() = ProviderCredential::NoCredentials;
}
}
#[async_trait]
impl EmbeddingProvider for OpenAIEmbeddingProvider {
fn base_model(&self) -> Box<dyn LanguageModel> {
let model: Box<dyn LanguageModel> = Box::new(self.model.clone());
model
}
fn max_tokens_per_batch(&self) -> usize {
50000
}
fn rate_limit_expiration(&self) -> Option<Instant> {
*self.rate_limit_count_rx.borrow()
}
async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>> {
const BACKOFF_SECONDS: [usize; 4] = [3, 5, 15, 45];
const MAX_RETRIES: usize = 4;
let api_key = self.get_api_key()?;
let mut request_number = 0;
let mut rate_limiting = false;
let mut request_timeout: u64 = 15;
let mut response: Response<AsyncBody>;
while request_number < MAX_RETRIES {
response = self
.send_request(
&api_key,
spans.iter().map(|x| &**x).collect(),
request_timeout,
)
.await?;
request_number += 1;
match response.status() {
StatusCode::REQUEST_TIMEOUT => {
request_timeout += 5;
}
StatusCode::OK => {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
let response: OpenAIEmbeddingResponse = serde_json::from_str(&body)?;
log::trace!(
"openai embedding completed. tokens: {:?}",
response.usage.total_tokens
);
// If we complete a request successfully that was previously rate-limited,
// resolve the rate limit.
if rate_limiting {
self.resolve_rate_limit()
}
return Ok(response
.data
.into_iter()
.map(|embedding| Embedding::from(embedding.embedding))
.collect());
}
StatusCode::TOO_MANY_REQUESTS => {
rate_limiting = true;
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
let delay_duration = {
let delay = Duration::from_secs(BACKOFF_SECONDS[request_number - 1] as u64);
if let Some(time_to_reset) =
response.headers().get("x-ratelimit-reset-tokens")
{
if let Ok(time_str) = time_to_reset.to_str() {
parse(time_str).unwrap_or(delay)
} else {
delay
}
} else {
delay
}
};
// If we've previously rate limited, increment the duration but not the count
let reset_time = Instant::now().add(delay_duration);
self.update_reset_time(reset_time);
log::trace!(
"openai rate limiting: waiting {:?} until lifted",
&delay_duration
);
self.executor.timer(delay_duration).await;
}
_ => {
let mut body = String::new();
response.body_mut().read_to_string(&mut body).await?;
return Err(anyhow!(
"open ai bad request: {:?} {:?}",
&response.status(),
body
));
}
}
}
Err(anyhow!("openai max retries"))
}
}
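
For reference, the delay selection in the rate-limit branch above boils down to: prefer the server-provided `x-ratelimit-reset-tokens` duration when it parses, otherwise fall back to the backoff table. A standalone sketch (illustrative only; the "6s" header value is a made-up example):

fn sketch_delay(attempt: usize, reset_header: Option<&str>) -> std::time::Duration {
    const BACKOFF_SECONDS: [u64; 4] = [3, 5, 15, 45];
    // `attempt` is 1-based, matching `request_number` after the increment above.
    let fallback = std::time::Duration::from_secs(BACKOFF_SECONDS[attempt - 1]);
    reset_header
        .and_then(|value| parse_duration::parse(value).ok())
        .unwrap_or(fallback)
}

// sketch_delay(1, Some("6s")) == Duration::from_secs(6)
// sketch_delay(2, None)       == Duration::from_secs(5)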

View file

@ -0,0 +1,9 @@
pub mod completion;
pub mod embedding;
pub mod model;
pub use completion::*;
pub use embedding::*;
pub use model::OpenAILanguageModel;
pub const OPENAI_API_URL: &'static str = "https://api.openai.com/v1";

View file

@ -0,0 +1,57 @@
use anyhow::anyhow;
use tiktoken_rs::CoreBPE;
use util::ResultExt;
use crate::models::{LanguageModel, TruncationDirection};
#[derive(Clone)]
pub struct OpenAILanguageModel {
name: String,
bpe: Option<CoreBPE>,
}
impl OpenAILanguageModel {
pub fn load(model_name: &str) -> Self {
let bpe = tiktoken_rs::get_bpe_from_model(model_name).log_err();
OpenAILanguageModel {
name: model_name.to_string(),
bpe,
}
}
}
impl LanguageModel for OpenAILanguageModel {
fn name(&self) -> String {
self.name.clone()
}
fn count_tokens(&self, content: &str) -> anyhow::Result<usize> {
if let Some(bpe) = &self.bpe {
anyhow::Ok(bpe.encode_with_special_tokens(content).len())
} else {
Err(anyhow!("bpe for open ai model was not retrieved"))
}
}
fn truncate(
&self,
content: &str,
length: usize,
direction: TruncationDirection,
) -> anyhow::Result<String> {
if let Some(bpe) = &self.bpe {
let tokens = bpe.encode_with_special_tokens(content);
if tokens.len() > length {
match direction {
TruncationDirection::End => bpe.decode(tokens[..length].to_vec()),
TruncationDirection::Start => bpe.decode(tokens[length..].to_vec()),
}
} else {
bpe.decode(tokens)
}
} else {
Err(anyhow!("bpe for open ai model was not retrieved"))
}
}
fn capacity(&self) -> anyhow::Result<usize> {
anyhow::Ok(tiktoken_rs::model::get_context_size(&self.name))
}
}
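
A small usage sketch for the model above (not part of this diff; the "gpt-3.5-turbo" name is assumed to be one tiktoken-rs recognizes, and `TruncationDirection` is the import at the top of this file):

fn sketch_tokenization() -> anyhow::Result<()> {
    let model = OpenAILanguageModel::load("gpt-3.5-turbo");
    let text = "fn main() { println!(\"hello\"); }";
    let tokens = model.count_tokens(text)?;
    // TruncationDirection::End keeps the leading `length` tokens;
    // TruncationDirection::Start keeps the trailing ones.
    let head = model.truncate(text, 4, TruncationDirection::End)?;
    println!("{tokens} tokens total; first four decode to {head:?}");
    Ok(())
}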

View file

@ -0,0 +1,11 @@
pub trait LanguageModel {
fn name(&self) -> String;
fn count_tokens(&self, content: &str) -> anyhow::Result<usize>;
fn truncate(
&self,
content: &str,
length: usize,
direction: TruncationDirection,
) -> anyhow::Result<String>;
fn capacity(&self) -> anyhow::Result<usize>;
}

193
crates/ai2/src/test.rs Normal file
View file

@ -0,0 +1,193 @@
use std::{
sync::atomic::{self, AtomicUsize, Ordering},
time::Instant,
};
use async_trait::async_trait;
use futures::{channel::mpsc, future::BoxFuture, stream::BoxStream, FutureExt, StreamExt};
use gpui2::AppContext;
use parking_lot::Mutex;
use crate::{
auth::{CredentialProvider, ProviderCredential},
completion::{CompletionProvider, CompletionRequest},
embedding::{Embedding, EmbeddingProvider},
models::{LanguageModel, TruncationDirection},
};
#[derive(Clone)]
pub struct FakeLanguageModel {
pub capacity: usize,
}
impl LanguageModel for FakeLanguageModel {
fn name(&self) -> String {
"dummy".to_string()
}
fn count_tokens(&self, content: &str) -> anyhow::Result<usize> {
anyhow::Ok(content.chars().collect::<Vec<char>>().len())
}
fn truncate(
&self,
content: &str,
length: usize,
direction: TruncationDirection,
) -> anyhow::Result<String> {
println!("TRYING TO TRUNCATE: {:?}", length.clone());
if length > self.count_tokens(content)? {
println!("NOT TRUNCATING");
return anyhow::Ok(content.to_string());
}
anyhow::Ok(match direction {
TruncationDirection::End => content.chars().collect::<Vec<char>>()[..length]
.into_iter()
.collect::<String>(),
TruncationDirection::Start => content.chars().collect::<Vec<char>>()[length..]
.into_iter()
.collect::<String>(),
})
}
fn capacity(&self) -> anyhow::Result<usize> {
anyhow::Ok(self.capacity)
}
}
pub struct FakeEmbeddingProvider {
pub embedding_count: AtomicUsize,
}
impl Clone for FakeEmbeddingProvider {
fn clone(&self) -> Self {
FakeEmbeddingProvider {
embedding_count: AtomicUsize::new(self.embedding_count.load(Ordering::SeqCst)),
}
}
}
impl Default for FakeEmbeddingProvider {
fn default() -> Self {
FakeEmbeddingProvider {
embedding_count: AtomicUsize::default(),
}
}
}
impl FakeEmbeddingProvider {
pub fn embedding_count(&self) -> usize {
self.embedding_count.load(atomic::Ordering::SeqCst)
}
pub fn embed_sync(&self, span: &str) -> Embedding {
let mut result = vec![1.0; 26];
for letter in span.chars() {
let letter = letter.to_ascii_lowercase();
if letter as u32 >= 'a' as u32 {
let ix = (letter as u32) - ('a' as u32);
if ix < 26 {
result[ix as usize] += 1.0;
}
}
}
let norm = result.iter().map(|x| x * x).sum::<f32>().sqrt();
for x in &mut result {
*x /= norm;
}
result.into()
}
}
#[async_trait]
impl CredentialProvider for FakeEmbeddingProvider {
fn has_credentials(&self) -> bool {
true
}
async fn retrieve_credentials(&self, _cx: &mut AppContext) -> ProviderCredential {
ProviderCredential::NotNeeded
}
async fn save_credentials(&self, _cx: &mut AppContext, _credential: ProviderCredential) {}
async fn delete_credentials(&self, _cx: &mut AppContext) {}
}
#[async_trait]
impl EmbeddingProvider for FakeEmbeddingProvider {
fn base_model(&self) -> Box<dyn LanguageModel> {
Box::new(FakeLanguageModel { capacity: 1000 })
}
fn max_tokens_per_batch(&self) -> usize {
1000
}
fn rate_limit_expiration(&self) -> Option<Instant> {
None
}
async fn embed_batch(&self, spans: Vec<String>) -> anyhow::Result<Vec<Embedding>> {
self.embedding_count
.fetch_add(spans.len(), atomic::Ordering::SeqCst);
anyhow::Ok(spans.iter().map(|span| self.embed_sync(span)).collect())
}
}
pub struct FakeCompletionProvider {
last_completion_tx: Mutex<Option<mpsc::Sender<String>>>,
}
impl Clone for FakeCompletionProvider {
fn clone(&self) -> Self {
Self {
last_completion_tx: Mutex::new(None),
}
}
}
impl FakeCompletionProvider {
pub fn new() -> Self {
Self {
last_completion_tx: Mutex::new(None),
}
}
pub fn send_completion(&self, completion: impl Into<String>) {
let mut tx = self.last_completion_tx.lock();
tx.as_mut().unwrap().try_send(completion.into()).unwrap();
}
pub fn finish_completion(&self) {
self.last_completion_tx.lock().take().unwrap();
}
}
#[async_trait]
impl CredentialProvider for FakeCompletionProvider {
fn has_credentials(&self) -> bool {
true
}
async fn retrieve_credentials(&self, _cx: &mut AppContext) -> ProviderCredential {
ProviderCredential::NotNeeded
}
async fn save_credentials(&self, _cx: &mut AppContext, _credential: ProviderCredential) {}
async fn delete_credentials(&self, _cx: &mut AppContext) {}
}
impl CompletionProvider for FakeCompletionProvider {
fn base_model(&self) -> Box<dyn LanguageModel> {
let model: Box<dyn LanguageModel> = Box::new(FakeLanguageModel { capacity: 8190 });
model
}
fn complete(
&self,
_prompt: Box<dyn CompletionRequest>,
) -> BoxFuture<'static, anyhow::Result<BoxStream<'static, anyhow::Result<String>>>> {
let (tx, rx) = mpsc::channel(1);
*self.last_completion_tx.lock() = Some(tx);
async move { Ok(rx.map(|rx| Ok(rx)).boxed()) }.boxed()
}
fn box_clone(&self) -> Box<dyn CompletionProvider> {
Box::new((*self).clone())
}
}

View file

@ -17,13 +17,17 @@ fs = { path = "../fs" }
gpui = { path = "../gpui" }
language = { path = "../language" }
menu = { path = "../menu" }
multi_buffer = { path = "../multi_buffer" }
search = { path = "../search" }
settings = { path = "../settings" }
theme = { path = "../theme" }
util = { path = "../util" }
workspace = { path = "../workspace" }
uuid.workspace = true
semantic_index = { path = "../semantic_index" }
project = { path = "../project" }
uuid.workspace = true
log.workspace = true
anyhow.workspace = true
chrono = { version = "0.4", features = ["serde"] }
futures.workspace = true
@ -36,11 +40,12 @@ schemars.workspace = true
serde.workspace = true
serde_json.workspace = true
smol.workspace = true
tiktoken-rs = "0.4"
tiktoken-rs = "0.5"
[dev-dependencies]
editor = { path = "../editor", features = ["test-support"] }
project = { path = "../project", features = ["test-support"] }
ai = { path = "../ai", features = ["test-support"]}
ctor.workspace = true
env_logger.workspace = true

View file

@ -4,7 +4,7 @@ mod codegen;
mod prompts;
mod streaming_diff;
use ai::completion::Role;
use ai::providers::open_ai::Role;
use anyhow::Result;
pub use assistant_panel::AssistantPanel;
use assistant_settings::OpenAIModel;

File diff suppressed because it is too large.

View file

@ -1,10 +1,11 @@
use crate::streaming_diff::{Hunk, StreamingDiff};
use ai::completion::{CompletionProvider, OpenAIRequest};
use ai::completion::{CompletionProvider, CompletionRequest};
use anyhow::Result;
use editor::{multi_buffer, Anchor, MultiBuffer, MultiBufferSnapshot, ToOffset, ToPoint};
use editor::{Anchor, MultiBuffer, MultiBufferSnapshot, ToOffset, ToPoint};
use futures::{channel::mpsc, SinkExt, Stream, StreamExt};
use gpui::{Entity, ModelContext, ModelHandle, Task};
use language::{Rope, TransactionId};
use multi_buffer;
use std::{cmp, future, ops::Range, sync::Arc};
pub enum Event {
@ -95,7 +96,7 @@ impl Codegen {
self.error.as_ref()
}
pub fn start(&mut self, prompt: OpenAIRequest, cx: &mut ModelContext<Self>) {
pub fn start(&mut self, prompt: Box<dyn CompletionRequest>, cx: &mut ModelContext<Self>) {
let range = self.range();
let snapshot = self.snapshot.clone();
let selected_text = snapshot
@ -335,17 +336,25 @@ fn strip_markdown_codeblock(
#[cfg(test)]
mod tests {
use super::*;
use futures::{
future::BoxFuture,
stream::{self, BoxStream},
};
use ai::test::FakeCompletionProvider;
use futures::stream::{self};
use gpui::{executor::Deterministic, TestAppContext};
use indoc::indoc;
use language::{language_settings, tree_sitter_rust, Buffer, Language, LanguageConfig, Point};
use parking_lot::Mutex;
use rand::prelude::*;
use serde::Serialize;
use settings::SettingsStore;
use smol::future::FutureExt;
#[derive(Serialize)]
pub struct DummyCompletionRequest {
pub name: String,
}
impl CompletionRequest for DummyCompletionRequest {
fn data(&self) -> serde_json::Result<String> {
serde_json::to_string(self)
}
}
#[gpui::test(iterations = 10)]
async fn test_transform_autoindent(
@ -371,7 +380,7 @@ mod tests {
let snapshot = buffer.snapshot(cx);
snapshot.anchor_before(Point::new(1, 0))..snapshot.anchor_after(Point::new(4, 5))
});
let provider = Arc::new(TestCompletionProvider::new());
let provider = Arc::new(FakeCompletionProvider::new());
let codegen = cx.add_model(|cx| {
Codegen::new(
buffer.clone(),
@ -380,7 +389,11 @@ mod tests {
cx,
)
});
codegen.update(cx, |codegen, cx| codegen.start(Default::default(), cx));
let request = Box::new(DummyCompletionRequest {
name: "test".to_string(),
});
codegen.update(cx, |codegen, cx| codegen.start(request, cx));
let mut new_text = concat!(
" let mut x = 0;\n",
@ -433,7 +446,7 @@ mod tests {
let snapshot = buffer.snapshot(cx);
snapshot.anchor_before(Point::new(1, 6))
});
let provider = Arc::new(TestCompletionProvider::new());
let provider = Arc::new(FakeCompletionProvider::new());
let codegen = cx.add_model(|cx| {
Codegen::new(
buffer.clone(),
@ -442,7 +455,11 @@ mod tests {
cx,
)
});
codegen.update(cx, |codegen, cx| codegen.start(Default::default(), cx));
let request = Box::new(DummyCompletionRequest {
name: "test".to_string(),
});
codegen.update(cx, |codegen, cx| codegen.start(request, cx));
let mut new_text = concat!(
"t mut x = 0;\n",
@ -495,7 +512,7 @@ mod tests {
let snapshot = buffer.snapshot(cx);
snapshot.anchor_before(Point::new(1, 2))
});
let provider = Arc::new(TestCompletionProvider::new());
let provider = Arc::new(FakeCompletionProvider::new());
let codegen = cx.add_model(|cx| {
Codegen::new(
buffer.clone(),
@ -504,7 +521,11 @@ mod tests {
cx,
)
});
codegen.update(cx, |codegen, cx| codegen.start(Default::default(), cx));
let request = Box::new(DummyCompletionRequest {
name: "test".to_string(),
});
codegen.update(cx, |codegen, cx| codegen.start(request, cx));
let mut new_text = concat!(
"let mut x = 0;\n",
@ -592,38 +613,6 @@ mod tests {
}
}
struct TestCompletionProvider {
last_completion_tx: Mutex<Option<mpsc::Sender<String>>>,
}
impl TestCompletionProvider {
fn new() -> Self {
Self {
last_completion_tx: Mutex::new(None),
}
}
fn send_completion(&self, completion: impl Into<String>) {
let mut tx = self.last_completion_tx.lock();
tx.as_mut().unwrap().try_send(completion.into()).unwrap();
}
fn finish_completion(&self) {
self.last_completion_tx.lock().take().unwrap();
}
}
impl CompletionProvider for TestCompletionProvider {
fn complete(
&self,
_prompt: OpenAIRequest,
) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
let (tx, rx) = mpsc::channel(1);
*self.last_completion_tx.lock() = Some(tx);
async move { Ok(rx.map(|rx| Ok(rx)).boxed()) }.boxed()
}
}
fn rust_lang() -> Language {
Language::new(
LanguageConfig {

View file

@ -1,8 +1,14 @@
use crate::codegen::CodegenKind;
use ai::models::LanguageModel;
use ai::prompts::base::{PromptArguments, PromptChain, PromptPriority, PromptTemplate};
use ai::prompts::file_context::FileContext;
use ai::prompts::generate::GenerateInlineContent;
use ai::prompts::preamble::EngineerPreamble;
use ai::prompts::repository_context::{PromptCodeSnippet, RepositoryContext};
use ai::providers::open_ai::OpenAILanguageModel;
use language::{BufferSnapshot, OffsetRangeExt, ToOffset};
use std::cmp::{self, Reverse};
use std::fmt::Write;
use std::ops::Range;
use std::sync::Arc;
#[allow(dead_code)]
fn summarize(buffer: &BufferSnapshot, selected_range: Range<impl ToOffset>) -> String {
@ -118,86 +124,50 @@ fn summarize(buffer: &BufferSnapshot, selected_range: Range<impl ToOffset>) -> S
pub fn generate_content_prompt(
user_prompt: String,
language_name: Option<&str>,
buffer: &BufferSnapshot,
range: Range<impl ToOffset>,
kind: CodegenKind,
) -> String {
let range = range.to_offset(buffer);
let mut prompt = String::new();
// General Preamble
if let Some(language_name) = language_name {
writeln!(prompt, "You're an expert {language_name} engineer.\n").unwrap();
buffer: BufferSnapshot,
range: Range<usize>,
search_results: Vec<PromptCodeSnippet>,
model: &str,
project_name: Option<String>,
) -> anyhow::Result<String> {
// Using new Prompt Templates
let openai_model: Arc<dyn LanguageModel> = Arc::new(OpenAILanguageModel::load(model));
let lang_name = if let Some(language_name) = language_name {
Some(language_name.to_string())
} else {
writeln!(prompt, "You're an expert engineer.\n").unwrap();
}
None
};
let mut content = String::new();
content.extend(buffer.text_for_range(0..range.start));
if range.start == range.end {
content.push_str("<|START|>");
} else {
content.push_str("<|START|");
}
content.extend(buffer.text_for_range(range.clone()));
if range.start != range.end {
content.push_str("|END|>");
}
content.extend(buffer.text_for_range(range.end..buffer.len()));
let args = PromptArguments {
model: openai_model,
language_name: lang_name.clone(),
project_name,
snippets: search_results.clone(),
reserved_tokens: 1000,
buffer: Some(buffer),
selected_range: Some(range),
user_prompt: Some(user_prompt.clone()),
};
writeln!(
prompt,
"The file you are currently working on has the following content:"
)
.unwrap();
if let Some(language_name) = language_name {
let language_name = language_name.to_lowercase();
writeln!(prompt, "```{language_name}\n{content}\n```").unwrap();
} else {
writeln!(prompt, "```\n{content}\n```").unwrap();
}
let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
(PromptPriority::Mandatory, Box::new(EngineerPreamble {})),
(
PromptPriority::Ordered { order: 1 },
Box::new(RepositoryContext {}),
),
(
PromptPriority::Ordered { order: 0 },
Box::new(FileContext {}),
),
(
PromptPriority::Mandatory,
Box::new(GenerateInlineContent {}),
),
];
let chain = PromptChain::new(args, templates);
let (prompt, _) = chain.generate(true)?;
match kind {
CodegenKind::Generate { position: _ } => {
writeln!(prompt, "In particular, the user's cursor is current on the '<|START|>' span in the above outline, with no text selected.").unwrap();
writeln!(
prompt,
"Assume the cursor is located where the `<|START|` marker is."
)
.unwrap();
writeln!(
prompt,
"Text can't be replaced, so assume your answer will be inserted at the cursor."
)
.unwrap();
writeln!(
prompt,
"Generate text based on the users prompt: {user_prompt}"
)
.unwrap();
}
CodegenKind::Transform { range: _ } => {
writeln!(prompt, "In particular, the user has selected a section of the text between the '<|START|' and '|END|>' spans.").unwrap();
writeln!(
prompt,
"Modify the users code selected text based upon the users prompt: {user_prompt}"
)
.unwrap();
writeln!(
prompt,
"You MUST reply with only the adjusted code (within the '<|START|' and '|END|>' spans), not the entire file."
)
.unwrap();
}
}
if let Some(language_name) = language_name {
writeln!(prompt, "Your answer MUST always be valid {language_name}").unwrap();
}
writeln!(prompt, "Always wrap your response in a Markdown codeblock").unwrap();
writeln!(prompt, "Never make remarks about the output.").unwrap();
prompt
anyhow::Ok(prompt)
}
#[cfg(test)]

View file

@ -10,7 +10,7 @@ use client::{
ZED_ALWAYS_ACTIVE,
};
use collections::HashSet;
use futures::{future::Shared, FutureExt};
use futures::{channel::oneshot, future::Shared, Future, FutureExt};
use gpui::{
AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Subscription, Task,
WeakModelHandle,
@ -37,10 +37,42 @@ pub struct IncomingCall {
pub initial_project: Option<proto::ParticipantProject>,
}
pub struct OneAtATime {
cancel: Option<oneshot::Sender<()>>,
}
impl OneAtATime {
/// Spawn a task in the given context.
/// If another task is spawned before the first one resolves, or if the `OneAtATime` itself
/// is dropped, the first task is cancelled and resolves to `Ok(None)`.
/// Otherwise you'll see the result of the task.
fn spawn<F, Fut, R>(&mut self, cx: &mut AppContext, f: F) -> Task<Result<Option<R>>>
where
F: 'static + FnOnce(AsyncAppContext) -> Fut,
Fut: Future<Output = Result<R>>,
R: 'static,
{
let (tx, rx) = oneshot::channel();
self.cancel.replace(tx);
cx.spawn(|cx| async move {
futures::select_biased! {
_ = rx.fuse() => Ok(None),
result = f(cx).fuse() => result.map(Some),
}
})
}
fn running(&self) -> bool {
self.cancel
.as_ref()
.is_some_and(|cancel| !cancel.is_canceled())
}
}
/// Singleton global maintaining the user's participation in a room across workspaces.
pub struct ActiveCall {
room: Option<(ModelHandle<Room>, Vec<Subscription>)>,
pending_room_creation: Option<Shared<Task<Result<ModelHandle<Room>, Arc<anyhow::Error>>>>>,
_join_debouncer: OneAtATime,
location: Option<WeakModelHandle<Project>>,
pending_invites: HashSet<u64>,
incoming_call: (
@ -69,6 +101,7 @@ impl ActiveCall {
pending_invites: Default::default(),
incoming_call: watch::channel(),
_join_debouncer: OneAtATime { cancel: None },
_subscriptions: vec![
client.add_request_handler(cx.handle(), Self::handle_incoming_call),
client.add_message_handler(cx.handle(), Self::handle_call_canceled),
@ -143,6 +176,10 @@ impl ActiveCall {
}
cx.notify();
if self._join_debouncer.running() {
return Task::ready(Ok(()));
}
let room = if let Some(room) = self.room().cloned() {
Some(Task::ready(Ok(room)).shared())
} else {
@ -259,11 +296,20 @@ impl ActiveCall {
return Task::ready(Err(anyhow!("no incoming call")));
};
let join = Room::join(&call, self.client.clone(), self.user_store.clone(), cx);
if self.pending_room_creation.is_some() {
return Task::ready(Ok(()));
}
let room_id = call.room_id.clone();
let client = self.client.clone();
let user_store = self.user_store.clone();
let join = self
._join_debouncer
.spawn(cx, move |cx| Room::join(room_id, client, user_store, cx));
cx.spawn(|this, mut cx| async move {
let room = join.await?;
this.update(&mut cx, |this, cx| this.set_room(Some(room.clone()), cx))
this.update(&mut cx, |this, cx| this.set_room(room.clone(), cx))
.await?;
this.update(&mut cx, |this, cx| {
this.report_call_event("accept incoming", cx)
@ -290,20 +336,28 @@ impl ActiveCall {
&mut self,
channel_id: u64,
cx: &mut ModelContext<Self>,
) -> Task<Result<ModelHandle<Room>>> {
) -> Task<Result<Option<ModelHandle<Room>>>> {
if let Some(room) = self.room().cloned() {
if room.read(cx).channel_id() == Some(channel_id) {
return Task::ready(Ok(room));
return Task::ready(Ok(Some(room)));
} else {
room.update(cx, |room, cx| room.clear_state(cx));
}
}
let join = Room::join_channel(channel_id, self.client.clone(), self.user_store.clone(), cx);
if self.pending_room_creation.is_some() {
return Task::ready(Ok(None));
}
cx.spawn(|this, mut cx| async move {
let client = self.client.clone();
let user_store = self.user_store.clone();
let join = self._join_debouncer.spawn(cx, move |cx| async move {
Room::join_channel(channel_id, client, user_store, cx).await
});
cx.spawn(move |this, mut cx| async move {
let room = join.await?;
this.update(&mut cx, |this, cx| this.set_room(Some(room.clone()), cx))
this.update(&mut cx, |this, cx| this.set_room(room.clone(), cx))
.await?;
this.update(&mut cx, |this, cx| {
this.report_call_event("join channel", cx)
@ -457,3 +511,40 @@ pub fn report_call_event_for_channel(
};
telemetry.report_clickhouse_event(event, telemetry_settings);
}
#[cfg(test)]
mod test {
use gpui::TestAppContext;
use crate::OneAtATime;
#[gpui::test]
async fn test_one_at_a_time(cx: &mut TestAppContext) {
let mut one_at_a_time = OneAtATime { cancel: None };
assert_eq!(
cx.update(|cx| one_at_a_time.spawn(cx, |_| async { Ok(1) }))
.await
.unwrap(),
Some(1)
);
let (a, b) = cx.update(|cx| {
(
one_at_a_time.spawn(cx, |_| async {
assert!(false);
Ok(2)
}),
one_at_a_time.spawn(cx, |_| async { Ok(3) }),
)
});
assert_eq!(a.await.unwrap(), None);
assert_eq!(b.await.unwrap(), Some(3));
let promise = cx.update(|cx| one_at_a_time.spawn(cx, |_| async { Ok(4) }));
drop(one_at_a_time);
assert_eq!(promise.await.unwrap(), None);
}
}

View file

@ -1,7 +1,6 @@
use crate::{
call_settings::CallSettings,
participant::{LocalParticipant, ParticipantLocation, RemoteParticipant, RemoteVideoTrack},
IncomingCall,
};
use anyhow::{anyhow, Result};
use audio::{Audio, Sound};
@ -55,7 +54,7 @@ pub enum Event {
pub struct Room {
id: u64,
channel_id: Option<u64>,
pub channel_id: Option<u64>,
live_kit: Option<LiveKitRoom>,
status: RoomStatus,
shared_projects: HashSet<WeakModelHandle<Project>>,
@ -122,6 +121,10 @@ impl Room {
}
}
pub fn can_publish(&self) -> bool {
self.live_kit.as_ref().is_some_and(|room| room.can_publish)
}
fn new(
id: u64,
channel_id: Option<u64>,
@ -181,20 +184,23 @@ impl Room {
});
let connect = room.connect(&connection_info.server_url, &connection_info.token);
cx.spawn(|this, mut cx| async move {
connect.await?;
if connection_info.can_publish {
cx.spawn(|this, mut cx| async move {
connect.await?;
if !cx.read(Self::mute_on_join) {
this.update(&mut cx, |this, cx| this.share_microphone(cx))
.await?;
}
if !cx.read(Self::mute_on_join) {
this.update(&mut cx, |this, cx| this.share_microphone(cx))
.await?;
}
anyhow::Ok(())
})
.detach_and_log_err(cx);
anyhow::Ok(())
})
.detach_and_log_err(cx);
}
Some(LiveKitRoom {
room,
can_publish: connection_info.can_publish,
screen_track: LocalTrack::None,
microphone_track: LocalTrack::None,
next_publish_id: 0,
@ -284,37 +290,32 @@ impl Room {
})
}
pub(crate) fn join_channel(
pub(crate) async fn join_channel(
channel_id: u64,
client: Arc<Client>,
user_store: ModelHandle<UserStore>,
cx: &mut AppContext,
) -> Task<Result<ModelHandle<Self>>> {
cx.spawn(|cx| async move {
Self::from_join_response(
client.request(proto::JoinChannel { channel_id }).await?,
client,
user_store,
cx,
)
})
cx: AsyncAppContext,
) -> Result<ModelHandle<Self>> {
Self::from_join_response(
client.request(proto::JoinChannel { channel_id }).await?,
client,
user_store,
cx,
)
}
pub(crate) fn join(
call: &IncomingCall,
pub(crate) async fn join(
room_id: u64,
client: Arc<Client>,
user_store: ModelHandle<UserStore>,
cx: &mut AppContext,
) -> Task<Result<ModelHandle<Self>>> {
let id = call.room_id;
cx.spawn(|cx| async move {
Self::from_join_response(
client.request(proto::JoinRoom { id }).await?,
client,
user_store,
cx,
)
})
cx: AsyncAppContext,
) -> Result<ModelHandle<Self>> {
Self::from_join_response(
client.request(proto::JoinRoom { id: room_id }).await?,
client,
user_store,
cx,
)
}
pub fn mute_on_join(cx: &AppContext) -> bool {
@ -1251,7 +1252,7 @@ impl Room {
.read_with(&cx, |this, _| {
this.live_kit
.as_ref()
.map(|live_kit| live_kit.room.publish_audio_track(&track))
.map(|live_kit| live_kit.room.publish_audio_track(track))
})
.ok_or_else(|| anyhow!("live-kit was not initialized"))?
.await
@ -1337,7 +1338,7 @@ impl Room {
.read_with(&cx, |this, _| {
this.live_kit
.as_ref()
.map(|live_kit| live_kit.room.publish_video_track(&track))
.map(|live_kit| live_kit.room.publish_video_track(track))
})
.ok_or_else(|| anyhow!("live-kit was not initialized"))?
.await
@ -1498,6 +1499,7 @@ struct LiveKitRoom {
deafened: bool,
speaking: bool,
next_publish_id: usize,
can_publish: bool,
_maintain_room: Task<()>,
_maintain_tracks: [Task<()>; 2],
}

View file

@ -12,8 +12,8 @@ use client2::{
use collections::HashSet;
use futures::{future::Shared, FutureExt};
use gpui2::{
AppContext, AsyncAppContext, Context, EventEmitter, Handle, ModelContext, Subscription, Task,
WeakHandle,
AppContext, AsyncAppContext, Context, EventEmitter, Model, ModelContext, Subscription, Task,
WeakModel,
};
use postage::watch;
use project2::Project;
@ -23,10 +23,10 @@ use std::sync::Arc;
pub use participant::ParticipantLocation;
pub use room::Room;
pub fn init(client: Arc<Client>, user_store: Handle<UserStore>, cx: &mut AppContext) {
pub fn init(client: Arc<Client>, user_store: Model<UserStore>, cx: &mut AppContext) {
CallSettings::register(cx);
let active_call = cx.entity(|cx| ActiveCall::new(client, user_store, cx));
let active_call = cx.build_model(|cx| ActiveCall::new(client, user_store, cx));
cx.set_global(active_call);
}
@ -40,16 +40,16 @@ pub struct IncomingCall {
/// Singleton global maintaining the user's participation in a room across workspaces.
pub struct ActiveCall {
room: Option<(Handle<Room>, Vec<Subscription>)>,
pending_room_creation: Option<Shared<Task<Result<Handle<Room>, Arc<anyhow::Error>>>>>,
location: Option<WeakHandle<Project>>,
room: Option<(Model<Room>, Vec<Subscription>)>,
pending_room_creation: Option<Shared<Task<Result<Model<Room>, Arc<anyhow::Error>>>>>,
location: Option<WeakModel<Project>>,
pending_invites: HashSet<u64>,
incoming_call: (
watch::Sender<Option<IncomingCall>>,
watch::Receiver<Option<IncomingCall>>,
),
client: Arc<Client>,
user_store: Handle<UserStore>,
user_store: Model<UserStore>,
_subscriptions: Vec<client2::Subscription>,
}
@ -58,11 +58,7 @@ impl EventEmitter for ActiveCall {
}
impl ActiveCall {
fn new(
client: Arc<Client>,
user_store: Handle<UserStore>,
cx: &mut ModelContext<Self>,
) -> Self {
fn new(client: Arc<Client>, user_store: Model<UserStore>, cx: &mut ModelContext<Self>) -> Self {
Self {
room: None,
pending_room_creation: None,
@ -71,8 +67,8 @@ impl ActiveCall {
incoming_call: watch::channel(),
_subscriptions: vec![
client.add_request_handler(cx.weak_handle(), Self::handle_incoming_call),
client.add_message_handler(cx.weak_handle(), Self::handle_call_canceled),
client.add_request_handler(cx.weak_model(), Self::handle_incoming_call),
client.add_message_handler(cx.weak_model(), Self::handle_call_canceled),
],
client,
user_store,
@ -84,7 +80,7 @@ impl ActiveCall {
}
async fn handle_incoming_call(
this: Handle<Self>,
this: Model<Self>,
envelope: TypedEnvelope<proto::IncomingCall>,
_: Arc<Client>,
mut cx: AsyncAppContext,
@ -112,7 +108,7 @@ impl ActiveCall {
}
async fn handle_call_canceled(
this: Handle<Self>,
this: Model<Self>,
envelope: TypedEnvelope<proto::CallCanceled>,
_: Arc<Client>,
mut cx: AsyncAppContext,
@ -129,14 +125,14 @@ impl ActiveCall {
Ok(())
}
pub fn global(cx: &AppContext) -> Handle<Self> {
cx.global::<Handle<Self>>().clone()
pub fn global(cx: &AppContext) -> Model<Self> {
cx.global::<Model<Self>>().clone()
}
pub fn invite(
&mut self,
called_user_id: u64,
initial_project: Option<Handle<Project>>,
initial_project: Option<Model<Project>>,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
if !self.pending_invites.insert(called_user_id) {
@ -291,7 +287,7 @@ impl ActiveCall {
&mut self,
channel_id: u64,
cx: &mut ModelContext<Self>,
) -> Task<Result<Handle<Room>>> {
) -> Task<Result<Model<Room>>> {
if let Some(room) = self.room().cloned() {
if room.read(cx).channel_id() == Some(channel_id) {
return Task::ready(Ok(room));
@ -327,7 +323,7 @@ impl ActiveCall {
pub fn share_project(
&mut self,
project: Handle<Project>,
project: Model<Project>,
cx: &mut ModelContext<Self>,
) -> Task<Result<u64>> {
if let Some((room, _)) = self.room.as_ref() {
@ -340,7 +336,7 @@ impl ActiveCall {
pub fn unshare_project(
&mut self,
project: Handle<Project>,
project: Model<Project>,
cx: &mut ModelContext<Self>,
) -> Result<()> {
if let Some((room, _)) = self.room.as_ref() {
@ -351,13 +347,13 @@ impl ActiveCall {
}
}
pub fn location(&self) -> Option<&WeakHandle<Project>> {
pub fn location(&self) -> Option<&WeakModel<Project>> {
self.location.as_ref()
}
pub fn set_location(
&mut self,
project: Option<&Handle<Project>>,
project: Option<&Model<Project>>,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
if project.is_some() || !*ZED_ALWAYS_ACTIVE {
@ -371,7 +367,7 @@ impl ActiveCall {
fn set_room(
&mut self,
room: Option<Handle<Room>>,
room: Option<Model<Room>>,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
if room.as_ref() != self.room.as_ref().map(|room| &room.0) {
@ -407,7 +403,7 @@ impl ActiveCall {
}
}
pub fn room(&self) -> Option<&Handle<Room>> {
pub fn room(&self) -> Option<&Model<Room>> {
self.room.as_ref().map(|(room, _)| room)
}

View file

@ -1,10 +1,8 @@
use anyhow::{anyhow, Result};
use client2::ParticipantIndex;
use client2::{proto, User};
use collections::HashMap;
use gpui2::WeakHandle;
use gpui2::WeakModel;
pub use live_kit_client::Frame;
use live_kit_client::RemoteAudioTrack;
use project2::Project;
use std::{fmt, sync::Arc};
@ -35,7 +33,7 @@ impl ParticipantLocation {
#[derive(Clone, Default)]
pub struct LocalParticipant {
pub projects: Vec<proto::ParticipantProject>,
pub active_project: Option<WeakHandle<Project>>,
pub active_project: Option<WeakModel<Project>>,
}
#[derive(Clone, Debug)]
@ -47,8 +45,8 @@ pub struct RemoteParticipant {
pub participant_index: ParticipantIndex,
pub muted: bool,
pub speaking: bool,
pub video_tracks: HashMap<live_kit_client::Sid, Arc<RemoteVideoTrack>>,
pub audio_tracks: HashMap<live_kit_client::Sid, Arc<RemoteAudioTrack>>,
// pub video_tracks: HashMap<live_kit_client::Sid, Arc<RemoteVideoTrack>>,
// pub audio_tracks: HashMap<live_kit_client::Sid, Arc<RemoteAudioTrack>>,
}
#[derive(Clone)]

File diff suppressed because it is too large.

View file

@ -7,10 +7,11 @@ use gpui::{AppContext, ModelHandle};
use std::sync::Arc;
pub use channel_buffer::{ChannelBuffer, ChannelBufferEvent, ACKNOWLEDGE_DEBOUNCE_INTERVAL};
pub use channel_chat::{ChannelChat, ChannelChatEvent, ChannelMessage, ChannelMessageId};
pub use channel_store::{
Channel, ChannelData, ChannelEvent, ChannelId, ChannelMembership, ChannelPath, ChannelStore,
pub use channel_chat::{
mentions_to_proto, ChannelChat, ChannelChatEvent, ChannelMessage, ChannelMessageId,
MessageParams,
};
pub use channel_store::{Channel, ChannelEvent, ChannelId, ChannelMembership, ChannelStore};
#[cfg(test)]
mod channel_store_tests;

View file

@ -1,4 +1,4 @@
use crate::Channel;
use crate::{Channel, ChannelId, ChannelStore};
use anyhow::Result;
use client::{Client, Collaborator, UserStore};
use collections::HashMap;
@ -19,10 +19,11 @@ pub(crate) fn init(client: &Arc<Client>) {
}
pub struct ChannelBuffer {
pub(crate) channel: Arc<Channel>,
pub channel_id: ChannelId,
connected: bool,
collaborators: HashMap<PeerId, Collaborator>,
user_store: ModelHandle<UserStore>,
channel_store: ModelHandle<ChannelStore>,
buffer: ModelHandle<language::Buffer>,
buffer_epoch: u64,
client: Arc<Client>,
@ -34,6 +35,7 @@ pub enum ChannelBufferEvent {
CollaboratorsChanged,
Disconnected,
BufferEdited,
ChannelChanged,
}
impl Entity for ChannelBuffer {
@ -46,7 +48,7 @@ impl Entity for ChannelBuffer {
}
self.client
.send(proto::LeaveChannelBuffer {
channel_id: self.channel.id,
channel_id: self.channel_id,
})
.log_err();
}
@ -58,6 +60,7 @@ impl ChannelBuffer {
channel: Arc<Channel>,
client: Arc<Client>,
user_store: ModelHandle<UserStore>,
channel_store: ModelHandle<ChannelStore>,
mut cx: AsyncAppContext,
) -> Result<ModelHandle<Self>> {
let response = client
@ -90,9 +93,10 @@ impl ChannelBuffer {
connected: true,
collaborators: Default::default(),
acknowledge_task: None,
channel,
channel_id: channel.id,
subscription: Some(subscription.set_model(&cx.handle(), &mut cx.to_async())),
user_store,
channel_store,
};
this.replace_collaborators(response.collaborators, cx);
this
@ -179,7 +183,7 @@ impl ChannelBuffer {
let operation = language::proto::serialize_operation(operation);
self.client
.send(proto::UpdateChannelBuffer {
channel_id: self.channel.id,
channel_id: self.channel_id,
operations: vec![operation],
})
.log_err();
@ -223,12 +227,15 @@ impl ChannelBuffer {
&self.collaborators
}
pub fn channel(&self) -> Arc<Channel> {
self.channel.clone()
pub fn channel(&self, cx: &AppContext) -> Option<Arc<Channel>> {
self.channel_store
.read(cx)
.channel_for_id(self.channel_id)
.cloned()
}
pub(crate) fn disconnect(&mut self, cx: &mut ModelContext<Self>) {
log::info!("channel buffer {} disconnected", self.channel.id);
log::info!("channel buffer {} disconnected", self.channel_id);
if self.connected {
self.connected = false;
self.subscription.take();
@ -237,6 +244,11 @@ impl ChannelBuffer {
}
}
pub(crate) fn channel_changed(&mut self, cx: &mut ModelContext<Self>) {
cx.emit(ChannelBufferEvent::ChannelChanged);
cx.notify()
}
pub fn is_connected(&self) -> bool {
self.connected
}

View file

@ -3,19 +3,25 @@ use anyhow::{anyhow, Result};
use client::{
proto,
user::{User, UserStore},
Client, Subscription, TypedEnvelope,
Client, Subscription, TypedEnvelope, UserId,
};
use futures::lock::Mutex;
use gpui::{AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
use rand::prelude::*;
use std::{collections::HashSet, mem, ops::Range, sync::Arc};
use std::{
collections::HashSet,
mem,
ops::{ControlFlow, Range},
sync::Arc,
};
use sum_tree::{Bias, SumTree};
use time::OffsetDateTime;
use util::{post_inc, ResultExt as _, TryFutureExt};
pub struct ChannelChat {
channel: Arc<Channel>,
pub channel_id: ChannelId,
messages: SumTree<ChannelMessage>,
acknowledged_message_ids: HashSet<u64>,
channel_store: ModelHandle<ChannelStore>,
loaded_all_messages: bool,
last_acknowledged_id: Option<u64>,
@ -27,6 +33,12 @@ pub struct ChannelChat {
_subscription: Subscription,
}
#[derive(Debug, PartialEq, Eq)]
pub struct MessageParams {
pub text: String,
pub mentions: Vec<(Range<usize>, UserId)>,
}
#[derive(Clone, Debug)]
pub struct ChannelMessage {
pub id: ChannelMessageId,
@ -34,6 +46,7 @@ pub struct ChannelMessage {
pub timestamp: OffsetDateTime,
pub sender: Arc<User>,
pub nonce: u128,
pub mentions: Vec<(Range<usize>, UserId)>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -74,7 +87,7 @@ impl Entity for ChannelChat {
fn release(&mut self, _: &mut AppContext) {
self.rpc
.send(proto::LeaveChannelChat {
channel_id: self.channel.id,
channel_id: self.channel_id,
})
.log_err();
}
@ -99,12 +112,13 @@ impl ChannelChat {
Ok(cx.add_model(|cx| {
let mut this = Self {
channel,
channel_id: channel.id,
user_store,
channel_store,
rpc: client,
outgoing_messages_lock: Default::default(),
messages: Default::default(),
acknowledged_message_ids: Default::default(),
loaded_all_messages,
next_pending_message_id: 0,
last_acknowledged_id: None,
@ -116,16 +130,23 @@ impl ChannelChat {
}))
}
pub fn channel(&self) -> &Arc<Channel> {
&self.channel
pub fn channel(&self, cx: &AppContext) -> Option<Arc<Channel>> {
self.channel_store
.read(cx)
.channel_for_id(self.channel_id)
.cloned()
}
pub fn client(&self) -> &Arc<Client> {
&self.rpc
}
pub fn send_message(
&mut self,
body: String,
message: MessageParams,
cx: &mut ModelContext<Self>,
) -> Result<Task<Result<()>>> {
if body.is_empty() {
) -> Result<Task<Result<u64>>> {
if message.text.is_empty() {
Err(anyhow!("message body can't be empty"))?;
}
@ -135,16 +156,17 @@ impl ChannelChat {
.current_user()
.ok_or_else(|| anyhow!("current_user is not present"))?;
let channel_id = self.channel.id;
let channel_id = self.channel_id;
let pending_id = ChannelMessageId::Pending(post_inc(&mut self.next_pending_message_id));
let nonce = self.rng.gen();
self.insert_messages(
SumTree::from_item(
ChannelMessage {
id: pending_id,
body: body.clone(),
body: message.text.clone(),
sender: current_user,
timestamp: OffsetDateTime::now_utc(),
mentions: message.mentions.clone(),
nonce,
},
&(),
@ -158,27 +180,25 @@ impl ChannelChat {
let outgoing_message_guard = outgoing_messages_lock.lock().await;
let request = rpc.request(proto::SendChannelMessage {
channel_id,
body,
body: message.text,
nonce: Some(nonce.into()),
mentions: mentions_to_proto(&message.mentions),
});
let response = request.await?;
drop(outgoing_message_guard);
let message = ChannelMessage::from_proto(
response.message.ok_or_else(|| anyhow!("invalid message"))?,
&user_store,
&mut cx,
)
.await?;
let response = response.message.ok_or_else(|| anyhow!("invalid message"))?;
let id = response.id;
let message = ChannelMessage::from_proto(response, &user_store, &mut cx).await?;
this.update(&mut cx, |this, cx| {
this.insert_messages(SumTree::from_item(message, &()), cx);
Ok(())
Ok(id)
})
}))
}
pub fn remove_message(&mut self, id: u64, cx: &mut ModelContext<Self>) -> Task<Result<()>> {
let response = self.rpc.request(proto::RemoveChannelMessage {
channel_id: self.channel.id,
channel_id: self.channel_id,
message_id: id,
});
cx.spawn(|this, mut cx| async move {
@ -191,41 +211,76 @@ impl ChannelChat {
})
}
pub fn load_more_messages(&mut self, cx: &mut ModelContext<Self>) -> bool {
if !self.loaded_all_messages {
let rpc = self.rpc.clone();
let user_store = self.user_store.clone();
let channel_id = self.channel.id;
if let Some(before_message_id) =
self.messages.first().and_then(|message| match message.id {
ChannelMessageId::Saved(id) => Some(id),
ChannelMessageId::Pending(_) => None,
})
{
cx.spawn(|this, mut cx| {
async move {
let response = rpc
.request(proto::GetChannelMessages {
channel_id,
before_message_id,
})
.await?;
let loaded_all_messages = response.done;
let messages =
messages_from_proto(response.messages, &user_store, &mut cx).await?;
this.update(&mut cx, |this, cx| {
this.loaded_all_messages = loaded_all_messages;
this.insert_messages(messages, cx);
});
anyhow::Ok(())
pub fn load_more_messages(&mut self, cx: &mut ModelContext<Self>) -> Option<Task<Option<()>>> {
if self.loaded_all_messages {
return None;
}
let rpc = self.rpc.clone();
let user_store = self.user_store.clone();
let channel_id = self.channel_id;
let before_message_id = self.first_loaded_message_id()?;
Some(cx.spawn(|this, mut cx| {
async move {
let response = rpc
.request(proto::GetChannelMessages {
channel_id,
before_message_id,
})
.await?;
let loaded_all_messages = response.done;
let messages = messages_from_proto(response.messages, &user_store, &mut cx).await?;
this.update(&mut cx, |this, cx| {
this.loaded_all_messages = loaded_all_messages;
this.insert_messages(messages, cx);
});
anyhow::Ok(())
}
.log_err()
}))
}
pub fn first_loaded_message_id(&mut self) -> Option<u64> {
self.messages.first().and_then(|message| match message.id {
ChannelMessageId::Saved(id) => Some(id),
ChannelMessageId::Pending(_) => None,
})
}
/// Load all of the chat messages since a certain message id.
///
/// For now, we always maintain a suffix of the channel's messages.
pub async fn load_history_since_message(
chat: ModelHandle<Self>,
message_id: u64,
mut cx: AsyncAppContext,
) -> Option<usize> {
loop {
let step = chat.update(&mut cx, |chat, cx| {
if let Some(first_id) = chat.first_loaded_message_id() {
if first_id <= message_id {
let mut cursor = chat.messages.cursor::<(ChannelMessageId, Count)>();
let message_id = ChannelMessageId::Saved(message_id);
cursor.seek(&message_id, Bias::Left, &());
return ControlFlow::Break(
if cursor
.item()
.map_or(false, |message| message.id == message_id)
{
Some(cursor.start().1 .0)
} else {
None
},
);
}
.log_err()
})
.detach();
return true;
}
ControlFlow::Continue(chat.load_more_messages(cx))
});
match step {
ControlFlow::Break(ix) => return ix,
ControlFlow::Continue(task) => task?.await?,
}
}
false
}
pub fn acknowledge_last_message(&mut self, cx: &mut ModelContext<Self>) {
@ -236,13 +291,13 @@ impl ChannelChat {
{
self.rpc
.send(proto::AckChannelMessage {
channel_id: self.channel.id,
channel_id: self.channel_id,
message_id: latest_message_id,
})
.ok();
self.last_acknowledged_id = Some(latest_message_id);
self.channel_store.update(cx, |store, cx| {
store.acknowledge_message_id(self.channel.id, latest_message_id, cx);
store.acknowledge_message_id(self.channel_id, latest_message_id, cx);
});
}
}
@ -251,7 +306,7 @@ impl ChannelChat {
pub fn rejoin(&mut self, cx: &mut ModelContext<Self>) {
let user_store = self.user_store.clone();
let rpc = self.rpc.clone();
let channel_id = self.channel.id;
let channel_id = self.channel_id;
cx.spawn(|this, mut cx| {
async move {
let response = rpc.request(proto::JoinChannelChat { channel_id }).await?;
@ -284,6 +339,7 @@ impl ChannelChat {
let request = rpc.request(proto::SendChannelMessage {
channel_id,
body: pending_message.body,
mentions: mentions_to_proto(&pending_message.mentions),
nonce: Some(pending_message.nonce.into()),
});
let response = request.await?;
@ -319,6 +375,17 @@ impl ChannelChat {
cursor.item().unwrap()
}
pub fn acknowledge_message(&mut self, id: u64) {
if self.acknowledged_message_ids.insert(id) {
self.rpc
.send(proto::AckChannelMessage {
channel_id: self.channel_id,
message_id: id,
})
.ok();
}
}
pub fn messages_in_range(&self, range: Range<usize>) -> impl Iterator<Item = &ChannelMessage> {
let mut cursor = self.messages.cursor::<Count>();
cursor.seek(&Count(range.start), Bias::Right, &());
@ -348,7 +415,7 @@ impl ChannelChat {
this.update(&mut cx, |this, cx| {
this.insert_messages(SumTree::from_item(message, &()), cx);
cx.emit(ChannelChatEvent::NewMessage {
channel_id: this.channel.id,
channel_id: this.channel_id,
message_id,
})
});
@ -451,22 +518,7 @@ async fn messages_from_proto(
user_store: &ModelHandle<UserStore>,
cx: &mut AsyncAppContext,
) -> Result<SumTree<ChannelMessage>> {
let unique_user_ids = proto_messages
.iter()
.map(|m| m.sender_id)
.collect::<HashSet<_>>()
.into_iter()
.collect();
user_store
.update(cx, |user_store, cx| {
user_store.get_users(unique_user_ids, cx)
})
.await?;
let mut messages = Vec::with_capacity(proto_messages.len());
for message in proto_messages {
messages.push(ChannelMessage::from_proto(message, user_store, cx).await?);
}
let messages = ChannelMessage::from_proto_vec(proto_messages, user_store, cx).await?;
let mut result = SumTree::new();
result.extend(messages, &());
Ok(result)
@ -486,6 +538,14 @@ impl ChannelMessage {
Ok(ChannelMessage {
id: ChannelMessageId::Saved(message.id),
body: message.body,
mentions: message
.mentions
.into_iter()
.filter_map(|mention| {
let range = mention.range?;
Some((range.start as usize..range.end as usize, mention.user_id))
})
.collect(),
timestamp: OffsetDateTime::from_unix_timestamp(message.timestamp as i64)?,
sender,
nonce: message
@ -498,6 +558,43 @@ impl ChannelMessage {
pub fn is_pending(&self) -> bool {
matches!(self.id, ChannelMessageId::Pending(_))
}
pub async fn from_proto_vec(
proto_messages: Vec<proto::ChannelMessage>,
user_store: &ModelHandle<UserStore>,
cx: &mut AsyncAppContext,
) -> Result<Vec<Self>> {
let unique_user_ids = proto_messages
.iter()
.map(|m| m.sender_id)
.collect::<HashSet<_>>()
.into_iter()
.collect();
user_store
.update(cx, |user_store, cx| {
user_store.get_users(unique_user_ids, cx)
})
.await?;
let mut messages = Vec::with_capacity(proto_messages.len());
for message in proto_messages {
messages.push(ChannelMessage::from_proto(message, user_store, cx).await?);
}
Ok(messages)
}
}
pub fn mentions_to_proto(mentions: &[(Range<usize>, UserId)]) -> Vec<proto::ChatMention> {
mentions
.iter()
.map(|(range, user_id)| proto::ChatMention {
range: Some(proto::Range {
start: range.start as u64,
end: range.end as u64,
}),
user_id: *user_id as u64,
})
.collect()
}
impl sum_tree::Item for ChannelMessage {
@ -538,3 +635,12 @@ impl<'a> sum_tree::Dimension<'a, ChannelMessageSummary> for Count {
self.0 += summary.count;
}
}
impl<'a> From<&'a str> for MessageParams {
fn from(value: &'a str) -> Self {
Self {
text: value.into(),
mentions: Vec::new(),
}
}
}
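
For illustration (not part of this change): a `MessageParams` with one mention, as it would be passed to `ChannelChat::send_message`; the user id and byte offsets are placeholders.

fn sketch_message(alice_id: UserId) -> MessageParams {
    let text = String::from("hey @alice, the build is green");
    MessageParams {
        // "@alice" spans offsets 4..10 of the text above; mentions_to_proto turns
        // each (range, user_id) pair into a proto::ChatMention for SendChannelMessage.
        mentions: vec![(4..10, alice_id)],
        text,
    }
}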

View file

@ -1,6 +1,6 @@
mod channel_index;
use crate::{channel_buffer::ChannelBuffer, channel_chat::ChannelChat};
use crate::{channel_buffer::ChannelBuffer, channel_chat::ChannelChat, ChannelMessage};
use anyhow::{anyhow, Result};
use channel_index::ChannelIndex;
use client::{Client, Subscription, User, UserId, UserStore};
@ -9,11 +9,10 @@ use db::RELEASE_CHANNEL;
use futures::{channel::mpsc, future::Shared, Future, FutureExt, StreamExt};
use gpui::{AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task, WeakModelHandle};
use rpc::{
proto::{self, ChannelEdge, ChannelPermission},
proto::{self, ChannelVisibility},
TypedEnvelope,
};
use serde_derive::{Deserialize, Serialize};
use std::{borrow::Cow, hash::Hash, mem, ops::Deref, sync::Arc, time::Duration};
use std::{mem, sync::Arc, time::Duration};
use util::ResultExt;
pub fn init(client: &Arc<Client>, user_store: ModelHandle<UserStore>, cx: &mut AppContext) {
@ -27,10 +26,9 @@ pub const RECONNECT_TIMEOUT: Duration = Duration::from_secs(30);
pub type ChannelId = u64;
pub struct ChannelStore {
channel_index: ChannelIndex,
pub channel_index: ChannelIndex,
channel_invitations: Vec<Arc<Channel>>,
channel_participants: HashMap<ChannelId, Vec<Arc<User>>>,
channels_with_admin_privileges: HashSet<ChannelId>,
outgoing_invites: HashSet<(ChannelId, UserId)>,
update_channels_tx: mpsc::UnboundedSender<proto::UpdateChannels>,
opened_buffers: HashMap<ChannelId, OpenedModelHandle<ChannelBuffer>>,
@ -43,14 +41,15 @@ pub struct ChannelStore {
_update_channels: Task<()>,
}
pub type ChannelData = (Channel, ChannelPath);
#[derive(Clone, Debug, PartialEq)]
pub struct Channel {
pub id: ChannelId,
pub name: String,
pub visibility: proto::ChannelVisibility,
pub role: proto::ChannelRole,
pub unseen_note_version: Option<(u64, clock::Global)>,
pub unseen_message_id: Option<u64>,
pub parent_path: Vec<u64>,
}
impl Channel {
@ -71,15 +70,41 @@ impl Channel {
slug.trim_matches(|c| c == '-').to_string()
}
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ChannelPath(Arc<[ChannelId]>);
pub fn can_edit_notes(&self) -> bool {
self.role == proto::ChannelRole::Member || self.role == proto::ChannelRole::Admin
}
}
pub struct ChannelMembership {
pub user: Arc<User>,
pub kind: proto::channel_member::Kind,
pub admin: bool,
pub role: proto::ChannelRole,
}
impl ChannelMembership {
pub fn sort_key(&self) -> MembershipSortKey {
MembershipSortKey {
role_order: match self.role {
proto::ChannelRole::Admin => 0,
proto::ChannelRole::Member => 1,
proto::ChannelRole::Banned => 2,
proto::ChannelRole::Guest => 3,
},
kind_order: match self.kind {
proto::channel_member::Kind::Member => 0,
proto::channel_member::Kind::AncestorMember => 1,
proto::channel_member::Kind::Invitee => 2,
},
username_order: self.user.github_login.as_str(),
}
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq)]
pub struct MembershipSortKey<'a> {
role_order: u8,
kind_order: u8,
username_order: &'a str,
}
pub enum ChannelEvent {
@ -127,9 +152,6 @@ impl ChannelStore {
this.update(&mut cx, |this, cx| this.handle_disconnect(true, cx));
}
}
if status.is_connected() {
} else {
}
}
Some(())
});
@ -138,7 +160,6 @@ impl ChannelStore {
channel_invitations: Vec::default(),
channel_index: ChannelIndex::default(),
channel_participants: Default::default(),
channels_with_admin_privileges: Default::default(),
outgoing_invites: Default::default(),
opened_buffers: Default::default(),
opened_chats: Default::default(),
@ -167,16 +188,6 @@ impl ChannelStore {
self.client.clone()
}
pub fn has_children(&self, channel_id: ChannelId) -> bool {
self.channel_index.iter().any(|path| {
if let Some(ix) = path.iter().position(|id| *id == channel_id) {
path.len() > ix + 1
} else {
false
}
})
}
/// Returns the number of unique channels in the store
pub fn channel_count(&self) -> usize {
self.channel_index.by_id().len()
@ -196,26 +207,31 @@ impl ChannelStore {
}
/// Iterate over all entries in the channel DAG
pub fn channel_dag_entries(&self) -> impl '_ + Iterator<Item = (usize, &Arc<Channel>)> {
self.channel_index.iter().map(move |path| {
let id = path.last().unwrap();
let channel = self.channel_for_id(*id).unwrap();
(path.len() - 1, channel)
})
pub fn ordered_channels(&self) -> impl '_ + Iterator<Item = (usize, &Arc<Channel>)> {
self.channel_index
.ordered_channels()
.iter()
.filter_map(move |id| {
let channel = self.channel_index.by_id().get(id)?;
Some((channel.parent_path.len(), channel))
})
}
pub fn channel_dag_entry_at(&self, ix: usize) -> Option<(&Arc<Channel>, &ChannelPath)> {
let path = self.channel_index.get(ix)?;
let id = path.last().unwrap();
let channel = self.channel_for_id(*id).unwrap();
Some((channel, path))
pub fn channel_at_index(&self, ix: usize) -> Option<&Arc<Channel>> {
let channel_id = self.channel_index.ordered_channels().get(ix)?;
self.channel_index.by_id().get(channel_id)
}
pub fn channel_at(&self, ix: usize) -> Option<&Arc<Channel>> {
self.channel_index.by_id().values().nth(ix)
}
pub fn has_channel_invitation(&self, channel_id: ChannelId) -> bool {
self.channel_invitations
.iter()
.any(|channel| channel.id == channel_id)
}
pub fn channel_invitations(&self) -> &[Arc<Channel>] {
&self.channel_invitations
}
@ -240,14 +256,42 @@ impl ChannelStore {
) -> Task<Result<ModelHandle<ChannelBuffer>>> {
let client = self.client.clone();
let user_store = self.user_store.clone();
let channel_store = cx.handle();
self.open_channel_resource(
channel_id,
|this| &mut this.opened_buffers,
|channel, cx| ChannelBuffer::new(channel, client, user_store, cx),
|channel, cx| ChannelBuffer::new(channel, client, user_store, channel_store, cx),
cx,
)
}
pub fn fetch_channel_messages(
&self,
message_ids: Vec<u64>,
cx: &mut ModelContext<Self>,
) -> Task<Result<Vec<ChannelMessage>>> {
let request = if message_ids.is_empty() {
None
} else {
Some(
self.client
.request(proto::GetChannelMessagesById { message_ids }),
)
};
cx.spawn_weak(|this, mut cx| async move {
if let Some(request) = request {
let response = request.await?;
let this = this
.upgrade(&cx)
.ok_or_else(|| anyhow!("channel store dropped"))?;
let user_store = this.read_with(&cx, |this, _| this.user_store.clone());
ChannelMessage::from_proto_vec(response.messages, &user_store, &mut cx).await
} else {
Ok(Vec::new())
}
})
}
pub fn has_channel_buffer_changed(&self, channel_id: ChannelId) -> Option<bool> {
self.channel_index
.by_id()
@ -393,16 +437,11 @@ impl ChannelStore {
.spawn(async move { task.await.map_err(|error| anyhow!("{}", error)) })
}
pub fn is_user_admin(&self, channel_id: ChannelId) -> bool {
self.channel_index.iter().any(|path| {
if let Some(ix) = path.iter().position(|id| *id == channel_id) {
path[..=ix]
.iter()
.any(|id| self.channels_with_admin_privileges.contains(id))
} else {
false
}
})
pub fn is_channel_admin(&self, channel_id: ChannelId) -> bool {
let Some(channel) = self.channel_for_id(channel_id) else {
return false;
};
channel.role == proto::ChannelRole::Admin
}
pub fn channel_participants(&self, channel_id: ChannelId) -> &[Arc<User>] {
@ -429,24 +468,19 @@ impl ChannelStore {
.ok_or_else(|| anyhow!("missing channel in response"))?;
let channel_id = channel.id;
let parent_edge = if let Some(parent_id) = parent_id {
vec![ChannelEdge {
channel_id: channel.id,
parent_id,
}]
} else {
vec![]
};
// let parent_edge = if let Some(parent_id) = parent_id {
// vec![ChannelEdge {
// channel_id: channel.id,
// parent_id,
// }]
// } else {
// vec![]
// };
this.update(&mut cx, |this, cx| {
let task = this.update_channels(
proto::UpdateChannels {
channels: vec![channel],
insert_edge: parent_edge,
channel_permissions: vec![ChannelPermission {
channel_id,
is_admin: true,
}],
..Default::default()
},
cx,
@ -464,52 +498,34 @@ impl ChannelStore {
})
}
pub fn link_channel(
&mut self,
channel_id: ChannelId,
to: ChannelId,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
let client = self.client.clone();
cx.spawn(|_, _| async move {
let _ = client
.request(proto::LinkChannel { channel_id, to })
.await?;
Ok(())
})
}
pub fn unlink_channel(
&mut self,
channel_id: ChannelId,
from: ChannelId,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
let client = self.client.clone();
cx.spawn(|_, _| async move {
let _ = client
.request(proto::UnlinkChannel { channel_id, from })
.await?;
Ok(())
})
}
pub fn move_channel(
&mut self,
channel_id: ChannelId,
from: ChannelId,
to: ChannelId,
to: Option<ChannelId>,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
let client = self.client.clone();
cx.spawn(|_, _| async move {
let _ = client
.request(proto::MoveChannel {
.request(proto::MoveChannel { channel_id, to })
.await?;
Ok(())
})
}
pub fn set_channel_visibility(
&mut self,
channel_id: ChannelId,
visibility: ChannelVisibility,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
let client = self.client.clone();
cx.spawn(|_, _| async move {
let _ = client
.request(proto::SetChannelVisibility {
channel_id,
from,
to,
visibility: visibility.into(),
})
.await?;
@ -521,7 +537,7 @@ impl ChannelStore {
&mut self,
channel_id: ChannelId,
user_id: UserId,
admin: bool,
role: proto::ChannelRole,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
if !self.outgoing_invites.insert((channel_id, user_id)) {
@ -535,7 +551,7 @@ impl ChannelStore {
.request(proto::InviteChannelMember {
channel_id,
user_id,
admin,
role: role.into(),
})
.await;
@ -579,11 +595,11 @@ impl ChannelStore {
})
}
pub fn set_member_admin(
pub fn set_member_role(
&mut self,
channel_id: ChannelId,
user_id: UserId,
admin: bool,
role: proto::ChannelRole,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
if !self.outgoing_invites.insert((channel_id, user_id)) {
@ -594,10 +610,10 @@ impl ChannelStore {
let client = self.client.clone();
cx.spawn(|this, mut cx| async move {
let result = client
.request(proto::SetChannelMemberAdmin {
.request(proto::SetChannelMemberRole {
channel_id,
user_id,
admin,
role: role.into(),
})
.await;
@ -649,14 +665,15 @@ impl ChannelStore {
&mut self,
channel_id: ChannelId,
accept: bool,
) -> impl Future<Output = Result<()>> {
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
let client = self.client.clone();
async move {
cx.background().spawn(async move {
client
.request(proto::RespondToChannelInvite { channel_id, accept })
.await?;
Ok(())
}
})
}
pub fn get_channel_member_details(
@ -685,8 +702,8 @@ impl ChannelStore {
.filter_map(|(user, member)| {
Some(ChannelMembership {
user,
admin: member.admin,
kind: proto::channel_member::Kind::from_i32(member.kind)?,
role: member.role(),
kind: member.kind(),
})
})
.collect())
@ -724,6 +741,11 @@ impl ChannelStore {
}
fn handle_connect(&mut self, cx: &mut ModelContext<Self>) -> Task<Result<()>> {
self.channel_index.clear();
self.channel_invitations.clear();
self.channel_participants.clear();
self.channel_index.clear();
self.outgoing_invites.clear();
self.disconnect_channel_buffers_task.take();
for chat in self.opened_chats.values() {
@ -743,7 +765,7 @@ impl ChannelStore {
let channel_buffer = buffer.read(cx);
let buffer = channel_buffer.buffer().read(cx);
buffer_versions.push(proto::ChannelBufferVersion {
channel_id: channel_buffer.channel().id,
channel_id: channel_buffer.channel_id,
epoch: channel_buffer.epoch(),
version: language::proto::serialize_version(&buffer.version()),
});
@ -770,13 +792,13 @@ impl ChannelStore {
};
channel_buffer.update(cx, |channel_buffer, cx| {
let channel_id = channel_buffer.channel().id;
let channel_id = channel_buffer.channel_id;
if let Some(remote_buffer) = response
.buffers
.iter_mut()
.find(|buffer| buffer.channel_id == channel_id)
{
let channel_id = channel_buffer.channel().id;
let channel_id = channel_buffer.channel_id;
let remote_version =
language::proto::deserialize_version(&remote_buffer.version);
@ -833,12 +855,6 @@ impl ChannelStore {
}
fn handle_disconnect(&mut self, wait_for_reconnect: bool, cx: &mut ModelContext<Self>) {
self.channel_index.clear();
self.channel_invitations.clear();
self.channel_participants.clear();
self.channels_with_admin_privileges.clear();
self.channel_index.clear();
self.outgoing_invites.clear();
cx.notify();
self.disconnect_channel_buffers_task.get_or_insert_with(|| {
@ -881,9 +897,12 @@ impl ChannelStore {
ix,
Arc::new(Channel {
id: channel.id,
visibility: channel.visibility(),
role: channel.role(),
name: channel.name,
unseen_note_version: None,
unseen_message_id: None,
parent_path: channel.parent_path,
}),
),
}
@ -891,8 +910,6 @@ impl ChannelStore {
let channels_changed = !payload.channels.is_empty()
|| !payload.delete_channels.is_empty()
|| !payload.insert_edge.is_empty()
|| !payload.delete_edge.is_empty()
|| !payload.unseen_channel_messages.is_empty()
|| !payload.unseen_channel_buffer_changes.is_empty();
@ -900,12 +917,17 @@ impl ChannelStore {
if !payload.delete_channels.is_empty() {
self.channel_index.delete_channels(&payload.delete_channels);
self.channel_participants
.retain(|channel_id, _| !payload.delete_channels.contains(channel_id));
self.channels_with_admin_privileges
.retain(|channel_id| !payload.delete_channels.contains(channel_id));
.retain(|channel_id, _| !&payload.delete_channels.contains(channel_id));
for channel_id in &payload.delete_channels {
let channel_id = *channel_id;
if payload
.channels
.iter()
.any(|channel| channel.id == channel_id)
{
continue;
}
if let Some(OpenedModelHandle::Open(buffer)) =
self.opened_buffers.remove(&channel_id)
{
@ -918,7 +940,16 @@ impl ChannelStore {
let mut index = self.channel_index.bulk_insert();
for channel in payload.channels {
index.insert(channel)
let id = channel.id;
let channel_changed = index.insert(channel);
if channel_changed {
if let Some(OpenedModelHandle::Open(buffer)) = self.opened_buffers.get(&id) {
if let Some(buffer) = buffer.upgrade(cx) {
buffer.update(cx, ChannelBuffer::channel_changed);
}
}
}
}
for unseen_buffer_change in payload.unseen_channel_buffer_changes {
@ -936,24 +967,6 @@ impl ChannelStore {
unseen_channel_message.message_id,
);
}
for edge in payload.insert_edge {
index.insert_edge(edge.channel_id, edge.parent_id);
}
for edge in payload.delete_edge {
index.delete_edge(edge.parent_id, edge.channel_id);
}
}
for permission in payload.channel_permissions {
if permission.is_admin {
self.channels_with_admin_privileges
.insert(permission.channel_id);
} else {
self.channels_with_admin_privileges
.remove(&permission.channel_id);
}
}
cx.notify();
@ -1002,44 +1015,3 @@ impl ChannelStore {
}))
}
}
impl Deref for ChannelPath {
type Target = [ChannelId];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ChannelPath {
pub fn new(path: Arc<[ChannelId]>) -> Self {
debug_assert!(path.len() >= 1);
Self(path)
}
pub fn parent_id(&self) -> Option<ChannelId> {
self.0.len().checked_sub(2).map(|i| self.0[i])
}
pub fn channel_id(&self) -> ChannelId {
self.0[self.0.len() - 1]
}
}
impl From<ChannelPath> for Cow<'static, ChannelPath> {
fn from(value: ChannelPath) -> Self {
Cow::Owned(value)
}
}
impl<'a> From<&'a ChannelPath> for Cow<'a, ChannelPath> {
fn from(value: &'a ChannelPath) -> Self {
Cow::Borrowed(value)
}
}
impl Default for ChannelPath {
fn default() -> Self {
ChannelPath(Arc::from([]))
}
}
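
Because each Channel now carries its parent_path and ChannelIndex keeps a single name-ordered list, callers can render the tree with a plain indent-by-depth walk; a minimal sketch against the ChannelStore API above (illustrative only):

// Depth is parent_path.len(), as yielded by ordered_channels(), so no
// DAG/path bookkeeping is needed on the caller's side.
fn channel_outline(store: &ChannelStore) -> String {
    let mut out = String::new();
    for (depth, channel) in store.ordered_channels() {
        out.push_str(&"  ".repeat(depth));
        out.push_str(&channel.name);
        out.push('\n');
    }
    out
}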

View file

@ -1,14 +1,11 @@
use std::{ops::Deref, sync::Arc};
use crate::{Channel, ChannelId};
use collections::BTreeMap;
use rpc::proto;
use super::ChannelPath;
use std::sync::Arc;
#[derive(Default, Debug)]
pub struct ChannelIndex {
paths: Vec<ChannelPath>,
channels_ordered: Vec<ChannelId>,
channels_by_id: BTreeMap<ChannelId, Arc<Channel>>,
}
@ -17,8 +14,12 @@ impl ChannelIndex {
&self.channels_by_id
}
pub fn ordered_channels(&self) -> &[ChannelId] {
&self.channels_ordered
}
pub fn clear(&mut self) {
self.paths.clear();
self.channels_ordered.clear();
self.channels_by_id.clear();
}
@ -26,15 +27,13 @@ impl ChannelIndex {
pub fn delete_channels(&mut self, channels: &[ChannelId]) {
self.channels_by_id
.retain(|channel_id, _| !channels.contains(channel_id));
self.paths.retain(|path| {
path.iter()
.all(|channel_id| self.channels_by_id.contains_key(channel_id))
});
self.channels_ordered
.retain(|channel_id| !channels.contains(channel_id));
}
pub fn bulk_insert(&mut self) -> ChannelPathsInsertGuard {
ChannelPathsInsertGuard {
paths: &mut self.paths,
channels_ordered: &mut self.channels_ordered,
channels_by_id: &mut self.channels_by_id,
}
}
@ -77,42 +76,15 @@ impl ChannelIndex {
}
}
impl Deref for ChannelIndex {
type Target = [ChannelPath];
fn deref(&self) -> &Self::Target {
&self.paths
}
}
/// A guard for ensuring that the paths index maintains its sort and uniqueness
/// invariants after a series of insertions
#[derive(Debug)]
pub struct ChannelPathsInsertGuard<'a> {
paths: &'a mut Vec<ChannelPath>,
channels_ordered: &'a mut Vec<ChannelId>,
channels_by_id: &'a mut BTreeMap<ChannelId, Arc<Channel>>,
}
impl<'a> ChannelPathsInsertGuard<'a> {
/// Remove the given edge from this index. This will not remove the channel.
/// If this operation would result in a dangling edge, re-insert it.
pub fn delete_edge(&mut self, parent_id: ChannelId, channel_id: ChannelId) {
self.paths.retain(|path| {
!path
.windows(2)
.any(|window| window == [parent_id, channel_id])
});
// Ensure that there is at least one channel path in the index
if !self
.paths
.iter()
.any(|path| path.iter().any(|id| id == &channel_id))
{
self.insert_root(channel_id);
}
}
pub fn note_changed(&mut self, channel_id: ChannelId, epoch: u64, version: &clock::Global) {
insert_note_changed(&mut self.channels_by_id, channel_id, epoch, &version);
}
@ -121,91 +93,65 @@ impl<'a> ChannelPathsInsertGuard<'a> {
insert_new_message(&mut self.channels_by_id, channel_id, message_id)
}
pub fn insert(&mut self, channel_proto: proto::Channel) {
pub fn insert(&mut self, channel_proto: proto::Channel) -> bool {
let mut ret = false;
if let Some(existing_channel) = self.channels_by_id.get_mut(&channel_proto.id) {
Arc::make_mut(existing_channel).name = channel_proto.name;
let existing_channel = Arc::make_mut(existing_channel);
ret = existing_channel.visibility != channel_proto.visibility()
|| existing_channel.role != channel_proto.role()
|| existing_channel.name != channel_proto.name;
existing_channel.visibility = channel_proto.visibility();
existing_channel.role = channel_proto.role();
existing_channel.name = channel_proto.name;
} else {
self.channels_by_id.insert(
channel_proto.id,
Arc::new(Channel {
id: channel_proto.id,
visibility: channel_proto.visibility(),
role: channel_proto.role(),
name: channel_proto.name,
unseen_note_version: None,
unseen_message_id: None,
parent_path: channel_proto.parent_path,
}),
);
self.insert_root(channel_proto.id);
}
}
pub fn insert_edge(&mut self, channel_id: ChannelId, parent_id: ChannelId) {
let mut parents = Vec::new();
let mut descendants = Vec::new();
let mut ixs_to_remove = Vec::new();
for (ix, path) in self.paths.iter().enumerate() {
if path
.windows(2)
.any(|window| window[0] == parent_id && window[1] == channel_id)
{
// We already have this edge in the index
return;
}
if path.ends_with(&[parent_id]) {
parents.push(path);
} else if let Some(position) = path.iter().position(|id| id == &channel_id) {
if position == 0 {
ixs_to_remove.push(ix);
}
descendants.push(path.split_at(position).1);
}
}
let mut new_paths = Vec::new();
for parent in parents.iter() {
if descendants.is_empty() {
let mut new_path = Vec::with_capacity(parent.len() + 1);
new_path.extend_from_slice(parent);
new_path.push(channel_id);
new_paths.push(ChannelPath::new(new_path.into()));
} else {
for descendant in descendants.iter() {
let mut new_path = Vec::with_capacity(parent.len() + descendant.len());
new_path.extend_from_slice(parent);
new_path.extend_from_slice(descendant);
new_paths.push(ChannelPath::new(new_path.into()));
}
}
}
for ix in ixs_to_remove.into_iter().rev() {
self.paths.swap_remove(ix);
}
self.paths.extend(new_paths)
ret
}
fn insert_root(&mut self, channel_id: ChannelId) {
self.paths.push(ChannelPath::new(Arc::from([channel_id])));
self.channels_ordered.push(channel_id);
}
}
impl<'a> Drop for ChannelPathsInsertGuard<'a> {
fn drop(&mut self) {
self.paths.sort_by(|a, b| {
let a = channel_path_sorting_key(a, &self.channels_by_id);
let b = channel_path_sorting_key(b, &self.channels_by_id);
self.channels_ordered.sort_by(|a, b| {
let a = channel_path_sorting_key(*a, &self.channels_by_id);
let b = channel_path_sorting_key(*b, &self.channels_by_id);
a.cmp(b)
});
self.paths.dedup();
self.channels_ordered.dedup();
}
}
fn channel_path_sorting_key<'a>(
path: &'a [ChannelId],
id: ChannelId,
channels_by_id: &'a BTreeMap<ChannelId, Arc<Channel>>,
) -> impl 'a + Iterator<Item = Option<&'a str>> {
path.iter()
.map(|id| Some(channels_by_id.get(id)?.name.as_str()))
) -> impl Iterator<Item = &str> {
let (parent_path, name) = channels_by_id
.get(&id)
.map_or((&[] as &[_], None), |channel| {
(channel.parent_path.as_slice(), Some(channel.name.as_str()))
});
parent_path
.iter()
.filter_map(|id| Some(channels_by_id.get(id)?.name.as_str()))
.chain(name)
}
fn insert_note_changed(

View file

@ -3,7 +3,7 @@ use crate::channel_chat::ChannelChatEvent;
use super::*;
use client::{test::FakeServer, Client, UserStore};
use gpui::{AppContext, ModelHandle, TestAppContext};
use rpc::proto;
use rpc::proto::{self};
use settings::SettingsStore;
use util::http::FakeHttpClient;
@ -18,16 +18,18 @@ fn test_update_channels(cx: &mut AppContext) {
proto::Channel {
id: 1,
name: "b".to_string(),
visibility: proto::ChannelVisibility::Members as i32,
role: proto::ChannelRole::Admin.into(),
parent_path: Vec::new(),
},
proto::Channel {
id: 2,
name: "a".to_string(),
visibility: proto::ChannelVisibility::Members as i32,
role: proto::ChannelRole::Member.into(),
parent_path: Vec::new(),
},
],
channel_permissions: vec![proto::ChannelPermission {
channel_id: 1,
is_admin: true,
}],
..Default::default()
},
cx,
@ -36,8 +38,8 @@ fn test_update_channels(cx: &mut AppContext) {
&channel_store,
&[
//
(0, "a".to_string(), false),
(0, "b".to_string(), true),
(0, "a".to_string(), proto::ChannelRole::Member),
(0, "b".to_string(), proto::ChannelRole::Admin),
],
cx,
);
@ -49,20 +51,16 @@ fn test_update_channels(cx: &mut AppContext) {
proto::Channel {
id: 3,
name: "x".to_string(),
visibility: proto::ChannelVisibility::Members as i32,
role: proto::ChannelRole::Admin.into(),
parent_path: vec![1],
},
proto::Channel {
id: 4,
name: "y".to_string(),
},
],
insert_edge: vec![
proto::ChannelEdge {
parent_id: 1,
channel_id: 3,
},
proto::ChannelEdge {
parent_id: 2,
channel_id: 4,
visibility: proto::ChannelVisibility::Members as i32,
role: proto::ChannelRole::Member.into(),
parent_path: vec![2],
},
],
..Default::default()
@ -72,10 +70,10 @@ fn test_update_channels(cx: &mut AppContext) {
assert_channels(
&channel_store,
&[
(0, "a".to_string(), false),
(1, "y".to_string(), false),
(0, "b".to_string(), true),
(1, "x".to_string(), true),
(0, "a".to_string(), proto::ChannelRole::Member),
(1, "y".to_string(), proto::ChannelRole::Member),
(0, "b".to_string(), proto::ChannelRole::Admin),
(1, "x".to_string(), proto::ChannelRole::Admin),
],
cx,
);
@ -92,30 +90,25 @@ fn test_dangling_channel_paths(cx: &mut AppContext) {
proto::Channel {
id: 0,
name: "a".to_string(),
visibility: proto::ChannelVisibility::Members as i32,
role: proto::ChannelRole::Admin.into(),
parent_path: vec![],
},
proto::Channel {
id: 1,
name: "b".to_string(),
visibility: proto::ChannelVisibility::Members as i32,
role: proto::ChannelRole::Admin.into(),
parent_path: vec![0],
},
proto::Channel {
id: 2,
name: "c".to_string(),
visibility: proto::ChannelVisibility::Members as i32,
role: proto::ChannelRole::Admin.into(),
parent_path: vec![0, 1],
},
],
insert_edge: vec![
proto::ChannelEdge {
parent_id: 0,
channel_id: 1,
},
proto::ChannelEdge {
parent_id: 1,
channel_id: 2,
},
],
channel_permissions: vec![proto::ChannelPermission {
channel_id: 0,
is_admin: true,
}],
..Default::default()
},
cx,
@ -125,9 +118,9 @@ fn test_dangling_channel_paths(cx: &mut AppContext) {
&channel_store,
&[
//
(0, "a".to_string(), true),
(1, "b".to_string(), true),
(2, "c".to_string(), true),
(0, "a".to_string(), proto::ChannelRole::Admin),
(1, "b".to_string(), proto::ChannelRole::Admin),
(2, "c".to_string(), proto::ChannelRole::Admin),
],
cx,
);
@ -142,7 +135,11 @@ fn test_dangling_channel_paths(cx: &mut AppContext) {
);
// Make sure that the 1/2/3 path is gone
assert_channels(&channel_store, &[(0, "a".to_string(), true)], cx);
assert_channels(
&channel_store,
&[(0, "a".to_string(), proto::ChannelRole::Admin)],
cx,
);
}
#[gpui::test]
@ -158,12 +155,19 @@ async fn test_channel_messages(cx: &mut TestAppContext) {
channels: vec![proto::Channel {
id: channel_id,
name: "the-channel".to_string(),
visibility: proto::ChannelVisibility::Members as i32,
role: proto::ChannelRole::Member.into(),
parent_path: vec![],
}],
..Default::default()
});
cx.foreground().run_until_parked();
cx.read(|cx| {
assert_channels(&channel_store, &[(0, "the-channel".to_string(), false)], cx);
assert_channels(
&channel_store,
&[(0, "the-channel".to_string(), proto::ChannelRole::Member)],
cx,
);
});
let get_users = server.receive::<proto::GetUsers>().await.unwrap();
@ -181,7 +185,7 @@ async fn test_channel_messages(cx: &mut TestAppContext) {
// Join a channel and populate its existing messages.
let channel = channel_store.update(cx, |store, cx| {
let channel_id = store.channel_dag_entries().next().unwrap().1.id;
let channel_id = store.ordered_channels().next().unwrap().1.id;
store.open_channel_chat(channel_id, cx)
});
let join_channel = server.receive::<proto::JoinChannelChat>().await.unwrap();
@ -194,6 +198,7 @@ async fn test_channel_messages(cx: &mut TestAppContext) {
body: "a".into(),
timestamp: 1000,
sender_id: 5,
mentions: vec![],
nonce: Some(1.into()),
},
proto::ChannelMessage {
@ -201,6 +206,7 @@ async fn test_channel_messages(cx: &mut TestAppContext) {
body: "b".into(),
timestamp: 1001,
sender_id: 6,
mentions: vec![],
nonce: Some(2.into()),
},
],
@ -247,6 +253,7 @@ async fn test_channel_messages(cx: &mut TestAppContext) {
body: "c".into(),
timestamp: 1002,
sender_id: 7,
mentions: vec![],
nonce: Some(3.into()),
}),
});
@ -284,7 +291,7 @@ async fn test_channel_messages(cx: &mut TestAppContext) {
// Scroll up to view older messages.
channel.update(cx, |channel, cx| {
assert!(channel.load_more_messages(cx));
channel.load_more_messages(cx).unwrap().detach();
});
let get_messages = server.receive::<proto::GetChannelMessages>().await.unwrap();
assert_eq!(get_messages.payload.channel_id, 5);
@ -300,6 +307,7 @@ async fn test_channel_messages(cx: &mut TestAppContext) {
timestamp: 998,
sender_id: 5,
nonce: Some(4.into()),
mentions: vec![],
},
proto::ChannelMessage {
id: 9,
@ -307,6 +315,7 @@ async fn test_channel_messages(cx: &mut TestAppContext) {
timestamp: 999,
sender_id: 6,
nonce: Some(5.into()),
mentions: vec![],
},
],
},
@ -358,19 +367,13 @@ fn update_channels(
#[track_caller]
fn assert_channels(
channel_store: &ModelHandle<ChannelStore>,
expected_channels: &[(usize, String, bool)],
expected_channels: &[(usize, String, proto::ChannelRole)],
cx: &AppContext,
) {
let actual = channel_store.read_with(cx, |store, _| {
store
.channel_dag_entries()
.map(|(depth, channel)| {
(
depth,
channel.name.to_string(),
store.is_user_admin(channel.id),
)
})
.ordered_channels()
.map(|(depth, channel)| (depth, channel.name.to_string(), channel.role))
.collect::<Vec<_>>()
});
assert_eq!(actual, expected_channels);

View file

@ -4,7 +4,9 @@ use lazy_static::lazy_static;
use parking_lot::Mutex;
use serde::Serialize;
use std::{env, io::Write, mem, path::PathBuf, sync::Arc, time::Duration};
use sysinfo::{Pid, PidExt, ProcessExt, System, SystemExt};
use sysinfo::{
CpuRefreshKind, Pid, PidExt, ProcessExt, ProcessRefreshKind, RefreshKind, System, SystemExt,
};
use tempfile::NamedTempFile;
use util::http::HttpClient;
use util::{channel::ReleaseChannel, TryFutureExt};
@ -166,8 +168,16 @@ impl Telemetry {
let this = self.clone();
cx.spawn(|mut cx| async move {
let mut system = System::new_all();
system.refresh_all();
// Avoiding calling `System::new_all()`, as there have been crashes related to it
let refresh_kind = RefreshKind::new()
.with_memory() // For memory usage
.with_processes(ProcessRefreshKind::everything()) // For process usage
.with_cpu(CpuRefreshKind::everything()); // For core count
let mut system = System::new_with_specifics(refresh_kind);
// Avoiding calling `refresh_all()`, just update what we need
system.refresh_specifics(refresh_kind);
loop {
// Waiting some amount of time before the first query is important to get a reasonable value
@ -175,8 +185,7 @@ impl Telemetry {
const DURATION_BETWEEN_SYSTEM_EVENTS: Duration = Duration::from_secs(60);
smol::Timer::after(DURATION_BETWEEN_SYSTEM_EVENTS).await;
system.refresh_memory();
system.refresh_processes();
system.refresh_specifics(refresh_kind);
let current_process = Pid::from_u32(std::process::id());
let Some(process) = system.processes().get(&current_process) else {

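The targeted-refresh pattern above can be exercised on its own; a minimal sketch assuming the same sysinfo API (the memory/CPU reads at the end are illustrative, not part of this change):

use sysinfo::{
    CpuRefreshKind, Pid, PidExt, ProcessExt, ProcessRefreshKind, RefreshKind, System, SystemExt,
};

// Build one RefreshKind up front, reuse it for every refresh, and read the
// current process's memory and CPU usage from the refreshed snapshot.
fn sample_current_process() -> Option<(u64, f32)> {
    let refresh_kind = RefreshKind::new()
        .with_memory()
        .with_processes(ProcessRefreshKind::everything())
        .with_cpu(CpuRefreshKind::everything());
    let mut system = System::new_with_specifics(refresh_kind);
    system.refresh_specifics(refresh_kind);
    let pid = Pid::from_u32(std::process::id());
    let process = system.processes().get(&pid)?;
    Some((process.memory(), process.cpu_usage()))
}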
View file

@ -293,21 +293,19 @@ impl UserStore {
// No need to parallelize here
let mut updated_contacts = Vec::new();
for contact in message.contacts {
let should_notify = contact.should_notify;
updated_contacts.push((
Arc::new(Contact::from_proto(contact, &this, &mut cx).await?),
should_notify,
updated_contacts.push(Arc::new(
Contact::from_proto(contact, &this, &mut cx).await?,
));
}
let mut incoming_requests = Vec::new();
for request in message.incoming_requests {
incoming_requests.push({
let user = this
.update(&mut cx, |this, cx| this.get_user(request.requester_id, cx))
.await?;
(user, request.should_notify)
});
incoming_requests.push(
this.update(&mut cx, |this, cx| {
this.get_user(request.requester_id, cx)
})
.await?,
);
}
let mut outgoing_requests = Vec::new();
@ -330,13 +328,7 @@ impl UserStore {
this.contacts
.retain(|contact| !removed_contacts.contains(&contact.user.id));
// Update existing contacts and insert new ones
for (updated_contact, should_notify) in updated_contacts {
if should_notify {
cx.emit(Event::Contact {
user: updated_contact.user.clone(),
kind: ContactEventKind::Accepted,
});
}
for updated_contact in updated_contacts {
match this.contacts.binary_search_by_key(
&&updated_contact.user.github_login,
|contact| &contact.user.github_login,
@ -359,14 +351,7 @@ impl UserStore {
}
});
// Update existing incoming requests and insert new ones
for (user, should_notify) in incoming_requests {
if should_notify {
cx.emit(Event::Contact {
user: user.clone(),
kind: ContactEventKind::Requested,
});
}
for user in incoming_requests {
match this
.incoming_contact_requests
.binary_search_by_key(&&user.github_login, |contact| {
@ -415,6 +400,12 @@ impl UserStore {
&self.incoming_contact_requests
}
pub fn has_incoming_contact_request(&self, user_id: u64) -> bool {
self.incoming_contact_requests
.iter()
.any(|user| user.id == user_id)
}
pub fn outgoing_contact_requests(&self) -> &[Arc<User>] {
&self.outgoing_contact_requests
}

View file

@ -14,8 +14,8 @@ use futures::{
future::BoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryFutureExt as _, TryStreamExt,
};
use gpui2::{
serde_json, AnyHandle, AnyWeakHandle, AppContext, AsyncAppContext, Handle, SemanticVersion,
Task, WeakHandle,
serde_json, AnyModel, AnyWeakModel, AppContext, AsyncAppContext, Model, SemanticVersion, Task,
WeakModel,
};
use lazy_static::lazy_static;
use parking_lot::RwLock;
@ -227,7 +227,7 @@ struct ClientState {
_reconnect_task: Option<Task<()>>,
reconnect_interval: Duration,
entities_by_type_and_remote_id: HashMap<(TypeId, u64), WeakSubscriber>,
models_by_message_type: HashMap<TypeId, AnyWeakHandle>,
models_by_message_type: HashMap<TypeId, AnyWeakModel>,
entity_types_by_message_type: HashMap<TypeId, TypeId>,
#[allow(clippy::type_complexity)]
message_handlers: HashMap<
@ -236,7 +236,7 @@ struct ClientState {
dyn Send
+ Sync
+ Fn(
AnyHandle,
AnyModel,
Box<dyn AnyTypedEnvelope>,
&Arc<Client>,
AsyncAppContext,
@ -246,7 +246,7 @@ struct ClientState {
}
enum WeakSubscriber {
Entity { handle: AnyWeakHandle },
Entity { handle: AnyWeakModel },
Pending(Vec<Box<dyn AnyTypedEnvelope>>),
}
@ -312,9 +312,9 @@ pub struct PendingEntitySubscription<T: 'static> {
impl<T> PendingEntitySubscription<T>
where
T: 'static + Send + Sync,
T: 'static + Send,
{
pub fn set_model(mut self, model: &Handle<T>, cx: &mut AsyncAppContext) -> Subscription {
pub fn set_model(mut self, model: &Model<T>, cx: &mut AsyncAppContext) -> Subscription {
self.consumed = true;
let mut state = self.client.state.write();
let id = (TypeId::of::<T>(), self.remote_id);
@ -529,7 +529,7 @@ impl Client {
remote_id: u64,
) -> Result<PendingEntitySubscription<T>>
where
T: 'static + Send + Sync,
T: 'static + Send,
{
let id = (TypeId::of::<T>(), remote_id);
@ -552,13 +552,13 @@ impl Client {
#[track_caller]
pub fn add_message_handler<M, E, H, F>(
self: &Arc<Self>,
entity: WeakHandle<E>,
entity: WeakModel<E>,
handler: H,
) -> Subscription
where
M: EnvelopedMessage,
E: 'static + Send + Sync,
H: 'static + Send + Sync + Fn(Handle<E>, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
E: 'static + Send,
H: 'static + Send + Sync + Fn(Model<E>, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
F: 'static + Future<Output = Result<()>> + Send,
{
let message_type_id = TypeId::of::<M>();
@ -594,13 +594,13 @@ impl Client {
pub fn add_request_handler<M, E, H, F>(
self: &Arc<Self>,
model: WeakHandle<E>,
model: WeakModel<E>,
handler: H,
) -> Subscription
where
M: RequestMessage,
E: 'static + Send + Sync,
H: 'static + Send + Sync + Fn(Handle<E>, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
E: 'static + Send,
H: 'static + Send + Sync + Fn(Model<E>, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
F: 'static + Future<Output = Result<M::Response>> + Send,
{
self.add_message_handler(model, move |handle, envelope, this, cx| {
@ -615,8 +615,8 @@ impl Client {
pub fn add_model_message_handler<M, E, H, F>(self: &Arc<Self>, handler: H)
where
M: EntityMessage,
E: 'static + Send + Sync,
H: 'static + Send + Sync + Fn(Handle<E>, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
E: 'static + Send,
H: 'static + Send + Sync + Fn(Model<E>, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
F: 'static + Future<Output = Result<()>> + Send,
{
self.add_entity_message_handler::<M, E, _, _>(move |subscriber, message, client, cx| {
@ -627,8 +627,8 @@ impl Client {
fn add_entity_message_handler<M, E, H, F>(self: &Arc<Self>, handler: H)
where
M: EntityMessage,
E: 'static + Send + Sync,
H: 'static + Send + Sync + Fn(AnyHandle, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
E: 'static + Send,
H: 'static + Send + Sync + Fn(AnyModel, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
F: 'static + Future<Output = Result<()>> + Send,
{
let model_type_id = TypeId::of::<E>();
@ -666,8 +666,8 @@ impl Client {
pub fn add_model_request_handler<M, E, H, F>(self: &Arc<Self>, handler: H)
where
M: EntityMessage + RequestMessage,
E: 'static + Send + Sync,
H: 'static + Send + Sync + Fn(Handle<E>, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
E: 'static + Send,
H: 'static + Send + Sync + Fn(Model<E>, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
F: 'static + Future<Output = Result<M::Response>> + Send,
{
self.add_model_message_handler(move |entity, envelope, client, cx| {
@ -1546,7 +1546,7 @@ mod tests {
let (done_tx1, mut done_rx1) = smol::channel::unbounded();
let (done_tx2, mut done_rx2) = smol::channel::unbounded();
client.add_model_message_handler(
move |model: Handle<Model>, _: TypedEnvelope<proto::JoinProject>, _, mut cx| {
move |model: Model<TestModel>, _: TypedEnvelope<proto::JoinProject>, _, mut cx| {
match model.update(&mut cx, |model, _| model.id).unwrap() {
1 => done_tx1.try_send(()).unwrap(),
2 => done_tx2.try_send(()).unwrap(),
@ -1555,15 +1555,15 @@ mod tests {
async { Ok(()) }
},
);
let model1 = cx.entity(|_| Model {
let model1 = cx.build_model(|_| TestModel {
id: 1,
subscription: None,
});
let model2 = cx.entity(|_| Model {
let model2 = cx.build_model(|_| TestModel {
id: 2,
subscription: None,
});
let model3 = cx.entity(|_| Model {
let model3 = cx.build_model(|_| TestModel {
id: 3,
subscription: None,
});
@ -1596,7 +1596,7 @@ mod tests {
let client = cx.update(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
let server = FakeServer::for_client(user_id, &client, cx).await;
let model = cx.entity(|_| Model::default());
let model = cx.build_model(|_| TestModel::default());
let (done_tx1, _done_rx1) = smol::channel::unbounded();
let (done_tx2, mut done_rx2) = smol::channel::unbounded();
let subscription1 = client.add_message_handler(
@ -1624,11 +1624,11 @@ mod tests {
let client = cx.update(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
let server = FakeServer::for_client(user_id, &client, cx).await;
let model = cx.entity(|_| Model::default());
let model = cx.build_model(|_| TestModel::default());
let (done_tx, mut done_rx) = smol::channel::unbounded();
let subscription = client.add_message_handler(
model.clone().downgrade(),
move |model: Handle<Model>, _: TypedEnvelope<proto::Ping>, _, mut cx| {
move |model: Model<TestModel>, _: TypedEnvelope<proto::Ping>, _, mut cx| {
model
.update(&mut cx, |model, _| model.subscription.take())
.unwrap();
@ -1644,7 +1644,7 @@ mod tests {
}
#[derive(Default)]
struct Model {
struct TestModel {
id: usize,
subscription: Option<Subscription>,
}

View file

@ -5,7 +5,9 @@ use parking_lot::Mutex;
use serde::Serialize;
use settings2::Settings;
use std::{env, io::Write, mem, path::PathBuf, sync::Arc, time::Duration};
use sysinfo::{Pid, PidExt, ProcessExt, System, SystemExt};
use sysinfo::{
CpuRefreshKind, Pid, PidExt, ProcessExt, ProcessRefreshKind, RefreshKind, System, SystemExt,
};
use tempfile::NamedTempFile;
use util::http::HttpClient;
use util::{channel::ReleaseChannel, TryFutureExt};
@ -161,8 +163,16 @@ impl Telemetry {
let this = self.clone();
cx.spawn(|cx| async move {
let mut system = System::new_all();
system.refresh_all();
// Avoiding calling `System::new_all()`, as there have been crashes related to it
let refresh_kind = RefreshKind::new()
.with_memory() // For memory usage
.with_processes(ProcessRefreshKind::everything()) // For process usage
.with_cpu(CpuRefreshKind::everything()); // For core count
let mut system = System::new_with_specifics(refresh_kind);
// Avoiding calling `refresh_all()`, just update what we need
system.refresh_specifics(refresh_kind);
loop {
// Waiting some amount of time before the first query is important to get a reasonable value
@ -170,8 +180,7 @@ impl Telemetry {
const DURATION_BETWEEN_SYSTEM_EVENTS: Duration = Duration::from_secs(60);
smol::Timer::after(DURATION_BETWEEN_SYSTEM_EVENTS).await;
system.refresh_memory();
system.refresh_processes();
system.refresh_specifics(refresh_kind);
let current_process = Pid::from_u32(std::process::id());
let Some(process) = system.processes().get(&current_process) else {

View file

@ -1,7 +1,7 @@
use crate::{Client, Connection, Credentials, EstablishConnectionError, UserStore};
use anyhow::{anyhow, Result};
use futures::{stream::BoxStream, StreamExt};
use gpui2::{Context, Executor, Handle, TestAppContext};
use gpui2::{Context, Executor, Model, TestAppContext};
use parking_lot::Mutex;
use rpc2::{
proto::{self, GetPrivateUserInfo, GetPrivateUserInfoResponse},
@ -194,9 +194,9 @@ impl FakeServer {
&self,
client: Arc<Client>,
cx: &mut TestAppContext,
) -> Handle<UserStore> {
) -> Model<UserStore> {
let http_client = FakeHttpClient::with_404_response();
let user_store = cx.entity(|cx| UserStore::new(client, http_client, cx));
let user_store = cx.build_model(|cx| UserStore::new(client, http_client, cx));
assert_eq!(
self.receive::<proto::GetUsers>()
.await

View file

@ -3,7 +3,7 @@ use anyhow::{anyhow, Context, Result};
use collections::{hash_map::Entry, HashMap, HashSet};
use feature_flags2::FeatureFlagAppExt;
use futures::{channel::mpsc, future, AsyncReadExt, Future, StreamExt};
use gpui2::{AsyncAppContext, EventEmitter, Handle, ImageData, ModelContext, Task};
use gpui2::{AsyncAppContext, EventEmitter, ImageData, Model, ModelContext, Task};
use postage::{sink::Sink, watch};
use rpc2::proto::{RequestMessage, UsersResponse};
use std::sync::{Arc, Weak};
@ -122,9 +122,9 @@ impl UserStore {
let (mut current_user_tx, current_user_rx) = watch::channel();
let (update_contacts_tx, mut update_contacts_rx) = mpsc::unbounded();
let rpc_subscriptions = vec![
client.add_message_handler(cx.weak_handle(), Self::handle_update_contacts),
client.add_message_handler(cx.weak_handle(), Self::handle_update_invite_info),
client.add_message_handler(cx.weak_handle(), Self::handle_show_contacts),
client.add_message_handler(cx.weak_model(), Self::handle_update_contacts),
client.add_message_handler(cx.weak_model(), Self::handle_update_invite_info),
client.add_message_handler(cx.weak_model(), Self::handle_show_contacts),
];
Self {
users: Default::default(),
@ -213,7 +213,7 @@ impl UserStore {
}
async fn handle_update_invite_info(
this: Handle<Self>,
this: Model<Self>,
message: TypedEnvelope<proto::UpdateInviteInfo>,
_: Arc<Client>,
mut cx: AsyncAppContext,
@ -229,7 +229,7 @@ impl UserStore {
}
async fn handle_show_contacts(
this: Handle<Self>,
this: Model<Self>,
_: TypedEnvelope<proto::ShowContacts>,
_: Arc<Client>,
mut cx: AsyncAppContext,
@ -243,7 +243,7 @@ impl UserStore {
}
async fn handle_update_contacts(
this: Handle<Self>,
this: Model<Self>,
message: TypedEnvelope<proto::UpdateContacts>,
_: Arc<Client>,
mut cx: AsyncAppContext,
@ -690,7 +690,7 @@ impl User {
impl Contact {
async fn from_proto(
contact: proto::Contact,
user_store: &Handle<UserStore>,
user_store: &Model<UserStore>,
cx: &mut AsyncAppContext,
) -> Result<Self> {
let user = user_store

View file

@ -3,7 +3,7 @@ authors = ["Nathan Sobo <nathan@zed.dev>"]
default-run = "collab"
edition = "2021"
name = "collab"
version = "0.24.0"
version = "0.27.0"
publish = false
[[bin]]
@ -73,6 +73,7 @@ git = { path = "../git", features = ["test-support"] }
live_kit_client = { path = "../live_kit_client", features = ["test-support"] }
lsp = { path = "../lsp", features = ["test-support"] }
node_runtime = { path = "../node_runtime" }
notifications = { path = "../notifications", features = ["test-support"] }
project = { path = "../project", features = ["test-support"] }
rpc = { path = "../rpc", features = ["test-support"] }
settings = { path = "../settings", features = ["test-support"] }

View file

@ -44,7 +44,7 @@ CREATE UNIQUE INDEX "index_rooms_on_channel_id" ON "rooms" ("channel_id");
CREATE TABLE "projects" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER REFERENCES rooms (id) NOT NULL,
"room_id" INTEGER REFERENCES rooms (id) ON DELETE CASCADE NOT NULL,
"host_user_id" INTEGER REFERENCES users (id) NOT NULL,
"host_connection_id" INTEGER,
"host_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE,
@ -192,9 +192,13 @@ CREATE INDEX "index_followers_on_room_id" ON "followers" ("room_id");
CREATE TABLE "channels" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" VARCHAR NOT NULL,
"created_at" TIMESTAMP NOT NULL DEFAULT now
"created_at" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
"visibility" VARCHAR NOT NULL,
"parent_path" TEXT
);
CREATE INDEX "index_channels_on_parent_path" ON "channels" ("parent_path");
CREATE TABLE IF NOT EXISTS "channel_chat_participants" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id" INTEGER NOT NULL REFERENCES users (id),
@ -213,19 +217,22 @@ CREATE TABLE IF NOT EXISTS "channel_messages" (
"nonce" BLOB NOT NULL
);
CREATE INDEX "index_channel_messages_on_channel_id" ON "channel_messages" ("channel_id");
CREATE UNIQUE INDEX "index_channel_messages_on_nonce" ON "channel_messages" ("nonce");
CREATE UNIQUE INDEX "index_channel_messages_on_sender_id_nonce" ON "channel_messages" ("sender_id", "nonce");
CREATE TABLE "channel_paths" (
"id_path" TEXT NOT NULL PRIMARY KEY,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE
CREATE TABLE "channel_message_mentions" (
"message_id" INTEGER NOT NULL REFERENCES channel_messages (id) ON DELETE CASCADE,
"start_offset" INTEGER NOT NULL,
"end_offset" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
PRIMARY KEY(message_id, start_offset)
);
CREATE INDEX "index_channel_paths_on_channel_id" ON "channel_paths" ("channel_id");
CREATE TABLE "channel_members" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"channel_id" INTEGER NOT NULL REFERENCES channels (id) ON DELETE CASCADE,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"admin" BOOLEAN NOT NULL DEFAULT false,
"role" VARCHAR,
"accepted" BOOLEAN NOT NULL DEFAULT false,
"updated_at" TIMESTAMP NOT NULL DEFAULT now
);
@ -312,3 +319,26 @@ CREATE TABLE IF NOT EXISTS "observed_channel_messages" (
);
CREATE UNIQUE INDEX "index_observed_channel_messages_user_and_channel_id" ON "observed_channel_messages" ("user_id", "channel_id");
CREATE TABLE "notification_kinds" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_notification_kinds_on_name" ON "notification_kinds" ("name");
CREATE TABLE "notifications" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"created_at" TIMESTAMP NOT NULL default CURRENT_TIMESTAMP,
"recipient_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"kind" INTEGER NOT NULL REFERENCES notification_kinds (id),
"entity_id" INTEGER,
"content" TEXT,
"is_read" BOOLEAN NOT NULL DEFAULT FALSE,
"response" BOOLEAN
);
CREATE INDEX
"index_notifications_on_recipient_id_is_read_kind_entity_id"
ON "notifications"
("recipient_id", "is_read", "kind", "entity_id");

View file

@ -0,0 +1,22 @@
CREATE TABLE "notification_kinds" (
"id" SERIAL PRIMARY KEY,
"name" VARCHAR NOT NULL
);
CREATE UNIQUE INDEX "index_notification_kinds_on_name" ON "notification_kinds" ("name");
CREATE TABLE notifications (
"id" SERIAL PRIMARY KEY,
"created_at" TIMESTAMP NOT NULL DEFAULT now(),
"recipient_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"kind" INTEGER NOT NULL REFERENCES notification_kinds (id),
"entity_id" INTEGER,
"content" TEXT,
"is_read" BOOLEAN NOT NULL DEFAULT FALSE,
"response" BOOLEAN
);
CREATE INDEX
"index_notifications_on_recipient_id_is_read_kind_entity_id"
ON "notifications"
("recipient_id", "is_read", "kind", "entity_id");

View file

@ -0,0 +1,4 @@
ALTER TABLE channel_members ADD COLUMN role TEXT;
UPDATE channel_members SET role = CASE WHEN admin THEN 'admin' ELSE 'member' END;
ALTER TABLE channels ADD COLUMN visibility TEXT NOT NULL DEFAULT 'members';

View file

@ -0,0 +1,8 @@
-- Add migration script here
ALTER TABLE projects
DROP CONSTRAINT projects_room_id_fkey,
ADD CONSTRAINT projects_room_id_fkey
FOREIGN KEY (room_id)
REFERENCES rooms (id)
ON DELETE CASCADE;

View file

@ -0,0 +1,11 @@
CREATE TABLE "channel_message_mentions" (
"message_id" INTEGER NOT NULL REFERENCES channel_messages (id) ON DELETE CASCADE,
"start_offset" INTEGER NOT NULL,
"end_offset" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
PRIMARY KEY(message_id, start_offset)
);
-- We use 'on conflict update' with this index, so it should be per-user.
CREATE UNIQUE INDEX "index_channel_messages_on_sender_id_nonce" ON "channel_messages" ("sender_id", "nonce");
DROP INDEX "index_channel_messages_on_nonce";

View file

@ -0,0 +1,12 @@
ALTER TABLE channels ADD COLUMN parent_path TEXT;
UPDATE channels
SET parent_path = substr(
channel_paths.id_path,
2,
length(channel_paths.id_path) - length('/' || channel_paths.channel_id::text || '/')
)
FROM channel_paths
WHERE channel_paths.channel_id = channels.id;
CREATE INDEX "index_channels_on_parent_path" ON "channels" ("parent_path");

View file

@ -71,7 +71,6 @@ async fn main() {
db::NewUserParams {
github_login: github_user.login,
github_user_id: github_user.id,
invite_count: 5,
},
)
.await

View file

@ -20,7 +20,7 @@ use rpc::{
};
use sea_orm::{
entity::prelude::*,
sea_query::{Alias, Expr, OnConflict, Query},
sea_query::{Alias, Expr, OnConflict},
ActiveValue, Condition, ConnectionTrait, DatabaseConnection, DatabaseTransaction, DbErr,
FromQueryResult, IntoActiveModel, IsolationLevel, JoinType, QueryOrder, QuerySelect, Statement,
TransactionTrait,
@ -47,14 +47,14 @@ pub use ids::*;
pub use sea_orm::ConnectOptions;
pub use tables::user::Model as User;
use self::queries::channels::ChannelGraph;
pub struct Database {
options: ConnectOptions,
pool: DatabaseConnection,
rooms: DashMap<RoomId, Arc<Mutex<()>>>,
rng: Mutex<StdRng>,
executor: Executor,
notification_kinds_by_id: HashMap<NotificationKindId, &'static str>,
notification_kinds_by_name: HashMap<String, NotificationKindId>,
#[cfg(test)]
runtime: Option<tokio::runtime::Runtime>,
}
@ -69,6 +69,8 @@ impl Database {
pool: sea_orm::Database::connect(options).await?,
rooms: DashMap::with_capacity(16384),
rng: Mutex::new(StdRng::seed_from_u64(0)),
notification_kinds_by_id: HashMap::default(),
notification_kinds_by_name: HashMap::default(),
executor,
#[cfg(test)]
runtime: None,
@ -121,6 +123,11 @@ impl Database {
Ok(new_migrations)
}
pub async fn initialize_static_data(&mut self) -> Result<()> {
self.initialize_notification_kinds().await?;
Ok(())
}
pub async fn transaction<F, Fut, T>(&self, f: F) -> Result<T>
where
F: Send + Fn(TransactionHandle) -> Fut,
@ -361,18 +368,9 @@ impl<T> RoomGuard<T> {
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Contact {
Accepted {
user_id: UserId,
should_notify: bool,
busy: bool,
},
Outgoing {
user_id: UserId,
},
Incoming {
user_id: UserId,
should_notify: bool,
},
Accepted { user_id: UserId, busy: bool },
Outgoing { user_id: UserId },
Incoming { user_id: UserId },
}
impl Contact {
@ -385,6 +383,15 @@ impl Contact {
}
}
pub type NotificationBatch = Vec<(UserId, proto::Notification)>;
pub struct CreatedChannelMessage {
pub message_id: MessageId,
pub participant_connection_ids: Vec<ConnectionId>,
pub channel_members: Vec<UserId>,
pub notifications: NotificationBatch,
}
#[derive(Clone, Debug, PartialEq, Eq, FromQueryResult, Serialize, Deserialize)]
pub struct Invite {
pub email_address: String,
@ -417,7 +424,6 @@ pub struct WaitlistSummary {
pub struct NewUserParams {
pub github_login: String,
pub github_user_id: i32,
pub invite_count: i32,
}
#[derive(Debug)]
@ -428,17 +434,115 @@ pub struct NewUserResult {
pub signup_device_id: Option<String>,
}
#[derive(FromQueryResult, Debug, PartialEq, Eq, Hash)]
#[derive(Debug)]
pub struct MoveChannelResult {
pub participants_to_update: HashMap<UserId, ChannelsForUser>,
pub participants_to_remove: HashSet<UserId>,
pub moved_channels: HashSet<ChannelId>,
}
#[derive(Debug)]
pub struct RenameChannelResult {
pub channel: Channel,
pub participants_to_update: HashMap<UserId, Channel>,
}
#[derive(Debug)]
pub struct CreateChannelResult {
pub channel: Channel,
pub participants_to_update: Vec<(UserId, ChannelsForUser)>,
}
#[derive(Debug)]
pub struct SetChannelVisibilityResult {
pub participants_to_update: HashMap<UserId, ChannelsForUser>,
pub participants_to_remove: HashSet<UserId>,
pub channels_to_remove: Vec<ChannelId>,
}
#[derive(Debug)]
pub struct MembershipUpdated {
pub channel_id: ChannelId,
pub new_channels: ChannelsForUser,
pub removed_channels: Vec<ChannelId>,
}
#[derive(Debug)]
pub enum SetMemberRoleResult {
InviteUpdated(Channel),
MembershipUpdated(MembershipUpdated),
}
#[derive(Debug)]
pub struct InviteMemberResult {
pub channel: Channel,
pub notifications: NotificationBatch,
}
#[derive(Debug)]
pub struct RespondToChannelInvite {
pub membership_update: Option<MembershipUpdated>,
pub notifications: NotificationBatch,
}
#[derive(Debug)]
pub struct RemoveChannelMemberResult {
pub membership_update: MembershipUpdated,
pub notification_id: Option<NotificationId>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Channel {
pub id: ChannelId,
pub name: String,
pub visibility: ChannelVisibility,
pub role: ChannelRole,
pub parent_path: Vec<ChannelId>,
}
impl Channel {
fn from_model(value: channel::Model, role: ChannelRole) -> Self {
Channel {
id: value.id,
visibility: value.visibility,
name: value.clone().name,
role,
parent_path: value.ancestors().collect(),
}
}
pub fn to_proto(&self) -> proto::Channel {
proto::Channel {
id: self.id.to_proto(),
name: self.name.clone(),
visibility: self.visibility.into(),
role: self.role.into(),
parent_path: self.parent_path.iter().map(|c| c.to_proto()).collect(),
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ChannelMember {
pub role: ChannelRole,
pub user_id: UserId,
pub kind: proto::channel_member::Kind,
}
impl ChannelMember {
pub fn to_proto(&self) -> proto::ChannelMember {
proto::ChannelMember {
role: self.role.into(),
user_id: self.user_id.to_proto(),
kind: self.kind.into(),
}
}
}
#[derive(Debug, PartialEq)]
pub struct ChannelsForUser {
pub channels: ChannelGraph,
pub channels: Vec<Channel>,
pub channel_participants: HashMap<ChannelId, Vec<UserId>>,
pub channels_with_admin_privileges: HashSet<ChannelId>,
pub unseen_buffer_changes: Vec<proto::UnseenChannelBufferChange>,
pub channel_messages: Vec<proto::UnseenChannelMessage>,
}

View file

@ -1,4 +1,5 @@
use crate::Result;
use rpc::proto;
use sea_orm::{entity::prelude::*, DbErr};
use serde::{Deserialize, Serialize};
@ -80,3 +81,119 @@ id_type!(SignupId);
id_type!(UserId);
id_type!(ChannelBufferCollaboratorId);
id_type!(FlagId);
id_type!(NotificationId);
id_type!(NotificationKindId);
#[derive(Eq, PartialEq, Copy, Clone, Debug, EnumIter, DeriveActiveEnum, Default, Hash)]
#[sea_orm(rs_type = "String", db_type = "String(None)")]
pub enum ChannelRole {
#[sea_orm(string_value = "admin")]
Admin,
#[sea_orm(string_value = "member")]
#[default]
Member,
#[sea_orm(string_value = "guest")]
Guest,
#[sea_orm(string_value = "banned")]
Banned,
}
impl ChannelRole {
pub fn should_override(&self, other: Self) -> bool {
use ChannelRole::*;
match self {
Admin => matches!(other, Member | Banned | Guest),
Member => matches!(other, Banned | Guest),
Banned => matches!(other, Guest),
Guest => false,
}
}
pub fn max(&self, other: Self) -> Self {
if self.should_override(other) {
*self
} else {
other
}
}
pub fn can_see_all_descendants(&self) -> bool {
use ChannelRole::*;
match self {
Admin | Member => true,
Guest | Banned => false,
}
}
pub fn can_only_see_public_descendants(&self) -> bool {
use ChannelRole::*;
match self {
Guest => true,
Admin | Member | Banned => false,
}
}
}
impl From<proto::ChannelRole> for ChannelRole {
fn from(value: proto::ChannelRole) -> Self {
match value {
proto::ChannelRole::Admin => ChannelRole::Admin,
proto::ChannelRole::Member => ChannelRole::Member,
proto::ChannelRole::Guest => ChannelRole::Guest,
proto::ChannelRole::Banned => ChannelRole::Banned,
}
}
}
impl Into<proto::ChannelRole> for ChannelRole {
fn into(self) -> proto::ChannelRole {
match self {
ChannelRole::Admin => proto::ChannelRole::Admin,
ChannelRole::Member => proto::ChannelRole::Member,
ChannelRole::Guest => proto::ChannelRole::Guest,
ChannelRole::Banned => proto::ChannelRole::Banned,
}
}
}
impl Into<i32> for ChannelRole {
fn into(self) -> i32 {
let proto: proto::ChannelRole = self.into();
proto.into()
}
}
#[derive(Eq, PartialEq, Copy, Clone, Debug, EnumIter, DeriveActiveEnum, Default, Hash)]
#[sea_orm(rs_type = "String", db_type = "String(None)")]
pub enum ChannelVisibility {
#[sea_orm(string_value = "public")]
Public,
#[sea_orm(string_value = "members")]
#[default]
Members,
}
impl From<proto::ChannelVisibility> for ChannelVisibility {
fn from(value: proto::ChannelVisibility) -> Self {
match value {
proto::ChannelVisibility::Public => ChannelVisibility::Public,
proto::ChannelVisibility::Members => ChannelVisibility::Members,
}
}
}
impl Into<proto::ChannelVisibility> for ChannelVisibility {
fn into(self) -> proto::ChannelVisibility {
match self {
ChannelVisibility::Public => proto::ChannelVisibility::Public,
ChannelVisibility::Members => proto::ChannelVisibility::Members,
}
}
}
impl Into<i32> for ChannelVisibility {
fn into(self) -> i32 {
let proto: proto::ChannelVisibility = self.into();
proto.into()
}
}
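
The precedence encoded by should_override and max above amounts to Admin > Member > Banned > Guest; a small illustrative test using only the types defined in this file:

#[test]
fn channel_role_precedence() {
    use ChannelRole::*;
    assert!(Admin.should_override(Member));
    assert!(Member.should_override(Guest));
    assert!(Banned.should_override(Guest));
    assert!(!Guest.should_override(Banned));
    assert_eq!(Admin.max(Member), Admin);
    assert_eq!(Guest.max(Member), Member);
    assert!(Member.can_see_all_descendants());
    assert!(Guest.can_only_see_public_descendants());
}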

View file

@ -5,6 +5,7 @@ pub mod buffers;
pub mod channels;
pub mod contacts;
pub mod messages;
pub mod notifications;
pub mod projects;
pub mod rooms;
pub mod servers;

View file

@ -1,4 +1,5 @@
use super::*;
use sea_orm::sea_query::Query;
impl Database {
pub async fn create_access_token(

View file

@ -16,7 +16,8 @@ impl Database {
connection: ConnectionId,
) -> Result<proto::JoinChannelBufferResponse> {
self.transaction(|tx| async move {
self.check_user_is_channel_member(channel_id, user_id, &tx)
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &tx)
.await?;
let buffer = channel::Model {
@ -129,9 +130,11 @@ impl Database {
self.transaction(|tx| async move {
let mut results = Vec::new();
for client_buffer in buffers {
let channel_id = ChannelId::from_proto(client_buffer.channel_id);
let channel = self
.get_channel_internal(ChannelId::from_proto(client_buffer.channel_id), &*tx)
.await?;
if self
.check_user_is_channel_member(channel_id, user_id, &*tx)
.check_user_is_channel_participant(&channel, user_id, &*tx)
.await
.is_err()
{
@ -139,9 +142,9 @@ impl Database {
continue;
}
let buffer = self.get_channel_buffer(channel_id, &*tx).await?;
let buffer = self.get_channel_buffer(channel.id, &*tx).await?;
let mut collaborators = channel_buffer_collaborator::Entity::find()
.filter(channel_buffer_collaborator::Column::ChannelId.eq(channel_id))
.filter(channel_buffer_collaborator::Column::ChannelId.eq(channel.id))
.all(&*tx)
.await?;
@ -439,7 +442,8 @@ impl Database {
Vec<proto::VectorClockEntry>,
)> {
self.transaction(move |tx| async move {
self.check_user_is_channel_member(channel_id, user, &*tx)
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_member(&channel, user, &*tx)
.await?;
let buffer = buffer::Entity::find()
@ -482,7 +486,7 @@ impl Database {
)
.await?;
channel_members = self.get_channel_members_internal(channel_id, &*tx).await?;
channel_members = self.get_channel_participants(&channel, &*tx).await?;
let collaborators = self
.get_channel_buffer_collaborators_internal(channel_id, &*tx)
.await?;

File diff suppressed because it is too large

View file

@ -8,7 +8,6 @@ impl Database {
user_id_b: UserId,
a_to_b: bool,
accepted: bool,
should_notify: bool,
user_a_busy: bool,
user_b_busy: bool,
}
@ -53,7 +52,6 @@ impl Database {
if db_contact.accepted {
contacts.push(Contact::Accepted {
user_id: db_contact.user_id_b,
should_notify: db_contact.should_notify && db_contact.a_to_b,
busy: db_contact.user_b_busy,
});
} else if db_contact.a_to_b {
@ -63,19 +61,16 @@ impl Database {
} else {
contacts.push(Contact::Incoming {
user_id: db_contact.user_id_b,
should_notify: db_contact.should_notify,
});
}
} else if db_contact.accepted {
contacts.push(Contact::Accepted {
user_id: db_contact.user_id_a,
should_notify: db_contact.should_notify && !db_contact.a_to_b,
busy: db_contact.user_a_busy,
});
} else if db_contact.a_to_b {
contacts.push(Contact::Incoming {
user_id: db_contact.user_id_a,
should_notify: db_contact.should_notify,
});
} else {
contacts.push(Contact::Outgoing {
@ -124,7 +119,11 @@ impl Database {
.await
}
pub async fn send_contact_request(&self, sender_id: UserId, receiver_id: UserId) -> Result<()> {
pub async fn send_contact_request(
&self,
sender_id: UserId,
receiver_id: UserId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if sender_id < receiver_id {
(sender_id, receiver_id, true)
@ -161,11 +160,22 @@ impl Database {
.exec_without_returning(&*tx)
.await?;
if rows_affected == 1 {
Ok(())
} else {
Err(anyhow!("contact already requested"))?
if rows_affected == 0 {
Err(anyhow!("contact already requested"))?;
}
Ok(self
.create_notification(
receiver_id,
rpc::Notification::ContactRequest {
sender_id: sender_id.to_proto(),
},
true,
&*tx,
)
.await?
.into_iter()
.collect())
})
.await
}
@ -179,7 +189,11 @@ impl Database {
///
/// * `requester_id` - The user that initiates this request
/// * `responder_id` - The user that will be removed
pub async fn remove_contact(&self, requester_id: UserId, responder_id: UserId) -> Result<bool> {
pub async fn remove_contact(
&self,
requester_id: UserId,
responder_id: UserId,
) -> Result<(bool, Option<NotificationId>)> {
self.transaction(|tx| async move {
let (id_a, id_b) = if responder_id < requester_id {
(responder_id, requester_id)
@ -198,7 +212,21 @@ impl Database {
.ok_or_else(|| anyhow!("no such contact"))?;
contact::Entity::delete_by_id(contact.id).exec(&*tx).await?;
Ok(contact.accepted)
let mut deleted_notification_id = None;
if !contact.accepted {
deleted_notification_id = self
.remove_notification(
responder_id,
rpc::Notification::ContactRequest {
sender_id: requester_id.to_proto(),
},
&*tx,
)
.await?;
}
Ok((contact.accepted, deleted_notification_id))
})
.await
}
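
The contact queries above store each relationship as one row keyed by the smaller user id, with an `a_to_b` flag recording which side sent the request, so a request and its reverse hit the same row. A minimal illustrative sketch of that normalization (not part of this diff; `normalize_contact_pair` is a hypothetical helper and plain `u32` stands in for `UserId`):

// Illustrative sketch only, not part of the diff: normalize an unordered
// pair of user ids into the (id_a, id_b, a_to_b) form used by the contacts
// table, where id_a < id_b and a_to_b is true when id_a sent the request.
fn normalize_contact_pair(sender: u32, receiver: u32) -> (u32, u32, bool) {
    if sender < receiver {
        (sender, receiver, true)
    } else {
        (receiver, sender, false)
    }
}

fn main() {
    // Both orderings map to the same key, so a duplicate request in either
    // direction collides with the existing row instead of adding a second one.
    assert_eq!(normalize_contact_pair(3, 7), (3, 7, true));
    assert_eq!(normalize_contact_pair(7, 3), (3, 7, false));
}
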
@ -249,7 +277,7 @@ impl Database {
responder_id: UserId,
requester_id: UserId,
accept: bool,
) -> Result<()> {
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if responder_id < requester_id {
(responder_id, requester_id, false)
@ -287,11 +315,38 @@ impl Database {
result.rows_affected
};
if rows_affected == 1 {
Ok(())
} else {
if rows_affected == 0 {
Err(anyhow!("no such contact request"))?
}
let mut notifications = Vec::new();
notifications.extend(
self.mark_notification_as_read_with_response(
responder_id,
&rpc::Notification::ContactRequest {
sender_id: requester_id.to_proto(),
},
accept,
&*tx,
)
.await?,
);
if accept {
notifications.extend(
self.create_notification(
requester_id,
rpc::Notification::ContactRequestAccepted {
responder_id: responder_id.to_proto(),
},
true,
&*tx,
)
.await?,
);
}
Ok(notifications)
})
.await
}

View file

@ -1,4 +1,6 @@
use super::*;
use rpc::Notification;
use sea_orm::TryInsertResult;
use time::OffsetDateTime;
impl Database {
@ -9,7 +11,8 @@ impl Database {
user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
self.check_user_is_channel_member(channel_id, user_id, &*tx)
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
channel_chat_participant::ActiveModel {
id: ActiveValue::NotSet,
@ -77,7 +80,8 @@ impl Database {
before_message_id: Option<MessageId>,
) -> Result<Vec<proto::ChannelMessage>> {
self.transaction(|tx| async move {
self.check_user_is_channel_member(channel_id, user_id, &*tx)
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
let mut condition =
@ -87,33 +91,103 @@ impl Database {
condition = condition.add(channel_message::Column::Id.lt(before_message_id));
}
let mut rows = channel_message::Entity::find()
let rows = channel_message::Entity::find()
.filter(condition)
.order_by_desc(channel_message::Column::Id)
.limit(count as u64)
.stream(&*tx)
.all(&*tx)
.await?;
let mut messages = Vec::new();
while let Some(row) = rows.next().await {
let row = row?;
self.load_channel_messages(rows, &*tx).await
})
.await
}
pub async fn get_channel_messages_by_id(
&self,
user_id: UserId,
message_ids: &[MessageId],
) -> Result<Vec<proto::ChannelMessage>> {
self.transaction(|tx| async move {
let rows = channel_message::Entity::find()
.filter(channel_message::Column::Id.is_in(message_ids.iter().copied()))
.order_by_desc(channel_message::Column::Id)
.all(&*tx)
.await?;
let mut channels = HashMap::<ChannelId, channel::Model>::default();
for row in &rows {
channels.insert(
row.channel_id,
self.get_channel_internal(row.channel_id, &*tx).await?,
);
}
for (_, channel) in channels {
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
}
let messages = self.load_channel_messages(rows, &*tx).await?;
Ok(messages)
})
.await
}
async fn load_channel_messages(
&self,
rows: Vec<channel_message::Model>,
tx: &DatabaseTransaction,
) -> Result<Vec<proto::ChannelMessage>> {
let mut messages = rows
.into_iter()
.map(|row| {
let nonce = row.nonce.as_u64_pair();
messages.push(proto::ChannelMessage {
proto::ChannelMessage {
id: row.id.to_proto(),
sender_id: row.sender_id.to_proto(),
body: row.body,
timestamp: row.sent_at.assume_utc().unix_timestamp() as u64,
mentions: vec![],
nonce: Some(proto::Nonce {
upper_half: nonce.0,
lower_half: nonce.1,
}),
});
}
})
.collect::<Vec<_>>();
messages.reverse();
let mut mentions = channel_message_mention::Entity::find()
.filter(channel_message_mention::Column::MessageId.is_in(messages.iter().map(|m| m.id)))
.order_by_asc(channel_message_mention::Column::MessageId)
.order_by_asc(channel_message_mention::Column::StartOffset)
.stream(&*tx)
.await?;
let mut message_ix = 0;
while let Some(mention) = mentions.next().await {
let mention = mention?;
let message_id = mention.message_id.to_proto();
while let Some(message) = messages.get_mut(message_ix) {
if message.id < message_id {
message_ix += 1;
} else {
if message.id == message_id {
message.mentions.push(proto::ChatMention {
range: Some(proto::Range {
start: mention.start_offset as u64,
end: mention.end_offset as u64,
}),
user_id: mention.user_id.to_proto(),
});
}
break;
}
}
drop(rows);
messages.reverse();
Ok(messages)
})
.await
}
Ok(messages)
}
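
`load_channel_messages` attaches mention rows to their messages with one forward pass over two id-sorted lists instead of a per-message query. A self-contained sketch of that merge (not part of this diff; `attach_mentions` and the plain `Message` struct are stand-ins for the proto and SeaORM types):

// Illustrative sketch only, not part of the diff: attach mentions to
// messages when both lists are sorted by message id, in O(n + m).
#[derive(Debug)]
struct Message {
    id: u64,
    mentions: Vec<(u64, u64)>, // hypothetical (user_id, start_offset) pairs
}

fn attach_mentions(messages: &mut [Message], mentions: &[(u64, u64, u64)]) {
    // mentions: (message_id, user_id, start_offset), sorted by message_id.
    let mut message_ix = 0;
    for &(message_id, user_id, start_offset) in mentions {
        while let Some(message) = messages.get_mut(message_ix) {
            if message.id < message_id {
                message_ix += 1;
            } else {
                if message.id == message_id {
                    message.mentions.push((user_id, start_offset));
                }
                break;
            }
        }
    }
}

fn main() {
    let mut messages = vec![
        Message { id: 1, mentions: vec![] },
        Message { id: 3, mentions: vec![] },
    ];
    attach_mentions(&mut messages, &[(1, 42, 0), (3, 7, 5), (3, 9, 12)]);
    assert_eq!(messages[0].mentions.len(), 1);
    assert_eq!(messages[1].mentions.len(), 2);
}
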
pub async fn create_channel_message(
@ -121,10 +195,15 @@ impl Database {
channel_id: ChannelId,
user_id: UserId,
body: &str,
mentions: &[proto::ChatMention],
timestamp: OffsetDateTime,
nonce: u128,
) -> Result<(MessageId, Vec<ConnectionId>, Vec<UserId>)> {
) -> Result<CreatedChannelMessage> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
let mut rows = channel_chat_participant::Entity::find()
.filter(channel_chat_participant::Column::ChannelId.eq(channel_id))
.stream(&*tx)
@ -150,7 +229,7 @@ impl Database {
let timestamp = timestamp.to_offset(time::UtcOffset::UTC);
let timestamp = time::PrimitiveDateTime::new(timestamp.date(), timestamp.time());
let message = channel_message::Entity::insert(channel_message::ActiveModel {
let result = channel_message::Entity::insert(channel_message::ActiveModel {
channel_id: ActiveValue::Set(channel_id),
sender_id: ActiveValue::Set(user_id),
body: ActiveValue::Set(body.to_string()),
@ -159,35 +238,85 @@ impl Database {
id: ActiveValue::NotSet,
})
.on_conflict(
OnConflict::column(channel_message::Column::Nonce)
.update_column(channel_message::Column::Nonce)
.to_owned(),
OnConflict::columns([
channel_message::Column::SenderId,
channel_message::Column::Nonce,
])
.do_nothing()
.to_owned(),
)
.do_nothing()
.exec(&*tx)
.await?;
#[derive(Debug, Clone, Copy, EnumIter, DeriveColumn)]
enum QueryConnectionId {
ConnectionId,
let message_id;
let mut notifications = Vec::new();
match result {
TryInsertResult::Inserted(result) => {
message_id = result.last_insert_id;
let mentioned_user_ids =
mentions.iter().map(|m| m.user_id).collect::<HashSet<_>>();
let mentions = mentions
.iter()
.filter_map(|mention| {
let range = mention.range.as_ref()?;
if !body.is_char_boundary(range.start as usize)
|| !body.is_char_boundary(range.end as usize)
{
return None;
}
Some(channel_message_mention::ActiveModel {
message_id: ActiveValue::Set(message_id),
start_offset: ActiveValue::Set(range.start as i32),
end_offset: ActiveValue::Set(range.end as i32),
user_id: ActiveValue::Set(UserId::from_proto(mention.user_id)),
})
})
.collect::<Vec<_>>();
if !mentions.is_empty() {
channel_message_mention::Entity::insert_many(mentions)
.exec(&*tx)
.await?;
}
for mentioned_user in mentioned_user_ids {
notifications.extend(
self.create_notification(
UserId::from_proto(mentioned_user),
rpc::Notification::ChannelMessageMention {
message_id: message_id.to_proto(),
sender_id: user_id.to_proto(),
channel_id: channel_id.to_proto(),
},
false,
&*tx,
)
.await?,
);
}
self.observe_channel_message_internal(channel_id, user_id, message_id, &*tx)
.await?;
}
_ => {
message_id = channel_message::Entity::find()
.filter(channel_message::Column::Nonce.eq(Uuid::from_u128(nonce)))
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("failed to insert message"))?
.id;
}
}
// Observe this message for the sender
self.observe_channel_message_internal(
channel_id,
user_id,
message.last_insert_id,
&*tx,
)
.await?;
let mut channel_members = self.get_channel_members_internal(channel_id, &*tx).await?;
let mut channel_members = self.get_channel_participants(&channel, &*tx).await?;
channel_members.retain(|member| !participant_user_ids.contains(member));
Ok((
message.last_insert_id,
Ok(CreatedChannelMessage {
message_id,
participant_connection_ids,
channel_members,
))
notifications,
})
})
.await
}
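
Before persisting mentions, `create_channel_message` drops any mention whose byte range does not land on UTF-8 character boundaries of the body, so malformed client offsets cannot split a multi-byte character. A hedged standalone sketch of that filter (not part of this diff; `valid_mention_ranges` is a hypothetical helper and `(usize, usize)` stands in for `proto::Range`):

// Illustrative sketch only, not part of the diff: keep only mention ranges
// whose start and end are char boundaries of the message body.
fn valid_mention_ranges(body: &str, ranges: &[(usize, usize)]) -> Vec<(usize, usize)> {
    ranges
        .iter()
        .copied()
        .filter(|&(start, end)| body.is_char_boundary(start) && body.is_char_boundary(end))
        .collect()
}

fn main() {
    let body = "hi @café";
    // Byte offset 8 falls inside the two-byte 'é', so that range is dropped;
    // offset 9 is the end of the character and is kept.
    let kept = valid_mention_ranges(body, &[(3, 9), (3, 8)]);
    assert_eq!(kept, vec![(3, 9)]);
}
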
@ -197,11 +326,24 @@ impl Database {
channel_id: ChannelId,
user_id: UserId,
message_id: MessageId,
) -> Result<()> {
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
self.observe_channel_message_internal(channel_id, user_id, message_id, &*tx)
.await?;
Ok(())
let mut batch = NotificationBatch::default();
batch.extend(
self.mark_notification_as_read(
user_id,
&Notification::ChannelMessageMention {
message_id: message_id.to_proto(),
sender_id: Default::default(),
channel_id: Default::default(),
},
&*tx,
)
.await?,
);
Ok(batch)
})
.await
}
@ -337,8 +479,23 @@ impl Database {
.filter(channel_message::Column::SenderId.eq(user_id))
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("no such message"))?;
let channel = self.get_channel_internal(channel_id, &*tx).await?;
if self
.check_user_is_channel_admin(&channel, user_id, &*tx)
.await
.is_ok()
{
let result = channel_message::Entity::delete_by_id(message_id)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("no such message"))?;
}
} else {
Err(anyhow!("operation could not be completed"))?;
}
}
Ok(participant_connection_ids)

View file

@ -0,0 +1,262 @@
use super::*;
use rpc::Notification;
impl Database {
pub async fn initialize_notification_kinds(&mut self) -> Result<()> {
notification_kind::Entity::insert_many(Notification::all_variant_names().iter().map(
|kind| notification_kind::ActiveModel {
name: ActiveValue::Set(kind.to_string()),
..Default::default()
},
))
.on_conflict(OnConflict::new().do_nothing().to_owned())
.exec_without_returning(&self.pool)
.await?;
let mut rows = notification_kind::Entity::find().stream(&self.pool).await?;
while let Some(row) = rows.next().await {
let row = row?;
self.notification_kinds_by_name.insert(row.name, row.id);
}
for name in Notification::all_variant_names() {
if let Some(id) = self.notification_kinds_by_name.get(*name).copied() {
self.notification_kinds_by_id.insert(id, name);
}
}
Ok(())
}
pub async fn get_notifications(
&self,
recipient_id: UserId,
limit: usize,
before_id: Option<NotificationId>,
) -> Result<Vec<proto::Notification>> {
self.transaction(|tx| async move {
let mut result = Vec::new();
let mut condition =
Condition::all().add(notification::Column::RecipientId.eq(recipient_id));
if let Some(before_id) = before_id {
condition = condition.add(notification::Column::Id.lt(before_id));
}
let mut rows = notification::Entity::find()
.filter(condition)
.order_by_desc(notification::Column::Id)
.limit(limit as u64)
.stream(&*tx)
.await?;
while let Some(row) = rows.next().await {
let row = row?;
let kind = row.kind;
if let Some(proto) = model_to_proto(self, row) {
result.push(proto);
} else {
log::warn!("unknown notification kind {:?}", kind);
}
}
result.reverse();
Ok(result)
})
.await
}
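
`get_notifications` pages backwards with a keyset cursor: it fetches the newest `limit` rows with an id below the optional `before_id`, then reverses them so the caller receives ascending order. A minimal in-memory sketch of that cursor logic (not part of this diff; `page_before` is a hypothetical helper and `u64` stands in for `NotificationId`):

// Illustrative sketch only, not part of the diff: keyset pagination over ids,
// newest-first fetch followed by a reverse into chronological order.
fn page_before(ids: &[u64], limit: usize, before_id: Option<u64>) -> Vec<u64> {
    let mut page: Vec<u64> = ids
        .iter()
        .copied()
        .filter(|&id| before_id.map_or(true, |cursor| id < cursor))
        .collect();
    page.sort_unstable_by(|a, b| b.cmp(a)); // order_by_desc(Id)
    page.truncate(limit);                   // limit(limit)
    page.reverse();                         // oldest-to-newest for the client
    page
}

fn main() {
    let ids = [1, 2, 3, 4, 5, 6];
    assert_eq!(page_before(&ids, 3, None), vec![4, 5, 6]);
    // The next page passes the smallest id it has already seen as the cursor.
    assert_eq!(page_before(&ids, 3, Some(4)), vec![1, 2, 3]);
}
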
/// Create a notification. If `avoid_duplicates` is set to true, then avoid
/// creating a new notification if the given recipient already has an
/// unread notification with the given kind and entity id.
pub async fn create_notification(
&self,
recipient_id: UserId,
notification: Notification,
avoid_duplicates: bool,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
if avoid_duplicates {
if self
.find_notification(recipient_id, &notification, tx)
.await?
.is_some()
{
return Ok(None);
}
}
let proto = notification.to_proto();
let kind = notification_kind_from_proto(self, &proto)?;
let model = notification::ActiveModel {
recipient_id: ActiveValue::Set(recipient_id),
kind: ActiveValue::Set(kind),
entity_id: ActiveValue::Set(proto.entity_id.map(|id| id as i32)),
content: ActiveValue::Set(proto.content.clone()),
..Default::default()
}
.save(&*tx)
.await?;
Ok(Some((
recipient_id,
proto::Notification {
id: model.id.as_ref().to_proto(),
kind: proto.kind,
timestamp: model.created_at.as_ref().assume_utc().unix_timestamp() as u64,
is_read: false,
response: None,
content: proto.content,
entity_id: proto.entity_id,
},
)))
}
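
The `avoid_duplicates` flag described above is a check-then-insert inside the surrounding transaction: if the recipient already has an unread notification with the same kind and entity id, no new row is written. A hedged sketch of that guard (not part of this diff; the `HashSet` and string key stand in for the unread-notification query):

use std::collections::HashSet;

// Illustrative sketch only, not part of the diff: skip inserting a
// notification when an equivalent unread one already exists.
fn create_notification(
    unread: &mut HashSet<(u64, String)>, // (recipient_id, dedup key) stand-in
    recipient_id: u64,
    key: &str,
    avoid_duplicates: bool,
) -> bool {
    if avoid_duplicates && unread.contains(&(recipient_id, key.to_string())) {
        return false; // mirrors the early `return Ok(None)` above
    }
    unread.insert((recipient_id, key.to_string()))
}

fn main() {
    let mut unread = HashSet::new();
    assert!(create_notification(&mut unread, 7, "contact-request:3", true));
    // A second identical request is dropped instead of stacking up.
    assert!(!create_notification(&mut unread, 7, "contact-request:3", true));
}
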
/// Remove an unread notification with the given recipient, kind and
/// entity id.
pub async fn remove_notification(
&self,
recipient_id: UserId,
notification: Notification,
tx: &DatabaseTransaction,
) -> Result<Option<NotificationId>> {
let id = self
.find_notification(recipient_id, &notification, tx)
.await?;
if let Some(id) = id {
notification::Entity::delete_by_id(id).exec(tx).await?;
}
Ok(id)
}
/// Populate the response for the notification with the given kind and
/// entity id.
pub async fn mark_notification_as_read_with_response(
&self,
recipient_id: UserId,
notification: &Notification,
response: bool,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
self.mark_notification_as_read_internal(recipient_id, notification, Some(response), tx)
.await
}
pub async fn mark_notification_as_read(
&self,
recipient_id: UserId,
notification: &Notification,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
self.mark_notification_as_read_internal(recipient_id, notification, None, tx)
.await
}
pub async fn mark_notification_as_read_by_id(
&self,
recipient_id: UserId,
notification_id: NotificationId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let row = notification::Entity::update(notification::ActiveModel {
id: ActiveValue::Unchanged(notification_id),
recipient_id: ActiveValue::Unchanged(recipient_id),
is_read: ActiveValue::Set(true),
..Default::default()
})
.exec(&*tx)
.await?;
Ok(model_to_proto(self, row)
.map(|notification| (recipient_id, notification))
.into_iter()
.collect())
})
.await
}
async fn mark_notification_as_read_internal(
&self,
recipient_id: UserId,
notification: &Notification,
response: Option<bool>,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
if let Some(id) = self
.find_notification(recipient_id, notification, &*tx)
.await?
{
let row = notification::Entity::update(notification::ActiveModel {
id: ActiveValue::Unchanged(id),
recipient_id: ActiveValue::Unchanged(recipient_id),
is_read: ActiveValue::Set(true),
response: if let Some(response) = response {
ActiveValue::Set(Some(response))
} else {
ActiveValue::NotSet
},
..Default::default()
})
.exec(tx)
.await?;
Ok(model_to_proto(self, row).map(|notification| (recipient_id, notification)))
} else {
Ok(None)
}
}
/// Find an unread notification by its recipient, kind and entity id.
async fn find_notification(
&self,
recipient_id: UserId,
notification: &Notification,
tx: &DatabaseTransaction,
) -> Result<Option<NotificationId>> {
let proto = notification.to_proto();
let kind = notification_kind_from_proto(self, &proto)?;
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryIds {
Id,
}
Ok(notification::Entity::find()
.select_only()
.column(notification::Column::Id)
.filter(
Condition::all()
.add(notification::Column::RecipientId.eq(recipient_id))
.add(notification::Column::IsRead.eq(false))
.add(notification::Column::Kind.eq(kind))
.add(if proto.entity_id.is_some() {
notification::Column::EntityId.eq(proto.entity_id)
} else {
notification::Column::EntityId.is_null()
}),
)
.into_values::<_, QueryIds>()
.one(&*tx)
.await?)
}
}
fn model_to_proto(this: &Database, row: notification::Model) -> Option<proto::Notification> {
let kind = this.notification_kinds_by_id.get(&row.kind)?;
Some(proto::Notification {
id: row.id.to_proto(),
kind: kind.to_string(),
timestamp: row.created_at.assume_utc().unix_timestamp() as u64,
is_read: row.is_read,
response: row.response,
content: row.content,
entity_id: row.entity_id.map(|id| id as u64),
})
}
fn notification_kind_from_proto(
this: &Database,
proto: &proto::Notification,
) -> Result<NotificationKindId> {
Ok(this
.notification_kinds_by_name
.get(&proto.kind)
.copied()
.ok_or_else(|| anyhow!("invalid notification kind {:?}", proto.kind))?)
}

View file

@ -50,10 +50,10 @@ impl Database {
.map(|participant| participant.user_id),
);
let (channel_id, room) = self.get_channel_room(room_id, &tx).await?;
let (channel, room) = self.get_channel_room(room_id, &tx).await?;
let channel_members;
if let Some(channel_id) = channel_id {
channel_members = self.get_channel_members_internal(channel_id, &tx).await?;
if let Some(channel) = &channel {
channel_members = self.get_channel_participants(channel, &tx).await?;
} else {
channel_members = Vec::new();
@ -69,7 +69,7 @@ impl Database {
Ok(RefreshedRoom {
room,
channel_id,
channel_id: channel.map(|channel| channel.id),
channel_members,
stale_participant_user_ids,
canceled_calls_to_user_ids,
@ -298,98 +298,137 @@ impl Database {
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryParticipantIndices {
ParticipantIndex,
if channel_id.is_some() {
Err(anyhow!("tried to join channel call directly"))?
}
let existing_participant_indices: Vec<i32> = room_participant::Entity::find()
.filter(
room_participant::Column::RoomId
.eq(room_id)
.and(room_participant::Column::ParticipantIndex.is_not_null()),
)
.select_only()
.column(room_participant::Column::ParticipantIndex)
.into_values::<_, QueryParticipantIndices>()
.all(&*tx)
let participant_index = self
.get_next_participant_index_internal(room_id, &*tx)
.await?;
let mut participant_index = 0;
while existing_participant_indices.contains(&participant_index) {
participant_index += 1;
}
if let Some(channel_id) = channel_id {
self.check_user_is_channel_member(channel_id, user_id, &*tx)
.await?;
room_participant::Entity::insert_many([room_participant::ActiveModel {
room_id: ActiveValue::set(room_id),
user_id: ActiveValue::set(user_id),
let result = room_participant::Entity::update_many()
.filter(
Condition::all()
.add(room_participant::Column::RoomId.eq(room_id))
.add(room_participant::Column::UserId.eq(user_id))
.add(room_participant::Column::AnsweringConnectionId.is_null()),
)
.set(room_participant::ActiveModel {
participant_index: ActiveValue::Set(Some(participant_index)),
answering_connection_id: ActiveValue::set(Some(connection.id as i32)),
answering_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
answering_connection_lost: ActiveValue::set(false),
calling_user_id: ActiveValue::set(user_id),
calling_connection_id: ActiveValue::set(connection.id as i32),
calling_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
participant_index: ActiveValue::Set(Some(participant_index)),
..Default::default()
}])
.on_conflict(
OnConflict::columns([room_participant::Column::UserId])
.update_columns([
room_participant::Column::AnsweringConnectionId,
room_participant::Column::AnsweringConnectionServerId,
room_participant::Column::AnsweringConnectionLost,
room_participant::Column::ParticipantIndex,
])
.to_owned(),
)
})
.exec(&*tx)
.await?;
} else {
let result = room_participant::Entity::update_many()
.filter(
Condition::all()
.add(room_participant::Column::RoomId.eq(room_id))
.add(room_participant::Column::UserId.eq(user_id))
.add(room_participant::Column::AnsweringConnectionId.is_null()),
)
.set(room_participant::ActiveModel {
participant_index: ActiveValue::Set(Some(participant_index)),
answering_connection_id: ActiveValue::set(Some(connection.id as i32)),
answering_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
answering_connection_lost: ActiveValue::set(false),
..Default::default()
})
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("room does not exist or was already joined"))?;
}
if result.rows_affected == 0 {
Err(anyhow!("room does not exist or was already joined"))?;
}
let room = self.get_room(room_id, &tx).await?;
let channel_members = if let Some(channel_id) = channel_id {
self.get_channel_members_internal(channel_id, &tx).await?
} else {
Vec::new()
};
Ok(JoinRoom {
room,
channel_id,
channel_members,
channel_id: None,
channel_members: vec![],
})
})
.await
}
async fn get_next_participant_index_internal(
&self,
room_id: RoomId,
tx: &DatabaseTransaction,
) -> Result<i32> {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryParticipantIndices {
ParticipantIndex,
}
let existing_participant_indices: Vec<i32> = room_participant::Entity::find()
.filter(
room_participant::Column::RoomId
.eq(room_id)
.and(room_participant::Column::ParticipantIndex.is_not_null()),
)
.select_only()
.column(room_participant::Column::ParticipantIndex)
.into_values::<_, QueryParticipantIndices>()
.all(&*tx)
.await?;
let mut participant_index = 0;
while existing_participant_indices.contains(&participant_index) {
participant_index += 1;
}
Ok(participant_index)
}
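
`get_next_participant_index_internal` hands out the smallest non-negative index not already in use in the room, so indices are reused as participants leave. A standalone sketch of that search (not part of this diff; `next_participant_index` is a hypothetical helper over a plain slice):

// Illustrative sketch only, not part of the diff: pick the smallest index
// absent from the existing set, as the room-join code above does.
fn next_participant_index(existing: &[i32]) -> i32 {
    let mut index = 0;
    while existing.contains(&index) {
        index += 1;
    }
    index
}

fn main() {
    assert_eq!(next_participant_index(&[]), 0);
    // Index 1 was freed when that participant left, so it is handed out again.
    assert_eq!(next_participant_index(&[0, 2, 3]), 1);
}
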
pub async fn channel_id_for_room(&self, room_id: RoomId) -> Result<Option<ChannelId>> {
self.transaction(|tx| async move {
let room: Option<room::Model> = room::Entity::find()
.filter(room::Column::Id.eq(room_id))
.one(&*tx)
.await?;
Ok(room.and_then(|room| room.channel_id))
})
.await
}
pub(crate) async fn join_channel_room_internal(
&self,
room_id: RoomId,
user_id: UserId,
connection: ConnectionId,
tx: &DatabaseTransaction,
) -> Result<JoinRoom> {
let participant_index = self
.get_next_participant_index_internal(room_id, &*tx)
.await?;
room_participant::Entity::insert_many([room_participant::ActiveModel {
room_id: ActiveValue::set(room_id),
user_id: ActiveValue::set(user_id),
answering_connection_id: ActiveValue::set(Some(connection.id as i32)),
answering_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
answering_connection_lost: ActiveValue::set(false),
calling_user_id: ActiveValue::set(user_id),
calling_connection_id: ActiveValue::set(connection.id as i32),
calling_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
participant_index: ActiveValue::Set(Some(participant_index)),
..Default::default()
}])
.on_conflict(
OnConflict::columns([room_participant::Column::UserId])
.update_columns([
room_participant::Column::AnsweringConnectionId,
room_participant::Column::AnsweringConnectionServerId,
room_participant::Column::AnsweringConnectionLost,
room_participant::Column::ParticipantIndex,
])
.to_owned(),
)
.exec(&*tx)
.await?;
let (channel, room) = self.get_channel_room(room_id, &tx).await?;
let channel = channel.ok_or_else(|| anyhow!("no channel for room"))?;
let channel_members = self.get_channel_participants(&channel, &*tx).await?;
Ok(JoinRoom {
room,
channel_id: Some(channel.id),
channel_members,
})
}
pub async fn rejoin_room(
&self,
rejoin_room: proto::RejoinRoom,
@ -679,16 +718,16 @@ impl Database {
});
}
let (channel_id, room) = self.get_channel_room(room_id, &tx).await?;
let channel_members = if let Some(channel_id) = channel_id {
self.get_channel_members_internal(channel_id, &tx).await?
let (channel, room) = self.get_channel_room(room_id, &tx).await?;
let channel_members = if let Some(channel) = &channel {
self.get_channel_participants(&channel, &tx).await?
} else {
Vec::new()
};
Ok(RejoinedRoom {
room,
channel_id,
channel_id: channel.map(|channel| channel.id),
channel_members,
rejoined_projects,
reshared_projects,
@ -830,7 +869,7 @@ impl Database {
.exec(&*tx)
.await?;
let (channel_id, room) = self.get_channel_room(room_id, &tx).await?;
let (channel, room) = self.get_channel_room(room_id, &tx).await?;
let deleted = if room.participants.is_empty() {
let result = room::Entity::delete_by_id(room_id).exec(&*tx).await?;
result.rows_affected > 0
@ -838,14 +877,14 @@ impl Database {
false
};
let channel_members = if let Some(channel_id) = channel_id {
self.get_channel_members_internal(channel_id, &tx).await?
let channel_members = if let Some(channel) = &channel {
self.get_channel_participants(channel, &tx).await?
} else {
Vec::new()
};
let left_room = LeftRoom {
room,
channel_id,
channel_id: channel.map(|channel| channel.id),
channel_members,
left_projects,
canceled_calls_to_user_ids,
@ -1033,7 +1072,7 @@ impl Database {
&self,
room_id: RoomId,
tx: &DatabaseTransaction,
) -> Result<(Option<ChannelId>, proto::Room)> {
) -> Result<(Option<channel::Model>, proto::Room)> {
let db_room = room::Entity::find_by_id(room_id)
.one(tx)
.await?
@ -1142,9 +1181,16 @@ impl Database {
project_id: db_follower.project_id.to_proto(),
});
}
drop(db_followers);
let channel = if let Some(channel_id) = db_room.channel_id {
Some(self.get_channel_internal(channel_id, &*tx).await?)
} else {
None
};
Ok((
db_room.channel_id,
channel,
proto::Room {
id: db_room.id.to_proto(),
live_kit_room: db_room.live_kit_room,

View file

@ -7,11 +7,13 @@ pub mod channel_buffer_collaborator;
pub mod channel_chat_participant;
pub mod channel_member;
pub mod channel_message;
pub mod channel_path;
pub mod channel_message_mention;
pub mod contact;
pub mod feature_flag;
pub mod follower;
pub mod language_server;
pub mod notification;
pub mod notification_kind;
pub mod observed_buffer_edits;
pub mod observed_channel_messages;
pub mod project;

View file

@ -1,4 +1,4 @@
use crate::db::ChannelId;
use crate::db::{ChannelId, ChannelVisibility};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
@ -7,6 +7,29 @@ pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelId,
pub name: String,
pub visibility: ChannelVisibility,
pub parent_path: String,
}
impl Model {
pub fn parent_id(&self) -> Option<ChannelId> {
self.ancestors().last()
}
pub fn ancestors(&self) -> impl Iterator<Item = ChannelId> + '_ {
self.parent_path
.trim_end_matches('/')
.split('/')
.filter_map(|id| Some(ChannelId::from_proto(id.parse().ok()?)))
}
pub fn ancestors_including_self(&self) -> impl Iterator<Item = ChannelId> + '_ {
self.ancestors().chain(Some(self.id))
}
pub fn path(&self) -> String {
format!("{}{}/", self.parent_path, self.id)
}
}
impl ActiveModelBehavior for ActiveModel {}
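
The new `parent_path` column is a materialized path: the slash-separated ids of every ancestor, ending in a slash, so a channel's own path is its parent's path plus its id. A hedged standalone sketch of the same encoding (not part of this diff; plain `u64` ids replace `ChannelId` and the struct is a stand-in for the SeaORM model):

// Illustrative sketch only, not part of the diff: the materialized-path
// encoding used by the channels table.
struct Channel {
    id: u64,
    parent_path: String, // e.g. "1/2/" for a channel nested under 1 then 2
}

impl Channel {
    fn ancestors(&self) -> Vec<u64> {
        self.parent_path
            .trim_end_matches('/')
            .split('/')
            .filter_map(|id| id.parse().ok())
            .collect()
    }

    fn parent_id(&self) -> Option<u64> {
        self.ancestors().last().copied()
    }

    fn path(&self) -> String {
        format!("{}{}/", self.parent_path, self.id)
    }
}

fn main() {
    let channel = Channel { id: 3, parent_path: "1/2/".into() };
    assert_eq!(channel.ancestors(), vec![1, 2]);
    assert_eq!(channel.parent_id(), Some(2));
    // A prefix match on "1/2/3/" selects every descendant of this channel.
    assert_eq!(channel.path(), "1/2/3/");
}
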

View file

@ -1,7 +1,7 @@
use crate::db::{channel_member, ChannelId, ChannelMemberId, UserId};
use crate::db::{channel_member, ChannelId, ChannelMemberId, ChannelRole, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_members")]
pub struct Model {
#[sea_orm(primary_key)]
@ -9,7 +9,7 @@ pub struct Model {
pub channel_id: ChannelId,
pub user_id: UserId,
pub accepted: bool,
pub admin: bool,
pub role: ChannelRole,
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,43 @@
use crate::db::{MessageId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_message_mentions")]
pub struct Model {
#[sea_orm(primary_key)]
pub message_id: MessageId,
#[sea_orm(primary_key)]
pub start_offset: i32,
pub end_offset: i32,
pub user_id: UserId,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel_message::Entity",
from = "Column::MessageId",
to = "super::channel_message::Column::Id"
)]
Message,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
MentionedUser,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Message.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::MentionedUser.def()
}
}

View file

@ -0,0 +1,29 @@
use crate::db::{NotificationId, NotificationKindId, UserId};
use sea_orm::entity::prelude::*;
use time::PrimitiveDateTime;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "notifications")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: NotificationId,
pub created_at: PrimitiveDateTime,
pub recipient_id: UserId,
pub kind: NotificationKindId,
pub entity_id: Option<i32>,
pub content: String,
pub is_read: bool,
pub response: Option<bool>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::RecipientId",
to = "super::user::Column::Id"
)]
Recipient,
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -1,15 +1,15 @@
use crate::db::ChannelId;
use crate::db::NotificationKindId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_paths")]
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "notification_kinds")]
pub struct Model {
#[sea_orm(primary_key)]
pub id_path: String,
pub channel_id: ChannelId,
pub id: NotificationKindId,
pub name: String,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -7,10 +7,12 @@ mod message_tests;
use super::*;
use gpui::executor::Background;
use parking_lot::Mutex;
use rpc::proto::ChannelEdge;
use sea_orm::ConnectionTrait;
use sqlx::migrate::MigrateDatabase;
use std::sync::Arc;
use std::sync::{
atomic::{AtomicI32, AtomicU32, Ordering::SeqCst},
Arc,
};
const TEST_RELEASE_CHANNEL: &'static str = "test";
@ -31,7 +33,7 @@ impl TestDb {
let mut db = runtime.block_on(async {
let mut options = ConnectOptions::new(url);
options.max_connections(5);
let db = Database::new(options, Executor::Deterministic(background))
let mut db = Database::new(options, Executor::Deterministic(background))
.await
.unwrap();
let sql = include_str!(concat!(
@ -45,6 +47,7 @@ impl TestDb {
))
.await
.unwrap();
db.initialize_notification_kinds().await.unwrap();
db
});
@ -79,11 +82,12 @@ impl TestDb {
options
.max_connections(5)
.idle_timeout(Duration::from_secs(0));
let db = Database::new(options, Executor::Deterministic(background))
let mut db = Database::new(options, Executor::Deterministic(background))
.await
.unwrap();
let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations");
db.migrate(Path::new(migrations_path), false).await.unwrap();
db.initialize_notification_kinds().await.unwrap();
db
});
@ -148,26 +152,39 @@ impl Drop for TestDb {
}
}
/// The second tuples are (channel_id, parent)
fn graph(channels: &[(ChannelId, &'static str)], edges: &[(ChannelId, ChannelId)]) -> ChannelGraph {
let mut graph = ChannelGraph {
channels: vec![],
edges: vec![],
};
for (id, name) in channels {
graph.channels.push(Channel {
fn channel_tree(channels: &[(ChannelId, &[ChannelId], &'static str, ChannelRole)]) -> Vec<Channel> {
channels
.iter()
.map(|(id, parent_path, name, role)| Channel {
id: *id,
name: name.to_string(),
visibility: ChannelVisibility::Members,
role: *role,
parent_path: parent_path.to_vec(),
})
}
for (channel, parent) in edges {
graph.edges.push(ChannelEdge {
channel_id: channel.to_proto(),
parent_id: parent.to_proto(),
})
}
graph
.collect()
}
static GITHUB_USER_ID: AtomicI32 = AtomicI32::new(5);
async fn new_test_user(db: &Arc<Database>, email: &str) -> UserId {
db.create_user(
email,
false,
NewUserParams {
github_login: email[0..email.find("@").unwrap()].to_string(),
github_user_id: GITHUB_USER_ID.fetch_add(1, SeqCst),
},
)
.await
.unwrap()
.user_id
}
static TEST_CONNECTION_ID: AtomicU32 = AtomicU32::new(1);
fn new_test_connection(server: ServerId) -> ConnectionId {
ConnectionId {
id: TEST_CONNECTION_ID.fetch_add(1, SeqCst),
owner_id: server.0 as u32,
}
}

View file

@ -17,7 +17,6 @@ async fn test_channel_buffers(db: &Arc<Database>) {
NewUserParams {
github_login: "user_a".into(),
github_user_id: 101,
invite_count: 0,
},
)
.await
@ -30,7 +29,6 @@ async fn test_channel_buffers(db: &Arc<Database>) {
NewUserParams {
github_login: "user_b".into(),
github_user_id: 102,
invite_count: 0,
},
)
.await
@ -45,7 +43,6 @@ async fn test_channel_buffers(db: &Arc<Database>) {
NewUserParams {
github_login: "user_c".into(),
github_user_id: 102,
invite_count: 0,
},
)
.await
@ -56,7 +53,7 @@ async fn test_channel_buffers(db: &Arc<Database>) {
let zed_id = db.create_root_channel("zed", a_id).await.unwrap();
db.invite_channel_member(zed_id, b_id, a_id, false)
db.invite_channel_member(zed_id, b_id, a_id, ChannelRole::Member)
.await
.unwrap();
@ -178,7 +175,6 @@ async fn test_channel_buffers_last_operations(db: &Database) {
NewUserParams {
github_login: "user_a".into(),
github_user_id: 101,
invite_count: 0,
},
)
.await
@ -191,7 +187,6 @@ async fn test_channel_buffers_last_operations(db: &Database) {
NewUserParams {
github_login: "user_b".into(),
github_user_id: 102,
invite_count: 0,
},
)
.await
@ -211,7 +206,7 @@ async fn test_channel_buffers_last_operations(db: &Database) {
.await
.unwrap();
db.invite_channel_member(channel, observer_id, user_id, false)
db.invite_channel_member(channel, observer_id, user_id, ChannelRole::Member)
.await
.unwrap();
db.respond_to_channel_invite(channel, observer_id, true)

File diff suppressed because it is too large

View file

@ -22,7 +22,6 @@ async fn test_get_users(db: &Arc<Database>) {
NewUserParams {
github_login: format!("user{i}"),
github_user_id: i,
invite_count: 0,
},
)
.await
@ -88,7 +87,6 @@ async fn test_get_or_create_user_by_github_account(db: &Arc<Database>) {
NewUserParams {
github_login: "login1".into(),
github_user_id: 101,
invite_count: 0,
},
)
.await
@ -101,7 +99,6 @@ async fn test_get_or_create_user_by_github_account(db: &Arc<Database>) {
NewUserParams {
github_login: "login2".into(),
github_user_id: 102,
invite_count: 0,
},
)
.await
@ -156,7 +153,6 @@ async fn test_create_access_tokens(db: &Arc<Database>) {
NewUserParams {
github_login: "u1".into(),
github_user_id: 1,
invite_count: 0,
},
)
.await
@ -238,7 +234,6 @@ async fn test_add_contacts(db: &Arc<Database>) {
NewUserParams {
github_login: format!("user{i}"),
github_user_id: i,
invite_count: 0,
},
)
.await
@ -264,10 +259,7 @@ async fn test_add_contacts(db: &Arc<Database>) {
);
assert_eq!(
db.get_contacts(user_2).await.unwrap(),
&[Contact::Incoming {
user_id: user_1,
should_notify: true
}]
&[Contact::Incoming { user_id: user_1 }]
);
// User 2 dismisses the contact request notification without accepting or rejecting.
@ -280,10 +272,7 @@ async fn test_add_contacts(db: &Arc<Database>) {
.unwrap();
assert_eq!(
db.get_contacts(user_2).await.unwrap(),
&[Contact::Incoming {
user_id: user_1,
should_notify: false
}]
&[Contact::Incoming { user_id: user_1 }]
);
// User can't accept their own contact request
@ -299,7 +288,6 @@ async fn test_add_contacts(db: &Arc<Database>) {
db.get_contacts(user_1).await.unwrap(),
&[Contact::Accepted {
user_id: user_2,
should_notify: true,
busy: false,
}],
);
@ -309,7 +297,6 @@ async fn test_add_contacts(db: &Arc<Database>) {
db.get_contacts(user_2).await.unwrap(),
&[Contact::Accepted {
user_id: user_1,
should_notify: false,
busy: false,
}]
);
@ -326,7 +313,6 @@ async fn test_add_contacts(db: &Arc<Database>) {
db.get_contacts(user_1).await.unwrap(),
&[Contact::Accepted {
user_id: user_2,
should_notify: true,
busy: false,
}]
);
@ -339,7 +325,6 @@ async fn test_add_contacts(db: &Arc<Database>) {
db.get_contacts(user_1).await.unwrap(),
&[Contact::Accepted {
user_id: user_2,
should_notify: false,
busy: false,
}]
);
@ -353,12 +338,10 @@ async fn test_add_contacts(db: &Arc<Database>) {
&[
Contact::Accepted {
user_id: user_2,
should_notify: false,
busy: false,
},
Contact::Accepted {
user_id: user_3,
should_notify: false,
busy: false,
}
]
@ -367,7 +350,6 @@ async fn test_add_contacts(db: &Arc<Database>) {
db.get_contacts(user_3).await.unwrap(),
&[Contact::Accepted {
user_id: user_1,
should_notify: false,
busy: false,
}],
);
@ -383,7 +365,6 @@ async fn test_add_contacts(db: &Arc<Database>) {
db.get_contacts(user_2).await.unwrap(),
&[Contact::Accepted {
user_id: user_1,
should_notify: false,
busy: false,
}]
);
@ -391,7 +372,6 @@ async fn test_add_contacts(db: &Arc<Database>) {
db.get_contacts(user_3).await.unwrap(),
&[Contact::Accepted {
user_id: user_1,
should_notify: false,
busy: false,
}],
);
@ -415,7 +395,6 @@ async fn test_metrics_id(db: &Arc<Database>) {
NewUserParams {
github_login: "person1".into(),
github_user_id: 101,
invite_count: 5,
},
)
.await
@ -431,7 +410,6 @@ async fn test_metrics_id(db: &Arc<Database>) {
NewUserParams {
github_login: "person2".into(),
github_user_id: 102,
invite_count: 5,
},
)
.await
@ -460,7 +438,6 @@ async fn test_project_count(db: &Arc<Database>) {
NewUserParams {
github_login: "admin".into(),
github_user_id: 0,
invite_count: 0,
},
)
.await
@ -472,7 +449,6 @@ async fn test_project_count(db: &Arc<Database>) {
NewUserParams {
github_login: "user".into(),
github_user_id: 1,
invite_count: 0,
},
)
.await
@ -554,7 +530,6 @@ async fn test_fuzzy_search_users() {
NewUserParams {
github_login: github_login.into(),
github_user_id: i as i32,
invite_count: 0,
},
)
.await
@ -596,7 +571,6 @@ async fn test_non_matching_release_channels(db: &Arc<Database>) {
NewUserParams {
github_login: "admin".into(),
github_user_id: 0,
invite_count: 0,
},
)
.await
@ -608,7 +582,6 @@ async fn test_non_matching_release_channels(db: &Arc<Database>) {
NewUserParams {
github_login: "user".into(),
github_user_id: 1,
invite_count: 0,
},
)
.await

Some files were not shown because too many files have changed in this diff