diff --git a/Cargo.lock b/Cargo.lock
index 27bcc3c..4f464b1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -236,9 +236,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.83"
+version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3"
+checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "arboard"
@@ -294,12 +294,11 @@ dependencies = [
[[package]]
name = "async-channel"
-version = "2.3.0"
+version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f2776ead772134d55b62dd45e59a79e21612d85d0af729b8b7d3967d601a62a"
+checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a"
dependencies = [
"concurrent-queue",
- "event-listener 5.3.0",
"event-listener-strategy 0.5.2",
"futures-core",
"pin-project-lite",
@@ -420,7 +419,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -460,7 +459,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -477,7 +476,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -755,7 +754,7 @@ checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -798,9 +797,9 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.0.97"
+version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4"
+checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f"
dependencies = [
"jobserver",
"libc",
@@ -894,7 +893,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -1091,9 +1090,9 @@ dependencies = [
[[package]]
name = "crc32fast"
-version = "1.4.0"
+version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
+checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
@@ -1119,9 +1118,9 @@ dependencies = [
[[package]]
name = "crossbeam-utils"
-version = "0.8.19"
+version = "0.8.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
+checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
[[package]]
name = "crypto-common"
@@ -1202,7 +1201,7 @@ dependencies = [
"proc-macro2",
"proc-macro2-diagnostics",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -1269,9 +1268,9 @@ checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2"
[[package]]
name = "drama_llama"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "475895ffece6688ce87c0a481f4a3f7fb06df1e33a3a3a35b9416c1a2852d2d8"
+checksum = "0e4ebd04fa5ff105e719d706ff3dbf56079a49c55e93a4f816a2ae8d3c8b09f9"
dependencies = [
"clap",
"derive_more",
@@ -1322,7 +1321,7 @@ dependencies = [
"parking_lot",
"percent-encoding",
"raw-window-handle 0.5.2",
- "raw-window-handle 0.6.1",
+ "raw-window-handle 0.6.2",
"ron",
"serde",
"static_assertions",
@@ -1378,7 +1377,7 @@ dependencies = [
"arboard",
"egui",
"log",
- "raw-window-handle 0.6.1",
+ "raw-window-handle 0.6.2",
"serde",
"smithay-clipboard",
"web-time",
@@ -1413,9 +1412,9 @@ dependencies = [
[[package]]
name = "either"
-version = "1.11.0"
+version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2"
+checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b"
[[package]]
name = "emath"
@@ -1454,7 +1453,7 @@ checksum = "5c785274071b1b420972453b306eeca06acf4633829db4223b58a2a8c5953bc4"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -1465,7 +1464,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -1615,9 +1614,9 @@ dependencies = [
[[package]]
name = "figment"
-version = "0.10.18"
+version = "0.10.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d032832d74006f99547004d49410a4b4218e4c33382d56ca3ff89df74f86b953"
+checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3"
dependencies = [
"atomic 0.6.0",
"pear",
@@ -1670,7 +1669,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -1784,7 +1783,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -2327,9 +2326,9 @@ checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb"
[[package]]
name = "instant"
-version = "0.1.12"
+version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
+checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
dependencies = [
"cfg-if",
]
@@ -2465,9 +2464,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "libc"
-version = "0.2.154"
+version = "0.2.155"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
+checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
[[package]]
name = "libloading"
@@ -2528,9 +2527,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "linux-raw-sys"
-version = "0.4.13"
+version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
+checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
[[package]]
name = "litrs"
@@ -2659,9 +2658,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
-version = "0.7.2"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7"
+checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae"
dependencies = [
"adler",
"simd-adler32",
@@ -2748,7 +2747,7 @@ dependencies = [
"ndk-sys",
"num_enum",
"raw-window-handle 0.5.2",
- "raw-window-handle 0.6.1",
+ "raw-window-handle 0.6.2",
"thiserror",
]
@@ -2912,7 +2911,7 @@ dependencies = [
"proc-macro-crate 3.1.0",
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -3102,7 +3101,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -3218,7 +3217,7 @@ dependencies = [
"proc-macro2",
"proc-macro2-diagnostics",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -3250,7 +3249,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -3365,9 +3364,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.82"
+version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b"
+checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43"
dependencies = [
"unicode-ident",
]
@@ -3380,7 +3379,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
"version_check",
"yansi",
]
@@ -3485,9 +3484,9 @@ checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9"
[[package]]
name = "raw-window-handle"
-version = "0.6.1"
+version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cc3bcbdb1ddfc11e700e62968e6b4cc9c75bb466464ad28fb61c5b2c964418b"
+checksum = "20675572f6f24e9e76ef639bc5552774ed45f1c30e2951e1e99c59888861c539"
[[package]]
name = "rayon"
@@ -3573,7 +3572,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -3757,7 +3756,7 @@ dependencies = [
"proc-macro2",
"quote",
"rocket_http",
- "syn 2.0.63",
+ "syn 2.0.65",
"unicode-xid",
"version_check",
]
@@ -3845,7 +3844,7 @@ dependencies = [
"bitflags 2.5.0",
"errno",
"libc",
- "linux-raw-sys 0.4.13",
+ "linux-raw-sys 0.4.14",
"windows-sys 0.52.0",
]
@@ -3876,9 +3875,9 @@ checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d"
[[package]]
name = "rustversion"
-version = "1.0.16"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0"
+checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
[[package]]
name = "ryu"
@@ -3979,22 +3978,22 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
[[package]]
name = "serde"
-version = "1.0.201"
+version = "1.0.202"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c"
+checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.201"
+version = "1.0.202"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865"
+checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -4016,14 +4015,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
name = "serde_spanned"
-version = "0.6.5"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
+checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0"
dependencies = [
"serde",
]
@@ -4154,9 +4153,9 @@ dependencies = [
[[package]]
name = "smol_str"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6845563ada680337a52d43bb0b29f396f2d911616f6573012645b9e3d048a49"
+checksum = "dd538fb6910ac1099850255cf94a94df6551fbdd602454387d0adb2d1ca6dead"
dependencies = [
"serde",
]
@@ -4251,9 +4250,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.63"
+version = "2.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf5be731623ca1a1fb7d8be6f261a3be6d3e2337b8a1f97be944d020c8fcb704"
+checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106"
dependencies = [
"proc-macro2",
"quote",
@@ -4310,22 +4309,22 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "1.0.60"
+version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18"
+checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.60"
+version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524"
+checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -4436,7 +4435,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -4475,21 +4474,21 @@ dependencies = [
[[package]]
name = "toml"
-version = "0.8.12"
+version = "0.8.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
+checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
- "toml_edit 0.22.12",
+ "toml_edit 0.22.13",
]
[[package]]
name = "toml_datetime"
-version = "0.6.5"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
+checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf"
dependencies = [
"serde",
]
@@ -4518,9 +4517,9 @@ dependencies = [
[[package]]
name = "toml_edit"
-version = "0.22.12"
+version = "0.22.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef"
+checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c"
dependencies = [
"indexmap",
"serde",
@@ -4577,7 +4576,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
@@ -4812,7 +4811,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
"wasm-bindgen-shared",
]
@@ -4846,7 +4845,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -5051,7 +5050,7 @@ dependencies = [
"log",
"parking_lot",
"profiling",
- "raw-window-handle 0.6.1",
+ "raw-window-handle 0.6.2",
"smallvec",
"static_assertions",
"wasm-bindgen",
@@ -5079,7 +5078,7 @@ dependencies = [
"once_cell",
"parking_lot",
"profiling",
- "raw-window-handle 0.6.1",
+ "raw-window-handle 0.6.2",
"rustc-hash",
"smallvec",
"thiserror",
@@ -5118,7 +5117,7 @@ dependencies = [
"once_cell",
"parking_lot",
"profiling",
- "raw-window-handle 0.6.1",
+ "raw-window-handle 0.6.2",
"renderdoc-sys",
"rustc-hash",
"smallvec",
@@ -5474,7 +5473,7 @@ dependencies = [
"orbclient",
"percent-encoding",
"raw-window-handle 0.5.2",
- "raw-window-handle 0.6.1",
+ "raw-window-handle 0.6.2",
"redox_syscall 0.3.5",
"rustix 0.38.34",
"sctk-adwaita",
@@ -5708,7 +5707,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.63",
+ "syn 2.0.65",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 213ab79..522c073 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,6 +2,33 @@
name = "weave"
version = "0.1.0"
edition = "2021"
+description = "A tool for collaborative generative writing."
+
+[package.metadata.bundle]
+name = "Weave"
+identifier = "dev.mdegans.weave"
+resources = ["resources"]
+copyright = "2024, Michael de Gans"
+category = "public.app-category.productivity"
+long_description = """
+A tool for collaborative generative writing. It supports multiple generative
+backends such as OpenAI and LLaMA. Stories can branch, allowing for multiple
+paths through a story. It is designed to be used by writers, game developers,
+or anyone who needs to generate text.
+"""
+icon = [
+ # "resources/icon.32.png",
+ # "resources/icon.32@2x.png",
+ # "resources/icon.64.png",
+ # "resources/icon.64@2x.png",
+ # "resources/icon.128.png",
+ # "resources/icon.128@2x.png",
+ "resources/icon.256.png",
+ # "resources/icon.256@2x.png",
+ "resources/icon.512.png",
+ "resources/icon.512@2x.png",
+ # "resources/icon.1024.png",
+]
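+
+# A sketch of how the `[package.metadata.bundle]` table above is consumed,
+# assuming the `cargo-bundle` tool (which reads this table):
+#
+#   cargo install cargo-bundle
+#   cargo bundle --release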
[dependencies]
serde = { version = "1.0", features = ["derive"] }
@@ -40,6 +67,7 @@ openai = [
"dep:futures",
"dep:keyring",
"dep:tokio",
+ "tokio/rt-multi-thread",
]
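+# `rt-multi-thread` is presumably needed so the OpenAI worker (src/openai.rs)
+# can drive its async requests on a multi-threaded tokio runtime from its own
+# thread; this rationale is an inference from the worker code.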
ollama = ["generate", "dep:ollama-rs"]
# TODO: Claude does not yet have a good rust library. Will have to use reqwests
diff --git a/README.md b/README.md
index 6baf1ec..b0b6b88 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,34 @@
# `weave` multiversal writing tool
+![Weave Icon](/resources/icon.inkscape.svg)
+
Weave is a "multiversal" generative tree writing tool akin to [`loom`](https://github.com/socketteer/loom). It supports multiple generative backends such as:
-- [x] [`drama_llama`](https://github.com/mdegans/drama_llama) - llama.cpp wrapper supporting all llama.cpp models
-- [ ] OpenAI models
- - [ ] GPT 3.5 completions
- - [ ] Shim for GPT 4+ chat completions API.
-- [ ] Anthropic models
+- ✅ [`drama_llama`](https://github.com/mdegans/drama_llama) - llama.cpp wrapper supporting all llama.cpp models
+- ✅ OpenAI models
+ - ✅ Shim for GPT 3.5+ chat completions API, including GPT 4o.
+- 🔲 Anthropic models
+
+## Features
+
+Notable features:
+
+- **Live switching of backends** - It's possible to generate part of a story
+ with OpenAI and another part with LLaMA -- all without restarting the app.
+- **Streaming responses** - It's possible to cancel generations in progress --
+ both local and online.
+- **Live editing** - Posts can be edited during generation (though nodes cannot
+  be added or removed), so there is no need to wait for generation to finish
+  before tweaking the text. New tokens are always appended to the end.
-# Features
+Coming soon:
-The goal of `weave` is feature parity with [`loom`](https://github.com/socketteer/loom?tab=readme-ov-file#features).
+- Fine-grained control over sampling for local models, and potentially for
+  remote backends that return logprobs. The code already exists in
+  `drama_llama` but is not yet exposed.
+- Keyboard shortcuts.
+
+Additionally, one goal of `weave` is feature parity with [`loom`](https://github.com/socketteer/loom?tab=readme-ov-file#features).
- ☑️ Read mode
- ✅ Linear story view
@@ -28,9 +46,33 @@ The goal of `weave` is feature parity with [`loom`](https://github.com/sockettee
- 🔲 'Visited' state
- ☑️ Generation
  - 🔲 Generate N children with various models (currently one at a time).
- - ✅ Modify generation settings
+  - ☑️ Modify generation settings (complete for OpenAI, but not yet for LLaMA)
- ☑️ File I/O
- ✅ Serializable application state, including stories, to JSON.
- - 🔲 Open/save trees as JSON files
+ - ✅ Open/save trees as JSON files
- 🔲 Work with trees in multiple tabs
- 🔲 Combine multiple trees
+
+## Notable issues
+
+- On some platforms (like macOS) the Weave icon will change to an `e` shortly
+ after launch. See [this
+ issue](https://github.com/emilk/egui/issues/3823#issuecomment-1892423108) for
+ details.
+- With each new generation, all tokens need to be ingested again by most
+  backends. This is solvable with `drama_llama` (longest prefix cache) but not
+  with the OpenAI API, so for OpenAI it's recommended to generate larger posts.
+  The system prompt is customizable, so you can tweak the agent's instructions
+  on verbosity.
+- It is not currently possible to have a scrollable viewport, so it's
+  recommended to collapse nodes if things get cluttered. This is because the
+  nodes are implemented with [`egui::containers::Window`](https://docs.rs/egui/latest/egui/containers/struct.Window.html), which ignores
+  scroll areas. This is fixable, but not easily or cleanly. When it is
+  resolved, the central panel will be split into story and node views.
+- For the same reason as above, nodes may obscure the generated text view.
+- The `drama_llama` backend will crash if the model's output is not valid
+  unicode. This will be fixed. If this happens, go to settings, switch
+  backends, and then switch back to `drama_llama`.
+- The BOS token is not added for the `drama_llama` backend. This will be added
+  as an option and enabled by default, since most models expect it. Generation
+  will still work, but the quality may be affected.
diff --git a/resources/COPYRIGHT.md b/resources/COPYRIGHT.md
new file mode 100644
index 0000000..2a619f1
--- /dev/null
+++ b/resources/COPYRIGHT.md
@@ -0,0 +1,7 @@
+# Icon Copyright
+
+The app icon was generated by Bing's Copilot and DALL-E 3. It was then
+post-processed in Inkscape to create a vector version, then rasterized at
+several different resolutions.
+
+Because the icon is generated, it effectively falls into the public domain.
diff --git a/resources/icon.1024.png b/resources/icon.1024.png
new file mode 100644
index 0000000..9d23115
Binary files /dev/null and b/resources/icon.1024.png differ
diff --git a/resources/icon.128.png b/resources/icon.128.png
new file mode 100644
index 0000000..990d00e
Binary files /dev/null and b/resources/icon.128.png differ
diff --git a/resources/icon.128@2x.png b/resources/icon.128@2x.png
new file mode 120000
index 0000000..65ede58
--- /dev/null
+++ b/resources/icon.128@2x.png
@@ -0,0 +1 @@
+icon.256.png
\ No newline at end of file
diff --git a/resources/icon.256.png b/resources/icon.256.png
new file mode 100644
index 0000000..acab876
Binary files /dev/null and b/resources/icon.256.png differ
diff --git a/resources/icon.256@2x.png b/resources/icon.256@2x.png
new file mode 120000
index 0000000..f34f0a3
--- /dev/null
+++ b/resources/icon.256@2x.png
@@ -0,0 +1 @@
+icon.512.png
\ No newline at end of file
diff --git a/resources/icon.32.png b/resources/icon.32.png
new file mode 100644
index 0000000..a152403
Binary files /dev/null and b/resources/icon.32.png differ
diff --git a/resources/icon.32@2x.png b/resources/icon.32@2x.png
new file mode 120000
index 0000000..1f4968c
--- /dev/null
+++ b/resources/icon.32@2x.png
@@ -0,0 +1 @@
+icon.64.png
\ No newline at end of file
diff --git a/resources/icon.512.png b/resources/icon.512.png
new file mode 100644
index 0000000..fbab900
Binary files /dev/null and b/resources/icon.512.png differ
diff --git a/resources/icon.512@2x.png b/resources/icon.512@2x.png
new file mode 120000
index 0000000..873a451
--- /dev/null
+++ b/resources/icon.512@2x.png
@@ -0,0 +1 @@
+icon.1024.png
\ No newline at end of file
diff --git a/resources/icon.64.png b/resources/icon.64.png
new file mode 100644
index 0000000..b8b9d9c
Binary files /dev/null and b/resources/icon.64.png differ
diff --git a/resources/icon.64@2x.png b/resources/icon.64@2x.png
new file mode 120000
index 0000000..cac0c2f
--- /dev/null
+++ b/resources/icon.64@2x.png
@@ -0,0 +1 @@
+icon.128.png
\ No newline at end of file
diff --git a/resources/icon.inkscape.svg b/resources/icon.inkscape.svg
new file mode 100644
index 0000000..0458fa2
--- /dev/null
+++ b/resources/icon.inkscape.svg
@@ -0,0 +1,54 @@
diff --git a/src/app.rs b/src/app.rs
index dfb3253..89b97a0 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -21,18 +21,27 @@ pub struct App {
stories: Vec<Story>,
settings: Settings,
sidebar: Sidebar,
+ /// Modal error message text. If this is `Some`, the UI should display an
+ /// error message.
+    errmsg: Option<String>,
#[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
drama_llama_worker: crate::drama_llama::Worker,
#[cfg(feature = "openai")]
openai_worker: crate::openai::Worker,
#[cfg(feature = "generate")]
generation_in_progress: bool,
+ #[cfg(not(target_arch = "wasm32"))]
+    save_dialog: Option<egui_file::FileDialog>,
+ #[cfg(not(target_arch = "wasm32"))]
+ saving_txt: bool,
}
// {"default_author":"","prompt_include_authors":false,"prompt_include_title":false,"selected_generative_backend":"OpenAI","backend_options":{"DramaLlama":{"DramaLlama":{"model":"","predict_options":{"n":512,"seed":1337,"stop_sequences":[],"stop_strings":[],"regex_stop_sequences":[],"sample_options":{"modes":[],"repetition":null}}}},"OpenAI":{"OpenAI":{"settings":{"openai_api_key":"hidden in keyring","chat_arguments":{"model":"gpt-3.5-turbo","messages":[{"role":"system","content":"A user and an assistant are collaborating on a story. The user starts by writing a paragraph, then the assistant writes a paragraph, and so on. Both will be credited for the end result.'"},{"role":"user","content":"Hi, GPT! Let's write a story together."},{"role":"assistant","content":"Sure, I'd love to help. How about you start us off? I'll try to match your tone and style."}],"temperature":1.0,"top_p":1.0,"n":null,"stop":null,"max_tokens":1024,"presence_penalty":0.0,"frequency_penalty":0.0,"user":null}}}}}}
impl App {
pub fn new<'s>(cc: &eframe::CreationContext<'s>) -> Self {
+ let ctx = cc.egui_ctx.clone();
+
let stories = cc
.storage
.map(|storage| {
@@ -80,7 +89,7 @@ impl App {
};
// Handle generation backends
- if let Err(e) = new.start_generative_backend() {
+ if let Err(e) = new.start_generative_backend(ctx) {
eprintln!("Failed to start generative backend: {}", e);
// This is fine. It can be restarted later once settings are fixed
// or the user chooses a different backend.
@@ -104,10 +113,12 @@ impl App {
self.active_story.map(move |i| self.stories.get_mut(i))?
}
- /// Starts the generative backend if it is not already running.
+ /// Starts the generative backend if it is not already running. A context
+ /// is required to request redraws from the worker thread.
#[cfg(feature = "generate")]
pub fn start_generative_backend(
&mut self,
+ context: egui::Context,
) -> Result<(), Box<dyn std::error::Error>> {
log::info!(
"Starting generative backend: {}",
@@ -118,11 +129,11 @@ impl App {
match self.settings.backend_options() {
#[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
settings::BackendOptions::DramaLlama { model, .. } => {
- self.drama_llama_worker.start(model.clone())?;
+ self.drama_llama_worker.start(model.clone(), context)?;
}
#[cfg(feature = "openai")]
settings::BackendOptions::OpenAI { settings } => {
- self.openai_worker.start(&settings.openai_api_key);
+ self.openai_worker.start(&settings.openai_api_key, context);
}
}
@@ -134,9 +145,10 @@ impl App {
#[cfg(feature = "generate")]
pub fn reset_generative_backend(
&mut self,
+ context: egui::Context,
) -> Result<(), Box<dyn std::error::Error>> {
self.shutdown_generative_backend()?;
- self.start_generative_backend()?;
+ self.start_generative_backend(context)?;
Ok(())
}
@@ -154,7 +166,7 @@ impl App {
panic!("Generation already in progress. This is a bug. Please report it.");
}
- #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
+ #[cfg(all(feature = "generate", not(target_arch = "wasm32")))]
{
let include_authors = self.settings.prompt_include_authors;
let include_title = self.settings.prompt_include_title;
@@ -354,7 +366,7 @@ impl App {
match self.sidebar.page {
SidebarPage::Settings => {
if let Some(action) = self.settings.draw(ui) {
- self.handle_settings_action(action);
+ self.handle_settings_action(action, ctx);
}
}
SidebarPage::Stories => {
@@ -364,8 +376,169 @@ impl App {
});
}
+    /// Draws the error message, if any. Returns `true` if an error message is
+    /// displayed. Accepts an optional closure which can be used to draw
+    /// additional UI elements, such as a button to handle the error.
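+    ///
+    /// A usage sketch (the `Retry` button is hypothetical):
+    ///
+    /// ```ignore
+    /// if app.draw_error_message(ctx, Some(Box::new(|ui| {
+    ///     if ui.button("Retry").clicked() {
+    ///         // handle the error here
+    ///     }
+    /// }))) {
+    ///     return; // a modal error is up; skip drawing the rest of the UI
+    /// }
+    /// ```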
+ pub fn draw_error_message(
+ &mut self,
+ ctx: &egui::Context,
+ mut f: Option>,
+ ) -> bool {
+        let mut closed = false; // we can't clear self.errmsg while `msg` borrows it
+ if let Some(msg) = &self.errmsg {
+ egui::CentralPanel::default().show(ctx, |ui| {
+ egui::Window::new("Error").show(ui.ctx(), |ui| {
+ ui.label(msg);
+ ui.horizontal(|ui| {
+ if ui.button("Close").clicked() {
+ closed = true;
+ }
+ if let Some(f) = &mut f {
+ f(ui);
+ }
+ })
+ });
+ });
+ } else {
+ return false;
+ }
+        if closed {
+            self.errmsg = None;
+        }
+        !closed
+ }
+
+ #[cfg(not(target_arch = "wasm32"))]
+ pub fn draw_save_buttons(&mut self, ui: &mut egui::Ui) {
+ ui.label("Save");
+ ui.horizontal(|ui| {
+ let filter = Box::new(move |path: &std::path::Path| {
+ path.extension().map_or(false, |ext| ext == "json")
+ });
+
+ let save_btn = ui
+ .button("Save")
+ .on_hover_text_at_pointer("Save story to JSON.");
+
+ let export = ui
+ .button("Export")
+ .on_hover_text_at_pointer("Export active story path to Markdown.");
+
+ let load_btn = ui
+ .button("Load")
+ .on_hover_text_at_pointer("Load story from JSON.");
+
+            if save_btn.clicked() {
+                let mut dialog = egui_file::FileDialog::save_file(None)
+                    .show_files_filter(filter);
+                dialog.open();
+
+                self.saving_txt = false;
+                self.save_dialog = Some(dialog);
+ } else if load_btn.clicked() {
+ let mut dialog = egui_file::FileDialog::open_file(None)
+ .show_files_filter(filter);
+ dialog.open();
+
+ self.saving_txt = false;
+ self.save_dialog = Some(dialog);
+            } else if export.clicked() {
+                let filter = Box::new(move |path: &std::path::Path| {
+                    path.extension().map_or(false, |ext| ext == "md")
+                });
+
+                let mut dialog = egui_file::FileDialog::save_file(None)
+                    .show_files_filter(filter);
+                dialog.open();
+
+ self.saving_txt = true;
+ self.save_dialog = Some(dialog);
+ }
+
+ if let Some(dialog) = &mut self.save_dialog {
+ if dialog.show(ui.ctx()).selected() {
+ if let Some(path) = dialog.path() {
+ match dialog.dialog_type() {
+ egui_file::DialogType::OpenFile => {
+ let text = match std::fs::read_to_string(path) {
+ Ok(text) => text,
+ Err(e) => {
+ self.errmsg = Some(format!(
+ "Failed to read `{:?}` because: {}",
+ path,
+ e
+ ));
+ return;
+ }
+ };
+                            let story: Story = match serde_json::from_str(&text) {
+ Ok(story) => story,
+ Err(e) => {
+ self.errmsg = Some(format!(
+ "Failed to parse `{:?}` because: {}",
+ path,
+ e
+ ));
+ return;
+ }
+ };
+
+ self.stories.push(story);
+ },
+ egui_file::DialogType::SaveFile => {
+ let active_story_index = match self.active_story {
+ Some(i) => i,
+ None => {
+ self.errmsg = Some("No active story to save.".to_string());
+ return;
+ }
+ };
+
+ let payload = if self.saving_txt {
+ self.stories[active_story_index].to_string()
+ } else {
+ match serde_json::to_string(&self.stories[active_story_index]) {
+ Ok(json) => json,
+ Err(e) => {
+ self.errmsg = Some(format!(
+ "Failed to serialize stories because: {}",
+ e
+ ));
+ return;
+ }
+ }
+ };
+
+ match std::fs::write(path, payload) {
+ Ok(_) => {},
+ Err(e) => {
+ self.errmsg = Some(format!(
+ "Failed to write `{:?}` because: {}",
+ path,
+ e
+ ));
+ return;
+ }
+ }
+ },
+ egui_file::DialogType::SelectFolder => {
+ unreachable!("Because we don't instantiate this type above.")
+ },
+ }
+ }
+ self.save_dialog = None;
+ }
+ }
+ });
+ }
+
/// Handle settings action.
- pub fn handle_settings_action(&mut self, action: settings::Action) {
+ pub fn handle_settings_action(
+ &mut self,
+ action: settings::Action,
+ context: &egui::Context,
+ ) {
match action {
settings::Action::SwitchBackends { from, to } => {
debug_assert!(from != to);
@@ -379,7 +552,7 @@ impl App {
self.settings.selected_generative_backend = to;
- if let Err(e) = self.reset_generative_backend() {
+ if let Err(e) = self.reset_generative_backend(context.clone()) {
eprintln!("Failed to start generative backend: {}", e);
}
@@ -417,6 +590,11 @@ impl App {
}
ui.text_edit_singleline(&mut self.sidebar.title_buf);
});
+
+ // We might not support wasm at all, but if we do this will have to be
+ // implemented differently. Skip it for now.
+ #[cfg(not(target_arch = "wasm32"))]
+ self.draw_save_buttons(ui);
}
/// Draw the central panel.
@@ -574,6 +752,8 @@ impl App {
)
}
crate::openai::Response::Models { models } => {
+                // With only one backend compiled in, this pattern is irrefutable.
+ #[allow(irrefutable_let_patterns)]
if let settings::BackendOptions::OpenAI { settings } =
self.settings.backend_options()
{
@@ -598,6 +778,10 @@ impl eframe::App for App {
ctx: &eframe::egui::Context,
frame: &mut eframe::Frame,
) {
+ if self.draw_error_message(ctx, None) {
+ // An error message is displayed. We skip the rest of the UI.
+ return;
+ }
self.draw_sidebar(ctx, frame);
self.draw_central_panel(ctx, frame);
}
diff --git a/src/app/settings.rs b/src/app/settings.rs
index 428e93d..98fa41d 100644
--- a/src/app/settings.rs
+++ b/src/app/settings.rs
@@ -220,6 +220,9 @@ impl Settings {
) -> Option<Action> {
let mut ret = None;
+ // FIXME: This doesn't display because the backend switch is blocking
+ // and by the time the UI is drawn, the backend has already switched.
+ // Not sure how to fix this easily.
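+    // A possible (untested) fix: when a switch is requested, draw this label,
+    // request a repaint, and defer the actual blocking switch to the next
+    // frame so the message is visible first.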
if let Some(backend) = &self.pending_backend_switch {
ui.label(format!(
"Switching backend to `{}`. Please wait.",
@@ -282,10 +285,7 @@ impl Settings {
ui.label(format!("Model: {:?}", model));
if ui.button("Change model").clicked() {
let filter = move |path: &std::path::Path| {
- path.extension()
- .and_then(std::ffi::OsStr::to_str)
- .map(|ext| ext == "gguf")
- .unwrap_or(false)
+ path.extension().map_or(false, |ext| ext == "gguf")
};
let start = if model.as_os_str().is_empty() {
None
@@ -307,6 +307,7 @@ impl Settings {
path,
)
}
+ *file_dialog = None;
}
}
@@ -443,31 +444,6 @@ impl Settings {
None
}
- /// Configure model-specific settings when a local model is loaded. It will:
- /// * Set the model path if the model is valid.
- /// * Set the maximum context size if the model is valid.
- ///
- /// This can block, but only briefly. Mmap is used by default and we're just
- /// reading the metadata. Call it on setup, from the worker thread, or from
- /// the main thread if it's really necessary.
- // Like above in the draw code. If we're changing the model we do need to
- // validate it and the api doesn't allow us to do that without blockign
- // currently.
- #[cfg(feature = "generate")]
- pub fn configure_for_new_local_model(&mut self, path: &std::path::Path) {
- match self.backend_options() {
- #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
- BackendOptions::DramaLlama {
- model,
- max_context_size,
- ..
- } => {
- Self::drama_llama_helper(model, max_context_size, path);
- }
- _ => {}
- }
- }
-
/// This should be called once on startup to configure the backend settings,
/// for example, validating a local model or fetching a list of models from
/// OpenAI.
diff --git a/src/drama_llama.rs b/src/drama_llama.rs
index 0745f57..8cc8ee0 100644
--- a/src/drama_llama.rs
+++ b/src/drama_llama.rs
@@ -2,21 +2,23 @@ use std::{path::PathBuf, sync::mpsc::TryRecvError};
use drama_llama::{Engine, PredictOptions};
-/// A request to the worker thread (from another thread).
+/// A request to the [`Worker`] thread (from another thread).
#[derive(Debug)]
pub(crate) enum Request {
+ /// The [`Worker`] should cancel the current generation.
Stop,
+ /// The [`Worker`] should continue the `text` with the given `opts`.
Predict { text: String, opts: PredictOptions },
}
-/// A response from the worker thread (to another thread).
+/// A response from the [`Worker`] thread (to another thread).
#[derive(Debug)]
pub(crate) enum Response {
- /// Worker is done and can accept new requests.
+ /// [`Worker`] is done and can accept new requests.
Done,
- /// The worker is busy and cannot accept new requests.
+ /// The [`Worker`] is busy and cannot accept new requests.
Busy { request: Request },
- /// The worker has predicted a piece of text.
+ /// The [`Worker`] has predicted a piece of text.
Predicted { piece: String },
}
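+
+// A sketch of the round trip as seen from the UI thread. The channel and
+// story names here are assumptions for illustration, not the real fields:
+//
+//   to_worker.send(Request::Predict { text, opts })?;
+//   while let Ok(response) = from_worker.recv() {
+//       match response {
+//           Response::Predicted { piece } => story.push_str(&piece),
+//           Response::Busy { .. } => { /* worker rejected the request */ }
+//           Response::Done => break,
+//       }
+//   }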
@@ -41,7 +43,11 @@ impl Worker {
// a whole bunch of stuff and likely introduce async rumble jumble. It may
// not be worth it since blocking is so rare. It only happens on shutdown or
// model change, and only then in the middle of an inference.
- pub fn start(&mut self, model: PathBuf) -> Result<(), std::io::Error> {
+ pub fn start(
+ &mut self,
+ model: PathBuf,
+ context: egui::Context,
+ ) -> Result<(), std::io::Error> {
// Loading is impossible
if !model.exists() {
return Err(std::io::Error::new(
@@ -83,6 +89,7 @@ impl Worker {
let (text, opts) = match msg {
Request::Stop => {
to_main.send(Response::Done).ok();
+ context.request_repaint();
break;
}
Request::Predict { text, opts } => {
@@ -136,15 +143,25 @@ impl Worker {
to_main
.send(Response::Busy { request: command })
.ok();
+ context.request_repaint();
}
}
// Send the predicted piece back to the main thread.
to_main.send(Response::Predicted { piece }).ok();
+ context.request_repaint();
}
// We are ready for the next command.
to_main.send(Response::Done).ok();
+            // When we're done we should repaint the UI: once immediately, and
+            // once after a short delay so the main thread has time to process
+            // the message first; otherwise we could redraw before the last
+            // token is added. 100ms should be enough time.
+ context.request_repaint();
+ context.request_repaint_after(
+ std::time::Duration::from_millis(100),
+ );
}
});
diff --git a/src/openai.rs b/src/openai.rs
index 0768ba8..4a643bd 100644
--- a/src/openai.rs
+++ b/src/openai.rs
@@ -480,7 +480,7 @@ pub(crate) struct Worker {
impl Worker {
/// Start the worker thread. If the worker is already alive, this is a
/// no-op. Use `restart` to restart the worker.
- pub fn start(&mut self, api_key: &str) {
+ pub fn start(&mut self, api_key: &str, context: egui::Context) {
let api_key = api_key.to_string();
if self.is_alive() {
log::debug!("Worker is already alive");
@@ -521,6 +521,7 @@ impl Worker {
// made so we can support multiple "heads" and lock the UI
// appropriately.
while let Some(request) = from_main.next().await {
+ // Process the request.
let send_response = match request {
Request::Stop => {
// We are already stopped. We just tell main we're
@@ -606,7 +607,9 @@ impl Worker {
match to_main
.send(Response::Predicted { piece: delta })
.await {
- Ok(_) => {}
+ Ok(_) => {
+ context.request_repaint();
+ }
Err(e) => {
log::error!(
"Couldn't send predicted piece: {}",
@@ -636,10 +639,16 @@ impl Worker {
}
};
+ // We have sent a response. Was it successful?
match send_response {
Ok(_) => {
// Response sent successfully. We can now accept the
- // next request.
+                    // next request. Whenever the main thread receives
+                    // a message, we should repaint the UI. We'll use a
+                    // slight delay to make sure the main thread has
+                    // time to process the message. This is only fired
+                    // when the response was sent successfully.
+ context.request_repaint_after(std::time::Duration::from_millis(100));
}
Err(e) => {
if e.is_disconnected() {