From 0fb0cb0cb792773a6b162d42863d5675a0d44b79 Mon Sep 17 00:00:00 2001
From: Michael de Gans
Date: Tue, 21 May 2024 23:26:35 -0700
Subject: [PATCH] CI WIP

I don't really think this will work the first time. YOLO
---
 .github/workflows/mac_os.yml | 149 ++++++++++++++++--
 Cargo.lock                   |  12 ++
 Cargo.toml                   |  19 ++-
 src/app.rs                   | 285 +++++++++++++++++++++++++++++------
 src/app/settings.rs          |  68 ++++++---
 src/lib.rs                   |   6 +-
 src/node.rs                  |  10 ++
 src/openai.rs                |  54 ++++---
 src/story.rs                 |   5 +-
 9 files changed, 497 insertions(+), 111 deletions(-)

diff --git a/.github/workflows/mac_os.yml b/.github/workflows/mac_os.yml
index 786326e..c90109f 100644
--- a/.github/workflows/mac_os.yml
+++ b/.github/workflows/mac_os.yml
@@ -1,10 +1,17 @@
 # Copilot generated GitHub Actions workflow for macOS
-name: Rust Tests and Coverage
+# Thank you Bing's Copilot!
+name: Rust Tests, Build, and Release

-on: [push, pull_request]
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main

 jobs:
-  build:
+  mac-test:
     runs-on: macos-latest
     steps:
       - uses: actions/checkout@v2
@@ -14,10 +21,134 @@ jobs:
         run: cargo install cargo-llvm-cov
       - name: Run tests with coverage
         run: cargo llvm-cov --workspace --lcov --output-path ./target/lcov.info
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v2
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v4.0.1
         with:
-          files: ./target/lcov.info
-          flags: unittests
-          name: codecov-umbrella
-          fail_ci_if_error: true
+          token: ${{ secrets.CODECOV_TOKEN }}
+  mac-build:
+    runs-on: macos-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install cargo bundle
+        run: cargo install cargo-bundle
+      - name: Build macOS binary
+        run: cargo bundle --release --features "gui openai drama_llama"
+  mac-release:
+    needs: [mac-test, mac-build]
+    runs-on: macos-latest
+    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
+    steps:
+      - uses: actions/checkout@v2
+      - name: Create macOS release bundle
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ github.sha }}
+          release_name: Release ${{ github.sha }}
+          draft: false
+          prerelease: false
+      - name: Upload macOS release bundle
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }}
+          asset_path: target/release/bundle/osx/Weave.app
+          asset_name: Weave.app
+          asset_content_type: application/octet-stream
+  linux-test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install LLVM tools
+        run: sudo apt-get install llvm
+      - name: Install cargo-llvm-cov
+        run: cargo install cargo-llvm-cov
+      - name: Run tests with coverage
+        run: cargo llvm-cov --workspace --lcov --output-path ./target/lcov.info
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v4.0.1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+  linux-build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install cargo bundle
+        run: cargo install cargo-bundle
+      - name: Build Linux binary
+        run: cargo bundle --release --features "gui openai drama_llama"
+  linux-release:
+    needs: [linux-test, linux-build]
+    runs-on: ubuntu-latest
+    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
+    steps:
+      - uses: actions/checkout@v2
+      - name: Create Linux release bundle
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ github.sha }}
+          release_name: Release ${{ github.sha }}
+          draft: false
+          prerelease: false
+      - name: Upload Linux release bundle
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }}
+          asset_path: target/release/bundle/linux/Weave
+          asset_name: Weave
+          asset_content_type: application/octet-stream
+  windows-test:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install LLVM tools
+        run: choco install llvm
+      - name: Install cargo-llvm-cov
+        run: cargo install cargo-llvm-cov
+      - name: Run tests with coverage
+        run: cargo llvm-cov --workspace --lcov --output-path ./target/lcov.info
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v4.0.1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+  windows-build:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install cargo bundle
+        run: cargo install cargo-bundle
+      - name: Build Windows binary
+        run: cargo bundle --release --features "gui openai drama_llama"
+  windows-release:
+    needs: [windows-test, windows-build]
+    runs-on: windows-latest
+    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
+    steps:
+      - uses: actions/checkout@v2
+      - name: Create Windows release bundle
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ github.sha }}
+          release_name: Release ${{ github.sha }}
+          draft: false
+          prerelease: false
+      - name: Upload Windows release bundle
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }}
+          asset_path: target/release/bundle/windows/Weave.exe
+          asset_name: Weave.exe
+          asset_content_type: application/octet-stream
diff --git a/Cargo.lock b/Cargo.lock
index 4f464b1..bf24c15 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1320,6 +1320,7 @@ dependencies = [
  "objc",
  "parking_lot",
  "percent-encoding",
+ "pollster",
  "raw-window-handle 0.5.2",
  "raw-window-handle 0.6.2",
  "ron",
@@ -1330,6 +1331,7 @@ dependencies = [
  "wasm-bindgen-futures",
  "web-sys",
  "web-time",
+ "wgpu",
  "winapi",
  "winit",
 ]
@@ -3325,6 +3327,12 @@ dependencies = [
  "windows-sys 0.52.0",
 ]

+[[package]]
+name = "pollster"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22686f4785f02a4fcc856d3b3bb19bf6c8160d103f7a99cc258bddd0251dc7f2"
+
 [[package]]
 name = "powerfmt"
 version = "0.2.0"
@@ -4982,6 +4990,7 @@ dependencies = [
 name = "weave"
 version = "0.1.0"
 dependencies = [
+ "derivative",
  "derive_more",
  "drama_llama",
  "eframe",
@@ -4996,6 +5005,7 @@ dependencies = [
  "serde",
  "serde_json",
  "static_assertions",
+ "thiserror",
  "tokio",
  "uuid",
 ]
@@ -5048,6 +5058,7 @@ dependencies = [
  "cfg_aliases",
  "js-sys",
  "log",
+ "naga",
  "parking_lot",
  "profiling",
  "raw-window-handle 0.6.2",
@@ -5097,6 +5108,7 @@ dependencies = [
  "arrayvec",
  "ash",
  "bitflags 2.5.0",
+ "block",
  "cfg_aliases",
  "core-graphics-types",
  "glow",
diff --git a/Cargo.toml b/Cargo.toml
index 522c073..53350f8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,26 +21,37 @@ icon = [
     # "resources/icon.32@2x.png",
     # "resources/icon.64.png",
     # "resources/icon.64@2x.png",
-    # "resources/icon.128.png",
-    # "resources/icon.128@2x.png",
+    "resources/icon.128.png",
+    "resources/icon.128@2x.png",
     "resources/icon.256.png",
-    # "resources/icon.256@2x.png",
+    "resources/icon.256@2x.png",
     "resources/icon.512.png",
"resources/icon.512@2x.png", # "resources/icon.1024.png", ] +# For release, we optimize for size and use lto. +[profile.release] +lto = true +panic = "abort" +strip = true + [dependencies] serde = { version = "1.0", features = ["derive"] } egui = { version = "0.27", features = ["persistence"], optional = true } -eframe = { version = "0.27", features = ["persistence"], optional = true } +eframe = { version = "0.27", features = [ + "persistence", + "wgpu", +], optional = true } egui_file = { version = "0.17.0", optional = true } derive_more = "0.99.17" serde_json = "1.0" log = "0.4" env_logger = "0.11" static_assertions = "1.1" +thiserror = "1.0" uuid = { version = "1.8", features = ["v4", "fast-rng"] } +derivative = "2.2.0" tokio = { version = "1", optional = true } futures = { version = "0.3", features = ["executor"], optional = true } diff --git a/src/app.rs b/src/app.rs index 89b97a0..9f92c9a 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,6 +1,9 @@ mod settings; -use {self::settings::Settings, crate::story::Story}; +use { + self::settings::{BackendOptions, Settings}, + crate::story::Story, +}; #[derive(Default, PartialEq, derive_more::Display)] pub enum SidebarPage { @@ -10,20 +13,68 @@ pub enum SidebarPage { } #[derive(Default)] -pub struct Sidebar { +struct LeftSidebar { // New story title buffer - title_buf: String, - page: SidebarPage, + pub title_buf: String, + pub page: SidebarPage, + pub visible: bool, } + +#[derive(Default)] +struct RightSidebar { + pub text: Option, + pub text_current: bool, + pub visible: bool, + pub model_view: bool, +} + +impl RightSidebar { + /// The story text will be updated on the next draw if this is called. This + /// is an optimization to avoid reformatting the story each frame if it + /// hasn't changed. + // TODO: This might not actually be worth it. We shoudl profile first since + // formatting the story and traversing the tree isn't actually all that + // expensive, but it could be if there are many nodes. There is a lot of CPU + // usage, but it doens't seem to be coming from our code. My guess is using + // `egui::Window` for each node is part of the problem. + pub fn refresh_story(&mut self) { + self.text_current = false; + } +} + +#[derive(derivative::Derivative, thiserror::Error)] +#[derivative(Debug)] +#[error("{}", message)] +struct Error { + message: String, + #[derivative(Debug = "ignore")] + action: Option>, +} + +impl From<&str> for Error { + fn from(message: &str) -> Self { + message.to_string().into() + } +} + +impl From for Error { + fn from(message: String) -> Self { + Self { + message: message.into(), + action: None, + } + } +} + #[derive(Default)] pub struct App { active_story: Option, stories: Vec, settings: Settings, - sidebar: Sidebar, - /// Modal error message text. If this is `Some`, the UI should display an - /// error message. - errmsg: Option, + left_sidebar: LeftSidebar, + right_sidebar: RightSidebar, + /// Modal error messages. 
+    errors: Vec<Error>,
     #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
     drama_llama_worker: crate::drama_llama::Worker,
     #[cfg(feature = "openai")]
@@ -41,6 +92,7 @@ impl App {
     pub fn new<'s>(cc: &eframe::CreationContext<'s>) -> Self {
         let ctx = cc.egui_ctx.clone();
+        let mut errors: Vec<Error> = Vec::new();

         let stories = cc
             .storage
@@ -52,7 +104,13 @@ impl App {
                 match serde_json::from_str(&s) {
                     Ok(stories) => Some(stories),
                     Err(e) => {
-                        log::error!("Failed to load stories: {}", e);
+                        errors.push(
+                            format!(
+                                "Failed to load stories because: {}",
+                                e
+                            )
+                            .into(),
+                        );
                         None
                     }
                 }
@@ -71,7 +129,13 @@ impl App {
                 match serde_json::from_str(&s) {
                     Ok(settings) => Some(settings),
                     Err(e) => {
-                        log::error!("Failed to load settings: {}", e);
+                        errors.push(
+                            format!(
+                                "Failed to load settings because: {}",
+                                e
+                            )
+                            .into(),
+                        );
                         None
                     }
                 }
@@ -309,7 +373,7 @@ impl App {
     }

     /// Draw sidebar.
-    pub fn draw_sidebar(
+    pub fn draw_left_sidebar(
         &mut self,
         ctx: &eframe::egui::Context,
         _frame: &mut eframe::Frame,
@@ -317,7 +381,7 @@ impl App {
         egui::SidePanel::left("sidebar")
             .default_width(200.0)
             .resizable(true)
-            .show(ctx, |ui| {
+            .show_animated(ctx, self.left_sidebar.visible, |ui| {
                 // Stuff could break if the user changes the story or backend
                 // settings while generation is in progress. The easiest way to
                 // fix this is just to make such actions impossible so we'll
@@ -350,20 +414,20 @@ impl App {
                 // TODO: better tabs and layout
                 ui.horizontal(|ui| {
                     ui.selectable_value(
-                        &mut self.sidebar.page,
+                        &mut self.left_sidebar.page,
                         SidebarPage::Stories,
                         "Stories",
                     );
                     ui.selectable_value(
-                        &mut self.sidebar.page,
+                        &mut self.left_sidebar.page,
                         SidebarPage::Settings,
                         "Settings",
                     );
                 });

-                ui.heading(self.sidebar.page.to_string());
+                ui.heading(self.left_sidebar.page.to_string());

-                match self.sidebar.page {
+                match self.left_sidebar.page {
                     SidebarPage::Settings => {
                         if let Some(action) = self.settings.draw(ui) {
                             self.handle_settings_action(action, ctx);
@@ -376,24 +440,90 @@ impl App {
             });
     }

+    pub fn draw_right_sidebar(
+        &mut self,
+        ctx: &eframe::egui::Context,
+        _frame: &mut eframe::Frame,
+    ) {
+        if self.story().is_none() {
+            return;
+        }
+        // Story is some. We can unwrap below. Story cannot change while this
+        // function is running since it is not accessible from any other
+        // thread.
+
+        egui::SidePanel::right("right_sidebar")
+            .default_width(200.0)
+            .resizable(true)
+            .show_animated(ctx, self.right_sidebar.visible, |ui| {
+                if self
+                    .settings
+                    .selected_generative_backend
+                    .supports_model_view()
+                {
+                    if ui
+                        .checkbox(
+                            &mut self.right_sidebar.model_view,
+                            "model view",
+                        )
+                        .on_hover_text_at_pointer(
+                            "Show exactly what the model is prompted with.",
+                        )
+                        .changed()
+                    {
+                        self.right_sidebar.refresh_story();
+                    }
+                }
+
+                let include_authors = if self.right_sidebar.model_view {
+                    self.settings.prompt_include_authors
+                } else {
+                    true
+                };
+                let include_title = if self.right_sidebar.model_view {
+                    self.settings.prompt_include_title
+                } else {
+                    true
+                };
+
+                if !self.right_sidebar.text_current {
+                    // We need to shuffle the text around a bit. We do this
+                    // because of mutable borrow rules, and to avoid
+                    // reallocation.
+                    let mut text =
+                        self.right_sidebar.text.take().unwrap_or(String::new());
+                    text.clear();
+                    self.story()
+                        .unwrap()
+                        .format_full(&mut text, include_authors, include_title)
+                        .unwrap();
+                    self.right_sidebar.text = Some(text);
+                }

+                // We have some text to display because there is a story and
+                // formatting cannot actually fail.
+                ui.label(self.right_sidebar.text.as_ref().unwrap());
+            });
+    }
+
     /// Draw error message if there is one. Returns `true` if the error message
     /// is displayed. This function accepts a closure which can be used to
     /// display additional UI elements, such as a button to handle the error.
-    pub fn draw_error_message(
-        &mut self,
-        ctx: &egui::Context,
-        mut f: Option<Box<dyn FnMut(&mut egui::Ui)>>,
-    ) -> bool {
+    pub fn draw_error_messages(&mut self, ctx: &egui::Context) -> bool {
         let mut closed = false; // because two mutable references
-        if let Some(msg) = &self.errmsg {
+        if let Some(Error {
+            message,
+            ref mut action,
+        }) = self.errors.first_mut()
+        {
+            log::error!("{}", message);
             egui::CentralPanel::default().show(ctx, |ui| {
                 egui::Window::new("Error").show(ui.ctx(), |ui| {
-                    ui.label(msg);
+                    ui.label(message.as_str());
                     ui.horizontal(|ui| {
                         if ui.button("Close").clicked() {
                             closed = true;
                         }
-                        if let Some(f) = &mut f {
+                        if let Some(f) = action {
                             f(ui);
                         }
                     })
@@ -403,8 +533,8 @@ impl App {
             return false;
         }
         if closed {
-            self.errmsg = None;
-            return false;
+            self.errors.remove(0);
+            return !self.errors.is_empty();
         } else {
             return true;
         }
@@ -464,22 +594,22 @@ impl App {
         let text = match std::fs::read_to_string(path) {
             Ok(text) => text,
             Err(e) => {
-                self.errmsg = Some(format!(
+                self.errors.push(format!(
                     "Failed to read `{:?}` because: {}",
                     path, e
-                ));
+                ).into());
                 return;
             }
         };
         let story: Story = match serde_json::from_str(&text) {
             Ok(story) => story,
             Err(e) => {
-                self.errmsg = Some(format!(
+                self.errors.push(format!(
                     "Failed to parse `{:?}` because: {}",
                     path, e
-                ));
+                ).into());
                 return;
             }
         };
@@ -490,7 +620,7 @@ impl App {
         let active_story_index = match self.active_story {
             Some(i) => i,
             None => {
-                self.errmsg = Some("No active story to save.".to_string());
+                self.errors.push("No active story to save.".into());
                 return;
             }
         };
@@ -501,10 +631,10 @@ impl App {
             match serde_json::to_string(&self.stories[active_story_index]) {
                 Ok(json) => json,
                 Err(e) => {
-                    self.errmsg = Some(format!(
+                    self.errors.push(format!(
                         "Failed to serialize stories because: {}",
                         e
-                    ));
+                    ).into());
                     return;
                 }
             }
@@ -513,11 +643,11 @@ impl App {
         match std::fs::write(path, payload) {
             Ok(_) => {},
             Err(e) => {
-                self.errmsg = Some(format!(
+                self.errors.push(format!(
                     "Failed to write `{:?}` because: {}",
                     path, e
-                ));
+                ).into());
                 return;
             }
         }
@@ -558,6 +688,30 @@ impl App {
                 self.settings.pending_backend_switch = None;
             }
+            #[cfg(feature = "openai")]
+            settings::Action::OpenAI(action) => match action {
+                crate::openai::SettingsAction::FetchModels => {
+                    if self.openai_worker.is_alive() {
+                        // Non-blocking. We'll get a response back when the
+                        // worker is done fetching.
+                        self.openai_worker.fetch_models().ok();
+                    } else {
+                        if let BackendOptions::OpenAI { settings } =
+                            self.settings.backend_options()
+                        {
+                            if let Err(e) = settings.fetch_models_sync(None) {
+                                self.errors.push(
+                                    format!(
+                                        "Failed to fetch OpenAI models because: {}",
+                                        e
+                                    )
+                                    .into(),
+                                );
+                            }
+                        }
+                    }
+                }
+            },
         }
     }
@@ -583,12 +737,12 @@ impl App {

         ui.horizontal(|ui| {
             if ui.button("New").clicked() {
-                let title = self.sidebar.title_buf.clone();
+                let title = self.left_sidebar.title_buf.clone();
                 let author = self.settings.default_author.clone();
                 self.new_story(title, author);
-                self.sidebar.title_buf.clear();
+                self.left_sidebar.title_buf.clear();
             }
-            ui.text_edit_singleline(&mut self.sidebar.title_buf);
+            ui.text_edit_singleline(&mut self.left_sidebar.title_buf);
         });

         // We might not support wasm at all, but if we do this will have to be
@@ -628,6 +782,7 @@ impl App {
         // with it.
         // In the meantime, the windows are, at least, collapsible.
         let generation_in_progress = self.generation_in_progress;
+        let mut update_right_sidebar = false;
         if let Some(story) = self.story_mut() {
             // TODO: the response from story.draw could be more succinct. We
             // only really know if we need to start generation (for now).
@@ -637,8 +792,17 @@ impl App {
                     // start generation.
                     start_generation = true;
                 }
+                if action.modified {
+                    update_right_sidebar = true;
+                }
+            }
+            if !new_pieces.is_empty() {
+                story.extend_paragraph(new_pieces);
+                update_right_sidebar = true;
+            }
+            if update_right_sidebar {
+                self.right_sidebar.refresh_story();
             }
-            story.extend_paragraph(new_pieces);
         } else {
             if !new_pieces.is_empty() {
                 // We received a piece of text but there is no active story.
                 log::error!(
@@ -648,13 +812,15 @@ impl App {
                 );
             }
             ui.heading("Welcome to Weave!");
-            ui.label("Create a new story or select an existing one.");
+            ui.label("Keyboard shortcuts:\n- `ESC` to start a new story or to access settings.\n- `F1` to toggle a view of the story's path as text.");
         }
     });

     if start_generation {
         if let Err(e) = self.start_generation() {
-            log::error!("Failed to start generation: {}", e);
+            self.errors.push(
+                format!("Failed to start generation because: {}", e).into(),
+            );
         }
     }
 }
@@ -698,6 +864,7 @@ impl App {
                     // it to the story.
                     crate::drama_llama::Response::Predicted { piece } => {
                         new_pieces.push(piece);
+                        self.right_sidebar.refresh_story();
                     }
                     crate::drama_llama::Response::Done => {
                         // Trim whitespace from the end of the story. The
@@ -707,6 +874,7 @@ impl App {
                         // `drama_llama`
                         if let Some(story) = self.story_mut() {
                             story.head_mut().trim_end_whitespace();
+                            self.right_sidebar.refresh_story();
                         }
                         // We can unlock the UI now.
                         self.generation_in_progress = false;
@@ -714,11 +882,11 @@ impl App {
                     crate::drama_llama::Response::Busy { request } => {
                         // This might happen because of data races, but really
                         // shouldn't.
-                        // TODO: gui error message
-                        log::error!(
+                        // TODO: make a macro for all these error messages.
+                        self.errors.push(format!(
                             "Unexpected request sent to worker. Report this please: {:?}",
                             request
-                        )
+                        ).into());
                     }
                 },
                 None => {
@@ -746,12 +914,15 @@ impl App {
                         self.generation_in_progress = false;
                     }
                     crate::openai::Response::Busy { request } => {
-                        log::error!(
+                        self.errors.push(format!(
                             "Unexpected request sent to worker. Report this please: {:?}",
                             request
-                        )
+                        ).into());
                     }
                     crate::openai::Response::Models { models } => {
+                        // The worker is done fetching models. We can update the
+                        // settings now.
+
                         // because conditional compilation
                         #[allow(irrefutable_let_patterns)]
                         if let settings::BackendOptions::OpenAI { settings } =
@@ -770,6 +941,22 @@ impl App {
             _ => {}
         }
     }
+
+    /// Handle input events (keyboard shortcuts, etc).
+    pub fn handle_input(
+        &mut self,
+        ctx: &eframe::egui::Context,
+        _frame: &mut eframe::Frame,
+    ) {
+        ctx.input(|input| {
+            if input.key_pressed(egui::Key::Escape) {
+                self.left_sidebar.visible = !self.left_sidebar.visible;
+            }
+            if input.key_pressed(egui::Key::F1) {
+                self.right_sidebar.visible = !self.right_sidebar.visible;
+            }
+        });
+    }
 }

 impl eframe::App for App {
@@ -778,11 +965,13 @@ impl eframe::App for App {
         ctx: &eframe::egui::Context,
         frame: &mut eframe::Frame,
     ) {
-        if self.draw_error_message(ctx, None) {
+        self.handle_input(ctx, frame);
+        if self.draw_error_messages(ctx) {
             // An error message is displayed. We skip the rest of the UI.
             return;
         }
-        self.draw_sidebar(ctx, frame);
+        self.draw_left_sidebar(ctx, frame);
+        self.draw_right_sidebar(ctx, frame);
         self.draw_central_panel(ctx, frame);
     }

diff --git a/src/app/settings.rs b/src/app/settings.rs
index 98fa41d..4d9f53a 100644
--- a/src/app/settings.rs
+++ b/src/app/settings.rs
@@ -45,6 +45,18 @@ impl GenerativeBackend {
     } else {
         Self::ALL[0]
     };
+
+    pub fn supports_model_view(&self) -> bool {
+        match self {
+            #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
+            GenerativeBackend::DramaLlama => true,
+            // We don't actually know how the OpenAI model is prompted since we
+            // feed it messages, not raw text. We could make a good educated
+            // guess, but it's not worth it right now.
+            #[cfg(feature = "openai")]
+            GenerativeBackend::OpenAI => false,
+        }
+    }
 }

 #[cfg(feature = "generate")]
@@ -128,6 +140,7 @@ impl BackendOptions {
         }
     }

+    #[cfg(feature = "openai")]
     pub fn as_openai(&self) -> Option<&crate::openai::Settings> {
         match self {
             BackendOptions::OpenAI { settings } => Some(settings),
@@ -197,6 +210,8 @@ pub enum Action {
         /// This backend should be started.
         to: GenerativeBackend,
     },
+    #[cfg(feature = "openai")]
+    OpenAI(crate::openai::SettingsAction),
 }

 impl Settings {
@@ -245,29 +260,33 @@ impl Settings {
             )
             .on_hover_text_at_pointer("It will still be shown in the viewport. Hiding it can improve quality of generation since models have biases. Does not apply to all backends.");

-        ui.label("Generative backend:");
-        egui::ComboBox::from_label("Backend")
-            .selected_text(self.selected_generative_backend.to_string())
-            .show_ui(ui, |ui| {
-                for &backend in GenerativeBackend::ALL {
-                    let active: bool =
-                        self.selected_generative_backend == *backend;
-
-                    if ui
-                        .selectable_label(active, backend.to_string())
-                        .clicked()
-                    {
-                        ret = Some(Action::SwitchBackends {
-                            from: self.selected_generative_backend,
-                            to: *backend,
-                        });
-
-                        // We don't immediately switch the backend because we
-                        // want to clean up first. The `App` will switch the
-                        // `selected_generative_backend` after the cleanup.
+        // If there is only one backend, don't show the dropdown.
+        if GenerativeBackend::ALL.len() > 1 {
+            // allow the user to switch backends
+            ui.label("Generative backend:");
+            egui::ComboBox::from_label("Backend")
+                .selected_text(self.selected_generative_backend.to_string())
+                .show_ui(ui, |ui| {
+                    for &backend in GenerativeBackend::ALL {
+                        let active: bool =
+                            self.selected_generative_backend == *backend;
+
+                        if ui
+                            .selectable_label(active, backend.to_string())
+                            .clicked()
+                        {
+                            ret = Some(Action::SwitchBackends {
+                                from: self.selected_generative_backend,
+                                to: *backend,
+                            });
+
+                            // We don't immediately switch the backend because we
+                            // want to clean up first. The `App` will switch the
+                            // `selected_generative_backend` after the cleanup.
+                        }
                     }
-                }
-            });
+                });
+        }

         match self.backend_options() {
             #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
@@ -406,7 +425,6 @@ impl Settings {

                 if let Some(i) = remove {
                     predict_options.stop_strings.remove(i);
-                    remove = None;
                 }

                 if ui.button("Add stop string").clicked() {
@@ -419,7 +437,9 @@ impl Settings {
             }
             #[cfg(feature = "openai")]
             BackendOptions::OpenAI { settings } => {
-                settings.ui(ui);
+                if let Some(action) = settings.draw(ui) {
+                    ret = Some(Action::OpenAI(action));
+                }
             }
             #[allow(unreachable_patterns)]
             // because same as above
diff --git a/src/lib.rs b/src/lib.rs
index b6a81c0..5acfa08 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -10,14 +10,12 @@ pub mod app;

 /// OpenAI generative [`Worker`]. [`Request`]s are sent to the worker and
-/// [`Response`]s are received. This can be used to run tasks in the background
-/// and are not tied to any specific frontend.
+/// [`Response`]s are received.
 #[cfg(feature = "openai")]
 pub(crate) mod openai;

 /// [`drama_llama`] generative [`Worker`]. [`Request`]s are sent to the worker
-/// and [`Response`]s are received. This can be used to run tasks in the
-/// background and are not tied to any specific frontend.
+/// and [`Response`]s are received.
 #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
 pub(crate) mod drama_llama;

diff --git a/src/node.rs b/src/node.rs
index e52a7d1..87104fe 100644
--- a/src/node.rs
+++ b/src/node.rs
@@ -71,6 +71,9 @@ pub struct Action {
     pub continue_: bool,
     /// If a new node should be generated, and its child index.
     pub generate: Option<usize>,
+    /// If the node (or tree) has been modified. This is an optimization to
+    /// avoid unnecessary rendering, allocation, and node traversal.
+    pub modified: bool,
 }

 #[cfg(feature = "gui")]
@@ -460,6 +463,13 @@ impl Node {
             self.pieces.push(Piece {
                 end: self.text.len(),
             });
+            response = match response {
+                Some(mut action) => {
+                    action.modified = true;
+                    Some(action)
+                }
+                None => Some(Action { modified: true, ..Default::default() }),
+            };
         }

         response
diff --git a/src/openai.rs b/src/openai.rs
index 4a643bd..3c4e422 100644
--- a/src/openai.rs
+++ b/src/openai.rs
@@ -55,7 +55,7 @@ impl Into for ChatArguments {
 }

 impl ChatArguments {
-    pub fn ui(&mut self, ui: &mut egui::Ui) -> egui::Response {
+    pub fn draw(&mut self, ui: &mut egui::Ui) -> egui::Response {
         // `model` is set by the parent `Settings` struct, since it has the
         // available choices. We don't draw it here.
@@ -297,6 +297,13 @@ where
     }
 }

+/// When calling [`Settings::draw`], this action determines what the caller
+/// should do.
+pub enum SettingsAction {
+    /// The caller should call [`Worker::fetch_models`].
+    FetchModels,
+}
+
 #[derive(Debug, Default, Serialize, Deserialize)]
 pub struct Settings {
     /// Available models, if available from the OpenAI API. We don't want to
@@ -368,26 +375,21 @@ impl Settings {
         Ok(())
     }

+    /// Draw the settings UI. If the caller needs to perform any action,
+    /// `Some(action)` will be returned and should be acted upon.
     #[cfg(feature = "gui")]
-    pub fn ui(&mut self, ui: &mut egui::Ui) -> egui::Response {
+    pub fn draw(&mut self, ui: &mut egui::Ui) -> Option<SettingsAction> {
+        let mut action = None;
+
         if self.models.is_empty() {
             if ui.button("Fetch models").clicked() {
-                // TODO: Somehow we need to send a message to our worker to
-                // fetch the models and then get them back from a channel. This
-                // is some work but we need to wrap the async stuff in it's own
-                // thread because egui itself is not async. So we'll start an
-                // executor in a worker and do like we do with `drama_llama`.
-                // Alternatively we could just block the main thread and do it
-                // on startup with futures::executor::block_on.
-
-                // FIXME: This is blocking. We do have a way of sending a
-                // request to the worker to fetch the models, but it's on the
-                // parent struct, so we'll need to return some kind of request
-                // from here to the parent to tell it to fetch the models. Then
-                // when the models are ready, they're sent back to the main
-                // thread and all is well with no blocking. But this is fine
-                // for now.
-                self.fetch_models_sync(None).ok();
+                // We can't use async here, but we can do old-fashioned
+                // non-blocking code to achieve the same effect. This will tell
+                // the caller to send a request to the worker to fetch the
+                // models. That fetch happens in a worker thread. When it's
+                // done, the worker will notify the main thread and the models
+                // will be updated. Very roundabout but avoids a UI hang.
+                action = Some(SettingsAction::FetchModels);
             }
         } else {
             // We display a dropdown for the models and let the user select one.
@@ -415,7 +417,9 @@ impl Settings {
                 .hint_text("OpenAI API key"),
         );

-        self.chat_arguments.ui(ui)
+        self.chat_arguments.draw(ui);
+
+        action
     }
 }
@@ -740,6 +744,18 @@ impl Worker {
         self.handle.is_some()
     }

+    pub fn fetch_models(&mut self) -> Result<(), futures::channel::mpsc::TrySendError<Request>> {
+        if !self.is_alive() {
+            panic!("Worker is not alive. Can't fetch models.");
+        }
+
+        if let Some(to_worker) = self.to_worker.as_mut() {
+            to_worker.try_send(Request::FetchModels)?;
+        }
+
+        Ok(())
+    }
+
     /// Start prediction. Returns any SendError that occurs. This does not block
     /// the current thread. Use `shutdown` to stop the worker thread.
     ///
diff --git a/src/story.rs b/src/story.rs
index 2686e28..4a920a7 100644
--- a/src/story.rs
+++ b/src/story.rs
@@ -145,10 +145,8 @@ impl Story {
     ) -> Option<crate::node::Action> {
         use crate::node::PathAction;

-        ui.label(self.to_string());
-
         // Draw, and update active path if changed.
-        if let Some(PathAction { path, action }) = self.root.draw(
+        if let Some(PathAction { path, mut action }) = self.root.draw(
             ui,
             self.active_path.as_ref().map(|v| v.as_slice()),
             lock_topology,
@@ -160,6 +158,7 @@ impl Story {
             if action.delete {
                 // We can handle this here.
                 self.decapitate();
+                action.modified = true;
                 return None;
             } else if action.generate.is_some() | action.continue_ {
                 return Some(action);
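
Appendix: the non-blocking `fetch_models` flow this patch wires up (Settings::draw returns a SettingsAction, App forwards it to the worker via Worker::fetch_models, and the worker answers with Response::Models on a later frame) is an instance of a request/response worker-channel pattern. Below is a minimal, self-contained sketch of that pattern. It uses std::sync::mpsc where the patch uses futures channels, and every name in it (spawn_worker, the placeholder model string, etc.) is illustrative rather than taken from the weave codebase:

use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::thread;

enum Request {
    FetchModels,
}

enum Response {
    Models { models: Vec<String> },
}

fn spawn_worker() -> (Sender<Request>, Receiver<Response>) {
    let (to_worker, from_ui) = channel::<Request>();
    let (to_ui, from_worker) = channel::<Response>();
    thread::spawn(move || {
        // Block in the worker thread, never in the UI thread. The loop ends
        // when the UI drops its Sender.
        for request in from_ui {
            match request {
                Request::FetchModels => {
                    // A real worker would perform the slow API call here.
                    let models = vec!["placeholder-model".to_string()];
                    if to_ui.send(Response::Models { models }).is_err() {
                        break; // UI side hung up; shut down.
                    }
                }
            }
        }
    });
    (to_worker, from_worker)
}

fn main() {
    let (to_worker, from_worker) = spawn_worker();
    // UI side: fire the request without blocking (cf. Worker::fetch_models).
    to_worker.send(Request::FetchModels).unwrap();
    // Each frame, drain responses without blocking (cf. App::update).
    loop {
        match from_worker.try_recv() {
            Ok(Response::Models { models }) => {
                println!("models: {:?}", models);
                break;
            }
            Err(TryRecvError::Empty) => {
                // Nothing yet; a real app would paint this frame and poll
                // again on the next one.
                thread::yield_now();
            }
            Err(TryRecvError::Disconnected) => break,
        }
    }
}

The design choice mirrors the patch's own comment: egui is not async, so instead of blocking the paint loop on an HTTP call, the UI only ever does a try_send/try_recv pair and the latency lives entirely in the worker thread.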