diff --git a/src/app.rs b/src/app.rs
index 6e66c57..dfb3253 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -1,11 +1,8 @@
-use crate::{settings::Settings, story::Story};
+mod settings;
 
-#[derive(Default)]
-pub struct Toolbar {
-    pub title_buf: String,
-}
+use {self::settings::Settings, crate::story::Story};
 
-#[derive(Default, derive_more::Display)]
+#[derive(Default, PartialEq, derive_more::Display)]
 pub enum SidebarPage {
     #[default]
     Stories,
@@ -14,6 +11,8 @@ pub enum SidebarPage {
 
 #[derive(Default)]
 pub struct Sidebar {
+    // New story title buffer
+    title_buf: String,
     page: SidebarPage,
 }
 
 #[derive(Default)]
@@ -22,7 +21,6 @@ pub struct App {
     stories: Vec<Story>,
     settings: Settings,
     sidebar: Sidebar,
-    toolbar: Toolbar,
     #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
     drama_llama_worker: crate::drama_llama::Worker,
     #[cfg(feature = "openai")]
@@ -119,20 +117,13 @@ impl App {
         match self.settings.backend_options() {
             #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
-            crate::settings::BackendOptions::DramaLlama { model, .. } => {
+            settings::BackendOptions::DramaLlama { model, .. } => {
                 self.drama_llama_worker.start(model.clone())?;
             }
             #[cfg(feature = "openai")]
-            crate::settings::BackendOptions::OpenAI { settings } => {
+            settings::BackendOptions::OpenAI { settings } => {
                 self.openai_worker.start(&settings.openai_api_key);
             }
-            #[allow(unreachable_patterns)] // because conditional compilation
-            _ => {
-                todo!(
-                    "Generative backend not implemented: {}",
-                    self.settings.selected_generative_backend
-                );
-            }
         }
 
         Ok(())
@@ -151,6 +142,9 @@ impl App {
     }
 
     /// Start generation (with current settings, at the story head).
+    // TODO: Move backend code to the backend modules. This function is too
+    // long. Each backend does more or less the same thing. See if we can make
+    // a trait for this.
     #[cfg(feature = "generate")]
     pub fn start_generation(
         &mut self,
@@ -172,7 +166,7 @@ impl App {
                 feature = "drama_llama",
                 not(target_arch = "wasm32")
             ))]
-            crate::settings::BackendOptions::DramaLlama {
+            settings::BackendOptions::DramaLlama {
                 predict_options, ..
             } => {
@@ -216,7 +210,7 @@ impl App {
                 }
             }
             #[cfg(feature = "openai")]
-            crate::settings::BackendOptions::OpenAI { settings } => {
+            settings::BackendOptions::OpenAI { settings } => {
                 let mut options = settings.chat_arguments.clone();
 
                 let story = if let Some(story) = self.story_mut() {
@@ -268,11 +262,11 @@ impl App {
     ) -> Result<(), Box<dyn std::error::Error>> {
         match self.settings.selected_generative_backend {
             #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
-            crate::settings::GenerativeBackend::DramaLlama => {
+            settings::GenerativeBackend::DramaLlama => {
                 self.drama_llama_worker.stop()?;
             }
             #[cfg(feature = "openai")]
-            crate::settings::GenerativeBackend::OpenAI => {
+            settings::GenerativeBackend::OpenAI => {
                 self.openai_worker.try_stop()?;
             }
         }
@@ -288,43 +282,20 @@ impl App {
     ) -> Result<(), Box<dyn std::error::Error>> {
         match self.settings.selected_generative_backend {
             #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
-            crate::settings::GenerativeBackend::DramaLlama => {
+            settings::GenerativeBackend::DramaLlama => {
                 if self.drama_llama_worker.shutdown().is_err() {
                     return Err("`drama_llama` worker thread did not shut down cleanly.".into());
                 }
             }
             #[cfg(feature = "openai")]
-            crate::settings::GenerativeBackend::OpenAI => {
+            settings::GenerativeBackend::OpenAI => {
                 self.openai_worker.shutdown()?;
             }
-            #[allow(unreachable_patterns)] // because conditional compilation
-            _ => {}
         }
 
         Ok(())
     }
 
-    /// Draw the toolbar.
-    pub fn draw_toolbar(
-        &mut self,
-        ctx: &eframe::egui::Context,
-        _frame: &mut eframe::Frame,
-    ) {
-        egui::TopBottomPanel::top("toolbar")
-            .resizable(true)
-            .show(ctx, |ui| {
-                egui::menu::bar(ui, |ui| {
-                    if ui.button("New Story").clicked() {
-                        let title = self.toolbar.title_buf.clone();
-                        let author = self.settings.default_author.clone();
-                        self.new_story(title, author);
-                        self.toolbar.title_buf.clear();
-                    }
-                    ui.text_edit_singleline(&mut self.toolbar.title_buf);
-                });
-            });
-    }
-
     /// Draw sidebar.
     pub fn draw_sidebar(
         &mut self,
@@ -366,19 +337,25 @@ impl App {
             // These are our sidebar tabs.
             // TODO: better tabs and layout
             ui.horizontal(|ui| {
-                if ui.button("Stories").clicked() {
-                    self.sidebar.page = SidebarPage::Stories;
-                }
-                if ui.button("Settings").clicked() {
-                    self.sidebar.page = SidebarPage::Settings;
-                }
+                ui.selectable_value(
+                    &mut self.sidebar.page,
+                    SidebarPage::Stories,
+                    "Stories",
+                );
+                ui.selectable_value(
+                    &mut self.sidebar.page,
+                    SidebarPage::Settings,
+                    "Settings",
+                );
             });
 
             ui.heading(self.sidebar.page.to_string());
 
             match self.sidebar.page {
                 SidebarPage::Settings => {
-                    self.settings.draw(ui);
+                    if let Some(action) = self.settings.draw(ui) {
+                        self.handle_settings_action(action);
+                    }
                 }
                 SidebarPage::Stories => {
                     self.draw_stories_tab(ui);
@@ -387,6 +364,30 @@ impl App {
         });
     }
 
+    /// Handle settings action.
+    pub fn handle_settings_action(&mut self, action: settings::Action) {
+        match action {
+            settings::Action::SwitchBackends { from, to } => {
+                debug_assert!(from != to);
+                debug_assert!(
+                    self.settings.selected_generative_backend == from
+                );
+
+                if let Err(e) = self.stop_generation() {
+                    eprintln!("Failed to stop generation: {}", e);
+                }
+
+                self.settings.selected_generative_backend = to;
+
+                if let Err(e) = self.reset_generative_backend() {
+                    eprintln!("Failed to start generative backend: {}", e);
+                }
+
+                self.settings.pending_backend_switch = None;
+            }
+        }
+    }
+
     /// Draw the stories sidebar tab.
     fn draw_stories_tab(&mut self, ui: &mut egui::Ui) {
         let mut delete = None;
@@ -406,6 +407,16 @@ impl App {
                 self.active_story = None;
             }
         }
+
+        ui.horizontal(|ui| {
+            if ui.button("New").clicked() {
+                let title = self.sidebar.title_buf.clone();
+                let author = self.settings.default_author.clone();
+                self.new_story(title, author);
+                self.sidebar.title_buf.clear();
+            }
+            ui.text_edit_singleline(&mut self.sidebar.title_buf);
+        });
     }
 
     /// Draw the central panel.
@@ -438,10 +449,11 @@ impl App {
         // solution might perform better as well and I have some experience
         // with it.
         // In the meantime, the windows are, at least, collapsible.
+        let generation_in_progress = self.generation_in_progress;
         if let Some(story) = self.story_mut() {
             // TODO: the response from story.draw could be more succinct. We
             // only really need to know whether to start generation (for now).
-            if let Some(action) = story.draw(ui) {
+            if let Some(action) = story.draw(ui, generation_in_progress) {
                 if action.continue_ | action.generate.is_some() {
                     // The path has already been changed. We need only
                     // start generation.
@@ -469,10 +481,10 @@ impl App {
         }
     }
 
-    /// Update any generation that is in progress.
+    /// Update `new_pieces` with any newly generated pieces of text.
     #[cfg(feature = "generate")]
     fn update_generation(&mut self, new_pieces: &mut Vec<String>) {
-        use crate::settings::GenerativeBackend;
+        use settings::GenerativeBackend;
 
         if !self.generation_in_progress {
             return;
         }
@@ -521,13 +533,13 @@ impl App {
                         // We can unlock the UI now.
                        self.generation_in_progress = false;
                    }
-                    crate::drama_llama::Response::Busy { command } => {
+                    crate::drama_llama::Response::Busy { request } => {
                        // This might happen because of data races, but really
                        // shouldn't.
                        // TODO: gui error message
                        log::error!(
-                            "Unexpected command sent to worker. Report this please: {:?}",
-                            command
+                            "Unexpected request sent to worker. Report this please: {:?}",
+                            request
                        )
                    }
                },
@@ -555,16 +567,15 @@ impl App {
                        }
                        self.generation_in_progress = false;
                    }
-                    crate::openai::Response::Busy { command } => {
+                    crate::openai::Response::Busy { request } => {
                        log::error!(
-                            "Unexpected command sent to worker. Report this please: {:?}",
-                            command
+                            "Unexpected request sent to worker. Report this please: {:?}",
+                            request
                        )
                    }
                    crate::openai::Response::Models { models } => {
-                        if let crate::settings::BackendOptions::OpenAI {
-                            settings,
-                        } = self.settings.backend_options()
+                        if let settings::BackendOptions::OpenAI { settings } =
+                            self.settings.backend_options()
                        {
                            settings.models = models;
                        }
@@ -587,7 +598,6 @@ impl eframe::App for App {
        ctx: &eframe::egui::Context,
        frame: &mut eframe::Frame,
    ) {
-        self.draw_toolbar(ctx, frame);
        self.draw_sidebar(ctx, frame);
        self.draw_central_panel(ctx, frame);
    }
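Note on the pattern above: the settings UI no longer flips `selected_generative_backend` in place. `Settings::draw` returns a `settings::Action`, and `App::handle_settings_action` stops the old worker before committing the switch. A minimal, self-contained sketch of this deferred-action pattern — the `Backend`, `Settings`, and `App` types here are simplified stand-ins, not the ones in this diff:

#[derive(Clone, Copy, PartialEq, Debug)]
enum Backend {
    Local,
    Remote,
}

enum Action {
    SwitchBackends { from: Backend, to: Backend },
}

struct Settings {
    selected: Backend,
}

impl Settings {
    /// The UI layer only *reports* the requested change...
    fn draw(&mut self, clicked: Backend) -> Option<Action> {
        (clicked != self.selected).then(|| Action::SwitchBackends {
            from: self.selected,
            to: clicked,
        })
    }
}

struct App {
    settings: Settings,
}

impl App {
    /// ...and the owner of the workers applies it, so cleanup (stopping the
    /// old backend's worker) can run before the selection actually changes.
    fn handle_settings_action(&mut self, action: Action) {
        match action {
            Action::SwitchBackends { from, to } => {
                debug_assert!(self.settings.selected == from);
                // stop_generation() would go here.
                self.settings.selected = to;
                // reset_generative_backend() would go here.
            }
        }
    }
}

The indirection matters because the click handler only has `&mut Settings`; returning a value pushes the side effects up to the one type that owns both the settings and the workers.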
diff --git a/src/settings.rs b/src/app/settings.rs
similarity index 83%
rename from src/settings.rs
rename to src/app/settings.rs
index 2a9dbfd..428e93d 100644
--- a/src/settings.rs
+++ b/src/app/settings.rs
@@ -1,5 +1,6 @@
 use serde::{Deserialize, Serialize};
 
+/// Backend for generation.
 #[cfg(feature = "generate")]
 #[derive(
     Clone,
@@ -23,10 +24,33 @@ pub enum GenerativeBackend {
     Claude,
 }
 
+#[cfg(feature = "generate")]
+impl GenerativeBackend {
+    /// All the generative backends that can be used, in order of preference.
+    pub const ALL: &'static [&'static GenerativeBackend] = &[
+        #[cfg(feature = "drama_llama")]
+        &GenerativeBackend::DramaLlama,
+        #[cfg(feature = "ollama")]
+        &GenerativeBackend::Ollama,
+        #[cfg(feature = "openai")]
+        &GenerativeBackend::OpenAI,
+        #[cfg(feature = "claude")]
+        &GenerativeBackend::Claude,
+    ];
+
+    pub const DEFAULT: &'static GenerativeBackend = if Self::ALL.is_empty() {
+        panic!(
+            "There must be at least one generative backend feature enabled to use the `generate` feature."
+        );
+    } else {
+        Self::ALL[0]
+    };
+}
+
 #[cfg(feature = "generate")]
 impl Default for GenerativeBackend {
     fn default() -> Self {
-        *crate::consts::DEFAULT_GENERATIVE_BACKEND
+        *Self::DEFAULT
     }
 }
@@ -139,6 +163,8 @@ impl Into for &mut BackendOptions {
     }
 }
 
+/// Crate settings.
+// This is used by `App` but not much else, so we might feature-gate it behind `gui`.
 #[derive(Default, Serialize, Deserialize)]
 pub struct Settings {
     /// Default author for new nodes.
@@ -157,21 +183,23 @@ pub struct Settings {
     // are not enabled.
     pub backend_options: std::collections::HashMap<GenerativeBackend, BackendOptions>,
 
+    #[serde(skip)]
+    /// Whether backend switching is pending.
+    pub pending_backend_switch: Option<GenerativeBackend>,
 }
 
-impl Settings {
-    pub fn new() -> Self {
-        Self {
-            default_author: "Anonymous".to_string(),
-            prompt_include_authors: true,
-            prompt_include_title: true,
-            #[cfg(feature = "generate")]
-            selected_generative_backend: GenerativeBackend::default(),
-            #[cfg(feature = "generate")]
-            backend_options: std::collections::HashMap::new(),
-        }
-    }
+pub enum Action {
+    /// The user has requested to switch generative backends. When the switch
+    /// is complete, `Settings::pending_backend_switch` should be set to
+    /// `None`.
+    SwitchBackends {
+        /// This backend should be shut down.
+        from: GenerativeBackend,
+        /// This backend should be started.
+        to: GenerativeBackend,
+    },
+}
 
+impl Settings {
     #[cfg(feature = "generate")]
     pub fn backend_options(&mut self) -> &mut BackendOptions {
         self.backend_options
@@ -181,41 +209,59 @@ impl Settings {
         })
     }
 
+    /// Draws generation settings. If there is some additional action the
+    /// [`App`] should take, it will return that action.
+    ///
+    /// [`App`]: crate::app::App
     #[cfg(feature = "generate")]
-    pub fn draw_generation_settings(&mut self, ui: &mut egui::Ui) {
-        // Choose generative backend
+    pub fn draw_generation_settings(
+        &mut self,
+        ui: &mut egui::Ui,
+    ) -> Option<Action> {
+        let mut ret = None;
+
+        if let Some(backend) = &self.pending_backend_switch {
+            ui.label(format!(
+                "Switching backend to `{}`. Please wait.",
+                backend
+            ));
+        }
+
+        // Choose generative backend
         use std::num::NonZeroU128;
 
+        ui.checkbox(
+            &mut self.prompt_include_authors,
+            "Include author in prompt sent to model.",
+        )
+        .on_hover_text_at_pointer("It will still be shown in the viewport. Hiding it can improve quality of generation since models have biases. Does not apply to all backends.");
+
+        ui.checkbox(
+            &mut self.prompt_include_title,
+            "Include title in prompt sent to model.",
+        )
+        .on_hover_text_at_pointer("It will still be shown in the viewport. Hiding it can improve quality of generation since models have biases. Does not apply to all backends.");
+
         ui.label("Generative backend:");
         egui::ComboBox::from_label("Backend")
             .selected_text(self.selected_generative_backend.to_string())
             .show_ui(ui, |ui| {
-                for &backend in crate::consts::GENERATIVE_BACKENDS {
-                    // The linter is wrong. `backend` is used below.
-                    #[allow(unused_variables)]
+                for &backend in GenerativeBackend::ALL {
                     let active: bool =
-                        matches!(self.selected_generative_backend, backend);
+                        self.selected_generative_backend == *backend;
                     if ui
                         .selectable_label(active, backend.to_string())
                         .clicked()
                     {
-                        // We need to shutdown the worker if we're changing
-                        // backends because the worker is tied to the backend.
-                        // FIXME: Because the app has the worker, we should
-                        // return something indicating the worker should be
-                        // restarted. I can't think of another way. If we do
-                        // that, we can't change it immediately here, but should
-                        // return the selected backend and then change it in the
-                        // App::update method. It's a bit of a mess.
-                        // Alternatively we could move the workers into the
-                        // settings struct. It's a bit odd but it would work and
-                        // might be cleaner. As it stands, a running worker for
-                        // a given backend will keep running until the app is
-                        // closed. That might not be terrible, but some backends
-                        // can use a lot of resources, like the local models.
-                        self.selected_generative_backend = *backend;
+                        ret = Some(Action::SwitchBackends {
+                            from: self.selected_generative_backend,
+                            to: *backend,
+                        });
+
+                        // We don't immediately switch the backend because we
+                        // want to clean up first. The `App` will switch
+                        // `selected_generative_backend` after the cleanup.
                    }
                }
            });
@@ -365,25 +411,6 @@ impl Settings {
         if ui.button("Add stop string").clicked() {
             predict_options.stop_strings.push(Default::default());
         }
-
-        ui.label("Stop token sequences:").on_hover_text_at_pointer("Stop generating when any of these token sequences are predicted. Note that any model-specific sequences will be added automatically on generation and not shown here.");
-        for (i, seq) in predict_options.stop_sequences.iter_mut().enumerate() {
-            // This might not work very well because the edit will
-            // be cleared when the string is updated and this
-            // happens at every frame. We could use a separate
-            // buffer, but it would have to be associated with the
-            // one that is being edited. If this doesn't work we can
-            // have a delete button and a separate add button with a
-            // text field.
-            let mut s = int_vec_to_string(seq);
-            ui.horizontal_wrapped(|ui| {
-                ui.text_edit_singleline(&mut s);
-                if ui.button("X").clicked() {
-                    remove = Some(i);
-                }
-            });
-            *seq = string_to_int_vec(&s);
-        }
         });
 
         // TODO: Add ui for options. This is perhaps better done in
@@ -397,29 +424,23 @@ impl Settings {
             #[allow(unreachable_patterns)] // because same as above
             _ => {}
         }
+
+        ret
     }
 
-    #[cfg(feature = "gui")]
-    pub fn draw(&mut self, ui: &mut egui::Ui) {
+    pub fn draw(&mut self, ui: &mut egui::Ui) -> Option<Action> {
         ui.label("Default author:");
         ui.text_edit_singleline(&mut self.default_author);
 
-        ui.checkbox(
-            &mut self.prompt_include_authors,
-            "Include author in prompt sent to model.",
-        )
-        .on_hover_text_at_pointer("It will still be shown in the viewport. Hiding it can improve quality of generation since models have biases.");
-
-        ui.checkbox(
-            &mut self.prompt_include_title,
-            "Include title in prompt sent to model.",
-        )
-        .on_hover_text_at_pointer("It will still be shown in the viewport. Hiding it can improve quality of generation since models have biases.");
-
         #[cfg(feature = "generate")]
         {
-            self.draw_generation_settings(ui);
+            ui.separator();
+            ui.heading("Generation");
+            return self.draw_generation_settings(ui);
         }
+
+        #[cfg(not(feature = "generate"))]
+        None
     }
 
     /// Configure model-specific settings when a local model is loaded. It will:
@@ -524,16 +545,3 @@ impl Settings {
         }
     }
 }
-
-#[cfg(feature = "gui")]
-fn int_vec_to_string(vec: &[i32]) -> String {
-    vec.iter()
-        .map(|&i| i.to_string())
-        .collect::<Vec<_>>()
-        .join(", ")
-}
-
-#[cfg(feature = "gui")]
-fn string_to_int_vec(s: &str) -> Vec<i32> {
-    s.split(',').filter_map(|s| s.trim().parse().ok()).collect()
-}
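`GenerativeBackend::DEFAULT` above moves the old `consts.rs` trick into the type itself: `const` items are evaluated at compile time, so the `panic!` turns a misconfigured feature set into a build error rather than a runtime crash. The same pattern in isolation, with a hypothetical string list standing in for `ALL`:

// Sketch of the compile-time check used by `GenerativeBackend::DEFAULT`.
// Because a `const` is evaluated at compile time, the `panic!` becomes a
// build error whenever the list is empty (e.g. no backend features enabled).
const BACKENDS: &[&str] = &["drama_llama", "openai"];

const DEFAULT_BACKEND: &str = if BACKENDS.is_empty() {
    panic!("at least one backend must be enabled")
} else {
    BACKENDS[0]
};

fn main() {
    println!("default backend: {}", DEFAULT_BACKEND);
}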
-        );
-    } else {
-        GENERATIVE_BACKENDS[0]
-    };
diff --git a/src/consts.rs b/src/consts.rs
index 843a1f0..1196511 100644
--- a/src/consts.rs
+++ b/src/consts.rs
@@ -1,6 +1,3 @@
-#[cfg(feature = "generate")]
-use crate::settings::GenerativeBackend;
-
 // Story options
 
 /// What to use if the story has no title.
@@ -9,30 +6,3 @@ pub const DEFAULT_TITLE: &str = "Untitled";
 pub const DEFAULT_AUTHOR: &str = "Anonymous";
 /// What to use if the model name cannot be determined.
 pub const DEFAULT_MODEL_NAME: &str = "AI";
-
-// Generative options
-
-/// All the generative backends that can be used.
-#[cfg(feature = "generate")]
-pub const GENERATIVE_BACKENDS: &[&GenerativeBackend] = &[
-    #[cfg(feature = "drama_llama")]
-    &GenerativeBackend::DramaLlama,
-    #[cfg(feature = "ollama")]
-    &GenerativeBackend::Ollama,
-    #[cfg(feature = "openai")]
-    &GenerativeBackend::OpenAI,
-    #[cfg(feature = "claude")]
-    &GenerativeBackend::Claude,
-];
-
-/// The default generative backend to use.
-// There must be at least one backend or the app will not compile.
-#[cfg(feature = "generate")]
-pub const DEFAULT_GENERATIVE_BACKEND: &GenerativeBackend =
-    if GENERATIVE_BACKENDS.is_empty() {
-        panic!(
-            "There must be at least one generative backend feature enabled to use the `generate` feature."
-        );
-    } else {
-        GENERATIVE_BACKENDS[0]
-    };
diff --git a/src/drama_llama.rs b/src/drama_llama.rs
index 706d6bf..0745f57 100644
--- a/src/drama_llama.rs
+++ b/src/drama_llama.rs
@@ -2,28 +2,31 @@ use std::{path::PathBuf, sync::mpsc::TryRecvError};
 
 use drama_llama::{Engine, PredictOptions};
 
+/// A request to the worker thread (from another thread).
 #[derive(Debug)]
-pub(crate) enum Command {
+pub(crate) enum Request {
     Stop,
     Predict { text: String, opts: PredictOptions },
 }
 
+/// A response from the worker thread (to another thread).
 #[derive(Debug)]
 pub(crate) enum Response {
-    /// Worker is done and can accept new commands.
+    /// Worker is done and can accept new requests.
     Done,
-    /// The worker is busy and cannot accept new commands.
-    Busy { command: Command },
+    /// The worker is busy and cannot accept new requests.
+    Busy { request: Request },
     /// The worker has predicted a piece of text.
     Predicted { piece: String },
 }
 
+/// A worker helps to manage the `drama_llama` worker thread and its channels.
 #[derive(Default)]
 pub(crate) struct Worker {
     /// Thread handle to the worker.
     handle: Option<std::thread::JoinHandle<()>>,
     /// Channel to send text and options to the worker.
-    to_worker: Option<std::sync::mpsc::Sender<Command>>,
+    to_worker: Option<std::sync::mpsc::Sender<Request>>,
     /// Channel to receive strings until the worker is done, then `None`.
     from_worker: Option<std::sync::mpsc::Receiver<Response>>,
 }
@@ -78,11 +81,11 @@ impl Worker {
 
         while let Ok(msg) = from_main.recv() {
             let (text, opts) = match msg {
-                Command::Stop => {
+                Request::Stop => {
                     to_main.send(Response::Done).ok();
                     break;
                 }
-                Command::Predict { text, opts } => {
+                Request::Predict { text, opts } => {
                     // If the requested context size is greater than the
                     // engine's we must recreate it.
                     if opts.n.get() > engine.n_ctx() as usize {
@@ -115,9 +118,9 @@ impl Worker {
                     // since it is the tightest loop we have.
                     match from_main.try_recv() {
                         Err(std::sync::mpsc::TryRecvError::Empty) => {
-                            // No new commands, nothing to do.
+                            // No new requests, nothing to do.
                         }
-                        Ok(Command::Stop) => {
+                        Ok(Request::Stop) => {
                             log::debug!("Generation cancelled.");
                             break;
                         }
@@ -130,7 +133,9 @@ impl Worker {
                             // We can't handle this command right now. We'll
                             // send a busy Response and the main thread can
                             // decide what to do.
-                            to_main.send(Response::Busy { command }).ok();
+                            to_main
+                                .send(Response::Busy { request: command })
+                                .ok();
                         }
                     }
 
@@ -153,10 +158,10 @@ impl Worker {
     /// Stop current generation after the next token. Does not shut down the
     /// worker thread. Does not block. Does not guarantee that generation will
     /// stop immediately. Use [`Worker::shutdown`] to shut down the worker.
-    pub fn stop(&mut self) -> Result<(), std::sync::mpsc::SendError<Command>> {
+    pub fn stop(&mut self) -> Result<(), std::sync::mpsc::SendError<Request>> {
         log::debug!("Telling worker to cancel current generation.");
         if let Some(to_worker) = self.to_worker.as_ref() {
-            to_worker.send(Command::Stop)?;
+            to_worker.send(Request::Stop)?;
         }
 
         Ok(())
@@ -203,21 +208,21 @@ impl Worker {
         &mut self,
         text: String,
         options: drama_llama::PredictOptions,
-    ) -> Result<(), std::sync::mpsc::SendError<Command>> {
+    ) -> Result<(), std::sync::mpsc::SendError<Request>> {
         if !self.is_alive() {
-            return Err(std::sync::mpsc::SendError(Command::Predict {
+            return Err(std::sync::mpsc::SendError(Request::Predict {
                 text,
                 opts: options,
             }));
         }
 
         if let Some(to_worker) = self.to_worker.as_ref() {
-            to_worker.send(Command::Predict {
+            to_worker.send(Request::Predict {
                 text,
                 opts: options,
             })?;
         } else {
-            return Err(std::sync::mpsc::SendError(Command::Predict {
+            return Err(std::sync::mpsc::SendError(Request::Predict {
                 text,
                 opts: options,
             }));
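The `Command` → `Request` rename makes the channel protocol read as a symmetric pair: a `Request` goes in, `Response`s come out, and `Busy { request }` echoes back anything that arrives mid-generation. A cut-down sketch of the worker loop above, using standard-library channels and `Request`/`Response` reduced to simplified variants (the real `Predict` carries `PredictOptions`):

use std::sync::mpsc;
use std::thread;

// Simplified stand-ins for the Request/Response types in this diff.
enum Request {
    Stop,
    Predict { text: String },
}

enum Response {
    Done,
    Busy { request: Request },
    Predicted { piece: String },
}

fn spawn_worker() -> (mpsc::Sender<Request>, mpsc::Receiver<Response>) {
    let (to_worker, from_main) = mpsc::channel::<Request>();
    let (to_main, from_worker) = mpsc::channel::<Response>();

    thread::spawn(move || {
        while let Ok(msg) = from_main.recv() {
            let text = match msg {
                Request::Stop => {
                    // Nothing in flight; acknowledge and wait for more work.
                    to_main.send(Response::Done).ok();
                    continue;
                }
                Request::Predict { text } => text,
            };

            // Pretend "generation": one piece per word.
            for word in text.split_whitespace() {
                // Poll for a stop between pieces, as the real loop does
                // between tokens; anything else is echoed back as Busy.
                match from_main.try_recv() {
                    Ok(Request::Stop) => break,
                    Ok(other) => {
                        to_main.send(Response::Busy { request: other }).ok();
                    }
                    Err(mpsc::TryRecvError::Empty) => {}
                    Err(mpsc::TryRecvError::Disconnected) => return,
                }
                to_main
                    .send(Response::Predicted { piece: word.to_string() })
                    .ok();
            }
            to_main.send(Response::Done).ok();
        }
    });

    (to_worker, from_worker)
}

On the UI side the receiver is drained with `try_recv` once per frame, mirroring what `update_generation` does in app.rs above.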
diff --git a/src/lib.rs b/src/lib.rs
index af0bfea..b6a81c0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,23 +1,34 @@
+//! Weave is primarily a binary crate, but has reusable components that can be
+//! used in other story-writing projects.
+
 #![forbid(unsafe_code)]
 #![cfg_attr(not(debug_assertions), deny(warnings))] // Forbid warnings in release builds
 #![warn(clippy::all, rust_2018_idioms)]
 
+/// [`egui`] [`App`] for the Weave application.
 #[cfg(feature = "gui")]
 pub mod app;
 
+/// OpenAI generative [`Worker`]. [`Request`]s are sent to the worker and
+/// [`Response`]s are received. This can be used to run tasks in the
+/// background and is not tied to any specific frontend.
 #[cfg(feature = "openai")]
 pub(crate) mod openai;
 
+/// [`drama_llama`] generative [`Worker`]. [`Request`]s are sent to the worker
+/// and [`Response`]s are received. This can be used to run tasks in the
+/// background and is not tied to any specific frontend.
 #[cfg(all(feature = "drama_llama", not(target_arch = "wasm32")))]
 pub(crate) mod drama_llama;
 
+/// Crate-wide constants.
 pub mod consts;
+/// Contains [`Node`] and associated types such as [`Meta`].
 pub mod node;
-pub mod settings;
+/// Contains a branching [`Story`] (a tree of [`Node`]s).
 pub mod story;
 
-// ----------------------------------------------------------------------------
-// When compiling for web:
+// wasm entrypoints:
 
 #[cfg(feature = "gui")]
 #[cfg(target_arch = "wasm32")]
diff --git a/src/main.rs b/src/main.rs
index 4ea6fbf..3c51e7f 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,10 +1,10 @@
+//! Native entrypoints for the Weave application.
+
 #![forbid(unsafe_code)]
-#![cfg_attr(not(debug_assertions), deny(warnings))] // Forbid warnings in release builds
+#![cfg_attr(not(debug_assertions), deny(warnings))]
 #![warn(clippy::all, rust_2018_idioms)]
 
-// When compiling natively:
-#[cfg(feature = "gui")]
-#[cfg(not(target_arch = "wasm32"))]
+#[cfg(all(feature = "gui", not(target_arch = "wasm32")))]
 fn main() {
     use eframe::egui::Visuals;
     use weave::app::App;
@@ -22,7 +22,7 @@ fn main() {
         .expect("Failed to run native example");
 }
 
-#[cfg(not(feature = "gui"))]
+#[cfg(all(not(feature = "gui"), not(target_arch = "wasm32")))]
 fn main() {
     println!("This example requires the `gui` feature.");
 }
diff --git a/src/node.rs b/src/node.rs
index 27a6101..e52a7d1 100644
--- a/src/node.rs
+++ b/src/node.rs
@@ -8,6 +8,8 @@ pub struct Piece {
     pub end: usize,
 }
 
+static_assertions::assert_impl_all!(Piece: Send, Sync);
+
 /// Node data. Contains a paragraph within a story tree.
 #[derive(Default, Serialize, Deserialize)]
 pub struct Node<T> {
@@ -24,6 +26,8 @@ pub struct Node<T> {
     pub meta: T,
 }
 
+static_assertions::assert_impl_all!(Node<Meta>: Send, Sync);
+
 /// Node metadata.
 #[derive(Clone, Serialize, Deserialize)]
 #[cfg(feature = "gui")]
@@ -36,6 +40,26 @@ pub struct Meta {
     pub size: egui::Vec2,
 }
 
+#[cfg(feature = "gui")]
+impl Meta {
+    /// Get unique id.
+    pub fn id(&self) -> u128 {
+        self.id
+    }
+}
+
+#[cfg(feature = "gui")]
+impl Default for Meta {
+    fn default() -> Self {
+        let id = uuid::Uuid::new_v4().as_u128();
+        Self {
+            id,
+            pos: egui::Pos2::new(0.0, 0.0),
+            size: egui::Vec2::new(100.0, 100.0),
+        }
+    }
+}
+
 /// An action is needed for a node. All actions imply selection of either the
 /// current node or a child node.
 #[cfg(feature = "gui")]
@@ -49,6 +73,9 @@ pub struct Action {
     pub generate: Option<usize>,
 }
 
+#[cfg(feature = "gui")]
+static_assertions::assert_impl_all!(Action: Send, Sync);
+
 #[cfg(feature = "gui")]
 impl Action {
     /// Returns true if any action is needed.
@@ -68,24 +95,7 @@ pub struct PathAction {
 }
 
 #[cfg(feature = "gui")]
-impl Meta {
-    /// Get unique id.
-    pub fn id(&self) -> u128 {
-        self.id
-    }
-}
-
-#[cfg(feature = "gui")]
-impl Default for Meta {
-    fn default() -> Self {
-        let id = uuid::Uuid::new_v4().as_u128();
-        Self {
-            id,
-            pos: egui::Pos2::new(0.0, 0.0),
-            size: egui::Vec2::new(100.0, 100.0),
-        }
-    }
-}
+static_assertions::assert_impl_all!(PathAction: Send, Sync);
 
 /// Dummy node metadata.
 #[derive(Default, Serialize, Deserialize)]
@@ -253,18 +263,17 @@ impl std::fmt::Display for Node<T> {
     }
 }
 
-#[cfg(feature = "gui")]
 impl Node<Meta> {
     /// Draw the tree. The active path is highlighted. If `lock_topology` is
     /// true, the user cannot add or remove nodes.
     ///
     /// Returns an action to perform at the path or None if no action is needed.
-    // FIXME: we can avoid the active parameter if we move this method to the
-    // story where it fits better.
     #[cfg(feature = "gui")]
     pub fn draw(
         &mut self,
         ui: &mut egui::Ui,
         active_path: Option<&[usize]>,
+        lock_topology: bool,
     ) -> Option<PathAction> {
         let active_path = active_path.unwrap_or(&[]);
         let mut ret = None; // the default, meaning no action is needed.
@@ -295,7 +304,7 @@ impl Node<Meta> {
         }
 
         // Draw the node and take any action in response to its widgets.
-        if let Some(action) = node.draw_one(ui, highlight_node) {
+        if let Some(action) = node.draw_one(ui, highlight_node, lock_topology) {
             if action.delete {
                 // How to delete a node? We're taking a reference to the
                 // node so we can't delete it here. We can delete the
@@ -354,13 +363,10 @@ impl Node<Meta> {
         &mut self,
         ui: &mut egui::Ui,
         highlighted: bool,
+        lock_topology: bool,
     ) -> Option<Action> {
-        let frame =
-            egui::Frame::window(&ui.ctx().style()).fill(if highlighted {
-                egui::Color32::from_rgba_premultiplied(64, 64, 64, 255)
-            } else {
-                egui::Color32::from_rgba_premultiplied(64, 64, 64, 128)
-            });
+        let frame = egui::Frame::window(&ui.ctx().style())
+            .fill(egui::Color32::from_gray(64));
 
         let title = self
             .text
@@ -376,66 +382,77 @@ impl Node<Meta> {
             .auto_sized()
             .frame(frame)
             .show(ui.ctx(), |ui| {
+                if !highlighted {
+                    ui.set_opacity(0.5);
+                }
+
                 let mut response = None;
-                ui.horizontal(|ui| {
-                    if ui
-                        .button("Add Child")
-                        .on_hover_text_at_pointer(
-                            "Add an empty child node."
-                        )
-                        .clicked() {
-                        self.add_child(Node::default());
-                    }
-                    if ui
-                        .button("Delete")
-                        .on_hover_text_at_pointer(
-                            "Delete this node and all its children."
-                        )
-                        .clicked() {
-                        // Tell caller to delete this node.
-                        *(&mut response) = Some(Action {
-                            delete: true,
-                            ..Default::default()
-                        });
-                    }
-                    if ui
-                        .button("Select")
-                        .on_hover_text_at_pointer(
-                            "Set this node as the active node. The story will end or continue from this node."
-                        )
-                        .clicked() {
-                        // Any action means selection.
-                        *(&mut response) = Some(Action::default());
-                    }
-                    // FIXME: The terminology here could be improved. These are
-                    // confusing. We should find new names.
-                    if ui
-                        .button("Continue")
-                        .on_hover_text_at_pointer(
-                            "Continue generating the current node.",
-                        )
-                        .clicked()
-                    {
-                        // Tell caller to continue generation on this node.
-                        *(&mut response) = Some(Action {
-                            continue_: true,
-                            ..Default::default()
-                        });
-                    }
-                    if ui
-                        .button("Generate")
-                        .on_hover_text_at_pointer("Create a new node, select it, and continue generation.")
-                        .clicked() {
-                        // Tell caller to generate a new node.
-                        *(&mut response) = Some(
-                            Action {
-                                generate: Some(self.add_child(Node::default())),
-                                ..Default::default()
-                            },
-                        );
-                    }
-                });
+                if !lock_topology {
+                    ui.horizontal(|ui| {
+                        if ui
+                            .button("Add Child")
+                            .on_hover_text_at_pointer(
+                                "Add an empty child node."
+                            )
+                            .clicked() {
+                            self.add_child(Node::default());
+                        }
+                        if ui
+                            .button("Delete")
+                            .on_hover_text_at_pointer(
+                                "Delete this node and all its children."
+                            )
+                            .clicked() {
+                            // Tell caller to delete this node.
+                            *(&mut response) = Some(Action {
+                                delete: true,
+                                ..Default::default()
+                            });
+                        }
+                        if ui
+                            .button("Select")
+                            .on_hover_text_at_pointer(
+                                "Set this node as the active node. The story will end or continue from this node."
+                            )
+                            .clicked() {
+                            // Any action means selection.
+                            *(&mut response) = Some(Action::default());
+                        }
+                        // FIXME: The terminology here could be improved. These
+                        // are confusing. We should find new names.
+                        if ui
+                            .button("Continue")
+                            .on_hover_text_at_pointer(
+                                "Continue generating the current node.",
+                            )
+                            .clicked()
+                        {
+                            // Tell caller to continue generation on this node.
+                            *(&mut response) = Some(Action {
+                                continue_: true,
+                                ..Default::default()
+                            });
+                        }
+                        if ui
+                            .button("Generate")
+                            .on_hover_text_at_pointer("Create a new node, select it, and continue generation.")
+                            .clicked() {
+                            // Tell caller to generate a new node.
+                            *(&mut response) = Some(
+                                Action {
+                                    generate: Some(self.add_child(Node::default())),
+                                    ..Default::default()
+                                },
+                            );
+                        }
+                    });
+                }
 
+                // We can still allow editing the text during generation since
+                // the pieces are still appended to the end. There is no
+                // ownership issue because of the immediate mode GUI.
                 if ui.text_edit_multiline(&mut self.text).changed() {
                     // FIXME: We're clearing the pieces here, but we can handle
                     // this better.
@@ -475,7 +492,7 @@ fn draw_line(ui: &mut egui::Ui, src: Meta, dst: Meta, highlighted: bool) {
     } else {
         egui::Color32::from_rgba_premultiplied(128, 128, 128, 255)
     };
-    let stroke = egui::Stroke::new(1.0, color);
+    let stroke = egui::Stroke::new(if highlighted { 2.0 } else { 1.0 }, color);
     let src = src.pos + src.size / 2.0;
     let dst = dst.pos + dst.size / 2.0;
     ui.painter().line_segment([src, dst], stroke);
 }
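The new `lock_topology` flag disables the buttons that change the tree's shape while a generation is streaming into the head, but deliberately leaves text editing live: in immediate mode the widgets are rebuilt every frame, and generated pieces are only ever appended, so the edit and the generator never alias. A sketch of the gating, assuming egui and a much-simplified `Node`:

// Sketch of gating structural edits behind a flag while keeping text edits
// available. `Node` here is hypothetical; the real one carries metadata.
struct Node {
    text: String,
    children: Vec<Node>,
}

impl Node {
    fn draw_one(&mut self, ui: &mut egui::Ui, lock_topology: bool) {
        if !lock_topology {
            // Buttons that change the tree shape are only shown when no
            // generation is in progress.
            ui.horizontal(|ui| {
                if ui.button("Add Child").clicked() {
                    self.children.push(Node {
                        text: String::new(),
                        children: Vec::new(),
                    });
                }
            });
        }
        // Editing text is always allowed: new pieces are appended at the
        // end, so an in-flight generation never invalidates the edit.
        ui.text_edit_multiline(&mut self.text);
    }
}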
diff --git a/src/openai.rs b/src/openai.rs
index 2441aec..0768ba8 100644
--- a/src/openai.rs
+++ b/src/openai.rs
@@ -381,8 +381,8 @@ impl Settings {
         // on startup with futures::executor::block_on.
         // FIXME: This is blocking. We do have a way of sending a
-        // command to the worker to fetch the models, but it's on the
-        // parent struct, so we'll need to return some kind of command
+        // request to the worker to fetch the models, but it's on the
+        // parent struct, so we'll need to return some kind of request
         // from here to the parent to tell it to fetch the models. Then
         // when the models are ready, they're sent back to the main
         // thread and all is well with no blocking. But this is fine
@@ -422,7 +422,7 @@ impl Settings {
 // We're using the same interface as `drama_llama`. Eventually we can define a
 // trait if all the stars align, but not so soon.
 #[derive(Debug)]
-pub(crate) enum Command {
+pub(crate) enum Request {
     /// Worker should cancel any current generation, but not shut down. Dropping
     /// the channel will shut down the worker.
     Stop,
@@ -443,7 +443,7 @@ pub(crate) enum Response {
         /// Available models. The UI should probably display these.
         models: Vec,
     },
-    /// Worker is busy generating a response. Attached is the command that
+    /// Worker is busy generating a response. Attached is the request that
     /// would have been acted upon.
     // although with OpenAI's streaming API and our design, there is no reason
     // we can't have concurrent generations going eventually, however there are
     // reasons not to just yet. Locking the UI during generation should
     // prevent some cases like deleting a head while it's generating, however
     // starting new generations should be fine.
     // TODO: Handle the above carefully in the App. Try to break it.
-    Busy { command: Command },
+    Busy { request: Request },
     /// The worker has predicted a piece of text along with OpenAI specific
     /// metadata
     // (since we're actually paying for it, might as well use it).
@@ -472,7 +472,7 @@ pub(crate) struct Worker {
     // We do need to run the executor in a separate thread. We can't run it in
     // the main thread because it's blocking.
     handle: Option<std::thread::JoinHandle<()>>,
-    to_worker: Option<futures::channel::mpsc::Sender<Command>>,
+    to_worker: Option<futures::channel::mpsc::Sender<Request>>,
     from_worker: Option<futures::channel::mpsc::Receiver<Response>>,
 }
@@ -514,20 +514,20 @@ impl Worker {
         rt.block_on(async move {
             // The logic here is synchronous. We do want to wait for one
-            // command to finish before starting the next one. Otherwise we
+            // request to finish before starting the next one. Otherwise we
             // could use `for_each_concurrent` or something, but we would
-            // have to associate the commands with the appropriate nodes.
+            // have to associate the requests with the appropriate nodes.
             // This can wait until some changes in `App` and `Story` are
             // made so we can support multiple "heads" and lock the UI
             // appropriately.
-            while let Some(command) = from_main.next().await {
-                let send_response = match command {
-                    Command::Stop => {
+            while let Some(request) = from_main.next().await {
+                let send_response = match request {
+                    Request::Stop => {
                         // We are already stopped. We just tell main we're
                         // done.
                         to_main.send(Response::Done).await
                     }
-                    Command::FetchModels => {
+                    Request::FetchModels => {
                         let models = match client.list_models().await {
                             Ok(models) => models,
                             Err(e) => {
@@ -546,7 +546,7 @@ impl Worker {
                             .send(Response::Models { models })
                             .await
                     }
-                    Command::Predict { opts } => {
+                    Request::Predict { opts } => {
                         let args: openai_rust::chat::ChatArguments = opts.into();
                         let mut stream =
 
                         // like with `drama_llama`, at this point we're
                         // going to check for stop signals. We could
                         // also `select!` on the channel and the stream
-                        // to handle other commands concurrently, but
+                        // to handle other requests concurrently, but
                         // I'm unsure about cancel safety at the moment.
                         // The docs on this in the openai crate are not
                         // specific on this. TODO: read the source.
                         while let Ok(cmd) = from_main.try_next() {
                             match cmd {
-                                Some(Command::Stop) => {
+                                Some(Request::Stop) => {
                                     log::debug!("Generation cancelled.");
                                     // Break the outer loop which will
                                     // drop the stream and cancel the
                                     // generation.
                                 }
                                 Some(cmd) => {
                                     // We don't care about other
-                                    // commands while generating. We
+                                    // requests while generating. We
                                     // *could* handle them concurrently,
                                     // but not right now. For the moment
                                     // we will send them back as busy.
                                     to_main
-                                        .send(Response::Busy { command: cmd })
+                                        .send(Response::Busy { request: cmd })
                                         .await.ok();
                                 }
                             }
@@ -639,7 +639,7 @@ impl Worker {
             match send_response {
                 Ok(_) => {
                     // Response sent successfully. We can now accept the
-                    // next command.
+                    // next request.
                 }
                 Err(e) => {
                     if e.is_disconnected() {
@@ -671,12 +671,12 @@ impl Worker {
     /// an error. In this case await `stop` instead or terminate the process,
     /// since it shouldn't happen. If the channel is full, the UI is flooding the
     /// channel with requests, which shouldn't happen since the worker checks for
-    /// commands at regular intervals, sending them back as `Busy` if it's
+    /// requests at regular intervals, sending them back as `Busy` if it's
     /// currently generating.
-    pub fn try_stop(&mut self) -> Result<(), futures::channel::mpsc::TrySendError<Command>> {
+    pub fn try_stop(&mut self) -> Result<(), futures::channel::mpsc::TrySendError<Request>> {
         log::debug!("Telling worker to cancel current generation.");
         if let Some(to_worker) = self.to_worker.as_mut() {
-            to_worker.try_send(Command::Stop)?;
+            to_worker.try_send(Request::Stop)?;
         }
 
         Ok(())
@@ -692,10 +692,10 @@ impl Worker {
     /// receiver is full. This should not happen. If it does, the UI is sending
     /// too many requests. This is a bug in the UI code and/or the worker since
     /// this shouldn't be possible.
-    pub fn shutdown(&mut self) -> Result<(), futures::channel::mpsc::TrySendError<Command>> {
+    pub fn shutdown(&mut self) -> Result<(), futures::channel::mpsc::TrySendError<Request>> {
         match self.try_stop() {
             Ok(_) => {
                 // We sent the stop request. Now we can drop the channel to
                 // trigger the worker to shut down.
             },
             Err(e) => {
@@ -736,7 +736,7 @@ impl Worker {
     ///
     /// # Panics
     /// * If the worker is not alive.
-    pub fn predict(&mut self, opts: ChatArguments) -> Result<(), futures::channel::mpsc::TrySendError<Command>> {
+    pub fn predict(&mut self, opts: ChatArguments) -> Result<(), futures::channel::mpsc::TrySendError<Request>> {
         if !self.is_alive() {
             // So the futures API does not allow us to construct an error since
             // the fields are private and the only constructors are private.
@@ -746,7 +746,7 @@ impl Worker {
         }
 
         if let Some(to_worker) = self.to_worker.as_mut() {
-            to_worker.try_send(Command::Predict { opts })?;
+            to_worker.try_send(Request::Predict { opts })?;
         }
 
         Ok(())
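On the async side there is no explicit kill switch for an in-flight completion: the worker polls its own request channel with `try_next` between stream items and cancels by breaking out of the loop, which drops the response stream. A sketch of that shape using the `futures` crate, with a generic piece stream standing in for the openai_rust types:

use futures::{channel::mpsc, StreamExt};

enum Request {
    Stop,
}

// Consume a stream of pieces, polling the request channel between items.
// Dropping `stream` when the function returns after `break` is what
// actually cancels the underlying work.
async fn pump<S>(mut stream: S, from_main: &mut mpsc::Receiver<Request>)
where
    S: futures::Stream<Item = String> + Unpin,
{
    while let Some(piece) = stream.next().await {
        // Non-blocking check for a stop request between pieces.
        if let Ok(Some(Request::Stop)) = from_main.try_next() {
            break;
        }
        println!("{piece}");
    }
}

The real worker bounces any non-`Stop` request received at this polling point back to the UI as `Busy`, which is why the channel can never silently drop work.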
diff --git a/src/story.rs b/src/story.rs
index 96ac428..2686e28 100644
--- a/src/story.rs
+++ b/src/story.rs
@@ -4,15 +4,6 @@ use serde::{Deserialize, Serialize};
 
 use crate::node::{Meta, Node};
 
-#[derive(Default, Serialize, Deserialize)]
-pub struct Story {
-    active_path: Option<Vec<usize>>,
-    pub title: String,
-    author_to_id: HashMap<String, u8>,
-    id_to_author: Vec<String>,
-    root: Node<Meta>,
-}
-
 #[derive(derive_more::From)]
 pub enum AuthorID {
     String(String),
@@ -25,6 +16,19 @@ impl From<&str> for AuthorID {
     }
 }
 
+static_assertions::assert_impl_all!(AuthorID: Send, Sync);
+
+#[derive(Default, Serialize, Deserialize)]
+pub struct Story {
+    active_path: Option<Vec<usize>>,
+    pub title: String,
+    author_to_id: HashMap<String, u8>,
+    id_to_author: Vec<String>,
+    root: Node<Meta>,
+}
+
+static_assertions::assert_impl_all!(Story: Send, Sync);
+
 impl Story {
     pub fn new(title: String, author: String) -> Self {
         let mut new = Self {
@@ -131,17 +135,24 @@ impl Story {
     }
 
     /// Draw UI for the story.
+    ///
+    /// If `lock_topology` is true, the user cannot add or remove nodes.
     #[cfg(feature = "gui")]
-    pub fn draw(&mut self, ui: &mut egui::Ui) -> Option<crate::node::Action> {
+    pub fn draw(
+        &mut self,
+        ui: &mut egui::Ui,
+        lock_topology: bool,
+    ) -> Option<crate::node::Action> {
         use crate::node::PathAction;
 
         ui.label(self.to_string());
 
         // Draw, and update active path if changed.
-        if let Some(PathAction { path, action }) = self
-            .root
-            .draw(ui, self.active_path.as_ref().map(|v| v.as_slice()))
-        {
+        if let Some(PathAction { path, action }) = self.root.draw(
+            ui,
+            self.active_path.as_ref().map(|v| v.as_slice()),
+            lock_topology,
+        ) {
             self.active_path = Some(path);
             // FIXME: as it turns out all the actions are mutually exclusive,
             // so we can probably use an enum rather than a struct. The user can
@@ -159,9 +170,12 @@ impl Story {
     }
 
     /// Remove the head as well as all its children.
+    ///
+    /// Note: The root node is never removed.
     pub fn decapitate(&mut self) {
         if let Some(path) = &mut self.active_path {
             if path.is_empty() {
+                // There is always at least one node in the story.
                 self.active_path = None;
             } else {
                 let head_index = path.pop().unwrap();
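The new doc note that `decapitate` never removes the root falls out of the path representation: the active path is a list of child indices from the root, so an empty path means the head *is* the root, and only the selection is cleared. A sketch of the same invariant over a bare tree (simplified `Story`, no metadata):

// Sketch of the `decapitate` invariant: paths are child indices from the
// root, so an empty path designates the root itself, which is never removed.
struct Node {
    children: Vec<Node>,
}

struct Story {
    root: Node,
    active_path: Option<Vec<usize>>,
}

impl Story {
    fn decapitate(&mut self) {
        if let Some(path) = &mut self.active_path {
            if path.is_empty() {
                // Head is the root: keep the node, clear the selection.
                self.active_path = None;
            } else {
                // Pop the head's index, walk to its parent, and remove it
                // (together with all of its children).
                let head_index = path.pop().unwrap();
                let mut parent = &mut self.root;
                for &i in path.iter() {
                    parent = &mut parent.children[i];
                }
                parent.children.remove(head_index);
            }
        }
    }
}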