Add GraphQL client API (#119)
* Introduce client api with nextEntryArgs stub

* Implement current nextEntryArgs RPC method using graphql

* Fix basic graphql endpoint test

* Add tests

* Update changelog

* Handle errors

* Clean up

* Refactor for storage provider

* Revert changelog style change

* Move request/response structs to graphql client module

* Adapt EntryArgsResponse implementation to GraphQL

* Recover old file structure for now

* Improve test

* Switch back branch

* Clarify test scope

* Handle serialisation in EntryArgsResponse impl

* Implement deserialisation

* Implement serialisation

* Rearrange file

Co-authored-by: Sam Andreae <contact@samandreae.com>
cafca and sandreae authored May 19, 2022
1 parent f8eb3ca commit c1b75d3
Showing 15 changed files with 327 additions and 46 deletions.
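For orientation before the file-by-file diff: a minimal sketch of how a client could call the new nextEntryArgs query over HTTP. The query shape and the /graphql path are taken from the tests added in this commit; the node address and the use of reqwest are assumptions for illustration only, not part of the change.

// Hedged sketch, not part of this commit: post the new `nextEntryArgs` query
// to a running aquadoggo node. The address below is an assumption.
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let query = r#"{
        nextEntryArgs(
            publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a"
        ) {
            logId,
            seqNum,
            backlink,
            skiplink
        }
    }"#;

    let response: serde_json::Value = reqwest::Client::new()
        .post("http://localhost:2020/graphql") // assumed local node address
        .json(&json!({ "query": query }))
        .send()
        .await?
        .json()
        .await?;

    // Prints the entry arguments needed to sign and publish the next entry.
    println!("{}", response["data"]["nextEntryArgs"]);
    Ok(())
}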
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Refactor module structure, propagate errors in worker to service manager [#97](https://github.com/p2panda/aquadoggo/pull/97)
- Restructure storage modules and remove JSON RPC [#101](https://github.com/p2panda/aquadoggo/pull/101)
- Implement new methods required for replication defined by `EntryStore` trait [#102](https://github.com/p2panda/aquadoggo/pull/102)
- GraphQL client API [#119](https://github.com/p2panda/aquadoggo/pull/119)

### Changed

6 changes: 3 additions & 3 deletions aquadoggo/src/context.rs
@@ -5,7 +5,7 @@ use std::sync::Arc;

use crate::config::Configuration;
use crate::db::Pool;
use crate::graphql::{build_static_schema, StaticSchema};
use crate::graphql::{build_root_schema, RootSchema};

/// Inner data shared across all services.
pub struct Data {
@@ -16,13 +16,13 @@ pub struct Data {
pub pool: Pool,

/// Static GraphQL schema.
pub schema: StaticSchema,
pub schema: RootSchema,
}

impl Data {
/// Initialize new data instance with shared database connection pool.
pub fn new(pool: Pool, config: Configuration) -> Self {
let schema = build_static_schema(pool.clone());
let schema = build_root_schema(pool.clone());

Self {
config,
4 changes: 3 additions & 1 deletion aquadoggo/src/db/provider.rs
@@ -10,7 +10,9 @@ use crate::db::stores::StorageEntry;
use crate::db::stores::StorageLog;
use crate::db::Pool;
use crate::errors::StorageProviderResult;
use crate::rpc::{EntryArgsRequest, EntryArgsResponse, PublishEntryRequest, PublishEntryResponse};
use crate::graphql::client::{
EntryArgsRequest, EntryArgsResponse, PublishEntryRequest, PublishEntryResponse,
};

pub struct SqlStorage {
pub(crate) pool: Pool,
8 changes: 4 additions & 4 deletions aquadoggo/src/db/stores/entry.rs
@@ -421,7 +421,7 @@ mod tests {

use crate::db::stores::entry::StorageEntry;
use crate::db::stores::test_utils::test_db;
use crate::rpc::EntryArgsRequest;
use crate::graphql::client::EntryArgsRequest;

#[tokio::test]
async fn insert_entry() {
@@ -456,16 +456,16 @@

let update_operation = Operation::new_update(
schema.clone(),
vec![next_entry_args.entry_hash_backlink.clone().unwrap().into()],
vec![next_entry_args.backlink.clone().unwrap().into()],
fields.clone(),
)
.unwrap();

let update_entry = Entry::new(
&next_entry_args.log_id,
Some(&update_operation),
next_entry_args.entry_hash_skiplink.as_ref(),
next_entry_args.entry_hash_backlink.as_ref(),
next_entry_args.skiplink.as_ref(),
next_entry_args.backlink.as_ref(),
&next_entry_args.seq_num,
)
.unwrap();
8 changes: 4 additions & 4 deletions aquadoggo/src/db/stores/test_utils.rs
@@ -16,7 +16,7 @@ use p2panda_rs::storage_provider::traits::StorageProvider;
use p2panda_rs::test_utils::constants::{DEFAULT_HASH, DEFAULT_PRIVATE_KEY, TEST_SCHEMA_ID};

use crate::db::provider::SqlStorage;
use crate::rpc::{EntryArgsRequest, PublishEntryRequest};
use crate::graphql::client::{EntryArgsRequest, PublishEntryRequest};
use crate::test_helpers::initialize_db;

pub fn test_operation() -> Operation {
@@ -129,7 +129,7 @@ pub async fn test_db(no_of_entries: usize) -> SqlStorage {
.await
.unwrap();

let backlink = next_entry_args.entry_hash_backlink.clone().unwrap();
let backlink = next_entry_args.backlink.clone().unwrap();

// Construct the next UPDATE operation, we use the backlink hash in the prev_op vector
let update_operation =
@@ -138,8 +138,8 @@
let update_entry = Entry::new(
&next_entry_args.log_id,
Some(&update_operation),
next_entry_args.entry_hash_skiplink.as_ref(),
next_entry_args.entry_hash_backlink.as_ref(),
next_entry_args.skiplink.as_ref(),
next_entry_args.backlink.as_ref(),
&next_entry_args.seq_num,
)
.unwrap();
2 changes: 1 addition & 1 deletion aquadoggo/src/errors.rs
@@ -1,4 +1,4 @@
// SPDX-License-Identifier: AGPL-3.0-or-later

/// A specialized result type for the storage provider.
pub type StorageProviderResult<T> = anyhow::Result<T, Box<dyn std::error::Error + Sync + Send>>;
pub type StorageProviderResult<T> = anyhow::Result<T, Box<dyn std::error::Error + Send + Sync>>;
@@ -2,6 +2,9 @@

mod request;
mod response;
mod root;
pub(crate) mod u64_string;

pub use request::{EntryArgsRequest, PublishEntryRequest};
pub use response::{EntryArgsResponse, PublishEntryResponse};
pub use root::ClientRoot;
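The new `u64_string` helper module itself is not part of this diff. As a rough sketch of what its `log_id_string_serialisation` submodule might look like (the name comes from the `#[serde(with = ...)]` attribute in the response struct below, and the `LogId::new`/`as_u64` calls mirror their use elsewhere in this commit; this is an assumption, not the committed code):

// Hedged sketch: (de)serialise a LogId as a decimal string so that large
// u64 values survive JSON transport without losing precision.
pub mod log_id_string_serialisation {
    use p2panda_rs::entry::LogId;
    use serde::{de, Deserialize, Deserializer, Serializer};

    pub fn serialize<S: Serializer>(log_id: &LogId, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&log_id.clone().as_u64().to_string())
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<LogId, D::Error> {
        let value = String::deserialize(deserializer)?;
        value.parse::<u64>().map(LogId::new).map_err(de::Error::custom)
    }
}

A matching `seq_num_string_serialisation` module would follow the same pattern, with the extra step that `SeqNum::new` returns a `Result`.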
File renamed without changes.
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: AGPL-3.0-or-later

use serde::Serialize;
use async_graphql::Object;
use serde::{Deserialize, Serialize};

use p2panda_rs::entry::{LogId, SeqNum};
use p2panda_rs::hash::Hash;
@@ -11,27 +12,48 @@ use crate::db::models::EntryRow;
/// Response body of `panda_getEntryArguments`.
///
/// `seq_num` and `log_id` are returned as strings to be able to represent large integers in JSON.
#[derive(Serialize, Debug)]
#[derive(Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct EntryArgsResponse {
pub entry_hash_backlink: Option<Hash>,
pub entry_hash_skiplink: Option<Hash>,
pub seq_num: SeqNum,
#[serde(with = "super::u64_string::log_id_string_serialisation")]
pub log_id: LogId,

#[serde(with = "super::u64_string::seq_num_string_serialisation")]
pub seq_num: SeqNum,

pub backlink: Option<Hash>,

pub skiplink: Option<Hash>,
}

#[Object]
impl EntryArgsResponse {
#[graphql(name = "logId")]
async fn log_id(&self) -> String {
self.log_id.clone().as_u64().to_string()
}

#[graphql(name = "seqNum")]
async fn seq_num(&self) -> String {
self.seq_num.clone().as_u64().to_string()
}

async fn backlink(&self) -> Option<String> {
self.backlink.clone().map(|hash| hash.as_str().to_string())
}

async fn skiplink(&self) -> Option<String> {
self.skiplink.clone().map(|hash| hash.as_str().to_string())
}
}

impl AsEntryArgsResponse for EntryArgsResponse {
fn new(
entry_hash_backlink: Option<Hash>,
entry_hash_skiplink: Option<Hash>,
seq_num: SeqNum,
log_id: LogId,
) -> Self {
fn new(backlink: Option<Hash>, skiplink: Option<Hash>, seq_num: SeqNum, log_id: LogId) -> Self {
EntryArgsResponse {
entry_hash_backlink,
entry_hash_skiplink,
seq_num,
log_id,
seq_num,
backlink,
skiplink,
}
}
}
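To make the serialisation choices above concrete, a small sketch (not taken from the commit) of the JSON a client receives for the first entry in a new log: field names are camelCase and logId/seqNum are strings. The import path of EntryArgsResponse is an assumption.

// Hedged sketch: round-trip the documented wire format back into the struct.
#[cfg(test)]
mod wire_format_example {
    use p2panda_rs::entry::{LogId, SeqNum};
    use serde_json::json;

    use crate::graphql::client::EntryArgsResponse; // assumed crate-internal path

    #[test]
    fn entry_args_response_deserialises_from_strings() {
        let value = json!({
            "logId": "1",
            "seqNum": "1",
            "backlink": null,
            "skiplink": null
        });

        let args: EntryArgsResponse = serde_json::from_value(value).unwrap();

        assert_eq!(
            args,
            EntryArgsResponse {
                log_id: LogId::new(1),
                seq_num: SeqNum::new(1).unwrap(),
                backlink: None,
                skiplink: None,
            }
        );
    }
}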
141 changes: 141 additions & 0 deletions aquadoggo/src/graphql/client/root.rs
@@ -0,0 +1,141 @@
// SPDX-License-Identifier: AGPL-3.0-or-later

use async_graphql::{Context, Error, Object, Result};
use p2panda_rs::document::DocumentId;
use p2panda_rs::identity::Author;
use p2panda_rs::storage_provider::traits::StorageProvider;

use crate::db::provider::SqlStorage;
use crate::db::Pool;

use super::{EntryArgsRequest, EntryArgsResponse};

#[derive(Default, Debug, Copy, Clone)]
/// The GraphQL root for the client api that p2panda clients can use to connect to a node.
pub struct ClientRoot;

#[Object]
impl ClientRoot {
/// Return required arguments for publishing the next entry.
async fn next_entry_args(
&self,
ctx: &Context<'_>,
#[graphql(
name = "publicKey",
desc = "Public key that will publish using the returned entry arguments"
)]
public_key_param: String,
#[graphql(
name = "documentId",
desc = "Document id to which the entry's operation will apply"
)]
document_id_param: Option<String>,
) -> Result<EntryArgsResponse> {
// Parse and validate parameters
let document_id = match document_id_param {
Some(val) => Some(val.parse::<DocumentId>()?),
None => None,
};
let args = EntryArgsRequest {
author: Author::new(&public_key_param)?,
document: document_id,
};

// Prepare database connection
let pool = ctx.data::<Pool>()?;
let provider = SqlStorage {
pool: pool.to_owned(),
};

provider
.get_entry_args(&args)
.await
.map_err(|err| Error::from(err))
}
}

#[cfg(test)]
mod tests {
use async_graphql::{value, Response};
use p2panda_rs::entry::{LogId, SeqNum};
use serde_json::json;

use crate::config::Configuration;
use crate::context::Context;
use crate::graphql::client::EntryArgsResponse;
use crate::server::build_server;
use crate::test_helpers::{initialize_db, TestClient};

#[tokio::test]
async fn next_entry_args_valid_query() {
let pool = initialize_db().await;
let context = Context::new(pool.clone(), Configuration::default());
let client = TestClient::new(build_server(context));

// Selected fields need to be alphabetically sorted because that's what the `json` macro
// that is used in the assert below produces.
let response = client
.post("/graphql")
.json(&json!({
"query": r#"{
nextEntryArgs(
publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a"
) {
logId,
seqNum,
backlink,
skiplink
}
}"#,
}))
.send()
.await
// .json::<GQLResponse<EntryArgsGQLResponse>>()
.json::<Response>()
.await;

let expected_entry_args = EntryArgsResponse {
log_id: LogId::new(1),
seq_num: SeqNum::new(1).unwrap(),
backlink: None,
skiplink: None,
};
let received_entry_args: EntryArgsResponse = match response.data {
async_graphql::Value::Object(result_outer) => {
async_graphql::from_value(result_outer.get("nextEntryArgs").unwrap().to_owned())
.unwrap()
}
_ => panic!("Expected return value to be an object"),
};

assert_eq!(received_entry_args, expected_entry_args);
}

#[tokio::test]
async fn next_entry_args_error_response() {
let pool = initialize_db().await;
let context = Context::new(pool.clone(), Configuration::default());
let client = TestClient::new(build_server(context));

// Selected fields need to be alphabetically sorted because that's what the `json` macro
// that is used in the assert below produces.

let response = client
.post("/graphql")
.json(&json!({
"query": r#"{
nextEntryArgs(publicKey: "nope") {
logId
}
}"#,
}))
.send()
.await;

let response: Response = response.json().await;
assert_eq!(
response.errors[0].message,
"invalid hex encoding in author string"
)
}
}
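Because ClientRoot pulls the connection pool out of the GraphQL context via ctx.data::<Pool>(), the same query can also be executed in-process against the root schema, without going through the HTTP server. A sketch under the assumption that build_root_schema(pool) registers the pool as schema data (which the ctx.data call above implies):

// Hedged sketch, not part of this commit: run `nextEntryArgs` directly
// against the schema returned by `build_root_schema`.
use crate::graphql::build_root_schema;
use crate::test_helpers::initialize_db;

#[tokio::test]
async fn next_entry_args_via_schema() {
    let pool = initialize_db().await;
    let schema = build_root_schema(pool);

    let response = schema
        .execute(
            r#"{
                nextEntryArgs(
                    publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a"
                ) {
                    logId
                    seqNum
                }
            }"#,
        )
        .await;

    // `Response::is_ok` returns true when no errors were collected.
    assert!(response.is_ok());
}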
