Setup snapshot testing for editoast_derive using insta #9938

Merged · 3 commits · Dec 16, 2024
2 changes: 2 additions & 0 deletions .github/workflows/build.yml
@@ -595,8 +595,10 @@ jobs:
postgis/postgis:latest \
psql postgresql://postgres:password@localhost:5432 -f /init_db.sql

# snapshot testing library `insta` requires CI=true
docker run --name=editoast-test --net=host -v $PWD/output:/output \
-e DATABASE_URL="postgres://osrd:password@localhost:5432/osrd" \
-e CI="true" \
${{ fromJSON(needs.build.outputs.stable_tags).editoast-test }} \
/bin/sh -c "diesel migration run --locked-schema && RUST_BACKTRACE=1 cargo test --workspace --verbose -- --test-threads=4 && grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o /output/lcov.info"

1 change: 1 addition & 0 deletions editoast/.gitignore
@@ -1 +1,2 @@
target
*.snap.new
52 changes: 48 additions & 4 deletions editoast/Cargo.lock

Some generated files are not rendered by default.

7 changes: 7 additions & 0 deletions editoast/Cargo.toml
@@ -213,3 +213,10 @@ debug = "limited"
[profile.dev-for-debug]
inherits = "dev"
debug = "full"

# Makes snapshot diffs faster to compute.
# insta is used in editoast_derive to track changes made to proc-macros.
# Cf. https://insta.rs/docs/quickstart/#optional-faster-runs
[profile.dev.package]
insta.opt-level = 3
similar.opt-level = 3
17 changes: 17 additions & 0 deletions editoast/README.md
@@ -96,3 +96,20 @@ OpenApi when a change has been made to an endpoint, run the following command:
```sh
cargo run openapi > openapi.yaml
```

## Working with `editoast_derive`

We define some custom procedural macros in the `editoast_derive` crate. These rely on the snapshot-testing library [`insta`](https://insta.rs/). The workflow goes like this:

1. Change the output of a macro
2. Run the tests using `cargo test`
3. Since the output has changed, the test will fail, showing a diff of the old vs. new snapshot content. The new snapshot will be saved to disk with the extension `*.snap.new`.
4. If the new snapshot is correct, rename it to `*.snap` and commit it.
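
For orientation, a minimal `insta` snapshot test looks like this (a generic sketch, not code from this repository):

```rust
#[cfg(test)]
mod tests {
    #[test]
    fn formats_greeting() {
        // Hypothetical example: snapshot whatever string your code produces.
        let output = format!("Hello, {}!", "editoast");
        // Runs compare against the committed `.snap` file; on a mismatch,
        // insta records a `*.snap.new` for review and the test fails.
        insta::assert_snapshot!(output);
    }
}
```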

> [!TIP]
> You can use [`cargo-insta`](https://insta.rs/docs/cli/) to review pending snapshots and accept them conveniently.
> ```sh
> $ cargo insta review
> ```

For more information, visit the [`insta` documentation](https://insta.rs/docs/).
2 changes: 2 additions & 0 deletions editoast/editoast_derive/Cargo.toml
@@ -18,7 +18,9 @@ syn = "2.0"
proc-macro = true

[dev-dependencies]
insta = "1.41"
pretty_assertions = "1.4.1"
prettyplease = "0.2"

[lints]
workspace = true
23 changes: 23 additions & 0 deletions editoast/editoast_derive/src/error.rs
@@ -235,3 +235,26 @@ fn extract_type(ty: &syn::Type) -> Option<String> {
        _ => None,
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_construction() {
        crate::assert_macro_expansion!(
            expand_editoast_error,
            syn::parse_quote! {
                #[derive(EditoastError)]
                #[editoast_error(base_id = "infra", default_status = 500)]
                pub enum InfraApiError {
                    #[editoast_error(status = 404, no_context)]
                    NotFound { infra_id: i64 },
                    #[editoast_error(status = 400)]
                    BadRequest { message: String },
                    InternalError,
                }
            }
        );
    }
}
32 changes: 32 additions & 0 deletions editoast/editoast_derive/src/lib.rs
@@ -237,3 +237,35 @@ pub fn model(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
        .unwrap_or_else(darling::Error::write_errors)
        .into()
}

#[cfg(test)]
mod test_utils {
    pub(crate) fn pretty_tokens(tokens: &proc_macro2::TokenStream) -> String {
        let file = syn::parse_file(tokens.to_string().as_str()).unwrap();
        prettyplease::unparse(&file)
    }

    macro_rules! assert_macro_expansion {
        ($expansion:path, $derive_input:expr) => {
            let input: syn::DeriveInput = $derive_input;
            let source = crate::test_utils::pretty_tokens(&<syn::DeriveInput as quote::ToTokens>::to_token_stream(&input));
            let expansion = $expansion(&input).expect("macro should expand faultlessly");
            let expected = crate::test_utils::pretty_tokens(&expansion);

            // HACK: sadly insta doesn't let us print multiline strings in the snapshot description
            // or info sections. So we have to incorporate the source input into the snapshot content
            // in order to keep it pretty printed and next to its expansion.
            insta::with_settings!({
                omit_expression => true,
            }, {
                let sep = "-".repeat(77);
                insta::assert_snapshot!(format!("// Source\n// {sep}\n\n{source}\n// Macro expansion\n// {sep}\n\n{expected}"));
            });
        };
    }

    pub(crate) use assert_macro_expansion;
}

#[cfg(test)]
use test_utils::assert_macro_expansion;
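
Given the `format!` layout above, the content of each generated snapshot file has the following shape (an illustration of the layout only, derived from that format string; the separator is a row of 77 dashes):

```text
// Source
// -----------------------------------------------------------------------------

<pretty-printed `syn::DeriveInput` passed to the macro>

// Macro expansion
// -----------------------------------------------------------------------------

<pretty-printed `proc_macro2::TokenStream` returned by the macro>
```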
40 changes: 23 additions & 17 deletions editoast/editoast_derive/src/model.rs
@@ -87,21 +87,27 @@ pub fn model(input: &DeriveInput) -> Result<TokenStream> {
}

 #[cfg(test)]
-#[test]
-fn test_construction() {
-    let input = syn::parse_quote! {
-        #[derive(Clone, Model)]
-        #[model(table = editoast_models::tables::osrd_infra_document)]
-        #[model(row(type_name = "DocumentRow", derive(Debug)))]
-        #[model(changeset(type_name = "DocumentChangeset", public, derive(Debug)))] // fields are public
-        #[model(gen(ops = crud, batch_ops = crud, list))]
-        struct Document {
-            #[model(column = "id", preferred, primary)]
-            id_: i64,
-            #[model(identifier, json)]
-            content_type: String,
-            data: Vec<u8>,
-        }
-    };
-    let _ = model(&input).expect("should generate");
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_construction() {
+        crate::assert_macro_expansion!(
+            model,
+            syn::parse_quote! {
+                #[derive(Clone, Model)]
+                #[model(table = editoast_models::tables::osrd_infra_document)]
+                #[model(row(type_name = "DocumentRow", derive(Debug)))]
+                #[model(changeset(type_name = "DocumentChangeset", public, derive(Debug)))] // fields are public
+                #[model(gen(ops = crud, batch_ops = crud, list))]
+                struct Document {
+                    #[model(column = "id", preferred, primary)]
+                    id_: i64,
+                    #[model(identifier, json)]
+                    content_type: String,
+                    data: Vec<u8>,
+                }
+            }
+        );
+    }
 }
83 changes: 83 additions & 0 deletions editoast/editoast_derive/src/model/codegen.rs
@@ -457,3 +457,86 @@ trait TokensIf: Sized {
}

impl<T: ToTokens> TokensIf for T {}

/// Generates an expression that splits a query into chunks to accommodate libpq's maximum number of bound parameters
///
/// This is a hack around a libpq limitation (cf. <https://github.com/diesel-rs/diesel/issues/2414>).
/// The rows to process are split into chunks for which at most `2^16 - 1` parameters are sent to libpq.
/// Therefore we need to know how many parameters are sent per row.
/// The result collection can be parametrized.
///
/// # On concurrency
///
/// There seems to be a problem with concurrent queries using deadpool, which panic with
/// 'Cannot access shared transaction state'. So this macro does not run each chunk's query concurrently.
/// While AsyncPgConnection supports pipelining, each query will be sent one after the other.
/// (But hey, it's still better than just making one query per row :p)
#[derive(Clone)]
struct LibpqChunkedIteration {
    /// The number of bound values per row
    parameters_per_row: usize,
    /// The maximum number of rows per chunk (the actual chunk size may be smaller, but never bigger)
    chunk_size_limit: usize,
    /// The identifier of the values to iterate over (must implement `IntoIterator<Item = ParameterType>`)
    values_ident: syn::Ident,
    /// How to collect the results
    collector: LibpqChunkedIterationCollector,
    /// The identifier of the chunk iteration variable
    chunk_iteration_ident: syn::Ident,
    /// The body of the chunk iteration
    chunk_iteration_body: proc_macro2::TokenStream,
}

/// Describes how to collect the results of a chunked iteration
#[derive(Clone)]
enum LibpqChunkedIterationCollector {
    /// All results are pushed into a Vec (item type has to be inferable)
    VecPush,
    /// Extends an existing collection. Its initialization expression must be provided.
    ///
    /// The initialized collection must implement `Extend<Model>`.
    Extend { collection_init: syn::Expr },
}

impl LibpqChunkedIteration {
    fn with_iteration_body(&self, body: proc_macro2::TokenStream) -> Self {
        Self {
            chunk_iteration_body: body,
            ..self.clone()
        }
    }
}

impl ToTokens for LibpqChunkedIteration {
    fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
        let Self {
            parameters_per_row,
            chunk_size_limit,
            values_ident,
            chunk_iteration_ident,
            chunk_iteration_body,
            collector,
        } = self;
        let (init, extend) = match collector {
            LibpqChunkedIterationCollector::VecPush => {
                (syn::parse_quote! { Vec::new() }, quote::quote! { push })
            }
            LibpqChunkedIterationCollector::Extend { collection_init } => {
                (collection_init.clone(), quote::quote! { extend })
            }
        };
        tokens.extend(quote::quote! {
            const LIBPQ_MAX_PARAMETERS: usize = 2_usize.pow(16) - 1;
            // We need to divide further because of AsyncPgConnection, maybe it is related to connection pipelining
            const ASYNC_SUBDIVISION: usize = 2_usize;
            const CHUNK_SIZE: usize = LIBPQ_MAX_PARAMETERS / ASYNC_SUBDIVISION / #parameters_per_row;
            let mut result = #init;
            let chunks = #values_ident.chunks(CHUNK_SIZE.min(#chunk_size_limit));
            for #chunk_iteration_ident in chunks {
                let chunk_result = { #chunk_iteration_body };
                result.#extend(chunk_result);
            }
            result
        });
    }
}
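
To make the builder concrete, here is a hypothetical sketch (not code from this diff) of how the codegen could drive it; every identifier, parameter count, and the loop body are illustrative only:

```rust
// Hypothetical usage sketch: iterate over `values` with 7 bound parameters per
// row, collecting each chunk's results into one Vec.
let iteration = LibpqChunkedIteration {
    parameters_per_row: 7,
    chunk_size_limit: usize::MAX, // no extra row cap in this sketch
    values_ident: quote::format_ident!("values"),
    collector: LibpqChunkedIterationCollector::VecPush,
    chunk_iteration_ident: quote::format_ident!("chunk"),
    chunk_iteration_body: quote::quote! {
        // e.g. a diesel insert of `chunk` returning the created rows
        insert_chunk(conn, chunk).await?
    },
};
// Quoting `iteration` emits the chunked loop shown above. With 7 parameters
// per row, CHUNK_SIZE = (2^16 - 1) / 2 / 7 = 4681 rows per query.
let generated: proc_macro2::TokenStream = quote::quote! { #iteration };
```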