
Commit 6b5fd5a

editoast: clean up and fix deadlock tests

1 parent d06d182

File tree: 13 files changed (+193 −175 lines)


editoast/editoast_models/src/db_connection_pool.rs (+8)

```diff
@@ -134,6 +134,14 @@ impl DbConnectionPoolV2 {
     /// # }
     /// ```
     ///
+    /// ### Deadlocks
+    ///
+    /// We encountered a deadlock error in our tests,
+    /// especially those using `empty_infra` and `small_infra`.
+    /// Adding `#[serial_test::serial]` solved the issue.
+    /// We tried increasing the deadlock timeout, but that didn't work.
+    /// Using random `infra_id` with rand didn't help either.
+    ///
     /// ## Guidelines
     ///
     /// To prevent these issues, prefer the following patterns:
```
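For readers unfamiliar with the workaround documented above: marking a test with `#[serial_test::serial]` makes it run exclusively rather than concurrently with other `serial`-marked tests, which is what avoids the PostgreSQL deadlocks seen with `empty_infra` and `small_infra`. A minimal sketch of the pattern, reusing helpers that appear elsewhere in this commit (`for_tests`, `get_ok`, `create_empty_infra`); the test name and body are hypothetical:

```rust
use std::ops::DerefMut;

// Hypothetical test illustrating the documented workaround: `#[rstest]` drives
// the test and `#[serial_test::serial]` serializes it with other `serial`
// tests, so concurrent access to the infra fixtures cannot deadlock PostgreSQL.
#[rstest::rstest]
#[serial_test::serial]
async fn some_deadlock_prone_infra_test() {
    // One short-lived pool per test, as in the rest of the suite.
    let db_pool = DbConnectionPoolV2::for_tests();
    // Check out a connection mutably, as the connection-taking APIs expect.
    let infra = create_empty_infra(db_pool.get_ok().deref_mut()).await;
    assert!(infra.id > 0);
}
```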

editoast/src/fixtures.rs (+1 −1)

```diff
@@ -474,7 +474,7 @@ pub mod tests {
         Infra::changeset()
             .name("small_infra".to_owned())
             .last_railjson_version()
-            .persist(railjson, db_pool)
+            .persist(railjson, db_pool.get().await.unwrap().deref_mut())
             .await
             .unwrap()
     }
```

editoast/src/generated_data/mod.rs (+22 −18)

```diff
@@ -86,38 +86,39 @@ pub trait GeneratedData {
 }
 
 /// Refresh all the generated data of a given infra
+#[tracing::instrument(level = "debug", skip_all, fields(infra_id))]
 pub async fn refresh_all(
     db_pool: Arc<DbConnectionPoolV2>,
-    infra: i64,
+    infra_id: i64,
     infra_cache: &InfraCache,
 ) -> Result<()> {
     // The other layers depend on track section layer.
     // We must wait until its completion before running the other requests in parallel
-    TrackSectionLayer::refresh_pool(db_pool.clone(), infra, infra_cache).await?;
-    debug!("⚙️ Infra {infra}: track section layer is generated");
+    TrackSectionLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache).await?;
+    debug!("⚙️ Infra {infra_id}: track section layer is generated");
     // The analyze step significantly improves the performance when importing and generating together
     // It doesn’t seem to make a different when the generation step is ran separately
     // It isn’t clear why without analyze the Postgres server seems to run at 100% without halting
     sql_query("analyze")
         .execute(db_pool.get().await?.deref_mut())
         .await?;
-    debug!("⚙️ Infra {infra}: database analyzed");
+    debug!("⚙️ Infra {infra_id}: database analyzed");
     futures::try_join!(
-        SpeedSectionLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        SignalLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        SwitchLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        BufferStopLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        ElectrificationLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        DetectorLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        OperationalPointLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        PSLSignLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        NeutralSectionLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
-        NeutralSignLayer::refresh_pool(db_pool.clone(), infra, infra_cache),
+        SpeedSectionLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        SignalLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        SwitchLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        BufferStopLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        ElectrificationLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        DetectorLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        OperationalPointLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        PSLSignLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        NeutralSectionLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
+        NeutralSignLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache),
     )?;
-    debug!("⚙️ Infra {infra}: object layers is generated");
+    debug!("⚙️ Infra {infra_id}: object layers is generated");
     // The error layer depends on the other layers and must be executed at the end.
-    ErrorLayer::refresh_pool(db_pool.clone(), infra, infra_cache).await?;
-    debug!("⚙️ Infra {infra}: errors layer is generated");
+    ErrorLayer::refresh_pool(db_pool.clone(), infra_id, infra_cache).await?;
+    debug!("⚙️ Infra {infra_id}: errors layer is generated");
     Ok(())
 }
 
@@ -171,7 +172,10 @@ pub mod tests {
     use crate::modelsv2::fixtures::create_empty_infra;
     use editoast_models::DbConnectionPoolV2;
 
-    #[rstest] // Slow test
+    #[rstest]
+    // Slow test
+    // PostgreSQL deadlock can happen in this test, see section `Deadlock` of [DbConnectionPoolV2::get] for more information
+    #[serial_test::serial]
     async fn refresh_all_test() {
         let db_pool = DbConnectionPoolV2::for_tests();
         let infra = create_empty_infra(db_pool.get_ok().deref_mut()).await;
```
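The comments in `refresh_all` describe a sequential-then-parallel flow: the track section layer must complete first, the independent layers refresh concurrently via `futures::try_join!`, and the error layer runs last because it depends on all the others. A small, self-contained sketch of that control-flow pattern, with placeholder step functions rather than the real layer types:

```rust
// Minimal sketch of the sequential-then-parallel pattern used by `refresh_all`.
// The `step` function is a placeholder, not an editoast API.

type Result<T> = std::result::Result<T, String>;

async fn step(name: &'static str) -> Result<&'static str> {
    // Stand-in for a real layer refresh.
    Ok(name)
}

async fn refresh_in_order() -> Result<()> {
    // 1. The prerequisite layer must finish before the others start.
    step("track_sections").await?;
    // 2. Independent layers refresh concurrently; the first error aborts the join.
    futures::try_join!(step("signals"), step("switches"), step("detectors"))?;
    // 3. The error layer depends on all previous layers, so it runs last.
    step("errors").await?;
    Ok(())
}
```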

editoast/src/main.rs (+3 −1)

```diff
@@ -630,7 +630,9 @@ async fn import_railjson(
     let railjson: RailJson = serde_json::from_reader(BufReader::new(railjson_file))?;
 
     println!("🍞 Importing infra {infra_name}");
-    let mut infra = infra.persist_v2(railjson, db_pool.clone()).await?;
+    let mut infra = infra
+        .persist(railjson, db_pool.get().await?.deref_mut())
+        .await?;
 
     infra
         .bump_version(db_pool.get().await?.deref_mut())
```

editoast/src/modelsv2/fixtures.rs (+2 −3)

```diff
@@ -1,5 +1,4 @@
 use std::io::Cursor;
-use std::sync::Arc;
 
 use chrono::Utc;
 use editoast_schemas::infra::InfraObject;
@@ -287,15 +286,15 @@ where
     railjson_object
 }
 
-pub async fn create_small_infra(db_pool: Arc<DbConnectionPoolV2>) -> Infra {
+pub async fn create_small_infra(conn: &mut DbConnection) -> Infra {
     let railjson: RailJson = serde_json::from_str(include_str!(
         "../../../tests/data/infras/small_infra/infra.json"
     ))
     .unwrap();
     Infra::changeset()
         .name("small_infra".to_owned())
         .last_railjson_version()
-        .persist_v2(railjson, db_pool)
+        .persist(railjson, conn)
         .await
         .unwrap()
 }
```
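With this signature change, callers check out a connection themselves and hand it to the fixture. A hedged usage sketch in the style of the tests touched by this commit; the test name is invented:

```rust
use std::ops::DerefMut;

// Hypothetical caller of the updated fixture: the connection is checked out
// from the test pool and passed to `create_small_infra` as `&mut DbConnection`.
#[rstest::rstest]
#[serial_test::serial] // small_infra is one of the fixtures prone to deadlocks
async fn loads_small_infra() {
    let db_pool = DbConnectionPoolV2::for_tests();
    let infra = create_small_infra(db_pool.get_ok().deref_mut()).await;
    // The fixture uses `last_railjson_version()`, so the persisted version matches.
    assert_eq!(infra.railjson_version, RAILJSON_VERSION);
}
```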

editoast/src/modelsv2/infra.rs (+64 −46)

```diff
@@ -36,11 +36,9 @@ use crate::modelsv2::get_geometry_layer_table;
 use crate::modelsv2::get_table;
 use crate::modelsv2::prelude::*;
 use crate::modelsv2::railjson::persist_railjson;
-use crate::modelsv2::railjson::persist_railjson_v2;
 use crate::modelsv2::Create;
 use crate::tables::infra::dsl;
 use editoast_models::DbConnection;
-use editoast_models::DbConnectionPool;
 use editoast_models::DbConnectionPoolV2;
 use editoast_schemas::infra::RailJson;
 use editoast_schemas::infra::RAILJSON_VERSION;
@@ -77,16 +75,11 @@ pub struct Infra {
 }
 
 impl InfraChangeset {
-    pub async fn persist(
-        self,
-        railjson: RailJson,
-        db_pool: Arc<DbConnectionPool>,
-    ) -> Result<Infra> {
-        let conn = &mut db_pool.get().await?;
+    pub async fn persist(self, railjson: RailJson, conn: &mut DbConnection) -> Result<Infra> {
         let infra = self.create(conn).await?;
         // TODO: lock infra for update
         debug!("🛤 Begin importing all railjson objects");
-        if let Err(e) = persist_railjson(db_pool, infra.id, railjson).await {
+        if let Err(e) = persist_railjson(conn, infra.id, railjson).await {
             error!("Could not import infrastructure {}. Rolling back", infra.id);
             infra.delete(conn).await?;
             return Err(e);
@@ -95,23 +88,6 @@ impl InfraChangeset {
         Ok(infra)
     }
 
-    pub async fn persist_v2(
-        self,
-        railjson: RailJson,
-        db_pool: Arc<DbConnectionPoolV2>,
-    ) -> Result<Infra> {
-        let infra = self.create(db_pool.get().await?.deref_mut()).await?;
-        // TODO: lock infra for update
-        debug!("🛤 Begin importing all railjson objects");
-        if let Err(e) = persist_railjson_v2(db_pool.clone(), infra.id, railjson).await {
-            error!("Could not import infrastructure {}. Rolling back", infra.id);
-            infra.delete(db_pool.get().await?.deref_mut()).await?;
-            return Err(e);
-        };
-        debug!("🛤 Import finished successfully");
-        Ok(infra)
-    }
-
     #[must_use = "builder methods are intended to be chained"]
     pub fn last_railjson_version(self) -> Self {
         self.railjson_version(RAILJSON_VERSION.to_owned())
@@ -337,8 +313,6 @@ pub mod tests {
 
     use super::Infra;
     use crate::error::EditoastError;
-    use crate::fixtures::tests::db_pool;
-    use crate::fixtures::tests::IntoFixture;
     use crate::modelsv2::fixtures::create_empty_infra;
     use crate::modelsv2::infra::DEFAULT_INFRA_VERSION;
     use crate::modelsv2::prelude::*;
@@ -359,6 +333,8 @@ pub mod tests {
     }
 
     #[rstest]
+    // PostgreSQL deadlock can happen in this test, see section `Deadlock` of [DbConnectionPoolV2::get] for more information
+    #[serial_test::serial]
     async fn clone_infra_with_new_name_returns_new_cloned_infra() {
         // GIVEN
         let db_pool = DbConnectionPoolV2::for_tests();
@@ -378,15 +354,15 @@ pub mod tests {
     #[rstest]
     #[serial_test::serial]
     async fn persists_railjson_ko_version() {
-        let pool = db_pool();
+        let db_pool = DbConnectionPoolV2::for_tests();
         let railjson_with_invalid_version = RailJson {
             version: "0".to_string(),
             ..Default::default()
         };
         let res = Infra::changeset()
             .name("test".to_owned())
             .last_railjson_version()
-            .persist(railjson_with_invalid_version, pool)
+            .persist(railjson_with_invalid_version, db_pool.get_ok().deref_mut())
             .await;
         assert!(res.is_err());
         let expected_error = RailJsonError::UnsupportedVersion {
@@ -418,14 +394,13 @@ pub mod tests {
             version: RAILJSON_VERSION.to_string(),
         };
 
-        let pool = db_pool();
+        let db_pool = DbConnectionPoolV2::for_tests();
         let infra = Infra::changeset()
             .name("persist_railjson_ok_infra".to_owned())
             .last_railjson_version()
-            .persist(railjson.clone(), pool.clone())
+            .persist(railjson.clone(), db_pool.get_ok().deref_mut())
             .await
-            .expect("could not persist infra")
-            .into_fixture(pool.clone());
+            .expect("could not persist infra");
 
         // THEN
         assert_eq!(infra.railjson_version, railjson.version);
@@ -435,51 +410,94 @@ pub mod tests {
             objects
         }
 
-        let conn = &mut pool.get().await.unwrap();
         let id = infra.id;
 
         assert_eq!(
-            sort::<BufferStop>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<BufferStop>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.buffer_stops)
         );
         assert_eq!(
-            sort::<Route>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<Route>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.routes)
         );
         assert_eq!(
-            sort::<SwitchType>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<SwitchType>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.extended_switch_types)
        );
         assert_eq!(
-            sort::<Switch>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<Switch>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.switches)
         );
         assert_eq!(
-            sort::<TrackSection>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<TrackSection>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.track_sections)
         );
         assert_eq!(
-            sort::<SpeedSection>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<SpeedSection>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.speed_sections)
         );
         assert_eq!(
-            sort::<NeutralSection>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<NeutralSection>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.neutral_sections)
         );
         assert_eq!(
-            sort::<Electrification>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<Electrification>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.electrifications)
         );
         assert_eq!(
-            sort::<Signal>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<Signal>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.signals)
         );
         assert_eq!(
-            sort::<Detector>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<Detector>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.detectors)
         );
         assert_eq!(
-            sort::<OperationalPoint>(find_all_schemas(conn, id).await.unwrap()),
+            sort::<OperationalPoint>(
+                find_all_schemas(db_pool.get_ok().deref_mut(), id)
+                    .await
+                    .unwrap()
+            ),
             sort(railjson.operational_points)
         );
     }
```
