From 75d6ff723603b5258668dd18b82607bdc9b74536 Mon Sep 17 00:00:00 2001
From: To-om
Date: Wed, 30 Jun 2021 19:32:06 +0200
Subject: [PATCH] #2105 Add force parameter in database cloner tool

---
 .gitignore                                    |  1 +
 conf/application.sample.conf                  |  8 ++++
 conf/cloner.sample.conf                       | 24 ++++++++++
 migration/src/main/resources/reference.conf   |  1 +
 .../scala/org/thp/thehive/cloner/Cloner.scala | 47 ++++++++++++-------
 5 files changed, 65 insertions(+), 16 deletions(-)
 create mode 100644 conf/cloner.sample.conf

diff --git a/.gitignore b/.gitignore
index 2c87b915d9..90ff884861 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ logs
 bin
 conf/application.conf
 conf/migration.conf
+/conf/cloner.conf
 graphql.config.json
 graphql.schema.json
 .graphqlconfig
diff --git a/conf/application.sample.conf b/conf/application.sample.conf
index 4de3da18e0..1045ee97b3 100644
--- a/conf/application.sample.conf
+++ b/conf/application.sample.conf
@@ -21,6 +21,14 @@ db.janusgraph {
       keyspace: thehive
     }
   }
+  index.search {
+    backend: lucene
+    directory: /opt/thp/thehive/index
+    # If TheHive is in cluster ElasticSearch must be used:
+    // backend: elasticsearch
+    // hostname: ["ip1", "ip2"]
+    // index-name: thehive
+  }
 }
 ## For test only !
 # Comment the two lines below before enable Cassandra database
diff --git a/conf/cloner.sample.conf b/conf/cloner.sample.conf
new file mode 100644
index 0000000000..990147dde4
--- /dev/null
+++ b/conf/cloner.sample.conf
@@ -0,0 +1,24 @@
+# This is a sample configuration for the database cloner tool
+
+# Configuration of the source database (same format as in application.conf)
+from.db.janusgraph {
+  storage {
+    // backend: cql
+    // hostname: ["ip1", "ip2"]
+  }
+  index.search {
+    backend: lucene
+    directory: /opt/thp/thehive/index
+  }
+}
+# Configuration of the target database
+to.db.janusgraph {
+  storage {
+    // backend: cql
+    // hostname: ["ip1", "ip2"]
+  }
+  index.search {
+    backend: lucene
+    directory: /opt/thp/thehive/otherIndex
+  }
+}
diff --git a/migration/src/main/resources/reference.conf b/migration/src/main/resources/reference.conf
index 90a131e6a0..57b1bb0eeb 100644
--- a/migration/src/main/resources/reference.conf
+++ b/migration/src/main/resources/reference.conf
@@ -117,3 +117,4 @@ to {
   }
 }
 batchSize: 100
+force: false
\ No newline at end of file
diff --git a/migration/src/main/scala/org/thp/thehive/cloner/Cloner.scala b/migration/src/main/scala/org/thp/thehive/cloner/Cloner.scala
index 5d15b3cadd..6b91022fc9 100644
--- a/migration/src/main/scala/org/thp/thehive/cloner/Cloner.scala
+++ b/migration/src/main/scala/org/thp/thehive/cloner/Cloner.scala
@@ -1,7 +1,7 @@
 package org.thp.thehive.cloner
 
 import akka.actor.ActorSystem
-import com.typesafe.config.{Config, ConfigFactory}
+import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
 import org.apache.tinkerpop.gremlin.structure.T
 import org.thp.scalligraph.SingleInstance
 import org.thp.scalligraph.janus.JanusDatabase
@@ -31,6 +31,9 @@ object Cloner extends App with IntegrityCheckApp {
     )
   }
 
+  def addConfig(config: Config, path: String, value: Any): Config =
+    config.withValue(path, ConfigValueFactory.fromAnyRef(value))
+
   val defaultLoggerConfigFile = "/etc/thehive/logback-cloner.xml"
   if (System.getProperty("logger.file") == null && Files.exists(Paths.get(defaultLoggerConfigFile)))
     System.setProperty("logger.file", defaultLoggerConfigFile)
@@ -54,7 +57,9 @@ object Cloner extends App with IntegrityCheckApp {
         .valueName("")
         .required()
         .action((f, c) => ConfigFactory.parseFileAnySyntax(f).withFallback(c))
-        .text("configuration file")
+        .text("configuration file"),
+      opt[Unit]('f', "force")
+        .action((_, c) => addConfig(c, "force", true))
     )
   }
   val defaultConfig =
@@ -78,19 +83,28 @@
 
   val thehiveSchema = new TheHiveSchemaDefinition
   val cortexSchema  = new CortexSchemaDefinition
-  if (sourceDatabase.version(thehiveSchema.name) != thehiveSchema.operations.operations.length + 1) {
-    println(
-      "The schema of TheHive is not valid " +
-        s"(found ${sourceDatabase.version(thehiveSchema.name)}, expected ${thehiveSchema.operations.operations.length + 1})"
-    )
-    sys.exit(1)
+
+  {
+    val expectedVersion = thehiveSchema.operations.operations.length + 1
+    val foundVersion    = sourceDatabase.version(thehiveSchema.name)
+    if (foundVersion != expectedVersion) {
+      println(s"The schema of TheHive is not valid (expected: $expectedVersion, found: $foundVersion)")
+      if (config.getBoolean("force"))
+        println("Continuing ...")
+      else
+        sys.exit(1)
+    }
   }
-  if (sourceDatabase.version(cortexSchema.name) != cortexSchema.operations.operations.length + 1) {
-    println(
-      "The schema of Cortex is not valid " +
-        s"(found ${sourceDatabase.version(cortexSchema.name)}, expected ${cortexSchema.operations.operations.length + 1})"
-    )
-    sys.exit(1)
+  {
+    val expectedVersion = cortexSchema.operations.operations.length + 1
+    val foundVersion    = sourceDatabase.version(cortexSchema.name)
+    if (foundVersion != expectedVersion) {
+      println(s"The schema of Cortex is not valid (expected: $expectedVersion, found: $foundVersion)")
+      if (config.getBoolean("force"))
+        println("Continuing ...")
+      else
+        sys.exit(1)
+    }
   }
 
   val destDatabase: Database = getDatabase(
@@ -111,8 +125,8 @@
     // don't create initial values
     val models = destDatabase.extraModels ++ thehiveSchema.modelList ++ cortexSchema.modelList
     destDatabase.createSchema(models)
-    destDatabase.setVersion(thehiveSchema.name, thehiveSchema.operations.operations.length + 1)
-    destDatabase.setVersion(cortexSchema.name, cortexSchema.operations.operations.length + 1)
+    destDatabase.setVersion(thehiveSchema.name, sourceDatabase.version(thehiveSchema.name))
+    destDatabase.setVersion(cortexSchema.name, sourceDatabase.version(cortexSchema.name))
 
     val batchSize: Int = config.getInt("batchSize")
 
@@ -167,6 +181,7 @@
 
     println("Add indices ...")
    destDatabase.addSchemaIndexes(models)
+    println("Run checks ...")
     runChecks(destDatabase, Configuration(config))
     destDatabase.close()
   } finally {