Skip to content

Commit

Permalink
#175 Add docker daemon in Cortex docker image
Browse files Browse the repository at this point in the history
  • Loading branch information
To-om committed Jun 3, 2019
1 parent e94933d commit 99d2076
Show file tree
Hide file tree
Showing 7 changed files with 83 additions and 42 deletions.
7 changes: 2 additions & 5 deletions app/org/thp/cortex/controllers/JobCtrl.scala
Original file line number Diff line number Diff line change
Expand Up @@ -114,11 +114,8 @@ class JobCtrl @Inject()(
"message" artifact.message(),
"tags" artifact.tags(),
"tlp" artifact.tlp(),
"attachment" Json.obj(
"contentType" attachment.contentType,
"id" attachment.id,
"name" attachment.name,
"size" attachment.size)
"attachment" Json
.obj("contentType" attachment.contentType, "id" attachment.id, "name" attachment.name, "size" attachment.size)
)
}
.runWith(Sink.seq)
Expand Down
25 changes: 13 additions & 12 deletions app/org/thp/cortex/services/DockerJobRunnerSrv.scala
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,11 @@ class DockerJobRunnerSrv(client: DockerClient, autoUpdate: Boolean, implicit val
Try {
logger.info(s"Docker is available:\n${client.info()}")
true
}.getOrElse {
logger.info(s"Docker is not available")
}.recover {
case error
logger.info(s"Docker is not available", error)
false
}
}.get

def run(jobDirectory: Path, dockerImage: String, job: Job, timeout: Option[FiniteDuration])(implicit ec: ExecutionContext): Future[Unit] = {
import scala.collection.JavaConverters._
Expand Down Expand Up @@ -90,15 +91,15 @@ class DockerJobRunnerSrv(client: DockerClient, autoUpdate: Boolean, implicit val
client.waitContainer(containerCreation.id())
()
}.andThen {
case r
if (!Files.exists(jobDirectory.resolve("output").resolve("output.json"))) {
val message = r.fold(e s"Docker creation error: ${e.getMessage}\n", _ "") +
Try(client.logs(containerCreation.id(), LogsParam.stdout(), LogsParam.stderr()).readFully())
.recover { case e s"Container logs can't be read (${e.getMessage}" }
val report = Json.obj("success" false, "errorMessage" message)
Files.write(jobDirectory.resolve("output").resolve("output.json"), report.toString.getBytes(StandardCharsets.UTF_8))
}
}
case r
if (!Files.exists(jobDirectory.resolve("output").resolve("output.json"))) {
val message = r.fold(e s"Docker creation error: ${e.getMessage}\n", _ "") +
Try(client.logs(containerCreation.id(), LogsParam.stdout(), LogsParam.stderr()).readFully())
.recover { case e s"Container logs can't be read (${e.getMessage}" }
val report = Json.obj("success" false, "errorMessage" message)
Files.write(jobDirectory.resolve("output").resolve("output.json"), report.toString.getBytes(StandardCharsets.UTF_8))
}
}
timeout
.fold(execution)(t execution.withTimeout(t, client.stopContainer(containerCreation.id(), 3)))
.andThen {
Expand Down
19 changes: 14 additions & 5 deletions docker.sbt
Original file line number Diff line number Diff line change
Expand Up @@ -23,17 +23,26 @@ mappings in Docker ~= (_.filterNot {
})
dockerCommands ~= { dc =>
val (dockerInitCmds, dockerTailCmds) = dc
.collect {
case ExecCmd("RUN", "chown", _*) => ExecCmd("RUN", "chown", "-R", "daemon:root", ".")
case other => other
.flatMap {
case ExecCmd("RUN", "chown", _*) => Some(ExecCmd("RUN", "chown", "-R", "daemon:root", "."))
case Cmd("USER", _) => None
case other => Some(other)
}
.splitAt(4)
dockerInitCmds ++
Seq(
Cmd("USER", "root"),
ExecCmd("RUN", "bash", "-c",
"apt-get update && " +
"apt-get install -y --no-install-recommends python-pip python2.7-dev python3-pip python3-dev ssdeep libfuzzy-dev libfuzzy2 libimage-exiftool-perl libmagic1 build-essential git libssl-dev dnsutils && " +
"wget -q -O - https://download.docker.com/linux/static/stable/x86_64/docker-18.09.0.tgz | " +
"tar -xzC /usr/local/bin/ --strip-components 1 && " +
"addgroup --system dockremap && " +
"adduser --system --ingroup dockremap dockremap && " +
"addgroup --system docker && " +
"usermod --append --groups docker daemon &&" +
"echo 'dockremap:165536:65536' >> /etc/subuid && " +
"echo 'dockremap:165536:65536' >> /etc/subgid && " +
"apt-get update && " +
"apt-get install -y --no-install-recommends python-pip python2.7-dev python3-pip python3-dev ssdeep libfuzzy-dev libfuzzy2 libimage-exiftool-perl libmagic1 build-essential git libssl-dev dnsutils iptables && " +
"pip2 install -U pip setuptools && " +
"pip3 install -U pip setuptools && " +
"hash -r && " +
Expand Down
5 changes: 1 addition & 4 deletions docker/cortex/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -1,13 +1,10 @@
version: "2"
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:5.6.0
image: elasticsearch:6.8.0
environment:
- http.host=0.0.0.0
- transport.host=0.0.0.0
- xpack.security.enabled=false
- cluster.name=hive
- script.inline=true
- thread_pool.index.queue_size=100000
- thread_pool.search.queue_size=100000
- thread_pool.bulk.queue_size=100000
Expand Down
57 changes: 41 additions & 16 deletions package/docker/entrypoint
Original file line number Diff line number Diff line change
Expand Up @@ -6,19 +6,23 @@ CONFIG_ES=1
CONFIG=1
CONFIG_FILE=/etc/cortex/application.conf
ANALYZER_PATH=/opt/Cortex-Analyzers/analyzers
ANALYZER_URLS=()
RESPONDER_PATH=/opt/Cortex-Analyzers/responders
RESPONDER_URLS=()
START_DOCKER=0

# Print the supported command-line options on stdout and exit with status 1.
# Reached when an unrecognized or deprecated option is given.
# Fixes: this image configures Cortex, not TheHive; "an internal"; "elasticsearch"
# typo; --es-hosts is flagged deprecated to match the option parser.
function usage {
    cat << _EOF_
Available options:
  --no-config              | do not try to configure Cortex (add secret and elasticsearch)
  --no-config-secret       | do not add random secret to configuration
  --no-config-es           | do not add elasticsearch hosts to configuration
  --es-hosts <esconfig>    | deprecated, use --es-uri instead
  --es-uri <uri>           | use this string to configure elasticsearch hosts (format: http(s)://host:port,host:port(/prefix)?querystring)
  --es-hostname <host>     | resolve this hostname to find elasticsearch instances
  --secret <secret>        | secret to secure sessions
  --analyzer-url <url>     | where analyzers are located (url or path)
  --responder-url <url>    | where responders are located (url or path)
  --start-docker           | start an internal docker (inside container) to run analyzers/responders
_EOF_
    exit 1
}
Expand All @@ -30,13 +34,18 @@ do
"--no-config") CONFIG=0;;
"--no-config-secret") CONFIG_SECRET=0;;
"--no-config-es") CONFIG_ES=0;;
"--es-hosts") shift; ES_HOSTS=$1;;
"--es-hosts") echo "--es-hosts is deprecated, please use --es-uri"
usage;;
"--es-uri") shift; ES_URI=$1;;
"--es-hostname") shift; ES_HOSTNAME=$1;;
"--secret") shift; SECRET=$1;;
"--analyzer-path") shift; ANALYZER_PATH=$1;;
"--responder-path") shift; RESPONDER_PATH=$1;;
"--analyzer-url") shift; ANALYZER_URLS+=$1;;
"--responder-url") shift; RESPONDER_URLS+=$1;;
"--start-docker") START_DOCKER=1;;
"--") STOP=1;;
*) usage
*) echo "unrecognized option: $1"; usage;;
esac
shift
done
Expand All @@ -56,39 +65,55 @@ then

if test $CONFIG_ES = 1
then
if test -z "$ES_HOSTS"
if test -z "$ES_URI"
then
# Join hostnames into an elasticsearch address list: "h1:9200,h2:9200,...".
# The result is prefixed with "http://" by the caller to build search.uri.
#
# Bug fixed: the previous printf form '"%s," "${@/#/:9200}"' PREpended ":9200"
# to each host ('/#' anchors the empty pattern at the string start) and always
# left a trailing comma (printf re-runs its format once even with no operands),
# yielding e.g. "h1:9200:9200h2," instead of "h1:9200,h2:9200".
function join_es_hosts {
    echo -n "$1:9200"
    shift
    local host
    for host in "$@"; do
        echo -n ",$host:9200"
    done
}

ES=$(getent ahostsv4 $ES_HOSTNAME | awk '{ print $1 }' | sort -u)
if test -z "$ES"
then
echo "Warning automatic elasticsearch host config fails"
else
ES_HOSTS=$(join_es_hosts $ES)
ES_URI=http://$(join_es_hosts $ES)
fi
fi
if test -n "$ES_HOSTS"
if test -n "$ES_URI"
then
echo Using elasticsearch host: $ES_HOSTS
echo search.host=$ES_HOSTS >> $CONFIG_FILE
echo Using elasticsearch uri: $ES_URI
echo search.uri=\"$ES_URI\" >> $CONFIG_FILE
else
echo elasticsearch host not configured
fi
fi

echo analyzer.path=[\"$ANALYZER_PATH\"] >> $CONFIG_FILE
echo responder.path=[\"$RESPONDER_PATH\"] >> $CONFIG_FILE
# Emit every argument wrapped in double quotes, comma-separated: "a","b","c".
# Used to build the analyzer.urls / responder.urls JSON-style array values.
function join_urls {
    echo -n "\"$1\""
    shift
    local url
    for url in "$@"; do
        echo -n ",\"$url\""
    done
}
test ${#ANALYZER_URLS} = 0 && ANALYZER_URLS+=$ANALYZER_PATH
test ${#RESPONDER_URLS} = 0 && RESPONDER_URLS+=$RESPONDER_PATH

echo analyzer.urls=\[$(join_urls ${ANALYZER_URLS[@]})\] >> $CONFIG_FILE
echo responder.urls=\[$(join_urls ${RESPONDER_URLS[@]})\] >> $CONFIG_FILE

echo 'include file("/etc/cortex/application.conf")' >> $CONFIG_FILE
fi

exec bin/cortex \
test $START_DOCKER = 1 && dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 &> /dev/null &
DOCKER_PID=$!

echo config file is:
cat $CONFIG_FILE
su -s /bin/sh -c "cd /opt/cortex; bin/cortex \
-Dconfig.file=$CONFIG_FILE \
-Dlogger.file=/etc/cortex/logback.xml \
-Dpidfile.path=/dev/null \
$@
$@" daemon

test $START_DOCKER = 1 && kill ${DOCKER_PID}
6 changes: 6 additions & 0 deletions test/resources/analyzers/blocker/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Test-only "blocker" analyzer image: runs a script that loops forever,
# used to exercise the docker job runner's timeout handling.
FROM debian:latest

WORKDIR /analyzer
# apt-get (not apt: apt has no stable CLI for scripts), skip recommended
# packages and drop the package lists to keep the test image small.
RUN apt-get update && \
    apt-get install -y --no-install-recommends jq && \
    rm -rf /var/lib/apt/lists/*
COPY blocker.sh blocker/blocker.sh
ENTRYPOINT ["blocker/blocker.sh"]
6 changes: 6 additions & 0 deletions test/resources/analyzers/blocker/blocker.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#!/bin/sh

# Test analyzer that never terminates and never writes any output:
# it sleeps in an endless loop so job-timeout handling can be exercised.
while :
do
    sleep 10
done

0 comments on commit 99d2076

Please sign in to comment.