diff --git a/.gitignore b/.gitignore
index 5414691bc..bc55d3127 100644
--- a/.gitignore
+++ b/.gitignore
@@ -84,3 +84,4 @@ venv.bak/
 # Cython debug symbols
 cython_debug/
+assemblyline/common/frequency.c
diff --git a/assemblyline/cachestore/__init__.py b/assemblyline/cachestore/__init__.py
index eeb01c8a1..6b02c80d0 100644
--- a/assemblyline/cachestore/__init__.py
+++ b/assemblyline/cachestore/__init__.py
@@ -1,5 +1,6 @@
 import re
 
+from typing import AnyStr
 from assemblyline.common import forge
 from assemblyline.common.isotime import now_as_iso
@@ -10,9 +11,9 @@ class CacheStore(object):
-    def __init__(self, component, config=None, datastore=None):
+    def __init__(self, component: str, config=None, datastore=None):
         if not component:
-            raise ValueError("Cannot instanciate a cachestore without providing a component name.")
+            raise ValueError("Cannot instantiate a cachestore without providing a component name.")
 
         if not COMPONENT_VALIDATOR.match(component):
             raise ValueError("Invalid component name. (Only letters, numbers, underscores and dots allowed)")
@@ -24,13 +25,13 @@ def __init__(self, component, config=None, datastore=None):
         self.datastore = datastore or forge.get_datastore(config=config)
         self.filestore = FileStore(*config.filestore.cache)
 
-    def __enter__(self):
+    def __enter__(self) -> 'CacheStore':
         return self
 
     def __exit__(self, ex_type, exc_val, exc_tb):
         self.filestore.close()
 
-    def save(self, cache_key, data, ttl=DEFAULT_CACHE_LEN, force=False):
+    def save(self, cache_key: str, data: AnyStr, ttl=DEFAULT_CACHE_LEN, force=False):
         if not COMPONENT_VALIDATOR.match(cache_key):
             raise ValueError("Invalid cache_key for cache item. "
                              "(Only letters, numbers, underscores and dots allowed)")
@@ -40,17 +41,39 @@ def save(self, cache_key, data, ttl=DEFAULT_CACHE_LEN, force=False):
         self.datastore.cached_file.save(new_key, {'expiry_ts': now_as_iso(ttl), 'component': self.component})
         self.filestore.put(new_key, data, force=force)
 
-    def get(self, cache_key):
+    def upload(self, cache_key: str, path: str, ttl=DEFAULT_CACHE_LEN):
+        if not COMPONENT_VALIDATOR.match(cache_key):
+            raise ValueError("Invalid cache_key for cache item. "
+                             "(Only letters, numbers, underscores and dots allowed)")
+        new_key = f"{self.component}_{cache_key}" if self.component else cache_key
+        self.datastore.cached_file.save(new_key, {'expiry_ts': now_as_iso(ttl), 'component': self.component})
+        self.filestore.upload(new_key, path, force=True)
+
+    def touch(self, cache_key: str, ttl=DEFAULT_CACHE_LEN):
+        if not COMPONENT_VALIDATOR.match(cache_key):
+            raise ValueError("Invalid cache_key for cache item. "
+                             "(Only letters, numbers, underscores and dots allowed)")
+        if not self.exists(cache_key):
+            raise KeyError(cache_key)
+
+        new_key = f"{self.component}_{cache_key}" if self.component else cache_key
+        self.datastore.cached_file.save(new_key, {'expiry_ts': now_as_iso(ttl), 'component': self.component})
+
+    def get(self, cache_key: str) -> bytes:
+        new_key = f"{self.component}_{cache_key}" if self.component else cache_key
         return self.filestore.get(new_key)
 
-    def exists(self, cache_key):
+    def download(self, cache_key: str, path: str):
         new_key = f"{self.component}_{cache_key}" if self.component else cache_key
+        return self.filestore.download(new_key, path)
 
+    def exists(self, cache_key: str):
+        new_key = f"{self.component}_{cache_key}" if self.component else cache_key
         return self.filestore.exists(new_key)
 
-    def delete(self, cache_key, db_delete=True):
+    def delete(self, cache_key: str, db_delete=True):
         new_key = f"{self.component}_{cache_key}" if self.component else cache_key
 
         self.filestore.delete(new_key)
diff --git a/assemblyline/datastore/helper.py b/assemblyline/datastore/helper.py
index d4a550aad..d5d50bd6c 100644
--- a/assemblyline/datastore/helper.py
+++ b/assemblyline/datastore/helper.py
@@ -916,17 +916,17 @@ def get_attack_matrix_from_keys(self, keys):
         return out
 
     @elasticapm.capture_span(span_type='datastore')
-    def get_service_with_delta(self, service_name, version=None, as_obj=True):
+    def get_service_with_delta(self, service_name, version=None, as_obj=True) -> Union[Service, dict, None]:
         svc = self.ds.service_delta.get(service_name)
         if svc is None:
-            return svc
+            return None
 
         if version is not None:
             svc.version = version
 
         svc_version_data = self.ds.service.get(f"{service_name}_{svc.version}")
         if svc_version_data is None:
-            return svc_version_data
+            return None
 
         svc_version_data = recursive_update(svc_version_data.as_primitives(strip_null=True),
                                             svc.as_primitives(strip_null=True))
diff --git a/assemblyline/filestore/__init__.py b/assemblyline/filestore/__init__.py
index 1423734ba..5119cf2b9 100644
--- a/assemblyline/filestore/__init__.py
+++ b/assemblyline/filestore/__init__.py
@@ -1,6 +1,8 @@
+from __future__ import annotations
 import json
 import logging
 
+from typing import AnyStr, TYPE_CHECKING, List, Tuple
 from urllib.parse import urlparse, parse_qs, unquote
 
 import elasticapm
@@ -13,6 +15,9 @@
 from assemblyline.filestore.transport.s3 import TransportS3
 from assemblyline.filestore.transport.sftp import TransportSFTP
 
+if TYPE_CHECKING:
+    from assemblyline.filestore.transport.base import Transport
+
 
 class FileStoreException(Exception):
     pass
@@ -163,7 +168,7 @@ def close(self):
                 self.log.warning('Transport problem: %s', trace)
 
     @elasticapm.capture_span(span_type='filestore')
-    def delete(self, path, location='all'):
+    def delete(self, path: str, location='all'):
         with elasticapm.capture_span(name='delete', span_type='filestore', labels={'path': path}):
             for t in self.slice(location):
                 try:
@@ -173,7 +178,7 @@ def delete(self, path, location='all'):
                     self.log.info('Transport problem: %s', trace)
 
     @elasticapm.capture_span(span_type='filestore')
-    def download(self, src_path, dest_path, location='any'):
+    def download(self, src_path: str, dest_path: str, location='any'):
         successful = False
         transports = []
         download_errors = []
@@ -191,7 +196,7 @@ def download(self, src_path, dest_path, location='any'):
         return transports
 
     @elasticapm.capture_span(span_type='filestore')
-    def exists(self, path, location='any'):
+    def exists(self, path, location='any') -> List[Transport]:
         transports = []
         for t in self.slice(location):
             try:
@@ -205,7 +210,7 @@ def exists(self, path, location='any'):
         return transports
 
     @elasticapm.capture_span(span_type='filestore')
-    def get(self, path, location='any'):
+    def get(self, path: str, location='any') -> bytes:
         for t in self.slice(location):
             try:
                 if t.exists(path):
@@ -215,7 +220,7 @@
                     self.log.warning('Transport problem: %s', trace)
 
     @elasticapm.capture_span(span_type='filestore')
-    def put(self, dst_path, content, location='all', force=False):
+    def put(self, dst_path: str, content: AnyStr, location='all', force=False) -> List[Transport]:
         transports = []
         for t in self.slice(location):
             if force or not t.exists(dst_path):
@@ -239,7 +244,7 @@ def slice(self, location):
         return transports
 
     @elasticapm.capture_span(span_type='filestore')
-    def upload(self, src_path, dst_path, location='all', force=False):
+    def upload(self, src_path: str, dst_path: str, location='all', force=False) -> List[Transport]:
         transports = []
         for t in self.slice(location):
             if force or not t.exists(dst_path):
@@ -251,7 +256,7 @@ def upload(self, src_path, dst_path, location='all', force=False):
         return transports
 
     @elasticapm.capture_span(span_type='filestore')
-    def upload_batch(self, local_remote_tuples, location='all'):
+    def upload_batch(self, local_remote_tuples, location='all') -> List[Tuple[str, str, str]]:
         failed_tuples = []
         for (src_path, dst_path) in local_remote_tuples:
             try:
diff --git a/assemblyline/filestore/transport/azure.py b/assemblyline/filestore/transport/azure.py
index 46f720d58..9ac404e60 100644
--- a/assemblyline/filestore/transport/azure.py
+++ b/assemblyline/filestore/transport/azure.py
@@ -134,7 +134,7 @@ def upload(self, src_path, dst_path):
             raise
 
     # Buffer based functions
-    def get(self, path):
+    def get(self, path: str) -> bytes:
         key = self.normalize(path)
         my_blob = BytesIO()
diff --git a/assemblyline/filestore/transport/ftp.py b/assemblyline/filestore/transport/ftp.py
index 9d906e0de..4d63e2af1 100644
--- a/assemblyline/filestore/transport/ftp.py
+++ b/assemblyline/filestore/transport/ftp.py
@@ -200,7 +200,7 @@ def upload_batch(self, local_remote_tuples):
 
     # Buffer based functions
     @reconnect_retry_on_fail
-    def get(self, path) -> bytes:
+    def get(self, path: str) -> bytes:
         path = self.normalize(path)
         bio = BytesIO()
         self.ftp.retrbinary('RETR ' + path, bio.write)
diff --git a/assemblyline/filestore/transport/http.py b/assemblyline/filestore/transport/http.py
index 4128515ea..7b783bb21 100644
--- a/assemblyline/filestore/transport/http.py
+++ b/assemblyline/filestore/transport/http.py
@@ -98,7 +98,7 @@ def upload_batch(self, local_remote_tuples):
         raise TransportException("READ ONLY TRANSPORT: Method not implemented")
 
     # Buffer based functions
-    def get(self, path):
+    def get(self, path: str) -> bytes:
         path = self.normalize(path)
         resp = self.session.get(path, auth=self.auth, cert=self.pki, verify=self.verify)
         if resp.ok:
diff --git a/assemblyline/filestore/transport/local.py b/assemblyline/filestore/transport/local.py
index 27b624696..59d3c0a93 100644
--- a/assemblyline/filestore/transport/local.py
+++ b/assemblyline/filestore/transport/local.py
@@ -89,7 +89,7 @@ def upload(self, src_path, dst_path):
         assert (self.exists(dst_path))
 
     # Buffer based functions
-    def get(self, path):
+    def get(self, path: str) -> bytes:
         path = self.normalize(path)
         fh = None
         try:
diff --git a/assemblyline/filestore/transport/s3.py b/assemblyline/filestore/transport/s3.py
index de3683479..7da3f1a0e 100644
--- a/assemblyline/filestore/transport/s3.py
+++ b/assemblyline/filestore/transport/s3.py
@@ -154,7 +154,7 @@ def upload(self, src_path, dst_path):
         self.with_retries(self.client.upload_file, src_path, self.bucket, dst_path)
 
     # Buffer based functions
-    def get(self, path):
+    def get(self, path: str) -> bytes:
         fd, dst_path = tempfile.mkstemp(prefix="s3_transport.", suffix=".download")
         os.close(fd)  # We don't need the file descriptor open
diff --git a/assemblyline/filestore/transport/sftp.py b/assemblyline/filestore/transport/sftp.py
index c9d700fba..f76954fa2 100644
--- a/assemblyline/filestore/transport/sftp.py
+++ b/assemblyline/filestore/transport/sftp.py
@@ -140,7 +140,7 @@ def upload_batch(self, local_remote_tuples):
 
     # Buffer based functions
     @reconnect_retry_on_fail
-    def get(self, path):
+    def get(self, path: str) -> bytes:
         path = self.normalize(path)
         bio = BytesIO()
         with self.sftp.open(path) as sftp_handle:
diff --git a/assemblyline/odm/models/config.py b/assemblyline/odm/models/config.py
index 27badff6e..95dd6d97b 100644
--- a/assemblyline/odm/models/config.py
+++ b/assemblyline/odm/models/config.py
@@ -703,6 +703,8 @@ class Services(odm.Model):
     stages: List[str] = odm.List(odm.Keyword())
     # Substitution variables for image paths (for custom registry support)
     image_variables: Dict[str, str] = odm.Mapping(odm.Keyword(default=''))
+    # Same as above, but only applied in the updater, used in dev setups and local registries
+    update_image_variables: Dict[str, str] = odm.Mapping(odm.Keyword(default=''))
     # Default update channel to be used for new services
     preferred_update_channel: str = odm.Keyword()
     # Allow container registries with self signed certs for service updates
@@ -720,6 +722,7 @@ class Services(odm.Model):
     "min_service_workers": 0,
     "stages": SERVICE_STAGES,
     "image_variables": {},
+    "update_image_variables": {},
     "preferred_update_channel": "stable",
     "allow_insecure_registry": False,
     "cpu_reservation": 0.25
diff --git a/assemblyline/odm/models/service.py b/assemblyline/odm/models/service.py
index f923dd603..a38b472d6 100644
--- a/assemblyline/odm/models/service.py
+++ b/assemblyline/odm/models/service.py
@@ -9,8 +9,8 @@
 @odm.model(index=False, store=False)
 class EnvironmentVariable(odm.Model):
-    name = odm.Keyword()
-    value = odm.Keyword()
+    name: str = odm.Keyword()
+    value: str = odm.Keyword()
 
 
 @odm.model(index=False, store=False)
@@ -39,20 +39,21 @@ class PersistentVolume(odm.Model):
 class DependencyConfig(odm.Model):
     container = odm.Compound(DockerConfig)
     volumes = odm.Mapping(odm.Compound(PersistentVolume), default={})
+    run_as_core: bool = odm.Boolean(default=False)
 
 
 @odm.model(index=False, store=False)
 class UpdateSource(odm.Model):
-    name = odm.Keyword()
-    password = odm.Optional(odm.Keyword(default=""))
-    pattern = odm.Optional(odm.Keyword(default=""))
-    private_key = odm.Optional(odm.Keyword(default=""))
-    ca_cert = odm.Optional(odm.Keyword(default=""))
-    ssl_ignore_errors = odm.Boolean(default=False)
-    proxy = odm.Optional(odm.Keyword(default=""))
-    uri = odm.Keyword()
-    username = odm.Optional(odm.Keyword(default=""))
-    headers = odm.List(odm.Compound(EnvironmentVariable), default=[])
+    name: str = odm.Keyword()
+    password: Opt[str] = odm.Optional(odm.Keyword(default=""))
+    pattern: Opt[str] = odm.Optional(odm.Keyword(default=""))
+    private_key: Opt[str] = odm.Optional(odm.Keyword(default=""))
+    ca_cert: Opt[str] = odm.Optional(odm.Keyword(default=""))
+    ssl_ignore_errors: bool = odm.Boolean(default=False)
+    proxy: Opt[str] = odm.Optional(odm.Keyword(default=""))
+    uri: str = odm.Keyword()
+    username: Opt[str] = odm.Optional(odm.Keyword(default=""))
+    headers: list[EnvironmentVariable] = odm.List(odm.Compound(EnvironmentVariable), default=[])
 
     default_classification = odm.Classification(default=Classification.UNRESTRICTED)
@@ -62,9 +63,9 @@ class UpdateConfig(odm.Model):
     generates_signatures = odm.Boolean(index=True, default=False)
     method = odm.Enum(values=['run', 'build'])  # Are we going to run or build a container?
     run_options = odm.Optional(odm.Compound(DockerConfig))  # If we are going to run a container, which one?
-    sources = odm.List(odm.Compound(UpdateSource), default=[])  # Generic external resources we need
-    update_interval_seconds = odm.Integer()  # Update check interval in seconds
-    wait_for_update = odm.Boolean(default=False)
+    sources: list[UpdateSource] = odm.List(odm.Compound(UpdateSource), default=[])  # Generic external resources we need
+    update_interval_seconds: int = odm.Integer()  # Update check interval in seconds
+    wait_for_update: bool = odm.Boolean(default=False)
@@ -90,7 +91,7 @@ class Service(odm.Model):
     is_external = odm.Boolean(default=False)
     licence_count = odm.Integer(default=0)
 
-    name = odm.Keyword(store=True, copyto="__text__")
+    name: str = odm.Keyword(store=True, copyto="__text__")
     version = odm.Keyword(store=True)
 
     # Should the result cache be disabled for this service
diff --git a/assemblyline/odm/models/service_delta.py b/assemblyline/odm/models/service_delta.py
index 2b75fc11c..b4ac7c227 100644
--- a/assemblyline/odm/models/service_delta.py
+++ b/assemblyline/odm/models/service_delta.py
@@ -48,6 +48,7 @@ class PersistentVolumeDelta(odm.Model):
 class DependencyConfigDelta(odm.Model):
     container = odm.Optional(odm.Compound(DockerConfigDelta))
     volumes = odm.Mapping(odm.Compound(PersistentVolumeDelta), default={})
+    run_as_core: bool = odm.Optional(odm.Boolean())
 
 
 @odm.model(index=False, store=False)
diff --git a/dev/k8s/README.md b/dev/k8s/README.md
new file mode 100644
index 000000000..f19ede269
--- /dev/null
+++ b/dev/k8s/README.md
@@ -0,0 +1,32 @@
+# Assemblyline Dev Setup (Kubernetes)
+- Follow the steps in [K8S appliance](https://github.com/CybercentreCanada/assemblyline-helm-chart/tree/master/appliance) for a local Kubernetes setup
+- Enable the registry add-on for microk8s (other registries, such as Harbor, can be used but involve more setup, which isn't covered here)
+  - Test: curl localhost:32000/v2/_catalog
+- When ready to build, run the local_dev_containers.sh script with a tag as its parameter.
+- Run helm install|upgrade using the new tags in values.yaml.
+- Use Lens or kubectl to monitor the status of the deployment.
+- You can create local service-base images by passing an optional build-arg to the docker build command; otherwise the latest image will be pulled.
+  - e.g. docker build . -f service-base.Dockerfile --build-arg build_no=dev0
+- Debugging: Visual Studio Code's [Bridge to Kubernetes](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.mindaro) &
+[Kubernetes](https://marketplace.visualstudio.com/items?itemName=ms-kubernetes-tools.vscode-kubernetes-tools) extensions
+  - Add the following to settings.json (assuming microk8s is installed from snap):
+  ```
+  "vs-kubernetes": {
+    "vs-kubernetes.namespace": "al",
+    "vs-kubernetes.kubectl-path": "/snap/kubectl/current/kubectl",
+    "vs-kubernetes.helm-path": "/snap/helm/current/helm",
+    "vs-kubernetes.minikube-path": "/snap/bin/microk8s",
+    "vs-kubernetes.kubectlVersioning": "user-provided",
+    "vs-kubernetes.outputFormat": "yaml",
+    "vs-kubernetes.kubeconfig": "/var/snap/microk8s/current/credentials/client.config",
+    "vs-kubernetes.knownKubeconfigs": [],
+    "vs-kubernetes.autoCleanupOnDebugTerminate": false,
+    "vs-kubernetes.nodejs-autodetect-remote-root": true,
+    "vs-kubernetes.nodejs-remote-root": "",
+    "vs-kubernetes.nodejs-debug-port": 9229,
+    "vs-kubernetes.local-tunnel-debug-provider": "",
+    "checkForMinikubeUpgrade": false,
+    "imageBuildTool": "Docker"
+  }
+  ```
+  - Specific to the Updater/Scaler: you need to provide an environment variable named 'KUBECONFIG' in your launch targets that points to your kubeconfig file.
diff --git a/dev/k8s/local_dev.Dockerfile b/dev/k8s/local_dev.Dockerfile
new file mode 100644
index 000000000..3a765cf81
--- /dev/null
+++ b/dev/k8s/local_dev.Dockerfile
@@ -0,0 +1,55 @@
+FROM python:3.9-slim-buster
+
+ENV PYTHONPATH=${PYTHONPATH}:/var/lib/assemblyline/.local/lib/python3.9/site-packages/:/opt/alv4/assemblyline-base:/opt/alv4/assemblyline-core:/opt/alv4/assemblyline-service-server:/opt/alv4/assemblyline-service-client:/opt/alv4/assemblyline-ui:/opt/alv4/assemblyline_client:/opt/alv4/assemblyline-v4-service:/opt/alv4/assemblyline-service-client
+
+# SSDEEP pkg requirements
+RUN apt-get update -yy \
+    && apt-get install -yy build-essential libffi-dev libfuzzy-dev libldap2-dev libsasl2-dev libmagic1 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create Assemblyline source directory
+RUN useradd -b /var/lib -U -m assemblyline
+RUN mkdir -p /etc/assemblyline
+RUN mkdir -p /var/cache/assemblyline
+RUN mkdir -p /var/lib/assemblyline
+RUN mkdir -p /var/lib/assemblyline/flowjs
+RUN mkdir -p /var/lib/assemblyline/bundling
+RUN mkdir -p /var/log/assemblyline
+WORKDIR /opt/alv4
+ENV PATH=/var/lib/assemblyline/.local/bin:$PATH
+
+# Install and uninstall the pypi version, so that docker can cache the
+# dependency installation, making repeated rebuilds with local changes faster
+RUN pip install assemblyline[test] assemblyline_core[test] assemblyline_ui[test,socketio] \
+    assemblyline_client[test] assemblyline_service_server[test] \
+    assemblyline_service_client[test] assemblyline_v4_service[test] \
+    && pip uninstall -y assemblyline assemblyline_core assemblyline_ui \
+    assemblyline_service_server assemblyline_client \
+    assemblyline_service_client assemblyline_v4_service
+
+#
+COPY assemblyline-base assemblyline-base
+RUN pip install -e ./assemblyline-base[test]
+
+COPY assemblyline-core assemblyline-core
+RUN pip install -e ./assemblyline-core[test]
+
+COPY assemblyline-ui assemblyline-ui
+RUN pip install -e ./assemblyline-ui[socketio,test]
+
+COPY assemblyline_client assemblyline_client
+RUN pip install -e ./assemblyline_client[test]
+
+COPY assemblyline-service-server assemblyline-service-server
+RUN pip install -e ./assemblyline-service-server[test]
+
+COPY assemblyline-service-client assemblyline-service-client
+RUN pip install -e ./assemblyline-service-client[test]
+
+COPY assemblyline-v4-service assemblyline-v4-service
+RUN pip install -e ./assemblyline-v4-service[test]
+
+
+# RUN pip uninstall -y assemblyline assemblyline_core assemblyline_ui \
+#     assemblyline_service_server assemblyline_client \
+#     assemblyline_service_client assemblyline_v4_service
\ No newline at end of file
diff --git a/dev/k8s/local_dev_containers.sh b/dev/k8s/local_dev_containers.sh
new file mode 100755
index 000000000..cfdd5454b
--- /dev/null
+++ b/dev/k8s/local_dev_containers.sh
@@ -0,0 +1,26 @@
+#!/bin/bash -ex
+
+# This script assumes it is run from the alv4 directory so it can pull in the base, core, service-server, client and ui dirs for the main container build
+
+echo "Building $1"
+
+# Build & push main container
+(docker build . -t localhost:32000/cccs/assemblyline:$1 -f assemblyline-base/dev/k8s/local_dev.Dockerfile)
+(docker tag localhost:32000/cccs/assemblyline:$1 localhost:32000/cccs/assemblyline:latest)
+
+# Build core containers
+cd assemblyline-base/dev/k8s/
+(docker tag localhost:32000/cccs/assemblyline:$1 localhost:32000/cccs/assemblyline-core:$1)
+(docker build . -t localhost:32000/cccs/assemblyline-ui:$1 -f ui.Dockerfile --build-arg build_no=$1)
+(docker build . -t localhost:32000/cccs/assemblyline-socketio:$1 -f socketio.Dockerfile --build-arg build_no=$1)
+(docker build . -t localhost:32000/cccs/assemblyline-service-server:$1 -f service-server.Dockerfile --build-arg build_no=$1)
+
+# Push core to local registry
+(docker push localhost:32000/cccs/assemblyline-core:$1)
+(docker push localhost:32000/cccs/assemblyline-ui:$1)
+(docker push localhost:32000/cccs/assemblyline-socketio:$1)
+(docker push localhost:32000/cccs/assemblyline-service-server:$1)
+
+# Build service-base
+(docker build . -t cccs/assemblyline-v4-service-base:$1 -f service-base.Dockerfile --build-arg build_no=$1)
+(docker tag cccs/assemblyline-v4-service-base:$1 cccs/assemblyline-v4-service-base:latest)
diff --git a/dev/k8s/service-base.Dockerfile b/dev/k8s/service-base.Dockerfile
new file mode 100644
index 000000000..8182d5ed9
--- /dev/null
+++ b/dev/k8s/service-base.Dockerfile
@@ -0,0 +1,14 @@
+ARG build_no=latest
+FROM localhost:32000/cccs/assemblyline:$build_no
+
+# Set up environment variables
+ENV PYTHONPATH $PYTHONPATH:/opt/al_service
+ENV SERVICE_API_HOST http://al_service_server:5003
+ENV SERVICE_API_AUTH_KEY ThisIsARandomAuthKey...ChangeMe!
+ENV CONTAINER_MODE true
+
+RUN mkdir -p /opt/al_service
+RUN touch /opt/al_service/__init__.py
+USER assemblyline
+
+CMD ["python", "/opt/alv4/assemblyline-v4-service/docker/process_handler.py"]
diff --git a/dev/k8s/service-server.Dockerfile b/dev/k8s/service-server.Dockerfile
new file mode 100644
index 000000000..31613d223
--- /dev/null
+++ b/dev/k8s/service-server.Dockerfile
@@ -0,0 +1,4 @@
+ARG build_no=latest
+FROM localhost:32000/cccs/assemblyline:$build_no
+
+CMD ["gunicorn", "assemblyline_service_server.patched:app", "--config=python:assemblyline_service_server.gunicorn_config", "--worker-class", "gevent"]
diff --git a/dev/k8s/socketio.Dockerfile b/dev/k8s/socketio.Dockerfile
new file mode 100644
index 000000000..a4d54e4b6
--- /dev/null
+++ b/dev/k8s/socketio.Dockerfile
@@ -0,0 +1,4 @@
+ARG build_no=latest
+FROM localhost:32000/cccs/assemblyline:$build_no
+
+CMD ["gunicorn", "-b", ":5002", "-w", "1", "-k", "geventwebsocket.gunicorn.workers.GeventWebSocketWorker", "assemblyline_ui.socketsrv:app"]
diff --git a/dev/k8s/ui.Dockerfile b/dev/k8s/ui.Dockerfile
new file mode 100644
index 000000000..b54d7244a
--- /dev/null
+++ b/dev/k8s/ui.Dockerfile
@@ -0,0 +1,4 @@
+ARG build_no=latest
+FROM localhost:32000/cccs/assemblyline:$build_no
+
+CMD ["gunicorn", "assemblyline_ui.patched:app", "--config=python:assemblyline_ui.gunicorn_config", "--worker-class", "gevent"]
diff --git a/docker/al_dev/Dockerfile b/docker/al_dev/Dockerfile
index d8ee3a0b9..903663874 100644
--- a/docker/al_dev/Dockerfile
+++ b/docker/al_dev/Dockerfile
@@ -3,7 +3,7 @@ FROM python:3.9-slim-buster
 # assemblyline-core, assemblyline-service-server and assemblyline-service-client code is checked out
 
 # Setup environment variables
-ENV PYTHONPATH /opt/alv4/assemblyline-base:/opt/alv4/assemblyline-core:/opt/alv4/assemblyline-service-server:/opt/alv4/assemblyline-service-client:/opt/alv4/assemblyline-ui
+ENV PYTHONPATH /opt/alv4/assemblyline-base:/opt/alv4/assemblyline-core:/opt/alv4/assemblyline-service-server:/opt/alv4/assemblyline-service-client:/opt/alv4/assemblyline_client:/opt/alv4/assemblyline-ui
 
 # SSDEEP pkg requirements
 RUN apt-get update && apt-get install -yy build-essential libffi-dev libfuzzy-dev libldap2-dev libsasl2-dev libmagic1 && rm -rf /var/lib/apt/lists/*
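Reviewer note: a minimal sketch of how the new `CacheStore` file-based methods in this patch (`upload`, `touch`, `download`, and the now-typed `get`) fit together. The component name, cache key, and paths are hypothetical, and it assumes a deployment where the datastore and filestore are reachable through the default config:

```python
from assemblyline.cachestore import CacheStore

# "my_component" and "latest_results" are made-up names, for illustration only.
with CacheStore("my_component") as cache:        # __enter__ now returns the CacheStore
    # Stream a file from disk into the cache instead of holding it in memory
    cache.upload("latest_results", "/tmp/results.json", ttl=3600)

    if cache.exists("latest_results"):
        cache.touch("latest_results", ttl=7200)  # extend the expiry without re-uploading
        data = cache.get("latest_results")       # -> bytes, read back into memory
        cache.download("latest_results", "/tmp/results_copy.json")  # or write to a path

    cache.delete("latest_results")
```

The design point of `upload`/`download` is that they move content directly between disk and the filestore, so large artifacts never need to be buffered in memory the way `save`/`get` require, while `touch` refreshes only the datastore expiry record.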
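Similarly, a short sketch of the `FileStore` surface that the new annotations describe: `put`, `upload`, and `exists` report the list of `Transport` objects that handled the call, while `get` returns raw bytes. The `file://` URL scheme for the local-disk transport is an assumption here, as are the key and paths:

```python
from assemblyline.filestore import FileStore

# Assumption: 'file://' maps to the local-disk transport; key and paths are examples.
fs = FileStore("file:///tmp/al_filestore")

written_to = fs.put("sample_key", b"hello world")  # -> List[Transport] that stored it
if fs.exists("sample_key"):                        # -> List[Transport]; truthy when found
    payload = fs.get("sample_key")                 # -> b'hello world'
    fs.download("sample_key", "/tmp/sample.out")   # copy the content to a local path

fs.delete("sample_key")
fs.close()
```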