diff --git a/analyzers/VMRay/VMRay.json b/analyzers/VMRay/VMRay.json index 59dea9b56..07d13170b 100644 --- a/analyzers/VMRay/VMRay.json +++ b/analyzers/VMRay/VMRay.json @@ -3,9 +3,13 @@ "license": "AGPL-V3", "author": "Nils Kuhnert, CERT-Bund", "url": "https://github.com/BSI-CERT-Bund/cortex-analyzers", - "version": "3.0", + "version": "3.1", "description": "VMRay Sandbox file analysis.", - "dataTypeList": ["hash", "file"], + "dataTypeList": [ + "hash", + "file", + "url" + ], "command": "VMRay/vmray.py", "baseConfig": "VMRay", "configurationItems": [ @@ -28,7 +32,7 @@ "description": "Verify certificates", "type": "boolean", "multi": false, - "required": true, + "required": false, "defaultValue": true }, { @@ -39,12 +43,115 @@ "required": false }, { - "name": "disablereanalyze", - "description": "If set to true, samples won't get re-analyzed.", + "name": "query_retry_wait", + "description": "The number of seconds to wait before trying to fetch the results.", + "type": "number", + "multi": false, + "required": false, + "defaultValue": 10 + }, + { + "name": "recursive_sample_limit", + "description": "The maximum number of recursive samples that will be analyzed. 0 disables recursion.", + "type": "number", + "multi": false, + "required": false, + "defaultValue": 10 + }, + { + "name": "reanalyze", + "description": "If set to true, known samples will be re-analyzed on submission. This is enabled by default.", + "type": "boolean", + "multi": false, + "required": false, + "defaultValue": true + }, + { + "name": "shareable", + "description": "If set to true, the hash of the sample will be shared with VirusTotal if the TLP level is white or green.", "type": "boolean", "multi": false, "required": false, "defaultValue": false + }, + { + "name": "archive_password", + "description": "The password that will be used to extract archives.", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "malware" + }, + { + "name": "archive_compound_sample", + "description": "If set to true, files inside archives are treated as a single, compound sample. Otherwise, each file is treated as its own sample.", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": false + }, + { + "name": "max_jobs", + "description": "Limits the number of jobs that can be created by jobrules for a submission.", + "type": "number", + "multi": false, + "required": false + }, + { + "name": "enable_reputation", + "description": "If set to true, reputation lookups will be performed for submitted samples and analysis artifacts (file hash and URL lookups) by the VMRay cloud reputation service and additional third party services. The user analyzer setting is used as the default value for this parameter.", + "type": "boolean", + "multi": false, + "required": false + }, + { + "name": "enable_whois", + "description": "If set to true, domains seen during analyses are queried against an external WHOIS service. The user analyzer setting is used as the default value for this parameter.", + "type": "boolean", + "multi": false, + "required": false + }, + { + "name": "analyzer_mode", + "description": "Specifies which types of analyzers will be used for analyzing this sample. Supported strings are 'reputation', 'reputation_static', 'reputation_static_dynamic', 'static_dynamic', and 'static'. 
The user analyzer setting is used as the default value for this parameter.", + "type": "string", + "multi": false, + "required": false + }, + { + "name": "known_malicious", + "description": "If set to true, triage will be used to pre-filter known malicious samples by results of reputation lookup (if allowed) and static analysis. The user analyzer setting is used as the default value for this parameter.", + "type": "boolean", + "multi": false, + "required": false + }, + { + "name": "known_benign", + "description": "If set to true, triage will be used to pre-filter known benign samples by results of reputation lookup (if allowed) and static analysis. The user analyzer setting is used as the default value for this parameter.", + "type": "boolean", + "multi": false, + "required": false + }, + { + "name": "tags", + "description": "Tags to attach to the sample.", + "type": "string", + "multi": true, + "required": false + }, + { + "name": "timeout", + "description": "Analysis timeout in seconds.", + "type": "number", + "multi": false, + "required": false + }, + { + "name": "net_scheme_name", + "description": "Name of the network scheme.", + "type": "string", + "multi": false, + "required": false } ] -} +} \ No newline at end of file diff --git a/analyzers/VMRay/vmray.py b/analyzers/VMRay/vmray.py index 52d41e566..c748c4b2d 100755 --- a/analyzers/VMRay/vmray.py +++ b/analyzers/VMRay/vmray.py @@ -7,98 +7,289 @@ class VMRayAnalyzer(Analyzer): """ VMRay analyzer that uses VMRayClient to connect to an VMRay instance. Allows uploading a sample and getting - information via hash. More info regarding configuration in the complete documentation. + information back via submission data. More info regarding configuration in the complete documentation. """ + + _namespace = "VMRay" + + _severity_mapping = { + "whitelisted": "safe", + "suspicious": "suspicious", + "malicious": "malicious", + "blacklisted": "malicious", + } + + _ioc_mapping = { + "domains": ("domain", "domain"), + "email_addresses": ("email", "mail"), + "emails": ("sender", "mail"), + "files": ("filename", "filename"), + "ips": ("ip_address", "ip"), + "mutexes": ("mutex_name", "other"), + "registry": ("reg_key_name", "registry"), + "urls": ("url", "url"), + } + def __init__(self): Analyzer.__init__(self) - self.url = self.get_param('config.url', None, 'No VMRay url given.').rstrip('/ ') - self.disable_reanalyze = self.get_param('config.disablereanalyze', False) + self.reanalyze = self.get_param("config.reanalyze", True) + self.shareable = self.get_param("config.shareable", False) + self.tags = self.get_param("config.tags", ["TheHive"]) + self.user_config = { + "timeout": self.get_param("config.timeout", None), + "net_scheme_name": self.get_param("config.net_scheme_name", None), + } - # Check for string and boolean True - if self.disable_reanalyze == 'true' or self.disable_reanalyze: - reanalyze = False - else: - reanalyze = True + self.query_retry_wait = self.get_param("config.query_retry_wait", 10) + self.recursive_sample_limit = self.get_param( + "config.recursive_sample_limit", 10 + ) - verify = self.get_param('config.certverify', None, 'Certificate verification parameter is missing.') - certpath = self.get_param('config.certpath', None) + verify = self.get_param("config.certverify", True) + certpath = self.get_param("config.certpath", None) if verify and certpath: verify = certpath - self.vmrc = VMRayClient(url=self.url, - key=self.get_param('config.key', None, 'No VMRay API key given.'), - cert=verify, - reanalyze=reanalyze) + + archive_compound_sample 
= self.get_param( + "config.archive_compound_sample", False + ) + if archive_compound_sample: + archive_action = "compound_sample" + else: + archive_action = "separate_samples" + + self.vmrc = VMRayClient( + url=self.get_param("config.url", None, "No VMRay URL given.").rstrip("/ "), + key=self.get_param("config.key", None, "No VMRay API key given."), + reanalyze=self.reanalyze, + verify=verify, + archive_password=self.get_param("config.archive_password", "malware"), + archive_action=archive_action, + max_jobs=self.get_param("config.max_jobs", None), + enable_reputation=self.get_param("config.enable_reputation", None), + enable_whois=self.get_param("config.enable_whois", None), + analyzer_mode=self.get_param("config.analyzer_mode", None), + known_malicious=self.get_param("config.known_malicious", None), + known_benign=self.get_param("config.known_benign", None), + ) + + def _build_sample_node(self, sample, current_recursion_level): + sample_id = sample["sample_id"] + sample["sample_analyses"] = self.vmrc.get_sample_analyses(sample_id) + sample["sample_threat_indicators"] = self.vmrc.get_sample_threat_indicators( + sample_id + ) + sample["sample_mitre_attack"] = self.vmrc.get_sample_mitre_attack(sample_id) + sample["sample_iocs"] = self.vmrc.get_sample_iocs(sample_id) + if self.recursive_sample_limit > current_recursion_level: + sample["sample_child_samples"] = [ + self.vmrc.get_sample(child_sample_id) + for child_sample_id in sample["sample_child_sample_ids"] + ] + for child_sample in sample["sample_child_samples"]: + self._build_sample_node(child_sample, current_recursion_level + 1) + + def _build_report(self, submissions=None, samples=None): + if not submissions and not samples: + self.error( + "Either submissions or samples must be provided in order to build a report" + ) + return + sample_ids = ( + [sample["sample_id"] for sample in samples] + if samples + else [submission["submission_sample_id"] for submission in submissions] + ) + # note: the dictionary fetched in case the reanalysis is disabled is incomplete. 
we need to query the samples again in all cases + samples = [self.vmrc.get_sample(sample_id) for sample_id in sample_ids] + for sample in samples: + self._build_sample_node(sample, 0) + return {"samples": samples} + + def _wait_for_results(self, submission_result): + # Ref: #332: check if job was submitted + if not self.reanalyze: + if len(submission_result["errors"]) > 0: + # Sample has already been analyzed and reanalysis is turned off, get the reports + self.report(self._build_report(samples=submission_result["samples"])) + return # stop waiting for report, because we already have it + + running_submissions = submission_result["submissions"] + finished_submissions = [] + while len(running_submissions) != len(finished_submissions): + finished_submissions.extend( + [ + updated_submission + for updated_submission in ( + self.vmrc.update_submission(current_submission["submission_id"]) + for current_submission in running_submissions + ) + if "submission_finished" in updated_submission + and updated_submission["submission_finished"] + ] + ) + sleep(self.query_retry_wait) + + # Return the results + self.report(self._build_report(submissions=finished_submissions)) def run(self): - if self.data_type == 'hash': - self.report({'scanreport': self.vmrc.get_sample(self.get_data())}) - elif self.data_type == 'file': - filepath = self.get_param('file') - filename = self.get_param('filename') - submit_report = self.vmrc.submit_sample(filepath=filepath, - filename=filename) - # Ref: #332: check if job was submitted - if self.disable_reanalyze: - if len(submit_report['data']['errors']) > 0: - if submit_report['result'] == 'ok': - # Sample is already there, get the report - self.report({'scanreport': self.vmrc.get_sample(samplehash=submit_report['data']['samples'][0]['sample_sha256hash'])}) - return - else: - self.error('Error while submitting sample to VMRay: {}.' 
- .format([error_msg for error_msg in submit_report['data']['errors']])) - # Check for completion - while not self.vmrc.query_job_status(submissionid=submit_report['data']['submissions'][0]['submission_id']): - sleep(10) - - # Return the results - self.report({'scanreport': self.vmrc.get_sample( - samplehash=submit_report['data']['submissions'][0]['submission_sample_sha256']) - }) + if self.data_type == "hash": + # don't run anything, try to build a report using existing results instead + samples = self.vmrc.get_samples_by_hash(self.get_data()) + if samples: + self.report(self._build_report(samples=samples)) + else: + self.report({"samples": samples}) + elif self.data_type == "file": + shareable = self.shareable and self.get_param("tlp") in (0, 1) + self._wait_for_results( + self.vmrc.submit_file_sample( + file_path=self.get_param("file"), + file_name=self.get_param("filename"), + tags=self.tags, + shareable=shareable, + user_config=self.user_config, + ) + ) + elif self.data_type == "url": + shareable = self.shareable and self.get_param("tlp") in (0, 1) + self._wait_for_results( + self.vmrc.submit_url_sample( + url_sample=self.get_data(), + tags=self.tags, + shareable=shareable, + user_config=self.user_config, + ) + ) else: - self.error('Data type currently not supported') + self.error("Data type currently not supported") - def summary(self, raw): + def _taxonomies_for_samples(self, samples): taxonomies = [] - namespace = "VMRay" - predicate = "Scan" + for sample in samples: + level = self._severity_mapping.get(sample["sample_severity"], "info") + value = "{}".format(sample["sample_score"]) + if len(samples) > 1: + value += " (from sample {})".format(sample["sample_id"]) + taxonomies.append( + self.build_taxonomy(level, self._namespace, "Score", value) + ) - r = { - 'reports': [] - } + for threat_indicator in sample.get("sample_threat_indicators", {}).get( + "threat_indicators", [] + ): + predicate = threat_indicator.get("category", None) + value = threat_indicator.get("operation", "") + if predicate: + taxonomies.append( + self.build_taxonomy(level, self._namespace, predicate, value) + ) + + for mitre_technique in sample.get("sample_mitre_attack", {}).get( + "mitre_attack_techniques", [] + ): + predicate = mitre_technique.get("technique_id", None) + value = mitre_technique.get("technique", "Unknown MITRE technique") + if "tactics" in mitre_technique: + value += " using tactics: {}".format( + ", ".join(mitre_technique["tactics"]) + ) + if predicate: + taxonomies.append( + self.build_taxonomy(level, self._namespace, predicate, value) + ) + + # add child sample taxonomies if they have been added + taxonomies.extend( + self._taxonomies_for_samples(sample.get("sample_child_samples", [])) + ) + return taxonomies + + def _sandbox_reports_for_samples(self, samples): + sandbox = "vmray" + sandbox_type = "on-premise" + sandbox_reports = [] + for sample in samples: + permalink = sample.get("sample_webif_url", None) + score = sample.get("sample_vti_score", 0) + sandbox_report = { + "permalink": permalink, + "score": score, + "sandbox-type": sandbox_type, + "{}-sandbox".format(sandbox_type): sandbox, + } + sandbox_reports.append(sandbox_report) - if raw.get('scanreport', None) and len(raw.get('scanreport').get('data')) > 0: - for scan in raw.get('scanreport').get('data'): - r['reports'].append({ - 'score': scan.get('sample_score'), - 'sample_severity': scan.get('sample_severity'), - 'sample_last_reputation_severity': scan.get('sample_last_reputation_severity'), - 'url': scan.get('sample_webif_url') - 
}) - - if len(r["reports"]) == 0: - value = "No Scan" - level = "info" - taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) + # add child sample sandbox reports if they have been added + sandbox_reports.extend( + self._sandbox_reports_for_samples( + sample.get("sample_child_samples", []) + ) + ) + return sandbox_reports + + def summary(self, raw): + taxonomies = [] + sandbox_reports = [] + samples = raw.get("samples", []) + if len(samples) == 0: + taxonomies.append( + self.build_taxonomy("info", self._namespace, "None", "No Scan") + ) else: - for s in r["reports"]: - i = 1 - if s["sample_severity"] == "not_suspicious": - level = "safe" - elif s["sample_severity"] == "malicious": - level = "malicious" - else: - level = "info" - - if len(r["reports"]) > 1: - value = "{}( from scan {})".format(s["score"], i) - else: - value = "{}".format(s["score"]) - taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) - i += 1 - - return {"taxonomies": taxonomies} - - -if __name__ == '__main__': + taxonomies.extend(self._taxonomies_for_samples(samples)) + sandbox_reports.extend(self._sandbox_reports_for_samples(samples)) + return {"taxonomies": taxonomies, "sandbox-reports": sandbox_reports} + + def _artifacts_for_samples(self, samples): + artifacts = [] + for sample in samples: + link = sample.get("sample_webif_url", None) + iocs = sample.get("sample_iocs", {}).get("iocs", {}) + + for ( + ioc_type, + (ioc_payload_name, ioc_data_type), + ) in self._ioc_mapping.items(): + if ioc_type in iocs: + for ioc_node in iocs[ioc_type]: + severity = ioc_node.get("severity", "unknown") + level = self._severity_mapping.get(severity, "info") + tags = list(set((severity, level, ioc_node["type"]))) + payload = ioc_node[ioc_payload_name] + + context_tags = [] + if "hashes" in ioc_node: + for hash_node in ioc_node["hashes"]: + if "sha256_hash" in hash_node: + hash_value = hash_node["sha256_hash"] + context_tags.append("sha256:{}".format(hash_value)) + artifacts.append( + self.build_artifact( + "hash", hash_value, message=link, tags=tags + ) + ) + elif "operations" in ioc_node: + for operation in ioc_node["operations"]: + context_tags.append("operation:{}".format(operation)) + + tags.extend(set(context_tags)) + artifacts.append( + self.build_artifact( + ioc_data_type, payload, message=link, tags=tags + ) + ) + + # add child samples if they have been added + artifacts.extend( + self._artifacts_for_samples(sample.get("sample_child_samples", [])) + ) + return artifacts + + def artifacts(self, raw): + return self._artifacts_for_samples(raw.get("samples", [])) + + +if __name__ == "__main__": VMRayAnalyzer().run() diff --git a/analyzers/VMRay/vmrayclient.py b/analyzers/VMRay/vmrayclient.py index d66935d1c..20b83c468 100755 --- a/analyzers/VMRay/vmrayclient.py +++ b/analyzers/VMRay/vmrayclient.py @@ -1,4 +1,5 @@ #!/usr/bin/env python + import base64 import json import os @@ -7,31 +8,40 @@ class VMRayClientError(Exception): - """Parent class for all specific errors used by VMRayClient.""" + """ Parent class for all specific errors used by VMRayClient. """ + + pass + + +class VMRayAPIError(VMRayClientError): + """ Raised in case the VMRay API returns an error. """ + pass class UnknownHashTypeError(VMRayClientError): - """Raised when length of hash as hex-string (or in bits) is not 32 (128 bit), 40 (160 bit) or 64 (256 bit).""" + """ Raised when length of hash as hex-string (or in bits) is not 32 (128 bit), 40 (160 bit) or 64 (256 bit). 
""" + pass class BadResponseError(VMRayClientError): - """HTTP return status is not 200.""" + """ Raised in case the VMRay API returns a non-2xx status code. """ + pass class SampleFileNotFoundError(VMRayClientError): """Sample file was not found under given filepath.""" + pass -class UnknownSubmissionIdError(VMRayClientError): - """Thrown on invalid submission id or if id request fails.""" - pass +def _filter_dict(data): + return dict(filter(lambda item: item[1] is not None, data.items())) -class VMRayClient: +class VMRayClient(object): """ Client that connects to the VMRay api and allows searching for samples via hash and uploading a new sample to VMRay. @@ -39,106 +49,287 @@ class VMRayClient: :type url: str :param key: API Key :type key: str - :param cert: Certificate for ssl validation in case the server certificate is self-signed. **Default: True** - :type cert: [bool, str] :param reanalyze: Force reanalyzation. VMRay does not provide additional information if sample has already been uploaded, so this could be useful to obtain information. **Default: True** :type reanalyze: bool + :param verify: Certificate for ssl validation in case the server certificate is self-signed. **Default: True** + :type verify: [bool, str] """ - def __init__(self, url, key, cert=True, reanalyze=True): + + _submit_endpoint = "/rest/sample/submit" + _submission_endpoint = "/rest/submission/{}" + _sample_endpoint = "/rest/sample/{}" + _sample_hash_endpoint = "/rest/sample/{t}/{h}" + _sample_analyses_endpoint = "/rest/analysis/sample/{}" + _sample_iocs_endpoint = "/rest/sample/{}/iocs" + _sample_mitre_endpoint = "/rest/sample/{}/mitre_attack" + _sample_threat_indicators_endpoint = "/rest/sample/{}/threat_indicators" + _continuation_endpoint = "/rest/continuation/{}" + + def __init__( + self, + url, + key, + recursive_sample_limit=10, + reanalyze=True, + verify=True, + **optional_parameters + ): self.url = url self.key = key - if cert and os.path.isfile(cert): - self.cert = cert - else: - self.cert = False self.reanalyze = reanalyze + self.recursive_sample_limit = recursive_sample_limit self.headers = self._prepare_headers() self.session = sessions.Session() self.session.headers = self.headers - self.session.verify = self.cert + self.session.verify = verify + self.optional_parameters = optional_parameters def _prepare_headers(self): """Prepares connection headers for authorization. :returns: Dict with HTTP headers :rtype: dict""" - headers = {'Authorization': 'api_key {}'.format(self.key)} + headers = {"Authorization": "api_key {}".format(self.key)} return headers - def get_sample(self, samplehash): + def _check_response(self, res): """ - Downloads information about a sample using a given hash. - - :param samplehash: hash to search for. Has to be either md5, sha1 or sha256 - :type samplehash: str - :returns: Dictionary of results - :rtype: dict + Check the response code of the API and either return the results or raise an error. 
""" - apiurl = '/rest/sample/' - if len(samplehash) == 32: # MD5 - apiurl += 'md5/' - elif len(samplehash) == 40: # SHA1 - apiurl += 'sha1/' - elif len(samplehash) == 64: # SHA256 - apiurl += 'sha256/' + if res.status_code < 200 or res.status_code > 299: + raise BadResponseError( + "HTTP response code from VMRay indicates an error: {c} ({t})".format( + c=res.status_code, t=res.text + ) + ) else: - raise UnknownHashTypeError('Sample hash has an unknown length.') + response_json = res.json() + if response_json.get("result") == "ok": + data = response_json.get("data", []) + if "continuation_id" in response_json: + result = self.session.get( + url="{}{}".format( + self.url, + self._continuation_endpoint.format( + response_json["continuation_id"] + ), + ) + ) + data.extend(self._check_response(result)) + return data + else: + error_content = "Error from VMRay via API: {}" + if "data" in response_json: + error_content = error_content.format( + "; ".join(response_json["data"]["errors"]) + ) + elif "error_msg" in response_json: + error_content = error_content.format(response_json["error_msg"]) + else: + error_content = error_content.format("Unspecified error occurred") + raise VMRayAPIError(error_content) - res = self.session.get(self.url + apiurl + samplehash) - if res.status_code == 200: - return json.loads(res.text) - else: - raise BadResponseError('Response from VMRay was not HTTP 200.' - ' Responsecode: {}; Text: {}'.format(res.status_code, res.text)) + def submit_url_sample( + self, url_sample, tags=["TheHive"], shareable=False, user_config={} + ): + """ + Uploads a new URL sample to VMRay api. + + :param url_sample: url to be analyzed + :type url_sample: str + :param tags: List of tags to apply to the sample + :type tags: list(str) + :returns: List of submissions and samples + :rtype: list(dict) + """ + params = _filter_dict(self.optional_parameters) + params.update( + { + "sample_url": url_sample, + "reanalyze": self.reanalyze, + "shareable": shareable, + "max_recursive_samples": self.recursive_sample_limit, + } + ) + if tags: + params["tags"] = ",".join(filter(None, tags)) + + user_config = _filter_dict(user_config) + if user_config: + params["user_config"] = json.dumps(user_config) - def submit_sample(self, filepath, filename, tags=['TheHive']): + return self._check_response( + self.session.post( + url="{}{}".format(self.url, self._submit_endpoint), + params=params, + ) + ) + + def submit_file_sample( + self, file_path, file_name, tags=["TheHive"], shareable=False, user_config={} + ): """ - Uploads a new sample to VMRay api. Filename gets sent base64 encoded. + Uploads a new file sample to VMRay API. Filename gets sent base64 encoded. 
- :param filepath: path to sample - :type filepath: str - :param filename: filename of the original file - :type filename: str + :param file_path: path to sample + :type file_path: str + :param file_name: filename of the original file + :type file_name: str :param tags: List of tags to apply to the sample :type tags: list(str) - :returns: Dictionary of results - :rtype: dict + :returns: List of submissions and samples + :rtype: list(dict) """ - apiurl = '/rest/sample/submit?sample_file' - params = {'sample_filename_b64enc': base64.b64encode(filename.encode('utf-8')), - 'reanalyze': self.reanalyze} + params = _filter_dict(self.optional_parameters) + params.update( + { + "sample_filename_b64enc": base64.b64encode(file_name.encode("utf-8")), + "reanalyze": self.reanalyze, + "shareable": shareable, + "max_recursive_samples": self.recursive_sample_limit, + } + ) if tags: - params['tags'] = ','.join(tags) - - if os.path.isfile(filepath): - res = self.session.post(url=self.url + apiurl, - files=[('sample_file', open(filepath, mode='rb'))], - params=params) - if res.status_code == 200: - return json.loads(res.text) - else: - raise BadResponseError('Response from VMRay was not HTTP 200.' - ' Responsecode: {}; Text: {}'.format(res.status_code, res.text)) + params["tags"] = ",".join(filter(None, tags)) + + user_config = _filter_dict(user_config) + if user_config: + params["user_config"] = json.dumps(user_config) + + if os.path.isfile(file_path): + return self._check_response( + self.session.post( + url="{}{}".format(self.url, self._submit_endpoint), + files=[("sample_file", open(file_path, mode="rb"))], + params=params, + ) + ) else: - raise SampleFileNotFoundError('Given sample file was not found.') + raise SampleFileNotFoundError("Given sample file was not found.") + + def get_sample_threat_indicators(self, sample_id): + """ + Download Threat Indicators for a given sample id. + + :param sample_id: ID of the sample + :type sample_id: int + :returns: Dictionary of Threat Indicators + :rtype: dict + """ + return self._check_response( + self.session.get( + url="{}{}".format( + self.url, self._sample_threat_indicators_endpoint.format(sample_id) + ), + ) + ) + + def get_sample_mitre_attack(self, sample_id): + """ + Download MITRE ATT&CK(tm) information for a given sample id. + + :param sample_id: ID of the sample + :type sample_id: int + :returns: Dictionary of MITRE ATT&CK(tm) information + :rtype: dict + """ + + return self._check_response( + self.session.get( + url="{}{}".format( + self.url, self._sample_mitre_endpoint.format(sample_id) + ), + ) + ) + + def get_sample(self, sample_id): + """ + Query sample with a given sample id. - def query_job_status(self, submissionid): + :param sample_id: ID of the sample + :type sample_id: int + :returns: Dictionary of Samples + :rtype: dict """ - Queries vmray to check id a job was - - :param submissionid: ID of the job/submission - :type submissionid: int - :returns: True if job finished, false if not - :rtype: bool + return self._check_response( + self.session.get( + url="{}{}".format(self.url, self._sample_endpoint.format(sample_id)), + ) + ) + + def get_samples_by_hash(self, sample_hash): """ + Downloads information about all samples matching the given hash. 
- apiurl = '/rest/submission/' - result = self.session.get('{}{}{}'.format(self.url, apiurl, submissionid)) - if result.status_code == 200: - submission_info = json.loads(result.text) - if submission_info.get('data', {}).get('submission_finished', False): # Or something like that - return True + :param samplehash: hash to search for. Has to be either md5, sha1 or sha256 + :type samplehash: str + :returns: List of samples + :rtype: list(dict) + """ + if len(sample_hash) == 32: # MD5 + hash_type = "md5" + elif len(sample_hash) == 40: # SHA1 + hash_type = "sha1" + elif len(sample_hash) == 64: # SHA256 + hash_type = "sha256" else: - raise UnknownSubmissionIdError('Submission id seems invalid, response was not HTTP 200.') - return False + raise UnknownHashTypeError("Sample hash has an unknown length.") + + return self._check_response( + self.session.get( + url="{}{}".format( + self.url, + self._sample_hash_endpoint.format(t=hash_type, h=sample_hash), + ), + ) + ) + + def get_sample_iocs(self, sample_id): + """ + Query IOCs for a given sample id. + + :param sample_id: ID of the sample + :type sample_id: int + :returns: Dictionary of IOCs + :rtype: dict + """ + return self._check_response( + self.session.get( + url="{}{}".format( + self.url, self._sample_iocs_endpoint.format(sample_id) + ), + ) + ) + + def update_submission(self, submission_id): + """ + Queries the current state of a submission. + + :param submission_id: ID of the submission + :type submission_id: int + :returns: Dictionary representing the submission + :rtype: dict + """ + return self._check_response( + self.session.get( + "{}{}".format(self.url, self._submission_endpoint.format(submission_id)) + ) + ) + + def get_sample_analyses(self, sample_id): + """ + Queries analyses about a sample using a given sample id. + + :param sample_id: ID of the sample + :type sample_id: int + :returns: List of analyses + :rtype: list(dict) + """ + return self._check_response( + self.session.get( + "{}{}".format( + self.url, self._sample_analyses_endpoint.format(sample_id) + ) + ) + ) diff --git a/thehive-templates/VMRay_3_0/short.html b/thehive-templates/VMRay_3_0/short.html deleted file mode 100644 index 5fc0dabfb..000000000 --- a/thehive-templates/VMRay_3_0/short.html +++ /dev/null @@ -1,3 +0,0 @@ - - {{t.namespace}}:{{t.predicate}}="{{t.value}}" - diff --git a/thehive-templates/VMRay_3_0/long.html b/thehive-templates/VMRay_3_1/long.html similarity index 72% rename from thehive-templates/VMRay_3_0/long.html rename to thehive-templates/VMRay_3_1/long.html index f81c35192..2ce2e970a 100644 --- a/thehive-templates/VMRay_3_0/long.html +++ b/thehive-templates/VMRay_3_1/long.html @@ -1,8 +1,8 @@ -