# Set the default storage classes for microk8s
redisStorageClass: microk8s-hostpath
updateStorageClass: microk8s-hostpath
persistantStorageClass: microk8s-hostpath
# Turn off replication since there is only one elastic node
# NOTE: If you decide to add more datastore nodes, this should be set
# to 1 once there are more than 3 datastore nodes
elasticDefaultReplicas: 0
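# For example, once you have more than 3 datastore nodes (per the note
# above) you would set:
# elasticDefaultReplicas: 1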
# Use a smaller number of shards since there is only one node
elasticDefaultShards: 1
elasticAlertShards: 1
elasticEmptyResultShards: 2
elasticFileShards: 2
elasticFileScoreShards: 2
elasticResultShards: 9
elasticSafelistShards: 2
elasticSubmissionShards: 3
# Turn off log gathering to save processing power; you can still access
# logs via kubectl or Lens
# NOTE: This could be turned back on if the appliance node is powerful enough
# If you turn it on, make sure you set internalELKStack to true as well
enableLogging: false
# Turn off metrics gathering to save processing power (this uses a lot of CPU/Memory)
# NOTE: This could be turned back on if the appliance node is powerful enough
# If you turn it on, make sure you set internalELKStack to true as well
enableMetrics: false
enableMetricbeat: false
# Turn off Application performance metrics to save processing power
# NOTE: This could be turned back on if you need insight into the performance of the core components
# If you turn it on, make sure you set internalELKStack to true as well
# ** You will only need this if you are doing development on core components
enableAPM: false
# Turn off the self-contained ELK stack for logging and metrics
# NOTE: Do not forget to turn this on if you enable logging, metrics, or APM
internalELKStack: false
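# For example, to gather logs on a powerful enough appliance node, flip both
# values together (a sketch):
# enableLogging: true
# internalELKStack: true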
# Make sure we use the same elasticsearch DB for logging and data when
# internal logging is turned on.
# NOTE: This should never be set to true for an appliance
seperateInternalELKStack: false
# Set kibana to use the internal ELK Stack if logging is turned on
kibana:
  elasticsearchHosts: http://datastore-master:9200
# Maximum upload file size (larger than 100MB is not recommended)
ingressAnnotations:
  nginx.ingress.kubernetes.io/proxy-body-size: 100m
ingressClassName: "public"
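# NOTE: The 100m proxy-body-size above presumably needs to stay in sync with
# configuration.submission.max_file_size further down; if you ever change
# one, change the other to match.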
# No need for the autoscaler on a low-resource system
# NOTE: If you want to turn it back on, make sure the "metrics-server" addon
# is installed on your cluster
# > sudo microk8s enable metrics-server
useAutoScaler: false
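# For example, once the metrics-server addon is enabled:
# useAutoScaler: true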
# Stop reserving CPU for components
# NOTE: Comment these lines out if you use more than 1 node for your appliance
# and the total number of cores across all your nodes is greater than or equal to 16
defaultReqCPU: "0m"
alerterReqCPU: "0m"
archiverReqCPU: "0m"
apmReqCPU: "0m"
dispatcherReqCPU: "0m"
elasticHelperReqCPU: "0m"
esMetricsReqCPU: "0m"
expiryReqCPU: "0m"
frontendReqCPU: "0m"
heartbeatReqCPU: "0m"
installJobReqCPU: "0m"
internalUIReqCPU: "0m"
ingestUIReqCPU: "0m"
ingesterReqCPU: "0m"
logstashReqCPU: "0m"
metricsReqCPU: "0m"
plumberReqCPU: "0m"
redisVolatileReqCPU: "0m"
redisPersistentReqCPU: "0m"
scalerReqCPU: "0m"
serviceServerReqCPU: "0m"
socketIOReqCPU: "0m"
statisticsReqCPU: "0m"
uiReqCPU: "0m"
updaterReqCPU: "0m"
workflowReqCPU: "0m"
# Memory reservation
defaultReqRam: 64Mi
dispatcherReqRam: 128Mi
frontendReqRam: 32Mi
ingestUIReqRam: 256Mi
internalUIReqRam: 256Mi
scalerReqRam: 256Mi
serviceServerReqRam: 256Mi
socketIOReqRam: 64Mi
uiReqRam: 256Mi
# Internal configuration for assemblyline components. See the assemblyline
# administration documentation for more details.
# https://cybercentrecanada.github.io/assemblyline4_docs/
configuration:
  core:
    scaler:
      # Allow the cluster to be overallocated up to 200%
      cpu_overallocation: 2
      service_defaults:
        min_instances: 1
  logging:
    log_level: WARNING
  services:
    # CPU reservation has been lowered so we can squeeze more services into a smaller appliance.
    # If you are having issues with services timing out a lot under heavy load, bring it back
    # to its default 0.6 value.
    cpu_reservation: 0.1
  ui:
    # !! MUST CHANGE !!
    # Replace localhost with the domain at which you will reach your cluster; this cannot be an IP.
    # If you do not have a domain, use an nip.io address. (e.g. 192.168.0.1.nip.io points to ip 192.168.0.1)
    fqdn: "localhost"
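    # For example (hypothetical values):
    # fqdn: "assemblyline.example.com"
    # fqdn: "192.168.0.1.nip.io"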
  # Max submission size in bytes (leave as is; significant tweaking is required to go beyond this)
  submission:
    max_file_size: 104857600
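    # (104857600 bytes = 100 * 1024 * 1024 = 100 MiB)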
# Configuration for the elasticsearch instance used for system data.
# NOTE: Make sure you adjust the maximum storage size to a value appropriate
# to your system as the default value is quite small.
datastore:
  replicas: 1
  esJavaOpts: "-Xms2g -Xmx2g -Dlog4j2.formatMsgNoLookups=true"
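  # NOTE: -Dlog4j2.formatMsgNoLookups=true is the Log4Shell (CVE-2021-44228)
  # mitigation. If you raise the memory request below, scale the heap
  # (-Xms/-Xmx) with it; Elasticsearch recommends keeping the heap at or
  # below half of the container's available memory.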
  resources:
    requests:
      cpu: 250m
      memory: 2Gi
  volumeClaimTemplate:
    storageClassName: microk8s-hostpath
    resources:
      requests:
        storage: 20Gi
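  # Per the NOTE above, size this for your system; a production appliance
  # might use something much larger, e.g. (hypothetical value):
  # storage: 200Gi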
  esConfig:
    elasticsearch.yml: |
      ingest.geoip.downloader.enabled: false
      logger.level: WARN
      xpack.security.enabled: true
      xpack.security.transport.ssl.enabled: true
      xpack.security.transport.ssl.verification_mode: certificate
      xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
      xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
      # xpack.security.http.ssl.enabled: ${DATASTORE_SSL_ENABLED}
      # xpack.security.http.ssl.verification_mode: full
      # xpack.security.http.ssl.key: ${DATASTORE_SSL_KEY}
      # xpack.security.http.ssl.certificate: ${DATASTORE_SSL_CERTIFICATE}
      cluster.routing.allocation.disk.watermark.low: "10gb"
      cluster.routing.allocation.disk.watermark.high: "5gb"
      cluster.routing.allocation.disk.watermark.flood_stage: "1gb"
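      # With absolute values, the watermarks are free-space thresholds:
      # below 10gb free, no new shards are allocated to this node; below
      # 5gb, Elasticsearch tries to relocate shards away; below 1gb, the
      # flood-stage block makes indices read-only.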
# Configuration for the file storage
# NOTE: Make sure you adjust the maximum amount of storage files can use
# on your system to a proper value, as the default is quite small
filestore:
  persistence:
    size: 20Gi
    storageClass: microk8s-hostpath
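    # e.g. for a larger appliance (hypothetical value): size: 500Gi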
  resources:
    requests:
      memory: 512Mi