Mirror of https://github.com/mozilla/MozDef.git
Merge branch 'master' into master
This commit is contained in:
Commit
8342769e26
@@ -17,3 +17,4 @@ cloudy_mozdef/aws_parameters.json
cloudy_mozdef/aws_parameters.sh
docs/source/_build
docs/source/_static
*.swp
CHANGELOG

@@ -5,6 +5,22 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)

## [Unreleased]

## [v1.39.0] - 2019-05-29
### Added
- Pagination of Web UI tables
- Support for SQS as a replacement for RabbitMQ for alerts
- Support for no_auth for watchlist
- Cron script for closing indexes
- Documentation on AlertActions

### Changed
- Removed dependency on the '_type' field in Elasticsearch

### Fixed
- Slackbot reconnects successfully during network errors
- Relative Kibana URLs now work correctly with protocol


## [v1.38.5] - 2019-04-09
### Added
- Support for CSS themes

@@ -76,7 +92,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
- Added checks on sending SQS messages to only accept intra-account messages
- Improved Docker performance and disk space requirements

[Unreleased]: https://github.com/mozilla/MozDef/compare/v1.38.5...HEAD
[Unreleased]: https://github.com/mozilla/MozDef/compare/v1.39.0...HEAD
[v1.39.0]: https://github.com/mozilla/MozDef/compare/v1.38.5...v1.39.0
[v1.38.5]: https://github.com/mozilla/MozDef/compare/v1.38.4...v1.38.5
[v1.38.4]: https://github.com/mozilla/MozDef/compare/v1.38.3...v1.38.4
[v1.38.3]: https://github.com/mozilla/MozDef/compare/v1.38.2...v1.38.3
@@ -1,7 +1,7 @@
[![Build Status](https://travis-ci.org/mozilla/MozDef.svg?branch=master)](https://travis-ci.org/mozilla/MozDef)
[![Documentation Status](https://readthedocs.org/projects/mozdef/badge/?version=latest)](http://mozdef.readthedocs.io/en/latest/?badge=latest)

# MozDef: Mozilla Enterprise Defense Platform
# MozDef: ![LOGO](docs/source/images/moz_defense-platform_01.png)

## Documentation:
@@ -14,6 +14,10 @@ from mozdef_util.utilities.logger import logger
class AlertDeadmanGeneric(DeadmanAlertTask):

    def main(self):
        # We override the event indices to search for
        # because our deadman alerts might look past 48 hours
        self.event_indices = ["events-weekly"]

        self._config = self.parse_json_alert_config('deadman_generic.json')
        for alert_cfg in self._config['alerts']:
            try:
@@ -35,9 +35,9 @@ class AlertWatchList(AlertTask):
        else:
            logger.error('The watchlist request failed. Status {0}.\n'.format(r))

    def process_alert(self, term):
    def process_alert(self):
        search_query = SearchQuery(minutes=20)
        content = QueryStringMatch(str(term))
        content = QueryStringMatch(str(self.watchterm))
        search_query.add_must(content)
        self.filtersManual(search_query)
        self.searchEventsSimple()
@@ -106,10 +106,7 @@ class AlertTask(Task):
        self._configureKombu()
        self._configureES()

        # We want to select all event indices
        # and filter out the window based on timestamp
        # from the search query
        self.event_indices = ["events-*"]
        self.event_indices = ['events', 'events-previous']

    def classname(self):
        return self.__class__.__name__
@@ -32,14 +32,14 @@ RABBITMQ = {
    'alertqueue': 'mozdef.alert'
}

es_server = "http://localhost:9200"

if os.getenv('OPTIONS_ESSERVERS'):
    ES = {
        'servers': [os.getenv('OPTIONS_ESSERVERS')]
    }
else:
    ES = {
        'servers': ['http://localhost:9200']
    }
    es_server = os.getenv('OPTIONS_ESSERVERS')

ES = {
    'servers': [es_server]
}

LOGGING = {
    'version': 1,
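Reading this hunk, the duplicated `ES` blocks give way to a single `es_server` variable with an environment override. A minimal sketch of the selection logic the file converges on, assuming those are the surviving lines:

```python
import os

# Default Elasticsearch server, overridden by OPTIONS_ESSERVERS if set.
es_server = "http://localhost:9200"

if os.getenv('OPTIONS_ESSERVERS'):
    es_server = os.getenv('OPTIONS_ESSERVERS')

ES = {
    'servers': [es_server]
}
```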
@@ -0,0 +1,14 @@
{
  "known": [
    {
      "range": "8.32.0.0/16",
      "site": "OFFICE1",
      "format": "{0} is in OFFICE1."
    },
    {
      "range": "4a00:7a49:232::/48",
      "site": "OFFICE2",
      "format": "{0} is in OFFICE2."
    }
  ]
}
@@ -0,0 +1,146 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation

import json
from functools import reduce
from operator import add
import os
import re

import netaddr


CONFIG_FILE = os.path.join(
    os.path.dirname(__file__),
    'ip_source_enrichment.json')


def _find_ip_addresses(string):
    '''List all of the IPv4 and IPv6 addresses found in a string.'''

    ipv4_rx = '(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
    ipv6_rx = '(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))'

    ipv4 = re.findall(ipv4_rx, string)
    ipv6 = list(map(
        lambda match: match[0] if isinstance(match, tuple) else match,
        re.findall(ipv6_rx, string)))

    return ipv4 + ipv6


def enrich(alert, known_ips):
    '''Add information to alerts containing IP addresses that describes
    the source location of the IP address if it can be determined based
    on a configured mapping.
    '''

    def find_ips(value):
        if isinstance(value, str):
            return _find_ip_addresses(value)

        if isinstance(value, list) or isinstance(value, tuple):
            found = [find_ips(item) for item in value]
            return reduce(add, found, [])

        if isinstance(value, dict):
            found = [find_ips(item) for item in value.values()]
            return reduce(add, found, [])

        return []

    def ip_in_range(ip):
        return lambda known: netaddr.IPAddress(ip) in netaddr.IPSet([known['range']])

    ips = find_ips(alert)

    alert = alert.copy()

    alert['details']['sites'] = []

    for ip in set(ips):
        matching_descriptions = filter(ip_in_range(ip), known_ips)

        for desc in matching_descriptions:
            enriched = desc['format'].format(ip, desc['site'])

            alert['summary'] += '; ' + enriched

            alert['details']['sites'].append({
                'ip': ip,
                'site': desc['site'],
            })

    return alert


def _load_config(file_path):
    '''Private

    Read and parse a file from disk as JSON into a dictionary.
    '''

    with open(file_path) as config_file:
        return json.load(config_file)


class message(object):
    '''Alert plugin interface that handles messages (alerts).
    This plugin will look for IP addresses in any of the values of an
    alert dictionary. For each IP address found, it will append some
    text to the summary of the alert to provide more information
    about where the IP originates from if it is recognized.

    The expected format of the configuration file,
    `ip_source_enrichment.json.conf`, is as follows:

    ```json
    {
      "known": [
        {
          "range": "1.2.3.4/8",
          "site": "office1",
          "format": "IPv4 {0} is from {1}"
        },
        {
          "range": "1a2b:3c4d:123::/48",
          "site": "office2",
          "format": "IPv6 {0} is from {1}"
        }
      ]
    }
    ```

    The format string can accept zero to two parameters. The first
    will be the IP address found and the second will be the
    value of the corresponding 'site'.

    The modified alert will have a `details.sites` field added to it,
    with the following form:

    ```json
    {
      "details": {
        "sites": [
          {
            "ip": "1.2.3.4",
            "site": "office1"
          },
          {
            "ip": "1a2b:3c4d:123::",
            "site": "office2"
          }
        ]
      }
    }
    ```
    '''

    def __init__(self):
        self._config = _load_config(CONFIG_FILE)

    def onMessage(self, message):
        known_ips = self._config.get('known', [])

        return enrich(message, known_ips)
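For context, a minimal usage sketch of the `enrich` function added above. The alert and `known_ips` values are invented for illustration, and running it requires the `netaddr` package:

```python
# Hypothetical demonstration of enrich(); all values are invented.
known_ips = [
    {
        "range": "8.32.0.0/16",
        "site": "OFFICE1",
        "format": "{0} is in OFFICE1."
    }
]

alert = {
    'summary': 'SSH bruteforce from 8.32.1.5',
    'details': {'sourceipaddress': '8.32.1.5'}
}

enriched = enrich(alert, known_ips)
print(enriched['summary'])
# SSH bruteforce from 8.32.1.5; 8.32.1.5 is in OFFICE1.
print(enriched['details']['sites'])
# [{'ip': '8.32.1.5', 'site': 'OFFICE1'}]
```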
@@ -0,0 +1,13 @@
{
  "elasticSearchAddress": "http://127.0.0.1:9200",
  "indicesToSearch": [
    "events-weekly"
  ],
  "maxConnections": 32,
  "matchTags": [
    "portscan"
  ],
  "searchWindow": {
    "hours": 24
  }
}
@@ -0,0 +1,205 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation


import json
import os

from mozdef_util.query_models import SearchQuery, TermMatch
from mozdef_util.elasticsearch_client import ElasticsearchClient


CONFIG_FILE = os.path.join(
    os.path.dirname(__file__),
    'port_scan_enrichment.json')

MISSING_REQUIRED_KEY_ERR_MSG = 'invalid configuration; '\
    'missing key "elasticSearchAddress" must be a URL '\
    'pointing to the ElasticSearch instance used by MozDef'


class message(object):
    '''Alert plugin that handles messages (alerts) tagged as containing
    information about a port scan having been detected. This plugin
    will add information to such alerts describing any connections
    successfully established by the IP address from which the port
    scan originates.

    The expected format of the configuration file,
    `port_scan_enrichment.json`, is as follows:

    ```json
    {
      "elasticSearchAddress": "http://127.0.0.1:9200",
      "indicesToSearch": [
        "events-weekly"
      ],
      "maxConnections": 32,
      "matchTags": [
        "portscan"
      ],
      "searchWindow": {
        "hours": 12,
        "minutes": 30,
        "seconds": 59
      }
    }
    ```

    `elasticSearchAddress` must be provided and must be a URL pointing
    to the ElasticSearch instance containing MozDef alerts.
    `indicesToSearch` is an array of names of indices to search in ES.
    If not provided or an empty array, it defaults to `["events-weekly"]`.
    `maxConnections` is the maximum number of successful
    connections to list. If set to 0, all will be listed.
    `matchTags` is a list of tags to match against. This plugin will
    run against any alert containing any of the specified tags. If
    `matchTags` is not provided or is an empty array, it will default
    to `["portscan"]`.
    The `searchWindow` option is an object containing keyword
    arguments to be passed to Python's `datetime.timedelta` function
    and can thus contain any keys corresponding to the keyword
    arguments that `datetime.timedelta` accepts.
    If `searchWindow` is not present or is an empty object, the
    default search window is 24 hours.

    The modified alert will have a `details.recentconnections` field
    appended to it, formatted like so:

    ```json
    {
      "details": {
        "recentconnections": [
          {
            "destinationipaddress": "1.2.3.4",
            "destinationport": 80,
            "timestamp": "2016-07-13 22:33:31.625443+00:00"
          }
        ]
      }
    }
    ```

    That is, each connection will be described in an array and be an
    object containing the IP address and port over which the connection
    was established and the time the connection was made.
    '''

    def __init__(self):
        config = _load_config(CONFIG_FILE)

        try:
            es_address = config['elasticSearchAddress']
        except KeyError:
            raise KeyError(MISSING_REQUIRED_KEY_ERR_MSG)

        es_client = ElasticsearchClient(es_address)

        search_indices = config.get('indicesToSearch', [])

        self.max_connections = config.get('maxConnections', 0)
        self.match_tags = config.get('matchTags', [])
        self.search_window = config.get('searchWindow', {})

        if len(search_indices) == 0:
            search_indices = ['events-weekly']

        if self.max_connections == 0:
            self.max_connections = None

        if len(self.match_tags) == 0:
            self.match_tags = ['portscan']

        if len(self.search_window) == 0:
            self.search_window = {'hours': 24}

        # Store our ES client in a closure bound to the plugin object.
        # The intent behind this approach is to make the interface to
        # the `enrich` function require dependency injection for testing.
        def search_fn(query):
            return query.execute(es_client, indices=search_indices)

        self.search = search_fn

    def onMessage(self, message):
        alert_tags = message.get('tags', [])

        should_enrich = any([
            tag in alert_tags
            for tag in self.match_tags
        ])

        if should_enrich:
            return enrich(
                message,
                self.search,
                self.search_window,
                self.max_connections)

        return message


def _load_config(file_path):
    '''Private

    Load the alert plugin configuration from a file.
    '''

    with open(file_path) as config_file:
        return json.load(config_file)


def take(ls, n_items=None):
    '''Take only N items from a list.'''

    if n_items is None:
        return ls

    return ls[:n_items]


def enrich(alert, search_fn, search_window, max_connections):
    '''Enrich an alert with information about recent connections made by
    the 'details.sourceipaddress'.

    `search_fn` is expected to be a function that accepts a single argument,
    a `SearchQuery` object, and returns a list of results from Elastic Search.

    `search_window` is expected to be a dictionary specifying the amount of
    time into the past to query for events.

    `max_connections` is expected to be the maximum number of connections to
    list in the modified alert or else `None` if no limit should be applied.

    Returns a modified alert based on a copy of the original.
    '''

    search_query = SearchQuery(**search_window)

    search_query.add_must([
        TermMatch('category', 'bro'),
        TermMatch('source', 'conn'),
        TermMatch(
            'details.sourceipaddress',
            alert['details']['sourceipaddress'])
    ])

    results = search_fn(search_query)

    events = [
        hit.get('_source', {})
        for hit in results.get('hits', [])
    ]

    alert['details']['recentconnections'] = []

    for event in take(events, max_connections):
        alert['details']['recentconnections'].append({
            'destinationipaddress': event['details']['destinationipaddress'],
            'destinationport': event['details']['destinationport'],
            'timestamp': event['timestamp']
        })

    return alert
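A minimal sketch of how `enrich` can be exercised with an injected stub `search_fn`, which is exactly the dependency-injection seam the closure in `__init__` is built for. All values below are invented, and `mozdef_util` must be importable since `enrich` constructs a real `SearchQuery`:

```python
# Hypothetical test double for search_fn; the event data is invented.
def fake_search_fn(search_query):
    return {
        'hits': [
            {
                '_source': {
                    'details': {
                        'destinationipaddress': '10.0.0.5',
                        'destinationport': 443
                    },
                    'timestamp': '2019-05-29 10:00:00+00:00'
                }
            }
        ]
    }

alert = {'details': {'sourceipaddress': '192.0.2.10'}}
enriched = enrich(alert, fake_search_fn, {'hours': 24}, max_connections=None)
print(enriched['details']['recentconnections'])
# [{'destinationipaddress': '10.0.0.5', 'destinationport': 443,
#   'timestamp': '2019-05-29 10:00:00+00:00'}]
```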
@@ -7,6 +7,8 @@ AMI_MAP_TEMP_FILE := /tmp/mozdef-ami-map.txt
DEV_STACK_PARAMS_FILENAME := aws_parameters.dev.json
# For more information on the rationale behind the code in STACK_PARAMS see https://github.com/aws/aws-cli/issues/2429#issuecomment-441133480
DEV_STACK_PARAMS := $(shell test -e $(DEV_STACK_PARAMS_FILENAME) && python -c 'import json,sys;f=open(sys.argv[1]);print(" ".join([",".join(["%s=\\\"%s\\\""%(k,v) for k,v in x.items()]) for x in json.load(f)]));f.close()' $(DEV_STACK_PARAMS_FILENAME))
OIDC_CLIENT_ID := $(shell test -e $(DEV_STACK_PARAMS_FILENAME) && python -c 'import json,sys;f=open(sys.argv[1]);print(next((x["ParameterValue"] for x in json.load(f) if x["ParameterKey"]=="OIDCClientId"),""));f.close()' $(DEV_STACK_PARAMS_FILENAME))
DOMAIN_NAME := $(shell test -e $(DEV_STACK_PARAMS_FILENAME) && python -c 'import json,sys;f=open(sys.argv[1]);print(next((x["ParameterValue"] for x in json.load(f) if x["ParameterKey"]=="DomainName"),""));f.close()' $(DEV_STACK_PARAMS_FILENAME))
# MozDef uses a nested CF stack, the mozdef-parent.yml will tie all child stacks together and load them from S3
# See also mozdef.infosec.mozilla.org bucket
S3_DEV_BUCKET_NAME := mozdef.infosec.allizom.org

@@ -39,6 +41,7 @@ packer-build-github: ## Build the base AMI with packer
create-dev-stack: test ## Create everything you need for a fresh new stack!
	@export AWS_REGION=$(AWS_REGION)
	@echo "Make sure you have an environment variable OIDC_CLIENT_SECRET set."
	@test -n "$(OIDC_CLIENT_SECRET_PARAM_ARG)" -a -n "$(OIDC_CLIENT_ID)" -o -z "$(OIDC_CLIENT_SECRET_PARAM_ARG)" -a -z "$(OIDC_CLIENT_ID)"
	aws cloudformation create-stack --stack-name $(STACK_NAME) --template-url $(S3_DEV_STACK_URI)mozdef-parent.yml \
	  --capabilities CAPABILITY_IAM \
	  --parameters $(OIDC_CLIENT_SECRET_PARAM_ARG) \

@@ -53,6 +56,7 @@ create-dev-s3-bucket:
.PHONY: update-dev-stack
update-dev-stack: test ## Updates the nested stack on AWS
	@export AWS_REGION=$(AWS_REGION)
	@test -n "$(OIDC_CLIENT_SECRET_PARAM_ARG)" -a -n "$(OIDC_CLIENT_ID)" -o -z "$(OIDC_CLIENT_SECRET_PARAM_ARG)" -a -z "$(OIDC_CLIENT_ID)"
	aws cloudformation update-stack --stack-name $(STACK_NAME) --template-url $(S3_DEV_STACK_URI)mozdef-parent.yml \
	  --capabilities CAPABILITY_IAM \
	  --parameters $(OIDC_CLIENT_SECRET_PARAM_ARG) \

@@ -93,3 +97,7 @@ diff-dev-templates:
.PHONY: diff-prod-templates
diff-prod-templates:
	tempdir=`mktemp --directory`; aws s3 sync $(S3_PROD_BUCKET_URI) "$$tempdir" --exclude="*" --include="*.yml"; diff --recursive --unified "$$tempdir" cloudformation; rm -rf "$$tempdir"

.PHONY: bind-domain-name
bind-domain-name:
	ci/bind_domain_name "$(DOMAIN_NAME)" "$(STACK_NAME)"
@@ -0,0 +1,33 @@
#!/bin/bash

# Exit on any error
set -e

DOMAIN_NAME="$1"
STACK_NAME="$2"
test -n "${DOMAIN_NAME}" -a -n "${STACK_NAME}"

DOMAIN_NAME_ZONE="${DOMAIN_NAME#*.}."
ZONE_ID="$(aws route53 list-hosted-zones-by-name --dns-name ${DOMAIN_NAME_ZONE} --query "HostedZones[?Name == '${DOMAIN_NAME_ZONE}'].Id" --output text)"
INSTANCE_STACK_ARN="$(aws cloudformation describe-stack-resource --stack-name ${STACK_NAME} --logical-resource-id MozDefInstance --query 'StackResourceDetail.PhysicalResourceId' --output text)"
instance_stack_name_prefix="${INSTANCE_STACK_ARN##*:stack/}"
INSTANCE_STACK_NAME="${instance_stack_name_prefix%%/*}"
ELB_ARN="$(aws cloudformation describe-stack-resource --stack-name ${INSTANCE_STACK_NAME} --logical-resource-id MozDefElasticLoadBalancingV2LoadBalancer --query 'StackResourceDetail.PhysicalResourceId' --output text)"
#elb_name_prefix="${ELB_ARN##*:loadbalancer/app/}"
#ELB_NAME="${elb_name_prefix%%/*}"
ELB_DNS_NAME=$(aws elbv2 describe-load-balancers --load-balancer-arns ${ELB_ARN} --query 'LoadBalancers[0].DNSName' --output text)
ELB_HOSTED_ZONE_ID=$(aws elbv2 describe-load-balancers --load-balancer-arns ${ELB_ARN} --query 'LoadBalancers[0].CanonicalHostedZoneId' --output text)
CHANGE_BATCH=$(cat <<END_HEREDOC
{"Changes": [{"Action": "UPSERT", "ResourceRecordSet": {"Name": "${DOMAIN_NAME}", "Type": "A", "AliasTarget": {"HostedZoneId": "${ELB_HOSTED_ZONE_ID}", "DNSName": "${ELB_DNS_NAME}", "EvaluateTargetHealth": true}}}]}
END_HEREDOC
)
echo "Changing Route53 ${DOMAIN_NAME} to ${ELB_DNS_NAME} in ELB Hosted Zone ${ELB_HOSTED_ZONE_ID}"
CHANGE_ID=$(aws route53 change-resource-record-sets --hosted-zone-id ${ZONE_ID} --change-batch "${CHANGE_BATCH}" --query 'ChangeInfo.Id' --output text)
CHANGE_STATUS=$(aws route53 get-change --id ${CHANGE_ID} --query 'ChangeInfo.Status' --output text)
echo "DNS Change is ${CHANGE_STATUS}"
while [ "${CHANGE_STATUS}" = "PENDING" ]; do
    echo -n "."
    sleep 5
    CHANGE_STATUS=$(aws route53 get-change --id ${CHANGE_ID} --query 'ChangeInfo.Status' --output text)
done
echo "DNS Change is ${CHANGE_STATUS}"
@@ -25,6 +25,7 @@ echo " Head Ref : ${CODEBUILD_WEBHOOK_HEAD_REF}"
echo " Trigger : ${CODEBUILD_WEBHOOK_TRIGGER}"

if [[ "branch/master" == "${CODEBUILD_WEBHOOK_TRIGGER}" \
      || "branch/reinforce2019" == "${CODEBUILD_WEBHOOK_TRIGGER}" \
      || "${CODEBUILD_WEBHOOK_TRIGGER}" =~ ^tag\/v[0-9]+\.[0-9]+\.[0-9]+(\-(prod|pre|testing))?$ ]]; then
  echo "Codebuild is ubuntu 14.04. Installing packer in order to compensate. Someone should build a CI docker container ;)"
  wget -nv https://releases.hashicorp.com/packer/1.3.5/packer_1.3.5_linux_amd64.zip
@@ -19,10 +19,16 @@ sed '/# INSERT MAPPING HERE.*/{
  r '"${AMI_MAP_TEMP_FILE}"'
  }' cloudformation/mozdef-parent.yml > ${TMPDIR}/mozdef-parent.yml

echo "Injecting the region AMI mapping into the mozdef-parent-reinforce.yml CloudFormation template"
sed '/# INSERT MAPPING HERE.*/{
  s/# INSERT MAPPING HERE.*//g
  r '"${AMI_MAP_TEMP_FILE}"'
  }' cloudformation/mozdef-parent-reinforce.yml > ${TMPDIR}/mozdef-parent-reinforce.yml

echo "Uploading CloudFormation templates to S3 directory ${VERSIONED_BUCKET_URI}/"
# Sync all .yml files except mozdef-parent.yml
aws s3 sync cloudformation/ ${VERSIONED_BUCKET_URI} --exclude="*" --include="*.yml" --exclude="mozdef-parent.yml"
# Copy the modified mozdef-parent.yml from TMPDIR to S3
aws s3 cp ${TMPDIR}/mozdef-parent.yml ${VERSIONED_BUCKET_URI}/

aws s3 cp ${TMPDIR}/mozdef-parent-reinforce.yml ${VERSIONED_BUCKET_URI}/
rm -rf "${TMPDIR}"
@@ -0,0 +1,63 @@
AWSTemplateFormatVersion: 2010-09-09
Description: Setup an alert writers environment for use with MozDef for AWS. Note this is PoC only.
Parameters:
  VpcId:
    Type: AWS::EC2::VPC::Id
    Description: 'The VPC ID of the VPC to deploy in (Example : vpc-abcdef12)'
  PublicSubnetIds:
    Type: List<AWS::EC2::Subnet::Id>
    Description: 'A comma delimited list of public subnet IDs (Example: subnet-abcdef12,subnet-bcdef123)'
  MozDefSecurityGroup:
    Type: AWS::EC2::SecurityGroup::Id
    Description: The security group the MozDef instance runs in. This is needed to access ES.
  ESUrl:
    Type: String
    Description: 'The location of elasticsearch deployed in managed-es.'
Resources:
  MozDefLayer:
    Type: AWS::Lambda::LayerVersion
    Properties:
      LayerName: MozDef
      Description: Mozilla Enterprise Defense Platform Dependencies
      Content:
        S3Bucket: public.us-west-2.security.allizom.org
        S3Key: mozdef-lambda-layer/layer-latest.zip
      CompatibleRuntimes:
        - python2.7
      LicenseInfo: 'MPL 2.0'
  LambdalertIAMRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: 2012-10-17
        Statement:
          - Effect: Allow
            Principal:
              Service: lambda.amazonaws.com
            Action: sts:AssumeRole
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole
  AlertWritersEnv:
    Type: "AWS::Lambda::Function"
    Properties:
      Handler: "lambdalert.handle"
      Role:
        Fn::GetAtt:
          - "LambdalertIAMRole"
          - "Arn"
      Code:
        S3Bucket: public.us-west-2.security.allizom.org
        S3Key: mozdef-lambda-layer/function-latest.zip
      Layers:
        - !Ref MozDefLayer
      Environment:
        Variables:
          OPTIONS_ESSERVERS: !Ref ESUrl
          OPTIONS_MQPROTOCOL: sqs
      VpcConfig:
        SecurityGroupIds:
          - !Ref MozDefSecurityGroup
        SubnetIds: !Ref PublicSubnetIds
      ReservedConcurrentExecutions: 1
      Runtime: "python2.7"
      Timeout: 120
@@ -0,0 +1,45 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: Template to build out users for an insecure account; this is only used for training and testing.
Parameters:
  SNSReceiverArn:
    Type: String
    Description: The ARN of the SNS topic to post credentials to. Note that this leaks credentials.
Resources:
  IAMUser1:
    Type: AWS::IAM::User
    Properties:
      Path: /
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/AdministratorAccess
  IAMUser1Keys:
    Type: AWS::IAM::AccessKey
    Properties:
      UserName: !Ref 'IAMUser1'
  CaptureSetupData:
    Type: Custom::DataCapture
    Version: '1.0'
    Properties:
      ServiceToken: !Ref SNSReceiverArn
      AccessKey: !Ref 'IAMUser1Keys'
      SecretAccessKey: !GetAtt 'IAMUser1Keys.SecretAccessKey'
      lbURL: !GetAtt 'MyLoadBalancer.DNSName'
      AccountID: !Ref 'AWS::AccountId'
  MyLoadBalancer:
    Type: AWS::ElasticLoadBalancing::LoadBalancer
    Properties:
      AvailabilityZones:
        - us-west-2a
      Listeners:
        - LoadBalancerPort: '80'
          InstancePort: '80'
          Protocol: HTTP
Outputs:
  AccessKey:
    Description: AccessKey
    Value: !Ref 'IAMUser1Keys'
  SecretAccessKey:
    Description: SecretAccessKey
    Value: !GetAtt 'IAMUser1Keys.SecretAccessKey'
  LBUrl:
    Description: lburl
    Value: !GetAtt 'MyLoadBalancer.DNSName'
@@ -280,9 +280,8 @@ Resources:
              OPTIONS_TASKEXCHANGE=${CloudTrailSQSNotificationQueueName}
            path: /opt/mozdef/docker/compose/cloudy_mozdef_mq_cloudtrail.env
          - content: |
              # This is the additional worker reserved for future use
              OPTIONS_TASKEXCHANGE=${MozDefSQSQueueName}
            path: /opt/mozdef/docker/compose/cloudy_mozdef_mq_sns_sqs.env
            path: /opt/mozdef/docker/compose/cloudy_mozdef_mq_sqs.env
          - content: |
              [Unit]
              Description=Docker Compose container starter

@@ -312,7 +311,7 @@ Resources:
      - chmod --verbose 600 /opt/mozdef/docker/compose/rabbitmq.env
      - chmod --verbose 600 /opt/mozdef/docker/compose/cloudy_mozdef.env
      - chmod --verbose 600 /opt/mozdef/docker/compose/cloudy_mozdef_kibana.env
      - chmod --verbose 600 /opt/mozdef/docker/compose/cloudy_mozdef_mq_sns_sqs.env
      - chmod --verbose 600 /opt/mozdef/docker/compose/cloudy_mozdef_mq_sqs.env
      - mkdir --verbose --parents ${EFSMountPoint}
      - echo '*.* @@127.0.0.1:514' >> /etc/rsyslog.conf
      - systemctl enable rsyslog
@@ -0,0 +1,366 @@
AWSTemplateFormatVersion: 2010-09-09
Description: Deploy MozDef into AWS
Metadata:
  'AWS::CloudFormation::Interface':
    ParameterGroups:
      - Label:
          default: EC2 Instance
        Parameters:
          - InstanceType
          - KeyName
          - SSHIngressCIDR
      - Label:
          default: Certificate
        Parameters:
          - ACMCertArn
      - Label:
          default: OIDC Configuration (optional) If not set this will use basic auth.
        Parameters:
          - OIDCAuthorizationEndpoint
          - OIDCClientId
          - OIDCClientSecret
          - OIDCIssuer
          - OIDCTokenEndpoint
          - OIDCUserInfoEndpoint
      - Label:
          default: Experimental Features
        Parameters:
          - LeakCredentialSNSArn
    ParameterLabels:
      InstanceType:
        default: EC2 Instance Type
      KeyName:
        default: EC2 SSH Key Name
      SSHIngressCIDR:
        default: Inbound SSH allowed IP address CIDR
      DomainName:
        default: FQDN to host MozDef at
      ACMCertArn:
        default: ACM Certificate ARN
      OIDCAuthorizationEndpoint:
        default: OIDC authorization endpoint.
      OIDCClientId:
        default: OIDC Client ID.
      OIDCClientSecret:
        default: OIDC Client Secret.
      OIDCIssuer:
        default: OIDC issuer.
      OIDCTokenEndpoint:
        default: OIDC oauth token endpoint.
      OIDCUserInfoEndpoint:
        default: OIDC user info endpoint.
      LeakCredentialSNSArn:
        default: Arn of the SNS topic to post admin creds to.
Parameters:
  InstanceType:
    Type: String
    Description: EC2 instance type, e.g. m1.small, m1.large, etc.
    Default: m5.large
  KeyName:
    Type: AWS::EC2::KeyPair::KeyName
    Description: Name of an existing EC2 KeyPair to enable SSH access to the web server
  SSHIngressCIDR:
    Type: String
    AllowedPattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'
    ConstraintDescription: A valid CIDR (e.g. 203.0.113.0/24)
    Description: The CIDR of IP addresses from which to allow inbound SSH connections
  DomainName:
    Type: String
    Description: The fully qualified DNS name you will host CloudyMozDef at.
    Default: cloudymozdef.security.allizom.org
  ACMCertArn:
    Type: String
    Default: Unset
    Description: "The ARN of your pre-issued ACM cert. (Example: arn:aws:acm:us-west-2:123456789012:certificate/abcdef01-2345-6789-abcd-ef0123456789)"
  OIDCAuthorizationEndpoint:
    Type: String
    Default: Unset
    ConstraintDescription: A valid URL
    Description: "The URL of the authorization endpoint for your OIDC provider, generally found at its well-known endpoint (Example: https://auth.example.com/.well-known/openid-configuration)"
  OIDCClientId:
    Type: String
    Default: Unset
    Description: The client ID that your OIDC provider issues you for your Mozdef instance.
  OIDCClientSecret:
    Type: String
    Default: Unset
    Description: The secret that your OIDC provider issues you for your Mozdef instance.
    NoEcho: true
  OIDCIssuer:
    Type: String
    Default: Unset
    Description: Generally can be found at the .well-known endpoint for your provider.
  OIDCTokenEndpoint:
    Type: String
    Default: Unset
    Description: Generally can be found at the .well-known endpoint for your provider.
  OIDCUserInfoEndpoint:
    Type: String
    Default: Unset
    Description: Generally can be found at the .well-known endpoint for your provider.
  LeakCredentialSNSArn:
    Type: String
    Description: The arn of the sns topic to post a credential back to from the account. Do not use unless you are deploying this for the reinforce workshop. This will attack the MozDef account.
# A RegionMap of AMI IDs is required by AWS Marketplace https://docs.aws.amazon.com/marketplace/latest/userguide/cloudformation.html#aws-cloudformation-template-preparation
# INSERT MAPPING HERE : This template does not work in this state. The mapping is replaced with a working AWS region to AMI ID mapping as well as a variable map with the S3TemplateLocationPrefix by cloudy_mozdef/ci/publish_versioned_templates. The resulting functioning CloudFormation template is uploaded to S3 for the version being built.
Conditions:
  LeakACredential: !Not [!Equals [!Ref LeakCredentialSNSArn, ""]]
Resources:
  LeakedCredentials:
    Condition: LeakACredential
    Type: AWS::CloudFormation::Stack
    Properties:
      Parameters:
        SNSReceiverArn: !Ref LeakCredentialSNSArn
      Tags:
        - Key: application
          Value: mozdef
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ] , mozdef-credential-leak.yml ] ]
  MozDefVPC:
    Type: AWS::CloudFormation::Stack
    Properties:
      Tags:
        - Key: application
          Value: mozdef
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ] , mozdef-vpc.yml ] ]
  MozDefSecurityGroups:
    Type: AWS::CloudFormation::Stack
    Properties:
      Parameters:
        VpcId: !GetAtt MozDefVPC.Outputs.VpcId
        SSHIngressCIDR: !Ref SSHIngressCIDR
      Tags:
        - Key: application
          Value: mozdef
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ] , mozdef-security-group.yml ] ]
  MozDefIAMRoleAndInstanceProfile:
    Type: AWS::CloudFormation::Stack
    Properties:
      Parameters:
        CloudTrailS3BucketName: !GetAtt MozDefCloudTrail.Outputs.CloudTrailS3BucketName
        CloudTrailSQSQueueArn: !GetAtt MozDefCloudTrail.Outputs.CloudTrailSQSQueueArn
        MozDefSQSQueueArn: !GetAtt MozDefSQS.Outputs.SQSQueueArn
        MozDefAlertSqsQueueArn: !GetAtt MozDefSQS.Outputs.AlertTaskSQSQueueArn
        # CloudTrailS3BucketIAMRoleArn we leave empty as we will consume CloudTrail logs from our own account
        ESServiceLinkedRoleExists: !GetAtt ESServiceLinkedRoleExists.RoleExists
      Tags:
        - Key: application
          Value: mozdef
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], base-iam.yml ] ]
  MozDefInstance:
    Type: AWS::CloudFormation::Stack
    Properties:
      Parameters:
        VpcId: !GetAtt MozDefVPC.Outputs.VpcId
        InstanceType: !Ref InstanceType
        KeyName: !Ref KeyName
        IamInstanceProfile: !GetAtt MozDefIAMRoleAndInstanceProfile.Outputs.InstanceProfileArn
        AutoScaleGroupSubnetIds: !Join [ ',', [!GetAtt MozDefVPC.Outputs.Subnet1, !GetAtt MozDefVPC.Outputs.Subnet2, !GetAtt MozDefVPC.Outputs.Subnet3 ]]
        AMIImageId: !FindInMap [ RegionMap, !Ref 'AWS::Region', HVM64 ]
        EFSID: !GetAtt MozDefEFS.Outputs.EFSID
        MozDefSecurityGroupId: !GetAtt MozDefSecurityGroups.Outputs.MozDefSecurityGroupId
        MozDefLoadBalancerSecurityGroupId: !GetAtt MozDefSecurityGroups.Outputs.MozDefLoadBalancerSecurityGroupId
        MozDefACMCertArn: !Ref ACMCertArn
        ESURL: !GetAtt MozDefES.Outputs.ElasticsearchURL
        KibanaURL: !GetAtt MozDefES.Outputs.ElasticsearchKibanaURL
        KibanaDomainOnlyURL: !GetAtt MozDefES.Outputs.ElasticsearchDomainOnlyURL
        OIDCClientId: !Ref OIDCClientId
        OIDCClientSecret: !Ref OIDCClientSecret
        OIDCAuthorizationEndpoint: !Ref OIDCAuthorizationEndpoint
        OIDCIssuer: !Ref OIDCIssuer
        OIDCTokenEndpoint: !Ref OIDCTokenEndpoint
        OIDCUserInfoEndpoint: !Ref OIDCUserInfoEndpoint
        CloudTrailSQSNotificationQueueName: !GetAtt MozDefCloudTrail.Outputs.CloudTrailSQSQueueName
        MozDefSQSQueueName: !GetAtt MozDefSQS.Outputs.SQSQueueName
        DomainName: !Ref DomainName
        AlertQueueUrl: !GetAtt MozDefSQS.Outputs.AlertTaskSQSQueueUrl
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], mozdef-instance.yml ] ]
  MozDefES:
    Type: AWS::CloudFormation::Stack
    DependsOn: MozDefIAMRoleAndInstanceProfile
    Properties:
      Parameters:
        SubnetIds: !Join [ ',', [!GetAtt MozDefVPC.Outputs.Subnet1, !GetAtt MozDefVPC.Outputs.Subnet2, !GetAtt MozDefVPC.Outputs.Subnet3 ]]
        BlockStoreSizeGB: '100'
        VpcId: !GetAtt MozDefVPC.Outputs.VpcId
        MozDefInstanceSecurityGroup: !GetAtt MozDefSecurityGroups.Outputs.MozDefSecurityGroupId
        ESInstanceCount: '1'
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], mozdef-es.yml ] ]
  MozDefEFS:
    Type: AWS::CloudFormation::Stack
    Properties:
      Parameters:
        VpcId: !GetAtt MozDefVPC.Outputs.VpcId
        SubnetList: !Join [ ',', [!GetAtt MozDefVPC.Outputs.Subnet1, !GetAtt MozDefVPC.Outputs.Subnet2, !GetAtt MozDefVPC.Outputs.Subnet3 ]]
        NumberOfSubnets: !GetAtt NumberOfSubnets.Length
        MozDefSecurityGroup: !GetAtt MozDefSecurityGroups.Outputs.MozDefSecurityGroupId
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], mozdef-efs.yml ] ]
  MozDefSQS:
    Type: AWS::CloudFormation::Stack
    Properties:
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], mozdef-sqs.yml ] ]
  MozDefCloudTrail:
    Type: AWS::CloudFormation::Stack
    Properties:
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], mozdef-cloudtrail.yml ] ]
  MozDefVPCFlowLogs:
    Type: AWS::CloudFormation::Stack
    Properties:
      Parameters:
        VpcId: !GetAtt MozDefVPC.Outputs.VpcId
        MozDefSQSQueueArn: !GetAtt MozDefSQS.Outputs.SQSQueueArn
        MozDefSQSQueueUrl: !GetAtt MozDefSQS.Outputs.SQSQueueUrl
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], mozdef-vpc-flow-logs.yml ] ]
  CloudFormationLambdaIAMRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: 2012-10-17
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - lambda.amazonaws.com
            Action:
              - sts:AssumeRole
      Policies:
        - PolicyName: AllowLambdaLogging
          PolicyDocument:
            Version: 2012-10-17
            Statement:
              - Effect: Allow
                Action:
                  - logs:*
                  - iam:ListRoles
                Resource: '*'
  GetArrayLengthLambdaFunction:
    Type: AWS::Lambda::Function
    DependsOn: CloudFormationLambdaIAMRole
    # This DependsOn shouldn't be needed because the "Role" value is set to
    # "!GetAtt CloudFormationLambdaIAMRole.Arn" but without DependsOn the error
    # "Template error: IAM role mozdef-aws-nested-CloudFormationLambdaIAMRole-108UCUPESC6WG doesn't exist"
    # occurs on stack creation for this Lambda Function resource. The DependsOn
    # prevents the error.
    Properties:
      Code:
        ZipFile: |
          import cfnresponse
          import secrets, string
          def handler(event, context):
              length = len(event['ResourceProperties']['Array'])
              physical_id = ''.join(secrets.choice(string.ascii_uppercase + string.digits) for i in range(13))
              cfnresponse.send(event, context, cfnresponse.SUCCESS, {'Length': length}, "GetArrayLength-%s" % physical_id)
      Handler: index.handler
      Runtime: python3.6
      Role: !GetAtt CloudFormationLambdaIAMRole.Arn
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      Timeout: 20
  NumberOfSubnets:
    Type: AWS::CloudFormation::CustomResource
    Properties:
      Array:
        - !GetAtt MozDefVPC.Outputs.Subnet1
        - !GetAtt MozDefVPC.Outputs.Subnet2
        - !GetAtt MozDefVPC.Outputs.Subnet3
      ServiceToken: !GetAtt GetArrayLengthLambdaFunction.Arn
  DoesRoleExistLambdaFunction:
    Type: AWS::Lambda::Function
    DependsOn: CloudFormationLambdaIAMRole
    # This DependsOn shouldn't be needed because the "Role" value is set to
    # "!GetAtt CloudFormationLambdaIAMRole.Arn" but without DependsOn the error
    # "Template error: IAM role mozdef-aws-nested-CloudFormationLambdaIAMRole-108UCUPESC6WG doesn't exist"
    # occurs on stack creation for this Lambda Function resource. The DependsOn
    # prevents the error.
    Properties:
      Code:
        ZipFile: |
          import cfnresponse
          import boto3, secrets, string
          def handler(event, context):
              paginator = boto3.client('iam').get_paginator('list_roles')
              args = {'PathPrefix': event['ResourceProperties']['PathPrefix']} if 'PathPrefix' in event['ResourceProperties'] else {}
              iterator = paginator.paginate(**args).search(
                  "Roles[?RoleName == '%s'][]" % event['ResourceProperties']['RoleName'])
              response = {'RoleExists': len([x for x in iterator]) > 0}
              physical_id = ''.join(
                  secrets.choice(string.ascii_uppercase + string.digits) for i in
                  range(13))
              cfnresponse.send(event, context, cfnresponse.SUCCESS, response,
                               "DoesRoleExist-%s" % physical_id)
      Handler: index.handler
      Runtime: python3.6
      Role: !GetAtt CloudFormationLambdaIAMRole.Arn
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      Timeout: 20
  ESServiceLinkedRoleExists:
    Type: AWS::CloudFormation::CustomResource
    Properties:
      RoleName: AWSServiceRoleForAmazonElasticsearchService
      PathPrefix: '/aws-service-role/es.amazonaws.com/'
      ServiceToken: !GetAtt DoesRoleExistLambdaFunction.Arn
  MozDefAlertWriterEnv:
    Type: AWS::CloudFormation::Stack
    Properties:
      Parameters:
        VpcId: !GetAtt MozDefVPC.Outputs.VpcId
        PublicSubnetIds: !Join [ ',', [!GetAtt MozDefVPC.Outputs.Subnet1, !GetAtt MozDefVPC.Outputs.Subnet2, !GetAtt MozDefVPC.Outputs.Subnet3 ]]
        MozDefSecurityGroup: !GetAtt MozDefSecurityGroups.Outputs.MozDefSecurityGroupId
        ESUrl: !GetAtt MozDefES.Outputs.ElasticsearchURL
      Tags:
        - Key: application
          Value: mozdef
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ] , mozdef-alert-developer.yml ] ]
Outputs:
  LoadBalancerDNSName:
    Description: The DNS name of the ALB hosting MozDef. If using OIDC or SSL point your DNS at this. If using basic auth no DNS is necessary.
    Value: !GetAtt MozDefInstance.Outputs.LoadBalancerDNSName
@@ -215,6 +215,19 @@ Resources:
        - Key: stack
          Value: !Ref AWS::StackName
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], mozdef-cloudtrail.yml ] ]
  MozDefVPCFlowLogs:
    Type: AWS::CloudFormation::Stack
    Properties:
      Parameters:
        VpcId: !Ref VpcId
        MozDefSQSQueueArn: !GetAtt MozDefSQS.Outputs.SQSQueueArn
        MozDefSQSQueueUrl: !GetAtt MozDefSQS.Outputs.SQSQueueUrl
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      TemplateURL: !Join [ '', [ !FindInMap [ VariableMap, Variables, S3TemplateLocation ], mozdef-vpc-flow-logs.yml ] ]
  CloudFormationLambdaIAMRole:
    Type: AWS::IAM::Role
    Properties:
@@ -52,6 +52,9 @@ Outputs:
  SQSQueueName:
    Description: Name of the SQS Queue that MozDef will consume events from
    Value: !GetAtt MozDefSQSQueue.QueueName
  SQSQueueUrl:
    Description: URL of the SQS Queue that MozDef will consume events from
    Value: !Ref MozDefSQSQueue
  AlertTaskSQSQueueArn:
    Description: ARN of the SQS Queue that MozDef will consume events from
    Value: !GetAtt MozDefSQSAlertTaskQueue.Arn

@@ -60,6 +63,4 @@ Outputs:
    Value: !GetAtt MozDefSQSAlertTaskQueue.QueueName
  AlertTaskSQSQueueUrl:
    Description: The SQS queue url for the alerttask exchange as used in kombu.
    Value:
      !Join ['', ['https://', 'sqs', ".", !Ref "AWS::Region",".amazonaws.com", "/"
      , !Ref "AWS::AccountId", "/",!GetAtt MozDefSQSAlertTaskQueue.QueueName]]
    Value: !Ref MozDefSQSAlertTaskQueue
@@ -0,0 +1,176 @@
AWSTemplateFormatVersion: 2010-09-09
Description: Pipeline to send VPC Flow Logs to MozDef
Parameters:
  VpcId:
    Type: AWS::EC2::VPC::Id
    Default: vpc-dc8eacb4
    Description: 'The VPC ID of the VPC to deploy in (Example : vpc-abcdef12)'
  MozDefSQSQueueUrl:
    Type: String
    Description: 'The SQS URL to send MozDef structured events to for consumption'
  MozDefSQSQueueArn:
    Type: String
    Description: 'The SQS ARN to send MozDef structured events to for consumption'
Resources:
  LogGroup:
    Type: AWS::Logs::LogGroup
    Properties:
      RetentionInDays: 1
  FlowLogRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: 2012-10-17
        Statement:
          - Effect: Allow
            Principal:
              Service: vpc-flow-logs.amazonaws.com
            Action: sts:AssumeRole
      Policies:
        - PolicyName: AllowWriteCloudWatchLogs
          PolicyDocument:
            Version: 2012-10-17
            Statement:
              - Effect: Allow
                Action:
                  - logs:CreateLogGroup
                  - logs:CreateLogStream
                  - logs:PutLogEvents
                  - logs:DescribeLogGroups
                  - logs:DescribeLogStreams
                Resource: "*"
  FlowLog:
    Type: AWS::EC2::FlowLog
    Properties:
      DeliverLogsPermissionArn: !GetAtt FlowLogRole.Arn
      # We can't use !GetAtt LogGroup.Arn because it actually returns an Arn suffixed with ":*"
      LogDestination: !Join [ ':', [ 'arn:aws:logs', !Ref 'AWS::Region', !Ref 'AWS::AccountId', 'log-group', !Ref 'LogGroup' ] ]
      ResourceId: !Ref VpcId
      ResourceType: VPC
      TrafficType: ALL
  FlowLogProcessorRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: 2012-10-17
        Statement:
          - Effect: Allow
            Principal:
              Service: lambda.amazonaws.com
            Action: sts:AssumeRole
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
      Policies:
        - PolicyName: AllowSendToSQS
          PolicyDocument:
            Version: 2012-10-17
            Statement:
              - Effect: Allow
                Action:
                  - sqs:DeleteMessage
                  - sqs:DeleteMessageBatch
                  - sqs:GetQueueAttributes
                  - sqs:GetQueueUrl
                  - sqs:SendMessage
                  - sqs:SendMessageBatch
                Resource: !Ref MozDefSQSQueueArn
  FlowLogProcessor:
    Type: AWS::Lambda::Function
    Properties:
      Code:
        ZipFile: |
          import os, boto3, gzip, base64, json, socket, sys
          from datetime import datetime

          PROTO_NUM_MAP = {num: name[8:] for name, num in vars(socket).items() if name.startswith("IPPROTO")}
          FIELD_NAMES = [
              'version', 'account-id', 'interface-id', 'srcaddr', 'dstaddr', 'srcport',
              'dstport', 'protocol', 'packets', 'bytes', 'start', 'end', 'action',
              'log-status']

          def lambda_handler(event, context):
              client = boto3.client('sqs')
              raw_data = event.get('awslogs', {}).get('data')
              data = json.loads(
                  gzip.decompress(base64.b64decode(raw_data)).decode('utf-8'))
              entries = []
              for log_event_record in data.get('logEvents', []):
                  log_event_record_values = log_event_record['message'].split(' ')
                  log_event = {FIELD_NAMES[i]: log_event_record_values[i]
                               for i in range(len(FIELD_NAMES))}
                  if log_event.get('log-status') != 'OK':
                      print('Skipping {} entry : {}'.format(log_event.get('log-status'), log_event_record['message']))
                      continue

                  # TODO : Do we want to do something with log_status NODATA and SKIPDATA events?
                  message = dict(
                      category='vpc-flow',
                      hostname=socket.getfqdn(),
                      processid=os.getpid(),
                      processname=sys.argv[0],
                      severity='INFO',
                      source='vpc_flow')
                  message['utctimestamp'] = datetime.utcfromtimestamp(
                      int(log_event_record['timestamp'] / 1000)).strftime('%Y-%m-%dT%H:%M:%S+00:00')
                  message['summary'] = '{srcaddr}:{srcport} -> {dstaddr}:{dstport} {bytes} bytes {action}'.format(**log_event)
                  message['details'] = dict(
                      destinationipaddress=log_event['dstaddr'],
                      destinationport=int(log_event['dstport']),
                      sourceipaddress=log_event['srcaddr'],
                      sourceport=int(log_event['srcport']),
                      success=log_event['action'] == 'ACCEPT',
                      capture_window_start=datetime.utcfromtimestamp(
                          int(log_event['start'])).strftime('%Y-%m-%dT%H:%M:%S+00:00'),
                      capture_window_end=datetime.utcfromtimestamp(
                          int(log_event['end'])).strftime('%Y-%m-%dT%H:%M:%S+00:00'),
                      version=int(log_event['version']),
                      pkts=int(log_event['packets']),
                      proto=PROTO_NUM_MAP.get(int(log_event['protocol']), 'unknown').lower(),
                      recipientaccountid=log_event['account-id'],
                      interface_id=log_event['interface-id'],
                      bytes=int(log_event['bytes']))
                  entry = dict(
                      Id=log_event_record['id'],
                      MessageBody=json.dumps(message))
                  entries.append(entry)
                  print('Going to send entry : {}'.format(entry))
                  if len(entries) == 10:
                      print('sending batch')
                      response = client.send_message_batch(
                          QueueUrl=os.getenv('SQS_URL'),
                          Entries=entries)
                      # TODO : Process the response and do something about failures
                      del entries[:]
              if len(entries) > 0:
                  print('sending final batch')
                  response = client.send_message_batch(
                      QueueUrl=os.getenv('SQS_URL'),
                      Entries=entries)
      Description: Transform VPC Flow logs into MozDef events
      Environment:
        Variables:
          SQS_URL: !Ref MozDefSQSQueueUrl
      Handler: index.lambda_handler
      MemorySize: 128
      Role: !GetAtt FlowLogProcessorRole.Arn
      Runtime: python3.7
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
      Timeout: 30
  FlowLogProcessorPermission:
    Type: AWS::Lambda::Permission
    Properties:
      Action: lambda:InvokeFunction
      FunctionName: !GetAtt FlowLogProcessor.Arn
      Principal: !Join [ '.', [ 'logs', !Ref 'AWS::Region', 'amazonaws.com' ] ]
      SourceAccount: !Ref 'AWS::AccountId'
      SourceArn: !GetAtt LogGroup.Arn
  FlowLogSubscriptionFilter:
    Type: AWS::Logs::SubscriptionFilter
    Properties:
      DestinationArn: !GetAtt FlowLogProcessor.Arn
      FilterPattern: '[version, account, eni, source, destination, srcport, destport="22", protocol="6", packets, bytes, windowstart, windowend, action="ACCEPT", flowlogstatus]'
      LogGroupName: !Ref LogGroup
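The handler above receives the standard CloudWatch Logs subscription payload: base64-encoded, gzipped JSON under `event['awslogs']['data']`. A hedged sketch of a local smoke test; the flow-log record is invented, and actually sending the final batch requires `SQS_URL` and AWS credentials:

```python
# Hypothetical local test of the FlowLogProcessor lambda_handler above.
import base64
import gzip
import json

# An invented VPC flow-log line with the 14 fields FIELD_NAMES expects,
# ending in action=ACCEPT and log-status=OK.
record = '2 123456789012 eni-abc123de 10.0.0.5 10.0.1.7 44321 22 6 10 840 1432917027 1432917142 ACCEPT OK'
payload = {
    'logEvents': [
        {'id': '1', 'timestamp': 1559112000000, 'message': record}
    ]
}
event = {
    'awslogs': {
        'data': base64.b64encode(
            gzip.compress(json.dumps(payload).encode('utf-8'))).decode('utf-8')
    }
}

# Requires SQS_URL and AWS credentials to be set for the final batch send.
lambda_handler(event, None)
```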
@@ -0,0 +1,133 @@
AWSTemplateFormatVersion: "2010-09-09"
Description: "Create a vpc for Mozilla Deployment of Cloudy Mozdef."
Resources:
  InternetGateway:
    Type: "AWS::EC2::InternetGateway"
    Properties:
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
  VPC:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "10.0.0.0/16"
      EnableDnsSupport: True
      EnableDnsHostnames: True
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
  AttachGateway:
    Type: AWS::EC2::VPCGatewayAttachment
    Properties:
      VpcId:
        Ref: VPC
      InternetGatewayId:
        Ref: InternetGateway
  RouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: VPC
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
  DefaultRoute:
    Type: AWS::EC2::Route
    Properties:
      RouteTableId:
        Ref: RouteTable
      DestinationCidrBlock: 0.0.0.0/0
      GatewayId:
        Ref: InternetGateway
  Subnet1:
    Type: "AWS::EC2::Subnet"
    Properties:
      AvailabilityZone:
        Fn::Select:
          - 0
          - Fn::GetAZs: ""
      CidrBlock: "10.0.0.0/24"
      MapPublicIpOnLaunch: True
      VpcId:
        Ref: VPC
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
  Subnet2:
    Type: "AWS::EC2::Subnet"
    Properties:
      AvailabilityZone:
        Fn::Select:
          - 1
          - Fn::GetAZs: ""
      CidrBlock: "10.0.1.0/24"
      MapPublicIpOnLaunch: True
      VpcId:
        Ref: VPC
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
  Subnet3:
    Type: "AWS::EC2::Subnet"
    Properties:
      AvailabilityZone:
        Fn::Select:
          - 2
          - Fn::GetAZs: ""
      CidrBlock: "10.0.2.0/24"
      MapPublicIpOnLaunch: True
      VpcId:
        Ref: VPC
      Tags:
        - Key: application
          Value: mozdef
        - Key: stack
          Value: !Ref AWS::StackName
  RouteAc1:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: RouteTable
      SubnetId:
        Ref: Subnet1
  RouteAc2:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: RouteTable
      SubnetId:
        Ref: Subnet2
  RouteAc3:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: RouteTable
      SubnetId:
        Ref: Subnet3
Outputs:
  VpcId:
    Description: The ID of the VPC created.
    Value:
      Ref: VPC
  Subnet1:
    Description: The id of subnet1 in the first az.
    Value:
      Ref: Subnet1
  Subnet2:
    Description: The id of subnet2 in the second az.
    Value:
      Ref: Subnet2
  Subnet3:
    Description: The id of subnet3 in the third az.
    Value:
      Ref: Subnet3
@@ -0,0 +1,63 @@
AWSTemplateFormatVersion: 2010-09-09
Description: Setup an alert writers environment for use with MozDef for AWS. Note this is PoC only.
Parameters:
  VpcId:
    Type: AWS::EC2::VPC::Id
    Description: 'The VPC ID of the VPC to deploy in (Example : vpc-abcdef12)'
  PublicSubnetIds:
    Type: List<AWS::EC2::Subnet::Id>
    Description: 'A comma delimited list of public subnet IDs (Example: subnet-abcdef12,subnet-bcdef123)'
  MozDefSecurityGroup:
    Type: AWS::EC2::SecurityGroup::Id
    Description: The security group the MozDef instance runs in. This is needed to access ES.
  ESUrl:
    Type: String
    Description: 'The location of elasticsearch deployed in managed-es.'
Resources:
  MozDefLayer:
    Type: AWS::Lambda::LayerVersion
    Properties:
      LayerName: MozDef
      Description: Mozilla Enterprise Defense Platform Dependencies
      Content:
        S3Bucket: public.us-west-2.security.allizom.org
        S3Key: mozdef-lambda-layer/layer-latest.zip
      CompatibleRuntimes:
        - python2.7
      LicenseInfo: 'MPL 2.0'
  LambdalertIAMRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: 2012-10-17
        Statement:
          - Effect: Allow
            Principal:
              Service: lambda.amazonaws.com
            Action: sts:AssumeRole
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole
  AlertWritersEnv:
    Type: "AWS::Lambda::Function"
    Properties:
      Handler: "lambdalert.handle"
      Role:
        Fn::GetAtt:
          - "LambdalertIAMRole"
          - "Arn"
      Code:
        S3Bucket: public.us-west-2.security.allizom.org
        S3Key: mozdef-lambda-layer/function-latest.zip
      Layers:
        - !Ref MozDefLayer
      Environment:
        Variables:
          OPTIONS_ESSERVERS: !Ref ESUrl
          OPTIONS_MQPROTOCOL: sqs
      VpcConfig:
        SecurityGroupIds:
          - !Ref MozDefSecurityGroup
        SubnetIds: !Ref PublicSubnetIds
      ReservedConcurrentExecutions: 1
      Runtime: "python2.7"
      Timeout: 120
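With a stack from this template up, the alert-writer function can be smoke-tested directly. A sketch with boto3; the stack name alert-writers-env is an assumption, and the physical function name is looked up from the stack because CloudFormation generates it:

import boto3

cloudformation = boto3.client("cloudformation", region_name="us-west-2")
awslambda = boto3.client("lambda", region_name="us-west-2")

# "alert-writers-env" as the stack name is an assumption
resource = cloudformation.describe_stack_resource(
    StackName="alert-writers-env", LogicalResourceId="AlertWritersEnv"
)
function_name = resource["StackResourceDetail"]["PhysicalResourceId"]

# fire the handler once and print the result payload
response = awslambda.invoke(FunctionName=function_name, InvocationType="RequestResponse")
print(response["StatusCode"], response["Payload"].read())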
@@ -4,3 +4,4 @@ build/lib/*
build/python/*
lib/*
python/*
build/lambdalert.py
@@ -0,0 +1,66 @@
#!/usr/bin/env python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation

import logging
import sys
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch


logger = logging.getLogger(__name__)


def setup_logging():
    logger = logging.getLogger()
    h = logging.StreamHandler(sys.stdout)
    # attach the handler so log output actually reaches stdout
    logger.addHandler(h)
    logger.setLevel(logging.DEBUG)
    return logger


class AlertCloudtrailLoggingDisabled(AlertTask):
    def _configureKombu(self):
        """Override the normal behavior of this in order to run in lambda."""
        pass

    def alertToMessageQueue(self, alertDict):
        """Override the normal behavior of this in order to run in lambda."""
        pass

    def main(self):
        # How many minutes back in time would you like to search?
        search_query = SearchQuery(minutes=15)

        # What would you like to search for?
        # search_query.add_must([
        #     TermMatch('source', 'cloudtrail'),
        #     TermMatch('details.eventname', 'DescribeTable')
        # ])

        self.filtersManual(search_query)
        self.searchEventsSimple()
        self.walkEvents()

    def onEvent(self, event):
        category = 'AWSCloudtrail'

        # Useful tag and severity rankings for your alert.
        tags = ['cloudtrail', 'aws', 'cloudtrailpagerduty']
        severity = 'CRITICAL'

        # What message should surface in the user interface when this fires?
        summary = 'The alert fired!'

        return self.createAlertDict(summary, category, tags, [event], severity)

    # Learn more about MozDef alerts by exploring the "Alert" class!


def handle(event, context):
    logger = setup_logging()
    logger.debug('Function initialized.')
    a = AlertCloudtrailLoggingDisabled()
    return a.main()
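The handler above can also be exercised from a dev shell without deploying. A sketch, assuming the layer contents (lib/, mozdef_util, config) are on PYTHONPATH and a local Elasticsearch is reachable:

import os

# config reads these at import time, so set them before importing lambdalert
os.environ["OPTIONS_ESSERVERS"] = "http://localhost:9200"  # assumption: local dev ES
os.environ["OPTIONS_MQPROTOCOL"] = "sqs"

from lambdalert import handle

# Lambda passes an event and a context; neither is used by this handler.
handle(event=None, context=None)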
@@ -0,0 +1,12 @@
from mozdef_util.plugin_set import PluginSet
from mozdef_util.utilities.logger import logger


class AlertPluginSet(PluginSet):

    def send_message_to_plugin(self, plugin_class, message, metadata=None):
        if 'utctimestamp' in message and 'summary' in message:
            message_log_str = u'{0} received message: ({1}) {2}'.format(plugin_class.__module__, message['utctimestamp'], message['summary'])
            logger.info(message_log_str)

        return plugin_class.onMessage(message), metadata
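A plugin consumed by this set follows the usual MozDef plugin shape: a class named message with a registration list, a priority, and an onMessage hook. A minimal sketch; the registration term and the tag it appends are illustrative, and the module name (without .py) would be listed in ALERT_PLUGINS:

class message(object):
    def __init__(self):
        # route only alerts whose content matches a registration term
        self.registration = ['cloudtrail']  # illustrative term
        self.priority = 5

    def onMessage(self, alert):
        # mutate and return the alert dict; it continues down the plugin chain
        alert.setdefault('tags', []).append('reviewed-by-plugin')  # illustrative tag
        return alert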
@@ -0,0 +1,553 @@
#!/usr/bin/env python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation

import collections
import json
import kombu
import os
import sys
import socket
import netaddr

from configlib import getConfig, OptionParser
from datetime import datetime
from collections import Counter
from celery import Task
from celery.utils.log import get_task_logger
from config import RABBITMQ, ES, ALERT_PLUGINS

from mozdef_util.utilities.toUTC import toUTC
from mozdef_util.elasticsearch_client import ElasticsearchClient
from mozdef_util.query_models import TermMatch, ExistsMatch

sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib"))
from lib.alert_plugin_set import AlertPluginSet


# utility functions used by AlertTask.mostCommon
# determine most common values
# in a list of dicts
def keypaths(nested):
    """ return a list of nested dict key paths
        like: [u'_source', u'details', u'program']
    """
    for key, value in nested.iteritems():
        if isinstance(value, collections.Mapping):
            for subkey, subvalue in keypaths(value):
                yield [key] + subkey, subvalue
        else:
            yield [key], value


def dictpath(path):
    """ split a string representing a
        nested dictionary path key.subkey.subkey
    """
    for i in path.split("."):
        yield "{0}".format(i)


def getValueByPath(input_dict, path_string):
    """
    Gets data/value from a dictionary using a dotted accessor-string
    http://stackoverflow.com/a/7534478
    path_string can be key.subkey.subkey.subkey
    """
    return_data = input_dict
    for chunk in path_string.split("."):
        return_data = return_data.get(chunk, {})
    return return_data
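For reference, the dotted accessor resolves nested keys like an ES hit's _source; a quick illustration (not in the source):

# Quick illustration of getValueByPath with a shape like an ES hit's _source
event_source = {"details": {"program": "sshd", "sourceipaddress": "10.1.2.3"}}
assert getValueByPath(event_source, "details.program") == "sshd"
# missing keys fall through to an empty dict rather than raising
assert getValueByPath(event_source, "details.nosuchkey.deeper") == {}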
def hostname_from_ip(ip):
    try:
        reversed_dns = socket.gethostbyaddr(ip)
        return reversed_dns[0]
    except socket.herror:
        return None


def add_hostname_to_ip(ip, output_format, require_internal=True):
    ip_obj = netaddr.IPNetwork(ip)[0]
    if require_internal and not ip_obj.is_private():
        return ip
    hostname = hostname_from_ip(ip)
    if hostname is None:
        return ip
    else:
        return output_format.format(ip, hostname)


class AlertTask(Task):

    abstract = True

    def __init__(self):
        self.alert_name = self.__class__.__name__
        self.main_query = None

        # Used to store any alerts that were thrown
        self.alert_ids = []

        # List of events
        self.events = None
        # List of aggregations
        # e.g. when aggregField is email: [{value:'evil@evil.com',count:1337,events:[...]}, ...]
        self.aggregations = None

        self.log.debug("starting {0}".format(self.alert_name))
        self.log.debug(RABBITMQ)
        self.log.debug(ES)

        self._configureKombu()
        self._configureES()

        # We want to select all event indices
        # and filter out the window based on timestamp
        # from the search query
        self.event_indices = ["events-*"]
    def classname(self):
        return self.__class__.__name__

    @property
    def log(self):
        return get_task_logger("%s.%s" % (__name__, self.alert_name))

    def parse_config(self, config_filename, config_keys):
        myparser = OptionParser()
        self.config = None
        (self.config, args) = myparser.parse_args([])
        for config_key in config_keys:
            temp_value = getConfig(config_key, "", config_filename)
            setattr(self.config, config_key, temp_value)

    def _discover_task_exchange(self):
        """Use configuration information to understand the message queue protocol.
        return: amqp, sqs
        """
        return getConfig("mqprotocol", "amqp", None)

    def __build_conn_string(self):
        exchange_protocol = self._discover_task_exchange()
        if exchange_protocol == "amqp":
            connString = "amqp://{0}:{1}@{2}:{3}//".format(
                RABBITMQ["mquser"],
                RABBITMQ["mqpassword"],
                RABBITMQ["mqserver"],
                RABBITMQ["mqport"],
            )
            return connString
        elif exchange_protocol == "sqs":
            connString = "sqs://{}".format(getConfig("alertSqsQueueUrl", None, None))
            if connString:
                connString = connString.replace('https://', '')
            return connString

    def _configureKombu(self):
        """
        Configure kombu for amqp or sqs
        """
        try:
            connString = self.__build_conn_string()
            self.mqConn = kombu.Connection(connString)
            if connString.find('sqs') == 0:
                self.mqConn.transport_options['region'] = os.getenv('DEFAULT_AWS_REGION', 'us-west-2')
                self.alertExchange = kombu.Exchange(
                    name=RABBITMQ["alertexchange"], type="topic", durable=True
                )
                self.alertExchange(self.mqConn).declare()
                alertQueue = kombu.Queue(
                    os.getenv('OPTIONS_ALERTSQSQUEUEURL').split('/')[4], exchange=self.alertExchange
                )
            else:
                self.alertExchange = kombu.Exchange(
                    name=RABBITMQ["alertexchange"], type="topic", durable=True
                )
                self.alertExchange(self.mqConn).declare()
                alertQueue = kombu.Queue(
                    RABBITMQ["alertqueue"], exchange=self.alertExchange
                )
            alertQueue(self.mqConn).declare()
            self.mqproducer = self.mqConn.Producer(serializer="json")
            self.log.debug("Kombu configured")
        except Exception as e:
            self.log.error(
                "Exception while configuring kombu for alerts: {0}".format(e)
            )
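The SQS branch above performs two string manipulations that are easy to misread; a quick illustration with a made-up queue URL:

# Illustration of the SQS naming conventions used above (example URL only).
queue_url = "https://sqs.us-west-2.amazonaws.com/123456789012/mozdef-alerts"

# kombu wants the scheme stripped from the broker URL:
conn_string = "sqs://{}".format(queue_url).replace('https://', '')
assert conn_string == "sqs://sqs.us-west-2.amazonaws.com/123456789012/mozdef-alerts"

# and the queue name is the fifth path component of the original URL:
assert queue_url.split('/')[4] == "mozdef-alerts"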
    def _configureES(self):
        """
        Configure elasticsearch client
        """
        try:
            self.es = ElasticsearchClient(ES["servers"])
            self.log.debug("ES configured")
        except Exception as e:
            self.log.error("Exception while configuring ES for alerts: {0}".format(e))

    def mostCommon(self, listofdicts, dictkeypath):
        """
        Given a list containing dictionaries,
        return the most common entries
        along a key path separated by .
        i.e. dictkey.subkey.subkey
        returned as a list of tuples
        [(value,count),(value,count)]
        """
        inspectlist = list()
        path = list(dictpath(dictkeypath))
        for i in listofdicts:
            for k in list(keypaths(i)):
                if not (set(k[0]).symmetric_difference(path)):
                    inspectlist.append(k[1])

        return Counter(inspectlist).most_common()
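mostCommon ties keypaths and dictpath together, tallying every value found at a dotted path across a list of events. A quick illustration of the expected result (task stands in for an AlertTask instance):

# Illustration of mostCommon over a list of event-shaped dicts.
events = [
    {"_source": {"details": {"program": "sshd"}}},
    {"_source": {"details": {"program": "sshd"}}},
    {"_source": {"details": {"program": "sudo"}}},
]
# With a dotted key path, values are tallied with collections.Counter:
# task.mostCommon(events, "_source.details.program")
# -> [('sshd', 2), ('sudo', 1)]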
    def alertToMessageQueue(self, alertDict):
        """
        Send alert to the kombu based message queue. The default is rabbitmq.
        """
        try:
            # cherry pick items from the alertDict to send to the alerts messageQueue
            mqAlert = dict(severity="INFO", category="")
            if "severity" in alertDict:
                mqAlert["severity"] = alertDict["severity"]
            if "category" in alertDict:
                mqAlert["category"] = alertDict["category"]
            if "utctimestamp" in alertDict:
                mqAlert["utctimestamp"] = alertDict["utctimestamp"]
            if "eventtimestamp" in alertDict:
                mqAlert["eventtimestamp"] = alertDict["eventtimestamp"]
            mqAlert["summary"] = alertDict["summary"]
            self.log.debug(mqAlert)
            ensurePublish = self.mqConn.ensure(
                self.mqproducer, self.mqproducer.publish, max_retries=10
            )
            ensurePublish(
                alertDict,
                exchange=self.alertExchange,
                routing_key=RABBITMQ["alertqueue"],
            )
            self.log.debug("alert sent to the alert queue")
        except Exception as e:
            self.log.error(
                "Exception while sending alert to message queue: {0}".format(e)
            )

    def alertToES(self, alertDict):
        """
        Send alert to elasticsearch
        """
        try:
            res = self.es.save_alert(body=alertDict)
            self.log.debug("alert sent to ES")
            self.log.debug(res)
            return res
        except Exception as e:
            self.log.error("Exception while pushing alert to ES: {0}".format(e))

    def tagBotNotify(self, alert):
        """
        Tag alert to be excluded based on severity
        If 'ircchannel' is set in an alert, we automatically notify mozdefbot
        """
        alert["notify_mozdefbot"] = True
        if alert["severity"] == "NOTICE" or alert["severity"] == "INFO":
            alert["notify_mozdefbot"] = False

        # If an alert sets a specific ircchannel, then we should probably always notify in mozdefbot
        if (
            "ircchannel" in alert and alert["ircchannel"] != "" and alert["ircchannel"] is not None
        ):
            alert["notify_mozdefbot"] = True
        return alert

    def saveAlertID(self, saved_alert):
        """
        Save alert to self so we can analyze it later
        """
        self.alert_ids.append(saved_alert["_id"])

    def filtersManual(self, query):
        """
        Configure filters manually

        query is a search query object with date_timedelta populated
        """
        # Don't fire on already alerted events
        duplicate_matcher = TermMatch("alert_names", self.determine_alert_classname())
        if duplicate_matcher not in query.must_not:
            query.add_must_not(duplicate_matcher)

        self.main_query = query

    def determine_alert_classname(self):
        alert_name = self.classname()
        # Allow alerts like the generic alerts (one python alert but represents many 'alerts')
        # to customize the alert name
        if hasattr(self, "custom_alert_name"):
            alert_name = self.custom_alert_name
        return alert_name

    def executeSearchEventsSimple(self):
        """
        Execute the search for simple events
        """
        return self.main_query.execute(self.es, indices=self.event_indices)

    def searchEventsSimple(self):
        """
        Search events matching filters, store events in self.events
        """
        try:
            results = self.executeSearchEventsSimple()
            self.events = results["hits"]
            self.log.debug(self.events)
        except Exception as e:
            self.log.error("Error while searching events in ES: {0}".format(e))

    def searchEventsAggregated(self, aggregationPath, samplesLimit=5):
        """
        Search events, aggregate matching ES filters by aggregationPath,
        store them in self.aggregations as a list of dictionaries
        keys:
          value: the text value that was found in the aggregationPath
          count: the hitcount of the text value
          events: the sampled list of events that matched
          allevents: the unsampled, total list of matching events
        aggregationPath can be key.subkey.subkey to specify a path to a dictionary value
        relative to the _source that's returned from elastic search.
        ex: details.sourceipaddress
        """

        # We automatically add the key that we're matching on
        # for aggregation, as a query requirement
        aggreg_key_exists = ExistsMatch(aggregationPath)
        if aggreg_key_exists not in self.main_query.must:
            self.main_query.add_must(aggreg_key_exists)

        try:
            esresults = self.main_query.execute(self.es, indices=self.event_indices)
            results = esresults["hits"]

            # List of aggregation values that can be counted/summarized by Counter
            # Example: ['evil@evil.com','haxoor@noob.com', 'evil@evil.com'] for an email aggregField
            aggregationValues = []
            for r in results:
                aggregationValues.append(getValueByPath(r["_source"], aggregationPath))

            # [{value:'evil@evil.com',count:1337,events:[...]}, ...]
            aggregationList = []
            for i in Counter(aggregationValues).most_common():
                idict = {"value": i[0], "count": i[1], "events": [], "allevents": []}
                for r in results:
                    if (
                        getValueByPath(r["_source"], aggregationPath).encode(
                            "ascii", "ignore"
                        ) == i[0]
                    ):
                        # copy events detail into this aggregation up to our samples limit
                        if len(idict["events"]) < samplesLimit:
                            idict["events"].append(r)
                        # also copy all events to a non-sampled list
                        # so we mark all events as alerted and don't re-alert
                        idict["allevents"].append(r)
                aggregationList.append(idict)

            self.aggregations = aggregationList
            self.log.debug(self.aggregations)
        except Exception as e:
            self.log.error("Error while searching events in ES: {0}".format(e))
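Subclasses typically pair searchEventsAggregated with walkAggregations (defined below); a minimal sketch of that pattern, assuming the SearchQuery and TermMatch imports from mozdef_util.query_models, with an illustrative field and threshold:

from mozdef_util.query_models import SearchQuery, TermMatch

class AlertSshBruteforce(AlertTask):
    def main(self):
        search_query = SearchQuery(minutes=15)
        search_query.add_must(TermMatch('details.program', 'sshd'))  # illustrative filter
        self.filtersManual(search_query)
        # group matching events by source address, keeping 5 samples per group
        self.searchEventsAggregated('details.sourceipaddress', samplesLimit=5)
        # call onAggregation() once per group holding 10 or more events
        self.walkAggregations(threshold=10)

    def onAggregation(self, aggreg):
        summary = '{0} ssh events from {1}'.format(aggreg['count'], aggreg['value'])
        return self.createAlertDict(summary, 'bruteforce', ['ssh'], aggreg['events'], 'WARNING')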
    def walkEvents(self, **kwargs):
        """
        Walk through events, provide some methods to hook in alerts
        """
        if len(self.events) > 0:
            for i in self.events:
                alert = self.onEvent(i, **kwargs)
                if alert:
                    alert = self.tagBotNotify(alert)
                    self.log.debug(alert)
                    alert = self.alertPlugins(alert)
                    alertResultES = self.alertToES(alert)
                    self.tagEventsAlert([i], alertResultES)
                    self.alertToMessageQueue(alert)
                    self.hookAfterInsertion(alert)
                    self.saveAlertID(alertResultES)
        # did we not match anything?
        # can also be used as an alert trigger
        if len(self.events) == 0:
            alert = self.onNoEvent(**kwargs)
            if alert:
                alert = self.tagBotNotify(alert)
                self.log.debug(alert)
                alertResultES = self.alertToES(alert)
                self.alertToMessageQueue(alert)
                self.hookAfterInsertion(alert)
                self.saveAlertID(alertResultES)

    def walkAggregations(self, threshold, config=None):
        """
        Walk through aggregations, provide some methods to hook in alerts
        """
        if len(self.aggregations) > 0:
            for aggregation in self.aggregations:
                if aggregation["count"] >= threshold:
                    aggregation["config"] = config
                    alert = self.onAggregation(aggregation)
                    if alert:
                        alert = self.tagBotNotify(alert)
                        self.log.debug(alert)
                        alert = self.alertPlugins(alert)
                        alertResultES = self.alertToES(alert)
                        # even though we only sample events in the alert
                        # tag all events as alerted to avoid re-alerting
                        # on events we've already processed.
                        self.tagEventsAlert(aggregation["allevents"], alertResultES)
                        self.alertToMessageQueue(alert)
                        self.saveAlertID(alertResultES)

    def alertPlugins(self, alert):
        """
        Send alerts through a plugin system
        """

        plugin_dir = os.path.join(os.path.dirname(__file__), "../plugins")
        plugin_set = AlertPluginSet(plugin_dir, ALERT_PLUGINS)
        alertDict = plugin_set.run_plugins(alert)[0]

        return alertDict

    def createAlertDict(
        self,
        summary,
        category,
        tags,
        events,
        severity="NOTICE",
        url=None,
        ircchannel=None,
    ):
        """
        Create an alert dict
        """
        alert = {
            "utctimestamp": toUTC(datetime.now()).isoformat(),
            "severity": severity,
            "summary": summary,
            "category": category,
            "tags": tags,
            "events": [],
            "ircchannel": ircchannel,
        }
        if url:
            alert["url"] = url

        for e in events:
            alert["events"].append(
                {
                    "documentindex": e["_index"],
                    "documentsource": e["_source"],
                    "documentid": e["_id"],
                }
            )
        self.log.debug(alert)
        return alert

    def onEvent(self, event, *args, **kwargs):
        """
        To be overridden by children to run their code
        to be used when creating an alert using an event
        must return an alert dict or None
        """
        pass

    def onNoEvent(self, *args, **kwargs):
        """
        To be overridden by children to run their code
        when NOTHING matches a filter
        which can be used to trigger on the absence of
        events much like a dead man's switch.
        This is to be used when creating an alert using an event
        must return an alert dict or None
        """
        pass

    def onAggregation(self, aggregation):
        """
        To be overridden by children to run their code
        to be used when creating an alert using an aggregation
        must return an alert dict or None
        """
        pass

    def hookAfterInsertion(self, alert):
        """
        To be overridden by children to run their code
        after an alert has been inserted into elasticsearch
        """
        pass

    def tagEventsAlert(self, events, alertResultES):
        """
        Update the event with the alertid/index
        and update the alert_names on the event itself so it's
        not re-alerted
        """
        try:
            for event in events:
                if "alerts" not in event["_source"]:
                    event["_source"]["alerts"] = []
                event["_source"]["alerts"].append(
                    {"index": alertResultES["_index"], "id": alertResultES["_id"]}
                )

                if "alert_names" not in event["_source"]:
                    event["_source"]["alert_names"] = []
                event["_source"]["alert_names"].append(self.determine_alert_classname())

                self.es.save_event(
                    index=event["_index"], body=event["_source"], doc_id=event["_id"]
                )
            # We refresh here to ensure our changes to the events will show up for the next search query results
            self.es.refresh(event["_index"])
        except Exception as e:
            self.log.error("Error while updating events in ES: {0}".format(e))

    def main(self):
        """
        To be overridden by children to run their code
        """
        pass

    def run(self, *args, **kwargs):
        """
        Main method launched by celery periodically
        """
        try:
            self.main(*args, **kwargs)
            self.log.debug("finished")
        except Exception as e:
            self.log.exception("Exception in main() method: {0}".format(e))

    def parse_json_alert_config(self, config_file):
        """
        Helper function to parse an alert config file
        """
        alert_dir = os.path.join(os.path.dirname(__file__), "..")
        config_file_path = os.path.abspath(os.path.join(alert_dir, config_file))
        json_obj = {}
        with open(config_file_path, "r") as fd:
            try:
                json_obj = json.load(fd)
            except ValueError:
                sys.stderr.write("FAILED to open the configuration file\n")

        return json_obj
@@ -0,0 +1,78 @@
#!/usr/bin/env python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation

from celery.schedules import crontab, timedelta
import time
import logging
import os

ALERTS = {
    # 'pythonfile.pythonclass':{'schedule': crontab(minute='*/10')},
    # 'pythonfile.pythonclass':{'schedule': timedelta(minutes=10),'kwargs':dict(hostlist=['nsm3', 'nsm5'])},
}

ALERT_PLUGINS = [
    # 'relative pythonfile name (exclude the .py) - EX: sso_dashboard',
]

ALERT_ACTIONS = [
    # 'relative pythonfile name (exclude the .py) - EX: sso_dashboard',
]

RABBITMQ = {
    'mqserver': 'localhost',
    'mquser': 'guest',
    'mqpassword': 'guest',
    'mqport': 5672,
    'alertexchange': 'alerts',
    'alertqueue': 'mozdef.alert'
}

if os.getenv('OPTIONS_ESSERVERS'):
    ES = {
        'servers': [os.getenv('OPTIONS_ESSERVERS')]
    }
else:
    ES = {
        'servers': ['http://localhost:9200']
    }

LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(message)s',
            'datefmt': '%y %b %d, %H:%M:%S',
        },
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d: %(message)s'
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'celery': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'celery.log',
            'formatter': 'standard',
            'maxBytes': 1024 * 1024 * 100,  # 100 mb
        },
    },
    'loggers': {
        'celery': {
            'handlers': ['celery', 'console'],
            'level': 'INFO',
        },
    }
}

logging.Formatter.converter = time.gmtime
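Enabling an alert means filling in one of the commented ALERTS templates above; a sketch, where the module.class names are assumptions about where the alert files live:

from celery.schedules import crontab, timedelta

ALERTS = {
    # run the cloudtrail alert every 10 minutes on a crontab schedule
    'lambdalert.AlertCloudtrailLoggingDisabled': {'schedule': crontab(minute='*/10')},
    # timedelta works as well, and any kwargs are passed through to run()
    'deadman.AlertDeadmanGeneric': {'schedule': timedelta(minutes=30)},
}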
@@ -0,0 +1,9 @@
from alerttask import AlertTask


class DeadmanAlertTask(AlertTask):

    def executeSearchEventsSimple(self):
        # We override this method to specify the size as 1
        # since we only care about if ANY events are found or not
        return self.main_query.execute(self.es, indices=self.event_indices, size=1)
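A concrete deadman alert built on this class only needs a query and an onNoEvent hook; a minimal sketch, again assuming SearchQuery and TermMatch from mozdef_util.query_models, with an illustrative tag:

from mozdef_util.query_models import SearchQuery, TermMatch

class AlertHeartbeatMissing(DeadmanAlertTask):
    def main(self):
        search_query = SearchQuery(minutes=60)
        search_query.add_must(TermMatch('tags', 'heartbeat'))  # illustrative tag
        self.filtersManual(search_query)
        self.searchEventsSimple()
        self.walkEvents()

    def onNoEvent(self):
        # walkEvents() calls this only when the search returned nothing
        summary = 'No heartbeat events seen in the last hour'
        return self.createAlertDict(summary, 'deadman', ['heartbeat'], [], 'ERROR')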
@@ -50,7 +50,7 @@
"set -e",
"sudo yum update -y",
"sudo yum makecache fast",
"sudo yum install -y glibc-devel gcc libstdc++ libffi-devel zlib-devel make mysql-devel python python-devel python-pip git docker python3",
"sudo yum install -y glibc-devel gcc libstdc++ libffi-devel zlib-devel make python python-devel python-pip git docker python3",
"sudo pip install virtualenv docker-compose",
"sudo systemctl enable docker",
"sudo systemctl start docker",

@@ -59,7 +59,7 @@
"cd /opt/mozdef",
"sudo git checkout {{ user `github_branch`}}",
"sudo git rev-parse HEAD",
"sudo touch docker/compose/cloudy_mozdef.env docker/compose/rabbitmq.env docker/compose/cloudy_mozdef_mq_cloudtrail.env docker/compose/cloudy_mozdef_mq_sns_sqs.env docker/compose/cloudy_mozdef_kibana.env",
"sudo touch docker/compose/cloudy_mozdef.env docker/compose/rabbitmq.env docker/compose/cloudy_mozdef_mq_cloudtrail.env docker/compose/cloudy_mozdef_mq_sqs.env docker/compose/cloudy_mozdef_kibana.env",
"sudo sed --in-place s/latest/{{ user `github_branch`}}/g docker/compose/docker-compose-cloudy-mozdef.yml",
"sudo docker-compose --file docker/compose/docker-compose-cloudy-mozdef.yml --project-name mozdef pull",
"sudo rm --recursive --force --verbose /tmp/* /home/ec2-user/.bash_history /root/.ssh /home/ec2-user/.ssh/known_hosts /home/ec2-user/.ssh/authorized_keys"
@@ -11,20 +11,12 @@ import hjson
import sys
import os
import requests
import mozdef_client as mozdef
from mozdef_util.utilities.dot_dict import DotDict

try:
    import urllib.parse

    quote_url = urllib.parse.quote
except ImportError:
    # Well hello there python2 user!
    import urllib

    quote_url = urllib.quote
import traceback

import mozdef_client as mozdef

from mozdef_util.utilities.dot_dict import DotDict


def fatal(msg):
    print(msg)
@@ -137,12 +137,13 @@ def main():
        {"$project": {"address": 1}},
        {"$limit": options.iplimit}
    ])
    IPList = []
    ips = []
    for ip in ipCursor:
        IPList.append(ip['address'])
        ips.append(ip['address'])
    uniq_ranges = netaddr.cidr_merge(ips)
    # to text
    with open(options.outputfile, 'w') as outputfile:
        for ip in IPList:
        for ip in uniq_ranges:
            outputfile.write("{0}\n".format(ip))
    outputfile.close()
    # to s3?
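The move to netaddr.cidr_merge collapses adjacent or duplicate addresses into the smallest covering CIDR set before the list is written out; a quick illustration:

# Quick illustration of netaddr.cidr_merge on raw watch-listed addresses.
import netaddr

ips = ['10.0.0.1', '10.0.0.0', '10.0.0.2', '10.0.0.3', '192.168.1.9']
print(netaddr.cidr_merge(ips))
# -> [IPNetwork('10.0.0.0/30'), IPNetwork('192.168.1.9/32')]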
@@ -109,11 +109,11 @@ def main():

    # post to elastic search servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=index, body=json.dumps(healthlog))
    es.save_object(index=index, body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest status
    healthlog['tags'] = ['mozdef', 'status', 'latest']
    es.save_event(index=index, doc_id=getDocID(server), body=json.dumps(healthlog))
    es.save_object(index=index, doc_id=getDocID(server), body=json.dumps(healthlog))


def initConfig():
@@ -26,7 +26,7 @@ def getESAlerts(es):
    # We use an ExistsMatch here just to satisfy the
    # requirements of a search query must have some "Matchers"
    search_query.add_must(ExistsMatch('summary'))
    results = search_query.execute(es, indices=['alerts-*'], size=10000)
    results = search_query.execute(es, indices=['alerts'], size=10000)
    return results
@@ -1,6 +0,0 @@
[options]
hostname=<add_hostname>
username=<add_username>
password=<add_password>
database=<add_database>
url=http://localhost:8080/events
@@ -1,198 +0,0 @@
#!/usr/bin/env python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation

import copy
import os
import sys
import json
import ConfigParser
import socket
import MySQLdb
from requests import Session
from optparse import OptionParser
from datetime import datetime


class MozDefError(Exception):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)


class MozDefEvent():
    # create requests session to allow for keep alives
    httpsession = Session()
    # Turns off needless and repetitive .netrc check for creds
    httpsession.trust_env = False
    debug = False
    verify_certificate = False
    # Never fail (ie no unexpected exceptions sent to user, such as server/network not responding)
    fire_and_forget_mode = True
    log = {}
    log['timestamp'] = datetime.isoformat(datetime.now())
    log['hostname'] = socket.getfqdn()
    log['processid'] = os.getpid()
    log['processname'] = sys.argv[0]
    log['severity'] = 'INFO'
    log['summary'] = None
    log['category'] = 'event'
    log['tags'] = list()
    log['details'] = dict()

    def __init__(self, url='http://localhost/events', summary=None, category='event', severity='INFO', tags=[], details={}):
        self.summary = summary
        self.category = category
        self.severity = severity
        self.tags = tags
        self.details = details
        self.url = url

    def send(self, timestamp=None, summary=None, category=None, severity=None, tags=None, details=None, hostname=None):
        log_msg = copy.copy(self.log)

        if timestamp is None:
            log_msg['timestamp'] = self.timestamp
        else:
            log_msg['timestamp'] = timestamp

        if summary is None:
            log_msg['summary'] = self.summary
        else:
            log_msg['summary'] = summary

        if category is None:
            log_msg['category'] = self.category
        else:
            log_msg['category'] = category

        if severity is None:
            log_msg['severity'] = self.severity
        else:
            log_msg['severity'] = severity

        if tags is None:
            log_msg['tags'] = self.tags
        else:
            log_msg['tags'] = tags

        if details is None:
            log_msg['details'] = self.details
        else:
            log_msg['details'] = details

        if hostname is None:
            log_msg['hostname'] = self.hostname
        else:
            log_msg['hostname'] = hostname

        if type(log_msg['details']) != dict:
            raise MozDefError('details must be a dict')
        elif type(log_msg['tags']) != list:
            raise MozDefError('tags must be a list')
        elif summary is None:
            raise MozDefError('Summary is a required field')

        try:
            self.httpsession.post(self.url, json.dumps(log_msg, encoding='utf-8'), verify=self.verify_certificate)
        except Exception as e:
            if not self.fire_and_forget_mode:
                raise e


def main():
    '''
    connect to vidyo's mysql, read in calls and write to mozdef
    '''
    mdEvent = MozDefEvent(options.url)
    mdEvent.debug = True
    mdEvent.fire_and_forget_mode = False

    # connect to mysql
    db = MySQLdb.connect(host=options.hostname, user=options.username, passwd=options.password, db=options.database)
    c = db.cursor(MySQLdb.cursors.DictCursor)

    c.execute("select * from ConferenceCall2 where JoinTime between NOW() - INTERVAL 30 MINUTE and NOW() or LeaveTime between NOW() - INTERVAL 30 MINUTE and NOW()")
    rows = c.fetchall()
    c.close()

    # Build dictionary of calls in order to consolidate multiple rows for a single call
    calls = {}
    for row in rows:
        id = row['UniqueCallID']
        # Copy the row's info if we don't already have the final completed call state
        if id not in calls or (id in calls and calls[id]['CallState'] != 'COMPLETED'):
            calls[id] = row

    # Massage call data and send to MozDef
    for key in calls.keys():
        call = calls[key]
        if call['LeaveTime'] is not None:
            duration = call['LeaveTime'] - call['JoinTime']
            call['CallDuration'] = duration.seconds

        # fix up the data for json
        for k in call.keys():
            # convert datetime objects to isoformat for json serialization
            if isinstance(call[k], datetime):
                call[k] = call[k].isoformat()
            # make sure it's a string, not unicode forced into a string
            if isinstance(call[k], str):
                # db has unicode stored as string, so decode, then encode
                call[k] = call[k].decode('utf-8', 'ignore').encode('ascii', 'ignore')

        mdEvent.send(timestamp=call['JoinTime'],
                     summary='Vidyo call status for ' + call['UniqueCallID'].encode('ascii', 'ignore'),
                     tags=['vidyo'],
                     details=call,
                     category='vidyo',
                     hostname=socket.gethostname()
                     )


def getConfig(optionname, thedefault, configfile):
    """read an option from a config file or set a default
    send 'thedefault' as the data class you want to get a string back
    i.e. 'True' will return a string
    True will return a bool
    1 will return an int
    """
    retvalue = thedefault
    opttype = type(thedefault)
    if os.path.isfile(configfile):
        config = ConfigParser.ConfigParser()
        config.readfp(open(configfile))
        if config.has_option('options', optionname):
            if opttype == bool:
                retvalue = config.getboolean('options', optionname)
            elif opttype == int:
                retvalue = config.getint('options', optionname)
            elif opttype == float:
                retvalue = config.getfloat('options', optionname)
            else:
                retvalue = config.get('options', optionname)
    return retvalue


def initConfig(configfile):
    # default options
    options.url = getConfig('url', 'http://localhost:8080/events', configfile)
    options.username = getConfig('username', '', configfile)
    options.password = getConfig('password', '', configfile)
    options.database = getConfig('database', '', configfile)
    options.hostname = getConfig('hostname', '', configfile)


if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-c", dest='configfile', default=sys.argv[0].replace('.py', '.conf'), help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig(options.configfile)
    main()
@@ -1,10 +0,0 @@
#!/usr/bin/env bash

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation

source /opt/mozdef/envs/python/bin/activate
/opt/mozdef/envs/mozdef/cron/vidyo2MozDef.py -c /opt/mozdef/envs/mozdef/cron/vidyo2MozDef.conf
@@ -265,9 +265,9 @@ services:
        max-size: "10m"
    env_file:
      - cloudy_mozdef.env
      - cloudy_mozdef_mq_sns_sqs.env
      - cloudy_mozdef_mq_sqs.env
    restart: always
    command: bash -c 'python esworker_sns_sqs.py -c esworker_sns_sqs.conf'
    command: bash -c 'python esworker_sqs.py -c esworker_sqs.conf'
    scale: 1
    depends_on:
      - base
@@ -1,54 +1,6 @@
---
version: '3.7'
services:
  nginx:
    image: mozdef/mozdef_nginx
    build:
      context: ../../
      dockerfile: docker/compose/nginx/Dockerfile
      cache_from:
        - mozdef/mozdef_nginx
        - mozdef_nginx:latest
    restart: always
    command: /usr/sbin/nginx
    depends_on:
      - kibana
      - meteor
    ports:
      - 80:80
      - 8080:8080
      - 9090:9090
      # - 8081:8081
    networks:
      - default
  mongodb:
    image: mozdef/mozdef_mongodb
    build:
      context: ../../
      dockerfile: docker/compose/mongodb/Dockerfile
      cache_from:
        - mozdef/mozdef_mongodb
        - mozdef_mongodb:latest
    restart: always
    command: /usr/bin/mongod --smallfiles --config /etc/mongod.conf
    volumes:
      - mongodb:/var/lib/mongo
    networks:
      - default
  kibana:
    image: mozdef/mozdef_kibana
    build:
      context: ../../
      dockerfile: docker/compose/kibana/Dockerfile
      cache_from:
        - mozdef/mozdef_kibana
        - mozdef_kibana:latest
    restart: always
    command: bin/kibana --elasticsearch=http://elasticsearch:9200
    depends_on:
      - elasticsearch
    networks:
      - default
  elasticsearch:
    image: mozdef/mozdef_elasticsearch
    build:
@@ -82,6 +34,56 @@ services:
      # - 15672:15672 # Admin interface
    networks:
      - default
  mongodb:
    image: mozdef/mozdef_mongodb
    build:
      context: ../../
      dockerfile: docker/compose/mongodb/Dockerfile
      cache_from:
        - mozdef/mozdef_mongodb
        - mozdef_mongodb:latest
    restart: always
    command: /usr/bin/mongod --smallfiles --config /etc/mongod.conf
    volumes:
      - mongodb:/var/lib/mongo
    # ports:
    #   - 3002:3002
    networks:
      - default
  kibana:
    image: mozdef/mozdef_kibana
    build:
      context: ../../
      dockerfile: docker/compose/kibana/Dockerfile
      cache_from:
        - mozdef/mozdef_kibana
        - mozdef_kibana:latest
    restart: always
    command: bin/kibana --elasticsearch=http://elasticsearch:9200
    depends_on:
      - elasticsearch
    networks:
      - default
  nginx:
    image: mozdef/mozdef_nginx
    build:
      context: ../../
      dockerfile: docker/compose/nginx/Dockerfile
      cache_from:
        - mozdef/mozdef_nginx
        - mozdef_nginx:latest
    restart: always
    command: /usr/sbin/nginx
    depends_on:
      - kibana
      - meteor
    ports:
      - 80:80
      - 8080:8080
      - 9090:9090
      # - 8081:8081
    networks:
      - default

  # MozDef Specific Containers
  base:
@@ -21,8 +21,7 @@ RUN \
    useradd -ms /bin/bash -d /opt/mozdef -m mozdef && \
    mkdir /opt/mozdef/envs && \
    cd /opt/mozdef && \
    yum install -y mysql-devel \
                   python \
    yum install -y python \
                   python-devel \
                   python-pip && \
    yum clean all && \
@@ -9,7 +9,6 @@
  "visState": "{\"title\":\"All Events Area\",\"type\":\"area\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"utctimestamp per second\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"area\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"drawLinesBetweenPoints\":true,\"showCircles\":true,\"interpolate\":\"linear\",\"valueAxis\":\"ValueAxis-1\"}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false,\"type\":\"area\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"utctimestamp\",\"interval\":\"s\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}}],\"listeners\":{}}",
  "uiStateJSON": "{}",
  "description": "",
  "version": 1,
  "kibanaSavedObjectMeta": {
    "searchSourceJSON": "{\"index\":\"events-weekly\",\"query\":{\"match_all\":{}},\"filter\":[]}"
  }

@@ -9,7 +9,6 @@
  "visState": "{\"aggs\":[{\"enabled\":true,\"id\":\"1\",\"params\":{},\"schema\":\"metric\",\"type\":\"count\"},{\"enabled\":true,\"id\":\"2\",\"params\":{\"field\":\"category\",\"order\":\"desc\",\"orderBy\":\"1\",\"size\":5},\"schema\":\"segment\",\"type\":\"terms\"}],\"listeners\":{},\"params\":{\"addLegend\":true,\"addTooltip\":true,\"isDonut\":false,\"legendPosition\":\"right\",\"type\":\"pie\"},\"title\":\"Category Pie Graph\",\"type\":\"pie\"}",
  "uiStateJSON": "{}",
  "description": "",
  "version": 1,
  "kibanaSavedObjectMeta": {
    "searchSourceJSON": "{\"index\":\"events-weekly\",\"query\":{\"match_all\":{}},\"filter\":[]}"
  }

@@ -9,7 +9,6 @@
  "visState": "{\"title\":\"DestinationIP Bar Graph\",\"type\":\"histogram\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"details.destinationipaddress: Descending\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false,\"type\":\"histogram\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"details.destinationipaddress\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
  "uiStateJSON": "{}",
  "description": "",
  "version": 1,
  "kibanaSavedObjectMeta": {
    "searchSourceJSON": "{\"index\":\"events-weekly\",\"query\":{\"match_all\":{}},\"filter\":[]}"
  }

@@ -9,7 +9,6 @@
  "visState": "{\"title\":\"SourceIP Bar Graph\",\"type\":\"histogram\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"details.sourceipaddress: Descending\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false,\"type\":\"histogram\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"details.sourceipaddress\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
  "uiStateJSON": "{}",
  "description": "",
  "version": 1,
  "kibanaSavedObjectMeta": {
    "searchSourceJSON": "{\"index\":\"events-weekly\",\"query\":{\"match_all\":{}},\"filter\":[]}"
  }
@@ -0,0 +1,6 @@
{
  "title": "alerts-*",
  "timeFieldName": "utctimestamp",
  "notExpandable": true,
  "fields": "[{\"name\":\"_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_index\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"_score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_source\",\"type\":\"_source\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"category\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"notify_mozdefbot\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"severity\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"summary\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"tags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"utctimestamp\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}]"
}
Binary file not shown.
After  Width: | Height: | Size: 18 KiB
@@ -70,11 +70,11 @@ We need to install a python2.7 virtualenv.

On Yum-based systems::

    sudo yum install make zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel tk-devel pcre-devel gcc gcc-c++ mysql-devel
    sudo yum install make zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel tk-devel pcre-devel gcc gcc-c++

On APT-based systems::

    sudo apt-get install make zlib1g-dev libbz2-dev libssl-dev libncurses5-dev libsqlite3-dev libreadline-dev tk-dev libpcre3-dev libpcre++-dev build-essential g++ libmysqlclient-dev
    sudo apt-get install make zlib1g-dev libbz2-dev libssl-dev libncurses5-dev libsqlite3-dev libreadline-dev tk-dev libpcre3-dev libpcre++-dev build-essential g++

Then::
@@ -41,9 +41,21 @@ def bulkindex():
    bulkpost=request.body.read()
    # bottlelog('request:{0}\n'.format(bulkpost))
    request.body.close()
    if len(bulkpost)>10:  # TODO Check for bulk format.
        # iterate on messages and post to event message queue
    try:  # Handles json array bulk format [{},{},...]
        messages = json.loads(bulkpost)
        for event in messages:
            # don't post the items telling us where to post things..
            if 'index' not in event:
                ensurePublish=mqConn.ensure(mqproducer,mqproducer.publish,max_retries=10)
                ensurePublish(event,exchange=eventTaskExchange,routing_key=options.taskexchange)
        return
    except ValueError as e:
        bottlelog('Decoded raw input failed with {0}'.format(e))
        pass

    if len(bulkpost)>10:  # Handles single element format {}
        # TODO Check for other bulk formats.
        # iterate on messages and post to event message queue
        eventlist=[]
        for i in bulkpost.splitlines():
            eventlist.append(i)
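The rewritten handler accepts either a JSON array or newline-delimited documents; a sketch of posting both shapes, assuming the default dev loginput address and the /_bulk route (both assumptions):

# Sketch: posting both bulk shapes to the handler above.
import json
import requests

events = [{"summary": "test event 1"}, {"summary": "test event 2"}]

# JSON array bulk format [{},{},...] -> handled by the json.loads branch
requests.post("http://localhost:8080/_bulk", data=json.dumps(events))

# newline-delimited format -> handled by the splitlines() fallback branch
ndjson = "\n".join(json.dumps(e) for e in events)
requests.post("http://localhost:8080/_bulk", data=ndjson)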
@@ -1,108 +0,0 @@
/*
 * jQuery Highlight plugin
 *
 * Based on highlight v3 by Johann Burkard
 * http://johannburkard.de/blog/programming/javascript/highlight-javascript-text-higlighting-jquery-plugin.html
 *
 * Code a little bit refactored and cleaned (in my humble opinion).
 * Most important changes:
 *  - has an option to highlight only entire words (wordsOnly - false by default),
 *  - has an option to be case sensitive (caseSensitive - false by default)
 *  - highlight element tag and class names can be specified in options
 *
 * Usage:
 *   // wrap every occurrance of text 'lorem' in content
 *   // with <span class='highlight'> (default options)
 *   $('#content').highlight('lorem');
 *
 *   // search for and highlight more terms at once
 *   // so you can save some time on traversing DOM
 *   $('#content').highlight(['lorem', 'ipsum']);
 *   $('#content').highlight('lorem ipsum');
 *
 *   // search only for entire word 'lorem'
 *   $('#content').highlight('lorem', { wordsOnly: true });
 *
 *   // don't ignore case during search of term 'lorem'
 *   $('#content').highlight('lorem', { caseSensitive: true });
 *
 *   // wrap every occurrance of term 'ipsum' in content
 *   // with <em class='important'>
 *   $('#content').highlight('ipsum', { element: 'em', className: 'important' });
 *
 *   // remove default highlight
 *   $('#content').unhighlight();
 *
 *   // remove custom highlight
 *   $('#content').unhighlight({ element: 'em', className: 'important' });
 *
 *
 * Copyright (c) 2009 Bartek Szopka
 *
 * Licensed under MIT license.
 *
 */

jQuery.extend({
    highlight: function (node, re, nodeName, className) {
        if (node.nodeType === 3) {
            var match = node.data.match(re);
            if (match) {
                var highlight = document.createElement(nodeName || 'span');
                highlight.className = className || 'highlight';
                var wordNode = node.splitText(match.index);
                wordNode.splitText(match[0].length);
                var wordClone = wordNode.cloneNode(true);
                highlight.appendChild(wordClone);
                wordNode.parentNode.replaceChild(highlight, wordNode);
                return 1; //skip added node in parent
            }
        } else if ((node.nodeType === 1 && node.childNodes) && // only element nodes that have children
                !/(script|style)/i.test(node.tagName) && // ignore script and style nodes
                !(node.tagName === nodeName.toUpperCase() && node.className === className)) { // skip if already highlighted
            for (var i = 0; i < node.childNodes.length; i++) {
                i += jQuery.highlight(node.childNodes[i], re, nodeName, className);
            }
        }
        return 0;
    }
});

jQuery.fn.unhighlight = function (options) {
    var settings = { className: 'highlight', element: 'span' };
    jQuery.extend(settings, options);

    return this.find(settings.element + "." + settings.className).each(function () {
        var parent = this.parentNode;
        parent.replaceChild(this.firstChild, this);
        parent.normalize();
    }).end();
};

jQuery.fn.highlight = function (words, options) {
    var settings = { className: 'highlight', element: 'span', caseSensitive: false, wordsOnly: false };
    jQuery.extend(settings, options);

    if (words.constructor === String) {
        words = [words];
    }
    words = jQuery.grep(words, function(word, i){
        return word != '';
    });
    words = jQuery.map(words, function(word, i) {
        return word.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, "\\$&");
    });
    if (words.length == 0) { return this; };

    var flag = settings.caseSensitive ? "" : "i";
    var pattern = "(" + words.join("|") + ")";
    if (settings.wordsOnly) {
        pattern = "\\b" + pattern + "\\b";
    }
    var re = new RegExp(pattern, flag);

    return this.each(function () {
        jQuery.highlight(this, re, settings.element, settings.className);
    });
};
@@ -269,6 +269,19 @@ if ( Meteor.isClient ) {
        return pluginsForEndPoint( endpoint );
    } );

    jQuery.fn.highlight = function (str, className) {
        var regex = new RegExp(str, "gi");
        return this.each(function () {
            $(this).contents().filter(function() {
                return this.nodeType == 3 && regex.test(this.nodeValue);
            }).replaceWith(function() {
                return (this.nodeValue || "").replace(regex, function(match) {
                    return "<span class=\"" + className + "\">" + match + "</span>";
                });
            });
        });
    };

    UI.registerHelper( 'ipDecorate', function( elementText ) {
        //decorate text containing an ipv4 address
        var anelement = $( $.parseHTML( '<span>' + elementText + '</span>' ) )
@@ -277,23 +290,9 @@
        //clean up potential interference chars
        w = w.replace( /,|:|;|\[|\]/g, '' )
        if ( isIPv4( w ) ) {
            //console.log(w);
            anelement.
                highlight( w,
                    {
                        wordsOnly: false,
                        element: "em",
                        className: "ipaddress"
                    } );
            anelement.highlight(w, 'ipaddress');
        } else if ( isHostname( w ) ) {
            //console.log(w);
            anelement.
                highlight( w,
                    {
                        wordsOnly: false,
                        element: "em",
                        className: "hostname"
                    } );
            anelement.highlight(w, 'hostname');
        }
    } );
    //add a drop down menu to any .ipaddress
@@ -31,7 +31,7 @@ Copyright (c) 2014 Mozilla Corporation
        </li>
        {{#if isFeature "kibana"}}
        <li>
          <a target="_blank" href={{mozdef.kibanaURL}}>
          <a target="_blank" href={{ resolveKibanaURL mozdef.kibanaURL }}>
            <span class="moz">
              <img src="/images/logo-elastic-kibana-dk.svg" width="23" height="20">
            </span>
@@ -42,8 +42,9 @@ resolveKibanaURL = function(url){
    if ( kibanaURL.hostname == 'relative' ){
        // we were passed something like OPTIONS_METEOR_KIBANAURL=http://relative:9090/app/kibana
        // so lets figure out where we should be
        dnsURL=new URL(document.URL);
        dnsURL = new URL(document.URL);
        kibanaURL.hostname = dnsURL.hostname;
        kibanaURL.protocol = dnsURL.protocol;
    }
    return kibanaURL;
};
@@ -13,10 +13,13 @@ Copyright (c) 2014 Mozilla Corporation
  --txt-secondary-color: #000;
  --txt-shadow-color: #000;
  --txt-highlight-color: rgba(165, 170, 172, 0.904);
  --arm-color: #d1b61e;
  --arm-focus-color: #e7c714a9;
  --txt-disabled-color: #576d54;
  --a-link-color: rgb(245, 222, 179);
  --row-color-odd: rgba(30,87,153,.7);
  --row-color-even: #636c85;
  }
}

html{
  background: none;
@@ -262,8 +265,27 @@ h1, h2, h3, h4, h5, h6, .h1, .h2, .h3, .h4, .h5, .h6 {
.btn {
    border: 1px outset;
    border-radius: 4px;
-    color: #999;
-}
+    color: var(--txt-primary-color);
+    background-color: var(--arm-color);
+}
+
+.btn-warning.active,
+.btn-warning:active,
+.btn-warning:hover,
+.open > .dropdown-toggle.btn-warning {
+    color: var(--txt-secondary-color);
+    background-color: var(--arm-focus-color);
+    border-color: var(--arm-color);
+}
+
+.btnAlertAcked,
+.btnAlertAcked.active,
+.btnAlertAcked:active,
+.btnAlertAcked:hover > .btn {
+    color: var(--txt-disabled-color);
+    background-color: var(--arm-focus-color);
+    border-color: var(--arm-color);
+}

input[type="search"] {
@@ -291,7 +313,7 @@ input[type="search"] {

.table-striped > tbody > tr:nth-of-type(2n+1) {
    background-color: var(--row-color-even)
}

.table-hover tbody tr:hover > td,
.table-hover tbody tr:hover > th,
@@ -11,16 +11,13 @@ Copyright (c) 2014 Mozilla Corporation
    --bg-secondary-color: #2d5fa0;
    --row-color-odd: #2a2f35;
    --row-color-even: #636c85;
    --ack-edit-color: #a2a9b2;
    --ack-edit-border-color: #adadad;
    --ack-edit-focus-color: #557750;
    --ack-edit-disabled-color: #557750;
    --arm-color: #e69006;
    --arm-focus-color: #d58512;
    --txt-primary-color: #fff;
    --txt-secondary-color: #000;
    --txt-disabled-color: #576d54;
    --a-link-color: #a2a9b2;
}
}

/*base css */
html{
@@ -193,23 +190,23 @@ caption, legend {
.alert.alert-NOTICE {
    --alert-bg-color: #4a6785;
    --alert-color: white;
}
.alert.alert-WARNING {
    --alert-bg-color: #ffd351;
    --alert-color: black;
}
.alert.alert-CRITICAL {
    --alert-bg-color: #d04437;
    --alert-color: white;
}
.alert.alert-INFO {
    --alert-bg-color: #cccccc;
    --alert-color: black;
}
.alert.alert-ERROR {
    --alert-bg-color: #d04437;
    --alert-color: white;
}

.alert {
    color: var(--alert-color);
@@ -217,7 +214,7 @@ caption, legend {
    text-transform: uppercase;
    display: table-cell;
    font-weight: bold;
}

.alert-row a {
    color: wheat;
@@ -227,7 +224,15 @@ caption, legend {
    color: white;
}

+.modal-header {
+    color: var(--font-focus);
+}
+
+.modal-body {
+    color: var(--font-focus);
+}
+
.modal-body .row {
    color: black;
}
/*bootstrap overrides*/
@@ -235,8 +240,27 @@
.btn {
    border: 1px outset;
    border-radius: 4px;
-    color: #999;
-}
+    color: var(--txt-primary-color);
+    background-color: var(--arm-color);
+}
+
+.btn-warning.active,
+.btn-warning:active,
+.btn-warning:hover,
+.open > .dropdown-toggle.btn-warning {
+    color: var(--txt-secondary-color);
+    background-color: var(--arm-focus-color);
+    border-color: var(--arm-color);
+}
+
+.btnAlertAcked,
+.btnAlertAcked.active,
+.btnAlertAcked:active,
+.btnAlertAcked:hover > .btn {
+    color: var(--txt-disabled-color);
+    background-color: var(--arm-focus-color);
+    border-color: var(--arm-color);
+}

input[type="search"] {
@@ -404,10 +428,10 @@ sidenav {
    -ms-transition: all 400ms ease;
    -o-transition: all 400ms ease;
    transition: all 400ms ease;
}

/*pull out triangle*/
sidenav:after {
    position: absolute;
    content: ' ';
    width: 0;
@@ -417,34 +441,34 @@ sidenav {
    border-width: 30px 30px;
    border-style: solid;
    border-color: transparent transparent transparent var(--bg-secondary-color);
}
sidenav ul {
    width: 14em;
    list-style-type: none;
    margin: auto;
    padding: 1em;
}
sidenav div{
    margin:auto;
}
sidenav:hover {
    left: 0;
}
sidenav .filters-col .row {
    margin-top: 45px;
    padding: 0 0.5em;
}
sidenav .reset-filter {
    text-align: center;
    margin-top: 20px;
}
.form-horizontal .form-group {
    margin-left: 5px;
    margin-right: 5px;
    padding-top: 5px;
}

@media screen and (max-width: 1000px) {
    sidenav {
        background: var(--bg-primary-color);
        border-left: 15px solid var(--bg-secondary-color);
@@ -501,7 +525,7 @@ sidenav {
    div.dc-chart {
        float: none;
    }
}

/* globe styling */
.globe-container {
@@ -13,6 +13,8 @@ Copyright (c) 2014 Mozilla Corporation
    --txt-secondary-color: #fff;
    --txt-shadow-color: #aaa;
    --txt-highlight-color: rgba(165, 170, 172, 0.904);
+    --arm-color: #d1b61e;
+    --arm-focus-color: #e7c714a9;
    --a-link-color: rgb(49, 130, 189);
    --row-color-odd: rgba(30,87,153,.1);
    --row-color-even: #636c85;
@@ -193,23 +195,23 @@ caption, legend {
.alert.alert-NOTICE {
    --alert-bg-color: #4a6785;
    --alert-color: black;
}
.alert.alert-WARNING {
    --alert-bg-color: #ffd351;
    --alert-color: black;
}
.alert.alert-CRITICAL {
    --alert-bg-color: #d04437;
    --alert-color: black;
}
.alert.alert-INFO {
    --alert-bg-color: #cccccc;
    --alert-color: black;
}
.alert.alert-ERROR {
    --alert-bg-color: #d04437;
    --alert-color: black;
}

.alert {
    color: var(--alert-color);
@@ -217,7 +219,7 @@ caption, legend {
    text-transform: uppercase;
    display: table-cell;
    font-weight: bold;
}

.alert-row a {
    color: var(--a-link-color);
@@ -270,6 +272,26 @@ h1, h2, h3, h4, h5, h6, .h1, .h2, .h3, .h4, .h5, .h6 {
.btn {
    border: 1px outset;
    border-radius: 4px;
+    color: var(--txt-primary-color);
+    background-color: var(--arm-color);
}
+
+.btn-warning.active,
+.btn-warning:active,
+.btn-warning:hover,
+.open > .dropdown-toggle.btn-warning {
+    color: var(--txt-secondary-color);
+    background-color: var(--arm-focus-color);
+    border-color: var(--arm-color);
+}
+
+.btnAlertAcked,
+.btnAlertAcked.active,
+.btnAlertAcked:active,
+.btnAlertAcked:hover > .btn {
+    color: var(--txt-shadow-color);
+    background-color: var(--arm-focus-color);
+    border-color: var(--arm-color);
+}
@@ -13,22 +13,6 @@ Template.side_nav_menu.helpers( {
        //subscription has records?
        return features.find().count() > 0;
    },
-    resolveKibanaURL: function( url ) {
-        // special function just for the menu
-        // to adjust the kibana URL if we are told to make it 'relative'
-        // to whatever DNS name we are running on
-        // i.e. pass in http://relative:9090/app/kibana
-        // when the running dns is something.com
-        // and we will set the hostname to something.com instead of 'relative'
-        var kibanaURL = new URL( url );
-        if ( kibanaURL.hostname == 'relative' ) {
-            // we were passed something like OPTIONS_METEOR_KIBANAURL=http://relative:9090/app/kibana
-            // so lets figure out where we should be
-            dnsURL = new URL( document.URL );
-            kibanaURL.hostname = dnsURL.hostname;
-        }
-        return kibanaURL;
-    },
    // loads kibana dashboards
    kibanadashboards: function() {
        Meteor.call( 'loadKibanaDashboards' );
@@ -18,8 +18,7 @@ Copyright (c) 2014 Mozilla Corporation
    --row-color-even: #636c85;
    --ack-edit-color: #a2a9b2;
    --ack-edit-border-color: #adadad;
-    --ack-edit-focus-color: #557750;
-    --ack-edit-disabled-color: #557750;
+    --txt-shadow-color: #576d54;
    --arm-color: #e69006;
    --arm-focus-color: #d58512;
    --font-main: #fff;
@@ -232,6 +231,14 @@ caption, legend {
    color: white;
}

+.modal-header {
+    color: var(--font-focus);
+}
+
+.modal-body {
+    color: var(--font-focus);
+}
+
.modal-body .row {
    color: black;
}
@@ -253,6 +260,15 @@ caption, legend {
    border-color: var(--arm-color);
}

+.btnAlertAcked,
+.btnAlertAcked.active,
+.btnAlertAcked:active,
+.btnAlertAcked:hover > .btn {
+    color: var(--txt-shadow-color);
+    background-color: var(--arm-focus-color);
+    border-color: var(--arm-color);
+}
+
.btn-notice {
    border: 1px outset;
    border-radius: 4px;
@@ -46,7 +46,7 @@ class SearchQuery(object):
    def add_aggregation(self, input_obj):
        self.append_to_array(self.aggregation, input_obj)

-    def execute(self, elasticsearch_client, indices=['events-*'], size=1000, request_timeout=30):
+    def execute(self, elasticsearch_client, indices=['events', 'events-previous'], size=1000, request_timeout=30):
        if self.must == [] and self.must_not == [] and self.should == [] and self.aggregation == []:
            raise AttributeError('Must define a must, must_not, should query, or aggregation')
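This changes the default search target from the 'events-*' wildcard to the two rotation aliases, so a query hits exactly the current and previous event indices. A usage sketch under the new default (the import paths and server URL here are assumptions, for illustration):

    from mozdef_util.elasticsearch_client import ElasticsearchClient
    from mozdef_util.query_models import SearchQuery, ExistsMatch

    es_client = ElasticsearchClient('http://localhost:9200')

    search_query = SearchQuery(minutes=30)
    search_query.add_must(ExistsMatch('summary'))

    # The default indices are now the 'events' and 'events-previous' aliases.
    recent = search_query.execute(es_client)

    # Callers that need to look farther back, such as deadman checks, can
    # opt into a wider alias explicitly.
    weekly = search_query.execute(es_client, indices=['events-weekly'])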
@@ -36,7 +36,9 @@ class message(object):
            'details.requestparameters.logstreamname',
            'details.requestparameters.source',
+            'details.requestparameters.tagging',
+            'details.requestparameters.logging',
            'details.responseelements.role',
            'details.responseelements.policy',
            'details.requestparameters.rule',
            'details.responseelements.subnets',
            'details.responseelements.endpoint',
@@ -1,25 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-# Copyright (c) 2014 Mozilla Corporation
-
-import hashlib
-
-
-class message(object):
-    def __init__(self):
-        '''
-        takes an incoming vidyo call record and assigns a static ID
-        so we always update the same doc for current status.
-        '''
-
-        # this plugin
-        # sets the type field
-        self.registration = ['uniquecallid']
-        self.priority = 5
-
-    def onMessage(self, message, metadata):
-        docid = hashlib.md5('vidyouniquecallid' + message['details']['uniquecallid']).hexdigest()
-        metadata['id'] = docid
-        message['type'] = 'vidyo'
-        return (message, metadata)
@@ -33,7 +33,6 @@ kombu==4.1.0
meld3==1.0.2
mozdef-client==1.0.11
mozdef-util==1.0.8
MySQL-python==1.2.5
netaddr==0.7.1
nose==1.3.7
oauth2client==1.4.12
@@ -60,7 +59,7 @@ slackclient==1.0.9
supervisor==3.3.1
tzlocal==1.4
uritemplate==0.6
-urllib3==1.23
+urllib3==1.24.3
uwsgi==2.0.17.1
virtualenv==1.11.4
tldextract==2.2.0
@@ -80,10 +80,7 @@ class message(object):
        search_query.add_aggregation(Aggregation('details.success'))
        search_query.add_aggregation(Aggregation('details.username'))

-        # We want to select all event indices
-        # and filter out the window based on timestamp
-        # from the search query
-        results = search_query.execute(es_client, indices=['events-*'])
+        results = search_query.execute(es_client, indices=['events', 'events-previous'])

        # any usernames or words to ignore
        # especially useful if ES is analyzing the username field and breaking apart user@somewhere.com
@@ -0,0 +1,102 @@
import os
import sys

plugin_path = os.path.join(os.path.dirname(__file__), '../../../alerts/plugins')
sys.path.append(plugin_path)

from ip_source_enrichment import enrich


good_ipv4 = '255.0.1.2'
good_ipv6 = '3001:4d9c:b29:12f0::'
bad_ipv4 = '192.168.0.1'
bad_ipv6 = '2001:db8:a0b:12f0::'

known_ips = [
    {
        'range': good_ipv4 + '/8',
        'site': 'office1',
        'format': '{0} known',
    },
    {
        'range': good_ipv6 + '/64',
        'site': 'office2',
        'format': '{0} known',
    }
]

alert_with_ipv4 = {
    'category': 'bro',
    'tags': ['portscan'],
    'summary': 'this is a test alert',
    'details': {
        'sourceipaddress': good_ipv4,
        'destinationipaddress': bad_ipv4,
        'ports': [22, 9001, 25505, 65534]
    }
}

alert_with_ipv6 = {
    'category': 'bro',
    'tags': ['test'],
    'summary': 'Another test alert',
    'details': {
        'sourceipaddress': good_ipv6,
        'destinationipaddress': bad_ipv6,
        'port': [22, 9001, 24404, 65532]
    }
}

alert_with_ipv4_in_summary = {
    'category': 'test',
    'tags': ['ip', 'in', 'summary'],
    'summary': 'Testing:{0} is a random IP in a poorly formatted string'.format(good_ipv4),
    'details': {}
}

alert_with_ipv6_in_summary = {
    'category': 'test',
    'tags': ['ip', 'in', 'summary'],
    'summary': 'Found IPs ["{0}"]'.format(good_ipv6),
    'details': {}
}


class TestIPSourceEnrichment(object):
    def test_ipv4_addrs_enriched(self):
        enriched = enrich(alert_with_ipv4, known_ips)

        assert '{0} known'.format(good_ipv4) in enriched['summary']
        assert len(enriched['details']['sites']) == 1
        assert enriched['details']['sites'][0]['site'] == 'office1'

    def test_ipv6_addrs_enriched(self):
        enriched = enrich(alert_with_ipv6, known_ips)

        assert '{0} known'.format(good_ipv6) in enriched['summary']
        assert len(enriched['details']['sites']) == 1
        assert enriched['details']['sites'][0]['site'] == 'office2'

    def test_ipv4_addrs_in_summary_enriched(self):
        enriched = enrich(alert_with_ipv4_in_summary, known_ips)

        assert '{0} known'.format(good_ipv4) in enriched['summary']
        assert len(enriched['details']['sites']) == 1
        assert enriched['details']['sites'][0]['site'] == 'office1'

    def test_ipv6_addrs_in_summary_enriched(self):
        enriched = enrich(alert_with_ipv6_in_summary, known_ips)

        assert '{0} known'.format(good_ipv6) in enriched['summary']
        assert len(enriched['details']['sites']) == 1
        assert enriched['details']['sites'][0]['site'] == 'office2'

    def test_unrecognized_ipv4_addrs_not_enriched(self):
        enriched = enrich(alert_with_ipv4, known_ips)

        assert '{0} known'.format(bad_ipv4) not in enriched['summary']

    def test_unrecognized_ipv6_addrs_not_enriched(self):
        enriched = enrich(alert_with_ipv6, known_ips)

        assert '{0} known'.format(bad_ipv6) not in enriched['summary']
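These tests pin down the enrichment contract: any known IP found in an alert's details or summary gets its site recorded under details.sites and a formatted note appended to the summary, while unrecognized IPs are left alone. A minimal sketch of an enrich() consistent with the tests (the real plugin in alerts/plugins may differ):

    import re
    from ipaddress import ip_address, ip_network

    # Loose candidate matchers; ip_address() below rejects any junk matches.
    IPV4_CANDIDATE = re.compile(r'(?:\d{1,3}\.){3}\d{1,3}')
    IPV6_CANDIDATE = re.compile(r'(?:[0-9a-fA-F]{1,4}:){2,7}[0-9a-fA-F]{0,4}:?')

    def enrich(alert, known_ips):
        candidates = set(IPV4_CANDIDATE.findall(alert.get('summary', '')))
        candidates.update(IPV6_CANDIDATE.findall(alert.get('summary', '')))
        candidates.update(
            v for v in alert.get('details', {}).values() if isinstance(v, str))

        sites, notes = [], []
        for candidate in sorted(candidates):
            try:
                addr = ip_address(candidate)
            except ValueError:
                continue  # not actually an IP address
            for known in known_ips:
                if addr in ip_network(known['range'], strict=False):
                    sites.append({'ip': candidate, 'site': known['site']})
                    notes.append(known['format'].format(candidate))

        enriched = dict(alert, details=dict(alert.get('details', {}), sites=sites))
        if notes:
            enriched['summary'] = '{0}; {1}'.format(alert['summary'], '; '.join(notes))
        return enriched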
@@ -0,0 +1,71 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation

import os
import sys

plugin_path = os.path.join(os.path.dirname(__file__), '../../../alerts/plugins')
sys.path.append(plugin_path)

from port_scan_enrichment import enrich


EXAMPLE_TIMESTAMP = '2016-07-13 22:33:31.625443+00:00'


def mock_search_fn(results):
    def search_fn(_query):
        return results

    return search_fn


class TestPortScanEnrichment(object):
    def test_alert_enriched(self):
        results = {
            'hits': [
                {
                    '_source': {
                        'details': {
                            'destinationipaddress': '1.2.3.4',
                            'destinationport': 80
                        },
                        'timestamp': EXAMPLE_TIMESTAMP
                    }
                },
                {
                    '_source': {
                        'details': {
                            'destinationipaddress': '4.3.2.1',
                            'destinationport': 443
                        },
                        'timestamp': EXAMPLE_TIMESTAMP
                    }
                }
            ]
        }

        alert = {
            'details': {
                'sourceipaddress': '127.0.0.1'
            }
        }

        search_window = {
            'hours': 1
        }

        max_conns = 1

        enriched = enrich(
            alert,
            mock_search_fn(results),
            search_window,
            max_conns)

        assert len(enriched['details']['recentconnections']) == 1
        assert enriched['details']['recentconnections'][0]['destinationipaddress'] in ['1.2.3.4', '4.3.2.1']
        assert enriched['details']['recentconnections'][0]['destinationport'] in [80, 443]
        assert enriched['details']['recentconnections'][0]['timestamp'] == EXAMPLE_TIMESTAMP
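The mock above shows the seam this enrichment is tested through: enrich() receives a search callable rather than an Elasticsearch client, so the test can inject canned hits. A sketch of an enrich() consistent with these assertions (the real plugin builds an actual Elasticsearch query where the placeholder dict appears):

    def enrich(alert, search_fn, search_window, max_connections):
        # Look up recent connections from the alerting source IP inside the
        # window and attach at most max_connections of them to the alert.
        query = {
            'sourceipaddress': alert['details']['sourceipaddress'],
            'window': search_window,  # e.g. {'hours': 1}
        }
        hits = search_fn(query)['hits'][:max_connections]
        alert['details']['recentconnections'] = [
            {
                'destinationipaddress': hit['_source']['details']['destinationipaddress'],
                'destinationport': hit['_source']['details']['destinationport'],
                'timestamp': hit['_source']['timestamp'],
            }
            for hit in hits
        ]
        return alert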
@@ -60,6 +60,7 @@ class UnitTestSuite(object):
        self.es_client.create_alias('events', self.event_index_name)
        self.es_client.create_index(self.previous_event_index_name, index_config=self.mapping_options)
        self.es_client.create_alias('events-previous', self.previous_event_index_name)
+        self.es_client.create_alias_multiple_indices('events-weekly', ['events', 'events-previous'])
        self.es_client.create_index(self.alert_index_name, index_config=self.mapping_options)
        self.es_client.create_alias('alerts', self.alert_index_name)
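The new 'events-weekly' alias gives longer-range alerts (deadman checks, for example) a single name that spans both rotation indices. For illustration, roughly what a multi-index alias looks like against the raw elasticsearch-py client; the concrete index names are hypothetical, and MozDef's ElasticsearchClient wrapper hides this plumbing:

    from elasticsearch import Elasticsearch

    es = Elasticsearch(['http://localhost:9200'])

    # Point one alias at both event indices so a single search against
    # 'events-weekly' covers the current and previous rotation periods.
    es.indices.update_aliases(body={
        'actions': [
            {'add': {'index': 'events-20190529', 'alias': 'events-weekly'}},
            {'add': {'index': 'events-20190522', 'alias': 'events-weekly'}},
        ]
    })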