Merge remote-tracking branch 'origin/master' into proxy_drop_ip

This commit is contained in:
Brandon Myers 2018-11-06 09:58:19 -06:00
Parents d75707c957 eec93030f4
Commit 3997ff8b0f
No known key found for this signature in database
GPG Key ID: 8AA79AD83045BBC7
150 changed files with 1663 additions and 1549 deletions

21
.flake8
View File

@@ -4,35 +4,14 @@ exclude =
.git
*__init__.py
ignore =
E114 # indentation is not a multiple of four (comment)
E116 # unexpected indentation (comment)
E121 # continuation line under-indented for hanging indent
E122 # continuation line missing indentation or outdented
E123 # closing bracket does not match indentation of opening bracket's line
E124 # closing bracket does not match visual indentation
E125 # continuation line with same indent as next logical line
E126 # continuation line over-indented for hanging indent
E127 # continuation line over-indented for visual indent
E128 # continuation line under-indented for visual indent
E129 # visually indented line with same indent as next logical line
E131 # continuation line unaligned for hanging indent
E222 # multiple spaces after operator
E225 # missing whitespace around operator
E226 # missing whitespace around arithmetic operator
E228 # missing whitespace around modulo operator
E231 # missing whitespace after ','
E241 # multiple spaces after ','
E261 # at least two spaces before inline comment
E265 # block comment should start with '# '
E266 # too many leading '#' for block comment
E301 # expected 1 blank line
E302 # expected 2 blank lines, found 1
E305 # expected 2 blank lines after class or function definition
E402 # module level import not at top of file
E501 # line too long
E711 # comparison to None should be 'if cond is not None:'
E712 # comparison to True should be 'if cond is True:'
E713 # test for membership should be 'not in'
E722 # do not use bare 'except'
F401 # library imported but unused
F601 # dictionary key 'tags' repeated with different values

1
.gitignore vendored
View File

@@ -14,3 +14,4 @@ alerts/generic_alerts
/data
.vscode
cloudy_mozdef/aws_parameters.json
cloudy_mozdef/aws_parameters.sh

View File

@@ -6,14 +6,19 @@ branches:
# Restrict push builds to only master
only:
- master
env:
- DOCKER_COMPOSE_VERSION=1.22.0
before_install:
# Fail immediately on any error
- set -e
# See https://docs.travis-ci.com/user/docker/
- sudo rm /usr/local/bin/docker-compose
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
- chmod +x docker-compose
- sudo mv docker-compose /usr/local/bin
install:
# Build containers
# Choose nobuild if you prefer pulling existing images
# Use BUILD_MODE=pull if you prefer pulling existing images
- make build-tests
#- make nobuild-tests
script:
- make test
# - make test-fast
- make tests

View File

@@ -6,25 +6,23 @@
ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
DKR_IMAGES := mozdef_alertplugins mozdef_alerts mozdef_base mozdef_bootstrap mozdef_meteor mozdef_rest \
mozdef_mq_eventtask mozdef_loginput mozdef_cron mozdef_elasticsearch mozdef_mongodb \
mozdef_mq_worker mozdef_loginput mozdef_cron mozdef_elasticsearch mozdef_mongodb \
mozdef_syslog mozdef_nginx mozdef_tester mozdef_rabbitmq mozdef_kibana
USE_DKR_IMAGES := docker/compose/docker-compose-rebuild.yml ## Pass docker/compose/docker-compose-norebuild.yml to use hub.docker.com images
BUILD_MODE := build ## Pass `pull` in order to pull images instead of building them
NAME := mozdef
VERSION := 0.1
NO_CACHE := ## Pass `--no-cache` in order to disable Docker cache
GITHASH := $(shell git rev-parse --short HEAD) ## Pass `latest` to tag docker hub images as latest instead
GITHASH := latest ## Pass `$(git rev-parse --short HEAD)` to tag docker hub images with the current git short hash instead
TEST_CASE := tests ## Run all (`tests`) or a specific test case (ex `tests/alerts/test_proxy_drop_exfil_domains.py`)
.PHONY:all
all:
@echo 'Available make targets:'
@grep '^[^#[:space:]^\.PHONY.*].*:' Makefile
.PHONY: run run-only
.PHONY: run
run: build ## Run all MozDef containers
docker-compose -f $(USE_DKR_IMAGES) -f docker/compose/docker-compose.yml -p $(NAME) up -d
run-only:
docker-compose -f $(USE_DKR_IMAGES) -f docker/compose/docker-compose.yml -p $(NAME) up -d
docker-compose -f docker/compose/docker-compose.yml -p $(NAME) up -d
.PHONY: run-cloudy-mozdef restart-cloudy-mozdef
run-cloudy-mozdef: ## Run the MozDef containers necessary to run in AWS (`cloudy-mozdef`). This is used by the CloudFormation-initiated setup.
@@ -36,52 +34,49 @@ run-cloudy-mozdef: ## Run the MozDef containers necessary to run in AWS (`cloudy
restart-cloudy-mozdef:
docker-compose -f docker/compose/docker-compose-cloudy-mozdef.yml -p $(NAME) restart
# TODO? add custom test targets for individual tests (what used to be `multiple-tests`, for example)
# The docker files are still in docker/compose/docker*test*
.PHONY: test tests run-tests
test: build-tests run-tests ## Running tests from locally-built images
tests: build-tests run-tests
run-tests:
docker-compose -f $(USE_DKR_IMAGES) -f tests/docker-compose.yml -p $(NAME) up -d
@echo "Waiting for the instance to come up..."
sleep 10
@echo "Running flake8.."
docker run -it mozdef_tester bash -c "source /opt/mozdef/envs/python/bin/activate && flake8 --config .flake8 ./"
@echo "Running py.test..."
docker run -it --network=mozdef_default mozdef_tester bash -c "source /opt/mozdef/envs/python/bin/activate && py.test --delete_indexes --delete_queues tests"
.PHONY: tests run-tests
test: build-tests run-tests
tests: build-tests run-tests ## Run all tests (getting/building images as needed)
run-test:
run-tests: ## Just run the tests (no build/get). Use `make TEST_CASE=tests/...` for specific tests only
docker-compose -f docker/compose/docker-compose-tests.yml -p test-$(NAME) up -d
docker run -it --rm mozdef/mozdef_tester bash -c "source /opt/mozdef/envs/python/bin/activate && flake8 --config .flake8 ./"
docker run -it --rm --network=test-mozdef_default mozdef/mozdef_tester bash -c "source /opt/mozdef/envs/python/bin/activate && py.test --delete_indexes --delete_queues $(TEST_CASE)"
.PHONY: build
build: ## Build local MozDef images (use make NO_CACHE=--no-cache build to disable caching)
docker-compose -f $(USE_DKR_IMAGES) -f docker/compose/docker-compose.yml -p $(NAME) $(NO_CACHE) build base
docker-compose -f $(USE_DKR_IMAGES) -f docker/compose/docker-compose.yml -p $(NAME) $(NO_CACHE) build
docker-compose -f docker/compose/docker-compose.yml -p $(NAME) $(NO_CACHE) $(BUILD_MODE)
.PHONY: build-tests nobuild-tests
build-tests:
docker-compose -f $(USE_DKR_IMAGES) -f tests/docker-compose.yml -p $(NAME) $(NO_CACHE) build base
docker-compose -f $(USE_DKR_IMAGES) -f tests/docker-compose.yml -p $(NAME) $(NO_CACHE) build
.PHONY: build-tests
build-tests: ## Build end-to-end test environment only
docker-compose -f docker/compose/docker-compose-tests.yml -p test-$(NAME) $(NO_CACHE) $(BUILD_MODE)
.PHONY: stop down
stop: down
down: ## Shutdown all services we started with docker-compose
docker-compose -f $(USE_DKR_IMAGES) -f docker/compose/docker-compose.yml -p $(NAME) stop
docker-compose -f docker/compose/docker-compose.yml -p $(NAME) stop
docker-compose -f docker/compose/docker-compose.yml -p test-$(NAME) stop
.PHONY: docker-push docker-get hub hub-get
docker-push: hub
hub: ## Upload locally built MozDef images tagged as the current git head (hub.docker.com/mozdef).
docker login
@echo "Tagging current docker images with git HEAD shorthash..."
$(foreach var,$(DKR_IMAGES),docker tag $(var) mozdef/$(var):$(GITHASH);)
@echo "Uploading images to docker..."
$(foreach var,$(DKR_IMAGES),docker push mozdef/$(var):$(GITHASH);)
docker-compose -f docker/compose/docker-compose.yml -p $(NAME) push
docker-compose -f docker/compose/docker-compose-tests.yml -p test-$(NAME) push
docker-get: hub-get
hub-get: ## Download all pre-built images (hub.docker.com/mozdef)
$(foreach var,$(DKR_IMAGES),docker pull mozdef/$(var):$(GITHASH);)
docker-compose -f docker/compose/docker-compose.yml -p $(NAME) pull
docker-compose -f docker/compose/docker-compose-test.yml -p test-$(NAME) pull
.PHONY: clean
clean: ## Cleanup all docker volumes and shutdown all related services
-docker-compose -f $(USE_DKR_IMAGES) -f docker/compose/docker-compose.yml -p $(NAME) down -v --remove-orphans
-docker-compose -f docker/compose/docker-compose.yml -p $(NAME) down -v --remove-orphans
-docker-compose -f docker/compose/docker-compose-tests.yml -p test-$(NAME) down -v --remove-orphans
# Shorthands
.PHONY: rebuild
rebuild: clean build
.PHONY: new-alert
new-alert: ## Create an example alert and working alert unit test
python tests/alert_templater.py

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch, QueryStringMatch, ExistsMatch, PhraseMatch, WildcardMatch
class TEMPLATE_ALERT_CLASSNAME(AlertTask):
def main(self):
# Create a query to look back the last 20 minutes
search_query = SearchQuery(minutes=20)
# Add search terms to our query
search_query.add_must([
TermMatch('category', 'helloworld'),
ExistsMatch('details.sourceipaddress'),
])
self.filtersManual(search_query)
# Search aggregations on field 'sourceipaddress'
# keep X samples of events at most
self.searchEventsAggregated('details.sourceipaddress', samplesLimit=10)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'hellocategory'
tags = ['hello', 'world']
severity = 'WARNING'
summary = "My first alert!"
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
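For orientation, a minimal sketch of the aggregation payload onAggregation() receives, following the aggreg['count'] / aggreg['value'] / aggreg['events'] convention documented in the comments above (sample values are hypothetical):
# Hypothetical aggregation produced by walkAggregations(threshold=1):
aggreg = {
    'count': 3,                # number of events in the aggregation
    'value': '203.0.113.7',    # the aggregated details.sourceipaddress
    'events': [],              # the matching event documents
}
# onAggregation(aggreg) then returns
# createAlertDict(summary, category, tags, aggreg['events'], severity)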

View File

@@ -67,10 +67,12 @@ class alertConsumer(ConsumerMixin):
def main():
# connect and declare the message queue/kombu objects.
# Event server/exchange/queue
mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(options.mquser,
options.mqpassword,
options.mqalertserver,
options.mqport)
mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(
options.mquser,
options.mqpassword,
options.mqalertserver,
options.mqport
)
mqAlertConn = Connection(mqConnString)
# Exchange for alerts we pass to plugins
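For reference, a sketch of the connection string the format call above produces (credentials and host are hypothetical):
mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format('mozdef', 'secret', 'localhost', 5672)
# -> 'amqp://mozdef:secret@localhost:5672//'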

View File

@@ -10,10 +10,11 @@ for alert in ALERTS.keys():
alerts_include = list(set(alerts_include))
BROKER_URL = 'amqp://{0}:{1}@{2}:{3}//'.format(
RABBITMQ['mquser'],
RABBITMQ['mqpassword'],
RABBITMQ['mqserver'],
RABBITMQ['mqport'])
RABBITMQ['mquser'],
RABBITMQ['mqpassword'],
RABBITMQ['mqserver'],
RABBITMQ['mqport']
)
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_CONCURRENCY = 1
CELERY_IGNORE_RESULT = True

View File

@@ -50,7 +50,8 @@ class AlertHoneycomb(AlertTask):
offendingIPs.append(ip_match.group(1))
summary = 'Honeypot activity on {0} from IP(s): {1}'.format(
aggreg['value'], ", ".join(sorted(set(offendingIPs))))
aggreg['value'], ", ".join(sorted(set(offendingIPs)))
)
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)

View File

@@ -172,9 +172,11 @@ class AlertTask(Task):
self.mqproducer,
self.mqproducer.publish,
max_retries=10)
ensurePublish(alertDict,
ensurePublish(
alertDict,
exchange=self.alertExchange,
routing_key=RABBITMQ['alertqueue'])
routing_key=RABBITMQ['alertqueue']
)
self.log.debug('alert sent to the alert queue')
except Exception as e:
self.log.error('Exception while sending alert to message queue: {0}'.format(e))
@@ -201,7 +203,7 @@ class AlertTask(Task):
alert['notify_mozdefbot'] = False
# If an alert sets specific ircchannel, then we should probably always notify in mozdefbot
if 'ircchannel' in alert and alert['ircchannel'] != '' and alert['ircchannel'] != None:
if 'ircchannel' in alert and alert['ircchannel'] != '' and alert['ircchannel'] is not None:
alert['notify_mozdefbot'] = True
return alert
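A possible further simplification (a sketch, not part of this change): an empty string and None are both falsy, so a single truthiness test covers the whole guard above:
if alert.get('ircchannel'):
    alert['notify_mozdefbot'] = True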

View File

@@ -10,8 +10,8 @@ import time
import logging
ALERTS = {
# 'pythonfile.pythonclass':{'schedule': crontab(minute='*/10')},
# 'pythonfile.pythonclass':{'schedule': timedelta(minutes=10),'kwargs':dict(hostlist=['nsm3', 'nsm5'])},
# 'pythonfile.pythonclass':{'schedule': crontab(minute='*/10')},
# 'pythonfile.pythonclass':{'schedule': timedelta(minutes=10),'kwargs':dict(hostlist=['nsm3', 'nsm5'])},
}
ALERT_PLUGINS = [

View File

@@ -48,13 +48,13 @@ class AlertMultipleIntelHits(AlertTask):
# someone talking to a bad guy, I want to know who
# someone resolving bad guy's domain name, I want to know who
# bad guy talking to someone, I want to know to whom
if 'Conn::IN_RESP' in e['_source']['details']['seenwhere'] \
or 'HTTP::IN_HOST_HEADER' in e['_source']['details']['seenwhere'] \
or 'DNS::IN_REQUEST' in e['_source']['details']['seenwhere']:
if ('Conn::IN_RESP' in e['_source']['details']['seenwhere'] or
'HTTP::IN_HOST_HEADER' in e['_source']['details']['seenwhere'] or
'DNS::IN_REQUEST' in e['_source']['details']['seenwhere']):
interestingaddres = e['_source']['details']['sourceipaddress']
elif 'Conn::IN_ORIG' in e['_source']['details']['seenwhere'] \
or 'HTTP::IN_X_CLUSTER_CLIENT_IP_HEADER' in e['_source']['details']['seenwhere'] \
or 'HTTP::IN_X_FORWARDED_FOR_HEADER' in e['_source']['details']['seenwhere']:
elif ('Conn::IN_ORIG' in e['_source']['details']['seenwhere'] or
'HTTP::IN_X_CLUSTER_CLIENT_IP_HEADER' in e['_source']['details']['seenwhere'] or
'HTTP::IN_X_FORWARDED_FOR_HEADER' in e['_source']['details']['seenwhere']):
interestingaddres = e['_source']['details']['destinationipaddress']
summary += '{0} in {1} '.format(interestingaddres, e['_source']['details']['seenwhere'])
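An equivalent formulation of the refactored condition (a sketch, not part of this change) using any() with the same markers:
seenwhere = e['_source']['details']['seenwhere']
if any(m in seenwhere for m in ('Conn::IN_RESP', 'HTTP::IN_HOST_HEADER', 'DNS::IN_REQUEST')):
    interestingaddres = e['_source']['details']['sourceipaddress']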

View File

@@ -52,13 +52,12 @@ class message(object):
'Content-type': 'application/json',
}
payload = json.dumps({
"service_key": "{0}".format(self.options.serviceKey),
"event_type": "trigger",
"description": "{0}".format(message['summary']),
"client": "MozDef",
"client_url": "https://" + self.options.clienturl + "/{0}".format(message['events'][0]['documentsource']['alerts'][0]['id']),
# "details": message['events'],
"contexts": [
"service_key": "{0}".format(self.options.serviceKey),
"event_type": "trigger",
"description": "{0}".format(message['summary']),
"client": "MozDef",
"client_url": "https://" + self.options.clienturl + "/{0}".format(message['events'][0]['documentsource']['alerts'][0]['id']),
"contexts": [
{
"type": "link",
"href": "https://" + "{0}".format(doclink),
@@ -67,9 +66,9 @@ class message(object):
]
})
r = requests.post(
'https://events.pagerduty.com/generic/2010-04-15/create_event.json',
headers=headers,
data=payload,
'https://events.pagerduty.com/generic/2010-04-15/create_event.json',
headers=headers,
data=payload,
)
# you can modify the message if needed
# plugins registered with lower (>2) priority

View File

@@ -40,11 +40,7 @@ class AlertProxyDropNonStandardPort(AlertTask):
# I think it makes sense to alert every time here
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'squid'
tags = ['squid', 'proxy']
severity = 'WARNING'

View File

@@ -20,6 +20,7 @@ import re
# alert will not generate an alert event. If the detected key is not in
# the whitelist, an alert will be created.
class SSHKey(AlertTask):
def __init__(self):
# _whitelist contains all whitelisted key paths, loaded from the
@@ -65,7 +66,7 @@ class SSHKey(AlertTask):
rem = re.compile(went['hostre'])
except:
continue
if rem.match(hostname) == None:
if rem.match(hostname) is None:
continue
if privkey['path'] == went['path']:
return False
@@ -93,6 +94,6 @@ class SSHKey(AlertTask):
summary = 'Private keys detected on {} missing from whitelist'.format(hostname)
ret = self.createAlertDict(summary, category, tags, [event], severity)
ret['details'] = {
'private': alertkeys
}
'private': alertkeys
}
return ret

View File

@@ -71,6 +71,7 @@ import netaddr
# ]
# }
class SshLateral(AlertTask):
def __init__(self):
AlertTask.__init__(self)
@@ -92,9 +93,9 @@ class SshLateral(AlertTask):
# listed in the configuration file.
def exception_check(self, user, host, srcip):
for x in self._config['exceptions']:
if re.match(x[0], user) != None and \
re.match(x[1], host) != None and \
netaddr.IPAddress(srcip) in netaddr.IPNetwork(x[2]):
if re.match(x[0], user) is not None and \
re.match(x[1], host) is not None and \
netaddr.IPAddress(srcip) in netaddr.IPNetwork(x[2]):
return True
return False
@@ -110,13 +111,13 @@ class SshLateral(AlertTask):
srchost = aggreg['events'][0]['_source']['hostname']
srcmatch = False
for x in self._config['hostmustmatch']:
if re.match(x, srchost) != None:
if re.match(x, srchost) is not None:
srcmatch = True
break
if not srcmatch:
return None
for x in self._config['hostmustnotmatch']:
if re.match(x, srchost) != None:
if re.match(x, srchost) is not None:
return None
# Determine if the origin of the connection was from a source outside
@@ -126,7 +127,7 @@ class SshLateral(AlertTask):
sampleuser = None
for x in aggreg['events']:
m = re.match('Accepted publickey for (\S+) from (\S+).*', x['_source']['summary'])
if m != None and len(m.groups()) == 2:
if m is not None and len(m.groups()) == 2:
ipaddr = netaddr.IPAddress(m.group(2))
for y in self._config['alertifsource']:
if ipaddr in netaddr.IPNetwork(y):
@@ -149,9 +150,9 @@ class SshLateral(AlertTask):
# Check our exception list
if self.exception_check(m.group(1), srchost, m.group(2)):
continue
if sampleip == None:
if sampleip is None:
sampleip = m.group(2)
if sampleuser == None:
if sampleuser is None:
sampleuser = m.group(1)
candidates.append(x)
if len(candidates) == 0:
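A standalone sketch of the netaddr membership test that exception_check() and the alertifsource loop rely on (addresses hypothetical):
import netaddr
netaddr.IPAddress('10.0.0.5') in netaddr.IPNetwork('10.0.0.0/24')  # True
netaddr.IPAddress('10.0.1.5') in netaddr.IPNetwork('10.0.0.0/24')  # False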

View File

@@ -33,6 +33,8 @@ logger = logging.getLogger(sys.argv[0])
logger.level=logging.DEBUG
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def postLogs(logcache):
#post logs asynchronously with requests workers and check on the results
#expects a queue object from the multiprocessing library
@@ -61,6 +63,7 @@ def postLogs(logcache):
logger.fatal("exception posting to %s %r %r [will not retry]\n"%(url,e,postdata))
sys.exit(1)
if __name__ == '__main__':
parser=OptionParser()
parser.add_option("-u", dest='url', default='http://localhost:8080/events/', help="mozdef events URL to use when posting events")
@@ -74,15 +77,17 @@ if __name__ == '__main__':
for i in range(0,10):
print(i)
alog=dict(eventtime=pytz.timezone('UTC').localize(datetime.now()).isoformat(),
hostname=socket.gethostname(),
processid=os.getpid(),
processname=sys.argv[0],
severity='INFO',
summary='joe login failed',
category='authentication',
tags=[],
details=[])
alog = dict(
eventtime=pytz.timezone('UTC').localize(datetime.now()).isoformat(),
hostname=socket.gethostname(),
processid=os.getpid(),
processname=sys.argv[0],
severity='INFO',
summary='joe login failed',
category='authentication',
tags=[],
details=[]
)
alog['details']=dict(success=True,username='mozdef')
alog['tags']=['mozdef','stresstest']

View File

@@ -52,5 +52,6 @@ class Roulette(Module):
# tell kitnirc that we handled this, no need to pass to other modules.
return True
# Let KitnIRC know what module class it should be loading.
module = Roulette

View File

@@ -71,8 +71,14 @@ class Zilla(Module):
return
for bug in res['bugs']:
bugsummary = bug['summary'].encode('utf-8', 'replace')
self.controller.client.msg(self.channel, "\x037\x02WARNING\x03\x02 \x032\x02NEW\x03\x02 bug: {url}{bugid} {summary}".format(summary=bugsummary,
url=self.url, bugid=bug['id']))
self.controller.client.msg(
self.channel,
"\x037\x02WARNING\x03\x02 \x032\x02NEW\x03\x02 bug: {url}{bugid} {summary}".format(
summary=bugsummary,
url=self.url,
bugid=bug['id']
)
)
def start(self, *args, **kwargs):
super(Zilla, self).start(*args, **kwargs)

View File

@@ -4,11 +4,11 @@ password = <add_irc_password>
nick = mozdef
username = mozdef
realname = mozdef
join = #somechannel
join = somechannel
mqalertserver = localhost
mquser=mozdef
mqpassword=mozdef
channelkeys={"#somechannel": "somepassword"}
channelkeys={"somechannel": "somepassword"}
[modules]
modules.roulette = 5

View File

@@ -124,7 +124,8 @@ def isIP(ip):
def ipLocation(ip):
location = ""
try:
geoip = GeoIP()
geoip_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data/GeoLite2-City.mmdb")
geoip = GeoIP(geoip_data_dir)
geoDict = geoip.lookup_ip(ip)
if geoDict is not None:
if 'error' in geoDict:
@@ -331,7 +332,7 @@ class alertConsumer(ConsumerMixin):
# see if we need to delay a bit before sending the alert, to avoid
# flooding the channel
if self.lastalert != None:
if self.lastalert is not None:
delta = toUTC(datetime.now()) - self.lastalert
sys.stdout.write('new alert, delta since last is {}\n'.format(delta))
if delta.seconds < 2:
@@ -349,14 +350,17 @@
logger.exception(
"alertworker exception while processing events queue %r" % e)
@run_async
def consumeAlerts(ircBot):
# connect and declare the message queue/kombu objects.
# server/exchange/queue
mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(options.mquser,
options.mqpassword,
options.mqalertserver,
options.mqport)
mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(
options.mquser,
options.mqpassword,
options.mqalertserver,
options.mqport
)
mqAlertConn = Connection(mqConnString)
# Exchange for alerts we pass to plugins
@@ -390,16 +394,35 @@ def initConfig():
options.username = getConfig('username', 'username', options.configfile)
options.realname = getConfig('realname', 'realname', options.configfile)
options.password = getConfig('password', '', options.configfile)
# Our config parser removes '#'
# so we gotta re-add them
options.join = getConfig('join', '#mzdf', options.configfile)
channels = []
for channel in options.join.split(','):
if not channel.startswith('#'):
channel = '#{0}'.format(channel)
channels.append(channel)
options.join = ','.join(channels)
options.alertircchannel = getConfig(
'alertircchannel',
'',
options.configfile)
options.channelkeys = json.loads(getConfig(
'channelkeys',
'{"#somechannel": "somekey"}',
options.configfile))
# Our config parser stomps out the '#' so we gotta readd
channelkeys = {}
for key, value in options.channelkeys.iteritems():
if not key.startswith('#'):
key = '#{0}'.format(key)
channelkeys[key] = value
options.channelkeys = channelkeys
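Worked through with hypothetical config values, the '#' normalization above behaves like this:
# join = 'somechannel,other' and channelkeys = {"somechannel": "somekey"} yield:
#   options.join        == '#somechannel,#other'
#   options.channelkeys == {'#somechannel': 'somekey'}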
# message queue options
# server hostname
options.mqalertserver = getConfig(

View File

@@ -186,13 +186,16 @@ class alertConsumer(ConsumerMixin):
except ValueError as e:
logger.exception("mozdefbot_slack exception while processing events queue %r" % e)
def consumeAlerts(bot):
# connect and declare the message queue/kombu objects.
# server/exchange/queue
mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(options.mquser,
options.mqpassword,
options.mqalertserver,
options.mqport)
mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(
options.mquser,
options.mqpassword,
options.mqalertserver,
options.mqport
)
mqAlertConn = Connection(mqConnString)
# Exchange for alerts we pass to plugins

View File

@@ -8,7 +8,8 @@ STACK_PARAMS := file://aws_parameters.json
S3_BUCKET_NAME := mozdef.infosec.allizom.org
S3_BUCKET_PATH := cf
S3_BUCKET_URI := s3://$(S3_BUCKET_NAME)/$(S3_BUCKET_PATH)
S3_STACK_URI := https://s3-$(AWS_REGION).amazonaws.com/$(S3_BUCKET_NAME)/$(S3_BUCKET_PATH)/mozdef-parent.yml
S3_STACK_URI := https://s3-$(AWS_REGION).amazonaws.com/$(S3_BUCKET_NAME)/$(S3_BUCKET_PATH)/
# OIDC_CLIENT_SECRET is set in an environment variable by running "source aws_parameters.sh"
all:
@echo 'Available make targets:'
@@ -23,10 +24,13 @@ packer-build: ## Build the base AMI with packer
.PHONY: create-stack
create-stack: test ## Create everything you need for a fresh new stack!
@export AWS_REGION=$(AWS_REGION)
@echo "Make sure you have a param file ($(STACK_PARAMS)) with OIDCClientSecret set."
aws cloudformation create-stack --stack-name $(STACK_NAME) --template-url $(S3_STACK_URI) \
@echo "Make sure you have an environment variable OIDC_CLIENT_SECRET set."
aws cloudformation create-stack --stack-name $(STACK_NAME) --template-url $(S3_STACK_URI)mozdef-parent.yml \
--capabilities CAPABILITY_IAM \
--parameters $(STACK_PARAMS)
--parameters $(STACK_PARAMS) \
--parameters ParameterKey=S3TemplateLocation,ParameterValue=$(S3_STACK_URI) \
ParameterKey=OIDCClientSecret,ParameterValue=$(OIDC_CLIENT_SECRET) \
--output text
.PHONY: create-s3-bucket
create-s3-bucket:
@@ -36,9 +40,10 @@ create-s3-bucket:
.PHONY: updated-nested-stack
update-stack: test ## Updates the nested stack on AWS
@export AWS_REGION=$(AWS_REGION)
aws cloudformation update-stack --stack-name $(STACK_NAME) --template-url $(S3_STACK_URI) \
aws cloudformation update-stack --stack-name $(STACK_NAME) --template-url $(S3_STACK_URI)mozdef-parent.yml \
--capabilities CAPABILITY_IAM \
--parameters $(STACK_PARAMS) \
--parameters ParameterKey=S3TemplateLocation,ParameterValue=$(S3_STACK_URI) \
ParameterKey=OIDCClientSecret,ParameterValue=$(OIDC_CLIENT_SECRET) \
--output text
# --ignore-checks=E2502 : https://github.com/awslabs/cfn-python-lint/issues/408

View File

@@ -0,0 +1 @@
export OIDC_CLIENT_SECRET=secretgoeshere

View File

@@ -14,9 +14,15 @@ Parameters:
MozDefSQSQueueArn:
Type: String
Description: The ARN of the SQS queue that receives events destined for MozDef
ESServiceLinkedRoleExists:
Type: String
Description: Does the ES Service Linked Role already exist. true or false
Default: 'true'
Conditions:
IAMRoleSet:
!Not [!Equals [!Ref CloudTrailS3BucketIAMRoleArn, '']]
CreateESServiceLinkedRole:
!Not [!Equals [!Ref ESServiceLinkedRoleExists, 'true']] # Match either True or 'true' but not 'True'
Resources:
MozDefCloudTrailPolicy:
Type: AWS::IAM::ManagedPolicy
@@ -116,6 +122,12 @@ Resources:
Properties:
Roles:
- Ref: MozDefIAMRole
ESServiceLinkedIAMRole:
Type: "AWS::IAM::ServiceLinkedRole"
Condition: CreateESServiceLinkedRole
DeletionPolicy: Retain
Properties:
AWSServiceName: es.amazonaws.com
Outputs:
InstanceProfileArn:
Description: The arn of the Instance Profile

View File

@@ -117,6 +117,7 @@ Resources:
- content: |
OPTIONS_ESSERVERS=${ESURL}
OPTIONS_KIBANAURL=${KibanaURL}
OPTIONS_METEOR_KIBANAURL=${KibanaURL}
# See https://github.com/mozilla-iam/mozilla.oidc.accessproxy/blob/master/README.md#setup
client_id=${OIDCClientId}
client_secret=${OIDCClientSecret}
@@ -131,6 +132,8 @@ Resources:
OPTIONS_METEOR_AUTHENTICATIONTYPE=oidc
ES={"servers": [${ESURL}]}
cookiename=sesmeteor
# Increase the AWS ES total fields limit from 1000 to 4000
OPTIONS_MAPPING_TOTAL_FIELDS_LIMIT=4000
path: /opt/mozdef/docker/compose/cloudy_mozdef.env
- content: |
client_id=${OIDCClientId}
@@ -153,7 +156,7 @@ Resources:
- systemctl start rsyslog
- grep "${EFSID}" /etc/fstab >/dev/null || echo "${EFSID}:/ ${EFSMountPoint} efs tls,_netdev" >> /etc/fstab
- for i in 1 2 3 4 5 6; do mount --verbose --all --types efs defaults && break || sleep 15; done
- cd /opt/mozdef && git pull origin infosec_workweek
- cd /opt/mozdef && git pull origin master
- make -C /opt/mozdef -f /opt/mozdef/Makefile run-cloudy-mozdef
MozDefAutoScaleGroup:
Type: AWS::AutoScaling::AutoScalingGroup

View File

@@ -72,6 +72,7 @@ Resources:
CloudTrailSQSQueueArn: !GetAtt MozDefCloudTrail.Outputs.CloudTrailSQSQueueArn
MozDefSQSQueueArn: !GetAtt MozDefSQS.Outputs.SQSQueueArn
# CloudTrailS3BucketIAMRoleArn we leave empty as we will consume CloudTrail logs from our own account
ESServiceLinkedRoleExists: !GetAtt ESServiceLinkedRoleExists.RoleExists
Tags:
- Key: application
Value: mozdef
@@ -105,6 +106,7 @@ Resources:
TemplateURL: !Join [ '', [ !Ref S3TemplateLocation, mozdef-instance.yml ] ]
MozDefES:
Type: AWS::CloudFormation::Stack
DependsOn: MozDefIAMRoleAndInstanceProfile
Properties:
Parameters:
SubnetIds: !Join [ ',', !Ref PublicSubnetIds ]
@@ -172,9 +174,16 @@ Resources:
Effect: Allow
Action:
- logs:*
- iam:ListRoles
Resource: '*'
GetArrayLengthLambdaFunction:
Type: AWS::Lambda::Function
DependsOn: CloudFormationLambdaIAMRole
# This DependsOn shouldn't be needed because the "Role" value is set to
# "!GetAtt CloudFormationLambdaIAMRole.Arn" but without DependsOn the error
# "Template error: IAM role mozdef-aws-nested-CloudFormationLambdaIAMRole-108UCUPESC6WG doesn't exist"
# occurs on stack creation for this Lambda Function resource. The DependsOn
# prevents the error.
Properties:
Code:
ZipFile: |
@@ -198,3 +207,42 @@ Resources:
Properties:
Array: !Ref PublicSubnetIds
ServiceToken: !GetAtt GetArrayLengthLambdaFunction.Arn
DoesRoleExistLambdaFunction:
Type: AWS::Lambda::Function
DependsOn: CloudFormationLambdaIAMRole
# This DependsOn shouldn't be needed because the "Role" value is set to
# "!GetAtt CloudFormationLambdaIAMRole.Arn" but without DependsOn the error
# "Template error: IAM role mozdef-aws-nested-CloudFormationLambdaIAMRole-108UCUPESC6WG doesn't exist"
# occurs on stack creation for this Lambda Function resource. The DependsOn
# prevents the error.
Properties:
Code:
ZipFile: |
import cfnresponse
import boto3, secrets, string
def handler(event, context):
paginator = boto3.client('iam').get_paginator('list_roles')
args = {'PathPrefix': event['ResourceProperties']['PathPrefix']} if 'PathPrefix' in event['ResourceProperties'] else {}
iterator = paginator.paginate(**args).search(
"Roles[?RoleName == '%s'][]" % event['ResourceProperties']['RoleName'])
response = {'RoleExists': len([x for x in iterator]) > 0}
physical_id = ''.join(
secrets.choice(string.ascii_uppercase + string.digits) for i in
range(13))
cfnresponse.send(event, context, cfnresponse.SUCCESS, response,
"DoesRoleExist-%s" % physical_id)
Handler: index.handler
Runtime: python3.6
Role: !GetAtt CloudFormationLambdaIAMRole.Arn
Tags:
- Key: application
Value: mozdef
- Key: stack
Value: !Ref AWS::StackName
Timeout: 20
ESServiceLinkedRoleExists:
Type: AWS::CloudFormation::CustomResource
Properties:
RoleName: AWSServiceRoleForAmazonElasticsearchService
PathPrefix: '/aws-service-role/es.amazonaws.com/'
ServiceToken: !GetAtt DoesRoleExistLambdaFunction.Arn
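The custom resource above boils down to this standalone check (a sketch; role_exists is an illustrative name, and working boto3 credentials are assumed):
import boto3

def role_exists(role_name, path_prefix='/'):
    # Page through IAM roles under the prefix and JMESPath-filter by name,
    # as the Lambda handler above does.
    paginator = boto3.client('iam').get_paginator('list_roles')
    iterator = paginator.paginate(PathPrefix=path_prefix).search(
        "Roles[?RoleName == '%s'][]" % role_name)
    return any(True for _ in iterator)

# role_exists('AWSServiceRoleForAmazonElasticsearchService',
#             '/aws-service-role/es.amazonaws.com/')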

View File

@@ -29,12 +29,22 @@ function check_img() {
check_img || exit 127
if [ -n "$AWS_CONFIG_FILE" ]; then
config_file_mount="-v ${AWS_CONFIG_FILE}:${AWS_CONFIG_FILE}"
fi
if [ -n "$AWS_SHARED_CREDENTIALS_FILE" ]; then
config_file_mount="${config_file_mount} -v ${AWS_SHARED_CREDENTIALS_FILE}:${AWS_SHARED_CREDENTIALS_FILE}"
fi
dmake_env_file="`mktemp`"
trap "{ rm -f \"$dmake_env_file\"; }" EXIT
env | egrep "^AWS|OIDC_CLIENT_SECRET" > "$dmake_env_file"
exec docker run --rm --name ${CONTAINER_NAME} \
-u $(id -u) \
-v ${AWS_CREDS_DIR}:/root/.aws \
${config_file_mount} \
-v $(pwd):${DOCKER_PROJECT_DIR} \
-e "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}" \
-e "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}" \
-e "AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}" \
-e "AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}" \
--env-file "$dmake_env_file" \
${HUB}/${IMG_NAME}:latest make $@

View File

@@ -30,7 +30,7 @@
"sudo systemctl enable docker",
"sudo mkdir -p /opt/mozdef/",
"sudo git clone https://github.com/mozilla/MozDef /opt/mozdef",
"cd /opt/mozdef && sudo git checkout origin/infosec_workweek"
"cd /opt/mozdef && sudo git checkout origin/master"
]}
]
}

View File

@@ -21,6 +21,7 @@ except ImportError:
quote_url = urllib.quote
import traceback
class DotDict(dict):
'''dict.item notation for dict()'s'''
__getattr__ = dict.__getitem__
@@ -33,222 +34,232 @@ class DotDict(dict):
value = DotDict(value)
self[key] = value
def fatal(msg):
print(msg)
sys.exit(1)
def debug(msg):
sys.stderr.write('+++ {}\n'.format(msg))
#This is from https://auth0.com/docs/api/management/v2#!/Logs/get_logs
#and https://github.com/auth0/auth0-logs-to-logentries/blob/master/index.js (MIT)
log_types=DotDict({
's': {
"event": 'Success Login',
"level": 1 # Info
},
'slo': {
"event": 'Success Logout',
"level": 1 # Info
},
'flo': {
"event": 'Failed Logout',
"level": 3 # Error
},
'seacft': {
"event": 'Success Exchange (Authorization Code for Access Token)',
"level": 1 # Info
},
'feacft': {
"event": 'Failed Exchange (Authorization Code for Access Token)',
"level": 3 # Error
},
'f': {
"event": 'Failed Login',
"level": 3 # Error
},
'w': {
"event": 'Warnings During Login',
"level": 2 # Warning
},
'du': {
"event": 'Deleted User',
"level": 1 # Info
},
'fu': {
"event": 'Failed Login (invalid email/username)',
"level": 3 # Error
},
'fp': {
"event": 'Failed Login (wrong password)',
"level": 3 # Error
},
'fc': {
"event": 'Failed by Connector',
"level": 3 # Error
},
'fco': {
"event": 'Failed by CORS',
"level": 3 # Error
},
'con': {
"event": 'Connector Online',
"level": 1 # Info
},
'coff': {
"event": 'Connector Offline',
"level": 3 # Error
},
'fcpro': {
"event": 'Failed Connector Provisioning',
"level": 4 # Critical
},
'ss': {
"event": 'Success Signup',
"level": 1 # Info
},
'fs': {
"event": 'Failed Signup',
"level": 3 # Error
},
'cs': {
"event": 'Code Sent',
"level": 0 # Debug
},
'cls': {
"event": 'Code/Link Sent',
"level": 0 # Debug
},
'sv': {
"event": 'Success Verification Email',
"level": 0 # Debug
},
'fv': {
"event": 'Failed Verification Email',
"level": 0 # Debug
},
'scp': {
"event": 'Success Change Password',
"level": 1 # Info
},
'fcp': {
"event": 'Failed Change Password',
"level": 3 # Error
},
'sce': {
"event": 'Success Change Email',
"level": 1 # Info
},
'fce': {
"event": 'Failed Change Email',
"level": 3 # Error
},
'scu': {
"event": 'Success Change Username',
"level": 1 # Info
},
'fcu': {
"event": 'Failed Change Username',
"level": 3 # Error
},
'scpn': {
"event": 'Success Change Phone Number',
"level": 1 # Info
},
'fcpn': {
"event": 'Failed Change Phone Number',
"level": 3 # Error
},
'svr': {
"event": 'Success Verification Email Request',
"level": 0 # Debug
},
'fvr': {
"event": 'Failed Verification Email Request',
"level": 3 # Error
},
'scpr': {
"event": 'Success Change Password Request',
"level": 0 # Debug
},
'fcpr': {
"event": 'Failed Change Password Request',
"level": 3 # Error
},
'fn': {
"event": 'Failed Sending Notification',
"level": 3 # Error
},
'sapi': {
"event": 'API Operation',
"level": 1 # Info
},
'fapi': {
"event": 'Failed API Operation',
"level": 3 # Error
},
'limit_wc': {
"event": 'Blocked Account',
"level": 4 # Critical
},
'limit_ui': {
"event": 'Too Many Calls to /userinfo',
"level": 4 # Critical
},
'api_limit': {
"event": 'Rate Limit On API',
"level": 4 # Critical
},
'sdu': {
"event": 'Successful User Deletion',
"level": 1 # Info
},
'fdu': {
"event": 'Failed User Deletion',
"level": 3 # Error
},
'sd': {
"event": 'Success Delegation',
"level": 3 # error
},
'fd': {
"event": 'Failed Delegation',
"level": 3 # error
},
'seccft': {
"event": "Success Exchange (Client Credentials for Access Token)",
"level": 1
},
'feccft': {
"event": "Failed Exchange (Client Credentials for Access Token)",
"level": 1
},
'fsa': {
"event": "Failed Silent Auth",
"level": 3
},
'ssa': {
"event": "Success Silent Auth",
"level": 1
},
'fepft': {
"event": "Failed Exchange (Password for Access Token)",
"level": 3
},
'limit_mu': {
"event": "Blocked IP Address",
"level": 3
},
'sepft': {
"event": "Success Exchange (Password for Access Token)",
"level": 1
},
'fcoa': {
"event": "Failed Cross Origin Authentication",
"level": 3
}
# This is from https://auth0.com/docs/api/management/v2#!/Logs/get_logs
# and https://github.com/auth0/auth0-logs-to-logentries/blob/master/index.js (MIT)
# levels
# 0 = Debug
# 1 = Info
# 2 = Warning
# 3 = Error
# 4 = Critical
log_types = DotDict({
's': {
"event": 'Success Login',
"level": 1
},
'slo': {
"event": 'Success Logout',
"level": 1
},
'flo': {
"event": 'Failed Logout',
"level": 3
},
'seacft': {
"event": 'Success Exchange (Authorization Code for Access Token)',
"level": 1
},
'feacft': {
"event": 'Failed Exchange (Authorization Code for Access Token)',
"level": 3
},
'f': {
"event": 'Failed Login',
"level": 3
},
'w': {
"event": 'Warnings During Login',
"level": 2
},
'du': {
"event": 'Deleted User',
"level": 1
},
'fu': {
"event": 'Failed Login (invalid email/username)',
"level": 3
},
'fp': {
"event": 'Failed Login (wrong password)',
"level": 3
},
'fc': {
"event": 'Failed by Connector',
"level": 3
},
'fco': {
"event": 'Failed by CORS',
"level": 3
},
'con': {
"event": 'Connector Online',
"level": 1
},
'coff': {
"event": 'Connector Offline',
"level": 3
},
'fcpro': {
"event": 'Failed Connector Provisioning',
"level": 4
},
'ss': {
"event": 'Success Signup',
"level": 1
},
'fs': {
"event": 'Failed Signup',
"level": 3
},
'cs': {
"event": 'Code Sent',
"level": 0
},
'cls': {
"event": 'Code/Link Sent',
"level": 0
},
'sv': {
"event": 'Success Verification Email',
"level": 0
},
'fv': {
"event": 'Failed Verification Email',
"level": 0
},
'scp': {
"event": 'Success Change Password',
"level": 1
},
'fcp': {
"event": 'Failed Change Password',
"level": 3
},
'sce': {
"event": 'Success Change Email',
"level": 1
},
'fce': {
"event": 'Failed Change Email',
"level": 3
},
'scu': {
"event": 'Success Change Username',
"level": 1
},
'fcu': {
"event": 'Failed Change Username',
"level": 3
},
'scpn': {
"event": 'Success Change Phone Number',
"level": 1
},
'fcpn': {
"event": 'Failed Change Phone Number',
"level": 3
},
'svr': {
"event": 'Success Verification Email Request',
"level": 0
},
'fvr': {
"event": 'Failed Verification Email Request',
"level": 3
},
'scpr': {
"event": 'Success Change Password Request',
"level": 0
},
'fcpr': {
"event": 'Failed Change Password Request',
"level": 3
},
'fn': {
"event": 'Failed Sending Notification',
"level": 3
},
'sapi': {
"event": 'API Operation',
"level": 1
},
'fapi': {
"event": 'Failed API Operation',
"level": 3
},
'limit_wc': {
"event": 'Blocked Account',
"level": 4
},
'limit_ui': {
"event": 'Too Many Calls to /userinfo',
"level": 4
},
'api_limit': {
"event": 'Rate Limit On API',
"level": 4
},
'sdu': {
"event": 'Successful User Deletion',
"level": 1
},
'fdu': {
"event": 'Failed User Deletion',
"level": 3
},
'sd': {
"event": 'Success Delegation',
"level": 3
},
'fd': {
"event": 'Failed Delegation',
"level": 3
},
'seccft': {
"event": "Success Exchange (Client Credentials for Access Token)",
"level": 1
},
'feccft': {
"event": "Failed Exchange (Client Credentials for Access Token)",
"level": 1
},
'fsa': {
"event": "Failed Silent Auth",
"level": 3
},
'ssa': {
"event": "Success Silent Auth",
"level": 1
},
'fepft': {
"event": "Failed Exchange (Password for Access Token)",
"level": 3
},
'limit_mu': {
"event": "Blocked IP Address",
"level": 3
},
'sepft': {
"event": "Success Exchange (Password for Access Token)",
"level": 1
},
'fcoa': {
"event": "Failed Cross Origin Authentication",
"level": 3
}
})
def process_msg(mozmsg, msg):
"""Normalization function for auth0 msg.
@mozmsg: MozDefEvent (mozdef message)
@@ -339,6 +350,7 @@ def process_msg(mozmsg, msg):
return mozmsg
def load_state(fpath):
"""Load last msg id we've read from auth0 (log index).
@fpath string (path to state file)
@@ -351,6 +363,7 @@ def load_state(fpath):
pass
return state
def save_state(fpath, state):
"""Saves last msg id we've read from auth0 (log index).
@fpath string (path to state file)
@@ -359,6 +372,7 @@ def save_state(fpath, state):
with open(fpath, mode='w') as fd:
fd.write(str(state)+'\n')
def byteify(input):
"""Convert input to ascii"""
if isinstance(input, dict):
@@ -371,6 +385,7 @@ def byteify(input):
else:
return input
def fetch_auth0_logs(config, headers, fromid):
lastid = fromid
@@ -421,18 +436,21 @@ def fetch_auth0_logs(config, headers, fromid):
else:
return (0, 0, 0, lastid)
def main():
#Configuration loading
# Configuration loading
config_location = os.path.dirname(sys.argv[0]) + '/' + 'auth02mozdef.json'
with open(config_location) as fd:
config = DotDict(hjson.load(fd))
if config == None:
if config is None:
print("No configuration file 'auth02mozdef.json' found.")
sys.exit(1)
headers = {'Authorization': 'Bearer {}'.format(config.auth0.token),
'Accept': 'application/json'}
headers = {
'Authorization': 'Bearer {}'.format(config.auth0.token),
'Accept': 'application/json'
}
fromid = load_state(config.state_file)
# Auth0 will interpret a 0 state as an error on our hosted instance, but will accept an empty parameter "as if it was 0"
@@ -449,5 +467,6 @@ def main():
save_state(config.state_file, lastid)
if __name__ == "__main__":
main()
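A sketch of how the log_types table is consumed (the 'fp' code is from the table above; the fallback entry is hypothetical):
entry = log_types.get('fp', {'event': 'Unknown', 'level': 1})
event_name, level = entry['event'], entry['level']
# -> 'Failed Login (wrong password)', 3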

View File

@@ -71,8 +71,7 @@ def main():
logger.debug('snapshot repo registered')
# do the actual snapshotting
for (index, dobackup, rotation, pruning) in zip(options.indices,
options.dobackup, options.rotation, options.pruning):
for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
if dobackup == '1':
index_to_snapshot = index
if rotation == 'daily':
@@ -84,9 +83,11 @@ def main():
snapshot_config = {
'indices': index_to_snapshot
}
epoch=calendar.timegm(datetime.utcnow().utctimetuple())
r = requests.put('{0}/_snapshot/s3backup/{1}-{2}?wait_for_completion=true'.format(esserver,index_to_snapshot,epoch),
data=json.dumps(snapshot_config))
epoch = calendar.timegm(datetime.utcnow().utctimetuple())
r = requests.put(
'{0}/_snapshot/s3backup/{1}-{2}?wait_for_completion=true'.format(esserver, index_to_snapshot, epoch),
data=json.dumps(snapshot_config)
)
if 'status' in r.json():
logger.error('Error snapshotting %s: %s' % (index_to_snapshot, r.json()))
else:
@@ -121,6 +122,7 @@ echo "DONE!"
except Exception as e:
logger.error("Unhandled exception, terminating: %r"%e)
def initConfig():
# output our log to stdout or syslog
options.output = getConfig(
@@ -187,6 +189,7 @@ def initConfig():
options.configfile
)
if __name__ == '__main__':
parser = OptionParser()
defaultconfigfile = sys.argv[0].replace('.py', '.conf')

View File

@@ -135,18 +135,30 @@ def searchMongoAlerts(mozdefdb):
# aggregate IPv4 addresses in the most recent alerts
# to find common attackers.
ipv4TopHits = alerts.aggregate([
{"$sort": {"utcepoch":-1}}, # reverse sort the current alerts
{"$limit": 100}, # most recent 100
{"$match": {"events.documentsource.details.sourceipaddress":{"$exists": True}}}, # must have an ip address
{"$match": {"attackerid":{"$exists": False}}}, # must not be already related to an attacker
{"$unwind":"$events"}, # make each event into it's own doc
{"$project":{"_id":0,
"sourceip":"$events.documentsource.details.sourceipaddress"}}, # emit the source ip only
{"$group": {"_id": "$sourceip", "hitcount": {"$sum": 1}}}, # count by ip
{"$match":{"hitcount":{"$gt":5}}}, # limit to those with X observances
{"$sort": SON([("hitcount", -1), ("_id", -1)])}, # sort
{"$limit": 10} # top 10
])
# reverse sort the current alerts
{"$sort": {"utcepoch": -1}},
# most recent 100
{"$limit": 100},
# must have an ip address
{"$match": {"events.documentsource.details.sourceipaddress": {"$exists": True}}},
# must not be already related to an attacker
{"$match": {"attackerid": {"$exists": False}}},
# make each event into its own doc
{"$unwind": "$events"},
{"$project": {
"_id": 0,
# emit the source ip only
"sourceip": "$events.documentsource.details.sourceipaddress"
}},
# count by ip
{"$group": {"_id": "$sourceip", "hitcount": {"$sum": 1}}},
# limit to those with X observances
{"$match": {"hitcount": {"$gt": 5}}},
# sort
{"$sort": SON([("hitcount", -1), ("_id", -1)])},
# top 10
{"$limit": 10}
])
for ip in ipv4TopHits:
# sanity check ip['_id'] which should be the ipv4 address
if isIPv4(ip['_id']) and ip['_id'] not in netaddr.IPSet(['0.0.0.0']):
@@ -243,8 +255,8 @@ def searchMongoAlerts(mozdefdb):
# and if they are all the same category
# auto-categorize the attacker
matchingalerts = alerts.find(
{"attackerid":attacker['_id']}
).sort('utcepoch', -1).limit(50)
{"attackerid": attacker['_id']}
).sort('utcepoch', -1).limit(50)
# summarize the alert categories
# returns list of tuples: [(u'bruteforce', 8)]
categoryCounts= mostCommon(matchingalerts,'category')
@@ -297,9 +309,11 @@ def broadcastAttacker(attacker):
mqproducer,
mqproducer.publish,
max_retries=10)
ensurePublish(mqAlert,
ensurePublish(
mqAlert,
exchange=alertExchange,
routing_key=options.routingkey)
routing_key=options.routingkey
)
except Exception as e:
logger.error('Exception while publishing attacker: {0}'.format(e))
@@ -324,27 +338,29 @@ def genNewAttacker():
return newAttacker
def updateAttackerGeoIP(mozdefdb, attackerID, eventDictionary):
'''given an attacker ID and a dictionary of an elastic search event
look for a valid geoIP in the dict and update the attacker's geo coordinates
'''
# geo ip should be in eventDictionary['details']['sourceipgeolocation']
#"sourceipgeolocation": {
#"city": "Polska",
#"region_code": "73",
#"area_code": 0,
#"time_zone": "Europe/Warsaw",
#"dma_code": 0,
#"metro_code": null,
#"country_code3": "POL",
#"latitude": 52.59309999999999,
#"postal_code": null,
#"longitude": 19.089400000000012,
#"country_code": "PL",
#"country_name": "Poland",
#"continent": "EU"
#logger.debug(eventDictionary)
# "sourceipgeolocation": {
# "city": "Polska",
# "region_code": "73",
# "area_code": 0,
# "time_zone": "Europe/Warsaw",
# "dma_code": 0,
# "metro_code": null,
# "country_code3": "POL",
# "latitude": 52.59309999999999,
# "postal_code": null,
# "longitude": 19.089400000000012,
# "country_code": "PL",
# "country_name": "Poland",
# "continent": "EU"
# }
# logger.debug(eventDictionary)
if 'details' in eventDictionary.keys():
if 'sourceipgeolocation' in eventDictionary['details']:
attackers=mozdefdb['attackers']
@@ -377,10 +393,12 @@ def updateMongoWithESEvents(mozdefdb, results):
# potentially with a max mask value (i.e. asn is /8, limit attackers to /24)
sourceIP.prefixlen = 24
if not sourceIP.ip.is_loopback() and not sourceIP.ip.is_private() and not sourceIP.ip.is_reserved():
esrecord = dict(documentid=r['_id'],
documenttype=r['_type'],
documentindex=r['_index'],
documentsource=r['_source'])
esrecord = dict(
documentid=r['_id'],
documenttype=r['_type'],
documentindex=r['_index'],
documentsource=r['_source']
)
logger.debug('Trying to find existing attacker at ' + str(sourceIP))
attacker = attackers.find_one({'indicators.ipv4address': str(sourceIP)})
@@ -392,7 +410,7 @@ def updateMongoWithESEvents(mozdefdb, results):
logger.debug('Creating new attacker from ' + str(sourceIP))
newAttacker = genNewAttacker()
#expand the source ip to a /24 for the indicator match.
# expand the source ip to a /24 for the indicator match.
sourceIP.prefixlen = 24
# str sourceIP to get the ip/cidr rather than netblock cidr.
newAttacker['indicators'].append(dict(ipv4address=str(sourceIP)))
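A standalone sketch of the /24 indicator expansion used above (address hypothetical):
import netaddr
sourceIP = netaddr.IPNetwork('203.0.113.77')   # defaults to a /32
sourceIP.prefixlen = 24
str(sourceIP)  # '203.0.113.77/24' -- the ip/cidr form, not the 203.0.113.0/24 netblock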

View File

@@ -78,7 +78,7 @@ def esSearch(es, macassignments=None):
Expecting an event like: user: username@somewhere.com; mac: 5c:f9:38:b1:de:cf; author reason: roamed session; ssid: ANSSID; AP 46/2\n
'''
usermacre=re.compile(r'''user: (?P<username>.*?); mac: (?P<macaddress>.*?); ''',re.IGNORECASE)
correlations={} # list of dicts to populate hits we find
correlations={}
search_query = SearchQuery(minutes=options.correlationminutes)
search_query.add_must(TermMatch('details.program', 'AUTHORIZATION-SUCCESS'))
@@ -105,18 +105,19 @@ def esSearch(es, macassignments=None):
except ElasticsearchBadServer:
logger.error('Elastic Search server could not be reached, check network connectivity')
def esStoreCorrelations(es, correlations):
for c in correlations:
event=dict(
utctimestamp=correlations[c]['utctimestamp'],
summary=c,
details=dict(
username=correlations[c]['username'],
macaddress=correlations[c]['macaddress'],
entity=correlations[c]['entity']
),
category='indicators'
)
utctimestamp=correlations[c]['utctimestamp'],
summary=c,
details=dict(
username=correlations[c]['username'],
macaddress=correlations[c]['macaddress'],
entity=correlations[c]['entity']
),
category='indicators'
)
try:
es.save_object(index='intelligence', doc_id=getDocID(c), doc_type='usernamemacaddress', body=json.dumps(event))
except Exception as e:
@@ -164,15 +165,15 @@ def initConfig():
# default time period in minutes to look back in time for the aggregation
options.correlationminutes = getConfig('correlationminutes',
150,
options.configfile)
150,
options.configfile)
# default location of the OUI file from IEEE for resolving mac prefixes
# Expects the OUI file from IEEE:
# wget http://www.ieee.org/netstorage/standards/oui.txt
options.ouifilename = getConfig('ouifilename',
'oui.txt',
options.configfile)
'oui.txt',
options.configfile)
if __name__ == '__main__':
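A sketch of the usermacre pattern above applied to the sample summary from the esSearch docstring:
import re
usermacre = re.compile(r'''user: (?P<username>.*?); mac: (?P<macaddress>.*?); ''', re.IGNORECASE)
m = usermacre.search('user: username@somewhere.com; mac: 5c:f9:38:b1:de:cf; author reason: roamed session; ssid: ANSSID')
m.group('username')    # 'username@somewhere.com'
m.group('macaddress')  # '5c:f9:38:b1:de:cf'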

View File

@@ -27,6 +27,7 @@ logger = logging.getLogger(sys.argv[0])
def loggerTimeStamp(self, record, datefmt=None):
return toUTC(datetime.now()).isoformat()
def initLogger():
logger.level = logging.INFO
formatter = logging.Formatter(
@@ -41,9 +42,11 @@ def initLogger():
sh.setFormatter(formatter)
logger.addHandler(sh)
def genMeteorID():
return('%024x' % random.randrange(16**24))
def isFQDN(fqdn):
try:
# We could resolve FQDNs here, but that could tip our hand and it's
@@ -54,6 +57,7 @@ def isFQDN(fqdn):
except:
return False
def parse_fqdn_whitelist(fqdn_whitelist_location):
fqdns = []
with open(fqdn_whitelist_location, "r") as text_file:
@@ -63,6 +67,7 @@ def parse_fqdn_whitelist(fqdn_whitelist_location):
fqdns.append(line)
return fqdns
def main():
logger.debug('starting')
logger.debug(options)
@@ -78,18 +83,18 @@ def main():
fqdnblocklist.delete_many({'dateExpiring': {"$lte": datetime.utcnow()-timedelta(days=options.expireage)}})
# Lastly, export the combined blocklist
fqdnCursor=mozdefdb['fqdnblocklist'].aggregate([
{"$sort": {"dateAdded": -1}},
{"$match": {"address": {"$exists": True}}},
{"$match":
{"$or":[
{"dateExpiring": {"$gte": datetime.utcnow()}},
{"dateExpiring": {"$exists": False}},
]},
},
{"$project":{"address":1}},
{"$limit": options.fqdnlimit}
])
fqdnCursor = mozdefdb['fqdnblocklist'].aggregate([
{"$sort": {"dateAdded": -1}},
{"$match": {"address": {"$exists": True}}},
{"$match": {
"$or": [
{"dateExpiring": {"$gte": datetime.utcnow()}},
{"dateExpiring": {"$exists": False}},
]},
},
{"$project": {"address": 1}},
{"$limit": options.fqdnlimit}
])
FQDNList=[]
for fqdn in fqdnCursor:
if fqdn not in options.fqdnwhitelist:
@@ -161,6 +166,7 @@ def s3_upload_file(file_path, bucket_name, key_name):
print("URL: {}".format(url))
return url
if __name__ == '__main__':
parser = OptionParser()
parser.add_option(

View File

@@ -27,6 +27,7 @@ logger = logging.getLogger(sys.argv[0])
def loggerTimeStamp(self, record, datefmt=None):
return toUTC(datetime.now()).isoformat()
def initLogger():
logger.level = logging.INFO
formatter = logging.Formatter(
@@ -41,9 +42,11 @@ def initLogger():
sh.setFormatter(formatter)
logger.addHandler(sh)
def genMeteorID():
return('%024x' % random.randrange(16**24))
def isIPv4(ip):
try:
# netaddr on its own considers 1 and 0 to be valid_ipv4
@@ -58,12 +61,14 @@ def isIPv4(ip):
except:
return False
def isIPv6(ip):
try:
return netaddr.valid_ipv6(ip)
except:
return False
def aggregateAttackerIPs(attackers):
iplist = []
@@ -100,6 +105,7 @@ def aggregateAttackerIPs(attackers):
logger.debug('invalid:' + ip)
return iplist
def parse_network_whitelist(network_whitelist_location):
networks = []
with open(network_whitelist_location, "r") as text_file:
@@ -109,6 +115,7 @@ def parse_network_whitelist(network_whitelist_location):
networks.append(line)
return networks
def main():
logger.debug('starting')
logger.debug(options)
@@ -143,18 +150,18 @@ def main():
'dateAdded': datetime.utcnow()})
# Lastly, export the combined blocklist
ipCursor=mozdefdb['ipblocklist'].aggregate([
{"$sort": {"dateAdded": -1}},
{"$match": {"address": {"$exists": True}}},
{"$match":
{"$or":[
{"dateExpiring": {"$gte": datetime.utcnow()}},
{"dateExpiring": {"$exists": False}},
]},
},
{"$project":{"address":1}},
{"$limit": options.iplimit}
])
ipCursor = mozdefdb['ipblocklist'].aggregate([
{"$sort": {"dateAdded": -1}},
{"$match": {"address": {"$exists": True}}},
{"$match": {
"$or": [
{"dateExpiring": {"$gte": datetime.utcnow()}},
{"dateExpiring": {"$exists": False}},
]},
},
{"$project": {"address": 1}},
{"$limit": options.iplimit}
])
IPList=[]
for ip in ipCursor:
IPList.append(ip['address'])
@@ -231,6 +238,7 @@ def s3_upload_file(file_path, bucket_name, key_name):
print("URL: {}".format(url))
return url
if __name__ == '__main__':
parser = OptionParser()
parser.add_option(

View File

@@ -110,9 +110,6 @@
"sourceport" : {
"index" : "not_analyzed",
"type" : "long"
},
"apiversion" : {
"type" : "keyword"
}
}
},

View File

@@ -15,8 +15,10 @@ except ImportError:
class UTC(tzinfo):
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
@@ -26,6 +28,7 @@ import duo_client
import mozdef_client as mozdef
import pickle
def normalize(details):
# Normalizes fields to conform to http://mozdef.readthedocs.io/en/latest/usage.html#mandatory-fields
# This is mainly used for common field names to put inside the details structure
@@ -42,6 +45,7 @@ def normalize(details):
normalized[f] = details[f]
return normalized
def process_events(mozmsg, duo_events, etype, state):
# There are some key fields that we use as MozDef fields, those are set to "noconsume"
# After processing these fields, we just pour everything into the "details" fields of Mozdef, except for the
@@ -67,8 +71,8 @@ def process_events(mozmsg, duo_events, etype, state):
if i in noconsume:
continue
# Duo client doesn't translate inner dicts to dicts for some reason - its just a string, so we have to process and parse it
if e[i] != None and type(e[i]) == str and e[i].startswith('{'):
# Duo client doesn't translate inner dicts to dicts for some reason - its just a string, so we have to process and parse it
if e[i] is not None and type(e[i]) == str and e[i].startswith('{'):
j = json.loads(e[i])
for x in j:
details[x] = j[x]
@@ -93,6 +97,7 @@ def process_events(mozmsg, duo_events, etype, state):
pass
return state
def main():
try:
state = pickle.load(open(options.statepath, 'rb'))
@@ -119,6 +124,7 @@ def main():
pickle.dump(state, open(options.statepath, 'wb'))
def initConfig():
options.IKEY = getConfig('IKEY', '', options.configfile)
options.SKEY = getConfig('SKEY', '', options.configfile)
@@ -129,6 +135,7 @@ def initConfig():
options.statepath = getConfig('statepath', '', options.configfile)
options.update_tags = getConfig('addtag', '', options.configfile)
if __name__ == '__main__':
parser = OptionParser()
defaultconfigfile = sys.argv[0].replace('.py', '.conf')

View File

@@ -118,8 +118,8 @@ def initConfig():
# default time period in minutes to look back in time for the aggregation
options.aggregationminutes = getConfig('aggregationminutes',
15,
options.configfile)
15,
options.configfile)
# configure the index to save events to
options.index = getConfig('index', 'mozdefstate', options.configfile)

View File

@@ -170,6 +170,7 @@ def initConfig():
default_mapping_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'mozdefStateDefaultMappingTemplate.json')
options.default_mapping_file = getConfig('default_mapping_file', default_mapping_location, options.configfile)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option(

View File

@@ -34,8 +34,12 @@ def initLogger():
if options.output == 'syslog':
logger.addHandler(
SysLogHandler(
address=(options.sysloghostname,
options.syslogport)))
address=(
options.sysloghostname,
options.syslogport
)
)
)
else:
sh = logging.StreamHandler(sys.stderr)
sh.setFormatter(formatter)
@@ -64,6 +68,7 @@ def writeFrontendStats(data, mongo):
del host['_source']['details'][key]
mongo.healthfrontend.insert(host['_source'])
def getSqsStats(es):
search_query = SearchQuery(minutes=15)
search_query.add_must([
@@ -164,13 +169,13 @@ def initConfig():
options.output = getConfig('output', 'stdout', options.configfile)
# syslog hostname
options.sysloghostname = getConfig('sysloghostname', 'localhost',
options.configfile)
options.configfile)
# syslog port
options.syslogport = getConfig('syslogport', 514, options.configfile)
# elastic search server settings
options.esservers = list(getConfig('esservers', 'http://localhost:9200',
options.configfile).split(','))
options.configfile).split(','))
options.mongohost = getConfig('mongohost', 'localhost', options.configfile)
options.mongoport = getConfig('mongoport', 3001, options.configfile)

View File

@@ -39,26 +39,26 @@ class State:
try:
with open(self.filename, 'r') as f:
self.data = json.load(f)
iterator = iter(self.data)
except IOError:
self.data = {}
except ValueError:
logger.error("%s state file found but isn't a recognized json format" %
self.filename)
logger.error("%s state file found but isn't a recognized json format" % self.filename)
raise
except TypeError:
logger.error("%s state file found and parsed but it doesn't contain an iterable object" %
self.filename)
logger.error("%s state file found and parsed but it doesn't contain an iterable object" % self.filename)
raise
def write_state_file(self):
'''Write the self.data value into the state file'''
with open(self.filename, 'w') as f:
json.dump(self.data,
f,
sort_keys=True,
indent=4,
separators=(',', ': '))
json.dump(
self.data,
f,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
def main():
if options.output=='syslog':

View file

@ -42,8 +42,7 @@ def esPruneIndexes():
es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
indices = es.get_indices()
# do the pruning
for (index, dobackup, rotation, pruning) in zip(options.indices,
options.dobackup, options.rotation, options.pruning):
for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
try:
if pruning != '0':
index_to_prune = index
@ -110,6 +109,7 @@ def initConfig():
options.configfile).split(',')
)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c",

View file

@ -17,6 +17,7 @@ from configlib import getConfig, OptionParser
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from utils import es as es_module
def initConfig():
options.esservers = list(getConfig(
'esservers',
@ -34,6 +35,7 @@ def initConfig():
options.configfile).split(',')
)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c",

View file

@ -31,6 +31,7 @@ from mozdef_util.utilities.is_cef import isCEF
from mozdef_util.utilities.logger import logger, initLogger
from mozdef_util.elasticsearch_client import ElasticsearchClient, ElasticsearchBadServer, ElasticsearchInvalidIndex, ElasticsearchException
def getDocID(sqsregionidentifier):
# create a hash to use as the ES doc id
# hostname plus salt as doctype.latest
@ -38,6 +39,7 @@ def getDocID(sqsregionidentifier):
hash.update('{0}.mozdefhealth.latest'.format(sqsregionidentifier))
return hash.hexdigest()
def getQueueSizes():
logger.debug('starting')
logger.debug(options)
@ -114,11 +116,13 @@ def getQueueSizes():
# except Exception as e:
# logger.error("Exception %r when gathering health and status " % e)
def main():
logger.debug('Starting')
logger.debug(options)
getQueueSizes()
def initConfig():
# aws options
options.accesskey = getConfig('accesskey', '', options.configfile)
@ -133,6 +137,7 @@ def initConfig():
options.index = getConfig('index', 'mozdefstate', options.configfile)
options.account = getConfig('account', '', options.configfile)
if __name__ == '__main__':
# configure ourselves
parser = OptionParser()

View file

@ -11,8 +11,7 @@ import sys
import os
from configlib import getConfig, OptionParser
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../lib'))
from utilities.logger import logger, initLogger
from mozdef_util.utilities.logger import logger, initLogger
def download_generic_alerts(repo_url, save_location, deploy_key):

View file

@ -24,6 +24,7 @@ def fetch_ip_list(aws_key_id, aws_secret_key, s3_bucket, ip_list_filename):
contents = ip_list_key.get_contents_as_string().rstrip()
return contents.split("\n")
def save_ip_list(save_path, ips):
ip_list_contents = '\n'.join(ips)
logger.debug("Saving ip list")
@ -34,6 +35,7 @@ def save_ip_list(save_path, ips):
with open(save_path, "w+") as text_file:
text_file.write(ip_list_contents)
def main():
logger.debug('Starting')
logger.debug(options)
@ -49,6 +51,7 @@ def main():
raise LookupError('IP List contains less than ' + str(options.ips_list_threshold) + ' entries...something is probably up here.')
save_ip_list(options.local_ip_list_path, ips)
def initConfig():
# output our log to stdout or syslog
options.output = getConfig('output', 'stdout', options.configfile)
@ -64,6 +67,7 @@ def initConfig():
options.ips_list_threshold = getConfig('ips_list_threshold', 20, options.configfile)
options.manual_additions = getConfig('manual_additions', '', options.configfile).split(',')
if __name__ == '__main__':
parser = OptionParser()
parser.add_option(

View file

@ -24,6 +24,7 @@ from datetime import datetime
from os import stat
from os.path import exists, getsize
class MozDefError(Exception):
def __init__(self, msg):
self.msg = msg
@ -163,6 +164,7 @@ def main():
hostname=socket.gethostname()
)
def getConfig(optionname, thedefault, configfile):
"""read an option from a config file or set a default
send 'thedefault' as the data class you want to get a string back

View file

@ -17,6 +17,8 @@ RUN echo -n "PS1=\"[deploy-shell][\u@\h \W]\$ \"" >> /root/.bashrc
# Setup a home for deployment
RUN mkdir -p /opt/mozdef
RUN mkdir -p /.aws/cli/cache
RUN chown --recursive 1000:1000 /.aws/cli/cache
# Force this as the entrypoint
WORKDIR /opt/mozdef

View file

@ -154,7 +154,7 @@ services:
networks:
- default
mq_eventtask:
image: mozdef/mozdef_mq_eventtask
image: mozdef/mozdef_mq_worker
env_file:
- cloudy_mozdef.env
restart: always
@ -170,7 +170,7 @@ services:
volumes:
- geolite_db:/opt/mozdef/envs/mozdef/data/
mq_cloudtrail:
image: mozdef/mozdef_mq_eventtask
image: mozdef/mozdef_mq_worker
env_file:
- cloudy_mozdef.env
- cloudy_mozdef_mq_cloudtrail.env

View file

@ -1,35 +0,0 @@
---
version: '2.2'
services:
nginx:
image: mozdef/mozdef_nginx
kibana:
image: mozdef/mozdef_kibana
mongodb:
image: mozdef/mozdef_mongodb
elasticsearch:
image: mozdef/mozdef_elasticsearch
rabbitmq:
image: mozdef/mozdef_rabbitmq
base:
image: mozdef/mozdef_base
bootstrap:
image: mozdef/mozdef_bootstrap
alertplugins:
image: mozdef/mozdef_alertplugins
alerts:
image: mozdef/mozdef_alerts
cron:
image: mozdef/mozdef_cron
loginput:
image: mozdef/mozdef_loginput
mq_eventtask:
image: mozdef/mozdef_mq_eventtask
meteor:
image: mozdef/mozdef_meteor
rest:
image: mozdef/mozdef_rest
syslog:
image: mozdef/mozdef_syslog
tester:
image: mozdef/mozdef_tester

View file

@ -1,67 +0,0 @@
---
version: '2.2'
services:
base:
build:
context: ../../
dockerfile: docker/compose/mozdef_base/Dockerfile
nginx:
build:
context: ../../
dockerfile: docker/compose/nginx/Dockerfile
kibana:
build:
context: ../../
dockerfile: docker/compose/kibana/Dockerfile
mongodb:
build:
context: ../../
dockerfile: docker/compose/mongodb/Dockerfile
elasticsearch:
build:
context: ../../
dockerfile: docker/compose/elasticsearch/Dockerfile
rabbitmq:
build:
context: ../../
dockerfile: docker/compose/rabbitmq/Dockerfile
bootstrap:
build:
context: ../../
dockerfile: docker/compose/mozdef_bootstrap/Dockerfile
alertplugins:
build:
context: ../../
dockerfile: docker/compose/mozdef_alertplugins/Dockerfile
alerts:
build:
context: ../../
dockerfile: docker/compose/mozdef_alerts/Dockerfile
cron:
build:
context: ../../
dockerfile: docker/compose/mozdef_cron/Dockerfile
loginput:
build:
context: ../../
dockerfile: docker/compose/mozdef_loginput/Dockerfile
mq_eventtask:
build:
context: ../../
dockerfile: docker/compose/mozdef_mq_eventtask/Dockerfile
meteor:
build:
context: ../../
dockerfile: docker/compose/mozdef_meteor/Dockerfile
rest:
build:
context: ../../
dockerfile: docker/compose/mozdef_rest/Dockerfile
syslog:
build:
context: ../../
dockerfile: docker/compose/mozdef_syslog/Dockerfile
tester:
build:
context: ../../
dockerfile: docker/compose/tester/Dockerfile

View file

@ -1,26 +1,63 @@
---
version: '3.0'
version: '3.7'
services:
elasticsearch:
image: mozdef/mozdef_elasticsearch
build:
context: ../../
dockerfile: docker/compose/elasticsearch/Dockerfile
cache_from:
- mozdef_elasticsearch:latest
- mozdef_base:latest
- mozdef/mozdef_elasticsearch
- mozdef/mozdef_base
depends_on:
- base
restart: always
command: bin/elasticsearch
ports:
- 9200:9200
# ports:
# - 9200:9200
networks:
- default
rabbitmq:
image: mozdef/mozdef_rabbitmq
build:
context: ../../
dockerfile: docker/compose/rabbitmq/Dockerfile
cache_from:
- mozdef_rabbitmq:latest
- mozdef_base:latest
- mozdef/mozdef_rabbitmq
- mozdef/mozdef_base
depends_on:
- base
restart: always
command: rabbitmq-server
ports:
- 5672:5672
# ports:
# - 5672:5672
networks:
- default
base:
image: mozdef/mozdef_base
build:
context: ../../
dockerfile: docker/compose/mozdef_base/Dockerfile
cache_from:
- mozdef_base:latest
- mozdef/mozdef_base
tester:
image: mozdef/mozdef_tester
build:
context: ../../
dockerfile: docker/compose/tester/Dockerfile
cache_from:
- mozdef_tester:latest
- mozdef_base:latest
- mozdef/mozdef_tester
- mozdef/mozdef_base
depends_on:
- base
networks:
- default
networks:
default:

View file

@ -1,7 +1,14 @@
---
version: '2.2'
version: '3.7'
services:
nginx:
image: mozdef/mozdef_nginx
build:
context: ../../
dockerfile: docker/compose/nginx/Dockerfile
cache_from:
- mozdef/mozdef_nginx
- mozdef_nginx:latest
restart: always
command: /usr/sbin/nginx
depends_on:
@ -15,6 +22,13 @@ services:
networks:
- default
mongodb:
image: mozdef/mozdef_mongodb
build:
context: ../../
dockerfile: docker/compose/mongodb/Dockerfile
cache_from:
- mozdef/mozdef_mongodb
- mozdef_mongodb:latest
restart: always
command: /usr/bin/mongod --smallfiles --config /etc/mongod.conf
volumes:
@ -22,6 +36,13 @@ services:
networks:
- default
kibana:
image: mozdef/mozdef_kibana
build:
context: ../../
dockerfile: docker/compose/kibana/Dockerfile
cache_from:
- mozdef/mozdef_kibana
- mozdef_kibana:latest
restart: always
command: bin/kibana --elasticsearch=http://elasticsearch:9200
depends_on:
@ -29,6 +50,13 @@ services:
networks:
- default
elasticsearch:
image: mozdef/mozdef_elasticsearch
build:
context: ../../
dockerfile: docker/compose/elasticsearch/Dockerfile
cache_from:
- mozdef/mozdef_elasticsearch
- mozdef_elasticsearch:latest
command: bin/elasticsearch
restart: always
volumes:
@ -38,6 +66,13 @@ services:
networks:
- default
rabbitmq:
image: mozdef/mozdef_rabbitmq
build:
context: ../../
dockerfile: docker/compose/rabbitmq/Dockerfile
cache_from:
- mozdef/mozdef_rabbitmq
- mozdef_rabbitmq:latest
restart: always
command: rabbitmq-server
volumes:
@ -50,11 +85,25 @@ services:
# MozDef Specific Containers
base:
image: mozdef/mozdef_base
build:
context: ../../
dockerfile: docker/compose/mozdef_base/Dockerfile
cache_from:
- mozdef/mozdef_base
- mozdef_base:latest
command: bash -c 'su - mozdef -c /opt/mozdef/envs/mozdef/cron/update_geolite_db.sh'
volumes:
- geolite_db:/opt/mozdef/envs/mozdef/data
bootstrap:
command: bash -c 'source /opt/mozdef/envs/python/bin/activate && python initial_setup.py http://elasticsearch:9200 cron/defaultMappingTemplate.json cron/backup.conf'
image: mozdef/mozdef_bootstrap
build:
context: ../../
dockerfile: docker/compose/mozdef_bootstrap/Dockerfile
cache_from:
- mozdef/mozdef_bootstrap
- mozdef_bootstrap:latest
command: bash -c 'while ! timeout 1 bash -c "echo > /dev/tcp/elasticsearch/9200";do sleep 1;done && source /opt/mozdef/envs/python/bin/activate && python initial_setup.py http://elasticsearch:9200 cron/defaultMappingTemplate.json cron/backup.conf'
depends_on:
- base
- elasticsearch
@ -63,8 +112,15 @@ services:
networks:
- default
alertplugins:
image: mozdef/mozdef_alertplugins
build:
context: ../../
dockerfile: docker/compose/mozdef_alertplugins/Dockerfile
cache_from:
- mozdef/mozdef_alertplugins
- mozdef_alertplugins:latest
restart: always
command: bash -c 'sleep 90 && source /opt/mozdef/envs/python/bin/activate && python alert_worker.py -c alert_worker.conf'
command: bash -c 'while ! timeout 1 bash -c "echo > /dev/tcp/elasticsearch/9200";do sleep 1;done && source /opt/mozdef/envs/python/bin/activate && python alert_worker.py -c alert_worker.conf'
depends_on:
- base
- elasticsearch
@ -74,8 +130,15 @@ services:
networks:
- default
alerts:
image: mozdef/mozdef_alerts
build:
context: ../../
dockerfile: docker/compose/mozdef_alerts/Dockerfile
cache_from:
- mozdef/mozdef_alerts
- mozdef_alerts:latest
restart: always
command: bash -c 'sleep 90 && source /opt/mozdef/envs/python/bin/activate && celery -A celeryconfig worker --loglevel=info --beat'
command: bash -c 'while ! timeout 1 bash -c "echo > /dev/tcp/elasticsearch/9200";do sleep 1;done && source /opt/mozdef/envs/python/bin/activate && celery -A celeryconfig worker --loglevel=info --beat'
depends_on:
- base
- elasticsearch
@ -96,8 +159,15 @@ services:
# volumes:
# - geolite_db:/opt/mozdef/envs/mozdef/data/
cron:
image: mozdef/mozdef_cron
build:
context: ../../
dockerfile: docker/compose/mozdef_cron/Dockerfile
cache_from:
- mozdef/mozdef_cron
- mozdef_cron:latest
restart: always
command: bash -c 'sleep 90 && crond -n'
command: bash -c 'while ! timeout 1 bash -c "echo > /dev/tcp/elasticsearch/9200";do sleep 1;done && crond -n'
volumes:
- cron:/opt/mozdef/envs/mozdef/cron
- geolite_db:/opt/mozdef/envs/mozdef/data/
@ -110,8 +180,15 @@ services:
networks:
- default
loginput:
image: mozdef/mozdef_loginput
build:
context: ../../
dockerfile: docker/compose/mozdef_loginput/Dockerfile
cache_from:
- mozdef/mozdef_loginput
- mozdef_loginput:latest
restart: always
command: bash -c 'sleep 90 && source /opt/mozdef/envs/python/bin/activate && python index.py -c index.conf'
command: bash -c 'while ! timeout 1 bash -c "echo > /dev/tcp/elasticsearch/9200";do sleep 1;done && source /opt/mozdef/envs/python/bin/activate && python index.py -c index.conf'
depends_on:
- base
- elasticsearch
@ -119,10 +196,16 @@ services:
- bootstrap
networks:
- default
mq_eventtask:
mq_worker:
image: mozdef/mozdef_mq_worker
build:
context: ../../
dockerfile: docker/compose/mozdef_mq_worker/Dockerfile
cache_from:
- mozdef/mozdef_mq_worker
- mozdef_mq_worker:latest
restart: always
command: bash -c 'sleep 90 && source /opt/mozdef/envs/python/bin/activate && python esworker_eventtask.py -c esworker_eventtask.conf'
scale: 1
command: bash -c 'while ! timeout 1 bash -c "echo > /dev/tcp/elasticsearch/9200";do sleep 1;done && source /opt/mozdef/envs/python/bin/activate && python esworker_eventtask.py -c esworker_eventtask.conf'
depends_on:
- base
- rabbitmq
@ -134,6 +217,13 @@ services:
volumes:
- geolite_db:/opt/mozdef/envs/mozdef/data/
meteor:
image: mozdef/mozdef_meteor
build:
context: ../../
dockerfile: docker/compose/mozdef_meteor/Dockerfile
cache_from:
- mozdef/mozdef_meteor
- mozdef_meteor:latest
restart: always
command: bash -c 'node bundle/main.js'
depends_on:
@ -142,8 +232,15 @@ services:
networks:
- default
rest:
image: mozdef/mozdef_rest
build:
context: ../../
dockerfile: docker/compose/mozdef_rest/Dockerfile
cache_from:
- mozdef/mozdef_rest
- mozdef_rest:latest
restart: always
command: bash -c 'source /opt/mozdef/envs/python/bin/activate && python index.py -c index.conf'
command: bash -c 'while ! timeout 1 bash -c "echo > /dev/tcp/elasticsearch/9200";do sleep 1;done && source /opt/mozdef/envs/python/bin/activate && python index.py -c index.conf'
depends_on:
- base
- elasticsearch
@ -152,17 +249,31 @@ services:
networks:
- default
syslog:
image: mozdef/mozdef_syslog
build:
context: ../../
dockerfile: docker/compose/mozdef_syslog/Dockerfile
cache_from:
- mozdef/mozdef_syslog
- mozdef_syslog:latest
restart: always
command: bash -c 'sleep 95 && /usr/sbin/syslog-ng --no-caps -F'
command: bash -c 'while ! timeout 1 bash -c "echo > /dev/tcp/rabbitmq/5672";do sleep 1;done && /usr/sbin/syslog-ng --no-caps -F'
depends_on:
- loginput
- mq_eventtask
- mq_worker
ports:
- 514:514/udp
- 514:514
networks:
- default
tester:
image: mozdef/mozdef_tester
build:
context: ../../
dockerfile: docker/compose/tester/Dockerfile
cache_from:
- mozdef/mozdef_tester
- mozdef_tester:latest
networks:
- default

View file

@ -1,5 +1,5 @@
---
version: '3.0'
version: '3.7'
services:
meteor:
build:

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"

View file

@ -26,7 +26,6 @@ parser.add_argument('backup_conf_file', help='The relative path to backup.conf f
args = parser.parse_args()
esserver = os.environ.get('OPTIONS_ESSERVERS')
if esserver is None:
esserver = args.esserver

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"

View file

@ -1,9 +1,9 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"
COPY mq /opt/mozdef/envs/mozdef/mq
COPY docker/compose/mozdef_mq_eventtask/files/esworker_eventtask.conf /opt/mozdef/envs/mozdef/mq/esworker_eventtask.conf
COPY docker/compose/mozdef_mq_worker/files/esworker_eventtask.conf /opt/mozdef/envs/mozdef/mq/esworker_eventtask.conf
RUN chown -R mozdef:mozdef /opt/mozdef/envs/mozdef/mq

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
LABEL maintainer="mozdef@mozilla.com"

View file

@ -1,9 +0,0 @@
[supervisord]
nodaemon=true
autostart=true
autorestart=true
#real time constant stream of demo events/alerts/attackers
[program:realTimeEvents]
priority=10
command=bash -c 'sleep 90 && source /opt/mozdef/envs/python/bin/activate && /opt/mozdef/envs/mozdef/examples/demo/sampleevents.sh'
directory=/opt/mozdef/envs/mozdef/examples/demo/

View file

@ -1,4 +1,4 @@
FROM mozdef_base:latest
FROM mozdef/mozdef_base
COPY tests /opt/mozdef/envs/mozdef/tests
COPY alerts /opt/mozdef/envs/mozdef/alerts

View file

@ -0,0 +1,87 @@
Alert Development Guide
=======================
This guide is for someone seeking to write a MozDef alert.
Starting a feature branch
-------------------------
Before you do anything else, start by checking out the MozDef repo and creating a feature branch::
git clone git@github.com:mozilla/MozDef.git
cd MozDef
git checkout -b name_of_alert_you_want_to_create
How to start your alert
-----------------------
Run::
make new-alert
This will prompt for information and create two things:
- The new alert file
- The new alert test file
You can now edit these files in place, but it is recommended that you first run the unit tests on the new alert to make sure they pass before editing (instructions below).
How to run tests on your alert
------------------------------
Requirements:
- Make sure you have the latest version of docker installed.
- Known Issue: docker's overlayfs storage driver causes problems here, so you will need to go to Docker => Preferences => Daemon => Advanced and add the following key pair ("storage-driver" : "aufs")
::
make build-tests
make run-tests TEST_CASE=tests/alerts/[YOUR ALERT TEST FILE].py
Once this test passes, you will have confirmed that you have a working environment.
At this point, begin development and periodically run your unit-tests locally with the following command::
make run-tests TEST_CASE=tests/alerts/[YOUR ALERT TEST FILE].py
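The test file is where you describe events that should (and should not) fire your alert. The generated file will differ, but as a rough sketch of the shape (the imports and helper names here are assumptions modeled on the existing files under tests/alerts/)::

    from positive_alert_test_case import PositiveAlertTestCase
    from negative_alert_test_case import NegativeAlertTestCase
    from alert_test_suite import AlertTestSuite

    class TestMyNewAlert(AlertTestSuite):
        alert_filename = "my_new_alert"  # hypothetical alert module name
        # template event the framework copies for each test case
        default_event = {
            "_source": {
                "category": "authentication",
                "summary": "failed login for user alice",
            }
        }
        # the alert we expect the default event to produce
        default_alert = {
            "category": "authentication",
            "severity": "WARNING",
        }
        test_cases = []
        test_cases.append(
            PositiveAlertTestCase(
                description="event matching all criteria fires the alert",
                events=[AlertTestSuite.create_event(default_event)],
                expected_alert=default_alert))
        # a NegativeAlertTestCase would assert that a non-matching event stays silent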
Background on concepts
----------------------
- Logs - These are individual log lines that are emitted from systems, like an Apache log
- Events - These are logs parsed into a JSON format; they exist in MozDef and are used with the ELK stack
- Alerts - These are effectively either 1:1 events-to-alerts (this thing happens, so alert) or M:1 events-to-alerts (M of these things happen, so alert).
When writing alerts, it's important to keep the above concepts in mind.
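For a concrete sense of what an alert consumes, here is a hypothetical event in its MozDef JSON form, written as a Python dict (the exact field set varies by source; this is illustrative only)::

    event = {
        "utctimestamp": "2018-11-06T15:04:05+00:00",
        "hostname": "host1.example.com",
        "severity": "INFO",
        "category": "authentication",
        "summary": "failed login for user alice",
        "tags": ["sshd"],
        "details": {
            "program": "sshd",
            "sourceipaddress": "10.0.0.1",
        },
    }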
Each alert tends to have two different blocks of code:
- main - This is where the alert defines the criteria for the types of events it wants to look at
- onAggregation/onEvent - This is where the alert defines what happens when it sees those events, such as post-processing them into a useful summary to emit as an alert.
In both cases, because the alert is simple Python, you will find that getting started writing alerts is pretty easy. It's important to note that as you iterate on the alert, you should regularly test to ensure that the alert is still firing. Should you hit a point where it's not firing, the best way to approach this is to back out the recent change and review the alert and tests to ensure that the expectations are still in sync.
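To make those two blocks concrete, here is a minimal sketch of an M:1 alert, modeled on the alerts already in the repo; the query classes, field names and thresholds are assumptions to adapt to your own events::

    from lib.alerttask import AlertTask
    from mozdef_util.query_models import SearchQuery, TermMatch, PhraseMatch

    class AlertFailedLogins(AlertTask):
        def main(self):
            # criteria: which events this alert wants to look at
            search_query = SearchQuery(minutes=15)
            search_query.add_must([
                TermMatch('category', 'authentication'),
                PhraseMatch('summary', 'failed login'),
            ])
            self.filtersManual(search_query)
            # bucket matching events by source IP, alert on 10+ in the window
            self.searchEventsAggregated('details.sourceipaddress', samplesLimit=10)
            self.walkAggregations(threshold=10)

        def onAggregation(self, aggreg):
            # what happens when we see those events: summarize and emit an alert
            summary = '{0} failed logins from {1}'.format(aggreg['count'], aggreg['value'])
            return self.createAlertDict(summary, 'authentication', ['auth'], aggreg['events'], 'WARNING')

A 1:1 alert has the same shape, but typically calls searchEventsSimple() and walkEvents() in main and implements onEvent(self, event) instead of onAggregation.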
How to get the alert in MozDef?
-------------------------------
The best way to get your alert into MozDef (once it's completed) is to propose a pull request and ask for a review from a MozDef developer. They will be able to help you get the most out of the alert and help point out pitfalls. Once the alert is accepted into MozDef master, there is a process by which MozDef installations can make use of, or 'enable', that alert. It's best to work with that MozDef instance's maintainer to enable any new alerts.
Questions?
----------
This guide is not intended to teach you how to develop in Python; there are good resources below to help you get more experience with it. However, should you have questions or run into problems trying to write an alert, we would like to hear from you (in IRC/Slack) so we can:
- help you get what you want to get done
- make it easier for anyone to contribute alerts
Resources
---------
`Python for Beginners <https://www.python.org/about/gettingstarted/>`_

View file

@ -12,18 +12,15 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@ -39,7 +36,7 @@ templates_path = ['_templates']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
@ -59,13 +56,13 @@ copyright = u'2014, Mozilla'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@ -73,27 +70,27 @@ exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
@ -105,26 +102,26 @@ html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@ -141,48 +138,48 @@ def setup(app):
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MozDefdoc'
@ -191,14 +188,14 @@ htmlhelp_basename = 'MozDefdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
@ -210,23 +207,23 @@ latex_documents = [
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
@ -238,7 +235,7 @@ man_pages = [
]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
@ -251,13 +248,13 @@ texinfo_documents = [
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# texinfo_no_detailmenu = False

Просмотреть файл

@ -7,8 +7,12 @@ Here's how to make MozDef go using the provided docker compose files:
1) Pull the repo: git clone https://github.com/mozilla/MozDef.git
2) Build the docker containers:
docker-compose -f docker/compose/docker-compose.yml -f docker/compose/docker-compose-rebuild.yml -p mozdef build
docker-compose -f docker/compose/docker-compose.yml -f docker/compose/docker-compose-rebuild.yml -p mozdef build
3) Run the containers:
docker-compose -f docker/compose/docker-compose.yml -f docker/compose/docker-compose-rebuild.yml -p mozdef up
docker-compose -f docker/compose/docker-compose.yml -f docker/compose/docker-compose-rebuild.yml -p mozdef up
4) Firefox yourself to http://localhost to see the main UI (when the container starts)
5) Login using a locally created account (click login, create and choose a username/password)

View file

@ -13,6 +13,7 @@ Table of Contents
introduction
demo
installation
alert_development_guide
screenshots
usage
cloud_deployment

View file

@ -6,10 +6,9 @@ The installation process has been tested on CentOS 7.
Build and run MozDef
--------------------
You can quickly install MozDef with an automated build generation using `docker`:
You can quickly install MozDef with an automated build generation using `docker`::
make build
make run
You're done! Now go to:
@ -29,13 +28,13 @@ You're done! Now go to:
Run tests
---------
Simply run:
Simply run::
make test
Note, if you end up with a clobbered ES index, or anything like that which might end up in failing tests, you can clean
the environment with:
the environment with::
make clean
@ -47,7 +46,7 @@ Manual Installation for Yum or Apt based distros
Summary
*******
This section explains the manual installation process for the MozDef system.
This section explains the manual installation process for the MozDef system::
git clone https://github.com/mozilla/MozDef.git mozdef
@ -79,7 +78,7 @@ Then::
wget https://www.python.org/ftp/python/2.7.11/Python-2.7.11.tgz
tar xvzf Python-2.7.11.tgz
cd Python-2.7.11
./configure --prefix=/opt/mozdef/python2.7 --enable-shared LDFLAGS="-W1,--rpath=/opt/mozdef/python.2.7/lib"
./configure --prefix=/opt/mozdef/python2.7 --enable-shared LDFLAGS="-Wl,--rpath=/opt/mozdef/python2.7/lib"
make
make install
@ -156,17 +155,18 @@ We first need to install `Mongodb`_ since it's the DB used by Meteor.
On Yum-based systems:
In /etc/yum.repo.d/mongo, add::
In /etc/yum.repos.d/mongo.repo, add::
[mongodb]
[mongodb-org-3.4]
name=MongoDB Repository
baseurl=http://downloads-distro.mongodb.org/repo/redhat/os/x86_64/
gpgcheck=0
baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/3.4/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.4.asc
Then you can install mongodb::
sudo yum install mongodb
sudo yum install mongodb-org
On APT-based systems::
@ -178,11 +178,11 @@ We have a mongod.conf in the config directory prepared for you. To use it simply
For meteor installation follow these steps::
curl https://install.meteor.com/?release=1.4.2.3 | sh
curl https://install.meteor.com/?release=1.8 | sh
wget https://nodejs.org/dist/v4.7.0/node-v4.7.0.tar.gz
tar xvzf node-v4.7.0.tar.gz
cd node-v4.7.0
wget https://nodejs.org/dist/v8.12.0/node-v8.12.0.tar.gz
tar xvzf node-v8.12.0.tar.gz
cd node-v8.12.0
./configure
make
sudo make install
@ -348,15 +348,13 @@ We use `uwsgi`_ to interface python and nginx, in your venv execute the followin
vim loginput.ini
Alternatively, if you do not wish to use the systemd unit files for starting these processes
you can start the restapi and loginput processes from within your venv via:
you can start the restapi and loginput processes from within your venv via::
cd /opt/mozdef/envs/mozdef
cd /opt/mozdef/envs/python
source bin/activate
(mozdef) [mozdef@mozdev mozdef]$ uwsgi --ini rest/restapi.ini
(mozdef) [mozdef@mozdev mozdef]$ uwsgi --ini loginput/loginput.ini
sudo cp nginx.conf /etc/nginx
# modify /etc/nginx/nginx.conf to reflect your server, and any path changes you've made.
sudo vim /etc/nginx/nginx.conf
@ -372,9 +370,9 @@ Supervisord
We use supervisord to run the alerts and alertplugins. If you plan on starting services manually, you can skip this step.
To install supervisord perform the following as the user mozdef:
To install supervisord perform the following as the user mozdef::
cd /opt/mozdef/envs/mozdef
cd /opt/mozdef/envs/python
source bin/activate
cd bin
pip install supervisor
@ -393,7 +391,7 @@ MozDef supports Elasticsearch version 5.x
Installation instructions are available on `Elasticsearch website`_.
You should prefer packages over archives if one is available for your distribution.
Add the repo in /etc/yum/repos.d/elasticsearch.repo:
Add the repo in /etc/yum/repos.d/elasticsearch.repo::
[elasticsearch-5.x]
name=Elasticsearch repository for 5.x packages
@ -431,7 +429,7 @@ Kibana
`Kibana`_ is a webapp to visualize and search your Elasticsearch cluster data::
Create the Repo in /etc/yum/repos.d/kibana.repo:
Create the Repo in /etc/yum/repos.d/kibana.repo::
[kibana-5.x]
name=Kibana repository for 5.x packages
@ -442,6 +440,8 @@ Create the Repo in /etc/yum/repos.d/kibana.repo:
autorefresh=1
type=rpm-md
::
sudo yum install kibana
Now you'll need to configure kibana to work with your system:
@ -462,7 +462,7 @@ Start Services
**************
To use the included systemd files you'll copy them to your system's default directory of /etc/systemd/system/.
Ensure it has root file permissions so that systemd can start it.
Ensure it has root file permissions so that systemd can start it::
cp /opt/mozdef/systemdfiles/web/mozdefweb.service /etc/systemd/system/
cp /opt/mozdef/systemdfiles/web/mozdefrestapi.service /etc/systemd/system/
@ -473,7 +473,7 @@ Ensure it has root file permissions so that systemd can start it.
cp /opt/mozdef/systemdfiles/alert/mozdefbot.service /etc/systemd/system/
cp /opt/mozdef/systemdfiles/alert/mozdefalertplugins.service /etc/systemd/system/
Then you will need to enable them:
Then you will need to enable them::
systemctl enable mozdefweb.service
systemctl enable mozdefrestapi.service
@ -484,11 +484,11 @@ Then you will need to enable them:
systemctl enable mozdefalertplugins.service
systemctl enable mongod.service
Reload systemd:
Reload systemd::
systemctl daemon-reload
Now you can start your services:
Now you can start your services::
systemctl start mongod
systemctl start mozdefalerts
@ -500,7 +500,7 @@ Now you can start your services:
systemctl start mozdefalertplugins
Alternatively you can start the following services manually in this way from inside the venv as mozdef:
Alternatively you can start the following services manually in this way from inside the venv as mozdef::
# Eventtask worker
cd ~/MozDef/mq
@ -515,7 +515,7 @@ To initialize elasticsearch indices and load some sample data::
(mozdef) [mozdef@mozdev mozdef]$ cd examples/es-docs/
(mozdef) [mozdef@mozdev es-docs]$ python inject.py
To add more sample data you can run the following from inside the venv:
To add more sample data you can run the following from inside the venv::
(mozdef) [mozdef@mozdev mozdef]$ cd ~/mozdef/examples/demo
(mozdef) [mozdef@mozdev demo]$ ./syncalerts.sh

View file

@ -1,8 +1,8 @@
Overview
========
What?
----
Easiest to describe The Mozilla Defense Platform (MozDef) as a set of micro-services you can use as an open source Security Informaition and Event Management (SIEM) overlay on top of Elasticsearch.
-----
It's easiest to describe The Mozilla Defense Platform (MozDef) as a set of micro-services you can use as an open source Security Information and Event Management (SIEM) overlay on top of Elasticsearch.
Why?
----

View file

@ -65,6 +65,7 @@ def setConfig(option,value,configfile):
config.write(configfp)
configfp.close()
def postLogs(logcache):
#post logs asynchronously with requests workers and check on the results
#expects a queue object from the multiprocessing library
@ -83,23 +84,25 @@ def postLogs(logcache):
#posts.append((r,postdata,url))
except Empty as e:
pass
#for p,postdata,url in posts:
#try:
#if p.result().status_code >=500:
#logger.error("exception posting to %s %r [will retry]\n"%(url,p.result().status_code))
##try again later when the next message in forces other attempts at posting.
#logcache.put(postdata)
#except ClosedPoolError as e:
##logger.fatal("Closed Pool Error exception posting to %s %r %r [will retry]\n"%(url,e,postdata))
#logcache.put(postdata)
#except Exception as e:
#logger.fatal("exception posting to %s %r %r [will not retry]\n"%(url,e,postdata))
#sys.exit(1)
# for p, postdata, url in posts:
# try:
# if p.result().status_code >= 500:
# logger.error("exception posting to %s %r [will retry]\n" % (url, p.result().status_code))
# # try again later when the next message in forces other attempts at posting.
# logcache.put(postdata)
# except ClosedPoolError as e:
# logger.fatal("Closed Pool Error exception posting to %s %r %r [will retry]\n" % (url, e, postdata))
# logcache.put(postdata)
# except Exception as e:
# logger.fatal("exception posting to %s %r %r [will not retry]\n" % (url, e, postdata))
# sys.exit(1)
def genRandomIPv4():
#random, IPs
return '.'.join("%d" % (random.randint(0,254)) for x in range(4))
def genAttackerIPv4():
#random, but not too random as to allow for alerting about attacks from
#the same IP.
@ -165,6 +168,7 @@ def makeEvents():
except KeyboardInterrupt as e:
sys.exit(1)
def makeAlerts():
'''
send events that will be correlated into alerts
@ -234,6 +238,7 @@ def makeAlerts():
except KeyboardInterrupt as e:
sys.exit(1)
def makeAttackers():
'''
send events that will be correlated into attackers using pre-defined IPs

View file

@ -1,385 +0,0 @@
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2014 Mozilla Corporation
*/
//collections shared by client/server
events = new Meteor.Collection("events");
alerts = new Meteor.Collection("alerts");
investigations = new Meteor.Collection("investigations");
incidents = new Meteor.Collection("incidents");
veris = new Meteor.Collection("veris");
kibanadashboards = new Meteor.Collection("kibanadashboards");
mozdefsettings = new Meteor.Collection("mozdefsettings");
healthfrontend = new Meteor.Collection("healthfrontend");
healthescluster = new Meteor.Collection("healthescluster");
healthesnodes = new Meteor.Collection("healthesnodes");
healtheshotthreads = new Meteor.Collection("healtheshotthreads");
attackers = new Meteor.Collection("attackers");
actions = new Meteor.Collection("actions");
userActivity = new Meteor.Collection("userActivity");
ipblocklist = new Meteor.Collection("ipblocklist");
fqdnblocklist = new Meteor.Collection("fqdnblocklist");
if (Meteor.isServer) {
//Publishing setups
Meteor.publish("mozdefsettings",function(){
return mozdefsettings.find();
});
Meteor.publish("alerts-summary", function (searchregex,timeperiod,recordlimit) {
//tail the last 100 records by default
//default parameters
timeperiod = typeof timeperiod !== 'undefined' ? timeperiod: 'tail';
searchregex = typeof searchregex !== 'undefined' ? searchregex: '';
recordlimit = ['number'].indexOf(typeof(recordlimit)) ? 100:recordlimit;
//sanity check the record limit
if ( recordlimit >10000 || recordlimit < 1){
recordlimit = 100;
}
if ( timeperiod ==='tail' || timeperiod == 'none' ){
return alerts.find(
{summary: {$regex:searchregex}},
{fields:{
_id:1,
esmetadata:1,
utctimestamp:1,
utcepoch:1,
summary:1,
severity:1,
category:1,
acknowledged:1,
acknowledgedby:1,
url:1
},
sort: {utcepoch: -1},
limit:recordlimit}
);
} else {
//determine the utcepoch range
beginningtime=moment().utc();
//expect timeperiod like '1 days'
timevalue=Number(timeperiod.split(" ")[0]);
timeunits=timeperiod.split(" ")[1];
beginningtime.subtract(timevalue,timeunits);
return alerts.find(
{summary: {$regex:searchregex},
utcepoch: {$gte: beginningtime.unix()}},
{fields:{
_id:1,
esmetadata:1,
utctimestamp:1,
utcepoch:1,
summary:1,
severity:1,
category:1,
acknowledged:1
},
sort: {utcepoch: -1},
limit:recordlimit}
);
}
});
Meteor.publish("alerts-details",function(alertid,includeEvents){
//return alerts.find({'esmetadata.id': alertid});
//alert ids can be either mongo or elastic search IDs
//look for both to publish to the collection.
//default parameters
includeEvents = typeof includeEvents !== 'undefined' ? includeEvents: true;
if ( includeEvents ){
return alerts.find({
$or:[
{'esmetadata.id': alertid},
{'_id': alertid},
]
});
}else{
return alerts.find({
$or:[
{'esmetadata.id': alertid},
{'_id': alertid},
]
},
{fields:{events:0},
});
}
});
Meteor.publish("alerts-count", function () {
var self = this;
var count = 0;
var initializing = true;
var recordID=Meteor.uuid();
//get a count by watching for only 1 new entry sorted in reverse date order.
//use that hook to return a find().count rather than iterating the entire result set over and over
var handle = alerts.find({}, {sort: {utcepoch: -1},limit:1}).observeChanges({
added: function (newDoc,oldDoc) {
count=alerts.find().count();
if (!initializing) {
self.changed("alerts-count", recordID,{count: count});
//console.log('added alerts count to' + count);
}
},
changed: function (newDoc,oldDoc) {
count=alerts.find().count();
if (!initializing) {
self.changed("alerts-count", recordID,{count: count});
//console.log('changed alerts count to' + count);
}
},
removed: function (newDoc,oldDoc) {
count=alerts.find().count();
if (!initializing) {
self.changed("alerts-count", recordID,{count: count});
//console.log('changed alerts count to' + count);
}
}
});
initializing = false;
self.added("alerts-count", recordID,{count: count});
//console.log('count is ready: ' + count);
self.ready();
// Stop observing the cursor when client unsubs.
// Stopping a subscription automatically takes
// care of sending the client any removed messages.
self.onStop(function () {
//console.log('stopped publishing alerts count.')
handle.stop();
});
});
//publish the last X event/alerts
//using document index instead of date
// Meteor.publish("attacker-details",function(attackerid){
// return attackers.find({'_id': attackerid},
// {fields: {
// events:{$slice: 20,
// $sort: { documentindex: -1 }},
// alerts:{$slice: -10}
// }}
// );
// });
Meteor.publish("attacker-details",function(attackerid){
return attackers.find({'_id': attackerid},
{fields: {
events:{$slice: -20},
alerts:{$slice: -10}
},
sort: { 'events.documentsource.utctimestamp': -1 },
reactive:false
}
);
});
Meteor.publish("attackers-summary", function () {
//limit to the last 100 records by default
//to ease the sync transfer to dc.js/crossfilter
return attackers.find({},
{fields:{
events:0,
alerts:0,
},
sort: {lastseentimestamp: -1},
limit:100});
});
Meteor.publish("attackers-summary-landmass", function () {
//limit to the last 100 records by default
//to ease the sync transfer to dc.js/crossfilter
var inModifier = { $in: ["broxss", "brotunnel", "brosqli"]};
return attackers.find({"events.documentsource.category": inModifier},
{sort: {lastseentimestamp: -1},
limit: 100});
});
Meteor.publish("investigations-summary", function () {
return investigations.find({},
{fields: {
_id:1,
summary:1,
phase:1,
dateOpened:1,
dateClosed:1,
creator:1
},
sort: {dateOpened: -1},
limit:100});
});
Meteor.publish("investigation-details",function(investigationid){
return investigations.find({'_id': investigationid});
});
Meteor.publish("incidents-summary", function () {
return incidents.find({},
{fields: {
_id:1,
summary:1,
phase:1,
dateOpened:1,
dateClosed:1,
creator:1
},
sort: {dateOpened: -1},
limit:100});
});
Meteor.publish("incident-details",function(incidentid){
return incidents.find({'_id': incidentid});
});
Meteor.publish("veris", function () {
return veris.find({}, {limit:0});
});
Meteor.publish("healthfrontend", function () {
return healthfrontend.find({}, {limit:0});
});
Meteor.publish("healthescluster", function () {
return healthescluster.find({}, {limit:0});
});
Meteor.publish("healthesnodes", function () {
return healthesnodes.find({}, {limit:0});
});
Meteor.publish("healtheshotthreads", function () {
return healtheshotthreads.find({}, {limit:0});
});
Meteor.publish("kibanadashboards", function () {
return kibanadashboards.find({},{sort:{name:1}, limit:30});
});
Meteor.publish("userActivity", function () {
return userActivity.find({},{sort:{userID:1}, limit:100});
});
Meteor.publish("ipblocklist", function () {
return ipblocklist.find({},{limit:0});
})
Meteor.publish("fqdnblocklist", function () {
return fqdnblocklist.find({},{limit:0});
})
//access rules from clients
//barebones to allow you to specify rules
incidents.allow({
insert: function (userId, doc) {
// the user must be logged in
return (userId);
},
update: function (userId, doc, fields, modifier) {
// the user must be logged in
return (userId);
},
remove: function (userId, doc) {
// can only remove one's own indicents
return doc.creator === Meteor.user().profile.email;
},
fetch: ['creator']
});
attackers.allow({
update: function (userId, doc, fields, modifier) {
// the user must be logged in
return (userId);
}
});
alerts.allow({
update: function (userId, doc, fields, modifier) {
// the user must be logged in
return (userId);
}
});
investigations.allow({
insert: function (userId, doc) {
// the user must be logged in
return (userId);
},
update: function (userId, doc, fields, modifier) {
// the user must be logged in
return (userId);
},
remove: function (userId, doc) {
// can only remove one's own items
return doc.creator === Meteor.user().profile.email;
},
fetch: ['creator']
});
userActivity.allow({
insert: function (userId, doc) {
// the user must be logged in
return (userId);
},
remove: function (userId, doc) {
// can only remove one's own items
return doc.userId === Meteor.user().profile.email;
},
});
ipblocklist.allow({
insert: function (userId, doc) {
// the user must be logged in
return (userId);
},
update: function (userId, doc, fields, modifier) {
// the user must be logged in
return (userId);
},
remove: function (userId, doc) {
// the user must be logged in
return (userId);
},
fetch: ['creator']
});
fqdnblocklist.allow({
insert: function (userId, doc) {
// the user must be logged in
return (userId);
},
update: function (userId, doc, fields, modifier) {
// the user must be logged in
return (userId);
},
remove: function (userId, doc) {
// the user must be logged in
return (userId);
},
fetch: ['creator']
});
};
if (Meteor.isClient) {
//client side collections:
alertsCount = new Meteor.Collection("alerts-count");
//client-side subscriptions to low volume collections
Meteor.subscribe("mozdefsettings");
Meteor.subscribe("veris");
Meteor.subscribe("kibanadashboards");
Meteor.subscribe("userActivity");
};

View file

@ -26,6 +26,7 @@ def status():
response.body = json.dumps(dict(status='ok', service='loginput'))
return response
@route('/test')
@route('/test/')
def testindex():
@ -34,6 +35,8 @@ def testindex():
response.status=200
#act like elastic search bulk index
@route('/_bulk',method='POST')
@route('/_bulk/',method='POST')
def bulkindex():
@ -56,17 +59,19 @@ def bulkindex():
except ValueError as e:
response.status=500
return
if not 'index' in json.loads(i).keys(): # don't post the items telling us where to post things..
# don't post the items telling us where to post things..
if 'index' not in json.loads(i):
ensurePublish=mqConn.ensure(mqproducer,mqproducer.publish,max_retries=10)
ensurePublish(eventDict,exchange=eventTaskExchange,routing_key=options.taskexchange)
except ValueError:
bottlelog('value error {0}'.format(i))
return
@route('/_status')
@route('/_status/')
@route('/nxlog/', method=['POST','PUT'])
@route('/nxlog', method=['POST','PUT'])
@route('/events/',method=['POST','PUT'])
@route('/events', method=['POST','PUT'])
def eventsindex():
@ -88,6 +93,7 @@ def eventsindex():
return
@route('/cef', method=['POST','PUT'])
@route('/cef/',method=['POST','PUT'])
#debug(True)
@ -109,6 +115,7 @@ def cefindex():
ensurePublish(cefDict,exchange=eventTaskExchange,routing_key=options.taskexchange)
return
@route('/custom/<application>',method=['POST','PUT'])
def customindex(application):
'''

View file

@ -8,7 +8,7 @@ Copyright (c) 2014 Mozilla Corporation
<template name="alertssummary">
<div class="container">
<div class="row col-xs-10">
<div class="col-xs-1"><button class="btn btn-xs" data-toggle="collapse" data-target="#alertSearch">search</button></div>
<div class="col-xs-1"><button class="btn btn-default btn-xs" data-toggle="collapse" data-target="#alertSearch">search</button></div>
<div class="col-xs-9">
<div id='alertSearch' class="collapse">
<form class="form-horizontal">
@ -45,7 +45,7 @@ Copyright (c) 2014 Mozilla Corporation
</div>
<div class="form-group">
<div class="col-xs-8 col-xs-offset-2">
<button class="btn btn-xs alertssearch">search</button>
<button class="btn btn-default btn-xs alertssearch">search</button>
</div>
</div>
@ -62,7 +62,7 @@ Copyright (c) 2014 Mozilla Corporation
<div id="ringChart-severity" class="col-xs-3 text-center"><h5 class="upperwhite" id="Severity">Severity: <span class="filter"></span></h5></div>
</div>
<div class="row col-xs-10">
<div class="col-xs-1"><button class="btn btn-xs reset">reset filters</button></div>
<div class="col-xs-1"><button class="btn btn-default btn-xs reset">reset filters</button></div>
<div class="col-xs-4 record-count">
<span id="displayCount"></span>{{displayedAlerts}} displayed/total: <span id="totalAlerts">{{totalAlerts}}</span>
</div>

View file

@ -7,17 +7,11 @@ Copyright (c) 2014 Mozilla Corporation
Router.configure({
// the default layout
layoutTemplate: 'layout'
});
Router.onBeforeAction(function () {
// before we go anywhere, make sure we have settings
this.subscribe('mozdefsettings').wait();
if (this.ready()) {
this.next();
} else {
this.render('Loading settings...');
}
layoutTemplate: 'layout',
// ensure we have settings
waitOn: function() {
return Meteor.subscribe('mozdefsettings')
}
});
Router.map(function () {

View file

@ -184,12 +184,11 @@ function registerLoginViaHeader() {
});
}
//generate login tokens
var stampedToken = Accounts._generateStampedLoginToken();
// return ala: https://github.com/meteor/meteor/blob/devel/packages/accounts-base/accounts_server.js#L340
// per https://github.com/meteor/meteor/blob/devel/packages/accounts-base/accounts_server.js#L263
// generating and storing the stamped login token is optional
// so we just return the userId and let the accounts module do it's thing
return {
userId: userId,
stampedLoginToken: stampedToken
userId: userId
}
});
}

View file

@ -1,11 +1,8 @@
import os
import geoip2.database
class GeoIP(object):
def __init__(self, db_location=None):
if db_location is None:
db_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data/GeoLite2-City.mmdb")
def __init__(self, db_location):
try:
self.db = geoip2.database.Reader(db_location)
except IOError:

View file

@ -56,6 +56,6 @@ setup(
test_suite='tests',
tests_require=[],
url='https://github.com/mozilla/MozDef/tree/master/lib',
version='1.0.1',
version='1.0.2',
zip_safe=False,
)

View file

@ -43,7 +43,7 @@ except ImportError as e:
class RoleManager:
def __init__(self, region='us-east-1', aws_access_key_id=None, aws_secret_access_key=None):
def __init__(self, region_name='us-east-1', aws_access_key_id=None, aws_secret_access_key=None):
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.credentials = {}
@ -52,7 +52,7 @@ class RoleManager:
try:
self.local_conn_sts = boto.sts.connect_to_region(
**get_aws_credentials(
region,
region_name,
self.aws_access_key_id,
self.aws_secret_access_key))
except Exception, e:
@ -69,12 +69,11 @@ class RoleManager:
raise
try:
creds = get_aws_credentials(
self.session_credentials.access_key,
self.session_credentials.secret_key,
self.session_credentials.session_token) if self.session_credentials else {}
self.session_conn_sts = boto.sts.connect_to_region(
region=region,
**creds)
region_name,
self.session_credentials.access_key,
self.session_credentials.secret_key,
self.session_credentials.session_token) if self.session_credentials else {}
self.session_conn_sts = boto.sts.connect_to_region(**creds)
except Exception, e:
logger.error("Unable to connect to STS with session token due to exception %s" % e.message)
raise
@ -126,10 +125,11 @@ class RoleManager:
'aws_secret_access_key': credential.secret_key,
'security_token': credential.session_token} if credential else {}
def get_aws_credentials(region=None, accesskey=None, secretkey=None, security_token=None):
result = {}
if region not in ['', '<add_region>', None]:
result['region'] = region
result['region_name'] = region
if accesskey not in ['', '<add_accesskey>', None]:
result['aws_access_key_id'] = accesskey
if secretkey not in ['', '<add_secretkey>', None]:

View file

@ -22,15 +22,14 @@ import requests
from mozdef_util.elasticsearch_client import ElasticsearchClient, ElasticsearchBadServer, ElasticsearchInvalidIndex, ElasticsearchException
from utilities.toUTC import toUTC
from utilities.to_unicode import toUnicode
from utilities.remove_at import removeAt
from utilities.is_cef import isCEF
from mozdef_util.utilities.toUTC import toUTC
from mozdef_util.utilities.to_unicode import toUnicode
from mozdef_util.utilities.remove_at import removeAt
from mozdef_util.utilities.is_cef import isCEF
from mozdef_util.utilities.logger import logger, initLogger
from lib.plugins import sendEventToPlugins, registerPlugins
from utilities.logger import logger, initLogger
# running under uwsgi?
try:

View file

@ -3,12 +3,12 @@ import boto
import boto.utils
def connect_sqs(region=None, access_key=None, secret_key=None,
task_exchange=None):
if region is None:
def connect_sqs(region_name=None, aws_access_key_id=None,
aws_secret_access_key=None, task_exchange=None):
if region_name is None:
try:
# connect_sqs defaults to us-east-1 instead of the local region
region = boto.utils.get_instance_identity(
region_name = boto.utils.get_instance_identity(
timeout=0.5, num_retries=1)['document']['region']
except IndexError:
raise Exception(
@ -16,12 +16,12 @@ def connect_sqs(region=None, access_key=None, secret_key=None,
"MozDef isn't running in AWS")
credentials = {}
if access_key is not None:
credentials['aws_access_key_id'] = access_key
if secret_key is not None:
credentials['aws_secret_access_key'] = secret_key
if aws_access_key_id is not None:
credentials['aws_access_key_id'] = aws_access_key_id
if aws_secret_access_key is not None:
credentials['aws_secret_access_key'] = aws_secret_access_key
conn = sqs.connect_to_region(
region_name=region,
region_name=region_name,
**credentials
)

View file

@ -18,66 +18,69 @@ class message(object):
# check for messages we have vetted as n/a and prevalent
# from a sec standpoint and drop them
# drop sensitive logs
#if 'details' in message \
#and 'command' in message['details'] \
#and 'ldappasswd' in message['details']['command']:
#return(None, metadata)
# ganglia monitor daemon
if 'details' in message \
and 'parentprocess' in message['details'] \
and message['details']['parentprocess'] == 'gmond' \
and 'duser' in message['details'] \
and message['details']['duser'] == 'nobody' \
and 'command' in message['details'] \
and message['details']['command'] == '/bin/sh -c netstat -t -a -n':
if ('details' in message and
'parentprocess' in message['details'] and
message['details']['parentprocess'] == 'gmond' and
'duser' in message['details'] and
message['details']['duser'] == 'nobody' and
'command' in message['details'] and
message['details']['command'] == '/bin/sh -c netstat -t -a -n'):
return(None, metadata)
# rabbitmq
if ('details' in message
and 'parentprocess' in message['details']
and message['details']['parentprocess'] == 'beam.smp'
and 'duser' in message['details']
and message['details']['duser'] == 'rabbitmq'
and 'command' in message['details']) \
and (message['details']['command'] == '/usr/lib64/erlang/erts-5.8.5/bin/epmd -daemon'
or message['details']['command'].startswith('inet_gethost 4')
or message['details']['command'].startswith('sh -c exec inet_gethost 4')
or message['details']['command'].startswith('/bin/sh -s unix:cmd')
or message['details']['command'].startswith('sh -c exec /bin/sh -s unix:cmd')):
if (
('details' in message and
'parentprocess' in message['details'] and
message['details']['parentprocess'] == 'beam.smp' and
'duser' in message['details'] and
message['details']['duser'] == 'rabbitmq' and
'command' in message['details']
) and
(
message['details']['command'] == '/usr/lib64/erlang/erts-5.8.5/bin/epmd -daemon' or
message['details']['command'].startswith('inet_gethost 4') or
message['details']['command'].startswith('sh -c exec inet_gethost 4') or
message['details']['command'].startswith('/bin/sh -s unix:cmd') or
message['details']['command'].startswith('sh -c exec /bin/sh -s unix:cmd'))):
return(None, metadata)
# sshd
if 'details' in message \
and 'parentprocess' in message['details'] \
and message['details']['parentprocess'] == 'sshd' \
and 'duser' in message['details'] \
and message['details']['duser'] == 'root' \
and 'command' in message['details'] \
and message['details']['command'] == '/usr/sbin/sshd -R':
if ('details' in message and
'parentprocess' in message['details'] and
message['details']['parentprocess'] == 'sshd' and
'duser' in message['details'] and
message['details']['duser'] == 'root' and
'command' in message['details'] and
message['details']['command'] == '/usr/sbin/sshd -R'):
return(None, metadata)
# chkconfig
if ('details' in message
and 'parentprocess' in message['details']
and message['details']['parentprocess'] == 'chkconfig'
and 'suser' in message['details']
and message['details']['suser'] == 'root'
and 'command' in message['details']) \
and (message['details']['command'].startswith('/sbin/runlevel')
or message['details']['command'].startswith('sh -c /sbin/runlevel')):
if (
('details' in message and
'parentprocess' in message['details'] and
message['details']['parentprocess'] == 'chkconfig' and
'suser' in message['details'] and
message['details']['suser'] == 'root' and
'command' in message['details']
) and
(
message['details']['command'].startswith('/sbin/runlevel') or
message['details']['command'].startswith('sh -c /sbin/runlevel'))):
return(None, metadata)
# nagios
if ('details' in message
and 'duser' in message['details']
and message['details']['duser'] == 'nagios'
and 'suser' in message['details']
and message['details']['suser'] == 'root'
and 'command' in message['details']) \
and (message['details']['command'].startswith('/usr/lib64/nagios/plugins')
or message['details']['command'].startswith('sh -c /usr/lib64/nagios/plugins')):
if (
('details' in message and
'duser' in message['details'] and
message['details']['duser'] == 'nagios' and
'suser' in message['details'] and
message['details']['suser'] == 'root' and
'command' in message['details']
) and
(
message['details']['command'].startswith('/usr/lib64/nagios/plugins') or
message['details']['command'].startswith('sh -c /usr/lib64/nagios/plugins'))):
return(None, metadata)
# fix auid from long to int
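Most of the matcher blocks above share one shape (user field, parent process, command prefix). A hypothetical table-driven equivalent, shown only to make the logic easier to scan (the plugin itself keeps the explicit conditionals; the nagios block differs in that it has no parentprocess check):

# Hypothetical refactor; exact-match commands become one-element prefix
# tuples, so a longer command sharing the prefix would also match.
BENIGN = [
    ('duser', 'nobody', 'gmond', ('/bin/sh -c netstat -t -a -n',)),
    ('duser', 'root', 'sshd', ('/usr/sbin/sshd -R',)),
    ('suser', 'root', 'chkconfig', ('/sbin/runlevel',
                                    'sh -c /sbin/runlevel')),
]

def is_benign(details):
    cmd = details.get('command', '')
    for user_field, user, parent, prefixes in BENIGN:
        if (details.get(user_field) == user and
                details.get('parentprocess') == parent and
                any(cmd.startswith(p) for p in prefixes)):
            return True
    return False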
@@ -86,17 +89,17 @@ class message(object):
message['details']['auid'] = '-1'
if 'ses' in message['details'].keys() and message['details']['ses'] == "4294967295":
message['details']['ses'] = '-1'
#fix '(null)' string records to fit in a long
for k,v in message['details'].iteritems():
if v=='(null)' and 'id' in k:
message['details'][k]=-1
# fix '(null)' string records to fit in a long
for k, v in message['details'].iteritems():
if v == '(null)' and 'id' in k:
message['details'][k] = -1
# fix occasional gid errant parsing
if 'details' in message.keys() and isinstance(message['details'], dict):
if 'gid' in message['details'].keys() and ',' in message['details']['gid']:
#gid didn't parse right, should just be an integer
#move it to a new field to not trigger errors in ES indexing
#as it tries to convert gid to long
# gid didn't parse right, should just be an integer
# move it to a new field to not trigger errors in ES indexing
# as it tries to convert gid to long
message['details']['gidstring'] = message['details']['gid']
del message['details']['gid']
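A worked example of the '(null)' normalization above: auditd sometimes emits the literal string '(null)' for id-like fields that Elasticsearch maps as long, so they are rewritten to -1 before indexing (field names here are illustrative; the repo's Python 2 code uses iteritems to the same effect).

details = {'auid': '(null)', 'sessionid': '(null)', 'command': 'ls'}
for k, v in details.items():
    if v == '(null)' and 'id' in k:
        details[k] = -1
# details is now {'auid': -1, 'sessionid': -1, 'command': 'ls'}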
@@ -111,7 +114,7 @@ class message(object):
if 'category' not in message.keys():
message['category'] = 'auditd'
#set doctype
# set doctype
metadata['doc_type'] = 'auditd'
return (message, metadata)

View file

@@ -377,8 +377,7 @@ class message(object):
if newmessage['details']['actions'] == "Notice::ACTION_LOG":
# retrieve indicator ip addresses from the sub field
# "sub": "Indicator: 1.2.3.4, Indicator: 5.6.7.8"
newmessage['details']['indicators'] = [ip for ip
in findIPv4(newmessage['details']['sub'])]
newmessage['details']['indicators'] = [ip for ip in findIPv4(newmessage['details']['sub'])]
# remove the details.src field and add it to indicators
# as it may not be the actual source.
if 'src' in newmessage['details']:
@@ -421,7 +420,7 @@ class message(object):
u'destination {dst} '
u'port {p}'
).format(**sumstruct)
# Thank you for your service
# Thank you for your service
return (newmessage, metadata)
if logtype == 'rdp':
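The one-line comprehension above is a pure reflow; findIPv4 already returns a list, so the comprehension just copies it. A sketch with a stand-in pattern (the real findIPv4 lives in mozdef_util and may differ):

import re

def findIPv4(text):
    # stand-in: a loose IPv4 literal pattern
    return re.findall(r'(?:\d{1,3}\.){3}\d{1,3}', text)

sub = "Indicator: 1.2.3.4, Indicator: 5.6.7.8"
indicators = findIPv4(sub)   # ['1.2.3.4', '5.6.7.8']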

View file

@@ -42,7 +42,8 @@ class message(object):
'details.requestparameters.disableApiTermination',
'details.responseelements.findings.service.additionalInfo.unusual',
'details.responseelements.distribution.distributionConfig.callerReference',
'details.requestparameters.logStreamName'
'details.requestparameters.logStreamName',
'details.apiversion'
]
def convert_key_raw_str(self, needle, haystack):

View file

@@ -30,7 +30,7 @@ class message(object):
# drop disabled for now
#if 'signatureid' in message['details']:
#if message['details'].lower() == 'execve' and \
#'command' not in message['details']:
#'command' not in message['details']:
# auditd entry without a command
# likely a result of another command (java starting a job, etc.)
# signal a drop

View file

@@ -25,6 +25,7 @@ def addError(message, error):
if isinstance(message['errors'], list):
message['errors'].append(error)
class message(object):
def __init__(self):
'''register our criteria for being passed a message
@@ -44,24 +45,20 @@ class message(object):
# Making sufficiently sure this is a fluentd-forwarded message from
# fluentd SQS plugin, so that we don't spend too much time on other
# message types
if ((not 'az' in message.keys())
and (not 'instance_id' in message.keys())
and (not '__tag' in message.keys())):
if 'az' not in message and 'instance_id' not in message and '__tag' not in message:
return (message, metadata)
if not 'details' in message.keys():
if 'details' not in message:
message['details'] = dict()
if (not 'summary' in message.keys()) and ('message' in message.keys()):
if 'summary' not in message and 'message' in message:
message['summary'] = message['message']
if ((not 'utctimestamp' in message.keys())
and ('time' in message.keys())):
if 'utctimestamp' not in message and 'time' in message:
message['utctimestamp'] = toUTC(message['time']).isoformat()
# Bro format of {u'Timestamp': 1.482437837e+18}
if ((not 'utctimestamp' in message.keys())
and ('Timestamp' in message.keys())):
if 'utctimestamp' not in message and 'Timestamp' in message:
message['utctimestamp'] = toUTC(message['Timestamp']).isoformat()
# host is used to store dns-style-ip entries in AWS, for ex
@@ -69,7 +66,7 @@ class message(object):
# that this is always trusted. It's better than nothing though. At the
# time of writing, there is no ipv6 support AWS-side for this kind of
# field. It may be overridden later by a better field, if any exists
if 'host' in message.keys():
if 'host' in message:
tmp = message['host']
if tmp.startswith('ip-'):
ipText = tmp.split('ip-')[1].replace('-', '.')
@@ -86,7 +83,7 @@ class message(object):
'fluentSqsFixUp.py',
'destinationipaddress is invalid',
ipText))
if not 'hostname' in message.keys():
if 'hostname' not in message:
message['hostname'] = tmp
# All messages with __tag 'ec2.forward*' are actually syslog forwarded
@@ -100,16 +97,14 @@ class message(object):
if 'ident' in message.keys():
tmp = message['ident']
message['details']['program'] = tmp
if ((not 'processname' in message.keys())
and ('program' in message['details'].keys())):
if 'processname' not in message and 'program' in message['details']:
message['processname'] = message['details']['program']
if ((not 'processid' in message.keys())
and ('pid' in message.keys())):
if 'processid' not in message and 'pid' in message:
message['processid'] = message['pid']
else:
message['processid'] = 0
# Unknown really, but this field is mandatory.
if not 'severity' in message.keys():
if 'severity' not in message:
message['severity'] = 'INFO'
# We already have the time of event stored in 'timestamp' so we don't
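The rewrites above swap "not 'x' in d.keys()" for the idiomatic "'x' not in d": same result, no intermediate key list, and the negation binds where a reader expects. A minimal illustration (values are placeholders):

message = {'time': '2018-11-06T09:58:19+00:00', 'message': 'hello'}
if 'summary' not in message and 'message' in message:
    message['summary'] = message['message']
if 'utctimestamp' not in message and 'time' in message:
    message['utctimestamp'] = message['time']  # the plugin runs this through toUTC()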

View file

@@ -35,6 +35,9 @@ class message(object):
self.priority = 10
def onMessage(self, message, metadata):
if 'eventsource' not in message:
return (message, metadata)
#drop non-relevant messages
if message['eventsource'] in ('Fxa-customsMozSvc', 'FxaContentWebserver', 'FxaAuthWebserver', 'FxaOauthWebserver', 'FxaAuth', 'fxa-auth-server'):
if 'details' in message.keys():
@@ -87,8 +90,8 @@ class message(object):
# handle the case of an escaped list:
# "remoteAddressChain": "[\"1.2.3.4\",\"5.6.7.8\",\"127.0.0.1\"]"
if (isinstance(message['details']['remoteAddressChain'], unicode) and
message['details']['remoteAddressChain'][0]=='[' and
message['details']['remoteAddressChain'][-1]==']'):
message['details']['remoteAddressChain'][0] == '[' and
message['details']['remoteAddressChain'][-1] == ']'):
# remove the brackets and double quotes
for i in ['[',']','"']:
message['details']['remoteAddressChain']=message['details']['remoteAddressChain'].replace(i,'')
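The bracket-and-quote stripping above hand-parses an escaped list such as '["1.2.3.4","5.6.7.8","127.0.0.1"]'. When the value is well-formed JSON, the same recovery can be done with json.loads; a sketch of that alternative (not what the plugin does):

import json

chain = '["1.2.3.4","5.6.7.8","127.0.0.1"]'
if chain.startswith('[') and chain.endswith(']'):
    addresses = json.loads(chain)
# addresses == ['1.2.3.4', '5.6.7.8', '127.0.0.1']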

View file

@@ -27,7 +27,8 @@ class message(object):
'''
self.registration = ['sourceipaddress', 'destinationipaddress']
self.priority = 20
self.geoip = GeoIP()
geoip_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../data/GeoLite2-City.mmdb")
self.geoip = GeoIP(geoip_data_dir)
def ipLocation(self, ip):
location = dict()
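The new explicit path anchors the GeoLite2 database to the plugin file rather than to the process's working directory; a small sketch of the same resolution:

import os

here = os.path.dirname(os.path.abspath(__file__))
geoip_data_dir = os.path.join(here, "../../data/GeoLite2-City.mmdb")
# os.path.normpath(geoip_data_dir) collapses the '..' segments if needed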

View file

@@ -20,14 +20,12 @@ class message(object):
# and do any clean up
# check for details.kind like 'admin#reports#activity'
if ('details' in message.keys() and
'kind' in message['details'].keys() and
'activity' in message['details']['kind']):
if ('details' in message and 'kind' in message['details'] and
'activity' in message['details']['kind']):
# details.etag might be quoted..unquote it
if 'etag' in message['details'].keys():
if 'etag' in message['details']:
message['details']['etag'] = message['details']['etag'].replace('"', '')
metadata['doc_type']= 'google'
metadata['doc_type'] = 'google'
return (message, metadata)

View file

@@ -9,6 +9,7 @@ from mozdef_util.utilities.key_exists import key_exists
from mozdef_util.utilities.toUTC import toUTC
from mozdef_util.utilities.dot_dict import DotDict
class message(object):
def __init__(self):
'''

View file

@@ -6,7 +6,7 @@
class message(object):
def __init__(self):
self.registration = ['nubis_events_prod']
self.registration = ['nubis_events_prod', 'githubeventsqs']
self.priority = 20
self.MAX_STRING_LENGTH = 3000
@@ -24,6 +24,12 @@ class message(object):
message['details']['cmdline'] = message['details']['cmdline'][:self.MAX_STRING_LENGTH]
message['details']['cmdline'] += ' ...'
if 'pr_body' in message['details']:
if type(message['details']['pr_body']) in (str, unicode) \
and len(message['details']['pr_body']) > self.MAX_STRING_LENGTH:
message['details']['pr_body'] = message['details']['pr_body'][:self.MAX_STRING_LENGTH]
message['details']['pr_body'] += ' ...'
if 'summary' in message:
if type(message['summary']) in (str, unicode) \
and len(message['summary']) > self.MAX_STRING_LENGTH:
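The pr_body and summary branches repeat one pattern: cap a string field at MAX_STRING_LENGTH and mark the cut with ' ...'. A hypothetical helper (the plugin inlines the checks; basestring is the Python 2 spelling matching the repo's type(...) in (str, unicode) test):

MAX_STRING_LENGTH = 3000

def truncate(value, limit=MAX_STRING_LENGTH):
    if isinstance(value, basestring) and len(value) > limit:
        return value[:limit] + ' ...'
    return value

details = {'pr_body': 'x' * 5000}
details['pr_body'] = truncate(details['pr_body'])
# len(details['pr_body']) == 3004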

View file

@@ -4,6 +4,8 @@
# Copyright (c) 2014 Mozilla Corporation
import hashlib
class message(object):
def __init__(self):
'''

View file

@@ -5,6 +5,7 @@
import re
class message(object):
def __init__(self):
'''register our criteria for being passed a message

View file

@@ -5,6 +5,7 @@
import re
class message(object):
def __init__(self):
'''register our criteria for being passed a message

View file

@@ -5,6 +5,7 @@
import re
class message(object):
def __init__(self):
'''register our criteria for being passed a message

View file

@@ -18,59 +18,68 @@ class message(object):
def onMessage(self, message, metadata):
# ganglia monitor daemon -> 3d
if 'details' in message \
and 'parentprocess' in message['details'] \
and message['details']['parentprocess'] == 'gmond' \
and 'duser' in message['details'] \
and message['details']['duser'] == 'nobody' \
and 'command' in message['details'] \
and message['details']['command'] == '/bin/sh -c netstat -t -a -n':
if ('details' in message and
'parentprocess' in message['details'] and
message['details']['parentprocess'] == 'gmond' and
'duser' in message['details'] and
message['details']['duser'] == 'nobody' and
'command' in message['details'] and
message['details']['command'] == '/bin/sh -c netstat -t -a -n'):
message['_ttl'] = '3d'
# rabbitmq -> 3d
if ('details' in message
and 'parentprocess' in message['details']
and message['details']['parentprocess'] == 'beam.smp'
and 'duser' in message['details']
and message['details']['duser'] == 'rabbitmq'
and 'command' in message['details']) \
and (message['details']['command'] == '/usr/lib64/erlang/erts-5.8.5/bin/epmd -daemon'
or message['details']['command'].startswith('inet_gethost 4')
or message['details']['command'].startswith('sh -c exec inet_gethost 4')
or message['details']['command'].startswith('/bin/sh -s unix:cmd')
or message['details']['command'].startswith('sh -c exec /bin/sh -s unix:cmd')):
if (
('details' in message and
'parentprocess' in message['details'] and
message['details']['parentprocess'] == 'beam.smp' and
'duser' in message['details'] and
message['details']['duser'] == 'rabbitmq' and
'command' in message['details']
) and
(
message['details']['command'] == '/usr/lib64/erlang/erts-5.8.5/bin/epmd -daemon' or
message['details']['command'].startswith('inet_gethost 4') or
message['details']['command'].startswith('sh -c exec inet_gethost 4') or
message['details']['command'].startswith('/bin/sh -s unix:cmd') or
message['details']['command'].startswith('sh -c exec /bin/sh -s unix:cmd'))):
message['_ttl'] = '3d'
# sshd -> 3d
if 'details' in message \
and 'parentprocess' in message['details'] \
and message['details']['parentprocess'] == 'sshd' \
and 'duser' in message['details'] \
and message['details']['duser'] == 'root' \
and 'command' in message['details'] \
and message['details']['command'] == '/usr/sbin/sshd -R':
if ('details' in message and
'parentprocess' in message['details'] and
message['details']['parentprocess'] == 'sshd' and
'duser' in message['details'] and
message['details']['duser'] == 'root' and
'command' in message['details'] and
message['details']['command'] == '/usr/sbin/sshd -R'):
message['_ttl'] = '3d'
# chkconfig -> 3d
if ('details' in message
and 'parentprocess' in message['details']
and message['details']['parentprocess'] == 'chkconfig'
and 'suser' in message['details']
and message['details']['suser'] == 'root'
and 'command' in message['details']) \
and (message['details']['command'].startswith('/sbin/runlevel')
or message['details']['command'].startswith('sh -c /sbin/runlevel')):
if (
('details' in message and
'parentprocess' in message['details'] and
message['details']['parentprocess'] == 'chkconfig' and
'suser' in message['details'] and
message['details']['suser'] == 'root' and
'command' in message['details']
) and
(
message['details']['command'].startswith('/sbin/runlevel') or
message['details']['command'].startswith('sh -c /sbin/runlevel'))):
message['_ttl'] = '3d'
# nagios -> 3d
if ('details' in message
and 'duser' in message['details']
and message['details']['duser'] == 'nagios'
and 'suser' in message['details']
and message['details']['suser'] == 'root'
and 'command' in message['details']) \
and (message['details']['command'].startswith('/usr/lib64/nagios/plugins')
or message['details']['command'].startswith('sh -c /usr/lib64/nagios/plugins')):
if (
('details' in message and
'duser' in message['details'] and
message['details']['duser'] == 'nagios' and
'suser' in message['details'] and
message['details']['suser'] == 'root' and
'command' in message['details']
) and
(
message['details']['command'].startswith('/usr/lib64/nagios/plugins') or
message['details']['command'].startswith('sh -c /usr/lib64/nagios/plugins'))):
message['_ttl'] = '3d'
return (message, metadata)
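These are the same matchers as the drop plugin earlier with a different action: instead of returning (None, metadata), each hit stamps message['_ttl'] = '3d' so the event is retained only briefly. A hypothetical way to share one predicate between the two plugins (see the is_benign sketch above; not how the repo is structured):

def on_message(message, metadata, matches_benign):
    # drop plugin: return (None, metadata) on a match;
    # this plugin: tag a short TTL and pass the event through.
    if matches_benign(message.get('details', {})):
        message['_ttl'] = '3d'
    return (message, metadata)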

View file

@@ -4,6 +4,8 @@
# Copyright (c) 2014 Mozilla Corporation
import hashlib
class message(object):
def __init__(self):
'''

View file

@@ -8,7 +8,7 @@ bottle==0.12.4
bugzilla==1.0.0
celery==4.1.0
cffi==1.9.1
configlib==2.0.1
configlib==2.0.2
configparser==3.5.0b2
cryptography==2.3.1
dnspython==1.15.0
@@ -33,7 +33,7 @@ KitnIRC==0.2.6
kombu==4.1.0
meld3==1.0.2
mozdef-client==1.0.11
mozdef-util==1.0.1
mozdef-util==1.0.2
MySQL-python==1.2.5
netaddr==0.7.1
nose==1.3.7
@@ -41,6 +41,7 @@ oauth2client==1.4.12
packaging==16.8
pyasn1==0.1.9
pyasn1-modules==0.0.5
pyOpenSSL==18.0.0
pycparser==2.17
pymongo==3.6.1
pynsive==0.2.6

Some files were not shown because too many files changed in this diff.