Mirror of https://github.com/mozilla/MozDef.git
Merge pull request #910 from mozilla/fix_some_pep8_errors
Fix some pep8 errors
Commit 747b766a07
.flake8 (14 changed lines)
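This merge is almost entirely mechanical pycodestyle cleanup: identity comparisons (E711/E712, "!= None" becomes "is not None"), membership tests (E713, "not 'x' in d" becomes "'x' not in d"), blank-line rules (E301/E302/E305), and comment spacing (E265/E266). A minimal before/after sketch of the dominant rewrite (the variable names here are illustrative, not taken from the MozDef sources):

# Before: flagged by pycodestyle
#   if alert['ircchannel'] != None and not 'details' in alert.keys():
#       notify = True
# After: the idioms this commit substitutes throughout
alert = {'ircchannel': '#mozdef'}
notify = False
if alert['ircchannel'] is not None and 'details' not in alert:
    notify = True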
@@ -4,8 +4,6 @@ exclude =
.git
*__init__.py
ignore =
E114 # indentation is not a multiple of four (comment)
E116 # unexpected indentation (comment)
E121 # continuation line under-indented for hanging indent
E122 # continuation line missing indentation or outdented
E123 # closing bracket does not match indentation of opening bracket's line
@@ -14,25 +12,13 @@ ignore =
E126 # continuation line over-indented for hanging indent
E127 # continuation line over-indented for visual indent
E128 # continuation line under-indented for visual indent
E129 # visually indented line with same indent as next logical line
E131 # continuation line unaligned for hanging indent
E222 # multiple spaces after operator
E225 # missing whitespace around operator
E226 # missing whitespace around arithmetic operator
E228 # missing whitespace around modulo operator
E231 # missing whitespace after ','
E241 # multiple spaces after ','
E261 # at least two spaces before inline comment
E265 # block comment should start with '# '
E266 # too many leading '#' for block comment
E301 # expected 1 blank line
E302 # expected 2 blank lines, found 1
E305 # expected 2 blank lines after class or function definition
E402 # module level import not at top of file
E501 # line too long
E711 # comparison to None should be 'if cond is not None
E712 # comparison to True should be 'if cond is True
E713 # test for membership should be 'not in'
E722 # do not use bare except'
F401 # library imported but unused
F601 # dictionary key 'tags' repeated with different values
@@ -201,7 +201,7 @@ class AlertTask(Task):
alert['notify_mozdefbot'] = False

# If an alert sets specific ircchannel, then we should probably always notify in mozdefbot
if 'ircchannel' in alert and alert['ircchannel'] != '' and alert['ircchannel'] != None:
if 'ircchannel' in alert and alert['ircchannel'] != '' and alert['ircchannel'] is not None:
alert['notify_mozdefbot'] = True
return alert
@@ -40,11 +40,7 @@ class AlertProxyDropNonStandardPort(AlertTask):
# I think it makes sense to alert every time here
self.walkAggregations(threshold=1)

# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'squid'
tags = ['squid', 'proxy']
severity = 'WARNING'
@@ -20,6 +20,7 @@ import re
# alert will not generate an alert event. If the detected key is not in
# the whitelist, an alert will be created.


class SSHKey(AlertTask):
def __init__(self):
# _whitelist contains all whitelisted key paths, loaded from the
@@ -65,7 +66,7 @@ class SSHKey(AlertTask):
rem = re.compile(went['hostre'])
except:
continue
if rem.match(hostname) == None:
if rem.match(hostname) is None:
continue
if privkey['path'] == went['path']:
return False
@@ -71,6 +71,7 @@ import netaddr
# ]
# }


class SshLateral(AlertTask):
def __init__(self):
AlertTask.__init__(self)
@@ -92,8 +93,8 @@ class SshLateral(AlertTask):
# listed in the configuration file.
def exception_check(self, user, host, srcip):
for x in self._config['exceptions']:
if re.match(x[0], user) != None and \
re.match(x[1], host) != None and \
if re.match(x[0], user) is not None and \
re.match(x[1], host) is not None and \
netaddr.IPAddress(srcip) in netaddr.IPNetwork(x[2]):
return True
return False
@@ -110,13 +111,13 @@ class SshLateral(AlertTask):
srchost = aggreg['events'][0]['_source']['hostname']
srcmatch = False
for x in self._config['hostmustmatch']:
if re.match(x, srchost) != None:
if re.match(x, srchost) is not None:
srcmatch = True
break
if not srcmatch:
return None
for x in self._config['hostmustnotmatch']:
if re.match(x, srchost) != None:
if re.match(x, srchost) is not None:
return None

# Determine if the origin of the connection was from a source outside
@@ -126,7 +127,7 @@ class SshLateral(AlertTask):
sampleuser = None
for x in aggreg['events']:
m = re.match('Accepted publickey for (\S+) from (\S+).*', x['_source']['summary'])
if m != None and len(m.groups()) == 2:
if m is not None and len(m.groups()) == 2:
ipaddr = netaddr.IPAddress(m.group(2))
for y in self._config['alertifsource']:
if ipaddr in netaddr.IPNetwork(y):
@@ -149,9 +150,9 @@ class SshLateral(AlertTask):
# Check our exception list
if self.exception_check(m.group(1), srchost, m.group(2)):
continue
if sampleip == None:
if sampleip is None:
sampleip = m.group(2)
if sampleuser == None:
if sampleuser is None:
sampleuser = m.group(1)
candidates.append(x)
if len(candidates) == 0:
@@ -33,6 +33,8 @@ logger = logging.getLogger(sys.argv[0])
logger.level=logging.DEBUG

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')


def postLogs(logcache):
#post logs asynchronously with requests workers and check on the results
#expects a queue object from the multiprocessing library
@@ -61,6 +63,7 @@ def postLogs(logcache):
logger.fatal("exception posting to %s %r %r [will not retry]\n"%(url,e,postdata))
sys.exit(1)


if __name__ == '__main__':
parser=OptionParser()
parser.add_option("-u", dest='url', default='http://localhost:8080/events/', help="mozdef events URL to use when posting events")
@@ -52,5 +52,6 @@ class Roulette(Module):
# tell kitnirc that we handled this, no need to pass to other modules.
return True


# Let KitnIRC know what module class it should be loading.
module = Roulette
@@ -331,7 +331,7 @@ class alertConsumer(ConsumerMixin):

# see if we need to delay a bit before sending the alert, to avoid
# flooding the channel
if self.lastalert != None:
if self.lastalert is not None:
delta = toUTC(datetime.now()) - self.lastalert
sys.stdout.write('new alert, delta since last is {}\n'.format(delta))
if delta.seconds < 2:
@@ -349,6 +349,7 @@ class alertConsumer(ConsumerMixin):
logger.exception(
"alertworker exception while processing events queue %r" % e)


@run_async
def consumeAlerts(ircBot):
# connect and declare the message queue/kombu objects.
@@ -186,6 +186,7 @@ class alertConsumer(ConsumerMixin):
except ValueError as e:
logger.exception("mozdefbot_slack exception while processing events queue %r" % e)


def consumeAlerts(bot):
# connect and declare the message queue/kombu objects.
# server/exchange/queue
@@ -21,6 +21,7 @@ except ImportError:
quote_url = urllib.quote
import traceback


class DotDict(dict):
'''dict.item notation for dict()'s'''
__getattr__ = dict.__getitem__
@@ -33,187 +34,196 @@ class DotDict(dict):
value = DotDict(value)
self[key] = value


def fatal(msg):
print(msg)
sys.exit(1)


def debug(msg):
sys.stderr.write('+++ {}\n'.format(msg))

#This is from https://auth0.com/docs/api/management/v2#!/Logs/get_logs
#and https://github.com/auth0/auth0-logs-to-logentries/blob/master/index.js (MIT)

# This is from https://auth0.com/docs/api/management/v2#!/Logs/get_logs
# and https://github.com/auth0/auth0-logs-to-logentries/blob/master/index.js (MIT)
# levels
# 0 = Debug
# 1 = Info
# 2 = Warning
# 3 = Error
# 4 = Critical
log_types=DotDict({
's': {
"event": 'Success Login',
"level": 1 # Info
"level": 1
},
'slo': {
"event": 'Success Logout',
"level": 1 # Info
"level": 1
},
'flo': {
"event": 'Failed Logout',
"level": 3 # Error
"level": 3
},
'seacft': {
"event": 'Success Exchange (Authorization Code for Access Token)',
"level": 1 # Info
"level": 1
},
'feacft': {
"event": 'Failed Exchange (Authorization Code for Access Token)',
"level": 3 # Error
"level": 3
},
'f': {
"event": 'Failed Login',
"level": 3 # Error
"level": 3
},
'w': {
"event": 'Warnings During Login',
"level": 2 # Warning
"level": 2
},
'du': {
"event": 'Deleted User',
"level": 1 # Info
"level": 1
},
'fu': {
"event": 'Failed Login (invalid email/username)',
"level": 3 # Error
"level": 3
},
'fp': {
"event": 'Failed Login (wrong password)',
"level": 3 # Error
"level": 3
},
'fc': {
"event": 'Failed by Connector',
"level": 3 # Error
"level": 3
},
'fco': {
"event": 'Failed by CORS',
"level": 3 # Error
"level": 3
},
'con': {
"event": 'Connector Online',
"level": 1 # Info
"level": 1
},
'coff': {
"event": 'Connector Offline',
"level": 3 # Error
"level": 3
},
'fcpro': {
"event": 'Failed Connector Provisioning',
"level": 4 # Critical
"level": 4
},
'ss': {
"event": 'Success Signup',
"level": 1 # Info
"level": 1
},
'fs': {
"event": 'Failed Signup',
"level": 3 # Error
"level": 3
},
'cs': {
"event": 'Code Sent',
"level": 0 # Debug
"level": 0
},
'cls': {
"event": 'Code/Link Sent',
"level": 0 # Debug
"level": 0
},
'sv': {
"event": 'Success Verification Email',
"level": 0 # Debug
"level": 0
},
'fv': {
"event": 'Failed Verification Email',
"level": 0 # Debug
"level": 0
},
'scp': {
"event": 'Success Change Password',
"level": 1 # Info
"level": 1
},
'fcp': {
"event": 'Failed Change Password',
"level": 3 # Error
"level": 3
},
'sce': {
"event": 'Success Change Email',
"level": 1 # Info
"level": 1
},
'fce': {
"event": 'Failed Change Email',
"level": 3 # Error
"level": 3
},
'scu': {
"event": 'Success Change Username',
"level": 1 # Info
"level": 1
},
'fcu': {
"event": 'Failed Change Username',
"level": 3 # Error
"level": 3
},
'scpn': {
"event": 'Success Change Phone Number',
"level": 1 # Info
"level": 1
},
'fcpn': {
"event": 'Failed Change Phone Number',
"level": 3 # Error
"level": 3
},
'svr': {
"event": 'Success Verification Email Request',
"level": 0 # Debug
"level": 0
},
'fvr': {
"event": 'Failed Verification Email Request',
"level": 3 # Error
"level": 3
},
'scpr': {
"event": 'Success Change Password Request',
"level": 0 # Debug
"level": 0
},
'fcpr': {
"event": 'Failed Change Password Request',
"level": 3 # Error
"level": 3
},
'fn': {
"event": 'Failed Sending Notification',
"level": 3 # Error
"level": 3
},
'sapi': {
"event": 'API Operation',
"level": 1 # Info
"level": 1
},
'fapi': {
"event": 'Failed API Operation',
"level": 3 # Error
"level": 3
},
'limit_wc': {
"event": 'Blocked Account',
"level": 4 # Critical
"level": 4
},
'limit_ui': {
"event": 'Too Many Calls to /userinfo',
"level": 4 # Critical
"level": 4
},
'api_limit': {
"event": 'Rate Limit On API',
"level": 4 # Critical
"level": 4
},
'sdu': {
"event": 'Successful User Deletion',
"level": 1 # Info
"level": 1
},
'fdu': {
"event": 'Failed User Deletion',
"level": 3 # Error
"level": 3
},
'sd': {
"event": 'Success Delegation',
"level": 3 # error
"level": 3
},
'fd': {
"event": 'Failed Delegation',
"level": 3 # error
"level": 3
},
'seccft': {
"event": "Success Exchange (Client Credentials for Access Token)",
@@ -249,6 +259,7 @@ log_types=DotDict({
}
})


def process_msg(mozmsg, msg):
"""Normalization function for auth0 msg.
@mozmsg: MozDefEvent (mozdef message)
@@ -339,6 +350,7 @@ def process_msg(mozmsg, msg):

return mozmsg


def load_state(fpath):
"""Load last msg id we've read from auth0 (log index).
@fpath string (path to state file)
@@ -351,6 +363,7 @@ def load_state(fpath):
pass
return state


def save_state(fpath, state):
"""Saves last msg id we've read from auth0 (log index).
@fpath string (path to state file)
@@ -359,6 +372,7 @@ def save_state(fpath, state):
with open(fpath, mode='w') as fd:
fd.write(str(state)+'\n')


def byteify(input):
"""Convert input to ascii"""
if isinstance(input, dict):
@@ -371,6 +385,7 @@ def byteify(input):
else:
return input


def fetch_auth0_logs(config, headers, fromid):
lastid = fromid
@@ -421,13 +436,14 @@ def fetch_auth0_logs(config, headers, fromid):
else:
return (0, 0, 0, lastid)


def main():
#Configuration loading
# Configuration loading
config_location = os.path.dirname(sys.argv[0]) + '/' + 'auth02mozdef.json'
with open(config_location) as fd:
config = DotDict(hjson.load(fd))

if config == None:
if config is None:
print("No configuration file 'auth02mozdef.json' found.")
sys.exit(1)
@@ -449,5 +465,6 @@ def main():

save_state(config.state_file, lastid)


if __name__ == "__main__":
main()
@@ -71,8 +71,7 @@ def main():
logger.debug('snapshot repo registered')

# do the actual snapshotting
for (index, dobackup, rotation, pruning) in zip(options.indices,
options.dobackup, options.rotation, options.pruning):
for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
if dobackup == '1':
index_to_snapshot = index
if rotation == 'daily':
@@ -121,6 +120,7 @@ echo "DONE!"
except Exception as e:
logger.error("Unhandled exception, terminating: %r"%e)


def initConfig():
# output our log to stdout or syslog
options.output = getConfig(
@@ -187,6 +187,7 @@ def initConfig():
options.configfile
)


if __name__ == '__main__':
parser = OptionParser()
defaultconfigfile = sys.argv[0].replace('.py', '.conf')
@@ -135,18 +135,30 @@ def searchMongoAlerts(mozdefdb):
# aggregate IPv4 addresses in the most recent alerts
# to find common attackers.
ipv4TopHits = alerts.aggregate([
{"$sort": {"utcepoch":-1}}, # reverse sort the current alerts
{"$limit": 100}, # most recent 100
{"$match": {"events.documentsource.details.sourceipaddress":{"$exists": True}}}, # must have an ip address
{"$match": {"attackerid":{"$exists": False}}}, # must not be already related to an attacker
{"$unwind":"$events"}, # make each event into it's own doc
{"$project":{"_id":0,
"sourceip":"$events.documentsource.details.sourceipaddress"}}, # emit the source ip only
{"$group": {"_id": "$sourceip", "hitcount": {"$sum": 1}}}, # count by ip
{"$match":{"hitcount":{"$gt":5}}}, # limit to those with X observances
{"$sort": SON([("hitcount", -1), ("_id", -1)])}, # sort
{"$limit": 10} # top 10
])
# reverse sort the current alerts
{"$sort": {"utcepoch": -1}},
# most recent 100
{"$limit": 100},
# must have an ip address
{"$match": {"events.documentsource.details.sourceipaddress": {"$exists": True}}},
# must not be already related to an attacker
{"$match": {"attackerid": {"$exists": False}}},
# make each event into it's own doc
{"$unwind": "$events"},
{"$project": {
"_id": 0,
# emit the source ip only
"sourceip": "$events.documentsource.details.sourceipaddress"
}},
# count by ip
{"$group": {"_id": "$sourceip", "hitcount": {"$sum": 1}}},
# limit to those with X observances
{"$match": {"hitcount": {"$gt": 5}}},
# sort
{"$sort": SON([("hitcount", -1), ("_id", -1)])},
# top 10
{"$limit": 10}
])
for ip in ipv4TopHits:
# sanity check ip['_id'] which should be the ipv4 address
if isIPv4(ip['_id']) and ip['_id'] not in netaddr.IPSet(['0.0.0.0']):
@@ -324,27 +336,29 @@ def genNewAttacker():

return newAttacker


def updateAttackerGeoIP(mozdefdb, attackerID, eventDictionary):
'''given an attacker ID and a dictionary of an elastic search event
look for a valid geoIP in the dict and update the attacker's geo coordinates
'''

# geo ip should be in eventDictionary['details']['sourceipgeolocation']
#"sourceipgeolocation": {
#"city": "Polska",
#"region_code": "73",
#"area_code": 0,
#"time_zone": "Europe/Warsaw",
#"dma_code": 0,
#"metro_code": null,
#"country_code3": "POL",
#"latitude": 52.59309999999999,
#"postal_code": null,
#"longitude": 19.089400000000012,
#"country_code": "PL",
#"country_name": "Poland",
#"continent": "EU"
#logger.debug(eventDictionary)
# "sourceipgeolocation": {
# "city": "Polska",
# "region_code": "73",
# "area_code": 0,
# "time_zone": "Europe/Warsaw",
# "dma_code": 0,
# "metro_code": null,
# "country_code3": "POL",
# "latitude": 52.59309999999999,
# "postal_code": null,
# "longitude": 19.089400000000012,
# "country_code": "PL",
# "country_name": "Poland",
# "continent": "EU"
# }
# logger.debug(eventDictionary)
if 'details' in eventDictionary.keys():
if 'sourceipgeolocation' in eventDictionary['details']:
attackers=mozdefdb['attackers']
@@ -78,7 +78,7 @@ def esSearch(es, macassignments=None):
Expecting an event like: user: username@somewhere.com; mac: 5c:f9:38:b1:de:cf; author reason: roamed session; ssid: ANSSID; AP 46/2\n
'''
usermacre=re.compile(r'''user: (?P<username>.*?); mac: (?P<macaddress>.*?); ''',re.IGNORECASE)
correlations={} # list of dicts to populate hits we find
correlations={}

search_query = SearchQuery(minutes=options.correlationminutes)
search_query.add_must(TermMatch('details.program', 'AUTHORIZATION-SUCCESS'))
@@ -105,6 +105,7 @@ def esSearch(es, macassignments=None):
except ElasticsearchBadServer:
logger.error('Elastic Search server could not be reached, check network connectivity')


def esStoreCorrelations(es, correlations):
for c in correlations:
event=dict(
@@ -27,6 +27,7 @@ logger = logging.getLogger(sys.argv[0])
def loggerTimeStamp(self, record, datefmt=None):
return toUTC(datetime.now()).isoformat()


def initLogger():
logger.level = logging.INFO
formatter = logging.Formatter(
@@ -41,9 +42,11 @@ def initLogger():
sh.setFormatter(formatter)
logger.addHandler(sh)


def genMeteorID():
return('%024x' % random.randrange(16**24))


def isFQDN(fqdn):
try:
# We could resolve FQDNs here, but that could tip our hand and it's
@@ -54,6 +57,7 @@ def isFQDN(fqdn):
except:
return False


def parse_fqdn_whitelist(fqdn_whitelist_location):
fqdns = []
with open(fqdn_whitelist_location, "r") as text_file:
@@ -63,6 +67,7 @@ def parse_fqdn_whitelist(fqdn_whitelist_location):
fqdns.append(line)
return fqdns


def main():
logger.debug('starting')
logger.debug(options)
@@ -161,6 +166,7 @@ def s3_upload_file(file_path, bucket_name, key_name):
print("URL: {}".format(url))
return url


if __name__ == '__main__':
parser = OptionParser()
parser.add_option(
@@ -27,6 +27,7 @@ logger = logging.getLogger(sys.argv[0])
def loggerTimeStamp(self, record, datefmt=None):
return toUTC(datetime.now()).isoformat()


def initLogger():
logger.level = logging.INFO
formatter = logging.Formatter(
@@ -41,9 +42,11 @@ def initLogger():
sh.setFormatter(formatter)
logger.addHandler(sh)


def genMeteorID():
return('%024x' % random.randrange(16**24))


def isIPv4(ip):
try:
# netaddr on it's own considers 1 and 0 to be valid_ipv4
@@ -58,12 +61,14 @@ def isIPv4(ip):
except:
return False


def isIPv6(ip):
try:
return netaddr.valid_ipv6(ip)
except:
return False


def aggregateAttackerIPs(attackers):
iplist = []
@@ -100,6 +105,7 @@ def aggregateAttackerIPs(attackers):
logger.debug('invalid:' + ip)
return iplist


def parse_network_whitelist(network_whitelist_location):
networks = []
with open(network_whitelist_location, "r") as text_file:
@@ -109,6 +115,7 @@ def parse_network_whitelist(network_whitelist_location):
networks.append(line)
return networks


def main():
logger.debug('starting')
logger.debug(options)
@@ -231,6 +238,7 @@ def s3_upload_file(file_path, bucket_name, key_name):
print("URL: {}".format(url))
return url


if __name__ == '__main__':
parser = OptionParser()
parser.add_option(
@@ -15,8 +15,10 @@ except ImportError:
class UTC(tzinfo):
def utcoffset(self, dt):
return timedelta(0)

def tzname(self, dt):
return "UTC"

def dst(self, dt):
return timedelta(0)
utc = UTC()
@@ -26,6 +28,7 @@ import duo_client
import mozdef_client as mozdef
import pickle


def normalize(details):
# Normalizes fields to conform to http://mozdef.readthedocs.io/en/latest/usage.html#mandatory-fields
# This is mainly used for common field names to put inside the details structure
@@ -42,6 +45,7 @@ def normalize(details):
normalized[f] = details[f]
return normalized


def process_events(mozmsg, duo_events, etype, state):
# There are some key fields that we use as MozDef fields, those are set to "noconsume"
# After processing these fields, we just pour everything into the "details" fields of Mozdef, except for the
@@ -67,8 +71,8 @@ def process_events(mozmsg, duo_events, etype, state):
if i in noconsume:
continue

# Duo client doesn't translate inner dicts to dicts for some reason - its just a string, so we have to process and parse it
if e[i] != None and type(e[i]) == str and e[i].startswith('{'):
# Duo client doesn't translate inner dicts to dicts for some reason - its just a string, so we have to process and parse it
if e[i] is not None and type(e[i]) == str and e[i].startswith('{'):
j = json.loads(e[i])
for x in j:
details[x] = j[x]
@@ -93,6 +97,7 @@ def process_events(mozmsg, duo_events, etype, state):
pass
return state


def main():
try:
state = pickle.load(open(options.statepath, 'rb'))
@@ -119,6 +124,7 @@ def main():

pickle.dump(state, open(options.statepath, 'wb'))


def initConfig():
options.IKEY = getConfig('IKEY', '', options.configfile)
options.SKEY = getConfig('SKEY', '', options.configfile)
@@ -129,6 +135,7 @@ def initConfig():
options.statepath = getConfig('statepath', '', options.configfile)
options.update_tags = getConfig('addtag', '', options.configfile)


if __name__ == '__main__':
parser = OptionParser()
defaultconfigfile = sys.argv[0].replace('.py', '.conf')
@@ -170,6 +170,7 @@ def initConfig():
default_mapping_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'mozdefStateDefaultMappingTemplate.json')
options.default_mapping_file = getConfig('default_mapping_file', default_mapping_location, options.configfile)


if __name__ == '__main__':
parser = OptionParser()
parser.add_option(
@@ -64,6 +64,7 @@ def writeFrontendStats(data, mongo):
del host['_source']['details'][key]
mongo.healthfrontend.insert(host['_source'])


def getSqsStats(es):
search_query = SearchQuery(minutes=15)
search_query.add_must([
@@ -60,6 +60,7 @@ class State:
indent=4,
separators=(',', ': '))


def main():
if options.output=='syslog':
logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
@@ -42,8 +42,7 @@ def esPruneIndexes():
es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
indices = es.get_indices()
# do the pruning
for (index, dobackup, rotation, pruning) in zip(options.indices,
options.dobackup, options.rotation, options.pruning):
for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
try:
if pruning != '0':
index_to_prune = index
@@ -110,6 +109,7 @@ def initConfig():
options.configfile).split(',')
)


if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c",
@@ -17,6 +17,7 @@ from configlib import getConfig, OptionParser
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from utils import es as es_module


def initConfig():
options.esservers = list(getConfig(
'esservers',
@@ -34,6 +35,7 @@ def initConfig():
options.configfile).split(',')
)


if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c",
@@ -31,6 +31,7 @@ from mozdef_util.utilities.is_cef import isCEF
from mozdef_util.utilities.logger import logger, initLogger
from mozdef_util.elasticsearch_client import ElasticsearchClient, ElasticsearchBadServer, ElasticsearchInvalidIndex, ElasticsearchException


def getDocID(sqsregionidentifier):
# create a hash to use as the ES doc id
# hostname plus salt as doctype.latest
@@ -38,6 +39,7 @@ def getDocID(sqsregionidentifier):
hash.update('{0}.mozdefhealth.latest'.format(sqsregionidentifier))
return hash.hexdigest()


def getQueueSizes():
logger.debug('starting')
logger.debug(options)
@@ -114,11 +116,13 @@ def getQueueSizes():
# except Exception as e:
# logger.error("Exception %r when gathering health and status " % e)


def main():
logger.debug('Starting')
logger.debug(options)
getQueueSizes()


def initConfig():
# aws options
options.accesskey = getConfig('accesskey', '', options.configfile)
@@ -133,6 +137,7 @@ def initConfig():
options.index = getConfig('index', 'mozdefstate', options.configfile)
options.account = getConfig('account', '', options.configfile)


if __name__ == '__main__':
# configure ourselves
parser = OptionParser()
@@ -24,6 +24,7 @@ def fetch_ip_list(aws_key_id, aws_secret_key, s3_bucket, ip_list_filename):
contents = ip_list_key.get_contents_as_string().rstrip()
return contents.split("\n")


def save_ip_list(save_path, ips):
ip_list_contents = '\n'.join(ips)
logger.debug("Saving ip list")
@@ -34,6 +35,7 @@ def save_ip_list(save_path, ips):
with open(save_path, "w+") as text_file:
text_file.write(ip_list_contents)


def main():
logger.debug('Starting')
logger.debug(options)
@@ -49,6 +51,7 @@ def main():
raise LookupError('IP List contains less than ' + str(options.ips_list_threshold) + ' entries...something is probably up here.')
save_ip_list(options.local_ip_list_path, ips)


def initConfig():
# output our log to stdout or syslog
options.output = getConfig('output', 'stdout', options.configfile)
@@ -64,6 +67,7 @@ def initConfig():
options.ips_list_threshold = getConfig('ips_list_threshold', 20, options.configfile)
options.manual_additions = getConfig('manual_additions', '', options.configfile).split(',')


if __name__ == '__main__':
parser = OptionParser()
parser.add_option(
@@ -24,6 +24,7 @@ from datetime import datetime
from os import stat
from os.path import exists, getsize


class MozDefError(Exception):
def __init__(self, msg):
self.msg = msg
@@ -163,6 +164,7 @@ def main():
hostname=socket.gethostname()
)


def getConfig(optionname, thedefault, configfile):
"""read an option from a config file or set a default
send 'thedefault' as the data class you want to get a string back
@@ -26,7 +26,6 @@ parser.add_argument('backup_conf_file', help='The relative path to backup.conf f
args = parser.parse_args()



esserver = os.environ.get('OPTIONS_ESSERVERS')
if esserver is None:
esserver = args.esserver
@@ -65,6 +65,7 @@ def setConfig(option,value,configfile):
config.write(configfp)
configfp.close()


def postLogs(logcache):
#post logs asynchronously with requests workers and check on the results
#expects a queue object from the multiprocessing library
@@ -83,23 +84,25 @@ def postLogs(logcache):
#posts.append((r,postdata,url))
except Empty as e:
pass
#for p,postdata,url in posts:
#try:
#if p.result().status_code >=500:
#logger.error("exception posting to %s %r [will retry]\n"%(url,p.result().status_code))
##try again later when the next message in forces other attempts at posting.
#logcache.put(postdata)
#except ClosedPoolError as e:
##logger.fatal("Closed Pool Error exception posting to %s %r %r [will retry]\n"%(url,e,postdata))
#logcache.put(postdata)
#except Exception as e:
#logger.fatal("exception posting to %s %r %r [will not retry]\n"%(url,e,postdata))
#sys.exit(1)
# for p, postdata, url in posts:
# try:
# if p.result().status_code >= 500:
# logger.error("exception posting to %s %r [will retry]\n" % (url, p.result().status_code))
# # try again later when the next message in forces other attempts at posting.
# logcache.put(postdata)
# except ClosedPoolError as e:
# logger.fatal("Closed Pool Error exception posting to %s %r %r [will retry]\n" % (url, e, postdata))
# logcache.put(postdata)
# except Exception as e:
# logger.fatal("exception posting to %s %r %r [will not retry]\n" % (url, e, postdata))
# sys.exit(1)


def genRandomIPv4():
#random, IPs
return '.'.join("%d" % (random.randint(0,254)) for x in range(4))


def genAttackerIPv4():
#random, but not too random as to allow for alerting about attacks from
#the same IP.
@@ -165,6 +168,7 @@ def makeEvents():
except KeyboardInterrupt as e:
sys.exit(1)


def makeAlerts():
'''
send events that will be correlated into alerts
@@ -234,6 +238,7 @@ def makeAlerts():
except KeyboardInterrupt as e:
sys.exit(1)


def makeAttackers():
'''
send events that will be correlated into attackers using pre-defined IPs
@@ -26,6 +26,7 @@ def status():
response.body = json.dumps(dict(status='ok', service='loginput'))
return response


@route('/test')
@route('/test/')
def testindex():
@@ -34,6 +35,8 @@ def testindex():
response.status=200

#act like elastic search bulk index


@route('/_bulk',method='POST')
@route('/_bulk/',method='POST')
def bulkindex():
@@ -56,17 +59,19 @@ def bulkindex():
except ValueError as e:
response.status=500
return
if not 'index' in json.loads(i).keys(): # don't post the items telling us where to post things..
# don't post the items telling us where to post things..
if 'index' not in json.loads(i):
ensurePublish=mqConn.ensure(mqproducer,mqproducer.publish,max_retries=10)
ensurePublish(eventDict,exchange=eventTaskExchange,routing_key=options.taskexchange)
except ValueError:
bottlelog('value error {0}'.format(i))
return


@route('/_status')
@route('/_status/')
@route('/nxlog/', method=['POST','PUT'])
@route('/nxlog', method=['POST','PUT'])
@route('/nxlog', method=['POST','PUT'])
@route('/events/',method=['POST','PUT'])
@route('/events', method=['POST','PUT'])
def eventsindex():
@@ -88,6 +93,7 @@ def eventsindex():

return


@route('/cef', method=['POST','PUT'])
@route('/cef/',method=['POST','PUT'])
#debug(True)
@@ -109,6 +115,7 @@ def cefindex():
ensurePublish(cefDict,exchange=eventTaskExchange,routing_key=options.taskexchange)
return


@route('/custom/<application>',method=['POST','PUT'])
def customindex(application):
'''
@@ -125,6 +125,7 @@ class RoleManager:
'aws_secret_access_key': credential.secret_key,
'security_token': credential.session_token} if credential else {}


def get_aws_credentials(region=None, accesskey=None, secretkey=None, security_token=None):
result = {}
if region not in ['', '<add_region>', None]:
@@ -421,7 +421,7 @@ class message(object):
u'destination {dst} '
u'port {p}'
).format(**sumstruct)
# Thank you for your service
# Thank you for your service
return (newmessage, metadata)

if logtype == 'rdp':
@@ -30,7 +30,7 @@ class message(object):
# drop disabled for now
#if 'signatureid' in message['details']:
#if message['details'].lower() == 'execve' and \
#'command' not in message['details']:
#'command' not in message['details']:
# auditd entry without a command
# likely a result of another command (java starting a job, etc.)
# signal a drop
@@ -25,6 +25,7 @@ def addError(message, error):
if isinstance(message['errors'], list):
message['errors'].append(error)


class message(object):
def __init__(self):
'''register our criteria for being passed a message
@@ -44,24 +45,20 @@ class message(object):
# Making sufficiently sure this is a fluentd-forwarded message from
# fluentd SQS plugin, so that we don't spend too much time on other
# message types
if ((not 'az' in message.keys())
and (not 'instance_id' in message.keys())
and (not '__tag' in message.keys())):
if 'az' not in message and 'instance_id' not in message and '__tag' not in message:
return (message, metadata)

if not 'details' in message.keys():
if 'details' not in message:
message['details'] = dict()

if (not 'summary' in message.keys()) and ('message' in message.keys()):
if 'summary' not in message and 'message' in message:
message['summary'] = message['message']

if ((not 'utctimestamp' in message.keys())
and ('time' in message.keys())):
if 'utctimestamp' not in message and 'time' in message:
message['utctimestamp'] = toUTC(message['time']).isoformat()

# Bro format of {u'Timestamp': 1.482437837e+18}
if ((not 'utctimestamp' in message.keys())
and ('Timestamp' in message.keys())):
if 'utctimestamp' not in message and 'Timestamp' in message:
message['utctimestamp'] = toUTC(message['Timestamp']).isoformat()

# host is used to store dns-style-ip entries in AWS, for ex
@@ -69,7 +66,7 @@ class message(object):
# that this is always trusted. It's better than nothing though. At the
# time of writing, there is no ipv6 support AWS-side for this kind of
# field. It may be overridden later by a better field, if any exists
if 'host' in message.keys():
if 'host' in message:
tmp = message['host']
if tmp.startswith('ip-'):
ipText = tmp.split('ip-')[1].replace('-', '.')
@@ -86,7 +83,7 @@ class message(object):
'fluentSqsFixUp.py',
'destinationipaddress is invalid',
ipText))
if not 'hostname' in message.keys():
if 'hostname' not in message:
message['hostname'] = tmp

# All messages with __tag 'ec2.forward*' are actually syslog forwarded
@@ -100,16 +97,14 @@ class message(object):
if 'ident' in message.keys():
tmp = message['ident']
message['details']['program'] = tmp
if ((not 'processname' in message.keys())
and ('program' in message['details'].keys())):
if 'processname' not in message and 'program' in message['details']:
message['processname'] = message['details']['program']
if ((not 'processid' in message.keys())
and ('pid' in message.keys())):
if 'processid' not in message and 'pid' in message:
message['processid'] = message['pid']
else:
message['processid'] = 0
# Unknown really, but this field is mandatory.
if not 'severity' in message.keys():
if 'severity' not in message:
message['severity'] = 'INFO'

# We already have the time of event stored in 'timestamp' so we don't
@@ -9,6 +9,7 @@ from mozdef_util.utilities.key_exists import key_exists
from mozdef_util.utilities.toUTC import toUTC
from mozdef_util.utilities.dot_dict import DotDict


class message(object):
def __init__(self):
'''
@@ -4,6 +4,8 @@
# Copyright (c) 2014 Mozilla Corporation

import hashlib


class message(object):
def __init__(self):
'''
@@ -5,6 +5,7 @@

import re


class message(object):
def __init__(self):
'''register our criteria for being passed a message
@@ -5,6 +5,7 @@

import re


class message(object):
def __init__(self):
'''register our criteria for being passed a message
@@ -5,6 +5,7 @@

import re


class message(object):
def __init__(self):
'''register our criteria for being passed a message
@@ -4,6 +4,8 @@
# Copyright (c) 2014 Mozilla Corporation

import hashlib


class message(object):
def __init__(self):
'''
@@ -58,6 +58,7 @@ def test():
sendMessgeToPlugins(request, response, 'test')
return response


@route('/status')
@route('/status/')
def status():
@@ -122,6 +123,7 @@ def index():
sendMessgeToPlugins(request, response, 'blockip')
return response


@post('/blockfqdn', methods=['POST'])
@post('/blockfqdn/', methods=['POST'])
@enable_cors
@@ -130,6 +132,7 @@ def index():
sendMessgeToPlugins(request, response, 'blockfqdn')
return response


@post('/ipwhois', methods=['POST'])
@post('/ipwhois/', methods=['POST'])
@enable_cors
@@ -213,6 +216,7 @@ def index():
sendMessgeToPlugins(request, response, 'ipdshieldquery')
return response


@route('/plugins', methods=['GET'])
@route('/plugins/', methods=['GET'])
@route('/plugins/<endpoint>', methods=['GET'])
@@ -249,6 +253,7 @@ def getPluginList(endpoint=None):
sendMessgeToPlugins(request, response, 'plugins')
return response


@post('/incident', methods=['POST'])
@post('/incident/', methods=['POST'])
def createIncident():
@@ -320,7 +325,7 @@ def createIncident():

# Validating Incident phase type
if (type(incident['phase']) not in (str, unicode) or
incident['phase'] not in validIncidentPhases):
incident['phase'] not in validIncidentPhases):

response.status = 500
response.body = json.dumps(dict(status='failed',
@@ -389,6 +394,7 @@ def createIncident():
))
return response


def validateDate(date, dateFormat='%Y-%m-%d %I:%M %p'):
'''
Converts a date string into a datetime object based
@@ -410,9 +416,11 @@ def validateDate(date, dateFormat='%Y-%m-%d %I:%M %p'):
finally:
return dateObj


def generateMeteorID():
return('%024x' % random.randrange(16**24))


def registerPlugins():
'''walk the ./plugins directory
and register modules in pluginList
@@ -579,13 +587,6 @@ def verisSummary(verisRegex=None):
client = MongoClient(options.mongohost, options.mongoport)
# use meteor db
incidents= client.meteor['incidents']
#iveris=incidents.aggregate([
#{"$match":{"tags":{"$exists":True}}},
#{"$unwind" : "$tags" },
#{"$match":{"tags":{"$regex":''}}}, #regex for tag querying
#{"$group": {"_id": "$tags", "hitcount": {"$sum": 1}}}, # count by tag
#{"$sort": SON([("hitcount", -1), ("_id", -1)])}, #sort
#])

iveris=incidents.aggregate([
@@ -605,6 +606,7 @@ def verisSummary(verisRegex=None):
except Exception as e:
sys.stderr.write('Exception while aggregating veris summary: {0}\n'.format(e))


def initConfig():
# output our log to stdout or syslog
options.output = getConfig('output', 'stdout', options.configfile)
@@ -626,6 +628,7 @@ def initConfig():
default_user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/58.0'
options.user_agent = getConfig('user_agent', default_user_agent, options.configfile)


parser = OptionParser()
parser.add_option("-c", dest='configfile',
default=os.path.join(os.path.dirname(__file__), __file__).replace('.py', '.conf'),
@@ -9,6 +9,7 @@ import os
import sys
from configlib import getConfig, OptionParser


class message(object):
def __init__(self):
'''register our criteria for being passed a message
@@ -12,6 +12,7 @@ from configlib import getConfig, OptionParser
from datetime import datetime, timedelta
from pymongo import MongoClient


def isFQDN(fqdn):
try:
# We could resolve FQDNs here, but that could tip our hand and it's
@@ -22,9 +23,11 @@ def isFQDN(fqdn):
except:
return False


def genMeteorID():
return('%024x' % random.randrange(16**24))


class message(object):
def __init__(self):
'''register our criteria for being passed a message
@@ -12,6 +12,7 @@ from configlib import getConfig, OptionParser
from datetime import datetime, timedelta
from pymongo import MongoClient


def isIPv4(ip):
try:
# netaddr on it's own considers 1 and 0 to be valid_ipv4
@@ -26,15 +27,18 @@ def isIPv4(ip):
except:
return False


def isIPv6(ip):
try:
return netaddr.valid_ipv6(ip)
except:
return False


def genMeteorID():
return('%024x' % random.randrange(16**24))


class message(object):
def __init__(self):
'''register our criteria for being passed a message
@@ -11,6 +11,7 @@ import json
import netaddr
from boto3.session import Session


def isIPv4(ip):
try:
# netaddr on it's own considers 1 and 0 to be valid_ipv4
@@ -186,7 +187,7 @@ class message(object):
if not ipcidr.ip.is_loopback() \
and not ipcidr.ip.is_private() \
and not ipcidr.ip.is_reserved():
ipaddress = str(ipcidr.cidr)
ipaddress = str(ipcidr.cidr)
self.addBlackholeEntry(ipaddress)
sys.stdout.write('Blackholed {0}\n'.format(ipaddress))
except Exception as e:
@@ -362,6 +362,7 @@ class TestGetIndices(ElasticsearchClientTest):
indices.sort()
assert indices == [self.alert_index_name, self.previous_event_index_name, self.event_index_name, 'test_index']


class TestIndexExists(ElasticsearchClientTest):

def teardown(self):
@@ -374,7 +375,7 @@ class TestIndexExists(ElasticsearchClientTest):
self.es_client.create_index('test_index')
time.sleep(1)
indices = self.es_client.index_exists('test_index')
assert indices == True
assert indices is True


class TestClusterHealth(ElasticsearchClientTest):
@@ -52,7 +52,7 @@ class TestDotDict(UnitTestSuite):
}
}
dct = DotDict(original_dct)
assert dct.get('does.not.exist') == None
assert dct.get('does.not.exist') is None
assert dct.get('details') == {'key1': 'value1','subkey': {'subkey': 'subvalue'}}
assert dct.get('details.key1') == 'value1'
assert dct.get('details.subkey') == {'subkey':'subvalue'}
@@ -8,6 +8,8 @@ import tzlocal
from mozdef_util.utilities.toUTC import toUTC

UTC_TIMEZONE_COUNT = 0


def utc_timezone():
''' This is a mock function, so when we run tests
we trick the system into thinking we're on UTC
@@ -430,7 +430,7 @@ class TestBroFixup(object):
'SOURCE': 'bro_http',
'customendpoint': 'bro'
}
MESSAGE = {
MESSAGE = {
"ts":1505701210.163246,
"uid":"CMxwva4RHFtKpxWLba",
"id.orig_h":"10.22.74.212",
@@ -55,6 +55,8 @@ class TestSSHDAcceptedMessageV1():
assert retmessage['details']['sourceipaddress'] == '10.22.74.208'

# Long Username and SHA256 fpr present


class TestSSHDAcceptedMessageV2():
def setup(self):
@@ -34,7 +34,7 @@ class TestSuricataFixup(object):
assert result == event
assert metadata['doc_type'] is not 'nsm'

## Should never match and be modified by the plugin
# Should never match and be modified by the plugin
def test_notsuri_log2(self):
metadata = {
'doc_type': 'event',
@@ -49,7 +49,7 @@ class TestSuricataFixup(object):
assert result == event
assert metadata['doc_type'] is not 'nsm'

## Should never match and be modified by the plugin
# Should never match and be modified by the plugin
def test_suricata_nocustomendpoint_log(self):
metadata = {
'doc_type': 'event',
@@ -64,32 +64,32 @@ class TestVulnerabilityMessageV2():
self.msg['sourcename'] = 'scanapi'
self.msg['version'] = 2
self.msg['vulnerabilities'] = [
{
'name': 'RHEL 6 : kernel (RHSA-2016:2006)',
'vulnerable_packages': [
'kernel-2.6.32-642.4.2.el6',
'kernel-devel-2.6.32-642.4.2.el6',
'kernel-firmware-2.6.32-642.4.2.el6',
'kernel-headers-2.6.32-642.4.2.el6'
],
'output': '\nRemote package installed : kernel-2.6.32-642.4.2.el6\n' +
'Should be : kernel-2.6.32-642.6.1.el6\n\n' +
'Remote package installed : kernel-devel-2.6.32-642.4.2.el6\n' +
'Should be : kernel-devel-2.6.32-642.6.1.el6\n' +
'\nRemote package installed : kernel-firmware-2.6.32-642.4.2.el6\n' +
'Should be : kernel-firmware-2.6.32-642.6.1.el6\n\n' +
'Remote package installed : kernel-headers-2.6.32-642.4.2.el6\n' +
'Should be : kernel-headers-2.6.32-642.6.1.el6\n\n',
'cve': 'CVE-2016-4470',
'cvss': 7.2,
'risk': 'high'
}
]
{
'name': 'RHEL 6 : kernel (RHSA-2016:2006)',
'vulnerable_packages': [
'kernel-2.6.32-642.4.2.el6',
'kernel-devel-2.6.32-642.4.2.el6',
'kernel-firmware-2.6.32-642.4.2.el6',
'kernel-headers-2.6.32-642.4.2.el6'
],
'output': '\nRemote package installed : kernel-2.6.32-642.4.2.el6\n' +
'Should be : kernel-2.6.32-642.6.1.el6\n\n' +
'Remote package installed : kernel-devel-2.6.32-642.4.2.el6\n' +
'Should be : kernel-devel-2.6.32-642.6.1.el6\n' +
'\nRemote package installed : kernel-firmware-2.6.32-642.4.2.el6\n' +
'Should be : kernel-firmware-2.6.32-642.6.1.el6\n\n' +
'Remote package installed : kernel-headers-2.6.32-642.4.2.el6\n' +
'Should be : kernel-headers-2.6.32-642.6.1.el6\n\n',
'cve': 'CVE-2016-4470',
'cvss': 7.2,
'risk': 'high'
}
]
self.msg['asset'] = {
'os': 'Linux Kernel 2.6.32-642.4.2.el6.x86_64 on Red Hat Enterprise Linux Server release 6.8 (Santiago)',
'hostname': 'hostname.mozilla.com',
'ipaddress': '1.2.3.4'
}
'os': 'Linux Kernel 2.6.32-642.4.2.el6.x86_64 on Red Hat Enterprise Linux Server release 6.8 (Santiago)',
'hostname': 'hostname.mozilla.com',
'ipaddress': '1.2.3.4'
}

def test_onMessage(self):
metadata = {}
@@ -13,6 +13,7 @@ import datetime
def utc_timezone():
return pytz.timezone('UTC')


tzlocal.get_localzone = utc_timezone