This commit is contained in:
Kyle Lahnakoski 2014-08-18 17:40:47 -04:00
Родитель 6ffb5358ee
Коммит d67265794f
58 изменённых файлов: 4801 добавлений и 6319 удалений

90
README.txt Normal file
Просмотреть файл

@ -0,0 +1,90 @@
esFrontLine
===========
Limit restful requests to backend ElasticSearch cluster: Queries only.
Requirements
------------
- Python 2.7
- An ElasticSearch cluster to forward queries to
Install
-------
I will assume you have Python installed (if not, here are `Windows7
instructions <https://github.com/klahnakoski/pyLibrary#windows-7-install-instructions->`__)
::
pip install esFrontLine
Setup
-----
You must write your own settings.json file with the following properties
set:
- **elasticsearch** - (Array of) ElasticSearch nodes
- **elasticsearch.host** - URL of the ElasticSearch node that will
accept query requests
- **elasticsearch.port** - port for ES (default = 9200)
- **flask** - flask.run() parameters (default port = 5000)
- **debug** - turn on debugging
- **whitelist** - list of indexes that are allowed
Here is an example of my ``settings.json`` file
::
{
"elasticsearch":[{
"host":"http://elasticsearch4.metrics.scl3.mozilla.com",
"port":9200
},{
"host":"http://elasticsearch5.metrics.scl3.mozilla.com",
"port":9200
},{
"host":"http://elasticsearch7.metrics.scl3.mozilla.com",
"port":9200
},{
"host":"http://elasticsearch8.metrics.scl3.mozilla.com",
"port":9200
}],
"flask":{
"host":"0.0.0.0",
"port":9292,
"debug":false,
"threaded":true,
"processes":1
},
"whitelist":["bugs", "org_chart", "bug_summary", "reviews"],
"debug":{
"log":[{
"filename": "./tests/results/logs/app.log",
"maxBytes": 10000000,
"backupCount": 200,
"encoding": "utf8"
},{
"stream":"sys.stdout"
}]
}
}
Execution
---------
::
python app.py --settings-file <path_to_file_with_JSON_settings>
Code Source
-----------
https://github.com/klahnakoski/esFrontLine

Просмотреть файл

@ -19,6 +19,7 @@ from urllib import urlencode
from . import struct
from . import jsons
from .times.dates import Date
from .jsons import json_encoder
from .collections.multiset import Multiset
from .env.profiles import Profiler
@ -50,11 +51,10 @@ class CNV:
def JSON2object(json_string, params=None, flexible=False, paths=False):
with Profiler("JSON2Object"):
try:
#REMOVE """COMMENTS""", #COMMENTS, //COMMENTS, AND \n \r
# REMOVE """COMMENTS""", # COMMENTS, //COMMENTS, AND \n \r
if flexible:
#DERIVED FROM https://github.com/jeads/datasource/blob/master/datasource/bases/BaseHub.py#L58
json_string = re.sub(r"\"\"\".*?\"\"\"|[ \t]+//.*\n|^//.*\n|#.*?\n", r"\n", json_string)
json_string = re.sub(r"\n//.*\n", r"\n\n", json_string)
# DERIVED FROM https://github.com/jeads/datasource/blob/master/datasource/bases/BaseHub.py#L58
json_string = re.sub(r"\"\"\".*?\"\"\"|[ \t]+//.*\n|^//.*\n|#.*?\n", r"\n", json_string, flags=re.MULTILINE)
if params:
params = dict([(k, CNV.value2quote(v)) for k, v in params.items()])
json_string = expand_template(json_string, params)
@ -73,7 +73,7 @@ class CNV:
@staticmethod
def string2datetime(value, format):
## http://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
## http://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
if value == None:
return None
try:
@ -83,10 +83,7 @@ class CNV:
@staticmethod
def datetime2string(value, format="%Y-%m-%d %H:%M:%S"):
try:
return value.strftime(format)
except Exception, e:
Log.error("Can not format {{value}} with {{format}}", {"value": value, "format": format}, e)
return Date(value).format(format=format)
@staticmethod
def datetime2unix(d):
@ -150,8 +147,8 @@ class CNV:
@staticmethod
def table2list(
column_names, #tuple of columns names
rows #list of tuples
column_names, # tuple of columns names
rows # list of tuples
):
return wrap([dict(zip(column_names, r)) for r in rows])
@ -168,7 +165,7 @@ class CNV:
return "\t".join(keys) + "\n" + "\n".join(output)
#PROPER NULL HANDLING
# PROPER NULL HANDLING
@staticmethod
def value2string(value):
if value == None:
@ -176,7 +173,7 @@ class CNV:
return unicode(value)
#RETURN PRETTY PYTHON CODE FOR THE SAME
# RETURN PRETTY PYTHON CODE FOR THE SAME
@staticmethod
def value2quote(value):
if isinstance(value, basestring):
@ -199,7 +196,7 @@ class CNV:
return value.replace("\\\\", "\\").replace("\\\"", "\"").replace("\\'", "'").replace("\\\n", "\n").replace("\\\t", "\t")
#RETURN PYTHON CODE FOR THE SAME
# RETURN PYTHON CODE FOR THE SAME
@staticmethod
def value2code(value):
return repr(value)
@ -270,7 +267,7 @@ class CNV:
try:
if isinstance(v, float) and round(v, 0) != v:
return v
#IF LOOKS LIKE AN INT, RETURN AN INT
# IF LOOKS LIKE AN INT, RETURN AN INT
return int(v)
except Exception:
try:

Просмотреть файл

@ -63,22 +63,6 @@ def PRODUCT(*values):
output *= v
return output
def SUM(*values):
    """
    Add together all non-null, non-NaN values; return Null when there is
    nothing to add.  A single list/set/tuple/Multiset/generator argument is
    treated as the collection of values itself.
    """
    if isinstance(values, tuple) and len(values) == 1 and isinstance(values[0], (list, set, tuple, Multiset, types.GeneratorType)):
        values = values[0]

    total = Null
    for item in values:
        # skip missing entries and NaN floats
        if item == None or (isinstance(item, float) and math.isnan(item)):
            continue
        if total == None:
            # first real value seeds the total
            total = item
        else:
            total += item
    return total
def COUNT(*values):
if isinstance(values, tuple) and len(values) == 1 and isinstance(values[0], (list, set, tuple, Multiset, types.GeneratorType)):
values = values[0]

Просмотреть файл

@ -136,7 +136,7 @@ class Matrix(object):
SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY
"""
#offsets WILL SERVE TO MASK DIMS WE ARE NOT GROUPING BY, AND SERVE AS RELATIVE INDEX FOR EACH COORDINATE
# offsets WILL SERVE TO MASK DIMS WE ARE NOT GROUPING BY, AND SERVE AS RELATIVE INDEX FOR EACH COORDINATE
offsets = []
new_dim = []
acc = 1

Просмотреть файл

@ -11,6 +11,7 @@
from __future__ import unicode_literals
class Multiset(object):
"""
Multiset IS ONE MEMBER IN A FAMILY OF USEFUL CONTAINERS
@ -24,7 +25,6 @@ class Multiset(object):
| No | No | Multiset |
+------------+---------+----------+
"""
def __new__(cls, list=None, key_field=None, count_field=None, allow_negative=False):
try:
if allow_negative:
@ -36,6 +36,15 @@ class Multiset(object):
Log.error("Not expected", e)
def add(self, value):
raise NotImplementedError
def extend(self, values):
raise NotImplementedError
def remove(self, value):
raise NotImplementedError
class _Multiset(Multiset):

19
tests/util/env/elasticsearch.py поставляемый
Просмотреть файл

@ -77,25 +77,26 @@ class ElasticSearch(object):
@staticmethod
def get_or_create_index(settings, schema, limit_replicas=False):
def get_or_create_index(settings, schema=None, limit_replicas=None):
es = ElasticSearch(settings)
aliases = es.get_aliases()
if settings.index not in [a.index for a in aliases]:
schema = CNV.JSON2object(CNV.object2JSON(schema), paths=True)
if settings.index not in aliases.index:
es = ElasticSearch.create_index(settings, schema, limit_replicas=limit_replicas)
return es
@staticmethod
def create_index(settings, schema=None, limit_replicas=False):
def create_index(settings, schema=None, limit_replicas=None):
if not schema and settings.schema_file:
from .files import File
schema = CNV.JSON2object(File(settings.schema_file).read(), flexible=True, paths=True)
elif isinstance(schema, basestring):
schema = CNV.JSON2object(schema, paths=True)
else:
schema = wrap(schema)
if isinstance(schema, basestring):
schema = CNV.JSON2object(schema)
schema = CNV.JSON2object(CNV.object2JSON(schema), paths=True)
limit_replicas = nvl(limit_replicas, settings.limit_replicas)
if limit_replicas:
# DO NOT ASK FOR TOO MANY REPLICAS
@ -164,7 +165,7 @@ class ElasticSearch(object):
return wrap({"mappings":mapping[self.settings.type]})
#DELETE ALL INDEXES WITH GIVEN PREFIX, EXCEPT name
# DELETE ALL INDEXES WITH GIVEN PREFIX, EXCEPT name
def delete_all_but(self, prefix, name):
if prefix == name:
Log.note("{{index_name}} will not be deleted", {"index_name": prefix})
@ -236,7 +237,7 @@ class ElasticSearch(object):
elif self.node_metatdata.version.number.startswith("1.0"):
query = {"query": filter}
else:
Log.error("not implemented yet")
raise NotImplementedError
if self.debug:
Log.note("Delete bugs:\n{{query}}", {"query": query})

8
tests/util/env/emailer.py поставляемый
Просмотреть файл

@ -13,7 +13,7 @@ from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import sys
from .. import struct
from ..structs.wraps import listwrap
from ..struct import nvl
@ -22,9 +22,9 @@ class Emailer:
"""
REQUIRES SETTINGS LIKE
"email": {
"from_address": "klahnakoski@mozilla.com", #DEFAULT
"to":"klahnakoski@mozilla.com", #DEFAULT
"subject": "catchy title", #DEFAULT
"from_address": "klahnakoski@mozilla.com", # DEFAULT
"to":"klahnakoski@mozilla.com", # DEFAULT
"subject": "catchy title", # DEFAULT
"host": "mail.mozilla.com",
"port": 465,
"username": "example@example.com",

6
tests/util/env/files.py поставляемый
Просмотреть файл

@ -112,9 +112,9 @@ class File(object):
f.write(d.encode("utf8"))
def __iter__(self):
#NOT SURE HOW TO MAXIMIZE FILE READ SPEED
#http://stackoverflow.com/questions/8009882/how-to-read-large-file-line-by-line-in-python
#http://effbot.org/zone/wide-finder.htm
# NOT SURE HOW TO MAXIMIZE FILE READ SPEED
# http://stackoverflow.com/questions/8009882/how-to-read-large-file-line-by-line-in-python
# http://effbot.org/zone/wide-finder.htm
def output():
try:
with io.open(self._filename, "rb") as f:

6
tests/util/env/log_usingElasticSearch.py поставляемый
Просмотреть файл

@ -33,17 +33,17 @@ class Log_usingElasticSearch(BaseLog):
def write(self, template, params):
try:
if params.get("template", None):
#DETECTED INNER TEMPLATE, ASSUME TRACE IS ON, SO DO NOT NEED THE OUTER TEMPLATE
# DETECTED INNER TEMPLATE, ASSUME TRACE IS ON, SO DO NOT NEED THE OUTER TEMPLATE
self.queue.add(params)
else:
self.queue.add({"template": template, "params": params})
return self
except Exception, e:
raise e #OH NO!
raise e # OH NO!
def stop(self):
try:
self.queue.add(Thread.STOP) #BE PATIENT, LET REST OF MESSAGE BE SENT
self.queue.add(Thread.STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
except Exception, e:
pass

10
tests/util/env/log_usingLogger.py поставляемый
Просмотреть файл

@ -22,7 +22,7 @@ from ..thread.threads import Thread
#WRAP PYTHON CLASSIC logger OBJECTS
# WRAP PYTHON CLASSIC logger OBJECTS
class Log_usingLogger(BaseLog):
def __init__(self, settings):
self.logger = logging.Logger("unique name", level=logging.INFO)
@ -34,14 +34,14 @@ class Log_usingLogger(BaseLog):
self.thread.start()
def write(self, template, params):
# http://docs.python.org/2/library/logging.html#logging.LogRecord
# http://docs.python.org/2/library/logging.html#logging.LogRecord
self.queue.add({"template": template, "params": params})
def stop(self):
try:
if DEBUG_LOGGING:
sys.stdout.write("Log_usingLogger sees stop, adding stop to queue\n")
self.queue.add(Thread.STOP) #BE PATIENT, LET REST OF MESSAGE BE SENT
self.queue.add(Thread.STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
if DEBUG_LOGGING:
sys.stdout.write("Log_usingLogger done\n")
@ -67,12 +67,12 @@ def make_log_from_settings(settings):
constructor = object.__getattribute__(temp, class_name)
except Exception, e:
if settings.stream and not constructor:
#PROVIDE A DEFAULT STREAM HANLDER
# PROVIDE A DEFAULT STREAM HANDLER
constructor = Log_usingStream
else:
Log.error("Can not find class {{class}}", {"class": path}, e)
#IF WE NEED A FILE, MAKE SURE DIRECTORY EXISTS
# IF WE NEED A FILE, MAKE SURE DIRECTORY EXISTS
if settings.filename:
from ..env.files import File

13
tests/util/env/log_usingStream.py поставляемый
Просмотреть файл

@ -12,7 +12,6 @@
from __future__ import unicode_literals
from datetime import datetime, timedelta
import sys
from .logs import BaseLog, DEBUG_LOGGING, Log
from ..strings import expand_template
from ..thread.threads import Thread
@ -20,8 +19,8 @@ from ..thread.threads import Thread
class Log_usingStream(BaseLog):
#stream CAN BE AN OBJCET WITH write() METHOD, OR A STRING
#WHICH WILL eval() TO ONE
# stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
# WHICH WILL eval() TO ONE
def __init__(self, stream):
assert stream
@ -29,14 +28,14 @@ class Log_usingStream(BaseLog):
if isinstance(stream, basestring):
if stream.startswith("sys."):
use_UTF8 = True #sys.* ARE OLD AND CAN NOT HANDLE unicode
use_UTF8 = True # sys.* ARE OLD AND CAN NOT HANDLE unicode
self.stream = eval(stream)
name = stream
else:
self.stream = stream
name = "stream"
#WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
# WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
from ..thread.threads import Queue
if use_UTF8:
@ -58,13 +57,13 @@ class Log_usingStream(BaseLog):
self.queue.add({"template": template, "params": params})
return self
except Exception, e:
raise e #OH NO!
raise e # OH NO!
def stop(self):
try:
if DEBUG_LOGGING:
sys.stdout.write("Log_usingStream sees stop, adding stop to queue\n")
self.queue.add(Thread.STOP) #BE PATIENT, LET REST OF MESSAGE BE SENT
self.queue.add(Thread.STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
if DEBUG_LOGGING:
sys.stdout.write("Log_usingStream done\n")

120
tests/util/env/logs.py поставляемый
Просмотреть файл

@ -11,13 +11,14 @@
from __future__ import unicode_literals
from datetime import datetime
import os
import sys
from types import ModuleType
from .. import struct
from ..jsons import json_encoder
from ..thread import threads
from ..struct import nvl, Struct
from ..structs.wraps import listwrap, wrap
from ..struct import nvl, Struct, split_field, join_field
from ..structs.wraps import listwrap, wrap, wrap_dot
from ..strings import indent, expand_template
from ..thread.threads import Thread
@ -26,6 +27,7 @@ from ..thread.threads import Thread
DEBUG_LOGGING = False
ERROR = "ERROR"
WARNING = "WARNING"
UNEXPECTED = "UNEXPECTED"
NOTE = "NOTE"
@ -36,9 +38,10 @@ class Log(object):
trace = False
main_log = None
logging_multi = None
profiler = None
profiler = None # simple pypy-friendly profiler
cprofiler = None # screws up with pypy, but better than nothing
error_mode = False # prevent error loops
please_setup_constants = False # we intend to manipulate module-level constants for debugging
@classmethod
def new_instance(cls, settings):
@ -106,16 +109,16 @@ class Log(object):
cls.main_log.write(log_template, log_params)
@classmethod
def warning(cls, template, params=None, cause=None):
def unexpected(cls, template, params=None, cause=None):
if isinstance(params, BaseException):
cause = params
params = None
if cause and not isinstance(cause, Except):
cause = Except(WARNING, unicode(cause), trace=extract_tb(0))
cause = Except(UNEXPECTED, unicode(cause), trace=extract_tb(0))
trace = extract_stack(1)
e = Except(WARNING, template, params, cause, trace)
e = Except(UNEXPECTED, template, params, cause, trace)
Log.note(unicode(e), {
"warning": {
"template": template,
@ -126,13 +129,34 @@ class Log(object):
})
@classmethod
def warning(cls, template, params=None, cause=None):
if isinstance(params, BaseException):
cause = params
params = None
if cause and not isinstance(cause, Except):
cause = Except(WARNING, unicode(cause), trace=extract_tb(0))
trace = extract_stack(1)
e = Except(WARNING, template, params, cause, trace)
Log.note(unicode(e), {
"warning": { # REDUNDANT INFO
"template": template,
"params": params,
"cause": cause,
"trace": trace
}
})
@classmethod
def error(
cls,
template, #human readable template
params=None, #parameters for template
cause=None, #pausible cause
offset=0 #stack trace offset (==1 if you do not want to report self)
template, # human readable template
params=None, # parameters for template
cause=None, # plausible cause
offset=0 # stack trace offset (==1 if you do not want to report self)
):
"""
raise an exception with a trace for the cause too
@ -157,10 +181,10 @@ class Log(object):
@classmethod
def fatal(
cls,
template, #human readable template
params=None, #parameters for template
cause=None, #pausible cause
offset=0 #stack trace offset (==1 if you do not want to report self)
template, # human readable template
params=None, # parameters for template
cause=None, # plausible cause
offset=0 # stack trace offset (==1 if you do not want to report self)
):
"""
SEND TO STDERR
@ -201,10 +225,10 @@ class Log(object):
sys.stderr.write(str_e)
#RUN ME FIRST TO SETUP THE THREADED LOGGING
# RUN ME FIRST TO SETUP THE THREADED LOGGING
@classmethod
def start(cls, settings=None):
##http://victorlin.me/2012/08/good-logging-practice-in-python/
## http://victorlin.me/2012/08/good-logging-practice-in-python/
if not settings:
return
@ -239,6 +263,49 @@ class Log(object):
if settings.profile.enabled:
profiles.ON = True
if settings.constants:
cls.please_setup_constants = True
if cls.please_setup_constants:
sys_modules = sys.modules
# ONE MODULE IS MISSING, THE CALLING MODULE
caller_globals = sys._getframe(1).f_globals
caller_file = caller_globals["__file__"]
if not caller_file.endswith(".py"):
raise Exception("do not know how to handle non-python caller")
caller_module = caller_file[:-3].replace("/", ".")
for k, v in wrap_dot(settings.constants).leaves():
module_name = join_field(split_field(k)[:-1])
attribute_name = split_field(k)[-1].lower()
if module_name in sys_modules and isinstance(sys_modules[module_name], ModuleType):
mod = sys_modules[module_name]
all_names = dir(mod)
for name in all_names:
if attribute_name == name.lower():
setattr(mod, name, v)
continue
elif caller_module.endswith(module_name):
for name in caller_globals.keys():
if attribute_name == name.lower():
old_value = caller_globals[name]
try:
new_value = old_value.__class__(v) # TRY TO MAKE INSTANCE OF SAME CLASS
except Exception, e:
new_value = v
caller_globals[name] = new_value
Log.note("Changed {{module}}[{{attribute}}] from {{old_value}} to {{new_value}}", {
"module": module_name,
"attribute": name,
"old_value": old_value,
"new_value": new_value
})
break
else:
Log.note("Can not change {{module}}[{{attribute}}] to {{new_value}}", {
"module": module_name,
"attribute": k,
"new_value": v
})
@classmethod
def stop(cls):
@ -253,7 +320,7 @@ class Log(object):
def write(self):
Log.error("not implemented")
raise NotImplementedError
def extract_stack(start=0):
"""
@ -330,7 +397,7 @@ def format_trace(tbs, start=0):
class Except(Exception):
def __init__(self, type=ERROR, template=None, params=None, cause=None, trace=None):
super(Exception, self).__init__(self)
Exception.__init__(self)
self.type = type
self.template = template
self.params = params
@ -369,6 +436,9 @@ class Except(Exception):
return output + "\n"
def __unicode__(self):
return unicode(str(self))
def __json__(self):
return json_encoder(Struct(
type = self.type,
@ -401,17 +471,15 @@ class Log_usingFile(BaseLog):
self.file_lock = threads.Lock()
def write(self, template, params):
from ..env.files import File
with self.file_lock:
File(self.filename).append(expand_template(template, params))
self.file.append(expand_template(template, params))
class Log_usingThread(BaseLog):
def __init__(self, logger):
#DELAYED LOAD FOR THREADS MODULE
# DELAYED LOAD FOR THREADS MODULE
from ..thread.threads import Queue
self.queue = Queue(max=10000, silent=True)
@ -438,13 +506,13 @@ class Log_usingThread(BaseLog):
return self
except Exception, e:
sys.stdout.write("IF YOU SEE THIS, IT IS LIKELY YOU FORGOT TO RUN Log.start() FIRST\n")
raise e #OH NO!
raise e # OH NO!
def stop(self):
try:
if DEBUG_LOGGING:
sys.stdout.write("injecting stop into queue\n")
self.queue.add(Thread.STOP) #BE PATIENT, LET REST OF MESSAGE BE SENT
self.queue.add(Thread.STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
if DEBUG_LOGGING:
sys.stdout.write("Log_usingThread telling logger to stop\n")
@ -463,6 +531,7 @@ class Log_usingThread(BaseLog):
class Log_usingMulti(BaseLog):
def __init__(self):
self.many = []
def write(self, template, params):
for m in self.many:
try:
@ -481,6 +550,7 @@ class Log_usingMulti(BaseLog):
def clear_log(self):
self.many = []
def stop(self):
for m in self.many:
try:

2
tests/util/env/profiles.py поставляемый
Просмотреть файл

@ -49,7 +49,7 @@ class Profiler(object):
self.end = clock()
duration = self.end - self.start
from util.queries.windows import Stats
from ..queries.windows import Stats
self.stats.add(duration)
if self.samples is not None:

31
tests/util/env/startup.py поставляемый
Просмотреть файл

@ -19,19 +19,19 @@ from ..env.logs import Log
from ..env.files import File
#PARAMETERS MATCH argparse.ArgumentParser.add_argument()
#http://docs.python.org/dev/library/argparse.html#the-add-argument-method
#name or flags - Either a name or a list of option strings, e.g. foo or -f, --foo.
#action - The basic type of action to be taken when this argument is encountered at the command line.
#nargs - The number of command-line arguments that should be consumed.
#const - A constant value required by some action and nargs selections.
#default - The value produced if the argument is absent from the command line.
#type - The type to which the command-line argument should be converted.
#choices - A container of the allowable values for the argument.
#required - Whether or not the command-line option may be omitted (optionals only).
#help - A brief description of what the argument does.
#metavar - A name for the argument in usage messages.
#dest - The name of the attribute to be added to the object returned by parse_args().
# PARAMETERS MATCH argparse.ArgumentParser.add_argument()
# http://docs.python.org/dev/library/argparse.html#the-add-argument-method
# name or flags - Either a name or a list of option strings, e.g. foo or -f, --foo.
# action - The basic type of action to be taken when this argument is encountered at the command line.
# nargs - The number of command-line arguments that should be consumed.
# const - A constant value required by some action and nargs selections.
# default - The value produced if the argument is absent from the command line.
# type - The type to which the command-line argument should be converted.
# choices - A container of the allowable values for the argument.
# required - Whether or not the command-line option may be omitted (optionals only).
# help - A brief description of what the argument does.
# metavar - A name for the argument in usage messages.
# dest - The name of the attribute to be added to the object returned by parse_args().
def _argparse(defs):
parser = argparse.ArgumentParser()
@ -130,15 +130,10 @@ class SingleInstance:
sys.exit(-1)
self.initialized = True
def __exit__(self, type, value, traceback):
self.__del__()
def __del__(self):
import sys
import os
temp, self.initialized = self.initialized, False
if not temp:
return

Просмотреть файл

@ -66,7 +66,7 @@ def encode(value, pretty=False):
output = _buffer.build()
return output
except Exception, e:
#THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
# THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
from .env.logs import Log
Log.warning("Serialization of JSON problems", e)
try:
@ -202,7 +202,7 @@ for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), u'\\u{0:04x}'.format(i))
#REMOVE VALUES THAT CAN NOT BE JSON-IZED
# REMOVE VALUES THAT CAN NOT BE JSON-IZED
def json_scrub(value):
    """Remove values that can not be JSON-ized (delegates to module-level _scrub)."""
    return _scrub(value)
@ -340,7 +340,7 @@ def pretty_json(value):
js = [pretty_json(v) for v in value]
max_len = MAX(len(j) for j in js)
if max_len <= ARRAY_ITEM_MAX_LENGTH and AND(j.find("\n") == -1 for j in js):
#ALL TINY VALUES
# ALL TINY VALUES
num_columns = max(1, min(ARRAY_MAX_COLUMNS, int(floor((ARRAY_ROW_LENGTH + 2.0)/float(max_len+2))))) # +2 TO COMPENSATE FOR COMMAS
if len(js)<=num_columns: # DO NOT ADD \n IF ONLY ONE ROW
return "[" + ", ".join(js) + "]"

Просмотреть файл

@ -128,7 +128,7 @@ class Math(object):
return v - (v % mod)
#RETURN A VALUE CLOSE TO value, BUT WITH SHORTER len(unicode(value))<len(unicode(value)):
# RETURN A VALUE CLOSE TO value, BUT WITH SHORTER len(unicode(value))<len(unicode(value)):
@staticmethod
def approx_str(value):
v = unicode(value)
@ -161,3 +161,15 @@ class Math(object):
else:
pass
return output
@staticmethod
def min(*values):
    """
    Return the smallest non-null value, or None when every value is null.
    NOTE: uses == None (not is None) to match this library's Null semantics.
    """
    smallest = None
    for candidate in values:
        # null entries are ignored rather than treated as smallest
        if candidate == None:
            continue
        if smallest == None or candidate < smallest:
            smallest = candidate
    return smallest

Просмотреть файл

@ -35,7 +35,7 @@ def encrypt(text, _key, salt=None):
data = bytearray(text.encode("utf8"))
#Initialize encryption using key and iv
# Initialize encryption using key and iv
key_expander_256 = key_expander.KeyExpander(256)
expanded_key = key_expander_256.expand(_key)
aes_cipher_256 = aes_cipher.AESCipher(expanded_key)
@ -48,7 +48,7 @@ def encrypt(text, _key, salt=None):
output.length = len(data)
encrypted = bytearray()
for i, d in Q.groupby(data, size=16):
for _, d in Q.groupby(data, size=16):
encrypted.extend(aes_cbc_256.encrypt_block(d))
output.data = CNV.bytearray2base64(encrypted)
json = CNV.object2JSON(output)
@ -65,13 +65,13 @@ def decrypt(data, _key):
"""
ACCEPT JSON OF ENCRYPTED DATA {"salt":s, "length":l, "data":d}
"""
#Key and iv have not been generated or provided, bail out
# Key and iv have not been generated or provided, bail out
if _key is None:
Log.error("Expecting a key")
_input = CNV.JSON2object(data)
#Initialize encryption using key and iv
# Initialize encryption using key and iv
key_expander_256 = key_expander.KeyExpander(256)
expanded_key = key_expander_256.expand(_key)
aes_cipher_256 = aes_cipher.AESCipher(expanded_key)

Просмотреть файл

@ -35,10 +35,13 @@ if DEBUG_STRANGMAN:
def chisquare(f_obs, f_exp):
py_result = strangman.stats.chisquare(
f_obs,
f_exp
)
try:
py_result = strangman.stats.chisquare(
f_obs,
f_exp
)
except Exception, e:
Log.error("problem with call", e)
if DEBUG_STRANGMAN:
sp_result = scipy.stats.chisquare(
@ -236,12 +239,12 @@ class Z_moment(object):
@property
def tuple(self):
#RETURN AS ORDERED TUPLE
# RETURN AS ORDERED TUPLE
return self.S
@property
def dict(self):
#RETURN HASH OF SUMS
# RETURN HASH OF SUMS
return {u"s" + unicode(i): m for i, m in enumerate(self.S)}
@ -268,7 +271,7 @@ def sub(a, b):
def z_moment2dict(z):
#RETURN HASH OF SUMS
# RETURN HASH OF SUMS
return {u"s" + unicode(i): m for i, m in enumerate(z.S)}
@ -308,7 +311,7 @@ def median(values, simple=True, mean_weight=0.0):
return float(_sorted[middle - 1] + _median) / 2
return _median
#FIND RANGE OF THE median
# FIND RANGE OF THE median
start_index = middle - 1
while start_index > 0 and _sorted[start_index] == _median:
start_index -= 1

Просмотреть файл

@ -174,8 +174,8 @@ class _MVEL(object):
else:
qb_fields, es_fields = zip(*[(i, e) for i, e in enumerate(fields)])
#NO LOOPS BECAUSE QUERY IS SHALLOW
#DOMAIN IS FROM A DIMENSION, USE IT'S FIELD DEFS TO PULL
# NO LOOPS BECAUSE QUERY IS SHALLOW
# DOMAIN IS FROM A DIMENSION, USE IT'S FIELD DEFS TO PULL
if len(es_fields) == 1:
def fromTerm(term):
return domain.getPartByKey(term)

Просмотреть файл

@ -43,7 +43,7 @@ def run(query):
Log.error("Do not know how to handle")
if query.edges:
Log.error("not implemented yet")
raise NotImplementedError
try:
if query.filter != None or query.esfilter != None:
@ -74,7 +74,7 @@ groupby = group_by.groupby
def index(data, keys=None):
#return dict that uses keys to index data
# return dict that uses keys to index data
o = Index(keys)
for d in data:
o.add(d)
@ -115,7 +115,7 @@ def map2set(data, relation):
if isinstance(relation, dict):
try:
#relation[d] is expected to be a list
# relation[d] is expected to be a list
# return set(cod for d in data for cod in relation[d])
output = set()
for d in data:
@ -126,7 +126,7 @@ def map2set(data, relation):
Log.error("Expecting a dict with lists in codomain", e)
else:
try:
#relation[d] is expected to be a list
# relation[d] is expected to be a list
# return set(cod for d in data for cod in relation[d])
output = set()
for d in data:
@ -215,7 +215,7 @@ def _tuple_deep(v, field, depth, record):
def select(data, field_name):
#return list with values from field_name
# return list with values from field_name
if isinstance(data, Cube):
return data._select(_normalize_selects(field_name))
@ -391,14 +391,14 @@ def sort(data, fieldnames=None):
fieldnames = listwrap(fieldnames)
if len(fieldnames) == 1:
fieldnames = fieldnames[0]
#SPECIAL CASE, ONLY ONE FIELD TO SORT BY
# SPECIAL CASE, ONLY ONE FIELD TO SORT BY
if isinstance(fieldnames, basestring):
def comparer(left, right):
return cmp(nvl(left, Struct())[fieldnames], nvl(right, Struct())[fieldnames])
return StructList([unwrap(d) for d in sorted(data, cmp=comparer)])
else:
#EXPECTING {"field":f, "sort":i} FORMAT
# EXPECTING {"field":f, "sort":i} FORMAT
def comparer(left, right):
return fieldnames["sort"] * cmp(nvl(left, Struct())[fieldnames["field"]], nvl(right, Struct())[fieldnames["field"]])
@ -430,17 +430,17 @@ def sort(data, fieldnames=None):
Log.error("Problem sorting\n{{data}}", {"data": data}, e)
def pairwise(values):
"""
WITH values = [a, b, c, d, ...]
RETURN [(a, b), (b, c), (c, d), ...]
"""
i = iter(values)
a = next(i)
def add(*values):
    """
    Sum values left-to-right, starting from Null.  The first value seeds the
    total unconditionally (even when it is null); after that, null values are
    skipped instead of being added.
    """
    result = Null
    for value in values:
        if result == None:
            # nothing accumulated yet: take the value as-is
            result = value
        elif value != None:
            result += value
    return result
for b in i:
yield (a, b)
a = b
def filter(data, where):
@ -637,7 +637,7 @@ def drill_filter(esfilter, data):
else:
Log.error(u"Can not interpret esfilter: {{esfilter}}", {u"esfilter": filter})
output = [] #A LIST OF OBJECTS MAKING THROUGH THE FILTER
output = [] # A LIST OF OBJECTS MAKING THROUGH THE FILTER
def main(sequence, esfilter, row, depth):
"""
@ -681,7 +681,7 @@ def drill_filter(esfilter, data):
else:
nested = row[-1][primary_column[depth]]
if not nested:
#PASSED FILTER, BUT NO CHILDREN, SO ADD NULL CHILDREN
# PASSED FILTER, BUT NO CHILDREN, SO ADD NULL CHILDREN
for i in range(depth, max):
row.append(None)
uniform_output.append(row)
@ -695,7 +695,7 @@ def drill_filter(esfilter, data):
recurse(o, 0)
if not max:
#SIMPLE LIST AS RESULT
# SIMPLE LIST AS RESULT
return wrap([unwrap(u[0]) for u in uniform_output])
return FlatList(primary_column[0:max], uniform_output)
@ -744,7 +744,7 @@ def window(data, param):
data = sort(data, sortColumns)
if not aggregate and not edges:
#SIMPLE CALCULATED VALUE
# SIMPLE CALCULATED VALUE
for rownum, r in enumerate(data):
r[name] = calc_value(r, rownum, data)
return
@ -763,19 +763,19 @@ def window(data, param):
head = nvl(_range.max, _range.stop)
tail = nvl(_range.min, _range.start)
#PRELOAD total
# PRELOAD total
total = aggregate()
for i in range(tail, head):
total.add(sequence[i].__temp__)
#WINDOW FUNCTION APPLICATION
# WINDOW FUNCTION APPLICATION
for i, r in enumerate(sequence):
r[name] = total.end()
total.add(sequence[i + head].__temp__)
total.sub(sequence[i + tail].__temp__)
for r in data:
r["__temp__"] = None #CLEANUP
r["__temp__"] = None # CLEANUP

Просмотреть файл

@ -32,7 +32,7 @@ class Cube(object):
self.is_value = False if isinstance(select, list) else True
self.select = select
#ENSURE frum IS PROPER FORM
# ENSURE frum IS PROPER FORM
if isinstance(select, list):
if OR(not isinstance(v, Matrix) for v in data.values()):
Log.error("Expecting data to be a dict with Matrix values")
@ -179,7 +179,7 @@ class Cube(object):
if len(stacked) + len(remainder) != len(self.edges):
Log.error("can not find some edges to group by")
#CACHE SOME RESULTS
# CACHE SOME RESULTS
keys = [e.name for e in self.edges]
getKey = [e.domain.getKey for e in self.edges]
lookup = [[getKey[i](p) for p in e.domain.partitions] for i, e in enumerate(self.edges)]

Просмотреть файл

@ -220,8 +220,8 @@ class DBQuery(object):
FROM
{{table}}
{{where}}
{{limit}}
{{sort}}
{{limit}}
""", {
"selects": SQL(",\n".join(selects)),
"table": self._subquery(query["from"])[0],
@ -241,7 +241,7 @@ class DBQuery(object):
r[s.name+"."+k] = None
if isinstance(s.value, list):
#REWRITE AS TUPLE
# REWRITE AS TUPLE
for r in result:
r[s.name] = tuple(r[s.name + "," + str(i)] for i, ss in enumerate(s.value))
for i, ss in enumerate(s.value):
@ -265,8 +265,8 @@ class DBQuery(object):
FROM
{{table}}
{{where}}
{{limit}}
{{sort}}
{{limit}}
""", {
"selects": SQL(select),
"table": self._subquery(query["from"])[0],
@ -326,7 +326,9 @@ def _esfilter2sqlwhere(db, esfilter):
"""
esfilter = wrap(esfilter)
if esfilter["and"]:
if esfilter is True:
return "1=1"
elif esfilter["and"]:
return _isolate("AND", [esfilter2sqlwhere(db, a) for a in esfilter["and"]])
elif esfilter["or"]:
return _isolate("OR", [esfilter2sqlwhere(db, a) for a in esfilter["or"]])
@ -359,7 +361,7 @@ def _esfilter2sqlwhere(db, esfilter):
return "false"
except Exception, e:
pass
return db.quote_column(col) + " in (" + ", ".join([db.quote_value(val) for val in v]) + ")"
return db.quote_column(col) + " in (" + ",\n".join([db.quote_value(val) for val in v]) + ")"
elif esfilter.script:
return "(" + esfilter.script + ")"
elif esfilter.range:
@ -374,7 +376,7 @@ def _esfilter2sqlwhere(db, esfilter):
min = nvl(r["gte"], r[">="])
max = nvl(r["lte"], r["<="])
if min and max:
#SPECIAL CASE (BETWEEN)
# SPECIAL CASE (BETWEEN)
return db.quote_column(col) + " BETWEEN " + db.quote_value(min) + " AND " + db.quote_value(max)
else:
return " AND ".join(
@ -403,7 +405,7 @@ def _esfilter2sqlwhere(db, esfilter):
def expand_json(rows):
#CONVERT JSON TO VALUES
# CONVERT JSON TO VALUES
for r in rows:
for k, json in list(r.items()):
if isinstance(json, basestring) and json[0:1] in ("[", "{"):
@ -414,7 +416,7 @@ def expand_json(rows):
pass
#MAP NAME TO SQL FUNCTION
# MAP NAME TO SQL FUNCTION
aggregates = {
"one": "COUNT({{code}})",
"sum": "SUM({{code}})",

Просмотреть файл

@ -156,7 +156,7 @@ class ESQuery(object):
"""
command = wrap(command)
#GET IDS OF DOCUMENTS
# GET IDS OF DOCUMENTS
results = self.es.search({
"fields": [],
"query": {"filtered": {
@ -172,7 +172,7 @@ class ESQuery(object):
if not MVEL.isKeyword(k):
Log.error("Only support simple paths for now")
scripts.append("ctx._source."+k+" = "+MVEL.value2MVEL(v)+";")
scripts.append("ctx._source."+k+" = "+MVEL.value2MVEL(v)+";\n")
script = "".join(scripts)
if results.hits.hits:

Просмотреть файл

@ -150,7 +150,7 @@ def es_setop(es, mvel, query):
esQuery.facets.mvel.terms.order = "term" if s0.sort >= 0 else "reverse_term"
elif not isDeep:
simple_query = query.copy()
simple_query.where = TRUE_FILTER #THE FACET FILTER IS FASTER
simple_query.where = TRUE_FILTER # THE FACET FILTER IS FASTER
esQuery.facets.mvel = {
"terms": {
"script_field": mvel.code(simple_query),

Просмотреть файл

@ -87,7 +87,7 @@ def es_terms(es, mvel, query):
try:
output[s.name][term_coord] = term[aggregates[s.aggregate]]
except Exception, e:
#USUALLY CAUSED BY output[s.name] NOT BEING BIG ENOUGH TO HANDLE NULL COUNTS
# USUALLY CAUSED BY output[s.name] NOT BEING BIG ENOUGH TO HANDLE NULL COUNTS
pass
cube = Cube(query.select, query.edges, output)
cube.query = query

Просмотреть файл

@ -23,7 +23,7 @@ from ..structs.wraps import wrap, listwrap
def is_terms_stats(query):
#ONLY ALLOWED ONE UNKNOWN DOMAIN
# ONLY ALLOWED ONE UNKNOWN DOMAIN
num_unknown = COUNT(1 for e in query.edges if e.domain.type not in domains.KNOWN)
if num_unknown <= 1:
@ -64,7 +64,7 @@ def es_terms_stats(esq, mvel, query):
if not specialEdge:
# WE SERIOUSLY WANT A SPECIAL EDGE, OTHERWISE WE WILL HAVE TOO MANY FACETS
#THE BIGGEST EDGE MAY BE COLLAPSED TO A TERM, MAYBE?
# THE BIGGEST EDGE MAY BE COLLAPSED TO A TERM, MAYBE?
num_parts = 0
special_index = -1
for i, e in enumerate(facetEdges):
@ -147,7 +147,7 @@ def es_terms_stats(esq, mvel, query):
data = es_query_util.post(esq.es, esQuery, query.limit)
if specialEdge.domain.type not in domains.KNOWN:
#WE BUILD THE PARTS BASED ON THE RESULTS WE RECEIVED
# WE BUILD THE PARTS BASED ON THE RESULTS WE RECEIVED
partitions = StructList()
map = {}
for facetName, parts in data.facets.items():
@ -195,7 +195,7 @@ def register_script_field(esQuery, code):
if not esQuery.script_fields:
esQuery.script_fields = {}
#IF CODE IS IDENTICAL, THEN USE THE EXISTING SCRIPT
# IF CODE IS IDENTICAL, THEN USE THE EXISTING SCRIPT
for n, c in esQuery.script_fields.items():
if c.script == code:
return n

Просмотреть файл

@ -53,7 +53,7 @@ def loadColumns(es, frum):
if not frum.host:
Log.error("must have host defined")
#DETERMINE IF THE es IS FUNCTIONALLY DIFFERENT
# DETERMINE IF THE es IS FUNCTIONALLY DIFFERENT
diff = False
for k, v in es.settings.items():
if k != "name" and v != frum[k]:
@ -444,7 +444,7 @@ def compileEdges2Term(mvel_compiler, edges, constants):
if not t.toTerm.body:
mvel_compiler.Parts2Term(e.domain)
Log.error("")
Log.unexpected("what?")
fromTerm2Part.append(t.fromTerm)
mvel_terms.append(t.toTerm.body)
@ -472,7 +472,7 @@ def fix_es_stats(s):
return s
#MAP NAME TO SQL FUNCTION
# MAP NAME TO SQL FUNCTION
aggregates = {
"none": "none",
"one": "count",

Просмотреть файл

@ -8,6 +8,7 @@
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from dzAlerts.util.collections import OR
from ..structs.wraps import wrap
TRUE_FILTER = True
@ -56,7 +57,7 @@ def _normalize(esfilter):
while isDiff:
isDiff = False
if esfilter["and"]:
if esfilter["and"] != None:
output = []
for a in esfilter["and"]:
if isinstance(a, (list, set)):
@ -88,7 +89,7 @@ def _normalize(esfilter):
esfilter = wrap({"and": output})
continue
if esfilter["or"]:
if esfilter["or"] != None:
output = []
for a in esfilter["or"]:
a_ = _normalize(a)
@ -127,8 +128,24 @@ def _normalize(esfilter):
if esfilter.terms != None:
for k, v in esfilter.terms.items():
if len(v) > 0:
esfilter.isNormal = True
return esfilter
if OR(vv == None for vv in v):
rest = [vv for vv in v if vv != None]
if len(rest) > 0:
return {
"or": [
{"missing": {"field": k}},
{"terms": {k: rest}}
],
"isNormal": True
}
else:
return {
"missing": {"field": k},
"isNormal": True
}
else:
esfilter.isNormal = True
return esfilter
return FALSE_FILTER
if esfilter["not"] != None:

Просмотреть файл

@ -21,7 +21,7 @@ from ..collections.multiset import Multiset
def groupby(data, keys=None, size=None, min_size=None, max_size=None, contiguous=False):
"""
return list of (keys, values) pairs where
group by the set of set of keys
group by the set of keys
values IS LIST OF ALL data that has those keys
contiguous - MAINTAIN THE ORDER OF THE DATA, STARTING THE NEW GROUP WHEN THE SELECTOR CHANGES
"""
@ -34,9 +34,7 @@ def groupby(data, keys=None, size=None, min_size=None, max_size=None, contiguous
if isinstance(data, Cube):
return data.groupby(keys)
def value2hash(x):
return value2key(keys, x)
keys = listwrap(keys)
def get_keys(d):
output = Struct()
for k in keys:
@ -50,7 +48,7 @@ def groupby(data, keys=None, size=None, min_size=None, max_size=None, contiguous
agg = StructList()
acc = StructList()
curr_key = value2hash(data[0])
curr_key = value2key(keys, data[0])
for d in data:
key = value2key(keys, d)
if key != curr_key:
@ -98,7 +96,7 @@ def groupby_size(data, size):
break
return output
#THIS IS LAZY
# THIS IS LAZY
i = 0
while True:
output = more()

Просмотреть файл

@ -46,7 +46,7 @@ class Index(object):
Log.error("something went wrong", e)
def __setitem__(self, key, value):
Log.error("Not implemented")
raise NotImplementedError
def add(self, val):

Просмотреть файл

@ -290,14 +290,14 @@ def _where_terms(master, where, schema):
"""
if isinstance(where, dict):
if where.term:
#MAP TERM
# MAP TERM
try:
output = _map_term_using_schema(master, [], where.term, schema.edges)
return output
except Exception, e:
Log.error("programmer problem?", e)
elif where.terms:
#MAP TERM
# MAP TERM
output = StructList()
for k, v in where.terms.items():
if not isinstance(v, (list, set)):
@ -307,7 +307,7 @@ def _where_terms(master, where, schema):
output.append({"terms": {k: v}})
else:
if isinstance(edge, basestring):
#DIRECT FIELD REFERENCE
# DIRECT FIELD REFERENCE
return {"terms": {edge: v}}
try:
domain = edge.getDomain()

Просмотреть файл

@ -26,25 +26,26 @@ class AggregationFunction(object):
"""
RETURN A ZERO-STATE AGGREGATE
"""
Log.error("not implemented yet")
raise NotImplementedError
def add(self, value):
"""
ADD value TO AGGREGATE
"""
Log.error("not implemented yet")
raise NotImplementedError
def merge(self, agg):
"""
ADD TWO AGGREGATES TOGETHER
"""
Log.error("not implemented yet")
raise NotImplementedError
def end(self):
"""
RETURN AGGREGATE
"""
raise NotImplementedError
class Exists(AggregationFunction):
@ -70,14 +71,14 @@ class WindowFunction(AggregationFunction):
"""
RETURN A ZERO-STATE AGGREGATE
"""
Log.error("not implemented yet")
raise NotImplementedError
def sub(self, value):
"""
REMOVE value FROM AGGREGATE
"""
Log.error("not implemented yet")
raise NotImplementedError
def Stats(**kwargs):

Просмотреть файл

@ -96,7 +96,7 @@ class DB(object):
self.cursor = None
self.partial_rollback = False
self.transaction_level = 0
self.backlog = [] #accumulate the write commands so they are sent at once
self.backlog = [] # accumulate the write commands so they are sent at once
def __enter__(self):
@ -144,7 +144,7 @@ class DB(object):
def close(self):
if self.transaction_level > 0:
Log.error("expecting commit() or rollback() before close")
self.cursor = None #NOT NEEDED
self.cursor = None # NOT NEEDED
try:
self.db.close()
except Exception, e:
@ -194,7 +194,7 @@ class DB(object):
def rollback(self):
self.backlog = [] #YAY! FREE!
self.backlog = [] # YAY! FREE!
if self.transaction_level == 0:
Log.error("No transaction has begun")
elif self.transaction_level == 1:
@ -227,7 +227,7 @@ class DB(object):
self._execute_backlog()
try:
old_cursor = self.cursor
if not old_cursor: #ALLOW NON-TRANSACTIONAL READS
if not old_cursor: # ALLOW NON-TRANSACTIONAL READS
self.cursor = self.db.cursor()
self.cursor.execute("SET TIME_ZONE='+00:00'")
self.cursor.close()
@ -244,7 +244,7 @@ class DB(object):
fixed = [[utf8_to_unicode(c) for c in row] for row in self.cursor]
result = CNV.table2list(columns, fixed)
if not old_cursor: #CLEANUP AFTER NON-TRANSACTIONAL READS
if not old_cursor: # CLEANUP AFTER NON-TRANSACTIONAL READS
self.cursor.close()
self.cursor = None
@ -261,7 +261,7 @@ class DB(object):
self._execute_backlog()
try:
old_cursor = self.cursor
if not old_cursor: #ALLOW NON-TRANSACTIONAL READS
if not old_cursor: # ALLOW NON-TRANSACTIONAL READS
self.cursor = self.db.cursor()
self.cursor.execute("SET TIME_ZONE='+00:00'")
self.cursor.close()
@ -278,7 +278,7 @@ class DB(object):
# columns = [utf8_to_unicode(d[0]) for d in nvl(self.cursor.description, [])]
result = zip(*grid)
if not old_cursor: #CLEANUP AFTER NON-TRANSACTIONAL READS
if not old_cursor: # CLEANUP AFTER NON-TRANSACTIONAL READS
self.cursor.close()
self.cursor = None
@ -299,7 +299,7 @@ class DB(object):
self._execute_backlog()
try:
old_cursor = self.cursor
if not old_cursor: #ALLOW NON-TRANSACTIONAL READS
if not old_cursor: # ALLOW NON-TRANSACTIONAL READS
self.cursor = self.db.cursor()
if param:
@ -314,7 +314,7 @@ class DB(object):
num += 1
_execute(wrap(dict(zip(columns, [utf8_to_unicode(c) for c in r]))))
if not old_cursor: #CLEANUP AFTER NON-TRANSACTIONAL READS
if not old_cursor: # CLEANUP AFTER NON-TRANSACTIONAL READS
self.cursor.close()
self.cursor = None
@ -485,8 +485,8 @@ class DB(object):
where_clause = " AND\n".join([
self.quote_column(k) + "=" + self.quote_value(v) if v != None else self.quote_column(k) + " IS NULL"
for k, v in where_slice.items()]
)
for k, v in where_slice.items()
])
command = "UPDATE " + self.quote_column(table_name) + "\n" + \
"SET " + \
@ -509,7 +509,7 @@ class DB(object):
return "NULL"
elif isinstance(value, SQL):
if not value.param:
#value.template CAN BE MORE THAN A TEMPLATE STRING
# value.template CAN BE MORE THAN A TEMPLATE STRING
return self.quote_sql(value.template)
param = {k: self.quote_sql(v) for k, v in value.param.items()}
return expand_template(value.template, param)
@ -554,13 +554,13 @@ class DB(object):
if isinstance(column_name, basestring):
if table:
column_name = table + "." + column_name
return SQL("`" + column_name.replace(".", "`.`") + "`") #MY SQL QUOTE OF COLUMN NAMES
return SQL("`" + column_name.replace(".", "`.`") + "`") # MY SQL QUOTE OF COLUMN NAMES
elif isinstance(column_name, list):
if table:
return SQL(", ".join([self.quote_column(table + "." + c) for c in column_name]))
return SQL(", ".join([self.quote_column(c) for c in column_name]))
else:
#ASSUME {"name":name, "value":value} FORM
# ASSUME {"name":name, "value":value} FORM
return SQL(column_name.value + " AS " + self.quote_column(column_name.name))
def sort2sqlorderby(self, sort):
@ -580,7 +580,7 @@ def utf8_to_unicode(v):
Log.error("not expected", e)
#ACTUAL SQL, DO NOT QUOTE THIS STRING
# ACTUAL SQL, DO NOT QUOTE THIS STRING
class SQL(unicode):
def __init__(self, template='', param=None):
unicode.__init__(self)
@ -599,8 +599,8 @@ def int_list_packer(term, values):
"""
return singletons, ranges and exclusions
"""
DENSITY = 10 #a range can have holes, this is inverse of the hole density
MIN_RANGE = 20 #min members before a range is allowed to be used
DENSITY = 10 # a range can have holes, this is inverse of the hole density
MIN_RANGE = 20 # min members before a range is allowed to be used
singletons = set()
ranges = []
@ -616,27 +616,27 @@ def int_list_packer(term, values):
if v <= last + 1:
pass
elif v - last > 3:
#big step, how do we deal with it?
# big step, how do we deal with it?
if last == curr_start:
#not a range yet, so just add as singlton
# not a range yet, so just add as singlton
singletons.add(last)
elif last - curr_start - len(curr_excl) < MIN_RANGE or ((last - curr_start) < len(curr_excl) * DENSITY):
#small ranges are singletons, sparse ranges are singletons
# small ranges are singletons, sparse ranges are singletons
singletons |= set(range(curr_start, last + 1))
singletons -= curr_excl
else:
#big enough, and dense enough range
# big enough, and dense enough range
ranges.append({"gte": curr_start, "lte": last})
exclude |= curr_excl
curr_start = v
curr_excl = set()
else:
if 1 + last - curr_start >= len(curr_excl) * DENSITY:
#high density, keep track of excluded and continue
# high density, keep track of excluded and continue
add_me = set(range(last + 1, v))
curr_excl |= add_me
elif 1 + last - curr_start - len(curr_excl) < MIN_RANGE:
#not big enough, convert range to singletons
# not big enough, convert range to singletons
new_singles = set(range(curr_start, last + 1)) - curr_excl
singletons = singletons | new_singles
@ -650,14 +650,14 @@ def int_list_packer(term, values):
last = v
if last == curr_start:
#not a range yet, so just add as singlton
# not a range yet, so just add as singlton
singletons.add(last)
elif last - curr_start - len(curr_excl) < MIN_RANGE or ((last - curr_start) < len(curr_excl) * DENSITY):
#small ranges are singletons, sparse ranges are singletons
# small ranges are singletons, sparse ranges are singletons
singletons |= set(range(curr_start, last + 1))
singletons -= curr_excl
else:
#big enough, and dense enough range
# big enough, and dense enough range
ranges.append({"gte": curr_start, "lte": last})
exclude |= curr_excl

Просмотреть файл

@ -16,6 +16,7 @@ import re
from . import struct
import math
import __builtin__
from urllib import urlencode
from .structs.wraps import unwrap, wrap
@ -44,6 +45,21 @@ def unix(value):
return str(CNV.datetime2unix(value))
def url(value, use_plus=False):
"""
CONVERT FROM dict TO URL PARAMETERS
"""
if use_plus:
return urlencode(value)
else:
# I LOVE ENCODING SPACES AS "+", BECAUSE IT IS HUMANE. BUT, SINCE
# MANY LIBRARIES DO IT WRONG, WE CAN TRUST NOTHING TO INTERPRET URLS
# PROPERLY. SO WE GO WITH LOWEST COMMON DENOMINATOR.
#
# BTW, THIS WOULD BE MUCH FASTER IF urlencode WAS NOT USED
return urlencode(value).replace("+", "%20")
def upper(value):
return value.upper()
@ -115,7 +131,7 @@ def between(value, prefix, suffix):
if e == -1:
return None
s = value.rfind(prefix, 0, e) + len(prefix) #WE KNOW THIS EXISTS, BUT THERE MAY BE A RIGHT-MORE ONE
s = value.rfind(prefix, 0, e) + len(prefix) # WE KNOW THIS EXISTS, BUT THERE MAY BE A RIGHT-MORE ONE
return value[s:e]
@ -198,13 +214,13 @@ def _simple_expand(template, seq):
if len(parts) > 1:
val = eval(parts[0] + "(val, " + ("(".join(parts[1::])))
else:
val = eval(filter + "(val)")
val = globals()[filter](val)
val = toString(val)
return val
except Exception, e:
try:
if e.message.find("is not JSON serializable"):
#WORK HARDER
# WORK HARDER
val = toString(val)
return val
except Exception, f:
@ -241,7 +257,7 @@ def toString(val):
def edit_distance(s1, s2):
"""
FROM http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
FROM http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance# Python
LICENCE http://creativecommons.org/licenses/by-sa/3.0/
"""
if len(s1) < len(s2):

Просмотреть файл

@ -17,37 +17,53 @@ DEBUG = False
class Struct(dict):
"""
Struct is an anonymous class with some properties good for manipulating JSON
Struct is used to declare an instance of an anonymous type, and has good
features for manipulating JSON. Anonymous types are necessary when
writing sophisticated list comprehensions, or queries, and to keep them
readable. In many ways, dict() can act as an anonymous type, but it does
not have the features listed here.
0) a.b==a["b"]
1) the IDE does tab completion, and my spelling mistakes get found at "compile time"
2) it deals with missing keys gracefully, so I can put it into set operations (database
operations) without choking
1) by allowing dot notation, the IDE does tab completion and my spelling
mistakes get found at "compile time"
2) it deals with missing keys gracefully, so I can put it into set
operations (database operations) without raising exceptions
a = wrap({})
> a == {}
a.b is Null
a.b == None
> True
a.b.c == None
> True
2b) missing keys is important when dealing with JSON, which is often almost anything
a[None] == None
> True
2b) missing keys is important when dealing with JSON, which is often almost
anything
2c) you loose the ability to perform <code>a is None</code> checks, must
always use <code>a == None</code> instead
3) you can access paths as a variable: a["b.c"]==a.b.c
4) you can set paths to values, missing objects along the path are created:
4) you can set paths to values, missing dicts along the path are created:
a = wrap({})
> a == {}
a["b.c"] = 42
> a == {"b": {"c": 42}}
5) attribute names (keys) are corrected to unicode - it appears Python object.getattribute()
is called with str() even when using from __future__ import unicode_literals
5) attribute names (keys) are corrected to unicode - it appears Python
object.getattribute() is called with str() even when using
<code>from __future__ import unicode_literals</code>
MORE ON MISSING VALUES: http://www.numpy.org/NA-overview.html
IT ONLY CONSIDERS THE LEGITIMATE-FIELD-WITH-MISSING-VALUE (Statistical Null)
AND DOES NOT LOOK AT FIELD-DOES-NOT-EXIST-IN-THIS-CONTEXT (Database Null)
More on missing values: http://www.numpy.org/NA-overview.html
it only considers the legitimate-field-with-missing-value (Statistical Null)
and does not look at field-does-not-exist-in-this-context (Database Null)
The Struct is a common pattern in many frameworks (I am still working on this list)
The Struct is a common pattern in many frameworks even though it goes by
different names, some examples are:
jinja2.environment.Environment.getattr()
argparse.Environment() - code performs setattr(e, name, value) on instances of Environment
collections.namedtuple() - gives attribute names to tuple indicies
* jinja2.environment.Environment.getattr()
* argparse.Environment() - code performs setattr(e, name, value) on instances of Environment
* collections.namedtuple() - gives attribute names to tuple indicies
* C# Linq requires anonymous types to avoid large amounts of boilerplate code.
http://www.saltycrane.com/blog/2012/08/python-data-object-motivated-desire-mutable-namedtuple-default-values/
"""
@ -90,6 +106,8 @@ class Struct(dict):
return False
def __getitem__(self, key):
if key == None:
return Null
if isinstance(key, str):
key = key.decode("utf8")
@ -188,6 +206,19 @@ class Struct(dict):
d = _get(self, "__dict__")
return ((k, wrap(v)) for k, v in d.items())
def leaves(self, prefix=None):
"""
LIKE items() BUT RECURSIVE, AND ONLY FOR THE LEAVES (non dict) VALUES
"""
prefix = nvl(prefix, "")
output = []
for k, v in self.items():
if isinstance(v, dict):
output.extend(wrap(v).leaves(prefix=prefix+literal_field(k)+"."))
else:
output.append((prefix+literal_field(k), v))
return output
def all_items(self):
"""
GET ALL KEY-VALUES OF LEAF NODES IN Struct
@ -202,7 +233,7 @@ class Struct(dict):
return output
def iteritems(self):
#LOW LEVEL ITERATION, NO WRAPPING
# LOW LEVEL ITERATION, NO WRAPPING
d = _get(self, "__dict__")
return d.iteritems()
@ -290,7 +321,7 @@ def _setdefault(obj, key, value):
def set_default(*params):
"""
I+NPUT dicts IN PRIORITY ORDER
INPUT dicts IN PRIORITY ORDER
UPDATES FIRST dict WITH THE MERGE RESULT, WHERE MERGE RESULT IS DEFINED AS:
FOR EACH LEAF, RETURN THE HIGHEST PRIORITY LEAF VALUE
"""
@ -338,8 +369,8 @@ def _assign(obj, path, value, force=True):
"""
if isinstance(obj, NullType):
d = _get(obj, "__dict__")
o = d["obj"]
p = d["path"]
o = d["_obj"]
p = d["_path"]
s = split_field(p)+path
return _assign(o, s, value)
@ -373,8 +404,8 @@ class NullType(object):
def __init__(self, obj=None, path=None):
d = _get(self, "__dict__")
d["obj"] = obj
d["path"] = path
d["_obj"] = obj
d["_path"] = path
def __bool__(self):
return False
@ -458,8 +489,8 @@ class NullType(object):
def __setitem__(self, key, value):
try:
d = _get(self, "__dict__")
o = d["obj"]
path = d["path"]
o = d["_obj"]
path = d["_path"]
seq = split_field(path)+split_field(key)
_assign(o, seq, value)
@ -481,6 +512,9 @@ class NullType(object):
def __repr__(self):
return "Null"
def __hash__(self):
return hash(None)
Null = NullType()
EmptyList = Null
@ -664,7 +698,7 @@ def inverse(d):
def nvl(*args):
#pick the first none-null value
# pick the first none-null value
for a in args:
if a != None:
return wrap(a)

Просмотреть файл

@ -106,17 +106,17 @@ def listwrap(value):
value -> [value]
[...] -> [...] (unchanged list)
#BEFORE
# BEFORE
if a is not None:
if not isinstance(a, list):
a=[a]
for x in a:
#do something
# do something
#AFTER
# AFTER
for x in listwrap(a):
#do something
# do something
"""
if value == None:

Просмотреть файл

@ -1,13 +1,13 @@
# encoding: utf-8
#
from .util import struct
from .util.cnv import CNV
from .util.env.elasticsearch import ElasticSearch
from .util.env.logs import Log
from .util.env.files import File
from .util.queries import Q
from .util.struct import Struct
from .util.structs.wraps import unwrap, wrap
from .. import struct
from ..cnv import CNV
from ..env.elasticsearch import ElasticSearch
from ..env.logs import Log
from ..env.files import File
from ..queries import Q
from ..struct import Struct
from ..structs.wraps import unwrap, wrap
def make_test_instance(name, settings):
if settings.filename:

Просмотреть файл

@ -7,7 +7,7 @@
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from multiprocessing.queues import Queue
from dzAlerts.util.thread.threads import Queue
from ..env.logs import Log
@ -35,9 +35,9 @@ class Multiprocess(object):
self.inbound = Queue()
self.inbound = Queue()
#MAKE
# MAKE
#MAKE THREADS
# MAKE THREADS
self.threads = []
for t, f in enumerate(functions):
thread = worker(
@ -52,7 +52,7 @@ class Multiprocess(object):
def __enter__(self):
return self
#WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
# WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
def __exit__(self, a, b, c):
try:
self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
@ -62,10 +62,10 @@ class Multiprocess(object):
self.join()
#IF YOU SENT A stop(), OR STOP, YOU MAY WAIT FOR SHUTDOWN
# IF YOU SENT A stop(), OR STOP, YOU MAY WAIT FOR SHUTDOWN
def join(self):
try:
#WAIT FOR FINISH
# WAIT FOR FINISH
for t in self.threads:
t.join()
except (KeyboardInterrupt, SystemExit):
@ -81,9 +81,9 @@ class Multiprocess(object):
self.outbound.close()
#RETURN A GENERATOR THAT HAS len(parameters) RESULTS (ANY ORDER)
# RETURN A GENERATOR THAT HAS len(parameters) RESULTS (ANY ORDER)
def execute(self, parameters):
#FILL QUEUE WITH WORK
# FILL QUEUE WITH WORK
self.inbound.extend(parameters)
num = len(parameters)
@ -95,9 +95,9 @@ class Multiprocess(object):
return output()
#EXTERNAL COMMAND THAT RETURNS IMMEDIATELY
# EXTERNAL COMMAND THAT RETURNS IMMEDIATELY
def stop(self):
self.inbound.close() #SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
for t in self.threads:
t.keep_running = False

Просмотреть файл

@ -37,7 +37,7 @@ class Multithread(object):
self.inbound = Queue(silent=silent_queues)
#MAKE THREADS
# MAKE THREADS
if isinstance(functions, Iterable):
if threads:
Log.error("do not know how to handle an array of functions AND a thread multiplier")
@ -46,7 +46,7 @@ class Multithread(object):
thread = worker_thread("worker " + unicode(t), self.inbound, self.outbound, f)
self.threads.append(thread)
else:
#ASSUME functions IS A SINGLE FUNCTION
# ASSUME functions IS A SINGLE FUNCTION
self.threads = []
for t in range(nvl(threads, 1)):
thread = worker_thread("worker " + unicode(t), self.inbound, self.outbound, functions)
@ -55,7 +55,7 @@ class Multithread(object):
def __enter__(self):
return self
#WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
# WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
def __exit__(self, type, value, traceback):
try:
if isinstance(value, Exception):
@ -66,10 +66,10 @@ class Multithread(object):
Log.warning("Problem sending stops", e)
#IF YOU SENT A stop(), OR Thread.STOP, YOU MAY WAIT FOR SHUTDOWN
# IF YOU SENT A stop(), OR Thread.STOP, YOU MAY WAIT FOR SHUTDOWN
def join(self):
try:
#WAIT FOR FINISH
# WAIT FOR FINISH
for t in self.threads:
t.join()
except (KeyboardInterrupt, SystemExit):
@ -80,7 +80,8 @@ class Multithread(object):
for t in self.threads:
t.keep_running = False
self.inbound.close()
if self.outbound: self.outbound.close()
if self.outbound:
self.outbound.close()
for t in self.threads:
t.join()
@ -93,7 +94,7 @@ class Multithread(object):
if not isinstance(requests,(list, tuple, GeneratorType)):
Log.error("Expecting requests to be a list or generator", offset=1)
#FILL QUEUE WITH WORK
# FILL QUEUE WITH WORK
self.inbound.extend(requests)
num = len(requests)
@ -111,15 +112,15 @@ class Multithread(object):
else:
return
#EXTERNAL COMMAND THAT RETURNS IMMEDIATELY
# EXTERNAL COMMAND THAT RETURNS IMMEDIATELY
def stop(self):
self.inbound.close() #SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
for t in self.threads:
t.keep_running = False
class worker_thread(Thread):
#in_queue MUST CONTAIN HASH OF PARAMETERS FOR load()
# in_queue MUST CONTAIN HASH OF PARAMETERS FOR load()
def __init__(self, name, in_queue, out_queue, function):
Thread.__init__(self, name, self.event_loop)
self.in_queue = in_queue

Просмотреть файл

@ -128,7 +128,7 @@ class Queue(object):
while self.keep_running:
if self.queue:
value = self.queue.pop(0)
if value is Thread.STOP: #SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
if value is Thread.STOP: # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
self.keep_running = False
return value
self.lock.wait()
@ -145,11 +145,11 @@ class Queue(object):
return []
for v in self.queue:
if v is Thread.STOP: #SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
if v is Thread.STOP: # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
self.keep_running = False
output = list(self.queue)
del self.queue[:] #CLEAR
del self.queue[:] # CLEAR
return output
def close(self):
@ -168,7 +168,7 @@ class AllThread(object):
def __enter__(self):
return self
#WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
# WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
def __exit__(self, type, value, traceback):
self.join()
@ -223,7 +223,7 @@ class Thread(object):
self.synch_lock = Lock()
self.args = args
#ENSURE THERE IS A SHARED please_stop SIGNAL
# ENSURE THERE IS A SHARED please_stop SIGNAL
self.kwargs = kwargs.copy()
self.kwargs["please_stop"] = self.kwargs.get("please_stop", Signal())
self.please_stop = self.kwargs["please_stop"]
@ -313,7 +313,7 @@ class Thread(object):
@staticmethod
def run(name, target, *args, **kwargs):
#ENSURE target HAS please_stop ARGUMENT
# ENSURE target HAS please_stop ARGUMENT
if "please_stop" not in target.__code__.co_varnames:
from ..env.logs import Log
@ -407,7 +407,7 @@ class ThreadedQueue(Queue):
def __init__(self, queue, size=None, max=None, period=None, silent=False):
if max == None:
#REASONABLE DEFAULT
# REASONABLE DEFAULT
max = size * 2
Queue.__init__(self, max=max, silent=silent)
@ -415,7 +415,7 @@ class ThreadedQueue(Queue):
def size_pusher(please_stop):
please_stop.on_go(lambda: self.add(Thread.STOP))
#queue IS A MULTI-THREADED QUEUE, SO THIS WILL BLOCK UNTIL THE size ARE READY
# queue IS A MULTI-THREADED QUEUE, SO THIS WILL BLOCK UNTIL THE size ARE READY
from ..queries import Q
for i, g in Q.groupby(self, size=size):

89
tests/util/times/dates.py Normal file
Просмотреть файл

@ -0,0 +1,89 @@
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
#
# MONKEY-PATCH datetime FOR MORE AWESOME FUN
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from datetime import datetime, date
import math
class Date(object):
def __init__(self, *args):
try:
if len(args) == 1:
a0 = args[0]
if isinstance(a0, (datetime, date)):
self.value = a0
elif isinstance(a0, Date):
self.value = a0.value
elif isinstance(a0, (int, long, float)):
if a0 == 9999999999000: # PYPY BUG https://bugs.pypy.org/issue1697
return datetime.datetime(2286, 11, 20, 17, 46, 39)
self.value = datetime.utcfromtimestamp(a0/1000)
else:
self.value = datetime(*args)
else:
self.value = datetime(*args)
except Exception, e:
Log.error("Can not convert {{args}} to Date", {"args": args}, e)
def floor(self, duration=None):
if duration is None: # ASSUME DAY
return Date(math.floor(self.milli / 86400000) * 86400000)
elif not duration.month:
return Date(math.floor(self.milli / duration.milli) * duration.milli)
else:
month = math.floor(self.value.month / duration.month) * duration.month
return Date(datetime(self.value.year, month, 1))
def format(self, format="%Y-%m-%d %H:%M:%S"):
try:
return self.value.strftime(format)
except Exception, e:
Log.error("Can not format {{value}} with {{format}}", {"value": self.value, "format": format}, e)
@property
def milli(self):
try:
if self.value == None:
return None
elif isinstance(self.value, datetime):
epoch = datetime(1970, 1, 1)
elif isinstance(self.value, date):
epoch = date(1970, 1, 1)
else:
Log.error("Can not convert {{value}} of type {{type}}", {"value": self.value, "type": self.value.__class__})
diff = self.value - epoch
return long(diff.total_seconds()) * 1000L + long(diff.microseconds / 1000)
except Exception, e:
Log.error("Can not convert {{value}}", {"value": self.value}, e)
@property
def unix(self):
return self.milli/1000
@staticmethod
def now():
return Date(datetime.utcnow())
@staticmethod
def today():
return Date(datetime.utcnow()).floor()
def __str__(self):
return str(self.value)
from ..env.logs import Log

Просмотреть файл

@ -8,35 +8,52 @@
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from datetime import timedelta
from .. import regex
from ..vendor.dateutil.relativedelta import relativedelta
from ..cnv import CNV
from ..collections import MIN
from ..env.logs import Log
from ..maths import Math
from ..structs.wraps import unwrap, wrap
from ..structs.wraps import wrap
class Duration(object):
ZERO = None
SECOND = None
MINUTE = None
HOUR = None
DAY = None
WEEK = None
MONTH = None
QUARTER = None
YEAR = None
def __new__(cls, obj=None):
def __new__(cls, value=None, **kwargs):
output = object.__new__(cls)
if obj == None:
return None
if Math.is_number(obj):
output.milli = obj
if value == None:
if kwargs:
output.milli = timedelta(**kwargs).total_seconds()*1000
output.month = 0
return output
else:
return None
if Math.is_number(value):
output.milli = value
output.month = 0
return output
elif isinstance(obj, basestring):
return parse(obj)
elif isinstance(obj, Duration):
output.milli = obj.milli
output.month = obj.month
elif isinstance(value, basestring):
return parse(value)
elif isinstance(value, Duration):
output.milli = value.milli
output.month = value.month
return output
elif Math.is_nan(obj):
elif Math.is_nan(value):
return None
else:
Log.error("Do not know type of object (" + CNV.object2JSON(obj) + ")of to make a Duration")
Log.error("Do not know type of object (" + CNV.object2JSON(value) + ")of to make a Duration")
def __add__(self, other):
@ -87,10 +104,21 @@ class Duration(object):
def __sub__(self, duration):
output = Duration(0)
output.milli = self.milli - duration.milli
output.month = self.month - duration.month
return output
output = Duration(0)
output.milli = self.milli - duration.milli
output.month = self.month - duration.month
return output
    def __rsub__(self, time):
        """Support `other - duration` when the left operand is not a Duration.

        Durations are subtracted component-wise (milli and month tracked
        separately); anything else is handed a relativedelta so datetime-like
        left operands can do the subtraction themselves.
        """
        if isinstance(time, Duration):
            output = Duration(0)
            output.milli = time.milli - self.milli
            output.month = time.month - self.month
            return output
        else:
            # NOTE(review): milli/1000 truncates under Python 2 integer
            # division, dropping sub-second precision — confirm intended.
            return time - relativedelta(months=self.month, seconds=self.milli/1000)
def floor(self, interval=None):
if not isinstance(interval, Duration):
@ -144,7 +172,7 @@ class Duration(object):
rest = Math.floor(rest / 24)
# DAY
if rest < 11 and rest != 7:
if (rest < 11 and rest != 7) or rest % 10 == 0:
rem = rest
rest = 0
else:
@ -152,11 +180,11 @@ class Duration(object):
rest = Math.floor(rest / 7)
if rem != 0:
output = "+" + rem + "day" + output
output = "+" + str(rem) + "day" + output
# WEEK
if rest != 0:
output = "+" + rest + "week" + output
output = "+" + str(rest) + "week" + output
if isNegative:
output = output.replace("+", "-")
@ -173,7 +201,7 @@ class Duration(object):
if m != 0:
output = sign + m + "month" + output
y = Math.floor(month / 12)
output = sign + y + "year" + output
output = sign + str(y) + "year" + output
if output[0] == "+":
output = output[1::]
@ -183,7 +211,7 @@ class Duration(object):
def format(self, interval, rounding):
return self.round(Duration.newInstance(interval), rounding) + interval
return self.round(Duration(interval), rounding) + interval
def round(self, interval, rounding=0):
output = self / interval
@ -199,7 +227,7 @@ def _string2Duration(text):
return Duration(0)
amount, interval = regex.match(r"([\d\.]*)(.*)", text)
amount = CNV.value2int(amount) if amount else 0
amount = CNV.value2int(amount) if amount else 1
if MILLI_VALUES[interval] == None:
Log.error(interval + " is not a recognized duration type (did you use the pural form by mistake?")
@ -274,6 +302,19 @@ MONTH = Duration("month")
QUARTER = Duration("quarter")
YEAR = Duration("year")
Duration.ZERO = ZERO
Duration.SECOND = SECOND
Duration.MINUTE = MINUTE
Duration.HOUR = HOUR
Duration.DAY = DAY
Duration.WEEK = WEEK
Duration.MONTH = MONTH
Duration.QUARTER = QUARTER
Duration.YEAR = YEAR
COMMON_INTERVALS = [
Duration("second"),
Duration("15second"),

38
tests/util/vendor/aespython/aes_cipher.py поставляемый
Просмотреть файл

@ -18,7 +18,7 @@ Licensed under the MIT license http://www.opensource.org/licenses/mit-license.ph
"""
__author__ = "Adam Newman"
#Normally use relative import. In test mode use local import.
# Normally use relative import. In test mode use local import.
try:
from . import aes_tables
except ValueError:
@ -28,47 +28,47 @@ class AESCipher:
"""Perform single block AES cipher/decipher"""
def __init__ (self, expanded_key):
#Store epanded key
# Store epanded key
self._expanded_key = expanded_key
#Number of rounds determined by expanded key length
# Number of rounds determined by expanded key length
self._Nr = int(len(expanded_key) / 16) - 1
def _sub_bytes (self, state):
#Run state through sbox
# Run state through sbox
for i,s in enumerate(state):state[i]=aes_tables.sbox[s]
def _i_sub_bytes (self, state):
#Run state through inverted sbox
# Run state through inverted sbox
for i,s in enumerate(state):state[i]=aes_tables.i_sbox[s]
def _shift_row (self, row, shift):
#Circular shift row left by shift amount
# Circular shift row left by shift amount
row+=row[:shift]
del row[:shift]
return row
def _i_shift_row (self, row, shift):
#Circular shift row left by shift amount
# Circular shift row left by shift amount
row+=row[:shift]
del row[:4+shift]
return row
def _shift_rows (self, state):
#Extract rows as every 4th item starting at [1..3]
#Replace row with shift_row operation
# Extract rows as every 4th item starting at [1..3]
# Replace row with shift_row operation
for i in 1,2,3:
state[i::4] = self._shift_row(state[i::4],i)
def _i_shift_rows (self, state):
#Extract rows as every 4th item starting at [1..3]
#Replace row with inverse shift_row operation
# Extract rows as every 4th item starting at [1..3]
# Replace row with inverse shift_row operation
for i in 1,2,3:
state[i::4] = self._i_shift_row(state[i::4],-i)
def _mix_column (self, column, inverse):
#Use galois lookup tables instead of performing complicated operations
#If inverse, use matrix with inverse values
# Use galois lookup tables instead of performing complicated operations
# If inverse, use matrix with inverse values
g0,g1,g2,g3=aes_tables.galI if inverse else aes_tables.galNI
c0,c1,c2,c3=column
return (
@ -78,18 +78,18 @@ class AESCipher:
g1[c0]^g2[c1]^g3[c2]^g0[c3])
def _mix_columns (self, state, inverse):
#Perform mix_column for each column in the state
# Perform mix_column for each column in the state
for i,j in (0,4),(4,8),(8,12),(12,16):
state[i:j] = self._mix_column(state[i:j], inverse)
def _add_round_key (self, state, round):
#XOR the state with the current round key
# XOR the state with the current round key
for k,(i,j) in enumerate(zip(state, self._expanded_key[round*16:(round+1)*16])):state[k]=i^j
def cipher_block (self, state):
"""Perform AES block cipher on input"""
#PKCS7 Padding
state=state+[16-len(state)]*(16-len(state))#Fails test if it changes the input with +=
# PKCS7 Padding
state=state+[16-len(state)]*(16-len(state))# Fails test if it changes the input with +=
self._add_round_key(state, 0)
@ -106,7 +106,7 @@ class AESCipher:
def decipher_block (self, state):
"""Perform AES block decipher on input"""
#null padding. Padding actually should not be needed here with valid input.
# null padding. Padding actually should not be needed here with valid input.
state=state+[0]*(16-len(state))
self._add_round_key(state, self._Nr)
@ -148,4 +148,4 @@ class TestCipher(unittest.TestCase):
msg='Test %d bit decipher'%key_size)
if __name__ == "__main__":
unittest.main()
unittest.main()

101
tests/util/vendor/aespython/key_expander.py поставляемый
Просмотреть файл

@ -14,7 +14,7 @@ Licensed under the MIT license http://www.opensource.org/licenses/mit-license.ph
"""
__author__ = "Adam Newman"
#Normally use relative import. In test mode use local import.
# Normally use relative import. In test mode use local import.
try:
from . import aes_tables
except ValueError:
@ -22,76 +22,76 @@ except ValueError:
class KeyExpander:
"""Perform AES Key Expansion"""
_expanded_key_length = {128 : 176, 192 : 208, 256 : 240}
def __init__(self, key_length):
self._key_length = key_length
self._n = int(key_length / 8)
if key_length in self._expanded_key_length:
self._b = self._expanded_key_length[key_length]
else:
raise LookupError('Invalid Key Size')
def _core(self, key_array, iteration):
if len(key_array) != 4:
raise RuntimeError('_core(): key segment size invalid')
#Append the list of elements 1-3 and list comprised of element 0 (circular rotate left)
#For each element of this new list, put the result of sbox into output array.
#I was torn on readability vs pythonicity. This also may be faster.
# Append the list of elements 1-3 and list comprised of element 0 (circular rotate left)
# For each element of this new list, put the result of sbox into output array.
# I was torn on readability vs pythonicity. This also may be faster.
output = [aes_tables.sbox[i] for i in key_array[1:] + key_array[:1]]
#First byte of output array is XORed with rcon(iteration)
# First byte of output array is XORed with rcon(iteration)
output[0] = output[0] ^ aes_tables.rcon[iteration]
return output
def _xor_list(self, list_1, list_2):
return [ i ^ j for i,j in zip(list_1, list_2)]
return [ i ^ j for i,j in zip(list_1, list_2)]
def expand(self, key_array):
"""
Expand the encryption key per AES key schedule specifications
http://en.wikipedia.org/wiki/Rijndael_key_schedule#Key_schedule_description
"""
Expand the encryption key per AES key schedule specifications
        http://en.wikipedia.org/wiki/Rijndael_key_schedule#Key_schedule_description
"""
if len(key_array) != self._n:
raise RuntimeError('expand(): key size ' + str(len(key_array)) + ' is invalid')
#First n bytes are copied from key. Copy prevents inplace modification of original key
# First n bytes are copied from key. Copy prevents inplace modification of original key
new_key = list(key_array)
rcon_iteration = 1
len_new_key = len(new_key)
#There are several parts of the code below that could be done with tidy list comprehensions like
#the one I put in _core, but I left this alone for readability.
#Grow the key until it is the correct length
# There are several parts of the code below that could be done with tidy list comprehensions like
# the one I put in _core, but I left this alone for readability.
# Grow the key until it is the correct length
while len_new_key < self._b:
#Copy last 4 bytes of extended key, apply _core function order i, increment i(rcon_iteration),
#xor with 4 bytes n bytes from end of extended key
# Copy last 4 bytes of extended key, apply _core function order i, increment i(rcon_iteration),
# xor with 4 bytes n bytes from end of extended key
t = new_key[-4:]
t = self._core(t, rcon_iteration)
t = self._core(t, rcon_iteration)
rcon_iteration += 1
t = self._xor_list(t, new_key[-self._n : -self._n + 4])# self._n_bytes_before(len_new_key, new_key))
new_key.extend(t)
len_new_key += 4
#Run three passes of 4 byte expansion using copy of 4 byte tail of extended key
#which is then xor'd with 4 bytes n bytes from end of extended key
# Run three passes of 4 byte expansion using copy of 4 byte tail of extended key
# which is then xor'd with 4 bytes n bytes from end of extended key
for j in range(3):
t = new_key[-4:]
t = new_key[-4:]
t = self._xor_list(t, new_key[-self._n : -self._n + 4])
new_key.extend(t)
len_new_key += 4
#If key length is 256 and key is not complete, add 4 bytes tail of extended key
#run through sbox before xor with 4 bytes n bytes from end of extended key
# If key length is 256 and key is not complete, add 4 bytes tail of extended key
# run through sbox before xor with 4 bytes n bytes from end of extended key
if self._key_length == 256 and len_new_key < self._b:
t = new_key[-4:]
t2=[]
@ -100,45 +100,44 @@ class KeyExpander:
t = self._xor_list(t2, new_key[-self._n : -self._n + 4])
new_key.extend(t)
len_new_key += 4
#If key length is 192 or 256 and key is not complete, run 2 or 3 passes respectively
#of 4 byte tail of extended key xor with 4 bytes n bytes from end of extended key
# If key length is 192 or 256 and key is not complete, run 2 or 3 passes respectively
# of 4 byte tail of extended key xor with 4 bytes n bytes from end of extended key
if self._key_length != 128 and len_new_key < self._b:
if self._key_length == 192:
r = range(2)
else:
r = range(3)
for j in r:
t = new_key[-4:]
t = self._xor_list(t, new_key[-self._n : -self._n + 4])
new_key.extend(t)
len_new_key += 4
return new_key
import unittest
class TestKeyExpander(unittest.TestCase):
def test_keys(self):
"""Test All Key Expansions"""
try:
from . import test_keys
except:
import test_keys
test_data = test_keys.TestKeys()
for key_size in [128, 192, 256]:
test_expander = KeyExpander(key_size)
test_expanded_key = test_expander.expand(test_data.test_key[key_size])
self.assertEqual (len([i for i, j in zip(test_expanded_key, test_data.test_expanded_key_validated[key_size]) if i == j]),
self.assertEqual (len([i for i, j in zip(test_expanded_key, test_data.test_expanded_key_validated[key_size]) if i == j]),
len(test_data.test_expanded_key_validated[key_size]),
msg='Key expansion ' + str(key_size) + ' bit')
if __name__ == "__main__":
unittest.main()

87
tests/util/vendor/aespython/test_keys.py поставляемый
Просмотреть файл

@ -16,58 +16,58 @@ class TestKeys:
128 : [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]
, 192 : [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17]
, 256 : [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f]
}
test_expanded_key_validated = {
128 : [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0xd6, 0xaa, 0x74, 0xfd, 0xd2, 0xaf, 0x72, 0xfa, 0xda, 0xa6, 0x78, 0xf1, 0xd6, 0xab, 0x76, 0xfe,
0xb6, 0x92, 0xcf, 0x0b, 0x64, 0x3d, 0xbd, 0xf1, 0xbe, 0x9b, 0xc5, 0x00, 0x68, 0x30, 0xb3, 0xfe,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0xd6, 0xaa, 0x74, 0xfd, 0xd2, 0xaf, 0x72, 0xfa, 0xda, 0xa6, 0x78, 0xf1, 0xd6, 0xab, 0x76, 0xfe,
0xb6, 0x92, 0xcf, 0x0b, 0x64, 0x3d, 0xbd, 0xf1, 0xbe, 0x9b, 0xc5, 0x00, 0x68, 0x30, 0xb3, 0xfe,
0xb6, 0xff, 0x74, 0x4e, 0xd2, 0xc2, 0xc9, 0xbf, 0x6c, 0x59, 0x0c, 0xbf, 0x04, 0x69, 0xbf, 0x41,
0x47, 0xf7, 0xf7, 0xbc, 0x95, 0x35, 0x3e, 0x03, 0xf9, 0x6c, 0x32, 0xbc, 0xfd, 0x05, 0x8d, 0xfd,
0x47, 0xf7, 0xf7, 0xbc, 0x95, 0x35, 0x3e, 0x03, 0xf9, 0x6c, 0x32, 0xbc, 0xfd, 0x05, 0x8d, 0xfd,
0x3c, 0xaa, 0xa3, 0xe8, 0xa9, 0x9f, 0x9d, 0xeb, 0x50, 0xf3, 0xaf, 0x57, 0xad, 0xf6, 0x22, 0xaa,
0x5e, 0x39, 0x0f, 0x7d, 0xf7, 0xa6, 0x92, 0x96, 0xa7, 0x55, 0x3d, 0xc1, 0x0a, 0xa3, 0x1f, 0x6b,
0x14, 0xf9, 0x70, 0x1a, 0xe3, 0x5f, 0xe2, 0x8c, 0x44, 0x0a, 0xdf, 0x4d, 0x4e, 0xa9, 0xc0, 0x26,
0x47, 0x43, 0x87, 0x35, 0xa4, 0x1c, 0x65, 0xb9, 0xe0, 0x16, 0xba, 0xf4, 0xae, 0xbf, 0x7a, 0xd2,
0x54, 0x99, 0x32, 0xd1, 0xf0, 0x85, 0x57, 0x68, 0x10, 0x93, 0xed, 0x9c, 0xbe, 0x2c, 0x97, 0x4e,
0x5e, 0x39, 0x0f, 0x7d, 0xf7, 0xa6, 0x92, 0x96, 0xa7, 0x55, 0x3d, 0xc1, 0x0a, 0xa3, 0x1f, 0x6b,
0x14, 0xf9, 0x70, 0x1a, 0xe3, 0x5f, 0xe2, 0x8c, 0x44, 0x0a, 0xdf, 0x4d, 0x4e, 0xa9, 0xc0, 0x26,
0x47, 0x43, 0x87, 0x35, 0xa4, 0x1c, 0x65, 0xb9, 0xe0, 0x16, 0xba, 0xf4, 0xae, 0xbf, 0x7a, 0xd2,
0x54, 0x99, 0x32, 0xd1, 0xf0, 0x85, 0x57, 0x68, 0x10, 0x93, 0xed, 0x9c, 0xbe, 0x2c, 0x97, 0x4e,
0x13, 0x11, 0x1d, 0x7f, 0xe3, 0x94, 0x4a, 0x17, 0xf3, 0x07, 0xa7, 0x8b, 0x4d, 0x2b, 0x30, 0xc5]
, 192 : [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x58, 0x46, 0xf2, 0xf9, 0x5c, 0x43, 0xf4, 0xfe,
0x54, 0x4a, 0xfe, 0xf5, 0x58, 0x47, 0xf0, 0xfa, 0x48, 0x56, 0xe2, 0xe9, 0x5c, 0x43, 0xf4, 0xfe,
0x40, 0xf9, 0x49, 0xb3, 0x1c, 0xba, 0xbd, 0x4d, 0x48, 0xf0, 0x43, 0xb8, 0x10, 0xb7, 0xb3, 0x42,
0x58, 0xe1, 0x51, 0xab, 0x04, 0xa2, 0xa5, 0x55, 0x7e, 0xff, 0xb5, 0x41, 0x62, 0x45, 0x08, 0x0c,
0x2a, 0xb5, 0x4b, 0xb4, 0x3a, 0x02, 0xf8, 0xf6, 0x62, 0xe3, 0xa9, 0x5d, 0x66, 0x41, 0x0c, 0x08,
0xf5, 0x01, 0x85, 0x72, 0x97, 0x44, 0x8d, 0x7e, 0xbd, 0xf1, 0xc6, 0xca, 0x87, 0xf3, 0x3e, 0x3c,
0xe5, 0x10, 0x97, 0x61, 0x83, 0x51, 0x9b, 0x69, 0x34, 0x15, 0x7c, 0x9e, 0xa3, 0x51, 0xf1, 0xe0,
0x1e, 0xa0, 0x37, 0x2a, 0x99, 0x53, 0x09, 0x16, 0x7c, 0x43, 0x9e, 0x77, 0xff, 0x12, 0x05, 0x1e,
0xdd, 0x7e, 0x0e, 0x88, 0x7e, 0x2f, 0xff, 0x68, 0x60, 0x8f, 0xc8, 0x42, 0xf9, 0xdc, 0xc1, 0x54,
0x85, 0x9f, 0x5f, 0x23, 0x7a, 0x8d, 0x5a, 0x3d, 0xc0, 0xc0, 0x29, 0x52, 0xbe, 0xef, 0xd6, 0x3a,
0xde, 0x60, 0x1e, 0x78, 0x27, 0xbc, 0xdf, 0x2c, 0xa2, 0x23, 0x80, 0x0f, 0xd8, 0xae, 0xda, 0x32,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x58, 0x46, 0xf2, 0xf9, 0x5c, 0x43, 0xf4, 0xfe,
0x54, 0x4a, 0xfe, 0xf5, 0x58, 0x47, 0xf0, 0xfa, 0x48, 0x56, 0xe2, 0xe9, 0x5c, 0x43, 0xf4, 0xfe,
0x40, 0xf9, 0x49, 0xb3, 0x1c, 0xba, 0xbd, 0x4d, 0x48, 0xf0, 0x43, 0xb8, 0x10, 0xb7, 0xb3, 0x42,
0x58, 0xe1, 0x51, 0xab, 0x04, 0xa2, 0xa5, 0x55, 0x7e, 0xff, 0xb5, 0x41, 0x62, 0x45, 0x08, 0x0c,
0x2a, 0xb5, 0x4b, 0xb4, 0x3a, 0x02, 0xf8, 0xf6, 0x62, 0xe3, 0xa9, 0x5d, 0x66, 0x41, 0x0c, 0x08,
0xf5, 0x01, 0x85, 0x72, 0x97, 0x44, 0x8d, 0x7e, 0xbd, 0xf1, 0xc6, 0xca, 0x87, 0xf3, 0x3e, 0x3c,
0xe5, 0x10, 0x97, 0x61, 0x83, 0x51, 0x9b, 0x69, 0x34, 0x15, 0x7c, 0x9e, 0xa3, 0x51, 0xf1, 0xe0,
0x1e, 0xa0, 0x37, 0x2a, 0x99, 0x53, 0x09, 0x16, 0x7c, 0x43, 0x9e, 0x77, 0xff, 0x12, 0x05, 0x1e,
0xdd, 0x7e, 0x0e, 0x88, 0x7e, 0x2f, 0xff, 0x68, 0x60, 0x8f, 0xc8, 0x42, 0xf9, 0xdc, 0xc1, 0x54,
0x85, 0x9f, 0x5f, 0x23, 0x7a, 0x8d, 0x5a, 0x3d, 0xc0, 0xc0, 0x29, 0x52, 0xbe, 0xef, 0xd6, 0x3a,
0xde, 0x60, 0x1e, 0x78, 0x27, 0xbc, 0xdf, 0x2c, 0xa2, 0x23, 0x80, 0x0f, 0xd8, 0xae, 0xda, 0x32,
0xa4, 0x97, 0x0a, 0x33, 0x1a, 0x78, 0xdc, 0x09, 0xc4, 0x18, 0xc2, 0x71, 0xe3, 0xa4, 0x1d, 0x5d]
, 256 : [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0xa5, 0x73, 0xc2, 0x9f, 0xa1, 0x76, 0xc4, 0x98, 0xa9, 0x7f, 0xce, 0x93, 0xa5, 0x72, 0xc0, 0x9c,
0x16, 0x51, 0xa8, 0xcd, 0x02, 0x44, 0xbe, 0xda, 0x1a, 0x5d, 0xa4, 0xc1, 0x06, 0x40, 0xba, 0xde,
0xae, 0x87, 0xdf, 0xf0, 0x0f, 0xf1, 0x1b, 0x68, 0xa6, 0x8e, 0xd5, 0xfb, 0x03, 0xfc, 0x15, 0x67,
0x6d, 0xe1, 0xf1, 0x48, 0x6f, 0xa5, 0x4f, 0x92, 0x75, 0xf8, 0xeb, 0x53, 0x73, 0xb8, 0x51, 0x8d,
0xc6, 0x56, 0x82, 0x7f, 0xc9, 0xa7, 0x99, 0x17, 0x6f, 0x29, 0x4c, 0xec, 0x6c, 0xd5, 0x59, 0x8b,
0x3d, 0xe2, 0x3a, 0x75, 0x52, 0x47, 0x75, 0xe7, 0x27, 0xbf, 0x9e, 0xb4, 0x54, 0x07, 0xcf, 0x39,
0x0b, 0xdc, 0x90, 0x5f, 0xc2, 0x7b, 0x09, 0x48, 0xad, 0x52, 0x45, 0xa4, 0xc1, 0x87, 0x1c, 0x2f,
0x45, 0xf5, 0xa6, 0x60, 0x17, 0xb2, 0xd3, 0x87, 0x30, 0x0d, 0x4d, 0x33, 0x64, 0x0a, 0x82, 0x0a,
0x16, 0x51, 0xa8, 0xcd, 0x02, 0x44, 0xbe, 0xda, 0x1a, 0x5d, 0xa4, 0xc1, 0x06, 0x40, 0xba, 0xde,
0xae, 0x87, 0xdf, 0xf0, 0x0f, 0xf1, 0x1b, 0x68, 0xa6, 0x8e, 0xd5, 0xfb, 0x03, 0xfc, 0x15, 0x67,
0x6d, 0xe1, 0xf1, 0x48, 0x6f, 0xa5, 0x4f, 0x92, 0x75, 0xf8, 0xeb, 0x53, 0x73, 0xb8, 0x51, 0x8d,
0xc6, 0x56, 0x82, 0x7f, 0xc9, 0xa7, 0x99, 0x17, 0x6f, 0x29, 0x4c, 0xec, 0x6c, 0xd5, 0x59, 0x8b,
0x3d, 0xe2, 0x3a, 0x75, 0x52, 0x47, 0x75, 0xe7, 0x27, 0xbf, 0x9e, 0xb4, 0x54, 0x07, 0xcf, 0x39,
0x0b, 0xdc, 0x90, 0x5f, 0xc2, 0x7b, 0x09, 0x48, 0xad, 0x52, 0x45, 0xa4, 0xc1, 0x87, 0x1c, 0x2f,
0x45, 0xf5, 0xa6, 0x60, 0x17, 0xb2, 0xd3, 0x87, 0x30, 0x0d, 0x4d, 0x33, 0x64, 0x0a, 0x82, 0x0a,
0x7c, 0xcf, 0xf7, 0x1c, 0xbe, 0xb4, 0xfe, 0x54, 0x13, 0xe6, 0xbb, 0xf0, 0xd2, 0x61, 0xa7, 0xdf,
0xf0, 0x1a, 0xfa, 0xfe, 0xe7, 0xa8, 0x29, 0x79, 0xd7, 0xa5, 0x64, 0x4a, 0xb3, 0xaf, 0xe6, 0x40,
0x25, 0x41, 0xfe, 0x71, 0x9b, 0xf5, 0x00, 0x25, 0x88, 0x13, 0xbb, 0xd5, 0x5a, 0x72, 0x1c, 0x0a,
0x4e, 0x5a, 0x66, 0x99, 0xa9, 0xf2, 0x4f, 0xe0, 0x7e, 0x57, 0x2b, 0xaa, 0xcd, 0xf8, 0xcd, 0xea,
0xf0, 0x1a, 0xfa, 0xfe, 0xe7, 0xa8, 0x29, 0x79, 0xd7, 0xa5, 0x64, 0x4a, 0xb3, 0xaf, 0xe6, 0x40,
0x25, 0x41, 0xfe, 0x71, 0x9b, 0xf5, 0x00, 0x25, 0x88, 0x13, 0xbb, 0xd5, 0x5a, 0x72, 0x1c, 0x0a,
0x4e, 0x5a, 0x66, 0x99, 0xa9, 0xf2, 0x4f, 0xe0, 0x7e, 0x57, 0x2b, 0xaa, 0xcd, 0xf8, 0xcd, 0xea,
0x24, 0xfc, 0x79, 0xcc, 0xbf, 0x09, 0x79, 0xe9, 0x37, 0x1a, 0xc2, 0x3c, 0x6d, 0x68, 0xde, 0x36]
}
test_block_ciphertext_validated = {
128 : [
0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30, 0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a]
@ -76,12 +76,12 @@ class TestKeys:
, 256 : [
0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf, 0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, 0x60, 0x89]
}
test_block_plaintext = [
0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff]
#After initial validation, these deviated from test in SP 800-38A to use same key, iv, and plaintext on tests.
#Still valid, just easier to test with.
# After initial validation, these deviated from test in SP 800-38A to use same key, iv, and plaintext on tests.
# Still valid, just easier to test with.
test_mode_key= [
0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4]
@ -107,13 +107,12 @@ class TestKeys:
[0x4f, 0xeb, 0xdc, 0x67, 0x40, 0xd2, 0x0b, 0x3a, 0xc8, 0x8f, 0x6a, 0xd8, 0x2a, 0x4f, 0xb0, 0x8d],
[0x71, 0xab, 0x47, 0xa0, 0x86, 0xe8, 0x6e, 0xed, 0xf3, 0x9d, 0x1c, 0x5b, 0xba, 0x97, 0xc4, 0x08],
[0x01, 0x26, 0x14, 0x1d, 0x67, 0xf3, 0x7b, 0xe8, 0x53, 0x8f, 0x5a, 0x8b, 0xe7, 0x40, 0xe4, 0x84]]
def hex_output(self, list):
#Debugging output helper
# Debugging output helper
result = '['
for i in list[:-1]:
result += hex(i) + ','
return result + hex(list[-1]) + ']'

10
tests/util/vendor/dateutil/__init__.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__version__ = "2.1"

91
tests/util/vendor/dateutil/easter.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,91 @@
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__license__ = "Simplified BSD"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3


def easter(year, method=EASTER_WESTERN):
    """Return the date of Easter Sunday for *year* as a datetime.date.

    Three calculation methods are supported, selected by *method*:

      EASTER_JULIAN (1)   - original computation in the Julian calendar,
                            valid for dates after 326 AD
      EASTER_ORTHODOX (2) - same computation, with the result converted to
                            the Gregorian calendar, valid 1583-4099
      EASTER_WESTERN (3)  - revised (Gregorian) computation, valid 1583-4099

    Ported from the work of GM Arts, building on Claus Tondering's
    algorithm, itself based in part on Ouding (1940) as quoted in the
    "Explanatory Supplement to the Astronomical Almanac" (P. Kenneth
    Seidelmann, ed.).  See http://users.chariot.net.au/~gmarts/eastalg.htm
    and http://www.tondering.dk/claus/calendar.html for background.

    Raises ValueError when *method* is outside 1..3.
    """
    if not (1 <= method <= 3):
        raise ValueError("invalid method")

    golden = year % 19           # position in the 19-year Metonic cycle
    gregorian_shift = 0          # extra days when converting Julian -> Gregorian

    if method < 3:
        # Julian-calendar computus (methods 1 and 2).
        pfm_offset = (19 * golden + 15) % 30
        pfm_weekday = (year + year // 4 + pfm_offset) % 7
        if method == 2:
            # Method 2 converts the Julian result to a Gregorian date.
            gregorian_shift = 10
            if year > 1600:
                gregorian_shift += year // 100 - 16 - (year // 100 - 16) // 4
    else:
        # Revised Gregorian computus (method 3).
        century = year // 100
        epact = (century - century // 4 - (8 * century + 13) // 25
                 + 19 * golden + 15) % 30
        pfm_offset = epact - (epact // 28) * (
            1 - (epact // 28) * (29 // (epact + 1)) * ((21 - golden) // 11))
        pfm_weekday = (year + year // 4 + pfm_offset
                       + 2 - century + century // 4) % 7

    # Days from 21 March to the Sunday on or before the Paschal Full Moon;
    # -6..28 for methods 1 and 3, up to 56 for method 2.
    days_past_march21 = pfm_offset - pfm_weekday + gregorian_shift
    day = 1 + (days_past_march21 + 27 + (days_past_march21 + 6) // 40) % 31
    month = 3 + (days_past_march21 + 26) // 30
    return datetime.date(int(year), int(month), int(day))

909
tests/util/vendor/dateutil/parser.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,909 @@
# -*- coding:iso-8859-1 -*-
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
from __future__ import unicode_literals
__license__ = "Simplified BSD"
import datetime
import string
import time
import sys
import os
import collections
# The historical Python 2 fast path (cStringIO) was lost in a past edit,
# leaving both branches of the try/except identical; a single plain import
# has exactly the same effect without the dead ImportError clause.
from io import StringIO
from six import text_type, binary_type, integer_types
from . import relativedelta
from . import tz
__all__ = ["parse", "parserinfo"]
# Some pointers:
#
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
# http://www.w3.org/TR/NOTE-datetime
# http://ringmaster.arc.nasa.gov/tools/time_formats.html
# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
class _timelex(object):
    """Tokenizer for date/time strings.

    Splits the input stream into tokens of four shapes: runs of letters,
    runs of digits (either possibly containing dots), a single space for any
    whitespace character, or a single other character.  A dotted token that
    turns out not to be a plain decimal number is split back apart in
    get_token()'s post-processing step.
    """

    def __init__(self, instream):
        # Accept either a string (wrapped in StringIO) or a file-like object.
        if isinstance(instream, text_type):
            instream = StringIO(instream)
        self.instream = instream
        # Characters accepted inside a "word" token.  NOTE(review): the
        # 'abcdfeghi...' ordering ('f' before 'e') is inherited from upstream
        # dateutil; membership testing is unaffected by the order.
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
                          'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
                          'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
        self.numchars = '0123456789'
        self.whitespace = ' \t\r\n'
        self.charstack = []    # single pushed-back character(s)
        self.tokenstack = []   # tokens produced by splitting a dotted token
        self.eof = False

    def get_token(self):
        """Return the next token, or None at end of input.

        States: None (no token started), 'a' (letters), '0' (digits),
        'a.' (letters containing dots), '0.' (digits containing dots).
        """
        # Serve any tokens queued by a previous dotted-token split first.
        if self.tokenstack:
            return self.tokenstack.pop(0)
        seenletters = False
        token = None
        state = None
        wordchars = self.wordchars
        numchars = self.numchars
        whitespace = self.whitespace
        while not self.eof:
            # Prefer a pushed-back character over reading the stream.
            if self.charstack:
                nextchar = self.charstack.pop(0)
            else:
                nextchar = self.instream.read(1)
                # Skip NUL bytes embedded in the input.
                while nextchar == '\x00':
                    nextchar = self.instream.read(1)
            if not nextchar:
                self.eof = True
                break
            elif not state:
                # First character decides the token type.
                token = nextchar
                if nextchar in wordchars:
                    state = 'a'
                elif nextchar in numchars:
                    state = '0'
                elif nextchar in whitespace:
                    token = ' '
                    break # emit token
                else:
                    break # emit token
            elif state == 'a':
                # Accumulating letters; a dot switches to the dotted-word state.
                seenletters = True
                if nextchar in wordchars:
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break # emit token
            elif state == '0':
                # Accumulating digits; a dot switches to the dotted-number state.
                if nextchar in numchars:
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break # emit token
            elif state == 'a.':
                # Dotted word; digits right after a dot flip to dotted number.
                seenletters = True
                if nextchar == '.' or nextchar in wordchars:
                    token += nextchar
                elif nextchar in numchars and token[-1] == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break # emit token
            elif state == '0.':
                # Dotted number; letters right after a dot flip to dotted word.
                if nextchar == '.' or nextchar in numchars:
                    token += nextchar
                elif nextchar in wordchars and token[-1] == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break # emit token
        # A dotted token that contains letters, more than one dot, or ends in
        # a dot is not a decimal number: split it apart, queueing the pieces
        # (with '.' tokens interleaved) for subsequent get_token() calls.
        if (state in ('a.', '0.') and
            (seenletters or token.count('.') > 1 or token[-1] == '.')):
            l = token.split('.')
            token = l[0]
            for tok in l[1:]:
                self.tokenstack.append('.')
                if tok:
                    self.tokenstack.append(tok)
        return token

    def __iter__(self):
        return self

    def __next__(self):
        # Iterator protocol: exhaustion is signalled by get_token() -> None.
        token = self.get_token()
        if token is None:
            raise StopIteration
        return token

    def next(self):
        return self.__next__() # Python 2.x support

    def split(cls, s):
        # Convenience: tokenize a whole string at once.
        return list(cls(s))
    split = classmethod(split)  # pre-decorator classmethod registration
class _resultbase(object):
    """Base for slot-backed result holders.

    Subclasses declare ``__slots__``; every slot starts out as None and the
    repr shows only the slots that were actually set.
    """

    def __init__(self):
        # Initialize every declared slot to "not seen".
        for name in self.__slots__:
            setattr(self, name, None)

    def _repr(self, classname):
        assigned = ["%s=%s" % (name, repr(getattr(self, name)))
                    for name in self.__slots__
                    if getattr(self, name) is not None]
        return "%s(%s)" % (classname, ", ".join(assigned))

    def __repr__(self):
        return self._repr(self.__class__.__name__)
class parserinfo(object):
    """Vocabulary and knobs used by the date/time parser.

    Holds the recognized month/weekday names, am/pm markers, hour/minute/
    second unit words, throwaway ("jump") tokens and UTC zone names, plus
    the dayfirst/yearfirst ambiguity preferences and two-digit-year
    conversion logic.
    """

    # m from a.m/p.m, t from ISO T separator
    JUMP = [" ", ".", ",", ";", "-", "/", "'",
            "at", "on", "and", "ad", "m", "t", "of",
            "st", "nd", "rd", "th"]

    WEEKDAYS = [("Mon", "Monday"),
                ("Tue", "Tuesday"),
                ("Wed", "Wednesday"),
                ("Thu", "Thursday"),
                ("Fri", "Friday"),
                ("Sat", "Saturday"),
                ("Sun", "Sunday")]
    MONTHS = [("Jan", "January"),
              ("Feb", "February"),
              ("Mar", "March"),
              ("Apr", "April"),
              ("May", "May"),
              ("Jun", "June"),
              ("Jul", "July"),
              ("Aug", "August"),
              ("Sep", "Sept", "September"),
              ("Oct", "October"),
              ("Nov", "November"),
              ("Dec", "December")]
    HMS = [("h", "hour", "hours"),
           ("m", "minute", "minutes"),
           ("s", "second", "seconds")]
    AMPM = [("am", "a"),
            ("pm", "p")]
    UTCZONE = ["UTC", "GMT", "Z"]
    PERTAIN = ["of"]
    TZOFFSET = {}

    def __init__(self, dayfirst=False, yearfirst=False):
        # Build case-insensitive lookup tables from the class-level word lists.
        self._jump = self._convert(self.JUMP)
        self._weekdays = self._convert(self.WEEKDAYS)
        self._months = self._convert(self.MONTHS)
        self._hms = self._convert(self.HMS)
        self._ampm = self._convert(self.AMPM)
        self._utczone = self._convert(self.UTCZONE)
        self._pertain = self._convert(self.PERTAIN)

        self.dayfirst = dayfirst
        self.yearfirst = yearfirst

        # Reference point for expanding two-digit years.
        self._year = time.localtime().tm_year
        self._century = self._year // 100 * 100

    def _convert(self, lst):
        """Map every spelling (lower-cased) to its index in *lst*."""
        lookup = {}
        for index, entry in enumerate(lst):
            if isinstance(entry, tuple):
                for alias in entry:
                    lookup[alias.lower()] = index
            else:
                lookup[entry.lower()] = index
        return lookup

    def jump(self, name):
        """True when *name* is an ignorable filler token."""
        return name.lower() in self._jump

    def weekday(self, name):
        """Weekday index (Mon=0) for *name*, or None if unrecognized."""
        if len(name) >= 3:
            return self._weekdays.get(name.lower())
        return None

    def month(self, name):
        """Month number (Jan=1) for *name*, or None if unrecognized."""
        if len(name) >= 3:
            index = self._months.get(name.lower())
            if index is not None:
                return index + 1
        return None

    def hms(self, name):
        """0/1/2 for an hour/minute/second unit word, or None."""
        return self._hms.get(name.lower())

    def ampm(self, name):
        """0 for an AM marker, 1 for a PM marker, or None."""
        return self._ampm.get(name.lower())

    def pertain(self, name):
        """True for linking words like "of" ("25th of March")."""
        return name.lower() in self._pertain

    def utczone(self, name):
        """True when *name* (any case) names UTC."""
        return name.lower() in self._utczone

    def tzoffset(self, name):
        # NOTE: _utczone keys are lower-cased but *name* is not lowered here,
        # so only already-lowercase spellings hit the 0 fast path; other
        # spellings fall through to the TZOFFSET table (inherited behavior).
        if name in self._utczone:
            return 0
        return self.TZOFFSET.get(name)

    def convertyear(self, year):
        """Expand a two-digit year into the century nearest to now."""
        if year < 100:
            year += self._century
            if abs(year - self._year) >= 50:
                if year < self._year:
                    year += 100
                else:
                    year -= 100
        return year

    def validate(self, res):
        """Normalize a parse result in place; always returns True."""
        # move to info
        if res.year is not None:
            res.year = self.convertyear(res.year)
        # Zero offset with no name, or the ISO 'Z' suffix, means UTC.
        if (res.tzoffset == 0 and not res.tzname) or res.tzname == 'Z':
            res.tzname = "UTC"
            res.tzoffset = 0
        elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
            # A named UTC zone always has zero offset, whatever was parsed.
            res.tzoffset = 0
        return True
class parser(object):
    def __init__(self, info=None):
        # Use the supplied parserinfo when given (truthy), otherwise a default.
        self.info = info or parserinfo()
    def parse(self, timestr, default=None,
              ignoretz=False, tzinfos=None,
              **kwargs):
        """Parse *timestr* into a datetime.

        Fields missing from the string are filled from *default* (today at
        midnight when not given).  *tzinfos* may be a mapping of tz name to
        tzinfo/str/int-offset, or a callable (tzname, tzoffset) -> same.
        Raises ValueError when the string cannot be parsed at all.
        """
        if not default:
            default = datetime.datetime.now().replace(hour=0, minute=0,
                                                      second=0, microsecond=0)
        res = self._parse(timestr, **kwargs)
        if res is None:
            raise ValueError("unknown string format")
        # Overlay only the components the string actually provided.
        repl = {}
        for attr in ["year", "month", "day", "hour",
                     "minute", "second", "microsecond"]:
            value = getattr(res, attr)
            if value is not None:
                repl[attr] = value
        ret = default.replace(**repl)
        # A bare weekday name ("next Tuesday" style) with no explicit day
        # moves the date forward to that weekday.
        if res.weekday is not None and not res.day:
            ret = ret+relativedelta.relativedelta(weekday=res.weekday)
        if not ignoretz:
            if isinstance(tzinfos, collections.Callable) or tzinfos and res.tzname in tzinfos:
                # Resolve via the caller-supplied tzinfos (callable or mapping).
                if isinstance(tzinfos, collections.Callable):
                    tzdata = tzinfos(res.tzname, res.tzoffset)
                else:
                    tzdata = tzinfos.get(res.tzname)
                # Accept a ready tzinfo, a tz string, or an int offset.
                if isinstance(tzdata, datetime.tzinfo):
                    tzinfo = tzdata
                elif isinstance(tzdata, text_type):
                    tzinfo = tz.tzstr(tzdata)
                elif isinstance(tzdata, integer_types):
                    tzinfo = tz.tzoffset(res.tzname, tzdata)
                else:
                    raise ValueError("offset must be tzinfo subclass, " \
                                     "tz string, or int offset")
                ret = ret.replace(tzinfo=tzinfo)
            elif res.tzname and res.tzname in time.tzname:
                # Name matches the local zone: attach local time.
                ret = ret.replace(tzinfo=tz.tzlocal())
            elif res.tzoffset == 0:
                ret = ret.replace(tzinfo=tz.tzutc())
            elif res.tzoffset:
                ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
        return ret
    class _result(_resultbase):
        # One slot per parsable datetime component; _resultbase.__init__
        # starts them all as None so "not seen" is distinguishable from 0.
        __slots__ = ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond",
                     "tzname", "tzoffset"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False):
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr)
try:
# year/month/day list
ymd = []
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i-1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i-1]
if not ymd and l[i-1].find('.') == -1:
ymd.append(info.convertyear(int(s[:2])))
ymd.append(int(s[2:4]))
ymd.append(int(s[4:]))
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = _parsems(s[4:])
elif len_li == 8:
# YYYYMMDD
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:]))
elif len_li in (12, 14):
# YYYYMMDDhhmm[ss]
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:8]))
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li == 14:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i+1 < len_l and l[i] == ' ' and
info.hms(l[i+1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value%1:
res.minute = int(60*(value%1))
elif idx == 1:
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif i == len_l and l[i-2] == ' ' and info.hms(l[i-3]) is not None:
# X h MM or X m SS
idx = info.hms(l[i-3]) + 1
if idx == 1:
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
elif i+1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
i += 1
if i < len_l and l[i] == ':':
res.second, res.microsecond = _parsems(l[i+1])
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(int(value))
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(int(l[i]))
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
else:
return None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
if value is not None:
ymd.append(value)
mstridx = len(ymd)-1
assert mstridx == -1
else:
ymd.append(int(l[i]))
i += 1
elif i >= len_l or info.jump(l[i]):
if i+1 < len_l and info.ampm(l[i+1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i+1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i+1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(int(value))
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(int(l[i]))
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(int(l[i]))
i += 1
elif (i+3 < len_l and l[i] == l[i+2] == ' '
and info.pertain(l[i+1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i+3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(info.convertyear(value))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1, 1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
elif i+1 < len_l and l[i+1] == ':':
# -03:00
res.tzoffset = int(l[i])*3600+int(l[i+2])*60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2])*3600
else:
return None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i+3 < len_l and
info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
3 <= len(l[i+2]) <= 5 and
not [x for x in l[i+2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i+2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None
i += 1
# Process year/month/day
len_ymd = len(ymd)
if len_ymd > 3:
# More than three members!?
return None
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
res.month = ymd[mstridx]
del ymd[mstridx]
if len_ymd > 1 or mstridx == -1:
if ymd[0] > 31:
res.year = ymd[0]
else:
res.day = ymd[0]
elif len_ymd == 2:
# Two members with numbers
if ymd[0] > 31:
# 99-01
res.year, res.month = ymd
elif ymd[1] > 31:
# 01-99
res.month, res.year = ymd
elif dayfirst and ymd[1] <= 12:
# 13-01
res.day, res.month = ymd
else:
# 01-13
res.month, res.day = ymd
if len_ymd == 3:
# Three members
if mstridx == 0:
res.month, res.day, res.year = ymd
elif mstridx == 1:
if ymd[0] > 31 or (yearfirst and ymd[2] <= 31):
# 99-Jan-01
res.year, res.month, res.day = ymd
else:
# 01-Jan-01
# Give precendence to day-first, since
# two-digit years is usually hand-written.
res.day, res.month, res.year = ymd
elif mstridx == 2:
# WTF!?
if ymd[1] > 31:
# 01-99-Jan
res.day, res.year, res.month = ymd
else:
# 99-01-Jan
res.year, res.day, res.month = ymd
else:
if ymd[0] > 31 or \
(yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
# 99-01-01
res.year, res.month, res.day = ymd
elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12):
# 13-01-01
res.day, res.month, res.year = ymd
else:
# 01-13-01
res.month, res.day, res.year = ymd
except (IndexError, ValueError, AssertionError):
return None
if not info.validate(res):
return None
return res
# Shared module-level parser instance built with the default parserinfo.
DEFAULTPARSER = parser()

def parse(timestr, parserinfo=None, **kwargs):
    """Parse *timestr* into a datetime.

    When *parserinfo* is supplied a one-off parser is built around it;
    otherwise the shared default parser is used.  Remaining keyword
    arguments are forwarded to parser.parse().
    """
    # Python 2.x support: datetimes return their string presentation as
    # bytes in 2.x and unicode in 3.x, so it's reasonable to expect that
    # the parser will get both kinds.  Internally we use unicode only.
    if isinstance(timestr, binary_type):
        timestr = timestr.decode()
    active = parser(parserinfo) if parserinfo else DEFAULTPARSER
    return active.parse(timestr, **kwargs)
class _tzparser(object):
    """Parser for POSIX TZ-style timezone strings (e.g.
    "EST5EDT,M3.2.0/2,M11.1.0/2"); parse() returns a _result or None."""

    class _result(_resultbase):
        # std/dst abbreviations with their offsets (seconds), plus the
        # DST transition rules stored in `start` and `end`.
        __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
                     "start", "end"]

        class _attr(_resultbase):
            # One transition rule: month/week/weekday form, a year day,
            # or a non-leap ("Julian") year day, plus a time of day.
            __slots__ = ["month", "week", "weekday",
                         "yday", "jyday", "day", "time"]

        def __repr__(self):
            return self._repr("")

        def __init__(self):
            _resultbase.__init__(self)
            self.start = self._attr()
            self.end = self._attr()

    def parse(self, tzstr):
        """Parse *tzstr*; return a populated _result, or None when the
        string is not a valid TZ specification."""
        res = self._result()
        l = _timelex.split(tzstr)
        try:
            len_l = len(l)
            i = 0
            # First, the abbreviation/offset pairs at the front.
            while i < len_l:
                # BRST+3[BRDT[+2]]
                j = i
                while j < len_l and not [x for x in l[j]
                                            if x in "0123456789:,-+"]:
                    j += 1
                if j != i:
                    if not res.stdabbr:
                        offattr = "stdoffset"
                        res.stdabbr = "".join(l[i:j])
                    else:
                        offattr = "dstoffset"
                        res.dstabbr = "".join(l[i:j])
                    i = j
                    if (i < len_l and
                        (l[i] in ('+', '-') or l[i][0] in "0123456789")):
                        if l[i] in ('+', '-'):
                            # Yes, that's right.  See the TZ variable
                            # documentation: the sign is inverted.
                            signal = (1, -1)[l[i] == '+']
                            i += 1
                        else:
                            signal = -1
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            setattr(res, offattr,
                                    (int(l[i][:2])*3600+int(l[i][2:])*60)*signal)
                        elif i+1 < len_l and l[i+1] == ':':
                            # -03:00
                            setattr(res, offattr,
                                    (int(l[i])*3600+int(l[i+2])*60)*signal)
                            i += 2
                        elif len_li <= 2:
                            # -[0]3
                            setattr(res, offattr,
                                    int(l[i][:2])*3600*signal)
                        else:
                            return None
                        i += 1
                    if res.dstabbr:
                        break
                else:
                    break

            # Then the comma-separated DST transition rules, if present.
            if i < len_l:
                for j in range(i, len_l):
                    if l[j] == ';': l[j] = ','
                assert l[i] == ','
                i += 1

            if i >= len_l:
                pass
            elif (8 <= l.count(',') <= 9 and
                  not [y for x in l[i:] if x != ','
                       for y in x if y not in "0123456789"]):
                # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
                for x in (res.start, res.end):
                    x.month = int(l[i])
                    i += 2
                    if l[i] == '-':
                        value = int(l[i+1])*-1
                        i += 1
                    else:
                        value = int(l[i])
                    i += 2
                    if value:
                        x.week = value
                        x.weekday = (int(l[i])-1)%7
                    else:
                        x.day = int(l[i])
                    i += 2
                    x.time = int(l[i])
                    i += 2
                if i < len_l:
                    if l[i] in ('-', '+'):
                        signal = (-1, 1)[l[i] == "+"]
                        i += 1
                    else:
                        signal = 1
                    res.dstoffset = (res.stdoffset+int(l[i]))*signal
            elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
                  not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
                                                    '.', '-', ':')
                       for y in x if y not in "0123456789"]):
                for x in (res.start, res.end):
                    if l[i] == 'J':
                        # non-leap year day (1 based)
                        i += 1
                        x.jyday = int(l[i])
                    elif l[i] == 'M':
                        # month[-.]week[-.]weekday
                        i += 1
                        x.month = int(l[i])
                        i += 1
                        assert l[i] in ('-', '.')
                        i += 1
                        x.week = int(l[i])
                        if x.week == 5:
                            x.week = -1
                        i += 1
                        assert l[i] in ('-', '.')
                        i += 1
                        x.weekday = (int(l[i])-1)%7
                    else:
                        # year day (zero based)
                        x.yday = int(l[i])+1

                    i += 1
                    if i < len_l and l[i] == '/':
                        i += 1
                        # start time
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
                        elif i+1 < len_l and l[i+1] == ':':
                            # -03:00
                            x.time = int(l[i])*3600+int(l[i+2])*60
                            i += 2
                            if i+1 < len_l and l[i+1] == ':':
                                i += 2
                                x.time += int(l[i])
                        elif len_li <= 2:
                            # -[0]3
                            x.time = (int(l[i][:2])*3600)
                        else:
                            return None
                        i += 1

                    assert i == len_l or l[i] == ','

                    i += 1

                assert i >= len_l

        except (IndexError, ValueError, AssertionError):
            return None

        return res
# Shared module-level TZ-string parser instance.
DEFAULTTZPARSER = _tzparser()

def _parsetz(tzstr):
    # Convenience wrapper: returns a _tzparser._result, or None when
    # *tzstr* is not a valid TZ specification.
    return DEFAULTTZPARSER.parse(tzstr)
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
# vim:ts=4:sw=4:et

436
tests/util/vendor/dateutil/relativedelta.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,436 @@
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__license__ = "Simplified BSD"
import datetime
import calendar
integer_types = (int, long)
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
    """A weekday specifier (0=MO .. 6=SU) with an optional ordinal n,
    e.g. MO(+1) for "first Monday" or MO(-2) for "second-to-last"."""

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Calling MO(+2) builds a new instance; reuse self when the
        # ordinal is unchanged.
        if n == self.n:
            return self
        return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            same = (self.weekday == other.weekday and self.n == other.n)
        except AttributeError:
            # `other` is not weekday-like.
            return False
        return same

    def __repr__(self):
        name = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return name
        return "%s(%+d)" % (name, self.n)
# Pre-built weekday singletons (n=None); `weekdays` keeps them indexable
# by number so an integer weekday argument maps to an instance (0=MO..6=SU).
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta(object):
    """
    The relativedelta type is based on the specification of the excellent
    work done by M.-A. Lemburg in his mx.DateTime extension. However,
    notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There's two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes:

        relativedelta(datetime1, datetime2)

    And the other way is to use the following keyword arguments:

        year, month, day, hour, minute, second, microsecond:
            Absolute information.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative.

        weekday:
            One of the weekday instances (MO, TU, etc). These instances may
            receive a parameter N, specifying the Nth weekday, which could
            be positive or negative (like MO(+1) or MO(-2). Not specifying
            it is the same as specifying +1. You can also use an integer,
            where 0=MO.

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    Here is the behavior of operations with relativedelta:

    1) Calculate the absolute year, using the 'year' argument, or the
       original datetime year, if the argument is not present.
    2) Add the relative 'years' argument to the absolute year.
    3) Do steps 1 and 2 for month/months.
    4) Calculate the absolute day, using the 'day' argument, or the
       original datetime day, if the argument is not present. Then,
       subtract from the day until it fits in the year and month
       found after their operations.
    5) Add the relative 'days' argument to the absolute day. Notice
       that the 'weeks' argument is multiplied by 7 and added to
       'days'.
    6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
       microsecond/microseconds.
    7) If the 'weekday' argument is present, calculate the weekday,
       with the given (wday, nth) tuple. wday is the index of the
       weekday (0-6, 0=Mon), and nth is the number of weeks to add
       forward or backward, depending on its signal. Notice that if
       the calculated date is already Monday, for example, using
       (0, 1) or (0, -1) won't change the day.
    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            # Difference mode: build the delta between two date/datetimes.
            if (not isinstance(dt1, datetime.date)) or (not isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")
            if not type(dt1) == type(dt2): # isinstance(dt1, type(dt2)):
                # Promote the plain date to a datetime so both compare.
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # First guess at the month difference, then step it until
            # dt2 + delta no longer overshoots dt1.
            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            # The remainder goes into seconds/microseconds.
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            # Keyword mode: store relative and absolute components.
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if isinstance(weekday, integer_types):
                # Map a bare integer (0=MO) onto a weekday instance.
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # Translate the year-day into month/day (+leapdays).
                ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

    def _fix(self):
        # Normalize each relative field into its canonical range,
        # carrying overflow into the next larger unit, sign preserved.
        if abs(self.microseconds) > 999999:
            s = self.microseconds//abs(self.microseconds)
            div, mod = divmod(self.microseconds*s, 1000000)
            self.microseconds = mod*s
            self.seconds += div*s
        if abs(self.seconds) > 59:
            s = self.seconds//abs(self.seconds)
            div, mod = divmod(self.seconds*s, 60)
            self.seconds = mod*s
            self.minutes += div*s
        if abs(self.minutes) > 59:
            s = self.minutes//abs(self.minutes)
            div, mod = divmod(self.minutes*s, 60)
            self.minutes = mod*s
            self.hours += div*s
        if abs(self.hours) > 23:
            s = self.hours//abs(self.hours)
            div, mod = divmod(self.hours*s, 24)
            self.hours = mod*s
            self.days += div*s
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years += div*s
        # _has_time records whether any time-of-day component is active;
        # __add__ uses it to decide when a date must become a datetime.
        if (self.hours or self.minutes or self.seconds or self.microseconds or
            self.hour is not None or self.minute is not None or
            self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    def _set_months(self, months):
        # Assign months, normalizing overflow into years.
        self.months = months
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years = div*s
        else:
            self.years = 0

    def __add__(self, other):
        if isinstance(other, relativedelta):
            # delta + delta: relative fields add; absolute fields prefer
            # `other`'s value when set.
            return relativedelta(years=other.years+self.years,
                                 months=other.months+self.months,
                                 days=other.days+self.days,
                                 hours=other.hours+self.hours,
                                 minutes=other.minutes+self.minutes,
                                 seconds=other.seconds+self.seconds,
                                 microseconds=other.microseconds+self.microseconds,
                                 leapdays=other.leapdays or self.leapdays,
                                 year=other.year or self.year,
                                 month=other.month or self.month,
                                 day=other.day or self.day,
                                 weekday=other.weekday or self.weekday,
                                 hour=other.hour or self.hour,
                                 minute=other.minute or self.minute,
                                 second=other.second or self.second,
                                 microsecond=other.microsecond or self.microsecond)
        if not isinstance(other, datetime.date):
            raise TypeError("unsupported type for add operation")
        elif self._has_time and not isinstance(other, datetime.datetime):
            # A plain date cannot carry time-of-day; promote it.
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the length of the target month (e.g. Jan 31
        # plus one month yields Feb 28/29).
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            # Jump to the nth requested weekday (n defaults to +1).
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth)-1)*7
            if nth > 0:
                jumpdays += (7-ret.weekday()+weekday)%7
            else:
                jumpdays += (ret.weekday()-weekday)%7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __radd__(self, other):
        return self.__add__(other)

    def __rsub__(self, other):
        # date - delta is implemented as date + (-delta).
        return self.__neg__().__radd__(other)

    def __sub__(self, other):
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for sub operation")
        return relativedelta(years=self.years-other.years,
                             months=self.months-other.months,
                             days=self.days-other.days,
                             hours=self.hours-other.hours,
                             minutes=self.minutes-other.minutes,
                             seconds=self.seconds-other.seconds,
                             microseconds=self.microseconds-other.microseconds,
                             leapdays=self.leapdays or other.leapdays,
                             year=self.year or other.year,
                             month=self.month or other.month,
                             day=self.day or other.day,
                             weekday=self.weekday or other.weekday,
                             hour=self.hour or other.hour,
                             minute=self.minute or other.minute,
                             second=self.second or other.second,
                             microsecond=self.microsecond or other.microsecond)

    def __neg__(self):
        # Negate relative fields only; absolute fields are kept.
        return relativedelta(years=-self.years,
                             months=-self.months,
                             days=-self.days,
                             hours=-self.hours,
                             minutes=-self.minutes,
                             seconds=-self.seconds,
                             microseconds=-self.microseconds,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __bool__(self):
        # False only when every relative field is zero and every
        # absolute field is unset.
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)

    def __mul__(self, other):
        # Scale relative fields by a float; absolute fields are kept.
        f = float(other)
        return relativedelta(years=int(self.years*f),
                             months=int(self.months*f),
                             days=int(self.days*f),
                             hours=int(self.hours*f),
                             minutes=int(self.minutes*f),
                             seconds=int(self.seconds*f),
                             microseconds=int(self.microseconds*f),
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    __rmul__ = __mul__

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return False
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            # Treat an unset ordinal and +1 as equivalent.
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        # NOTE(review): the relative `microseconds` field is not compared
        # here (only the absolute `microsecond`) -- confirm whether that
        # omission is intentional.
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        # Division is implemented as multiplication by the reciprocal.
        return self.__mul__(1/float(other))

    __truediv__ = __div__

    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("%s=%+d" % (attr, value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                l.append("%s=%s" % (attr, repr(value)))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et

1112
tests/util/vendor/dateutil/rrule.py поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

960
tests/util/vendor/dateutil/tz.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,960 @@
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__license__ = "Simplified BSD"
from six import string_types, PY3
import datetime
import struct
import time
import sys
import os
# Sibling dateutil modules, imported lazily on first use (kept None here
# to avoid circular imports at module load time).
relativedelta = None
parser = None
rrule = None

__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
           "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]

# tzwin is Windows-only; on other platforms the import fails and the
# two names remain None.
try:
    from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
    tzwin, tzwinlocal = None, None
def tzname_in_python2(myfunc):
    """Change unicode output into bytestrings in Python 2.

    The tzname() API changed in Python 3: it used to return bytes but
    was changed to return unicode strings.  This decorator encodes the
    result on Python 2 and passes it through untouched on Python 3.
    """
    def inner_func(*args, **kwargs):
        name = myfunc(*args, **kwargs)
        if PY3:
            return name
        return name.encode()
    return inner_func
ZERO = datetime.timedelta(0)  # shared zero offset, reused by the tzinfo classes
# Ordinal day number of the Unix epoch (1970-01-01), used to compute
# POSIX-style timestamps without calendar math.
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
    """tzinfo implementing UTC: zero offset, no daylight saving."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        # UTC never observes DST.
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return "UTC"

    def __eq__(self, other):
        # Any tzutc equals any other; a fixed tzoffset of zero also
        # compares equal.
        if isinstance(other, tzutc):
            return True
        return isinstance(other, tzoffset) and other._offset == ZERO

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
    """Fixed-offset timezone with a caller-supplied name.

    *offset* is given in seconds east of UTC; DST is never applied.
    """

    def __init__(self, name, offset):
        self._name = name
        self._offset = datetime.timedelta(seconds=offset)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        # Fixed offsets never observe daylight saving.
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return self._name

    def __eq__(self, other):
        # The name is deliberately ignored: equal offsets compare equal.
        if not isinstance(other, tzoffset):
            return False
        return self._offset == other._offset

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        total_seconds = self._offset.days*86400+self._offset.seconds
        return "%s(%s, %s)" % (self.__class__.__name__,
                               repr(self._name),
                               total_seconds)

    __reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
    """tzinfo mirroring the system's local timezone via the time module."""

    # Offsets captured once at class-definition time from the C library's
    # view of local time.
    _std_offset = datetime.timedelta(seconds=-time.timezone)
    if time.daylight:
        _dst_offset = datetime.timedelta(seconds=-time.altzone)
    else:
        _dst_offset = _std_offset

    def utcoffset(self, dt):
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # We can't use mktime here. It is unstable when deciding if
        # the hour near to a change is DST or not: feeding it the same
        # wall-clock fields around a transition can flip the answer.
        #
        # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
        #                          dt.minute, dt.second, dt.weekday(), 0, -1))
        # return time.localtime(timestamp).tm_isdst
        #
        # Here is a more stable implementation: build a POSIX timestamp
        # arithmetically from the naive fields, then let localtime()
        # decide.
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        return time.localtime(timestamp+time.timezone).tm_isdst

    def __eq__(self, other):
        if not isinstance(other, tzlocal):
            return False
        # BUG FIX: the original had an unreachable `return True`
        # immediately after this return statement; removed (dead code).
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://ftp.iana.org/tz/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, string_types):
self._filename = fileobj
fileobj = open(fileobj, 'rb')
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = repr(fileobj)
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4).decode() != "TZif":
raise ValueError("magic not found")
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt).decode()
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1, -1, -1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
    def _find_ttinfo(self, dt, laststd=0):
        """Return the _ttinfo in effect at naive datetime *dt*.

        When *laststd* is true, return instead the most recent *standard*
        (non-DST) ttinfo at or before dt, falling back to _ttinfo_std.
        """
        # Seconds since the epoch, treating dt as wall time (the transition
        # list was converted to wall time after parsing the binary file).
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        idx = 0
        for trans in self._trans_list:
            if timestamp < trans:
                break
            idx += 1
        else:
            # dt is past the last transition (or there are none at all).
            return self._ttinfo_std
        if idx == 0:
            # dt precedes the first transition.
            return self._ttinfo_before
        if laststd:
            # Walk backwards to the nearest standard-time ttinfo.
            while idx > 0:
                tti = self._trans_idx[idx-1]
                if not tti.isdst:
                    return tti
                idx -= 1
            else:
                return self._ttinfo_std
        else:
            return self._trans_idx[idx-1]
def utcoffset(self, dt):
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
    def dst(self, dt):
        """Return the DST adjustment at *dt*, or ZERO when not in DST."""
        if not self._ttinfo_dst:
            return ZERO
        tti = self._find_ttinfo(dt)
        if not tti.isdst:
            return ZERO

        # The documentation says that utcoffset()-dst() must
        # be constant for every dt.
        return tti.delta-self._find_ttinfo(dt, laststd=1).delta

        # An alternative for that would be:
        #
        # return self._ttinfo_dst.offset-self._ttinfo_std.offset
        #
        # However, this class stores historical changes in the
        # dst offset, so I believe that this wouldn't be the right
        # way to implement this.
@tzname_in_python2
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return False
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not self.__eq__(other)
    def __repr__(self):
        # Debug representation showing the source tz file path.
        return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
    def __reduce__(self):
        # Pickle by filename only: the parsed tz data is not serialized, so
        # the original file must still exist when the pickle is loaded.
        if not os.path.isfile(self._filename):
            raise ValueError("Unpickable %s class" % self.__class__.__name__)
        return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
    """tzinfo built from fixed STD/DST offsets plus yearly transition rules.

    *stdoffset*/*dstoffset* are seconds east of UTC; *start*/*end* are
    relativedelta objects applied to Jan 1 of the year in question.
    """
    def __init__(self, stdabbr, stdoffset=None,
                 dstabbr=None, dstoffset=None,
                 start=None, end=None):
        # relativedelta is imported lazily into a module-level global to
        # avoid a circular import at module load time.
        global relativedelta
        if not relativedelta:
            from dateutil import relativedelta
        self._std_abbr = stdabbr
        self._dst_abbr = dstabbr
        if stdoffset is not None:
            self._std_offset = datetime.timedelta(seconds=stdoffset)
        else:
            self._std_offset = ZERO
        if dstoffset is not None:
            self._dst_offset = datetime.timedelta(seconds=dstoffset)
        elif dstabbr and stdoffset is not None:
            # DST defaults to one hour ahead of standard time.
            self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
        else:
            self._dst_offset = ZERO
        if dstabbr and start is None:
            # Default rule: DST starts 2AM on the first Sunday of April...
            self._start_delta = relativedelta.relativedelta(
                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
        else:
            self._start_delta = start
        if dstabbr and end is None:
            # ...and ends 1AM (standard time) on the last Sunday of October.
            self._end_delta = relativedelta.relativedelta(
                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
        else:
            self._end_delta = end
    def utcoffset(self, dt):
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset
    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO
    @tzname_in_python2
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr
    def _isdst(self, dt):
        # No start rule means this zone never observes DST.
        if not self._start_delta:
            return False
        year = datetime.datetime(dt.year, 1, 1)
        start = year+self._start_delta
        end = year+self._end_delta
        dt = dt.replace(tzinfo=None)
        # When start > end the DST period wraps around the new year
        # (southern-hemisphere style rules).
        if start < end:
            return dt >= start and dt < end
        else:
            return dt >= start or dt < end
    def __eq__(self, other):
        if not isinstance(other, tzrange):
            return False
        return (self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr and
                self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._start_delta == other._start_delta and
                self._end_delta == other._end_delta)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "%s(...)" % self.__class__.__name__
    __reduce__ = object.__reduce__
class tzstr(tzrange):
    """tzinfo parsed from a POSIX-style TZ environment string (e.g.
    'EST5EDT,M4.1.0,M10.5.0'); delegates the arithmetic to tzrange."""
    def __init__(self, s):
        # parser is imported lazily into a module-level global to avoid a
        # circular import at module load time.
        global parser
        if not parser:
            from dateutil import parser
        self._s = s
        res = parser._parsetz(s)
        if res is None:
            raise ValueError("unknown string format")
        # Here we break the compatibility with the TZ variable handling.
        # GMT-3 actually *means* the timezone -3.
        if res.stdabbr in ("GMT", "UTC"):
            res.stdoffset *= -1
        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it two times.
        tzrange.__init__(self, res.stdabbr, res.stdoffset,
                         res.dstabbr, res.dstoffset,
                         start=False, end=False)
        if not res.dstabbr:
            self._start_delta = None
            self._end_delta = None
        else:
            self._start_delta = self._delta(res.start)
            if self._start_delta:
                self._end_delta = self._delta(res.end, isend=1)
    def _delta(self, x, isend=0):
        """Build a relativedelta for a parsed transition spec *x*.

        *isend* marks the DST->STD transition, whose time is given in DST
        and must be shifted back to standard time.
        """
        kwargs = {}
        if x.month is not None:
            kwargs["month"] = x.month
            if x.weekday is not None:
                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
                if x.week > 0:
                    kwargs["day"] = 1
                else:
                    kwargs["day"] = 31
            elif x.day:
                kwargs["day"] = x.day
        elif x.yday is not None:
            kwargs["yearday"] = x.yday
        elif x.jyday is not None:
            kwargs["nlyearday"] = x.jyday
        if not kwargs:
            # Default is to start on first sunday of april, and end
            # on last sunday of october.
            if not isend:
                kwargs["month"] = 4
                kwargs["day"] = 1
                kwargs["weekday"] = relativedelta.SU(+1)
            else:
                kwargs["month"] = 10
                kwargs["day"] = 31
                kwargs["weekday"] = relativedelta.SU(-1)
        if x.time is not None:
            kwargs["seconds"] = x.time
        else:
            # Default is 2AM.
            kwargs["seconds"] = 7200
        if isend:
            # Convert to standard time, to follow the documented way
            # of working with the extra hour. See the documentation
            # of the tzinfo class.
            delta = self._dst_offset-self._std_offset
            kwargs["seconds"] -= delta.seconds+delta.days*86400
        return relativedelta.relativedelta(**kwargs)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
    """One STANDARD/DAYLIGHT component of an iCalendar VTIMEZONE."""
    def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
                 tzname=None, rrule=None):
        # Keep offsets as timedeltas; their difference is the DST shift.
        delta_from = datetime.timedelta(seconds=tzoffsetfrom)
        delta_to = datetime.timedelta(seconds=tzoffsetto)
        self.tzoffsetfrom = delta_from
        self.tzoffsetto = delta_to
        self.tzoffsetdiff = delta_to - delta_from
        self.isdst = isdst
        self.tzname = tzname
        self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
    """tzinfo built from one VTIMEZONE definition.

    Each entry of *comps* is a _tzicalvtzcomp describing a STANDARD or
    DAYLIGHT component; _find_comp picks the component whose recurrence
    rule fired most recently before a given datetime.
    """
    def __init__(self, tzid, comps=None):
        self._tzid = tzid
        # BUG FIX: the default used to be a shared mutable list (comps=[]);
        # every instance now gets its own list.
        self._comps = [] if comps is None else comps
        # Small MRU cache of recently resolved datetimes (max 10 entries).
        self._cachedate = []
        self._cachecomp = []
    def _find_comp(self, dt):
        """Return the component in effect at naive datetime *dt*."""
        if len(self._comps) == 1:
            return self._comps[0]
        dt = dt.replace(tzinfo=None)
        try:
            return self._cachecomp[self._cachedate.index(dt)]
        except ValueError:
            pass
        lastcomp = None
        lastcompdt = None
        for comp in self._comps:
            if not comp.isdst:
                # Handle the extra hour in DST -> STD
                compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
            else:
                compdt = comp.rrule.before(dt, inc=True)
            if compdt and (not lastcompdt or lastcompdt < compdt):
                lastcompdt = compdt
                lastcomp = comp
        if not lastcomp:
            # RFC says nothing about what to do when a given
            # time is before the first onset date. We'll look for the
            # first standard component, or the first component, if
            # none is found.
            for comp in self._comps:
                if not comp.isdst:
                    lastcomp = comp
                    break
            else:
                # BUG FIX: this used to be `comp[0]` (subscripting a
                # component object, a TypeError at runtime); the intent
                # is the first component of the list.
                lastcomp = self._comps[0]
        self._cachedate.insert(0, dt)
        self._cachecomp.insert(0, lastcomp)
        if len(self._cachedate) > 10:
            self._cachedate.pop()
            self._cachecomp.pop()
        return lastcomp
    def utcoffset(self, dt):
        return self._find_comp(dt).tzoffsetto
    def dst(self, dt):
        comp = self._find_comp(dt)
        if comp.isdst:
            return comp.tzoffsetdiff
        else:
            return ZERO
    @tzname_in_python2
    def tzname(self, dt):
        return self._find_comp(dt).tzname
    def __repr__(self):
        return "<tzicalvtz %s>" % repr(self._tzid)
    __reduce__ = object.__reduce__
class tzical(object):
    """Timezones extracted from the VTIMEZONE sections of an iCalendar
    (RFC 2445) file.

    *fileobj* may be a path or an open file-like object.  The parsed
    timezones are exposed through keys() and get().
    """
    def __init__(self, fileobj):
        # rrule is imported lazily into a module-level global to avoid a
        # circular import at module load time.
        global rrule
        if not rrule:
            from dateutil import rrule
        close_file = False
        if isinstance(fileobj, string_types):
            self._s = fileobj
            # ical should be encoded in UTF-8 with CRLF
            fileobj = open(fileobj, 'r')
            close_file = True
        elif hasattr(fileobj, "name"):
            self._s = fileobj.name
        else:
            self._s = repr(fileobj)
        self._vtz = {}
        try:
            self._parse_rfc(fileobj.read())
        finally:
            # BUG FIX: close the file we opened ourselves (it used to be
            # leaked); file objects passed in stay owned by the caller.
            if close_file:
                fileobj.close()
    def keys(self):
        """Return the list of TZIDs found in the file."""
        return list(self._vtz.keys())
    def get(self, tzid=None):
        """Return the tzinfo for *tzid*; with no argument, the file must
        define exactly one timezone, which is returned."""
        if tzid is None:
            keys = list(self._vtz.keys())
            if len(keys) == 0:
                raise ValueError("no timezones defined")
            elif len(keys) > 1:
                raise ValueError("more than one timezone available")
            tzid = keys[0]
        return self._vtz.get(tzid)
    def _parse_offset(self, s):
        """Parse a UTC offset like '+0200' or '-023030' into seconds."""
        s = s.strip()
        if not s:
            raise ValueError("empty offset")
        if s[0] in ('+', '-'):
            signal = (-1, +1)[s[0]=='+']
            s = s[1:]
        else:
            signal = +1
        if len(s) == 4:
            # HHMM
            return (int(s[:2])*3600+int(s[2:])*60)*signal
        elif len(s) == 6:
            # HHMMSS
            return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
        else:
            raise ValueError("invalid offset: "+s)
    def _parse_rfc(self, s):
        """Parse the VTIMEZONE sections out of iCalendar text *s*."""
        lines = s.splitlines()
        if not lines:
            raise ValueError("empty string")
        # Unfold: RFC 2445 folds long lines by continuing them with a
        # leading space on the next line.
        i = 0
        while i < len(lines):
            line = lines[i].rstrip()
            if not line:
                del lines[i]
            elif i > 0 and line[0] == " ":
                lines[i-1] += line[1:]
                del lines[i]
            else:
                i += 1
        tzid = None
        comps = []
        invtz = False
        comptype = None
        for line in lines:
            if not line:
                continue
            name, value = line.split(':', 1)
            parms = name.split(';')
            if not parms:
                raise ValueError("empty property name")
            name = parms[0].upper()
            parms = parms[1:]
            if invtz:
                if name == "BEGIN":
                    if value in ("STANDARD", "DAYLIGHT"):
                        # Process component
                        pass
                    else:
                        raise ValueError("unknown component: "+value)
                    comptype = value
                    founddtstart = False
                    tzoffsetfrom = None
                    tzoffsetto = None
                    rrulelines = []
                    tzname = None
                elif name == "END":
                    if value == "VTIMEZONE":
                        if comptype:
                            raise ValueError("component not closed: "+comptype)
                        if not tzid:
                            raise ValueError("mandatory TZID not found")
                        if not comps:
                            raise ValueError("at least one component is needed")
                        # Process vtimezone
                        self._vtz[tzid] = _tzicalvtz(tzid, comps)
                        invtz = False
                    elif value == comptype:
                        if not founddtstart:
                            raise ValueError("mandatory DTSTART not found")
                        if tzoffsetfrom is None:
                            raise ValueError("mandatory TZOFFSETFROM not found")
                        if tzoffsetto is None:
                            # BUG FIX: the message previously named
                            # TZOFFSETFROM for a missing TZOFFSETTO.
                            raise ValueError("mandatory TZOFFSETTO not found")
                        # Process component
                        rr = None
                        if rrulelines:
                            rr = rrule.rrulestr("\n".join(rrulelines),
                                                compatible=True,
                                                ignoretz=True,
                                                cache=True)
                        comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
                                              (comptype == "DAYLIGHT"),
                                              tzname, rr)
                        comps.append(comp)
                        comptype = None
                    else:
                        raise ValueError("invalid component end: "+value)
                elif comptype:
                    if name == "DTSTART":
                        rrulelines.append(line)
                        founddtstart = True
                    elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
                        rrulelines.append(line)
                    elif name == "TZOFFSETFROM":
                        if parms:
                            raise ValueError("unsupported %s parm: %s "%(name, parms[0]))
                        tzoffsetfrom = self._parse_offset(value)
                    elif name == "TZOFFSETTO":
                        if parms:
                            raise ValueError("unsupported TZOFFSETTO parm: "+parms[0])
                        tzoffsetto = self._parse_offset(value)
                    elif name == "TZNAME":
                        if parms:
                            raise ValueError("unsupported TZNAME parm: "+parms[0])
                        tzname = value
                    elif name == "COMMENT":
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
                else:
                    if name == "TZID":
                        if parms:
                            raise ValueError("unsupported TZID parm: "+parms[0])
                        tzid = value
                    elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
            elif name == "BEGIN" and value == "VTIMEZONE":
                tzid = None
                comps = []
                invtz = True
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
# Candidate tzfile names and search paths used by gettz() when no explicit
# name is given.  Windows has no zoneinfo database, so both lists stay empty.
if sys.platform != "win32":
    TZFILES = ["/etc/localtime", "localtime"]
    TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
    TZFILES = []
    TZPATHS = []
def gettz(name=None):
    """Return a tzinfo for *name*, trying several sources in order.

    With no name (and TZ unset or ":"), the system zoneinfo files listed in
    TZFILES/TZPATHS are tried, falling back to tzlocal().  Otherwise the
    name is resolved as: absolute path -> zoneinfo database entry ->
    Windows registry zone (tzwin) -> bundled dateutil.zoneinfo entry ->
    TZ-style string (tzstr) -> the special names GMT/UTC or the local
    abbreviations from time.tzname.  Returns None when nothing matches.
    """
    tz = None
    if not name:
        try:
            name = os.environ["TZ"]
        except KeyError:
            pass
    if name is None or name == ":":
        for filepath in TZFILES:
            if not os.path.isabs(filepath):
                filename = filepath
                for path in TZPATHS:
                    filepath = os.path.join(path, filename)
                    if os.path.isfile(filepath):
                        break
                else:
                    continue
            if os.path.isfile(filepath):
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
        else:
            tz = tzlocal()
    else:
        if name.startswith(":"):
            # BUG FIX: strip the *leading* ":" (POSIX TZ file prefix);
            # this previously dropped the last character of the name.
            name = name[1:]
        if os.path.isabs(name):
            if os.path.isfile(name):
                tz = tzfile(name)
            else:
                tz = None
        else:
            for path in TZPATHS:
                filepath = os.path.join(path, name)
                if not os.path.isfile(filepath):
                    # Zone names on disk use underscores for spaces.
                    filepath = filepath.replace(' ', '_')
                    if not os.path.isfile(filepath):
                        continue
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
            else:
                tz = None
                if tzwin:
                    try:
                        tz = tzwin(name)
                    except OSError:
                        pass
                if not tz:
                    from dateutil.zoneinfo import gettz
                    tz = gettz(name)
                if not tz:
                    for c in name:
                        # name must have at least one offset to be a tzstr
                        if c in "0123456789":
                            try:
                                tz = tzstr(name)
                            except ValueError:
                                pass
                            break
                    else:
                        if name in ("GMT", "UTC"):
                            tz = tzutc()
                        elif name in time.tzname:
                            tz = tzlocal()
    return tz
# vim:ts=4:sw=4:et

90
tests/util/vendor/dateutil/zoneinfo/__init__.py поставляемый Normal file
Просмотреть файл

@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__all__ = ["setcachesize", "gettz", "rebuild"]
# Module-level MRU cache of (name, tzinfo) pairs, trimmed to CACHESIZE.
CACHE = []
CACHESIZE = 10
class tzfile(tzfile):
    # Override pickling so instances restore through this module's gettz(),
    # which re-reads the bundled zoneinfo archive instead of the raw path.
    def __reduce__(self):
        return (gettz, (self._filename,))
def getzoneinfofile():
    """Return the path of the newest bundled zoneinfo*.tar.* archive
    sitting next to this module, or None when there is none."""
    base = os.path.dirname(__file__)
    for entry in sorted(os.listdir(base), reverse=True):
        if entry.startswith("zoneinfo") and ".tar." in entry:
            return os.path.join(base, entry)
    return None
# Locate the bundled zoneinfo tarball once at import time; the helper is
# then deleted because it is not part of the public API.
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
    """Limit the module-level timezone cache to *size* entries, dropping
    any entries beyond that bound immediately."""
    global CACHESIZE, CACHE
    CACHESIZE = size
    CACHE[size:] = []
def gettz(name):
    """Return the tzinfo for zone *name* from the bundled zoneinfo archive.

    Results (including misses, stored as None) are kept in a small MRU
    cache; returns None when the archive is missing or has no such member.
    """
    tzinfo = None
    if ZONEINFOFILE:
        for cachedname, tzinfo in CACHE:
            if cachedname == name:
                break
        else:
            tf = TarFile.open(ZONEINFOFILE)
            # BUG FIX: close the archive even if tzfile() raises while
            # parsing the member (the handle used to leak on error).
            try:
                try:
                    zonefile = tf.extractfile(name)
                except KeyError:
                    tzinfo = None
                else:
                    tzinfo = tzfile(zonefile)
            finally:
                tf.close()
            CACHE.insert(0, (name, tzinfo))
            del CACHE[CACHESIZE:]
    return tzinfo
def rebuild(filename, tag=None, format="gz"):
    """Rebuild the bundled zoneinfo tarball from an IANA tzdata archive.

    *filename* is a tzdata source tarball; its zone files are compiled with
    the external ``zic`` tool into a temporary directory, any existing
    ``zoneinfo*.tar.*`` next to this module is removed, and a fresh archive
    named ``zoneinfo<tag>.tar.<format>`` is written in its place.
    """
    import tempfile, shutil
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    if tag:
        tag = "-"+tag
    else:
        # BUG FIX: a None tag used to be interpolated as the literal
        # string "None" in the archive name (zoneinfoNone.tar.gz).
        tag = ""
    targetname = "zoneinfo%s.tar.%s" % (tag, format)
    try:
        tf = TarFile.open(filename)
        # The "backwards" zone file contains links to other files, so must be
        # processed as last
        for name in sorted(tf.getnames(),
                           key=lambda k: k != "backward" and k or "z"):
            if not (name.endswith(".sh") or
                    name.endswith(".tab") or
                    name == "leapseconds"):
                tf.extract(name, tmpdir)
                filepath = os.path.join(tmpdir, name)
                # NOTE(review): shell command built from archive member
                # names -- assumes a trusted tzdata tarball.
                os.system("zic -d %s %s" % (zonedir, filepath))
        tf.close()
        target = os.path.join(moduledir, targetname)
        for entry in os.listdir(moduledir):
            if entry.startswith("zoneinfo") and ".tar." in entry:
                os.unlink(os.path.join(moduledir, entry))
        tf = TarFile.open(target, "w:%s" % format)
        for entry in os.listdir(zonedir):
            entrypath = os.path.join(zonedir, entry)
            tf.add(entrypath, entry)
        tf.close()
    finally:
        shutil.rmtree(tmpdir)

Двоичные данные
tests/util/vendor/dateutil/zoneinfo/zoneinfo--latest.tar.gz поставляемый Normal file

Двоичный файл не отображается.

626
tests/util/vendor/strangman/glplot.py поставляемый
Просмотреть файл

@ -1,626 +0,0 @@
##
## glplot.py ... combines OpenGL and wxPython to produce quick-and-dirty, zoomable line-plots
##
## Copyright (c) Gary Strangman, All Rights Reserved
## This software is provided AS-IS. Improvements are welcome. strang@nmr.mgh.harvard.edu
##
## NOTE: left button and drag creates a zoom box, right button returns to full-plot view
##
## Requires PyOpenGL, Numeric, and wxPython, and Python 2.2+
## Tested on Linux and Windoze platforms. Does what I need it to do on both.
##
try:
import im # im module only required to save the generated bitmaps
except:
pass
import glob, os, sys, string
import Numeric as N
from wxPython.wx import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL import GLUT
from wxPython.wx import *
from wxPython.glcanvas import *
from Numeric import *
import math, os, sys
# Cycle of RGB line colors used when plotting multiple timeseries.
glplotcolors = [(0.,0.,1.), # blue
                (0.,1.,0.), # green
                (1.,0.,0.), # red
                (0.,1.,1.), # cyan
                (1.,0.,1.), # magenta
                (1.,1.,0.)] # yellow
#---------------------------------------------------------------------------------------
class RawOpengl(wxGLCanvas):
    """Minimal wxGLCanvas wrapper: wires size/paint/erase events and wraps
    subclass-supplied redraw() with the GL matrix save/flush/buffer-swap."""
    def __init__(self, parent,*args,**kw):
        apply(wxGLCanvas.__init__,(self,parent),kw)
        EVT_SIZE(self,self.wxSize)
        EVT_PAINT(self,self.wxPaint)
        EVT_ERASE_BACKGROUND(self, self.wxEraseBackground)
    def wxSize(self, event):
        ### Size callback
        size = self.GetClientSize()
        if self.GetContext():
            self.SetCurrent()
            glViewport(0, 0, size.width, size.height)
    def wxEraseBackground(self, event):
        pass # Do nothing, to avoid flashing.
    def wxPaint(self,*dummy):
        dc = wxPaintDC(self)
        self.wxRedraw(None)
    def wxRedraw(self, *dummy):
        ### Capture rendering context
        dc = wxClientDC(self)
        # dc = wxPaintDC(self)
        self.SetCurrent()
        _mode = glGetDouble(GL_MATRIX_MODE)
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        # redraw is supplied by the subclass (OpenglMultiLinePlot assigns
        # self.redraw = self.paintit in its __init__).
        self.redraw()
        glFlush()
        glPopMatrix()
        ### Swap buffers
        self.SwapBuffers()
        glMatrixMode(_mode)
    def wxExpose(self, *dummy):
        self.wxRedraw()
#---------------------------------------------------------------------------------------
class OpenglMultiLinePlot(RawOpengl):
    """
    A class for drawing line plots on an openGL canvas.

    Holds the data (xs, ys, optional errors), the current zoom window
    (plotxmin..plotymax), and implements rubber-band zooming with the left
    mouse button and full-view reset with the right button.
    """
    def __init__(self, parent=None, autospin_allowed=1, xs=None, ys=None, errors=None, **kw):
        apply(RawOpengl.__init__, (self, parent), kw)
        self.parent = parent
        if ys is None:
            self.ys = None
            self.xs = None
        else: # len(ys.shape) == 1:
            self.set_ys(ys)
            self.set_xs(xs)
        self.errors = errors
        self.arrow = 0
        self.font = GLUT.GLUT_BITMAP_HELVETICA_12
        # self.font = WGL.
        # self.font = GLTTwxFont.GLTTFont('arialbd',9)
        self.parent = parent
        self.drawcount = 0
        # RawOpengl.wxRedraw calls self.redraw; route it to paintit.
        self.redraw = self.paintit
        self.xscale = 1
        self.yscale = 1
        self.lineweight = 1.0
        self.bkgdcolor = [0., 0., 0., 0.]
        self.settingbackground = 0
        self.xminusflag = 0
        self.yminusflag = 0
        self.box = None
        # NOTE(review): if ys is None these ravel() calls receive None --
        # looks like construction without data relies on set_xs/set_ys
        # being called immediately afterwards; confirm against callers.
        self.dataxmin = min(ravel(self.xs))
        self.dataymin = min(ravel(self.ys))
        self.dataxmax = max(ravel(self.xs))
        self.dataymax = max(ravel(self.ys))
        self.plotxmin = self.dataxmin
        self.plotymin = self.dataymin
        self.plotxmax = self.dataxmax
        self.plotymax = self.dataymax
        EVT_MOUSE_EVENTS(self, self.OnMouseEvent)
        EVT_CHAR(self,self.OnChar)
#    def wxPaint(self,*dummy):
#        dc = wxPaintDC(self)
#        self.paintit()
    def OnChar(self, event):
        # 'L' dumps the current framebuffer to a raw log file.
        # print event.KeyCode()
        if event.KeyCode() < 256:
            key = string.upper(chr(event.KeyCode()))
            if key == 'L':
                popup = wxFileDialog(NULL, "Choose LOG filename ...", "",
                                     "", "*", wxSAVE, wxPoint(100,100))
                popup.ShowModal()
                # @@@need to make "enter" default to Save, somehow
                a = glReadPixels(0,0,self.GetSize().x,self.GetSize().y,GL_RGB,GL_UNSIGNED_BYTE)
                size = self.GetClientSizeTuple()
                a = array(size,Int).tostring() + a
                f=open(popup.GetFilename(),'wb')
                f.write(a)
                f.close()
    def OnMouseEvent(self,event):
        # Left-drag draws a zoom box; left-up applies it; right-up resets.
        size = self.GetSize()
        # determine where (in proportions) on screen the click happened
        xr = float(event.GetX())/size.x  # GetX=0 at left
        yr = float(event.GetY())/size.y  # GetY=0 at top
        # scale this location to where WITHIN THE PLOT the click happened (in proportions)
        # ... with 0,0 at lower left of PLOT area
        xrs = (xr-(1-self.xscale)/2.)/float(self.xscale)    # scale to the plot area
        yrs = 1-(yr-(1-self.yscale)/2.)/float(self.yscale)  # invert Y and scale to plot area
        if event.LeftDown():
            self.xminusflag = 0  #was selection box dragged LEFT?
            self.yminusflag = 0  #was selection box dragged UP?
            # Anchor corner of the zoom box, in data coordinates.
            self.box = [(xrs*(self.plotxmax-self.plotxmin)+self.plotxmin),
                        (yrs*(self.plotymax-self.plotymin)+self.plotymin)]
            self.xstart = xr
            self.ystart = yr
        elif self.box and event.LeftIsDown() and not event.LeftDown():
            # compute position of other box-corner within plot
            nxrs = (xrs*(self.plotxmax-self.plotxmin)+self.plotxmin)
            nyrs = (yrs*(self.plotymax-self.plotymin)+self.plotymin)
            if nxrs < self.box[0]:
                self.xminusflag = 1
            else:
                self.xminusflag = 0
            if nyrs < self.box[1]:
                self.yminusflag = 1
            else:
                self.yminusflag = 0
            if self.box[0]<>nxrs or self.box[1]<>nyrs:
                self.box = [self.box[0], self.box[1], nxrs, nyrs]
            else: # may need to convert a 4-element box to a 2-element box
                self.box = [nxrs, nyrs]
            self.xend = xr
            self.yend = yr
            self.paintit()
        elif event.LeftUp():
            if len(self.box)>2:
                # if dragged up or left, exchange value-pairs
                if self.box[0] > self.box[2]:
                    self.box[0],self.box[2] = self.box[2],self.box[0]
                if self.box[1] > self.box[3]:
                    self.box[1],self.box[3] = self.box[3],self.box[1]
                # Apply the box as the new zoom window.
                self.plotxmin = self.box[0]
                self.plotymin = self.box[1]
                self.plotxmax = self.box[2]
                self.plotymax = self.box[3]
            self.xminusflag = 0
            self.yminusflag = 0
            self.box = None
            self.paintit() # can't use wxRedraw for some reason
        if event.RightUp():
            # Reset to the full data extent.
            self.plotxmin = self.dataxmin
            self.plotymin = self.dataymin
            self.plotxmax = self.dataxmax
            self.plotymax = self.dataymax
            self.box = None
            self.paintit() # can't use wxRedraw for some reason
    def OnSize(self, event):
        size = self.GetClientSize()
        if self.GetContext() != 'NULL':
            self.SetCurrent()
            glViewport(0, 0, size.width, size.height)
    def changelineweight(self,step):
        # Adjust plot line width by *step*, keeping it strictly positive.
        self.lineweight += step
        if self.lineweight <= 0:
            self.lineweight = 0.1
        self.paintit()
    def save_colorpixelmap(self):
        # Grab the RGB framebuffer and display it via the optional im module.
        string = glReadPixels(0,0,self.GetSize().x,self.GetSize().y,GL_RGB,GL_UNSIGNED_BYTE)
        size = list(self.GetClientSizeTuple())
        a = fromstring(string,Int8)  # convert pixels to array
        print a.shape, size
        size[0],size[1] = size[1],size[0]  # swap x,y dimensions for proper unraveling
        r = a[0::3]+0
        g = a[1::3]+0
        b = a[2::3]+0
        r.shape = size
        g.shape = size
        b.shape = size
        carray = array([r[::-1,:],g[::-1,:],b[::-1,:]])  # up-down flip the image
        print carray.shape, type(carray), carray.typecode(), min(ravel(carray)), max(ravel(carray))
        im.ashow(carray)
    def save_graypixelmap(self):
        # Same as save_colorpixelmap but reads a single luminance channel.
        string = glReadPixels(0,0,self.GetSize().x,self.GetSize().y,GL_LUMINANCE,GL_FLOAT)
        size = list(self.GetClientSizeTuple())
        a = fromstring(string,Float32)  # convert pixels to array
        print a.shape, size
        size[0],size[1] = size[1],size[0]  # swap x,y dimensions for proper unraveling
        carray = reshape(a,size)*255  # must be a luminance map
        print carray.shape, type(carray), carray.typecode(), min(ravel(carray)), max(ravel(carray))
        im.ashow(carray[::-1,:])
    def setbackground(self,color):
        # settingbackground guards against infinite recursion, since
        # paintit() calls setbackground() itself.
        if self.settingbackground:
            return
        if len(color) == 3:
            color = list(color) + [0.]
        apply(glClearColor,color)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.bkgdcolor = color
        self.settingbackground = 1
        self.paintit()
        self.settingbackground = 0
    def set_xs(self, xs=None):
        """Install x-axis data (defaults to 0..len(ys)-1) and refresh the
        cached x extents and zoom window."""
        if self.ys is None:
            xs = None
            return
        elif xs is None:
            xs = arange(self.ys.shape[0])
        self.xs = xs
        self.x_offset = -xs[0]
        self.x_scale = 1.0/(max(xs)-min(xs))
        self.dataxmin = min(ravel(self.xs))
        self.dataxmax = max(ravel(self.xs))
        self.plotxmin = self.dataxmin
        self.plotxmax = self.dataxmax
    def transform(self, ys):
        # should convert to a rank-2 array
        return add.reduce(ys)
    def set_ys(self, ys):
        """Install y-axis data (reduced to rank 2 via transform()) and
        refresh the cached y extents and zoom window."""
        if ys is None:
            self.ys = None
            return
        while len(ys.shape) > 2:
            ys = self.transform(ys)
        self.ys = ys
        self.y_offset = -ys[0]
        try:
            self.y_scale = 1.0/(max(ys)-min(ys))
        except ZeroDivisionError:
            # Flat data: avoid dividing by a zero range.
            self.y_scale = 1.0
        self.dataymin = min(ravel(self.ys))
        self.dataymax = max(ravel(self.ys))
        self.plotymin = self.dataymin
        self.plotymax = self.dataymax
    def set_errors(self, errors):
        """Install errorbar data and widen the y extents to cover the bars."""
        if errors is None:
            self.errors = None
            return
        while len(errors.shape) > 2:
            errors = self.transform(errors)
        self.errors = errors
        self.dataymin = min(ravel(self.ys-abs(self.errors)))
        self.dataymax = max(ravel(self.ys+abs(self.errors)))
        self.plotymin = self.dataymin
        self.plotymax = self.dataymax
    def paintit(self):#, event):
        """Full redraw: background, errorbars, timeseries, axes, numeric
        axis labels, bounding box and (while dragging) the selection box."""
        ### PREPARE FOR DRAWING AND CLEAR WINDOW
        self.setbackground(self.bkgdcolor)
        if self.ys is None:
            return
        ### SET UP FOR REDRAWING
        if not self.xs:
            self.set_xs()
        size = self.GetClientSize()
        w,h = size.x, size.y
        WZ = float(w) / len(self.xs)
        HZ = float(h) / len(self.ys)
        glLoadIdentity()
        glEnable(GL_LINE_SMOOTH)
        glEnable(GL_BLEND)
        glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
        glHint(GL_POINT_SMOOTH_HINT, GL_NICEST)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        # IMPORTANT COORDINATE TRANSFORMATIONS
        self.xscale = 0.84
        self.yscale = 0.8
        glScale(self.xscale, self.yscale, 1.0)  # scale everything hereafter in this matrix
        glOrtho(self.plotxmin, self.plotxmax,
                self.plotymin, self.plotymax,
                0, 1)
        # Make sure both are 2D, so plot code can be general for multi and single lines
        if len(self.ys.shape) == 1:
            self.ys = self.ys[:,NewAxis]
            if self.errors:
                self.errors.shape = (len(self.errors),1)
        ### PLOT ERRORBARS (SAME COLOR AS ASSOCIATED TIMESERIES)
        if hasattr(self, 'errors') and self.errors:
            # loop through all timeseries'
            for i in range(self.errors.shape[1]):
                if self.errors.shape[1] > 1:
                    colortrio = glplotcolors[i%len(glplotcolors)]
                    apply(glColor3f,colortrio)
                else:
                    glColor3f(1.,1.,0.)
                glLineWidth(1.0)
                lower = self.ys[:,i] - self.errors[:,i]
                upper = self.ys[:,i] + self.errors[:,i]
                glBegin(GL_LINES)
                for x,yl, yu in transpose(array([self.xs, lower, upper])):
                    if x>=self.plotxmin and x<=self.plotxmax:
                        glVertex2f(x,yl)
                        glVertex2f(x,yu)
                glEnd()
        ### PLOT TIMESERIES (after/ON-TOP-OF ERRORBARS)
        # loop through all timeseries'
        for i in range(self.ys.shape[1]):
            glLineWidth(self.lineweight)
            if self.ys.shape[1] > 1:
                colortrio = glplotcolors[i%len(glplotcolors)]
                apply(glColor3f,colortrio)
            else:
                glColor3f(1.,1.,1.)
            d = array((self.xs+0.0, self.ys[:,i]))
            t = transpose(d)
            glBegin(GL_LINE_STRIP)
            for vert in t:
                if vert[0]>=self.plotxmin and vert[0]<=self.plotxmax:
                    glVertex(vert[0],vert[1])
            glEnd()
        ### PLOT X/Y-AXIS LINES (white)
        glColor3f(1.,1.,1.)
        glLineWidth(1.5)
        glBegin(GL_LINES)
        glVertex2i(self.plotxmin, 0)
        glVertex2i(self.plotxmax, 0)
        glVertex2i(0, self.plotymin)
        glVertex2i(0, self.plotymax)
        glEnd()
        ###
        ### TEXT PLOTTING CODE ... USED TO USE PyGLTT; NOW USES GLUT (until GLTT/FTGL works again)
        ###
        self.textcolor = (1,1,1)
        # Pick round numbers to be displayed
        xrange_sigfig = log10(self.plotxmax-self.plotxmin)
        yrange_sigfig = log10(self.plotymax-self.plotymin)
        # print self.plotymax, self.plotymin, yrange_sigfig
        if xrange_sigfig<=1:
            xrounddigits = int(xrange_sigfig)+3
        else:
            xrounddigits = 0
        if yrange_sigfig<=1:
            yrounddigits = int(yrange_sigfig)+3
        else:
            yrounddigits = 0
        # print self.plotymax, self.plotymin, yrange_sigfig
        # And properly format the numeric text strings to be dispalyed
        if xrounddigits:
            xminstr = str(round(self.plotxmin,xrounddigits))
            xmaxstr = str(round(self.plotxmax,xrounddigits))
        else:
            xminstr = str(int(round(self.plotxmin,xrounddigits)))
            xmaxstr = str(int(round(self.plotxmax,xrounddigits)))
        if yrounddigits:
            yminstr = str(round(self.plotymin,yrounddigits))
            ymaxstr = str(round(self.plotymax,yrounddigits))
        else:
            yminstr = str(int(round(self.plotymin,yrounddigits)))
            ymaxstr = str(int(round(self.plotymax,yrounddigits)))
        # Figure out where to place the numerical labels
        # NOTE: Though we are using an Identity matrix, bitmap font locations apparently
        # want to be localized in pixel-coordinates (hence all the GetSize() calls)
        glPushMatrix()
        glLoadIdentity()
        xaxis_yoffset = -0.93*self.GetSize().y
        yaxis_xoffset = -0.94*self.GetSize().x
        xaxis_xmin = (-self.xscale-0.01)*self.GetSize().x
        xaxis_xmax = (self.xscale-0.01)*self.GetSize().x
        yaxis_ymin = -0.86*self.GetSize().y
        yaxis_ymax = 0.78*self.GetSize().y
        # print
        # print self.GetSize(), self.GetClientSize()
        # print "X-axis min: ",xaxis_xmin, xaxis_yoffset, ' / ', xminstr
        # print "X-axis max: ",xaxis_xmax, xaxis_yoffset, ' / ', xmaxstr
        # print "Y-axis min: ",yaxis_xoffset, yaxis_ymin, ' / ', yminstr
        # print "Y-axis max: ",yaxis_xoffset, yaxis_ymax, ' / ', ymaxstr
        ### y-axis maximum
        self.draw_text(self,
                       yaxis_xoffset, #self.GetSize().x*xoffset,
                       yaxis_ymax, #self.GetSize().y*ymaxoffset,
                       ymaxstr,None,None)
        ### y-axis minimum
        self.draw_text(self,
                       yaxis_xoffset, #self.GetSize().x*xoffset,
                       yaxis_ymin, #self.GetSize().y*yminoffset,
                       yminstr,None,None)
#                       GLTTwxFont.ALIGN_RIGHT, GLTTwxFont.VALIGN_BOTTOM)
        ### x-axis maximum
        self.draw_text(self,
                       xaxis_xmax, #self.GetSize().x*xoffset,
                       xaxis_yoffset, #self.GetSize().y*ymaxoffset,
                       xmaxstr,None,None)
        ### x-axis minimum
        self.draw_text(self,
                       xaxis_xmin, #self.GetSize().x*xoffset,
                       xaxis_yoffset, #self.GetSize().y*yminoffset,
                       xminstr,None,None)
#                       GLTTwxFont.ALIGN_RIGHT, GLTTwxFont.VALIGN_BOTTOM)
        ### arrow value
#        self.draw_text(self,
#                       xarrowoffset,
#                       self.GetSize().y*ymaxoffset,
#                       ' '+str(round(self.ys[self.arrow],1)), 0, 0) #,
#                       GLTTwxFont.ALIGN_LEFT, GLTTwxFont.VALIGN_BOTTOM)
        ### arrow timepoint
#        self.draw_text(self,
#                       xarrowoffset,
#                       self.GetSize().y*yminoffset,
#                       ' '+str(self.arrow),None,None)
#                       GLTTwxFont.ALIGN_LEFT, GLTTwxFont.VALIGN_BOTTOM)
        # Finally, draw a bounding-box (bottom/top left/right)
        # NOTE: No need to use GetSize() here; we have an Identity matrix and are
        # drawing normal (non-bitmap-text) stuff
        BL = [-self.xscale,-self.yscale]
        TL = [-self.xscale, self.yscale]
        TR = [self.xscale, self.yscale]
        BR = [self.xscale, -self.yscale]
        #print BL, TL, TR, BR
        glPointSize(1.0)
        glColor3f(0.3,0.3,0.3)
        glBegin(GL_LINE_STRIP)
        glVertex2f(BL[0],BL[1])
        glVertex2f(TL[0],TL[1])
        glVertex2f(TR[0],TR[1])
        glVertex2f(BR[0],BR[1])
        glVertex2f(BL[0],BL[1])
        glEnd()
        glPopMatrix()
        ### LAST, BUT NOT LEAST, DRAW SELECTION-BOX ... (RED)
        if self.box and len(self.box)==4:
            glPointSize(2.0)
            glColor3f(1.,0.,0.)
            glBegin(GL_LINE_STRIP)
            glVertex2f(self.box[0], self.box[1])
            glVertex2f(self.box[2], self.box[1])
            glVertex2f(self.box[2], self.box[3])
            glVertex2f(self.box[0], self.box[3])
            glVertex2f(self.box[0], self.box[1])
            glEnd()
        # FINALLY, CLIP VIEW TO SPECIFIED SUB-PORTION OF WINDOW
#        glEnable(GL_CLIP_PLANE1)
#        glEnable(GL_CLIP_PLANE2)
#        glEnable(GL_CLIP_PLANE3)
#        glEnable(GL_CLIP_PLANE4)
#        glClipPlane(GL_CLIP_PLANE1, [0., 1., 0., -self.plotymin])  # clips off the bottom
#        glClipPlane(GL_CLIP_PLANE2, [0., -1., 0., self.plotymax])  # clips off the top
#        glClipPlane(GL_CLIP_PLANE3, [1., 0., 0., -self.plotxmin])  # clips off the left
#        glClipPlane(GL_CLIP_PLANE4, [-1., 0., 0., self.plotxmax])  # clips off the right
        self.SwapBuffers() # NECESSARY, or screen doesn't redraw
    def draw_text(self, canvas, x,y,text,align,valign):
        # Render *text* with GLUT bitmap fonts at pixel offsets x,y
        # (align/valign are accepted but unused since the GLTT removal).
        apply(glColor3f, self.textcolor)
        size = self.GetClientSize()
        w,h = float(size.x), float(size.y)
        glRasterPos2f(x/w,y/h)
        for char in text:
            # print x,y,self.font,char
            GLUT.glutBitmapCharacter(self.font,ord(char)) #text[0]) #self.font,text)
        # self.font.write_string(canvas, x, y, text, align, valign)
    def getpropX(self, x):
        # Proportional x position within the central 80% plot band.
        w = self.GetClientSize().x
        p = (x - w*.1) / (w*.8)
        return p
    def TimeToQuit(self, event):
        ### REMAKE LINEPLOT WHEN SELF.BOX IS RE-CREATED
        self.Close(true)
def glplot(yvals=None,xvals=None,errors=None):
    """
    Create a plot using a wxGLCanvas.

    Usage:   glplot(
                    x=None,       x-axis data
                    y=None,       y-axis data, skip x and use y=[data] for x=range(len(y))
                    errors=None,  y-axis errorbar data

    Builds a one-window wxPython application around OpenglMultiLinePlot
    and blocks in its event loop until the window is closed.
    """
    if not xvals and not yvals:
        return
    if not xvals:
        xvals = N.arange(yvals.shape[0])
    class MyApp(wxApp):
        def OnInit(self): #,x=None,y=None,errors=None):
            # Window is sized so the GL canvas inside is 1200x400 pixels.
            windowXpixels = 8   # 8 pixels of frame OUTSIDE the canvas
            windowYpixels = 27  # 27 pixels of frame plus title-bar OUTSIDE the canvas
            self.frame = wxFrame(NULL, -1, "wxPython Context",
                                 wxPoint(0,0),
                                 wxSize(1200+windowXpixels,400+windowYpixels))
            self.mainmenu = wxMenuBar()
            filemenu = wxMenu()
            cimgID = wxNewId()
            gimgID = wxNewId()
            exitID = wxNewId()
            filemenu.Append(cimgID, 'Save C&olor\tAlt-C', 'Save color pixelmap using IC.exe')
            filemenu.Append(gimgID, 'Save G&ray\tAlt-G', 'Save gray pixelmap using IC.exe')
            filemenu.Append(exitID, 'E&xit\tAlt-X', 'Quit')
            EVT_MENU(self, cimgID, self.OnCImgSave)
            EVT_MENU(self, gimgID, self.OnGImgSave)
            EVT_MENU(self, exitID, self.OnFileExit)
            self.mainmenu.Append(filemenu, '&File')
            propmenu = wxMenu()
            fontID = wxNewId()
            lineweightupID = wxNewId()
            lineweightdnID = wxNewId()
            bkgdID = wxNewId()
            propmenu.Append(fontID, 'F&onts\tAlt-F', 'Change font for all text items')
            propmenu.Append(lineweightupID, 'I&ncrease lineweight\tAlt-I', 'Increase plotting line weight')
            propmenu.Append(lineweightdnID, 'D&ecrease lineweight\tAlt-D', 'Decrease plotting line weight')
            propmenu.Append(bkgdID, 'B&ackground color\tAlt-B', 'Change plot background color')
            EVT_MENU(self, fontID, self.OnFont)
            EVT_MENU(self, lineweightupID, self.OnLineweightup)
            EVT_MENU(self, lineweightdnID, self.OnLineweightdn)
            EVT_MENU(self, bkgdID, self.OnBkgd)
            self.mainmenu.Append(propmenu, '&Edit')
            self.frame.SetMenuBar(self.mainmenu)
            # Now, create the line-plot part
            self.win = OpenglMultiLinePlot(self.frame,autospin_allowed=0)
            self.frame.Show(TRUE)
            self.SetTopWindow(self.frame)
            return TRUE
        def OnCImgSave(self,event):
            self.win.save_colorpixelmap()
        def OnGImgSave(self,event):
            self.win.save_graypixelmap()
        def OnFileExit(self,event):
            sys.exit()
        def OnFont(self,event):
            # Font dialog: stores the chosen face/style/size/color on the plot.
            data = wxFontData()
            dlg = wxFontDialog(self.frame, data)
            if dlg.ShowModal() == wxID_OK:
                data = dlg.GetFontData()
                font = data.GetChosenFont()
                print 'You selected: ',font.GetFaceName(),', ',str(font.GetPointSize()),', color ',data.GetColour().Get()
                self.win.fontname = font.GetFaceName()
                self.win.fontstype = font.GetStyle()
                self.win.fontsize = font.GetPointSize()
                self.win.fontcolor = data.GetColour().Get()
            dlg.Destroy()
        def OnLineweightup(self,event):
            self.win.changelineweight(+1)
        def OnLineweightdn(self,event):
            self.win.changelineweight(-1)
        def OnBkgd(self,event):
            # Color dialog: 0-255 RGB is rescaled to the 0-1 floats GL wants.
            data = wxColourData()
            dlg = wxColourDialog(self.frame, data)
            if dlg.ShowModal() == wxID_OK:
                data = dlg.GetColourData()
                wxcolor = data.GetColour()
                dlg.Destroy()
                color = N.array([wxcolor.Red(), wxcolor.Green(), wxcolor.Blue()])
                newcolor = color / 255.
                self.win.setbackground(newcolor)
    app = MyApp(0)
    app.win.set_xs(xvals)
    app.win.set_ys(yvals)
    app.win.set_errors(errors)
    app.MainLoop()

1168
tests/util/vendor/strangman/io.py поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

899
tests/util/vendor/strangman/pstat.py поставляемый
Просмотреть файл

@ -34,8 +34,6 @@ modeled after those found in the |Stat package by Gary Perlman, plus a
number of other useful list/file manipulation functions. The list-based
functions include:
abut (source,*args)
simpleabut (source, addon)
colex (listoflists,cnums)
collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
dm (listoflists,criterion)
@ -117,90 +115,11 @@ __version__ = 0.4
### Array functions (for NumPy-enabled computers) appear below.
###
def abut (source,*args):
"""
Like the |Stat abut command. It concatenates two lists side-by-side
and returns the result. '2D' lists are also accomodated for either argument
(source or addon). CAUTION: If one list is shorter, it will be repeated
until it is as long as the longest list. If this behavior is not desired,
use pstat.simpleabut().
Usage: abut(source, args) where args=any # of lists
Returns: a list of lists as long as the LONGEST list past, source on the
'left', lists in <args> attached consecutively on the 'right'
"""
if type(source) not in [ListType,TupleType]:
source = [source]
for addon in args:
if type(addon) not in [ListType,TupleType]:
addon = [addon]
if len(addon) < len(source): # is source list longer?
if len(source) % len(addon) == 0: # are they integer multiples?
repeats = len(source)/len(addon) # repeat addon n times
origadd = copy.deepcopy(addon)
for i in range(repeats-1):
addon = addon + origadd
else:
repeats = len(source)/len(addon)+1 # repeat addon x times,
origadd = copy.deepcopy(addon) # x is NOT an integer
for i in range(repeats-1):
addon = addon + origadd
addon = addon[0:len(source)]
elif len(source) < len(addon): # is addon list longer?
if len(addon) % len(source) == 0: # are they integer multiples?
repeats = len(addon)/len(source) # repeat source n times
origsour = copy.deepcopy(source)
for i in range(repeats-1):
source = source + origsour
else:
repeats = len(addon)/len(source)+1 # repeat source x times,
origsour = copy.deepcopy(source) # x is NOT an integer
for i in range(repeats-1):
source = source + origsour
source = source[0:len(addon)]
source = simpleabut(source,addon)
return source
def simpleabut (source, addon):
"""
Concatenates two lists as columns and returns the result. '2D' lists
are also accomodated for either argument (source or addon). This DOES NOT
repeat either list to make the 2 lists of equal length. Beware of list pairs
with different lengths ... the resulting list will be the length of the
FIRST list passed.
Usage: simpleabut(source,addon) where source, addon=list (or list-of-lists)
Returns: a list of lists as long as source, with source on the 'left' and
addon on the 'right'
"""
if type(source) not in [ListType,TupleType]:
source = [source]
if type(addon) not in [ListType,TupleType]:
addon = [addon]
minlen = min(len(source),len(addon))
list = copy.deepcopy(source) # start abut process
if type(source[0]) not in [ListType,TupleType]:
if type(addon[0]) not in [ListType,TupleType]:
for i in range(minlen):
list[i] = [source[i]] + [addon[i]] # source/addon = column
else:
for i in range(minlen):
list[i] = [source[i]] + addon[i] # addon=list-of-lists
else:
if type(addon[0]) not in [ListType,TupleType]:
for i in range(minlen):
list[i] = source[i] + [addon[i]] # source=list-of-lists
else:
for i in range(minlen):
list[i] = source[i] + addon[i] # source/addon = list-of-lists
source = list
return source
def colex (listoflists,cnums):
def colex(listoflists, cnums):
"""
Extracts from listoflists the columns specified in the list 'cnums'
(cnums can be an integer, a sequence of integers, or a string-expression that
@ -213,14 +132,14 @@ Returns: a list-of-lists corresponding to the columns from listoflists
"""
global index
column = 0
if type(cnums) in [ListType,TupleType]: # if multiple columns to get
if type(cnums) in [ListType, TupleType]: # if multiple columns to get
index = cnums[0]
column = map(lambda x: x[index], listoflists)
for col in cnums[1:]:
index = col
column = abut(column,map(lambda x: x[index], listoflists))
column = zip(column, map(lambda x: x[index], listoflists))
elif type(cnums) == StringType: # if an 'x[3:]' type expr.
evalstring = 'map(lambda x: x'+cnums+', listoflists)'
evalstring = 'map(lambda x: x' + cnums + ', listoflists)'
column = eval(evalstring)
else: # else it's just 1 col to get
index = cnums
@ -228,114 +147,8 @@ Returns: a list-of-lists corresponding to the columns from listoflists
return column
def collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):
"""
Averages data in collapsecol, keeping all unique items in keepcols
(using unique, which keeps unique LISTS of column numbers), retaining the
unique sets of values in keepcols, the mean for each. Setting fcn1
and/or fcn2 to point to a function rather than None (e.g., stats.sterr, len)
will append those results (e.g., the sterr, N) after each calculated mean.
cfcn is the collapse function to apply (defaults to mean, defined here in the
pstat module to avoid circular imports with stats.py, but harmonicmean or
others could be passed).
Usage: collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
Returns: a list of lists with all unique permutations of entries appearing in
columns ("conditions") specified by keepcols, abutted with the result of
cfcn (if cfcn=None, defaults to the mean) of each column specified by
collapsecols.
"""
def collmean (inlist):
s = 0
for item in inlist:
s = s + item
return s/float(len(inlist))
if type(keepcols) not in [ListType,TupleType]:
keepcols = [keepcols]
if type(collapsecols) not in [ListType,TupleType]:
collapsecols = [collapsecols]
if cfcn == None:
cfcn = collmean
if keepcols == []:
means = [0]*len(collapsecols)
for i in range(len(collapsecols)):
avgcol = colex(listoflists,collapsecols[i])
means[i] = cfcn(avgcol)
if fcn1:
try:
test = fcn1(avgcol)
except:
test = 'N/A'
means[i] = [means[i], test]
if fcn2:
try:
test = fcn2(avgcol)
except:
test = 'N/A'
try:
means[i] = means[i] + [len(avgcol)]
except TypeError:
means[i] = [means[i],len(avgcol)]
return means
else:
values = colex(listoflists,keepcols)
uniques = unique(values)
uniques.sort()
newlist = []
if type(keepcols) not in [ListType,TupleType]: keepcols = [keepcols]
for item in uniques:
if type(item) not in [ListType,TupleType]: item =[item]
tmprows = linexand(listoflists,keepcols,item)
for col in collapsecols:
avgcol = colex(tmprows,col)
item.append(cfcn(avgcol))
if fcn1 <> None:
try:
test = fcn1(avgcol)
except:
test = 'N/A'
item.append(test)
if fcn2 <> None:
try:
test = fcn2(avgcol)
except:
test = 'N/A'
item.append(test)
newlist.append(item)
return newlist
def dm (listoflists,criterion):
"""
Returns rows from the passed list of lists that meet the criteria in
the passed criterion expression (a string as a function of x; e.g., 'x[3]>=9'
will return all rows where the 4th column>=9 and "x[2]=='N'" will return rows
with column 2 equal to the string 'N').
Usage: dm (listoflists, criterion)
Returns: rows from listoflists that meet the specified criterion.
"""
function = 'filter(lambda x: '+criterion+',listoflists)'
lines = eval(function)
return lines
def flat(l):
"""
Returns the flattened version of a '2D' list. List-correlate to the a.ravel()()
method of NumPy arrays.
Usage: flat(l)
"""
newl = []
for i in range(len(l)):
for j in range(len(l[i])):
newl.append(l[i][j])
return newl
def linexand (listoflists,columnlist,valuelist):
def linexand(listoflists, columnlist, valuelist):
"""
Returns the rows of a list of lists where col (from columnlist) = val
(from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).
@ -344,319 +157,24 @@ len(columnlist) must equal len(valuelist).
Usage: linexand (listoflists,columnlist,valuelist)
Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i
"""
if type(columnlist) not in [ListType,TupleType]:
if type(columnlist) not in [ListType, TupleType]:
columnlist = [columnlist]
if type(valuelist) not in [ListType,TupleType]:
if type(valuelist) not in [ListType, TupleType]:
valuelist = [valuelist]
criterion = ''
for i in range(len(columnlist)):
if type(valuelist[i])==StringType:
if type(valuelist[i]) == StringType:
critval = '\'' + valuelist[i] + '\''
else:
critval = str(valuelist[i])
criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'
criterion = criterion + ' x[' + str(columnlist[i]) + ']==' + critval + ' and'
criterion = criterion[0:-3] # remove the "and" after the last crit
function = 'filter(lambda x: '+criterion+',listoflists)'
function = 'filter(lambda x: ' + criterion + ',listoflists)'
lines = eval(function)
return lines
def linexor (listoflists,columnlist,valuelist):
"""
Returns the rows of a list of lists where col (from columnlist) = val
(from valuelist) for ANY pair of values (colunmlist[i],valuelist[i[).
One value is required for each column in columnlist. If only one value
exists for columnlist but multiple values appear in valuelist, the
valuelist values are all assumed to pertain to the same column.
Usage: linexor (listoflists,columnlist,valuelist)
Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ANY i
"""
if type(columnlist) not in [ListType,TupleType]:
columnlist = [columnlist]
if type(valuelist) not in [ListType,TupleType]:
valuelist = [valuelist]
criterion = ''
if len(columnlist) == 1 and len(valuelist) > 1:
columnlist = columnlist*len(valuelist)
for i in range(len(columnlist)): # build an exec string
if type(valuelist[i])==StringType:
critval = '\'' + valuelist[i] + '\''
else:
critval = str(valuelist[i])
criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'
criterion = criterion[0:-2] # remove the "or" after the last crit
function = 'filter(lambda x: '+criterion+',listoflists)'
lines = eval(function)
return lines
def linedelimited (inlist,delimiter):
"""
Returns a string composed of elements in inlist, with each element
separated by 'delimiter.' Used by function writedelimited. Use '\t'
for tab-delimiting.
Usage: linedelimited (inlist,delimiter)
"""
outstr = ''
for item in inlist:
if type(item) <> StringType:
item = str(item)
outstr = outstr + item + delimiter
outstr = outstr[0:-1]
return outstr
def lineincols (inlist,colsize):
"""
Returns a string composed of elements in inlist, with each element
right-aligned in columns of (fixed) colsize.
Usage: lineincols (inlist,colsize) where colsize is an integer
"""
outstr = ''
for item in inlist:
if type(item) <> StringType:
item = str(item)
size = len(item)
if size <= colsize:
for i in range(colsize-size):
outstr = outstr + ' '
outstr = outstr + item
else:
outstr = outstr + item[0:colsize+1]
return outstr
def lineincustcols (inlist,colsizes):
"""
Returns a string composed of elements in inlist, with each element
right-aligned in a column of width specified by a sequence colsizes. The
length of colsizes must be greater than or equal to the number of columns
in inlist.
Usage: lineincustcols (inlist,colsizes)
Returns: formatted string created from inlist
"""
outstr = ''
for i in range(len(inlist)):
if type(inlist[i]) <> StringType:
item = str(inlist[i])
else:
item = inlist[i]
size = len(item)
if size <= colsizes[i]:
for j in range(colsizes[i]-size):
outstr = outstr + ' '
outstr = outstr + item
else:
outstr = outstr + item[0:colsizes[i]+1]
return outstr
def list2string (inlist,delimit=' '):
"""
Converts a 1D list to a single long string for file output, using
the string.join function.
Usage: list2string (inlist,delimit=' ')
Returns: the string created from inlist
"""
stringlist = map(makestr,inlist)
return string.join(stringlist,delimit)
def makelol(inlist):
"""
Converts a 1D list to a 2D list (i.e., a list-of-lists). Useful when you
want to use put() to write a 1D list one item per line in the file.
Usage: makelol(inlist)
Returns: if l = [1,2,'hi'] then returns [[1],[2],['hi']] etc.
"""
x = []
for item in inlist:
x.append([item])
return x
def makestr (x):
if type(x) <> StringType:
x = str(x)
return x
def printcc (lst,extra=2):
"""
Prints a list of lists in columns, customized by the max size of items
within the columns (max size of items in col, plus 'extra' number of spaces).
Use 'dashes' or '\\n' in the list-of-lists to print dashes or blank lines,
respectively.
Usage: printcc (lst,extra=2)
Returns: None
"""
if type(lst[0]) not in [ListType,TupleType]:
lst = [lst]
rowstokill = []
list2print = copy.deepcopy(lst)
for i in range(len(lst)):
if lst[i] == ['\n'] or lst[i]=='\n' or lst[i]=='dashes' or lst[i]=='' or lst[i]==['']:
rowstokill = rowstokill + [i]
rowstokill.reverse() # delete blank rows from the end
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = colex(list2print,col)
items = map(makestr,items)
maxsize[col] = max(map(len,items)) + extra
for row in lst:
if row == ['\n'] or row == '\n' or row == '' or row == ['']:
print
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
print lineincustcols(dashes,maxsize)
else:
print lineincustcols(row,maxsize)
return None
def printincols (listoflists,colsize):
"""
Prints a list of lists in columns of (fixed) colsize width, where
colsize is an integer.
Usage: printincols (listoflists,colsize)
Returns: None
"""
for row in listoflists:
print lineincols(row,colsize)
return None
def pl (listoflists):
"""
Prints a list of lists, 1 list (row) at a time.
Usage: pl(listoflists)
Returns: None
"""
for row in listoflists:
if row[-1] == '\n':
print row,
else:
print row
return None
def printl(listoflists):
"""Alias for pl."""
pl(listoflists)
return
def replace (inlst,oldval,newval):
"""
Replaces all occurrences of 'oldval' with 'newval', recursively.
Usage: replace (inlst,oldval,newval)
"""
lst = inlst*1
for i in range(len(lst)):
if type(lst[i]) not in [ListType,TupleType]:
if lst[i]==oldval: lst[i]=newval
else:
lst[i] = replace(lst[i],oldval,newval)
return lst
def recode (inlist,listmap,cols=None):
"""
Changes the values in a list to a new set of values (useful when
you need to recode data from (e.g.) strings to numbers. cols defaults
to None (meaning all columns are recoded).
Usage: recode (inlist,listmap,cols=None) cols=recode cols, listmap=2D list
Returns: inlist with the appropriate values replaced with new ones
"""
lst = copy.deepcopy(inlist)
if cols != None:
if type(cols) not in [ListType,TupleType]:
cols = [cols]
for col in cols:
for row in range(len(lst)):
try:
idx = colex(listmap,0).index(lst[row][col])
lst[row][col] = listmap[idx][1]
except ValueError:
pass
else:
for row in range(len(lst)):
for col in range(len(lst)):
try:
idx = colex(listmap,0).index(lst[row][col])
lst[row][col] = listmap[idx][1]
except ValueError:
pass
return lst
def remap (listoflists,criterion):
"""
Remaps values in a given column of a 2D list (listoflists). This requires
a criterion as a function of 'x' so that the result of the following is
returned ... map(lambda x: 'criterion',listoflists).
Usage: remap(listoflists,criterion) criterion=string
Returns: remapped version of listoflists
"""
function = 'map(lambda x: '+criterion+',listoflists)'
lines = eval(function)
return lines
def roundlist (inlist,digits):
"""
Goes through each element in a 1D or 2D inlist, and applies the following
function to all elements of FloatType ... round(element,digits).
Usage: roundlist(inlist,digits)
Returns: list with rounded floats
"""
if type(inlist[0]) in [IntType, FloatType]:
inlist = [inlist]
l = inlist*1
for i in range(len(l)):
for j in range(len(l[i])):
if type(l[i][j])==FloatType:
l[i][j] = round(l[i][j],digits)
return l
def sortby(listoflists,sortcols):
"""
Sorts a list of lists on the column(s) specified in the sequence
sortcols.
Usage: sortby(listoflists,sortcols)
Returns: sorted list, unchanged column ordering
"""
newlist = abut(colex(listoflists,sortcols),listoflists)
newlist.sort()
try:
numcols = len(sortcols)
except TypeError:
numcols = 1
crit = '[' + str(numcols) + ':]'
newlist = colex(newlist,crit)
return newlist
def unique (inlist):
def unique(inlist):
"""
Returns all unique items in the passed list. If the a list-of-lists
is passed, unique LISTS are found (i.e., items in the first dimension are
@ -671,398 +189,3 @@ Returns: the unique elements (or rows) in inlist
uniques.append(item)
return uniques
def duplicates(inlist):
"""
Returns duplicate items in the FIRST dimension of the passed list.
Usage: duplicates (inlist)
"""
dups = []
for i in range(len(inlist)):
if inlist[i] in inlist[i+1:]:
dups.append(inlist[i])
return dups
def nonrepeats(inlist):
"""
Returns items that are NOT duplicated in the first dim of the passed list.
Usage: nonrepeats (inlist)
"""
nonrepeats = []
for i in range(len(inlist)):
if inlist.count(inlist[i]) == 1:
nonrepeats.append(inlist[i])
return nonrepeats
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
try: # DEFINE THESE *ONLY* IF numpy IS AVAILABLE
import numpy as N
def aabut (source, *args):
"""
Like the |Stat abut command. It concatenates two arrays column-wise
and returns the result. CAUTION: If one array is shorter, it will be
repeated until it is as long as the other.
Usage: aabut (source, args) where args=any # of arrays
Returns: an array as long as the LONGEST array past, source appearing on the
'left', arrays in <args> attached on the 'right'.
"""
if len(source.shape)==1:
width = 1
source = N.resize(source,[source.shape[0],width])
else:
width = source.shape[1]
for addon in args:
if len(addon.shape)==1:
width = 1
addon = N.resize(addon,[source.shape[0],width])
else:
width = source.shape[1]
if len(addon) < len(source):
addon = N.resize(addon,[source.shape[0],addon.shape[1]])
elif len(source) < len(addon):
source = N.resize(source,[addon.shape[0],source.shape[1]])
source = N.concatenate((source,addon),1)
return source
def acolex (a,indices,axis=1):
"""
Extracts specified indices (a list) from passed array, along passed
axis (column extraction is default). BEWARE: A 1D array is presumed to be a
column-array (and that the whole array will be returned as a column).
Usage: acolex (a,indices,axis=1)
Returns: the columns of a specified by indices
"""
if type(indices) not in [ListType,TupleType,N.ndarray]:
indices = [indices]
if len(N.shape(a)) == 1:
cols = N.resize(a,[a.shape[0],1])
else:
# print a[:3]
cols = N.take(a,indices,axis)
# print cols[:3]
return cols
def acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):
"""
Averages data in collapsecol, keeping all unique items in keepcols
(using unique, which keeps unique LISTS of column numbers), retaining
the unique sets of values in keepcols, the mean for each. If stderror or
N of the mean are desired, set either or both parameters to 1.
Usage: acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
Returns: unique 'conditions' specified by the contents of columns specified
by keepcols, abutted with the mean(s) of column(s) specified by
collapsecols
"""
def acollmean (inarray):
return N.sum(N.ravel(inarray))
if type(keepcols) not in [ListType,TupleType,N.ndarray]:
keepcols = [keepcols]
if type(collapsecols) not in [ListType,TupleType,N.ndarray]:
collapsecols = [collapsecols]
if cfcn == None:
cfcn = acollmean
if keepcols == []:
avgcol = acolex(a,collapsecols)
means = N.sum(avgcol)/float(len(avgcol))
if fcn1<>None:
try:
test = fcn1(avgcol)
except:
test = N.array(['N/A']*len(means))
means = aabut(means,test)
if fcn2<>None:
try:
test = fcn2(avgcol)
except:
test = N.array(['N/A']*len(means))
means = aabut(means,test)
return means
else:
if type(keepcols) not in [ListType,TupleType,N.ndarray]:
keepcols = [keepcols]
values = colex(a,keepcols) # so that "item" can be appended (below)
uniques = unique(values) # get a LIST, so .sort keeps rows intact
uniques.sort()
newlist = []
for item in uniques:
if type(item) not in [ListType,TupleType,N.ndarray]:
item =[item]
tmprows = alinexand(a,keepcols,item)
for col in collapsecols:
avgcol = acolex(tmprows,col)
item.append(acollmean(avgcol))
if fcn1<>None:
try:
test = fcn1(avgcol)
except:
test = 'N/A'
item.append(test)
if fcn2<>None:
try:
test = fcn2(avgcol)
except:
test = 'N/A'
item.append(test)
newlist.append(item)
try:
new_a = N.array(newlist)
except TypeError:
new_a = N.array(newlist,'O')
return new_a
def adm (a,criterion):
"""
Returns rows from the passed list of lists that meet the criteria in
the passed criterion expression (a string as a function of x).
Usage: adm (a,criterion) where criterion is like 'x[2]==37'
"""
function = 'filter(lambda x: '+criterion+',a)'
lines = eval(function)
try:
lines = N.array(lines)
except:
lines = N.array(lines,dtype='O')
return lines
def isstring(x):
if type(x)==StringType:
return 1
else:
return 0
def alinexand (a,columnlist,valuelist):
"""
Returns the rows of an array where col (from columnlist) = val
(from valuelist). One value is required for each column in columnlist.
Usage: alinexand (a,columnlist,valuelist)
Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i
"""
if type(columnlist) not in [ListType,TupleType,N.ndarray]:
columnlist = [columnlist]
if type(valuelist) not in [ListType,TupleType,N.ndarray]:
valuelist = [valuelist]
criterion = ''
for i in range(len(columnlist)):
if type(valuelist[i])==StringType:
critval = '\'' + valuelist[i] + '\''
else:
critval = str(valuelist[i])
criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'
criterion = criterion[0:-3] # remove the "and" after the last crit
return adm(a,criterion)
def alinexor (a,columnlist,valuelist):
"""
Returns the rows of an array where col (from columnlist) = val (from
valuelist). One value is required for each column in columnlist.
The exception is if either columnlist or valuelist has only 1 value,
in which case that item will be expanded to match the length of the
other list.
Usage: alinexor (a,columnlist,valuelist)
Returns: the rows of a where columnlist[i]=valuelist[i] for ANY i
"""
if type(columnlist) not in [ListType,TupleType,N.ndarray]:
columnlist = [columnlist]
if type(valuelist) not in [ListType,TupleType,N.ndarray]:
valuelist = [valuelist]
criterion = ''
if len(columnlist) == 1 and len(valuelist) > 1:
columnlist = columnlist*len(valuelist)
elif len(valuelist) == 1 and len(columnlist) > 1:
valuelist = valuelist*len(columnlist)
for i in range(len(columnlist)):
if type(valuelist[i])==StringType:
critval = '\'' + valuelist[i] + '\''
else:
critval = str(valuelist[i])
criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'
criterion = criterion[0:-2] # remove the "or" after the last crit
return adm(a,criterion)
def areplace (a,oldval,newval):
"""
Replaces all occurrences of oldval with newval in array a.
Usage: areplace(a,oldval,newval)
"""
return N.where(a==oldval,newval,a)
def arecode (a,listmap,col='all'):
"""
Remaps the values in an array to a new set of values (useful when
you need to recode data from (e.g.) strings to numbers as most stats
packages require. Can work on SINGLE columns, or 'all' columns at once.
@@@BROKEN 2007-11-26
Usage: arecode (a,listmap,col='all')
Returns: a version of array a where listmap[i][0] = (instead) listmap[i][1]
"""
ashape = a.shape
if col == 'all':
work = a.ravel()
else:
work = acolex(a,col)
work = work.ravel()
for pair in listmap:
if type(pair[1]) == StringType or work.dtype.char=='O' or a.dtype.char=='O':
work = N.array(work,dtype='O')
a = N.array(a,dtype='O')
for i in range(len(work)):
if work[i]==pair[0]:
work[i] = pair[1]
if col == 'all':
return N.reshape(work,ashape)
else:
return N.concatenate([a[:,0:col],work[:,N.newaxis],a[:,col+1:]],1)
else: # must be a non-Object type array and replacement
work = N.where(work==pair[0],pair[1],work)
return N.concatenate([a[:,0:col],work[:,N.newaxis],a[:,col+1:]],1)
def arowcompare(row1, row2):
"""
Compares two rows from an array, regardless of whether it is an
array of numbers or of python objects (which requires the cmp function).
@@@PURPOSE? 2007-11-26
Usage: arowcompare(row1,row2)
Returns: an array of equal length containing 1s where the two rows had
identical elements and 0 otherwise
"""
return
if row1.dtype.char=='O' or row2.dtype=='O':
cmpvect = N.logical_not(abs(N.array(map(cmp,row1,row2)))) # cmp fcn gives -1,0,1
else:
cmpvect = N.equal(row1,row2)
return cmpvect
def arowsame(row1, row2):
"""
Compares two rows from an array, regardless of whether it is an
array of numbers or of python objects (which requires the cmp function).
Usage: arowsame(row1,row2)
Returns: 1 if the two rows are identical, 0 otherwise.
"""
cmpval = N.alltrue(arowcompare(row1,row2))
return cmpval
def asortrows(a,axis=0):
"""
Sorts an array "by rows". This differs from the Numeric.sort() function,
which sorts elements WITHIN the given axis. Instead, this function keeps
the elements along the given axis intact, but shifts them 'up or down'
relative to one another.
Usage: asortrows(a,axis=0)
Returns: sorted version of a
"""
return N.sort(a,axis=axis,kind='mergesort')
def aunique(inarray):
"""
Returns unique items in the FIRST dimension of the passed array. Only
works on arrays NOT including string items.
Usage: aunique (inarray)
"""
uniques = N.array([inarray[0]])
if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY
for item in inarray[1:]:
if N.add.reduce(N.equal(uniques,item).ravel()) == 0:
try:
uniques = N.concatenate([uniques,N.array[N.newaxis,:]])
except TypeError:
uniques = N.concatenate([uniques,N.array([item])])
else: # IT MUST BE A 2+D ARRAY
if inarray.dtype.char != 'O': # not an Object array
for item in inarray[1:]:
if not N.sum(N.alltrue(N.equal(uniques,item),1)):
try:
uniques = N.concatenate( [uniques,item[N.newaxis,:]] )
except TypeError: # the item to add isn't a list
uniques = N.concatenate([uniques,N.array([item])])
else:
pass # this item is already in the uniques array
else: # must be an Object array, alltrue/equal functions don't work
for item in inarray[1:]:
newflag = 1
for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=>
test = N.sum(abs(N.array(map(cmp,item,unq))))
if test == 0: # if item identical to any 1 row in uniques
newflag = 0 # then not a novel item to add
break
if newflag == 1:
try:
uniques = N.concatenate( [uniques,item[N.newaxis,:]] )
except TypeError: # the item to add isn't a list
uniques = N.concatenate([uniques,N.array([item])])
return uniques
def aduplicates(inarray):
"""
Returns duplicate items in the FIRST dimension of the passed array. Only
works on arrays NOT including string items.
Usage: aunique (inarray)
"""
inarray = N.array(inarray)
if len(inarray.shape) == 1: # IF IT'S A 1D ARRAY
dups = []
inarray = inarray.tolist()
for i in range(len(inarray)):
if inarray[i] in inarray[i+1:]:
dups.append(inarray[i])
dups = aunique(dups)
else: # IT MUST BE A 2+D ARRAY
dups = []
aslist = inarray.tolist()
for i in range(len(aslist)):
if aslist[i] in aslist[i+1:]:
dups.append(aslist[i])
dups = unique(dups)
dups = N.array(dups)
return dups
except ImportError: # IF NUMERIC ISN'T AVAILABLE, SKIP ALL arrayfuncs
pass

3596
tests/util/vendor/strangman/stats.py поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

22
tests/util/vendor/strangman/statstest.py поставляемый
Просмотреть файл

@ -28,10 +28,10 @@ print 'moment:',stats.moment(l),stats.moment(lf),stats.moment(a),stats.moment(af
print 'variation:',stats.variation(l),stats.variation(a),stats.variation(lf),stats.variation(af)
print 'skew:',stats.skew(l),stats.skew(lf),stats.skew(a),stats.skew(af)
print 'kurtosis:',stats.kurtosis(l),stats.kurtosis(lf),stats.kurtosis(a),stats.kurtosis(af)
print 'tmean:',stats.tmean(a,(5,17)),stats.tmean(af,(5,17))
print 'tvar:',stats.tvar(a,(5,17)),stats.tvar(af,(5,17))
print 'tstdev:',stats.tstdev(a,(5,17)),stats.tstdev(af,(5,17))
print 'tsem:',stats.tsem(a,(5,17)),stats.tsem(af,(5,17))
print 'mean:',stats.mean(a),stats.mean(af)
print 'var:',stats.var(a),stats.var(af)
print 'stdev:',stats.stdev(a),stats.stdev(af)
print 'sem:',stats.sem(a),stats.sem(af)
print 'describe:'
print stats.describe(l)
print stats.describe(lf)
@ -92,7 +92,7 @@ print stats.trim1(a,.2)
print stats.trim1(af,.2)
print '\nCORRELATION'
#execfile('testpairedstats.py')
# execfile('testpairedstats.py')
l = range(1,21)
a = N.array(l)
@ -100,15 +100,15 @@ ll = [l]*5
aa = N.array(ll)
m = range(4,24)
m[10] = 34
m[10] = 34
b = N.array(m)
pb = [0]*9 + [1]*11
apb = N.array(pb)
print 'paired:'
#stats.paired(l,m)
#stats.paired(a,b)
# stats.paired(l,m)
# stats.paired(a,b)
print
print
@ -168,13 +168,13 @@ ll = [l]*5
aa = N.array(ll)
m = range(4,24)
m[10] = 34
m[10] = 34
b = N.array(m)
print '\n\nF_oneway:'
print stats.F_oneway(l,m)
print stats.F_oneway(l,m)
print stats.F_oneway(a,b)
#print 'F_value:',stats.F_value(l),stats.F_value(a)
# print 'F_value:',stats.F_value(l),stats.F_value(a)
print '\nSUPPORT'
print 'sum:',stats.sum(l),stats.sum(lf),stats.sum(a),stats.sum(af)