Enable analyzing flaky sequence failures without full network logs (#254)
* Make it possible to analyze flaky sequence failures without parsing the full network logs. This update logs a sample request for sequence failures in the spec coverage file. The seq.render() code path now returns a meaningful error for all failure cases and returns the full sequence information for sequence failures, so this data can be added to the spec coverage log.
Parent: e2b25ebe16
Commit: 1b0bab380c
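The intended workflow is sketched below as a hypothetical post-processing script (not part of this change; the file path is illustrative): the new `sequence_failure_sample_request` entries can be pulled directly out of `speccov.json` instead of re-parsing the network logs.

```python
# Hypothetical helper: list the concrete requests that failed during
# sequence re-rendering, using only the spec coverage file.
import json

def sequence_failures(speccov_path):
    with open(speccov_path) as f:
        coverage = json.load(f)
    failures = []
    for request_id, entry in coverage.items():
        sample = entry.get("sequence_failure_sample_request")
        if sample is not None:
            failures.append({
                "request_id": request_id,
                "verb_endpoint": entry.get("verb_endpoint"),
                "failed_prefix_request": f"{sample.get('request_verb')} {sample.get('request_uri')}",
                "response_status_code": sample.get("response_status_code"),
            })
    return failures

if __name__ == "__main__":
    # Illustrative path; point this at the logs directory of a Test run.
    for failure in sequence_failures("logs/speccov.json"):
        print(failure)
```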
@@ -69,44 +69,52 @@ During each Test run a `speccov.json` file will be created in the logs directory

#### Example of a single request from the json file:

```
"5915766984a7c5deaaae43cae4cfb810c138d0f2": {
"5915766984a7c5deaaae43cae4cfb810c138d0f2_1__1": {
"verb": "PUT",
"endpoint": "/blog/posts/{postId}",
"verb_endpoint": "PUT /blog/posts/{postId}",
"valid": 0,
"matching_prefix": {
"id": "1d7752f6d5ca3e03e423967a57335038a3d1bb70",
"valid": 1
},
"matching_prefix": [
{
"id": "1d7752f6d5ca3e03e423967a57335038a3d1bb70_1"
}
],
"invalid_due_to_sequence_failure": 0,
"invalid_due_to_resource_failure": 0,
"invalid_due_to_parser_failure": 0,
"invalid_due_to_500": 0,
"status_code": "400",
"status_text": "BAD REQUEST",
"error_message": "{\n \"errors\": {\n \"id\": \"'5882' is not of type 'integer'\"\n },\n \"message\": \"Input payload validation failed\"\n}\n",
"status_code": null,
"status_text": null,
"error_message": "{\n \"errors\": {\n \"id\": \"'5872' is not of type 'integer'\"\n },\n \"message\": \"Input payload validation failed\"\n}\n",
"request_order": 4,
"sample_request": {
"request_sent_timestamp": null,
"response_received_timestamp": "2021-03-31 18:20:14",
"request_uri": "/api/blog/posts/5882",
"response_received_timestamp": "2021-07-02 05:10:12",
"request_verb": "PUT",
"request_uri": "/api/blog/posts/5872",
"request_headers": [
"Accept: application/json",
"Host: localhost:8888",
"Content-Type: application/json"
],
"request_body": "{\n \"id\":\"5882\",\n \"checksum\":\"fuzzstring\",\n \"body\":\"fuzzstring\"}\r\n",
"request_body": "{\n \"id\":\"5872\",\n \"checksum\":\"fuzzstring\",\n \"body\":\"first blog\"}\r\n",
"response_status_code": "400",
"response_status_text": "BAD REQUEST",
"response_headers": [
"Content-Type: application/json",
"Content-Length: 124",
"Server: Werkzeug/0.16.0 Python/3.7.8",
"Date: Wed, 31 Mar 2021 18:20:14 GMT"
"Date: Fri, 02 Jul 2021 05:10:12 GMT"
],
"response_body": "{\n \"errors\": {\n \"id\": \"'5882' is not of type 'integer'\"\n },\n \"message\": \"Input payload validation failed\"\n}\n"
"response_body": "{\n \"errors\": {\n \"id\": \"'5872' is not of type 'integer'\"\n },\n \"message\": \"Input payload validation failed\"\n}\n"
},
"tracked_parameters": {
"per_page": ["2"],
"page": ["1"]
"id": [
"123"
],
"body": [
"\"first blog\""
]
}
},
```

@@ -129,7 +137,8 @@ the appropriate __"invalid_due_to_..."__ value will be set to 1.

* "500" will be set if a 5xx bug was detected.
* The __"status_code"__ and __"status_text"__ values are the response values received from the server.
* The __"sample_request"__ contains the concrete values of the sent request and received response for which
the coverage data is being reported.
the coverage data is being reported. This property is optional.
* The __"sequence_failure_sample_request"__ contains the concrete values of the sent request that failed when a valid sequence was being re-rendered. This property is optional.
* The __"error_message"__ value will be set to the response body if the request was not "valid".
* The __"request_order"__ value is the 0-indexed order in which the request was sent.
* Requests sent during "preprocessing" or "postprocessing" will explicitly say so.

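A hedged sketch of how the counters documented above might be consumed, assuming only the field names shown in the example (the path is illustrative):

```python
# Hypothetical sketch: tally why requests were invalid, using the
# "invalid_due_to_..." counters from speccov.json.
import json
from collections import Counter

REASONS = [
    "invalid_due_to_sequence_failure",
    "invalid_due_to_resource_failure",
    "invalid_due_to_parser_failure",
    "invalid_due_to_500",
]

def summarize(speccov_path):
    with open(speccov_path) as f:
        coverage = json.load(f)
    summary = Counter()
    for entry in coverage.values():
        if entry.get("valid") == 1:
            summary["valid"] += 1
            continue
        for reason in REASONS:
            if entry.get(reason) == 1:
                summary[reason] += 1
    return summary

if __name__ == "__main__":
    print(summarize("logs/speccov.json"))
```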
@@ -213,7 +213,8 @@ def render_one(seq_to_render, ith, checkers, generation, global_lock):

or renderings.valid or n_invalid_renderings < 1:
apply_checkers(checkers, renderings, global_lock)

# If renderings.sequence is None it means there is nothing left to render.
# If renderings.sequence is None, it means there is nothing left to render or
# there was an error that prevents further testing, such as an error sending the request.
if renderings.sequence is None:
break

@@ -241,17 +242,21 @@ def render_one(seq_to_render, ith, checkers, generation, global_lock):

# If in test mode, log the spec coverage.
if Settings().fuzzing_mode == 'directed-smoke-test':
logged_renderings = renderings if renderings.sequence else prev_renderings
logged_renderings = renderings if renderings.sequence or renderings.failure_info else prev_renderings
if logged_renderings:
if logged_renderings.sequence:
logged_renderings.sequence.last_request.stats.set_all_stats(logged_renderings)
logger.print_request_coverage(rendered_sequence=logged_renderings, log_rendered_hash=True)
else:
# There was a failure rendering the sequence. This needs to be logged to the spec coverage file.
# The entire sequence was not rendered. However, failure information may be available.
current_seq.last_request.stats.set_all_stats(logged_renderings)
logger.print_request_coverage(request=current_seq.last_request, log_rendered_hash=False)
else:
# There was a failure rendering the sequence because no valid combinations were found.
# This needs to be logged to the spec coverage file.
current_seq.last_request.stats.valid = 0

if current_seq.length > 1:
current_seq.last_request.stats.set_matching_prefix(current_seq.prefix)

logger.print_request_coverage(request=current_seq.last_request, log_rendered_hash=False)
# Else, there was a failure rendering the sequence.
# Never rendered requests will be printed at the end of the fuzzing loop, so there is no need to

@@ -269,14 +274,10 @@ def render_one(seq_to_render, ith, checkers, generation, global_lock):

apply_checkers(checkers, renderings, global_lock)

# If in exhaustive test mode, log the spec coverage.
if renderings.sequence:
if Settings().fuzzing_mode == 'test-all-combinations':
if renderings and renderings.sequence:
renderings.sequence.last_request.stats.set_all_stats(renderings)
logger.print_request_coverage(rendered_sequence=renderings, log_rendered_hash=True)

# Save the previous rendering in order to log statistics in cases when all renderings
# were invalid
prev_renderings = renderings
else:
print("Unsupported fuzzing_mode:", Settings().fuzzing_mode)
assert False

@@ -403,6 +404,7 @@ def render_with_cache(seq_collection, fuzzing_pool, checkers, generation, global

# Print information about the attempt to render this request to main.txt
print_rendering_to_main_txt(current_seq)
logger.write_to_main(f"{formatting.timestamp()}: Rendering INVALID")

logger.format_rendering_stats_definition(
current_seq.last_request, GrammarRequestCollection().candidate_values_pool
)

@@ -35,6 +35,7 @@ class FailureInformation(Enum):

RESOURCE_CREATION = 2
PARSER = 3
BUG = 4
MISSING_STATUS_CODE = 5

class RenderedRequestStats(object):
""" Class used for encapsulating data about a specific rendered request and its response.

@@ -45,10 +46,13 @@ class RenderedRequestStats(object):

self.request_sent_timestamp = None
self.response_received_timestamp = None

self.request_verb = None
self.request_uri = None
self.request_headers = None
self.request_body = None

self.response_status_code = None
self.response_status_text = None
self.response_headers = None
self.response_body = None

@@ -64,7 +68,9 @@ class RenderedRequestStats(object):

try:
split_body = request_text.split(messaging.DELIM)
split_headers = split_body[0].split("\r\n")
self.request_uri = split_headers[0].split(" ")[1]
verb_and_uri = split_headers[0].split(" ")
self.request_verb = verb_and_uri[0]
self.request_uri = verb_and_uri[1]
self.request_headers = split_headers[1:]

if len(split_body) > 0 and split_body[1]:

@@ -83,6 +89,8 @@ class RenderedRequestStats(object):

@rtype : None

"""
self.response_status_code = final_request_response.status_code
self.response_status_text = final_request_response.status_text
self.response_headers = final_request_response.headers
self.response_body = final_request_response.body
self.response_received_timestamp = final_response_datetime

@@ -101,7 +109,8 @@ class SmokeTestStats(object):

self.status_code = None
self.status_text = None

self.sample_request = RenderedRequestStats()
self.sample_request = None
self.sequence_failure_sample_request = None
self.tracked_parameters = {}

def set_matching_prefix(self, sequence_prefix):

@@ -117,26 +126,38 @@ class SmokeTestStats(object):

self.matching_prefix = prefix_ids

def set_all_stats(self, renderings):

if self.failure is not None and self.failure != FailureInformation.SEQUENCE:
self.status_code = renderings.final_request_response.status_code
self.status_text = renderings.final_request_response.status_text
# Get the last rendered request. The corresponding response should be
# the last received response.
self.sample_request.set_request_stats(
renderings.sequence.sent_request_data_list[-1].rendered_data)
self.sample_request.set_response_stats(renderings.final_request_response,
renderings.final_response_datetime)

response_body = renderings.final_request_response.body
if renderings.sequence:
self.valid = 1 if renderings.valid else 0
if self.valid:
self.has_valid_rendering = 1
self.failure = renderings.failure_info

# Get the last rendered request. The corresponding response should be
# the last received response.
if renderings.sequence:
self.set_matching_prefix(renderings.sequence.prefix)
if self.failure == FailureInformation.SEQUENCE:
self.sequence_failure_sample_request = RenderedRequestStats()
self.sequence_failure_sample_request.set_request_stats(
renderings.sequence.sent_request_data_list[-1].rendered_data)
self.sequence_failure_sample_request.set_response_stats(renderings.final_request_response,
renderings.final_response_datetime)
else:
self.sample_request = RenderedRequestStats()
self.sample_request.set_request_stats(
renderings.sequence.sent_request_data_list[-1].rendered_data)
self.sample_request.set_response_stats(renderings.final_request_response,
renderings.final_response_datetime)
response_body = renderings.final_request_response.body

if not renderings.valid:
self.error_msg = response_body

self.set_matching_prefix(renderings.sequence.prefix)

# Set tracked parameters
last_req = renderings.sequence.last_request

@@ -324,6 +324,9 @@ class Sequence(object):

self._sent_request_data_list = []

datetime_format = "%Y-%m-%d %H:%M:%S"
response_datetime_str = None
timestamp_micro = None
for rendered_data, parser, tracked_parameters in\
request.render_iter(candidate_values_pool,
skip=request._current_combination_id,

@@ -352,10 +355,15 @@

sequence_failed = False
request._tracked_parameters = {}
request.update_tracked_parameters(tracked_parameters)

# Step A: Static template rendering
# Render last known valid combination of primitive type values
# for every request until the last
current_request = None
prev_request = None
prev_response = None
for i in range(len(self.requests) - 1):
last_tested_request_idx = i
prev_request = self.requests[i]
prev_rendered_data, prev_parser, tracked_parameters =\
prev_request.render_current(candidate_values_pool,

@@ -390,6 +398,11 @@ class Sequence(object):

self.append_data_to_sent_list(prev_rendered_data, prev_parser, prev_response, prev_producer_timing_delay, prev_req_async_wait)

# Record the time at which the response was received
datetime_now = datetime.datetime.now(datetime.timezone.utc)
response_datetime_str = datetime_now.strftime(datetime_format)
timestamp_micro = int(datetime_now.timestamp()*10**6)

if not prev_status_code:
logger.write_to_main(f"Error: Failed to get status code during valid sequence re-rendering.\n")
sequence_failed = True

@@ -411,6 +424,15 @@ class Sequence(object):

sequence_failed = True
break

rendering_is_valid = not prev_parser_threw_exception\
and not resource_error\
and prev_response.has_valid_code()

if not rendering_is_valid:
logger.write_to_main("Error: Invalid rendering occurred during valid sequence re-rendering.\n")
sequence_failed = True
break

# If the previous request is a resource generator and we did not perform an async resource
# creation wait, then wait for the specified duration in order for the backend to have a
# chance to create the resource.

@@ -418,18 +440,22 @@ class Sequence(object):

print(f"Pausing for {prev_producer_timing_delay} seconds, request is a generator...")
time.sleep(prev_producer_timing_delay)

logger.write_to_main("sequence did not fail")
# register latest client/server interaction
timestamp_micro = int(time.time()*10**6)
self.status_codes.append(status_codes_monitor.RequestExecutionStatus(timestamp_micro,
prev_request.hex_definition,
prev_status_code,
prev_response.has_valid_code(),
False))

# Render candidate value combinations seeking for valid error codes
request._current_combination_id += 1

if sequence_failed:
self.status_codes.append(
status_codes_monitor.RequestExecutionStatus(
int(time.time()*10**6),
timestamp_micro,
request.hex_definition,
RESTLER_INVALID_CODE,
False,

@@ -437,7 +463,21 @@ class Sequence(object):

)
)
Monitor().update_status_codes_monitor(self, self.status_codes, lock)
return RenderedSequence(failure_info=FailureInformation.SEQUENCE)

if lock is not None:
lock.acquire()
# Deep copying here will try copying anything the class has access
# to including the shared client monitor, which we update in the
# above code block holding the lock, but then we release the
# lock and one thread can be updating while another is copying.
# This is a typical nasty read-after-write synchronization bug.
duplicate = copy.deepcopy(self)
if lock is not None:
lock.release()

return RenderedSequence(duplicate, valid=False, failure_info=FailureInformation.SEQUENCE,
final_request_response=prev_response,
response_datetime=response_datetime_str)

# Step B: Dynamic template rendering
# substitute reference placeholders with resolved values

@@ -445,9 +485,6 @@ class Sequence(object):

if not Settings().ignore_dependencies:
rendered_data = self.resolve_dependencies(rendered_data)

# Render candidate value combinations seeking for valid error codes
request._current_combination_id += 1

req_async_wait = Settings().get_max_async_resource_creation_time(request.request_id)

response = request_utilities.send_request_data(rendered_data)

@@ -459,16 +496,18 @@ class Sequence(object):

parser_exception_occurred = not request_utilities.call_response_parser(parser, response_to_parse, request)
status_code = response.status_code
if not status_code:
return RenderedSequence(None)
return RenderedSequence(failure_info=FailureInformation.MISSING_STATUS_CODE)

self.append_data_to_sent_list(rendered_data, parser, response, max_async_wait_time=req_async_wait)

rendering_is_valid = not parser_exception_occurred\
and not resource_error\
and response.has_valid_code()
# register latest client/server interaction and add to the status codes list
response_datetime = datetime.datetime.now(datetime.timezone.utc)
timestamp_micro = int(response_datetime.timestamp()*10**6)

# Record the time at which the response was received
datetime_now = datetime.datetime.now(datetime.timezone.utc)
response_datetime_str=datetime_now.strftime(datetime_format)
timestamp_micro = int(datetime_now.timestamp()*10**6)

self.status_codes.append(status_codes_monitor.RequestExecutionStatus(timestamp_micro,
request.hex_definition,

@@ -505,13 +544,10 @@ class Sequence(object):

if lock is not None:
lock.release()

datetime_format = "%Y-%m-%d %H:%M:%S"
response_datetime=response_datetime.strftime(datetime_format)

# return a rendered clone if response indicates a valid status code
if rendering_is_valid or Settings().ignore_feedback:
return RenderedSequence(duplicate, valid=True, final_request_response=response,
response_datetime=response_datetime)
response_datetime=response_datetime_str)
else:
information = None
if response.has_valid_code():

@@ -523,7 +559,7 @@ class Sequence(object):

information = FailureInformation.BUG
return RenderedSequence(duplicate, valid=False, failure_info=information,
final_request_response=response,
response_datetime=response_datetime)
response_datetime=response_datetime_str)

return RenderedSequence(None)

@@ -184,6 +184,13 @@ class FuzzingLogParser(LogParser):

@rtype : None

"""
def is_seq_or_checker_start(line):
if GENERATION in line and RENDERING_SEQUENCE in line:
return True
if CHECKER_START in line:
return True
return False

with open(self._path, 'r') as file:
try:
line = file.readline()

@@ -205,8 +212,15 @@ class FuzzingLogParser(LogParser):

if REPLAY_START in line:
self._skip_replay(file)

# Handle cases where fewer requests
# are sent due to a sequence failure
if is_seq_or_checker_start(line):
break
if SENDING in line:
seq += self._get_request(line, True)
line = file.readline()
else:
break

# Extend the list of sequences in this log
self._seq_list += [seq]

@@ -214,6 +228,11 @@ class FuzzingLogParser(LogParser):

self._handle_checker(seq, line, file)
line = file.readline()

# Only read the next line if it is not already at the start of the
# next operation to process
if not is_seq_or_checker_start(line):
line = file.readline()

except Exception as err:
print("Failed to read fuzzing log. Log was not a complete test log.\n"
f"{err!s}")

@@ -11,6 +11,8 @@ UNIT_TEST_RESOURCE_IDENTIFIER = '<test!>'

class ParsedRequest:
""" Created by parsing a request string """
def __init__(self, request_str: str, ignore_dynamic_objects=False):
if not request_str or request_str.isspace():
raise Exception("Invalid request: empty payload")
# Extract method from request string
method_split = request_str.split(' ', 1)
self.method = method_split[0]

@@ -39,6 +39,14 @@ class InvalidBody(Exception):

"""
pass

class FlakyBehavior(Exception):
""" To be raised when flakiness is intentionally injected
via a 'flaky' property of the body.
Correct: 'flaky' property set to an odd number
FlakyBehavior: 'flaky' property set to an even number
"""
pass

class ResourceBase:
__metaclass__ = ABCMeta

@@ -82,6 +82,9 @@ class UnitTestResource(ResourceBase):

raise ResourceDoesNotExist()

class ResourceFactory(object):
def __init__(self):
self._flaky_count = 0

def get_resource_object(self, type: str, name: str, body: dict) -> UnitTestResource:
if type == "city":
return City(name, body)

@@ -101,6 +104,12 @@ class ResourceFactory(object):

return Group(name, body)

if type == "A":
# If the body contains a 'flaky' property, fail if it is
# set to an even number.
if 'flaky' in body:
self._flaky_count = self._flaky_count + 1
if (self._flaky_count - 1) % 2 == 1:
raise FlakyBehavior()
return A(name, body)
if type == "B":
return B(name, body)

@@ -143,6 +143,8 @@ class UnitTestServer(TestServerBase):

self._response = self._400(resource)
except InvalidBody:
self._response = self._400(request.body)
except FlakyBehavior:
self._response = self._400(request.body)
except Exception as error:
self._response = self._500(str(error))

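For illustration only, the flakiness injected above boils down to the following standalone pattern (hypothetical names, not the test server's API): every second creation attempt that carries a 'flaky' property is rejected, which is what makes a previously valid prefix request fail on re-rendering.

```python
# Minimal standalone sketch of the injected flakiness; names are
# illustrative and mirror, but are not, the test server code above.
class FlakyBehavior(Exception):
    """Raised on every second creation attempt."""

class FlakyCreator:
    def __init__(self):
        self._count = 0

    def create(self, body):
        if "flaky" in body:
            self._count += 1
            # Fail on even-numbered invocations (2nd, 4th, ...),
            # matching the counter logic in ResourceFactory above.
            if (self._count - 1) % 2 == 1:
                raise FlakyBehavior()
        return {"name": "A"}

creator = FlakyCreator()
creator.create({"flaky": 1})        # 1st call succeeds
try:
    creator.create({"flaky": 1})    # 2nd call raises FlakyBehavior
except FlakyBehavior:
    print("flaky failure injected")
```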
@@ -0,0 +1,172 @@

{
"44414ad093616e18a9e2f845ae9d453eb6e4c8bc_1": {
"verb": "PUT",
"endpoint": "/A/{A}",
"verb_endpoint": "PUT /A/{A}",
"valid": 1,
"matching_prefix": "None",
"invalid_due_to_sequence_failure": 0,
"invalid_due_to_resource_failure": 0,
"invalid_due_to_parser_failure": 0,
"invalid_due_to_500": 0,
"status_code": null,
"status_text": null,
"error_message": null,
"request_order": 0,
"sample_request": {
"request_sent_timestamp": null,
"response_received_timestamp": "2021-07-02 00:55:24",
"request_verb": "PUT",
"request_uri": "/A/A",
"request_headers": [
"Accept: application/json",
"Host: unittest",
"Content-Type: application/json",
"AUTHORIZATION TOKEN"
],
"request_body": "{\"flaky\": 1}\r\n",
"response_status_code": "201",
"response_status_text": "Created",
"response_headers": [
"Restler Test"
],
"response_body": "{\"name\": \"A\"}"
},
"tracked_parameters": {
"flaky": [
"1"
]
}
},
"44414ad093616e18a9e2f845ae9d453eb6e4c8bc_2": {
"verb": "PUT",
"endpoint": "/A/{A}",
"verb_endpoint": "PUT /A/{A}",
"valid": 0,
"matching_prefix": "None",
"invalid_due_to_sequence_failure": 0,
"invalid_due_to_resource_failure": 0,
"invalid_due_to_parser_failure": 0,
"invalid_due_to_500": 0,
"status_code": null,
"status_text": null,
"error_message": "{\"error\": \"{\\\"flaky\\\": 2}\"}",
"request_order": 0,
"sample_request": {
"request_sent_timestamp": null,
"response_received_timestamp": "2021-07-02 00:55:24",
"request_verb": "PUT",
"request_uri": "/A/A",
"request_headers": [
"Accept: application/json",
"Host: unittest",
"Content-Type: application/json",
"AUTHORIZATION TOKEN"
],
"request_body": "{\"flaky\": 2}\r\n",
"response_status_code": "400",
"response_status_text": "Bad Request",
"response_headers": [
"Restler Test"
],
"response_body": "{\"error\": \"{\\\"flaky\\\": 2}\"}"
},
"tracked_parameters": {
"flaky": [
"2"
]
}
},
"c4d9948bd2e270ee123e19bbb92f88b9b59f36c9_1__1": {
"verb": "GET",
"endpoint": "/A/{A}",
"verb_endpoint": "GET /A/{A}",
"valid": 1,
"matching_prefix": [
{
"id": "44414ad093616e18a9e2f845ae9d453eb6e4c8bc_1",
"valid": 1
}
],
"invalid_due_to_sequence_failure": 0,
"invalid_due_to_resource_failure": 0,
"invalid_due_to_parser_failure": 0,
"invalid_due_to_500": 0,
"status_code": null,
"status_text": null,
"error_message": null,
"request_order": 1,
"sample_request": {
"request_sent_timestamp": null,
"response_received_timestamp": "2021-07-02 00:55:25",
"request_verb": "GET",
"request_uri": "/A/A",
"request_headers": [
"Accept: application/json",
"Host: unittest",
"Content-Type: application/json",
"AUTHORIZATION TOKEN"
],
"request_body": "{\"A\": \"A\",\"X\": 0.1}",
"response_status_code": "200",
"response_status_text": "OK",
"response_headers": [
"Restler Test"
],
"response_body": "{\"name\": \"A\"}"
},
"tracked_parameters": {
"X": [
"0.1"
],
"flaky": [
"1"
]
}
},
"c4d9948bd2e270ee123e19bbb92f88b9b59f36c9_2__1": {
"verb": "GET",
"endpoint": "/A/{A}",
"verb_endpoint": "GET /A/{A}",
"valid": 0,
"matching_prefix": [
{
"id": "44414ad093616e18a9e2f845ae9d453eb6e4c8bc_1"
}
],
"invalid_due_to_sequence_failure": 1,
"invalid_due_to_resource_failure": 0,
"invalid_due_to_parser_failure": 0,
"invalid_due_to_500": 0,
"status_code": null,
"status_text": null,
"error_message": null,
"request_order": 1,
"sequence_failure_sample_request": {
"request_sent_timestamp": null,
"response_received_timestamp": "2021-07-02 00:55:25",
"request_verb": "PUT",
"request_uri": "/A/A",
"request_headers": [
"Accept: application/json",
"Host: unittest",
"Content-Type: application/json",
"AUTHORIZATION TOKEN"
],
"request_body": "{\"flaky\": 1}\r\n",
"response_status_code": "400",
"response_status_text": "Bad Request",
"response_headers": [
"Restler Test"
],
"response_body": "{\"error\": \"{\\\"flaky\\\": 1}\"}"
},
"tracked_parameters": {
"X": [
"0.2"
],
"flaky": [
"1"
]
}
}}

@ -0,0 +1,140 @@
|
|||
2021-07-01 17:55:24.466: Will refresh token: python D:\git\restler-fuzzer\restler\unit_tests\log_baseline_test_files\unit_test_server_auth.py
|
||||
2021-07-01 17:55:24.554: New value: {'user1':{}, 'user2':{}}
|
||||
Authorization: valid_unit_test_token
|
||||
Authorization: shadow_unit_test_token
|
||||
|
||||
Generation-1: Rendering Sequence-1
|
||||
|
||||
Request: 1 (Remaining candidate combinations: 2)
|
||||
Request hash: 44414ad093616e18a9e2f845ae9d453eb6e4c8bc
|
||||
|
||||
- restler_static_string: 'PUT '
|
||||
- restler_static_string: '/A/A'
|
||||
- restler_static_string: ' HTTP/1.1\r\n'
|
||||
- restler_static_string: 'Accept: application/json\r\n'
|
||||
- restler_static_string: 'Host: unittest\r\n'
|
||||
- restler_static_string: 'Content-Type: application/json\r\n'
|
||||
+ restler_refreshable_authentication_token: ['token_refresh_cmd', 'token_refresh_interval']
|
||||
- restler_static_string: '\r\n'
|
||||
- restler_static_string: '{'
|
||||
- restler_static_string: '"flaky": '
|
||||
+ restler_fuzzable_group: ['1', '2']
|
||||
- restler_static_string: '}'
|
||||
- restler_static_string: '\r\n'
|
||||
|
||||
2021-07-01 17:55:24.741: Sending: 'PUT /A/A HTTP/1.1\r\nAccept: application/json\r\nHost: unittest\r\nContent-Type: application/json\r\nAuthorization: valid_unit_test_token\r\nContent-Length: 14\r\n\r\n{"flaky": 1}\r\n'
|
||||
|
||||
2021-07-01 17:55:24.748: Received: 'HTTP/1.1 201 Created\r\nRestler Test\r\n\r\n{"name": "A"}'
|
||||
|
||||
|
||||
Generation-1: Rendering Sequence-1
|
||||
|
||||
Request: 1 (Remaining candidate combinations: 1)
|
||||
Request hash: 44414ad093616e18a9e2f845ae9d453eb6e4c8bc
|
||||
|
||||
- restler_static_string: 'PUT '
|
||||
- restler_static_string: '/A/A'
|
||||
- restler_static_string: ' HTTP/1.1\r\n'
|
||||
- restler_static_string: 'Accept: application/json\r\n'
|
||||
- restler_static_string: 'Host: unittest\r\n'
|
||||
- restler_static_string: 'Content-Type: application/json\r\n'
|
||||
+ restler_refreshable_authentication_token: ['token_refresh_cmd', 'token_refresh_interval']
|
||||
- restler_static_string: '\r\n'
|
||||
- restler_static_string: '{'
|
||||
- restler_static_string: '"flaky": '
|
||||
+ restler_fuzzable_group: ['1', '2']
|
||||
- restler_static_string: '}'
|
||||
- restler_static_string: '\r\n'
|
||||
|
||||
2021-07-01 17:55:24.910: Sending: 'PUT /A/A HTTP/1.1\r\nAccept: application/json\r\nHost: unittest\r\nContent-Type: application/json\r\nAuthorization: valid_unit_test_token\r\nContent-Length: 14\r\n\r\n{"flaky": 2}\r\n'
|
||||
|
||||
2021-07-01 17:55:24.919: Received: 'HTTP/1.1 400 Bad Request\r\nRestler Test\r\n\r\n{"error": "{\\"flaky\\": 2}"}'
|
||||
|
||||
2021-07-01 17:55:24.936: Failed to parse _post_a; it is now set to None.
|
||||
|
||||
Generation-2: Rendering Sequence-1
|
||||
|
||||
Request: 1 (Current combination: 1 / 2)
|
||||
- restler_static_string: 'PUT '
|
||||
- restler_static_string: '/A/A'
|
||||
- restler_static_string: ' HTTP/1.1\r\n'
|
||||
- restler_static_string: 'Accept: application/json\r\n'
|
||||
- restler_static_string: 'Host: unittest\r\n'
|
||||
- restler_static_string: 'Content-Type: application/json\r\n'
|
||||
+ restler_refreshable_authentication_token: ['token_refresh_cmd', 'token_refresh_interval']
|
||||
- restler_static_string: '\r\n'
|
||||
- restler_static_string: '{'
|
||||
- restler_static_string: '"flaky": '
|
||||
+ restler_fuzzable_group: ['1', '2']
|
||||
- restler_static_string: '}'
|
||||
- restler_static_string: '\r\n'
|
||||
|
||||
Request: 2 (Remaining candidate combinations: 2)
|
||||
Request hash: c4d9948bd2e270ee123e19bbb92f88b9b59f36c9
|
||||
|
||||
- restler_static_string: 'GET '
|
||||
- restler_static_string: '/A/'
|
||||
- restler_static_string: '_READER_DELIM_post_a_READER_DELIM'
|
||||
- restler_static_string: ' HTTP/1.1\r\n'
|
||||
- restler_static_string: 'Accept: application/json\r\n'
|
||||
- restler_static_string: 'Host: unittest\r\n'
|
||||
- restler_static_string: 'Content-Type: application/json\r\n'
|
||||
+ restler_refreshable_authentication_token: ['token_refresh_cmd', 'token_refresh_interval']
|
||||
- restler_static_string: '\r\n'
|
||||
- restler_static_string: '{'
|
||||
- restler_static_string: '"A": "'
|
||||
- restler_static_string: '_READER_DELIM_post_a_READER_DELIM'
|
||||
- restler_static_string: '","X": '
|
||||
+ restler_fuzzable_group: ['0.1', '0.2']
|
||||
- restler_static_string: '}'
|
||||
|
||||
2021-07-01 17:55:25.383: Sending: 'PUT /A/A HTTP/1.1\r\nAccept: application/json\r\nHost: unittest\r\nContent-Type: application/json\r\nAuthorization: valid_unit_test_token\r\nContent-Length: 14\r\n\r\n{"flaky": 1}\r\n'
|
||||
|
||||
2021-07-01 17:55:25.392: Received: 'HTTP/1.1 201 Created\r\nRestler Test\r\n\r\n{"name": "A"}'
|
||||
|
||||
2021-07-01 17:55:25.407: Sending: 'GET /A/A HTTP/1.1\r\nAccept: application/json\r\nHost: unittest\r\nContent-Type: application/json\r\nAuthorization: valid_unit_test_token\r\nContent-Length: 19\r\n\r\n{"A": "A","X": 0.1}'
|
||||
|
||||
2021-07-01 17:55:25.416: Received: 'HTTP/1.1 200 OK\r\nRestler Test\r\n\r\n{"name": "A"}'
|
||||
|
||||
|
||||
Generation-2: Rendering Sequence-1
|
||||
|
||||
Request: 1 (Current combination: 1 / 2)
|
||||
- restler_static_string: 'PUT '
|
||||
- restler_static_string: '/A/A'
|
||||
- restler_static_string: ' HTTP/1.1\r\n'
|
||||
- restler_static_string: 'Accept: application/json\r\n'
|
||||
- restler_static_string: 'Host: unittest\r\n'
|
||||
- restler_static_string: 'Content-Type: application/json\r\n'
|
||||
+ restler_refreshable_authentication_token: ['token_refresh_cmd', 'token_refresh_interval']
|
||||
- restler_static_string: '\r\n'
|
||||
- restler_static_string: '{'
|
||||
- restler_static_string: '"flaky": '
|
||||
+ restler_fuzzable_group: ['1', '2']
|
||||
- restler_static_string: '}'
|
||||
- restler_static_string: '\r\n'
|
||||
|
||||
Request: 2 (Remaining candidate combinations: 1)
|
||||
Request hash: c4d9948bd2e270ee123e19bbb92f88b9b59f36c9
|
||||
|
||||
- restler_static_string: 'GET '
|
||||
- restler_static_string: '/A/'
|
||||
- restler_static_string: '_READER_DELIM_post_a_READER_DELIM'
|
||||
- restler_static_string: ' HTTP/1.1\r\n'
|
||||
- restler_static_string: 'Accept: application/json\r\n'
|
||||
- restler_static_string: 'Host: unittest\r\n'
|
||||
- restler_static_string: 'Content-Type: application/json\r\n'
|
||||
+ restler_refreshable_authentication_token: ['token_refresh_cmd', 'token_refresh_interval']
|
||||
- restler_static_string: '\r\n'
|
||||
- restler_static_string: '{'
|
||||
- restler_static_string: '"A": "'
|
||||
- restler_static_string: '_READER_DELIM_post_a_READER_DELIM'
|
||||
- restler_static_string: '","X": '
|
||||
+ restler_fuzzable_group: ['0.1', '0.2']
|
||||
- restler_static_string: '}'
|
||||
|
||||
2021-07-01 17:55:25.734: Sending: 'PUT /A/A HTTP/1.1\r\nAccept: application/json\r\nHost: unittest\r\nContent-Type: application/json\r\nAuthorization: valid_unit_test_token\r\nContent-Length: 14\r\n\r\n{"flaky": 1}\r\n'
|
||||
|
||||
2021-07-01 17:55:25.743: Received: 'HTTP/1.1 400 Bad Request\r\nRestler Test\r\n\r\n{"error": "{\\"flaky\\": 1}"}'
|
||||
|
||||
2021-07-01 17:55:25.760: Failed to parse _post_a; it is now set to None.
|
|
@@ -0,0 +1,90 @@

# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

# This grammar was created manually.
# There is no corresponding OpenAPI spec.

from __future__ import print_function
import json

from engine import primitives
from engine.core import requests
from engine.errors import ResponseParsingException
from engine import dependencies

_post_a = dependencies.DynamicVariable(
"_post_a"
)

def parse_A(data):
temp_123 = None

try:
data = json.loads(data)
except Exception as error:
raise ResponseParsingException("Exception parsing response, data was not valid json: {}".format(error))

try:
temp_123 = str(data["name"])
except Exception as error:
pass

if temp_123:
dependencies.set_variable("_post_a", temp_123)

req_collection = requests.RequestCollection([])

request = requests.Request([
primitives.restler_static_string("PUT "),
primitives.restler_static_string("/A/A"),
primitives.restler_static_string(" HTTP/1.1\r\n"),
primitives.restler_static_string("Accept: application/json\r\n"),
primitives.restler_static_string("Host: restler.unit.test.server.com\r\n"),
primitives.restler_static_string("Content-Type: application/json\r\n"),
primitives.restler_refreshable_authentication_token("authentication_token_tag"),
primitives.restler_static_string("\r\n"),
primitives.restler_static_string("{"),
primitives.restler_static_string('"flaky": '),
# These must be an odd and even number
primitives.restler_fuzzable_group("flaky", ["1", "2"]),
primitives.restler_static_string("}"),
primitives.restler_static_string("\r\n"),
{
'post_send':
{
'parser': parse_A,
'dependencies':
[
_post_a.writer()
]
}
},

],
requestId="/A/{A}"
)
req_collection.add_request(request)

request = requests.Request([
primitives.restler_static_string("GET "),
primitives.restler_static_string("/A/"),
primitives.restler_static_string(_post_a.reader()),
primitives.restler_static_string(" HTTP/1.1\r\n"),
primitives.restler_static_string("Accept: application/json\r\n"),
primitives.restler_static_string("Host: restler.unit.test.server.com\r\n"),
primitives.restler_static_string("Content-Type: application/json\r\n"),
primitives.restler_refreshable_authentication_token("authentication_token_tag"),
primitives.restler_static_string("\r\n"),
primitives.restler_static_string("{"),
primitives.restler_static_string('"A": "'),
primitives.restler_static_string(_post_a.reader()),
primitives.restler_static_string('","X": '),
primitives.restler_fuzzable_group("X", ["0.1", "0.2"]),
primitives.restler_static_string("}"),
],
requestId="/A/{A}"
)
req_collection.add_request(request)

@@ -61,10 +61,10 @@ class FunctionalityTests(unittest.TestCase):

"""
return glob.glob(os.path.join(dir, 'logs', f'network.{log_type}.*.1.txt'))[0]

def run_abc_smoke_test(self, test_file_dir, grammar_file_name):
def run_abc_smoke_test(self, test_file_dir, grammar_file_name, fuzzing_mode):
grammar_file_path = os.path.join(test_file_dir, grammar_file_name)
args = Common_Settings + [
'--fuzzing_mode', 'directed-smoke-test',
'--fuzzing_mode', f"{fuzzing_mode}",
'--restler_grammar', f'{grammar_file_path}'
]

@@ -84,7 +84,7 @@ class FunctionalityTests(unittest.TestCase):

"Experiments directory was not deleted.")

def test_abc_invalid_b_smoke_test(self):
self.run_abc_smoke_test(Test_File_Directory, "abc_test_grammar_invalid_b.py")
self.run_abc_smoke_test(Test_File_Directory, "abc_test_grammar_invalid_b.py", "directed-smoke-test")
experiments_dir = self.get_experiments_dir()

# Make sure all requests were successfully rendered. This is because the comparisons below do not

@@ -118,7 +118,7 @@ class FunctionalityTests(unittest.TestCase):

rendered "from scratch", but the sequence for E will reuse the 'D' prefix.

"""
self.run_abc_smoke_test(Test_File_Directory, "abc_test_grammar.py")
self.run_abc_smoke_test(Test_File_Directory, "abc_test_grammar.py", "directed-smoke-test")
experiments_dir = self.get_experiments_dir()

# Make sure all requests were successfully rendered. This is because the comparisons below do not

@@ -141,6 +141,62 @@ class FunctionalityTests(unittest.TestCase):

except TestFailedException:
self.fail("Smoke test failed: Fuzzing")

def test_ab_all_combinations_with_sequence_failure(self):
""" This checks that sequence failures are correctly reported in the
spec coverage file for a minimal grammar.
Let 2 requests A, B where:
- B depends on A
- There are 2 renderings of B, and 2 renderings of A, so four sequences AB
will be tested.
- A is flaky - it returns '200' on odd invocations, and '400' on even invocations.

The spec coverage file should contain:
- 2 entries for A, one valid and one invalid
- 2 entries for B, one valid and one 'sequence_failure' entry, with a
sample request for the failed execution of A

The test checks that the sequence failure sample requests are correct.
"""
self.run_abc_smoke_test(Test_File_Directory, "ab_flaky_b_grammar.py", "test-all-combinations")
experiments_dir = self.get_experiments_dir()

# Make sure all requests were successfully rendered. This is because the comparisons below do not
# take status codes into account

# Make sure the right number of requests was sent.
testing_summary_file_path = os.path.join(experiments_dir, "logs", "testing_summary.json")

try:
with open(testing_summary_file_path, 'r') as file:
testing_summary = json.loads(file.read())
total_requests_sent = testing_summary["total_requests_sent"]["main_driver"]
num_fully_valid = testing_summary["num_fully_valid"]
self.assertEqual(num_fully_valid, 2)
self.assertLessEqual(total_requests_sent, 6)

default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "ab_flaky_b_all_combinations_testing_log.txt"))
test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING))
self.assertTrue(default_parser.diff_log(test_parser))

baseline_speccov_json_file_path = os.path.join(Test_File_Directory, "ab_flaky_b_all_combinations_speccov.json")
test_speccov_json_file_path = os.path.join(experiments_dir, "logs", "speccov.json")
# The speccov files should be identical
with open(baseline_speccov_json_file_path, 'r') as file1:
with open(test_speccov_json_file_path, 'r') as file2:
baseline_json = json.loads(file1.read())
test_json = json.loads(file2.read())
# Remove the timestamps
for spec in [baseline_json, test_json]:
for key, val in spec.items():
if 'sequence_failure_sample_request' in val:
val['sequence_failure_sample_request']['response_received_timestamp'] = None
if 'sample_request' in val:
val['sample_request']['response_received_timestamp'] = None
self.assertTrue(baseline_json == test_json)

except TestFailedException:
self.fail("Smoke test failed: Fuzzing")

def test_smoke_test(self):
""" This checks that the directed smoke test executes all
of the expected requests in the correct order with correct

@@ -192,7 +248,7 @@ class FunctionalityTests(unittest.TestCase):

try:
result.check_returncode()
except subprocess.CalledProcessError:
self.fail(f"Restler returned non-zero exit code: {result.returncode}")
self.fail(f"Restler returned non-zero exit code: {result.returncode} {result.stdout}")

experiments_dir = self.get_experiments_dir()

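A minimal sketch of the baseline comparison performed above, assuming only the speccov fields shown earlier (the paths are illustrative): the volatile timestamps are cleared on both sides before asserting equality, so only the stable request and response data is compared.

```python
# Hedged sketch: normalize two speccov files before diffing them.
import json

def load_normalized(path):
    with open(path) as f:
        coverage = json.load(f)
    for entry in coverage.values():
        for key in ("sample_request", "sequence_failure_sample_request"):
            if key in entry and entry[key]:
                # Timestamps differ between runs; clear them before comparing.
                entry[key]["response_received_timestamp"] = None
    return coverage

baseline = load_normalized("ab_flaky_b_all_combinations_speccov.json")
actual = load_normalized("logs/speccov.json")
assert baseline == actual
```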
@@ -175,11 +175,16 @@ class SpecCoverageLog(object):

req_spec['invalid_due_to_parser_failure'] = 1
elif req.stats.failure == FailureInformation.BUG:
req_spec['invalid_due_to_500'] = 1
elif req.stats.failure == FailureInformation.MISSING_STATUS_CODE:
req_spec['invalid_due_to_missing_response_code'] = 1
req_spec['status_code'] = req.stats.status_code
req_spec['status_text'] = req.stats.status_text
req_spec['error_message'] = req.stats.error_msg
req_spec['request_order'] = req.stats.request_order
if req.stats.sample_request:
req_spec['sample_request'] = vars(req.stats.sample_request)
if req.stats.sequence_failure_sample_request:
req_spec['sequence_failure_sample_request'] = vars(req.stats.sequence_failure_sample_request)

if log_tracked_parameters:
req_spec['tracked_parameters'] = {}

@ -467,59 +472,7 @@ def custom_network_logging(sequence, candidate_values_pool, **kwargs):
|
|||
" (Current combination: "
|
||||
f"{request._current_combination_id} / {request.num_combinations(candidate_values_pool)})")
|
||||
for request_block in definition:
|
||||
primitive = request_block[0]
|
||||
if primitive == primitives.FUZZABLE_GROUP:
|
||||
field_name = request_block[1]
|
||||
default_val = request_block[2]
|
||||
quoted = request_block[3]
|
||||
examples = request_block[4]
|
||||
elif primitive in [ primitives.CUSTOM_PAYLOAD,
|
||||
primitives.CUSTOM_PAYLOAD_HEADER,
|
||||
primitives.CUSTOM_PAYLOAD_UUID4_SUFFIX ]:
|
||||
field_name = request_block[1]
|
||||
quoted = request_block[2]
|
||||
examples = request_block[3]
|
||||
else:
|
||||
default_val = request_block[1]
|
||||
quoted = request_block[2]
|
||||
examples = request_block[3]
|
||||
field_name = request_block[4]
|
||||
|
||||
# Handling dynamic primitives that need fresh rendering every time
|
||||
if primitive == "restler_fuzzable_uuid4":
|
||||
values = [primitives.restler_fuzzable_uuid4]
|
||||
# Handle enums that have a list of values instead of one default val
|
||||
elif primitive == "restler_fuzzable_group":
|
||||
values = list(default_val)
|
||||
# Handle multipart/formdata
|
||||
elif primitive == "restler_multipart_formdata":
|
||||
values = ['_OMITTED_BINARY_DATA_']
|
||||
default_val = '_OMITTED_BINARY_DATA_'
|
||||
# Handle custom payload
|
||||
elif primitive == "restler_custom_payload_header":
|
||||
current_fuzzable_tag = field_name
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request.request_id, tag=current_fuzzable_tag, quoted=quoted)
|
||||
if not isinstance(values, list):
|
||||
values = [values]
|
||||
if len(values) == 1:
|
||||
default_val = values[0]
|
||||
# Handle custom payload
|
||||
elif primitive == "restler_custom_payload":
|
||||
current_fuzzable_tag = field_name
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request.request_id, tag=current_fuzzable_tag, quoted=quoted)
|
||||
if not isinstance(values, list):
|
||||
values = [values]
|
||||
if len(values) == 1:
|
||||
default_val = values[0]
|
||||
# Handle custom payload with uuid4 suffix
|
||||
elif primitive == "restler_custom_payload_uuid4_suffix":
|
||||
current_fuzzable_tag = field_name
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request.request_id, tag=current_fuzzable_tag, quoted=quoted)
|
||||
default_val = values[0]
|
||||
# Handle all the rest
|
||||
else:
|
||||
values = candidate_values_pool.get_fuzzable_values(primitive, default_val, request.request_id, quoted=quoted, examples=examples)
|
||||
|
||||
primitive, values, default_val = format_request_block(request.request_id, request_block, candidate_values_pool)
|
||||
if len(values) > 1:
|
||||
network_log.write(f"\t\t+ {primitive}: {values}")
|
||||
else:
|
||||
|
@ -864,10 +817,26 @@ def print_generation_stats(req_collection, fuzzing_monitor, global_lock, final=F
|
|||
with open(os.path.join(LOGS_DIR, "testing_summary.json"), "w+") as summary_json:
|
||||
json.dump(testing_summary, summary_json, indent=4)
|
||||
|
||||
def format_rendering_stats_definition(request, candidate_values_pool, log_file=None):
|
||||
for request_block in request.definition:
|
||||
def format_request_block(request_id, request_block, candidate_values_pool):
|
||||
primitive = request_block[0]
|
||||
if primitive == primitives.FUZZABLE_GROUP:
|
||||
field_name = request_block[1]
|
||||
default_val = request_block[2]
|
||||
quoted = request_block[3]
|
||||
examples = request_block[4]
|
||||
elif primitive in [ primitives.CUSTOM_PAYLOAD,
|
||||
primitives.CUSTOM_PAYLOAD_HEADER,
|
||||
primitives.CUSTOM_PAYLOAD_UUID4_SUFFIX ]:
|
||||
default_val = None
|
||||
field_name = request_block[1]
|
||||
quoted = request_block[2]
|
||||
examples = request_block[3]
|
||||
else:
|
||||
default_val = request_block[1]
|
||||
quoted = request_block[2]
|
||||
examples = request_block[3]
|
||||
field_name = request_block[4]
|
||||
|
||||
# Handling dynamic primitives that need fresh rendering every time
|
||||
if primitive == "restler_fuzzable_uuid4":
|
||||
values = [primitives.restler_fuzzable_uuid4]
|
||||
|
@ -879,29 +848,34 @@ def format_rendering_stats_definition(request, candidate_values_pool, log_file=N
|
|||
values = ['_OMITTED_BINARY_DATA_']
|
||||
default_val = '_OMITTED_BINARY_DATA_'
|
||||
# Handle custom payload
|
||||
elif primitive == "restler_custom_payload":
|
||||
current_fuzzable_tag = default_val
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request.request_id, tag=current_fuzzable_tag)
|
||||
if not isinstance(values, list):
|
||||
values = [values]
|
||||
if len(values) == 1:
|
||||
default_val = values[0]
|
||||
# Handle custom payload header
|
||||
elif primitive == "restler_custom_payload_header":
|
||||
current_fuzzable_tag = default_val
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request.request_id, tag=current_fuzzable_tag)
|
||||
current_fuzzable_tag = field_name
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request.request_id, tag=current_fuzzable_tag, quoted=quoted)
|
||||
if not isinstance(values, list):
|
||||
values = [values]
|
||||
if len(values) == 1:
|
||||
default_val = values[0]
|
||||
|
||||
# Handle custom payload
|
||||
elif primitive == "restler_custom_payload":
|
||||
current_fuzzable_tag = field_name
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request_id, tag=current_fuzzable_tag, quoted=quoted)
|
||||
if not isinstance(values, list):
|
||||
values = [values]
|
||||
if len(values) == 1:
|
||||
default_val = values[0]
|
||||
# Handle custom payload with uuid4 suffix
|
||||
elif primitive == "restler_custom_payload_uuid4_suffix":
|
||||
current_fuzzable_tag = default_val
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request.request_id, tag=current_fuzzable_tag)
|
||||
current_fuzzable_tag = field_name
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request_id, tag=current_fuzzable_tag, quoted=quoted)
|
||||
default_val = values[0]
|
||||
# Handle all the rest
|
||||
else:
|
||||
values = candidate_values_pool.get_candidate_values(primitive, request_id=request.request_id)
|
||||
values = candidate_values_pool.get_fuzzable_values(primitive, default_val, request_id, quoted=quoted, examples=examples)
|
||||
return primitive, values, default_val
|
||||
|
||||
def format_rendering_stats_definition(request, candidate_values_pool, log_file=None):
|
||||
for request_block in request.definition:
|
||||
primitive, values, default_val = format_request_block(request.request_id, request_block, candidate_values_pool)
|
||||
|
||||
if len(values) > 1:
|
||||
data = f"\t\t+ {primitive}: {values}"
|
||||
|
|