Mirror of https://github.com/microsoft/CCF.git

Allow redirects by default, but make this overridable (#2083)

Squashing and merging, this should fix the daily.

This commit is contained in:
Parent: e04c6bbbe0
Commit: 5e8fd5c505
@@ -1,2 +1 @@
-I WAS HERE, AGAIN AND AGAIN!
-#
+Time is a flat circle
@@ -68,6 +68,8 @@ class Request:
     http_verb: str
     #: HTTP headers
     headers: dict
+    #: Whether redirect headers should be transparently followed
+    allow_redirects: bool
 
     def __str__(self):
         string = f"<cyan>{self.http_verb}</> <green>{self.path}</>"
@@ -200,10 +202,20 @@ class Response:
 
     @staticmethod
     def from_raw(raw):
-        sock = FakeSocket(raw)
-        response = HTTPResponse(sock)
-        response.begin()
-        raw_body = response.read(raw)
+        # Raw is the output of curl, which is a full HTTP response.
+        # But in the case of a redirect, it is multiple concatenated responses.
+        # We want the final response, so we keep constructing new responses from this stream until we have reached the end
+        while True:
+            sock = FakeSocket(raw)
+            response = HTTPResponse(sock)
+            response.begin()
+            response_len = sock.file.tell() + response.length
+            raw_len = len(raw)
+            if raw_len == response_len:
+                break
+            raw = raw[response_len:]
+
+        raw_body = response.read()
 
         return Response(
             response.status,
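The loop above relies on a FakeSocket helper that is not part of this diff. As a rough, illustrative sketch (the class name and details below are assumptions, not the repository's actual helper): http.client.HTTPResponse only needs an object exposing makefile(), so a BytesIO-backed shim is enough to parse one response out of the captured curl output, and sock.file.tell() then marks where the next concatenated response begins.

# Illustrative sketch only: a stand-in for the FakeSocket used by from_raw above.
from http.client import HTTPResponse
from io import BytesIO


class FakeSocketSketch:  # hypothetical name, not the real helper
    def __init__(self, raw):
        self.file = BytesIO(raw)

    def makefile(self, *args, **kwargs):
        # HTTPResponse reads the status line, headers and body from this file object
        return self.file


raw = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok"
response = HTTPResponse(FakeSocketSketch(raw))
response.begin()
assert response.status == 200 and response.read() == b"ok"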
@@ -283,6 +295,9 @@ class CurlClient:
             f"-m {timeout}",
         ]
 
+        if request.allow_redirects:
+            cmd.append("-L")
+
         if request.body is not None:
             if isinstance(request.body, str) and request.body.startswith("@"):
                 # Request is already a file path - pass it directly
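For context, -L is curl's standard switch for following redirects; with it set, the captured output contains every hop's response back to back, which is what the from_raw parsing above accounts for. A rough illustration of the command assembly (host, port and path are placeholders, not taken from the diff):

# Illustrative only: how the flag changes the curl invocation assembled above.
cmd = ["curl", "-X", "GET", "https://127.0.0.1:8000/node/primary", "-m 3"]
allow_redirects = True
if allow_redirects:
    cmd.append("-L")  # curl follows the 308 itself and emits every hop's response
print(" ".join(cmd))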
@@ -462,7 +477,7 @@ class RequestClient:
                 url=f"https://{self.host}:{self.port}{request.path}",
                 auth=auth_value,
                 headers=extra_headers,
-                allow_redirects=False,
+                allow_redirects=request.allow_redirects,
                 timeout=timeout,
                 data=request_body,
             )
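On the RequestClient path the flag maps directly onto the requests library's own allow_redirects option. A standalone illustration of that behaviour (URL, verify and timeout values are placeholders, not taken from the diff):

# Illustrative only: requests' native allow_redirects switch, which the session
# call above now forwards from the Request object.
import requests

r = requests.get(
    "https://127.0.0.1:8000/node/primary",
    allow_redirects=False,  # surface the 308 instead of transparently following it
    verify=False,           # placeholder; the real client verifies the service certificate
    timeout=3,
)
print(r.status_code, r.headers.get("location"))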
@@ -614,10 +629,11 @@ class CCFClient:
         headers: Optional[dict] = None,
         timeout: int = DEFAULT_REQUEST_TIMEOUT_SEC,
         log_capture: Optional[list] = None,
+        allow_redirects=True,
     ) -> Response:
         if headers is None:
             headers = {}
-        r = Request(path, body, http_verb, headers)
+        r = Request(path, body, http_verb, headers, allow_redirects)
 
         flush_info([f"{self.description} {r}"], log_capture, 3)
         response = self.client_impl.request(r, timeout)
@@ -632,6 +648,7 @@ class CCFClient:
         headers: Optional[dict] = None,
         timeout: int = DEFAULT_REQUEST_TIMEOUT_SEC,
         log_capture: Optional[list] = None,
+        allow_redirects: bool = True,
     ) -> Response:
         """
         Issues one request, synchronously, and returns the response.
@@ -643,6 +660,7 @@ class CCFClient:
         :param dict headers: HTTP request headers (optional).
         :param int timeout: Maximum time to wait for a response before giving up.
         :param list log_capture: Rather than emit to default handler, capture log lines to list (optional).
+        :param bool allow_redirects: Select whether redirects are followed.
 
         :return: :py:class:`ccf.clients.Response`
         """
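Taken together, callers now follow redirects by default and can opt out per request. A short usage sketch, assuming a backup node handle as obtained in test_primary further down:

# Usage sketch mirroring the tests below: redirects are followed by default,
# and allow_redirects=False surfaces the 308 and its Location header instead.
with backup.client() as c:
    r = c.get("/node/primary")                          # follows the redirect, returns 200
    r = c.head("/node/primary", allow_redirects=False)  # returns 308 with a Location header
    print(r.status_code, r.headers["location"])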
@@ -652,7 +670,9 @@ class CCFClient:
         logs: List[str] = []
 
         if self.is_connected:
-            r = self._call(path, body, http_verb, headers, timeout, logs)
+            r = self._call(
+                path, body, http_verb, headers, timeout, logs, allow_redirects
+            )
             flush_info(logs, log_capture, 2)
             return r
 
@@ -660,7 +680,9 @@ class CCFClient:
         while True:
             try:
                 logs = []
-                response = self._call(path, body, http_verb, headers, timeout, logs)
+                response = self._call(
+                    path, body, http_verb, headers, timeout, logs, allow_redirects
+                )
                 # Only the first request gets this timeout logic - future calls
                 # call _call
                 self.is_connected = True
@@ -434,13 +434,13 @@ namespace ccf
           is_primary = consensus->primary() == node_id;
         }
         auto ni = info.value();
-        return make_success({node_id,
-                             ni.status,
-                             ni.pubhost,
-                             ni.pubport,
-                             ni.rpchost,
-                             ni.rpcport,
-                             is_primary});
+        return make_success(GetNode::Out{node_id,
+                                         ni.status,
+                                         ni.pubhost,
+                                         ni.pubport,
+                                         ni.rpchost,
+                                         ni.rpcport,
+                                         is_primary});
       };
       make_read_only_endpoint(
         "network/nodes/{node_id}",
@@ -494,7 +494,7 @@ namespace ccf
             "https://{}:{}/node/network/nodes/{}",
             info->pubhost,
             info->pubport,
-            node_id));
+            primary_id));
           return;
         }
       }
@@ -641,7 +641,7 @@ def test_primary(network, args):
 
     backup = network.find_any_backup()
     with backup.client() as c:
-        r = c.head("/node/primary")
+        r = c.head("/node/primary", allow_redirects=False)
         assert r.status_code == http.HTTPStatus.PERMANENT_REDIRECT.value
         assert (
             r.headers["location"]
@@ -650,6 +650,75 @@ def test_primary(network, args):
     return network
 
 
+@reqs.description("Network node info")
+@reqs.at_least_n_nodes(2)
+def test_network_node_info(network, args):
+    primary, backups = network.find_nodes()
+
+    all_nodes = [primary, *backups]
+
+    # Populate node_infos by calling self
+    node_infos = {}
+    for node in all_nodes:
+        with node.client() as c:
+            # /node/network/nodes/self is always a redirect
+            r = c.get("/node/network/nodes/self", allow_redirects=False)
+            assert r.status_code == http.HTTPStatus.PERMANENT_REDIRECT.value
+            assert (
+                r.headers["location"]
+                == f"https://{node.pubhost}:{node.pubport}/node/network/nodes/{node.node_id}"
+            ), r.headers["location"]
+
+            # Following that redirect gets you the node info
+            r = c.get("/node/network/nodes/self", allow_redirects=True)
+            assert r.status_code == http.HTTPStatus.OK.value
+            body = r.body.json()
+            assert body["node_id"] == node.node_id
+            assert body["host"] == node.pubhost
+            assert body["port"] == str(node.pubport)
+            assert body["primary"] == (node == primary)
+
+            node_infos[node.node_id] = body
+
+    for node in all_nodes:
+        with node.client() as c:
+            # /node/primary is a 200 on the primary, and a redirect (to a 200) elsewhere
+            r = c.head("/node/primary", allow_redirects=False)
+            if node != primary:
+                assert r.status_code == http.HTTPStatus.PERMANENT_REDIRECT.value
+                assert (
+                    r.headers["location"]
+                    == f"https://{primary.pubhost}:{primary.pubport}/node/primary"
+                ), r.headers["location"]
+                r = c.head("/node/primary", allow_redirects=True)
+
+            assert r.status_code == http.HTTPStatus.OK.value
+
+            # /node/network/nodes/primary is always a redirect
+            r = c.get("/node/network/nodes/primary", allow_redirects=False)
+            assert r.status_code == http.HTTPStatus.PERMANENT_REDIRECT.value
+            actual = r.headers["location"]
+            expected = f"https://{node.pubhost}:{node.pubport}/node/network/nodes/{primary.node_id}"
+            assert actual == expected, f"{actual} != {expected}"
+
+            # Following that redirect gets you the primary's node info
+            r = c.get("/node/network/nodes/primary", allow_redirects=True)
+            assert r.status_code == http.HTTPStatus.OK.value
+            body = r.body.json()
+            assert body == node_infos[primary.node_id]
+
+            # Node info can be retrieved directly by node ID, from and about every node, without redirection
+            for target_node in all_nodes:
+                r = c.get(
+                    f"/node/network/nodes/{target_node.node_id}", allow_redirects=False
+                )
+                assert r.status_code == http.HTTPStatus.OK.value
+                body = r.body.json()
+                assert body == node_infos[target_node.node_id]
+
+    return network
+
+
 @reqs.description("Memory usage")
 def test_memory(network, args):
     primary, _ = network.find_primary()
@@ -697,6 +766,7 @@ def run(args):
     network = test_historical_query(network, args)
     network = test_view_history(network, args)
     network = test_primary(network, args)
+    network = test_network_node_info(network, args)
     network = test_metrics(network, args)
     network = test_memory(network, args)
 