Mirror of https://github.com/microsoft/CCF.git

Remove code to do with 1.x compatibility (#5596)

Parent: a03d30ae6f
Commit: bc78dd9cf8
@@ -1,4 +1,4 @@
-^- ___ ___
(- -) (= =) | Y & +--?
( V ) \ . \ O +---=---'
( V ) / . \ O +---=---'
/--x-m- /--n-n---xXx--/--yY-----]
@@ -1,4 +1,4 @@
___ ___ ___
(. =) Y (9 3) (* *) Y
O / . | /
O \ . | /
/-xXx--//-----x=x--/-xXx--/---x---->xxxx
@@ -1 +1 @@
...
.....
@@ -1,7 +1,7 @@
Certificates
============

Since 2.x releases, the validity period of certificates is no longer hardcoded. This page describes how the validity period can instead be set by operators, and renewed by members.
This page describes how the validity period of node and service certificates can be set by operators, and renewed by members.

.. note:: The granularity for the validity period of nodes and service certificates is one day.
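For context, the renewal described by this page is done through a governance proposal. The following is an illustrative sketch only (not part of this diff): it builds a proposal body that renews node and service certificates with a validity period expressed in whole days, matching the one-day granularity noted above. The action names and argument shapes are assumptions based on the CCF 2.x+ governance documentation.

```python
# Sketch: build a certificate-renewal proposal body (action names assumed).
import json
from datetime import datetime, timezone

valid_from = datetime.now(timezone.utc).isoformat()
proposal = {
    "actions": [
        {
            "name": "set_all_nodes_certificate_validity",
            "args": {"valid_from": valid_from, "validity_period_days": 365},
        },
        {
            "name": "set_service_certificate_validity",
            "args": {"valid_from": valid_from, "validity_period_days": 365},
        },
    ]
}
print(json.dumps(proposal, indent=2))
```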
@@ -666,23 +666,11 @@ namespace ccf
crypto::Pem n2n_channels_cert;
if (!resp.network_info->endorsed_certificate.has_value())
{
// Endorsed node certificate is included in join response
// from 2.x (CFT only). When joining an existing 1.x service,
// self-sign own certificate and use it to endorse TLS
// connections.
endorsed_node_cert = create_endorsed_node_cert(
default_node_cert_validity_period_days);
history->set_endorsed_certificate(endorsed_node_cert.value());
n2n_channels_cert = endorsed_node_cert.value();
open_frontend(ActorsType::members);
open_user_frontend();
accept_network_tls_connections();
}
else
{
n2n_channels_cert =
resp.network_info->endorsed_certificate.value();
// Endorsed certificate was added to join response in 2.x
throw std::logic_error(
"Expected endorsed certificate in join response");
}
n2n_channels_cert = resp.network_info->endorsed_certificate.value();

setup_consensus(
resp.network_info->service_status.value_or(
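The simplified join path above enforces a single invariant: the join response must carry an endorsed node certificate, since pre-2.x services that could not endorse joiners are no longer supported. A minimal sketch of that invariant, written in Python purely for illustration (the names are not the actual CCF types):

```python
# Sketch of the guard the new join code applies (illustrative names only).
from typing import Optional


def select_n2n_channels_cert(endorsed_certificate: Optional[str]) -> str:
    if endorsed_certificate is None:
        # Previously the node would self-sign its own certificate here when
        # joining a 1.x service; that fallback has been removed.
        raise ValueError("Expected endorsed certificate in join response")
    return endorsed_certificate
```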
@@ -1807,20 +1795,6 @@ namespace ccf
}
}

crypto::Pem create_endorsed_node_cert(size_t validity_period_days)
{
// Only used by a 2.x node joining an existing 1.x service which will
// not endorsed the identity of the new joiner.
return create_endorsed_cert(
node_sign_kp,
config.node_certificate.subject_name,
subject_alt_names,
config.startup_host_time,
validity_period_days,
network.identity->priv_key,
network.identity->cert);
}

void accept_node_tls_connections()
{
// Accept TLS connections, presenting self-signed (i.e. non-endorsed)
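Background for the removed helper: "endorsing" a node certificate means the service identity key signs a certificate over the node's public key, rather than the node self-signing. The deleted function did this locally for nodes joining a 1.x service; on 2.x+ the service returns the endorsed certificate in the join response. The sketch below, using the `cryptography` package, is only an illustration of that signing relationship, not CCF code.

```python
# Sketch: a "service" CA key endorsing (signing) a node certificate.
import datetime

from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import NameOID

service_key = ec.generate_private_key(ec.SECP384R1())  # stands in for the service identity key
node_key = ec.generate_private_key(ec.SECP384R1())      # stands in for the node's signing key

now = datetime.datetime.now(datetime.timezone.utc)
endorsed_cert = (
    x509.CertificateBuilder()
    .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "CCF Node")]))
    .issuer_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "CCF Service")]))
    .public_key(node_key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(now)
    .not_valid_after(now + datetime.timedelta(days=1))  # one-day granularity, as in the docs
    .sign(service_key, hashes.SHA384())
)
```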
@@ -822,193 +822,44 @@ class CCFRemote(object):
# to reference the destination file locally in the target workspace.
bin_path = os.path.join(".", os.path.basename(self.BIN))

if major_version is None or major_version > 1:
# use the relative path to the config file so that it works on remotes too
cmd = [
bin_path,
"--config",
os.path.basename(config_file),
]
# use the relative path to the config file so that it works on remotes too
cmd = [
bin_path,
"--config",
os.path.basename(config_file),
]

v = (
ccf._versionifier.to_python_version(version)
if version is not None
else None
)
if v is None or v >= Version("4.0.5"):
# Avoid passing too-low level to debug SGX nodes
if not (enclave_type == "debug" and enclave_platform == "sgx"):
cmd += [
"--enclave-log-level",
enclave_log_level,
]

if start_type == StartType.start:
members_info = kwargs.get("members_info")
if not members_info:
raise ValueError("no members info for start node")
for mi in members_info:
data_files += [
os.path.join(self.common_dir, mi["certificate_file"])
]
if mi["encryption_public_key_file"]:
data_files += [
os.path.join(
self.common_dir, mi["encryption_public_key_file"]
)
]
if mi["data_json_file"]:
data_files += [
os.path.join(self.common_dir, mi["data_json_file"])
]

for c in constitution:
data_files += [os.path.join(self.common_dir, c)]

if start_type == StartType.join:
data_files += [os.path.join(self.common_dir, "service_cert.pem")]

else:
consensus = kwargs.get("consensus")
worker_threads = kwargs.get("worker_threads")
ledger_chunk_bytes = kwargs.get("ledger_chunk_bytes")
subject_alt_names = kwargs.get("subject_alt_names")
snapshot_tx_interval = kwargs.get("snapshot_tx_interval")
max_open_sessions = kwargs.get("max_open_sessions")
max_open_sessions_hard = kwargs.get("max_open_sessions_hard")
initial_node_cert_validity_days = kwargs.get(
"initial_node_cert_validity_days"
)
node_client_host = kwargs.get("node_client_host")
members_info = kwargs.get("members_info")
target_rpc_address = kwargs.get("target_rpc_address")
maximum_node_certificate_validity_days = kwargs.get(
"maximum_node_certificate_validity_days"
)
log_format_json = kwargs.get("log_format_json")
sig_tx_interval = kwargs.get("sig_tx_interval")

primary_rpc_interface = host.get_primary_interface()
cmd = [
bin_path,
f"--enclave-file={self.enclave_file}",
f"--enclave-type={enclave_type}",
f"--node-address-file={self.node_address_file}",
f"--rpc-address={infra.interfaces.make_address(primary_rpc_interface.host, primary_rpc_interface.port)}",
f"--rpc-address-file={self.rpc_addresses_file}",
f"--ledger-dir={self.ledger_dir_name}",
f"--snapshot-dir={self.snapshots_dir_name}",
f"--node-cert-file={self.pem}",
f"--host-log-level={host_log_level}",
f"--raft-election-timeout-ms={election_timeout_ms}",
f"--consensus={consensus}",
f"--worker-threads={worker_threads}",
f"--node-address={node_address}",
f"--public-rpc-address={infra.interfaces.make_address(primary_rpc_interface.public_host, primary_rpc_interface.public_port)}",
]

if log_format_json:
cmd += ["--log-format-json"]

if sig_tx_interval:
cmd += [f"--sig-tx-interval={sig_tx_interval}"]

if sig_ms_interval:
cmd += [f"--sig-ms-interval={sig_ms_interval}"]

if ledger_chunk_bytes:
cmd += [f"--ledger-chunk-bytes={ledger_chunk_bytes}"]

if subject_alt_names:
cmd += [f"--san={s}" for s in subject_alt_names]

if snapshot_tx_interval:
cmd += [f"--snapshot-tx-interval={snapshot_tx_interval}"]

if max_open_sessions:
cmd += [f"--max-open-sessions={max_open_sessions}"]

if jwt_key_refresh_interval_s:
cmd += [f"--jwt-key-refresh-interval-s={jwt_key_refresh_interval_s}"]

for f in self.read_only_ledger_dirs_names:
cmd += [f"--read-only-ledger-dir={f}"]

for f in self.read_only_ledger_dirs:
data_files += [os.path.join(self.common_dir, f)]

if curve_id is not None:
cmd += [f"--curve-id={curve_id.name}"]

# Added in 1.x
if not major_version or major_version > 1:
if initial_node_cert_validity_days:
cmd += [
f"--initial-node-cert-validity-days={initial_node_cert_validity_days}"
]

if node_client_host:
cmd += [f"--node-client-interface={node_client_host}"]

if max_open_sessions_hard:
cmd += [f"--max-open-sessions-hard={max_open_sessions_hard}"]

if start_type == StartType.start:
cmd += ["start", "--network-cert-file=service_cert.pem"]
for fragment in constitution:
cmd.append(f"--constitution={os.path.basename(fragment)}")
data_files += [
os.path.join(self.common_dir, os.path.basename(fragment))
]

if members_info is None:
raise ValueError(
"Starting node should be given at least one member info"
)
for mi in members_info:
member_info_cmd = f'--member-info={mi["certificate_file"]}'
data_files.append(
os.path.join(self.common_dir, mi["certificate_file"])
)
if mi["encryption_public_key_file"] is not None:
member_info_cmd += f',{mi["encryption_public_key_file"]}'
data_files.append(
os.path.join(
self.common_dir, mi["encryption_public_key_file"]
)
)
elif mi["data_json_file"] is not None:
member_info_cmd += ","
if mi["data_json_file"] is not None:
member_info_cmd += f',{mi["data_json_file"]}'
data_files.append(
os.path.join(self.common_dir, mi["data_json_file"])
)
cmd += [member_info_cmd]

# Added in 1.x
if not major_version or major_version > 1:
if maximum_node_certificate_validity_days:
cmd += [
f"--max-allowed-node-cert-validity-days={maximum_node_certificate_validity_days}"
]

elif start_type == StartType.join:
v = (
ccf._versionifier.to_python_version(version)
if version is not None
else None
)
if v is None or v >= Version("4.0.5"):
# Avoid passing too-low level to debug SGX nodes
if not (enclave_type == "debug" and enclave_platform == "sgx"):
cmd += [
"join",
"--network-cert-file=service_cert.pem",
f"--target-rpc-address={target_rpc_address}",
f"--join-timer={join_timer_s * 1000}",
"--enclave-log-level",
enclave_log_level,
]
data_files += [os.path.join(self.common_dir, "service_cert.pem")]

elif start_type == StartType.recover:
cmd += ["recover", "--network-cert-file=service_cert.pem"]
if start_type == StartType.start:
members_info = kwargs.get("members_info")
if not members_info:
raise ValueError("no members info for start node")
for mi in members_info:
data_files += [os.path.join(self.common_dir, mi["certificate_file"])]
if mi["encryption_public_key_file"]:
data_files += [
os.path.join(self.common_dir, mi["encryption_public_key_file"])
]
if mi["data_json_file"]:
data_files += [os.path.join(self.common_dir, mi["data_json_file"])]

else:
raise ValueError(
f"Unexpected CCFRemote start type {start_type}. Should be start, join or recover"
)
for c in constitution:
data_files += [os.path.join(self.common_dir, c)]

if start_type == StartType.join:
data_files += [os.path.join(self.common_dir, "service_cert.pem")]

self.remote = remote_class(
self.name,
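The net effect of this hunk is that the test infra no longer builds the long 1.x per-flag command line: every node now launches from a single `--config` file, and the only remaining version gate is for the enclave log level flag. A minimal sketch of the retained logic, under the assumption that `Version` is `packaging.version.Version` as in the test infra and that the other names are illustrative:

```python
# Sketch: config-file based node launch command with a version-gated flag.
import os
from packaging.version import Version


def build_node_cmd(bin_path, config_file, node_version=None,
                   enclave_type="release", enclave_platform="virtual",
                   enclave_log_level="info"):
    # All startup options live in the config file; only its basename is passed
    # so the command also works when copied to a remote workspace.
    cmd = [bin_path, "--config", os.path.basename(config_file)]
    # --enclave-log-level is assumed (per the diff) to exist from 4.0.5 onwards.
    if node_version is None or Version(node_version) >= Version("4.0.5"):
        if not (enclave_type == "debug" and enclave_platform == "sgx"):
            cmd += ["--enclave-log-level", enclave_log_level]
    return cmd


print(build_node_cmd("./cchost", "/tmp/0.config.json", node_version="5.0.0"))
```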
@@ -13,7 +13,6 @@ import suite.test_requirements as reqs
import ccf.ledger
import os
import json
import time
import datetime
from e2e_logging import test_random_receipts
from governance import test_all_nodes_cert_renewal, test_service_cert_renewal
@@ -95,7 +94,6 @@ def test_new_service(
binary_dir,
library_dir,
version,
cycle_existing_nodes=False,
):
if IS_SNP:
LOG.info(
@@ -120,49 +118,29 @@ def test_new_service(
)
network.consortium.set_js_app_from_dir(primary, js_app_directory)

LOG.info(f"Add node to new service [cycle nodes: {cycle_existing_nodes}]")
nodes_to_cycle = network.get_joined_nodes() if cycle_existing_nodes else []
nodes_to_add_count = len(nodes_to_cycle) if cycle_existing_nodes else 1
LOG.info("Add node to new service")

# Pre-2.0 nodes require X509 time format
valid_from = str(infra.crypto.datetime_to_X509time(datetime.datetime.utcnow()))

kwargs = {}
if not infra.node.version_after(version, "ccf-4.0.0-rc1"):
kwargs["reconfiguration_type"] = "OneTransaction"
kwargs["reconfiguration_type"] = "OneTransaction"

for _ in range(0, nodes_to_add_count):
new_node = network.create_node(
"local://localhost",
binary_dir=binary_dir,
library_dir=library_dir,
version=version,
)
network.join_node(new_node, args.package, args, **kwargs)
network.trust_node(
new_node,
args,
valid_from=valid_from,
)
new_node.verify_certificate_validity_period(
expected_validity_period_days=DEFAULT_NODE_CERTIFICATE_VALIDITY_DAYS
)
all_nodes.append(new_node)

for node in nodes_to_cycle:
network.retire_node(primary, node)
if primary == node:
primary, _ = network.wait_for_new_primary(primary)
# Stopping a node immediately after its removal being
# committed and an election is not safe: the successor
# primary may need to re-establish commit on a config
# that includes the retire node.
# See https://github.com/microsoft/CCF/issues/1713
# for more detail. Until the dedicated endpoint exposing
# this safely is implemented, we work around this by
# submitting and waiting for commit on another transaction.
network.txs.issue(network, number_txs=1, repeat=True)
node.stop()
new_node = network.create_node(
"local://localhost",
binary_dir=binary_dir,
library_dir=library_dir,
version=version,
)
network.join_node(new_node, args.package, args, **kwargs)
network.trust_node(
new_node,
args,
valid_from=valid_from,
)
new_node.verify_certificate_validity_period(
expected_validity_period_days=DEFAULT_NODE_CERTIFICATE_VALIDITY_DAYS
)
all_nodes.append(new_node)

test_all_nodes_cert_renewal(network, args, valid_from=valid_from)
test_service_cert_renewal(network, args, valid_from=valid_from)
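After each node is trusted, the test checks its certificate validity period. In essence such a check loads the node's PEM certificate and compares the span between its notBefore and notAfter fields against an expected number of days. A minimal sketch using the `cryptography` package; the constant and default value are assumptions, not the test suite's actual values:

```python
# Sketch: verify a node certificate's validity period from its PEM file.
from cryptography import x509

EXPECTED_VALIDITY_DAYS = 1  # assumed default for a freshly-joined node


def check_validity_period(pem_path: str, expected_days: int = EXPECTED_VALIDITY_DAYS):
    with open(pem_path, "rb") as f:
        cert = x509.load_pem_x509_certificate(f.read())
    period = cert.not_valid_after - cert.not_valid_before
    assert period.days == expected_days, (
        f"Certificate valid for {period.days} days, expected {expected_days}"
    )
```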
@@ -248,7 +226,6 @@ def run_code_upgrade_from(

old_nodes = network.get_joined_nodes()
primary, _ = network.find_primary()
from_major_version = primary.major_version

LOG.info("Apply transactions to old service")
issue_activity_on_live_service(network, args)
@@ -294,15 +271,13 @@ def run_code_upgrade_from(

# Verify that all nodes run the expected CCF version
for node in network.get_joined_nodes():
# Note: /node/version endpoint was added in 2.x
if not node.major_version or node.major_version > 1:
with node.client() as c:
r = c.get("/node/version")
expected_version = node.version or args.ccf_version
version = r.body.json()["ccf_version"]
assert (
version == expected_version
), f"For node {node.local_node_id}, expect version {expected_version}, got {version}"
with node.client() as c:
r = c.get("/node/version")
expected_version = node.version or args.ccf_version
version = r.body.json()["ccf_version"]
assert (
version == expected_version
), f"For node {node.local_node_id}, expect version {expected_version}, got {version}"

LOG.info("Apply transactions to hybrid network, with primary as old node")
issue_activity_on_live_service(network, args)
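With 1.x gone, the `/node/version` endpoint can be assumed on every node, so the guard around the check is dropped. Outside the test infra, the same check can be expressed with a plain HTTPS client; the sketch below uses `requests`, with placeholder address, certificate path, and expected version:

```python
# Sketch: query /node/version and compare the reported "ccf_version".
import requests

node_url = "https://127.0.0.1:8000"            # placeholder RPC address
service_cert = "./workspace/service_cert.pem"  # placeholder CA bundle

r = requests.get(f"{node_url}/node/version", verify=service_cert)
reported = r.json()["ccf_version"]
expected = "ccf-4.0.0"  # placeholder expected version
assert reported == expected, f"expected {expected}, got {reported}"
```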
@@ -376,19 +351,11 @@ def run_code_upgrade_from(
# and retrieve new keys via auto refresh
if not os.getenv("CONTAINER_NODES"):
jwt_issuer.refresh_keys()
# Note: /gov/jwt_keys/all endpoint was added in 2.x
if not primary.major_version or primary.major_version > 1:
jwt_issuer.wait_for_refresh(network)
else:
time.sleep(3)
jwt_issuer.wait_for_refresh(network)
else:
# https://github.com/microsoft/CCF/issues/2608#issuecomment-924785744
LOG.warning("Skipping JWT refresh as running nodes in container")

# Code update from 1.x to 2.x requires cycling the freshly-added 2.x nodes
# once. This is because 2.x nodes will not have an endorsed certificate
# recorded in the store and thus will not be able to have their certificate
# refreshed, etc.
test_new_service(
network,
args,
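The removed branch used a fixed `time.sleep(3)` because pre-2.x nodes had no `/gov/jwt_keys/all` endpoint to poll. Now that the endpoint can be assumed everywhere, waiting for a refresh reduces to polling it until the issuer's new key id appears. A hedged sketch, with placeholder URL, certificate path, and key id, and assuming the endpoint returns a JSON object keyed by kid:

```python
# Sketch: poll /gov/jwt_keys/all until a refreshed key id is visible.
import time
import requests


def wait_for_jwt_refresh(node_url, service_cert, expected_kid, timeout_s=30):
    end = time.time() + timeout_s
    while time.time() < end:
        r = requests.get(f"{node_url}/gov/jwt_keys/all", verify=service_cert)
        # Assumed response shape: {"<kid>": {...}, ...}
        if r.status_code == 200 and expected_kid in r.json():
            return
        time.sleep(0.5)
    raise TimeoutError(f"Key {expected_kid} not refreshed within {timeout_s}s")
```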
@@ -396,21 +363,8 @@ def run_code_upgrade_from(
to_binary_dir,
to_library_dir,
to_version,
cycle_existing_nodes=True,
)

# Check that the ledger can be parsed
# Note: When upgrading from 1.x to 2.x, it is possible that ledger chunk are not
# in sync between nodes, which may cause some chunks to differ when starting
# from a snapshot. See https://github.com/microsoft/ccf/issues/3613. In such case,
# we only verify that the ledger can be parsed, even if some chunks are duplicated.
# This can go once 2.0 is released.
insecure_ledger_verification = (
from_major_version == 1 and primary.version_after("ccf-2.0.0-rc7")
)
network.get_latest_ledger_public_state(
insecure=insecure_ledger_verification
)
network.get_latest_ledger_public_state()


@reqs.description("Run live compatibility with latest LTS")
@@ -591,27 +545,20 @@ def run_ledger_compatibility_since_first(args, local_branch, use_snapshot):

# Verify that all nodes run the expected CCF version
for node in nodes:
# Note: /node/version endpoint and custom certificate validity
# were added in 2.x
if not node.major_version or node.major_version > 1:
with node.client() as c:
r = c.get("/node/version")
expected_version = node.version or args.ccf_version
version = r.body.json()["ccf_version"]
assert (
r.body.json()["ccf_version"] == expected_version
), f"Node version is not {expected_version}"
node.verify_certificate_validity_period()
with node.client() as c:
r = c.get("/node/version")
expected_version = node.version or args.ccf_version
version = r.body.json()["ccf_version"]
assert (
r.body.json()["ccf_version"] == expected_version
), f"Node version is not {expected_version}"
node.verify_certificate_validity_period()

# Rollover JWKS so that new primary must read historical CA bundle table
# and retrieve new keys via auto refresh
jwt_issuer.refresh_keys()
# Note: /gov/jwt_keys/all endpoint was added in 2.x
primary, _ = network.find_nodes()
if not primary.major_version or primary.major_version > 1:
jwt_issuer.wait_for_refresh(network)
else:
time.sleep(3)
jwt_issuer.wait_for_refresh(network)

issue_activity_on_live_service(network, args)
@@ -636,10 +583,7 @@ def run_ledger_compatibility_since_first(args, local_branch, use_snapshot):
# between those versions (see https://github.com/microsoft/ccf/issues/3613;
# 1.x ledgers do not contain the header flags to synchronize ledger chunks).
# This can go once 2.0 is released.
network.stop_all_nodes(
skip_verification=True,
accept_ledger_diff=True,
)
network.stop_all_nodes(skip_verification=True, accept_ledger_diff=True)
ledger_dir, committed_ledger_dirs = primary.get_ledger()

# Check that ledger and snapshots can be parsed
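The "ledger can be parsed" checks in these tests ultimately walk the ledger files with the `ccf.ledger` module imported at the top of this test file. A rough sketch of that kind of parse; the constructor arguments and iteration API are assumptions based on the CCF Python package, and the paths are placeholders:

```python
# Sketch: iterate a ledger directory and confirm every transaction parses.
import ccf.ledger

ledger_dirs = ["./workspace/node0.ledger"]  # placeholder ledger directory
ledger = ccf.ledger.Ledger(ledger_dirs, committed_only=False)

tx_count = 0
for chunk in ledger:
    for tx in chunk:
        # Reading the public domain is enough to confirm the frames parse.
        tx.get_public_domain()
        tx_count += 1
print(f"Parsed {tx_count} transactions")
```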