Parent
7ea5584291
Commit
e931bed3ef
@@ -9,13 +9,12 @@
     "customizations": {
         "vscode": {
             "extensions": [
+                "charliermarsh.ruff",
                 "editorconfig.editorconfig",
                 "esbenp.prettier-vscode",
                 "dbaeumer.vscode-eslint",
                 "ms-python.python",
-                "ms-python.black-formatter",
                 "ms-python.vscode-pylance",
-                "charliermarsh.ruff",
                 "ms-python.debugpy"
             ]
         }
@@ -41,17 +41,10 @@ runs:
         python-version: '3.x'
         cache: 'pip'

-    - name: Check Python format
-      run: |
-        python -m pip install -U black ruff
-        python -m ruff check
-        python -m black . --check
-      working-directory: python_files
-      shell: bash
-
     - name: Run Ruff
       run: |
         python -m pip install -U ruff
         python -m ruff check .
+        python -m ruff format --check
       working-directory: python_files
       shell: bash
@@ -2,13 +2,11 @@
     // See https://go.microsoft.com/fwlink/?LinkId=827846
     // for the documentation about the extensions.json format
    "recommendations": [
+        "charliermarsh.ruff",
        "editorconfig.editorconfig",
        "esbenp.prettier-vscode",
        "dbaeumer.vscode-eslint",
        "ms-python.python",
-        "ms-python.black-formatter",
-        "ms-python.vscode-pylance",
-        "ms-python.isort",
-        "ms-python.flake8"
+        "ms-python.vscode-pylance"
    ]
}
@@ -28,7 +28,7 @@
         "source.fixAll.eslint": "explicit",
         "source.organizeImports.isort": "explicit"
     },
-    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.defaultFormatter": "charliermarsh.ruff",
 },
 "[typescript]": {
     "editor.defaultFormatter": "esbenp.prettier-vscode",
@@ -37,11 +37,7 @@ def main():
     invoke = sys.argv[1]
     if invoke == "-m":
         linter = sys.argv[2]
-        args = (
-            [sys.executable, "-m", linter]
-            + linter_settings[linter]["args"]
-            + sys.argv[3:]
-        )
+        args = [sys.executable, "-m", linter] + linter_settings[linter]["args"] + sys.argv[3:]
     else:
         linter = sys.argv[2]
         args = [sys.argv[3]] + linter_settings[linter]["args"] + sys.argv[4:]
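For context, a sketch of the argv the collapsed line builds (the inputs below are hypothetical; the argv layout and `linter_settings` lookup come from the surrounding code):

    import sys

    # Hypothetical invocation: runner.py -m pylint file_a.py,
    # with one extra flag carried in the linter's settings.
    linter_settings = {"pylint": {"args": ["--reports=n"]}}
    linter = "pylint"
    args = [sys.executable, "-m", linter] + linter_settings[linter]["args"] + ["file_a.py"]
    # args -> [<python executable>, "-m", "pylint", "--reports=n", "file_a.py"]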
@@ -191,9 +191,7 @@ def traverse_file(wholeFileContent, start_line, end_line, was_highlighted):
             ast.IfExp,
             ast.ExceptHandler,
         )
-        if isinstance(node, ast_types_with_nodebody) and isinstance(
-            node.body, Iterable
-        ):
+        if isinstance(node, ast_types_with_nodebody) and isinstance(node.body, Iterable):
             for child_nodes in node.body:
                 top_level_nodes.append(child_nodes)

@@ -204,9 +202,7 @@ def traverse_file(wholeFileContent, start_line, end_line, was_highlighted):
     which_line_next = 0
     for same_line_node in exact_nodes:
         should_run_top_blocks.append(same_line_node)
-        smart_code += (
-            f"{ast.get_source_segment(wholeFileContent, same_line_node)}\n"
-        )
+        smart_code += f"{ast.get_source_segment(wholeFileContent, same_line_node)}\n"
         which_line_next = get_next_block_lineno(should_run_top_blocks)
     return {
         "normalized_smart_result": smart_code,
@@ -36,8 +36,14 @@ ignore = [
 ]

 [tool.ruff]
-line-length = 140
-exclude = ["tests/testing_tools/adapter/.data"]
+line-length = 100
+exclude = [
+    "tests/testing_tools/adapter/.data",
+    "tests/unittestadapter/.data"
+]
+
+[tool.ruff.format]
+docstring-code-format = true

 [tool.ruff.lint.pydocstyle]
 convention = "pep257"
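Among the new settings, `docstring-code-format = true` makes `ruff format` reformat code examples embedded in docstrings as well. A minimal sketch of the effect (hypothetical function, not part of this commit):

    def add(a: int, b: int) -> int:
        """Add two integers.

        Example:
            >>> add(1,  2)
            3
        """
        return a + b

    # With docstring-code-format enabled, `ruff format` rewrites the doctest
    # line above to the normalized form: >>> add(1, 2)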
@@ -50,9 +50,7 @@ def parse_args(
         subsub = add_subparser(cmdname, toolname, subsubs)
         if cmdname == "discover":
             subsub.add_argument("--simple", action="store_true")
-            subsub.add_argument(
-                "--no-hide-stdio", dest="hidestdio", action="store_false"
-            )
+            subsub.add_argument("--no-hide-stdio", dest="hidestdio", action="store_false")
             subsub.add_argument("--pretty", action="store_true")

     # Parse the args!
@@ -61,9 +61,7 @@ class ParentInfo(namedtuple("ParentInfo", "id kind name root relpath parentid"))
         raise TypeError("missing relpath")


-class SingleTestInfo(
-    namedtuple("TestInfo", "id name path source markers parentid kind")
-):
+class SingleTestInfo(namedtuple("TestInfo", "id name path source markers parentid kind")):
     """Info for a single test."""

     MARKERS = ("skip", "skip-if", "expected-failure")

@@ -17,7 +17,7 @@ def discover(
     # *,
     _pytest_main=pytest.main,
     _plugin=None,
-    **_ignored
+    **_ignored,
 ):
     """Return the results of test discovery."""
     if _plugin is None:
@@ -170,9 +170,7 @@ def parse_item(
         parents = [(parentid, item.originalname, kind)] + parents
         name = parameterized[1:-1] or "<empty>"
     else:
-        (nodeid, parents, fileid, testfunc, parameterized) = _parse_node_id(
-            item.nodeid, kind
-        )
+        (nodeid, parents, fileid, testfunc, parameterized) = _parse_node_id(item.nodeid, kind)
         name = item.name

     # Note: testfunc does not necessarily match item.function.__name__.

@@ -13,7 +13,7 @@ def report_discovered(
     pretty=False,
     simple=False,
     _send=print,
-    **_ignored
+    **_ignored,
 ):
     """Serialize the discovered tests and write to stdout."""
     if simple:
@@ -128,7 +128,7 @@ def fix_fileid(
     normalize=False,
     strictpathsep=None,
     _pathsep=PATH_SEP,
-    **kwargs
+    **kwargs,
 ):
     """Return a pathsep-separated file ID ("./"-prefixed) for the given value.

@@ -150,7 +150,7 @@ def fix_fileid(
         rootdir,
         _pathsep=_pathsep,
         # ...
-        **kwargs
+        **kwargs,
     )
     if relpath:  # Note that we treat "" here as an absolute path.
         _fileid = "./" + relpath
@@ -29,9 +29,7 @@ class SocketManager(object):
         self.close()

     def connect(self):
-        self.socket = socket.socket(
-            socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP
-        )
+        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
         if sys.platform == "win32":
            addr_use = socket.SO_EXCLUSIVEADDRUSE
        else:
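The branch shown above picks a platform-appropriate address-reuse option. A standalone sketch of that pattern (same option names as in the diff; the address and port are hypothetical):

    import socket
    import sys

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    if sys.platform == "win32":
        # Windows: fail if another socket already owns the address.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
    else:
        # POSIX: allow rebinding an address still in TIME_WAIT.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("127.0.0.1", 5678))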
@@ -12,9 +12,7 @@ from . import DEBUG_ADAPTER_ROOT, SRC_ROOT, TEST_ROOT, TESTING_TOOLS_ROOT
 def parse_args():
     parser = argparse.ArgumentParser()
     # To mark a test as functional: (decorator) @pytest.mark.functional
-    parser.add_argument(
-        "--functional", dest="markers", action="append_const", const="functional"
-    )
+    parser.add_argument("--functional", dest="markers", action="append_const", const="functional")
     parser.add_argument(
         "--no-functional", dest="markers", action="append_const", const="not functional"
     )

@@ -18,12 +18,8 @@ def test_install_debugpy(tmpdir):
     import install_debugpy

     install_debugpy.main(str(tmpdir))
-    dir_path = os.path.join(
-        str(tmpdir), "debugpy", "_vendored", "pydevd", "_pydevd_bundle"
-    )
+    dir_path = os.path.join(str(tmpdir), "debugpy", "_vendored", "pydevd", "_pydevd_bundle")
     _check_binaries(dir_path)

-    dir_path = os.path.join(
-        str(tmpdir), "debugpy", "_vendored", "pydevd", "_pydevd_frame_eval"
-    )
+    dir_path = os.path.join(str(tmpdir), "debugpy", "_vendored", "pydevd", "_pydevd_frame_eval")
     _check_binaries(dir_path)
@@ -322,19 +322,12 @@ unittest_folder_discovery_expected_output = {
 # └── test_bottom_function_t
 # └── test_bottom_function_f
 dual_level_nested_folder_path = TEST_DATA_PATH / "dual_level_nested_folder"
-test_top_folder_path = (
-    TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py"
-)
+test_top_folder_path = TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py"

-test_nested_folder_one_path = (
-    TEST_DATA_PATH / "dual_level_nested_folder" / "nested_folder_one"
-)
+test_nested_folder_one_path = TEST_DATA_PATH / "dual_level_nested_folder" / "nested_folder_one"

 test_bottom_folder_path = (
-    TEST_DATA_PATH
-    / "dual_level_nested_folder"
-    / "nested_folder_one"
-    / "test_bottom_folder.py"
+    TEST_DATA_PATH / "dual_level_nested_folder" / "nested_folder_one" / "test_bottom_folder.py"
 )

@@ -851,12 +844,8 @@ root_with_config_expected_output = {
                     os.path.join(tests_path, "test_a.py"),
                 ),
                 "type_": "test",
-                "id_": get_absolute_test_id(
-                    "tests/test_a.py::test_a_function", tests_a_path
-                ),
-                "runID": get_absolute_test_id(
-                    "tests/test_a.py::test_a_function", tests_a_path
-                ),
+                "id_": get_absolute_test_id("tests/test_a.py::test_a_function", tests_a_path),
+                "runID": get_absolute_test_id("tests/test_a.py::test_a_function", tests_a_path),
             }
         ],
     },

@@ -874,12 +863,8 @@ root_with_config_expected_output = {
                     os.path.join(tests_path, "test_b.py"),
                 ),
                 "type_": "test",
-                "id_": get_absolute_test_id(
-                    "tests/test_b.py::test_b_function", tests_b_path
-                ),
-                "runID": get_absolute_test_id(
-                    "tests/test_b.py::test_b_function", tests_b_path
-                ),
+                "id_": get_absolute_test_id("tests/test_b.py::test_b_function", tests_b_path),
+                "runID": get_absolute_test_id("tests/test_b.py::test_b_function", tests_b_path),
             }
         ],
     },

@@ -996,12 +981,8 @@ nested_classes_expected_test_output = {
 }
 SYMLINK_FOLDER_PATH = TEST_DATA_PATH / "symlink_folder"
 SYMLINK_FOLDER_PATH_TESTS = TEST_DATA_PATH / "symlink_folder" / "tests"
-SYMLINK_FOLDER_PATH_TESTS_TEST_A = (
-    TEST_DATA_PATH / "symlink_folder" / "tests" / "test_a.py"
-)
-SYMLINK_FOLDER_PATH_TESTS_TEST_B = (
-    TEST_DATA_PATH / "symlink_folder" / "tests" / "test_b.py"
-)
+SYMLINK_FOLDER_PATH_TESTS_TEST_A = TEST_DATA_PATH / "symlink_folder" / "tests" / "test_a.py"
+SYMLINK_FOLDER_PATH_TESTS_TEST_B = TEST_DATA_PATH / "symlink_folder" / "tests" / "test_b.py"

 symlink_expected_discovery_output = {
     "name": "symlink_folder",
@@ -20,9 +20,7 @@ FAILURE = "failure"
 test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py"
 test_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py"
 uf_execution_expected_output = {
-    get_absolute_test_id(
-        f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path
-    ): {
+    get_absolute_test_id(f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path): {
         "test": get_absolute_test_id(
             f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path
         ),

@@ -31,9 +29,7 @@ uf_execution_expected_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path
-    ): {
+    get_absolute_test_id(f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path): {
         "test": get_absolute_test_id(
             f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path
         ),

@@ -80,9 +76,7 @@ uf_execution_expected_output = {
 test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py"

 uf_single_file_expected_output = {
-    get_absolute_test_id(
-        f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path
-    ): {
+    get_absolute_test_id(f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path): {
         "test": get_absolute_test_id(
             f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path
         ),

@@ -91,9 +85,7 @@ uf_single_file_expected_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path
-    ): {
+    get_absolute_test_id(f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path): {
         "test": get_absolute_test_id(
             f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path
         ),

@@ -111,9 +103,7 @@ uf_single_file_expected_output = {
 # │ └── TestAddFunction
 # │ └── test_add_positive_numbers: success
 uf_single_method_execution_expected_output = {
-    get_absolute_test_id(
-        f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path
-    ): {
+    get_absolute_test_id(f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path): {
         "test": get_absolute_test_id(
             f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path
         ),

@@ -149,9 +139,7 @@ uf_non_adjacent_tests_execution_expected_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path
-    ): {
+    get_absolute_test_id(f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path): {
         "test": get_absolute_test_id(
             f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path
         ),

@@ -252,35 +240,27 @@ skip_tests_execution_expected_output = {
         "subtest": None,
     },
     get_absolute_test_id("skip_tests.py::test_another_thing", skip_tests_path): {
-        "test": get_absolute_test_id(
-            "skip_tests.py::test_another_thing", skip_tests_path
-        ),
+        "test": get_absolute_test_id("skip_tests.py::test_another_thing", skip_tests_path),
         "outcome": "skipped",
         "message": None,
         "traceback": None,
         "subtest": None,
     },
     get_absolute_test_id("skip_tests.py::test_decorator_thing", skip_tests_path): {
-        "test": get_absolute_test_id(
-            "skip_tests.py::test_decorator_thing", skip_tests_path
-        ),
+        "test": get_absolute_test_id("skip_tests.py::test_decorator_thing", skip_tests_path),
         "outcome": "skipped",
         "message": None,
         "traceback": None,
         "subtest": None,
     },
     get_absolute_test_id("skip_tests.py::test_decorator_thing_2", skip_tests_path): {
-        "test": get_absolute_test_id(
-            "skip_tests.py::test_decorator_thing_2", skip_tests_path
-        ),
+        "test": get_absolute_test_id("skip_tests.py::test_decorator_thing_2", skip_tests_path),
         "outcome": "skipped",
         "message": None,
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "skip_tests.py::TestClass::test_class_function_a", skip_tests_path
-    ): {
+    get_absolute_test_id("skip_tests.py::TestClass::test_class_function_a", skip_tests_path): {
         "test": get_absolute_test_id(
             "skip_tests.py::TestClass::test_class_function_a", skip_tests_path
         ),

@@ -289,9 +269,7 @@ skip_tests_execution_expected_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "skip_tests.py::TestClass::test_class_function_b", skip_tests_path
-    ): {
+    get_absolute_test_id("skip_tests.py::TestClass::test_class_function_b", skip_tests_path): {
         "test": get_absolute_test_id(
             "skip_tests.py::TestClass::test_class_function_b", skip_tests_path
         ),

@@ -316,10 +294,7 @@ dual_level_nested_folder_top_path = (
     TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py"
 )
 dual_level_nested_folder_bottom_path = (
-    TEST_DATA_PATH
-    / "dual_level_nested_folder"
-    / "nested_folder_one"
-    / "test_bottom_folder.py"
+    TEST_DATA_PATH / "dual_level_nested_folder" / "nested_folder_one" / "test_bottom_folder.py"
 )
 dual_level_nested_folder_execution_expected_output = {
     get_absolute_test_id(

@@ -379,9 +354,7 @@ dual_level_nested_folder_execution_expected_output = {
 # └── test_nest.py
 #    └── test_function: success

-nested_folder_path = (
-    TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py"
-)
+nested_folder_path = TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py"
 double_nested_folder_expected_execution_output = {
     get_absolute_test_id(
         "folder_a/folder_b/folder_a/test_nest.py::test_function", nested_folder_path

@@ -403,9 +376,7 @@ double_nested_folder_expected_execution_output = {
 parametrize_tests_path = TEST_DATA_PATH / "parametrize_tests.py"

 parametrize_tests_expected_execution_output = {
-    get_absolute_test_id(
-        "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path
-    ): {
+    get_absolute_test_id("parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path): {
         "test": get_absolute_test_id(
             "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path
         ),

@@ -414,9 +385,7 @@ parametrize_tests_expected_execution_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path
-    ): {
+    get_absolute_test_id("parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path): {
         "test": get_absolute_test_id(
             "parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path
         ),

@@ -425,9 +394,7 @@ parametrize_tests_expected_execution_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path
-    ): {
+    get_absolute_test_id("parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path): {
         "test": get_absolute_test_id(
             "parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path
         ),

@@ -442,9 +409,7 @@ parametrize_tests_expected_execution_output = {
 # └── parametrize_tests.py
 #    └── test_adding[3+5-8]: success
 single_parametrize_tests_expected_execution_output = {
-    get_absolute_test_id(
-        "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path
-    ): {
+    get_absolute_test_id("parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path): {
         "test": get_absolute_test_id(
             "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path
         ),

@@ -461,9 +426,7 @@ single_parametrize_tests_expected_execution_output = {
 doc_test_path = TEST_DATA_PATH / "text_docstring.txt"
 doctest_pytest_expected_execution_output = {
     get_absolute_test_id("text_docstring.txt::text_docstring.txt", doc_test_path): {
-        "test": get_absolute_test_id(
-            "text_docstring.txt::text_docstring.txt", doc_test_path
-        ),
+        "test": get_absolute_test_id("text_docstring.txt::text_docstring.txt", doc_test_path),
         "outcome": "success",
         "message": None,
         "traceback": None,

@@ -477,10 +440,7 @@ dual_level_nested_folder_top_path = (
     TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py"
 )
 dual_level_nested_folder_bottom_path = (
-    TEST_DATA_PATH
-    / "dual_level_nested_folder"
-    / "nested_folder_one"
-    / "test_bottom_folder.py"
+    TEST_DATA_PATH / "dual_level_nested_folder" / "nested_folder_one" / "test_bottom_folder.py"
 )
 unittest_folder_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py"
 unittest_folder_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py"

@@ -494,26 +454,20 @@ no_test_ids_pytest_execution_expected_output = {
         "subtest": None,
     },
     get_absolute_test_id("test_top_function_t", dual_level_nested_folder_top_path): {
-        "test": get_absolute_test_id(
-            "test_top_function_t", dual_level_nested_folder_top_path
-        ),
+        "test": get_absolute_test_id("test_top_function_t", dual_level_nested_folder_top_path),
         "outcome": "success",
         "message": None,
         "traceback": None,
         "subtest": None,
     },
     get_absolute_test_id("test_top_function_f", dual_level_nested_folder_top_path): {
-        "test": get_absolute_test_id(
-            "test_top_function_f", dual_level_nested_folder_top_path
-        ),
+        "test": get_absolute_test_id("test_top_function_f", dual_level_nested_folder_top_path),
         "outcome": "failure",
         "message": "ERROR MESSAGE",
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "test_bottom_function_t", dual_level_nested_folder_bottom_path
-    ): {
+    get_absolute_test_id("test_bottom_function_t", dual_level_nested_folder_bottom_path): {
         "test": get_absolute_test_id(
             "test_bottom_function_t", dual_level_nested_folder_bottom_path
         ),

@@ -522,9 +476,7 @@ no_test_ids_pytest_execution_expected_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "test_bottom_function_f", dual_level_nested_folder_bottom_path
-    ): {
+    get_absolute_test_id("test_bottom_function_f", dual_level_nested_folder_bottom_path): {
         "test": get_absolute_test_id(
             "test_bottom_function_f", dual_level_nested_folder_bottom_path
         ),

@@ -533,9 +485,7 @@ no_test_ids_pytest_execution_expected_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "TestAddFunction::test_add_negative_numbers", unittest_folder_add_path
-    ): {
+    get_absolute_test_id("TestAddFunction::test_add_negative_numbers", unittest_folder_add_path): {
         "test": get_absolute_test_id(
             "TestAddFunction::test_add_negative_numbers", unittest_folder_add_path
         ),

@@ -544,9 +494,7 @@ no_test_ids_pytest_execution_expected_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "TestAddFunction::test_add_positive_numbers", unittest_folder_add_path
-    ): {
+    get_absolute_test_id("TestAddFunction::test_add_positive_numbers", unittest_folder_add_path): {
         "test": get_absolute_test_id(
             "TestAddFunction::test_add_positive_numbers", unittest_folder_add_path
         ),

@@ -606,18 +554,14 @@ test_logging_path = TEST_DATA_PATH / "test_logging.py"

 logging_test_expected_execution_output = {
     get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): {
-        "test": get_absolute_test_id(
-            "test_logging.py::test_logging2", test_logging_path
-        ),
+        "test": get_absolute_test_id("test_logging.py::test_logging2", test_logging_path),
         "outcome": "failure",
         "message": "ERROR MESSAGE",
         "traceback": None,
         "subtest": None,
     },
     get_absolute_test_id("test_logging.py::test_logging", test_logging_path): {
-        "test": get_absolute_test_id(
-            "test_logging.py::test_logging", test_logging_path
-        ),
+        "test": get_absolute_test_id("test_logging.py::test_logging", test_logging_path),
         "outcome": "success",
         "message": None,
         "traceback": None,

@@ -632,9 +576,7 @@ logging_test_expected_execution_output = {

 test_safe_clear_env_vars_path = TEST_DATA_PATH / "test_env_vars.py"
 safe_clear_env_vars_expected_execution_output = {
-    get_absolute_test_id(
-        "test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path
-    ): {
+    get_absolute_test_id("test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path): {
         "test": get_absolute_test_id(
             "test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path
         ),

@@ -643,9 +585,7 @@ safe_clear_env_vars_expected_execution_output = {
         "traceback": None,
         "subtest": None,
     },
-    get_absolute_test_id(
-        "test_env_vars.py::test_check_env", test_safe_clear_env_vars_path
-    ): {
+    get_absolute_test_id("test_env_vars.py::test_check_env", test_safe_clear_env_vars_path): {
         "test": get_absolute_test_id(
             "test_env_vars.py::test_check_env", test_safe_clear_env_vars_path
         ),
@@ -68,9 +68,7 @@ def create_server(


 def _new_sock() -> socket.socket:
-    sock: socket.socket = socket.socket(
-        socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP
-    )
+    sock: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
     options = [
         ("SOL_SOCKET", "SO_KEEPALIVE", 1),
         ("IPPROTO_TCP", "TCP_KEEPIDLE", 1),
@@ -88,9 +86,7 @@ def _new_sock() -> socket.socket:


 CONTENT_LENGTH: str = "Content-Length:"
-Env_Dict = TypedDict(
-    "Env_Dict", {"TEST_UUID": str, "TEST_PORT": str, "PYTHONPATH": str}
-)
+Env_Dict = TypedDict("Env_Dict", {"TEST_UUID": str, "TEST_PORT": str, "PYTHONPATH": str})


 def process_rpc_message(data: str) -> Tuple[Dict[str, Any], str]:
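Env_Dict above is declared with the functional TypedDict syntax. A small usage sketch under the same definition (the values below are hypothetical):

    from typing_extensions import TypedDict

    Env_Dict = TypedDict("Env_Dict", {"TEST_UUID": str, "TEST_PORT": str, "PYTHONPATH": str})

    env: Env_Dict = {
        "TEST_UUID": "0000-demo",
        "TEST_PORT": "5001",
        "PYTHONPATH": "/tmp/python_files",
    }
    # A type checker validates the keys and value types; at runtime env is a plain dict.
    assert env["TEST_PORT"] == "5001"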
@@ -133,9 +129,7 @@ def runner(args: List[str]) -> Optional[List[Dict[str, Any]]]:
     return runner_with_cwd(args, TEST_DATA_PATH)


-def runner_with_cwd(
-    args: List[str], path: pathlib.Path
-) -> Optional[List[Dict[str, Any]]]:
+def runner_with_cwd(args: List[str], path: pathlib.Path) -> Optional[List[Dict[str, Any]]]:
     """Run the pytest discovery and return the JSON data from the server."""
     process_args: List[str] = [
         sys.executable,

@@ -177,9 +171,7 @@ def runner_with_cwd(
     return process_rpc_json(result[0]) if result else None


-def _listen_on_socket(
-    listener: socket.socket, result: List[str], completed: threading.Event
-):
+def _listen_on_socket(listener: socket.socket, result: List[str], completed: threading.Event):
     """Listen on the socket for the JSON data from the server.
     Created as a separate function for clarity in threading.
     """

@@ -201,9 +193,7 @@ def _listen_on_socket(
     result.append("".join(all_data))


-def _run_test_code(
-    proc_args: List[str], proc_env, proc_cwd: str, completed: threading.Event
-):
+def _run_test_code(proc_args: List[str], proc_env, proc_cwd: str, completed: threading.Event):
     result = subprocess.run(proc_args, env=proc_env, cwd=proc_cwd)
     completed.set()
     return result
@@ -36,17 +36,13 @@ def test_import_error(tmp_path):
     temp_dir.mkdir()
     p = temp_dir / "error_pytest_import.py"
     shutil.copyfile(file_path, p)
-    actual: Optional[List[Dict[str, Any]]] = helpers.runner(
-        ["--collect-only", os.fspath(p)]
-    )
+    actual: Optional[List[Dict[str, Any]]] = helpers.runner(["--collect-only", os.fspath(p)])
     assert actual
     actual_list: List[Dict[str, Any]] = actual
     if actual_list is not None:
         assert actual_list.pop(-1).get("eot")
         for actual_item in actual_list:
-            assert all(
-                item in actual_item.keys() for item in ("status", "cwd", "error")
-            )
+            assert all(item in actual_item.keys() for item in ("status", "cwd", "error"))
             assert actual_item.get("status") == "error"
             assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH)

@@ -89,9 +85,7 @@ def test_syntax_error(tmp_path):
     if actual_list is not None:
         assert actual_list.pop(-1).get("eot")
         for actual_item in actual_list:
-            assert all(
-                item in actual_item.keys() for item in ("status", "cwd", "error")
-            )
+            assert all(item in actual_item.keys() for item in ("status", "cwd", "error"))
             assert actual_item.get("status") == "error"
             assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH)

@@ -117,9 +111,7 @@ def test_parameterized_error_collect():
     if actual_list is not None:
         assert actual_list.pop(-1).get("eot")
         for actual_item in actual_list:
-            assert all(
-                item in actual_item.keys() for item in ("status", "cwd", "error")
-            )
+            assert all(item in actual_item.keys() for item in ("status", "cwd", "error"))
             assert actual_item.get("status") == "error"
             assert actual_item.get("cwd") == os.fspath(helpers.TEST_DATA_PATH)

@@ -243,9 +235,7 @@ def test_symlink_root_dir():
                 assert actual_item.get("cwd") == os.fspath(
                     destination
                 ), f"CWD does not match: {os.fspath(destination)}"
-                assert (
-                    actual_item.get("tests") == expected
-                ), "Tests do not match expected value"
+                assert actual_item.get("tests") == expected, "Tests do not match expected value"
             except AssertionError as e:
                 # Print the actual_item in JSON format if an assertion fails
                 print(json.dumps(actual_item, indent=4))
@@ -28,9 +28,7 @@ def test_config_file():
     ]
     new_cwd = TEST_DATA_PATH / "root"
     actual = runner_with_cwd(args, new_cwd)
-    expected_const = (
-        expected_execution_test_output.config_file_pytest_expected_execution_output
-    )
+    expected_const = expected_execution_test_output.config_file_pytest_expected_execution_output
     assert actual
     actual_list: List[Dict[str, Any]] = actual
     assert actual_list.pop(-1).get("eot")

@@ -38,9 +36,7 @@ def test_config_file():
     actual_result_dict = dict()
     if actual_list is not None:
         for actual_item in actual_list:
-            assert all(
-                item in actual_item.keys() for item in ("status", "cwd", "result")
-            )
+            assert all(item in actual_item.keys() for item in ("status", "cwd", "result"))
             assert actual_item.get("status") == "success"
             assert actual_item.get("cwd") == os.fspath(new_cwd)
             actual_result_dict.update(actual_item["result"])

@@ -53,9 +49,7 @@ def test_rootdir_specified():
     args = [rd, "tests/test_a.py::test_a_function"]
     new_cwd = TEST_DATA_PATH / "root"
     actual = runner_with_cwd(args, new_cwd)
-    expected_const = (
-        expected_execution_test_output.config_file_pytest_expected_execution_output
-    )
+    expected_const = expected_execution_test_output.config_file_pytest_expected_execution_output
     assert actual
     actual_list: List[Dict[str, Any]] = actual
     assert actual_list.pop(-1).get("eot")

@@ -63,9 +57,7 @@ def test_rootdir_specified():
     actual_result_dict = dict()
     if actual_list is not None:
         for actual_item in actual_list:
-            assert all(
-                item in actual_item.keys() for item in ("status", "cwd", "result")
-            )
+            assert all(item in actual_item.keys() for item in ("status", "cwd", "result"))
             assert actual_item.get("status") == "success"
             assert actual_item.get("cwd") == os.fspath(new_cwd)
             actual_result_dict.update(actual_item["result"])

@@ -101,9 +93,7 @@ def test_syntax_error_execution(tmp_path):
     assert actual_list.pop(-1).get("eot")
     if actual_list is not None:
         for actual_item in actual_list:
-            assert all(
-                item in actual_item.keys() for item in ("status", "cwd", "error")
-            )
+            assert all(item in actual_item.keys() for item in ("status", "cwd", "error"))
             assert actual_item.get("status") == "error"
             assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH)
             error_content = actual_item.get("error")

@@ -126,9 +116,7 @@ def test_bad_id_error_execution():
     assert actual_list.pop(-1).get("eot")
     if actual_list is not None:
         for actual_item in actual_list:
-            assert all(
-                item in actual_item.keys() for item in ("status", "cwd", "error")
-            )
+            assert all(item in actual_item.keys() for item in ("status", "cwd", "error"))
             assert actual_item.get("status") == "error"
             assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH)
             error_content = actual_item.get("error")

@@ -273,9 +261,7 @@ def test_pytest_execution(test_ids, expected_const):
     actual_result_dict = dict()
     if actual_list is not None:
         for actual_item in actual_list:
-            assert all(
-                item in actual_item.keys() for item in ("status", "cwd", "result")
-            )
+            assert all(item in actual_item.keys() for item in ("status", "cwd", "result"))
             assert actual_item.get("status") == "success"
             assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH)
             actual_result_dict.update(actual_item["result"])

@@ -307,13 +293,9 @@ def test_symlink_run():
     )

     # Run pytest with the cwd being the resolved symlink path (as it will be when we run the subprocess from node).
-    actual = runner_with_cwd(
-        [f"--rootdir={os.fspath(destination)}", test_a_id], source
-    )
+    actual = runner_with_cwd([f"--rootdir={os.fspath(destination)}", test_a_id], source)

-    expected_const = (
-        expected_execution_test_output.symlink_run_expected_execution_output
-    )
+    expected_const = expected_execution_test_output.symlink_run_expected_execution_output
     assert actual
     actual_list: List[Dict[str, Any]] = actual
     if actual_list is not None:
@@ -29,9 +29,7 @@ def test_create_env(env_exists, git_ignore, install, python):
     def run_process(args, error_message):
         nonlocal run_process_called
         run_process_called = True
-        version = (
-            "12345" if python else f"{sys.version_info.major}.{sys.version_info.minor}"
-        )
+        version = "12345" if python else f"{sys.version_info.major}.{sys.version_info.minor}"
         if not env_exists:
             assert args == [
                 sys.executable,
@@ -14,9 +14,7 @@ import pytest
 import create_venv


-@pytest.mark.skipif(
-    sys.platform == "win32", reason="Windows does not have micro venv fallback."
-)
+@pytest.mark.skipif(sys.platform == "win32", reason="Windows does not have micro venv fallback.")
 def test_venv_not_installed_unix():
     importlib.reload(create_venv)
     create_venv.is_installed = lambda module: module != "venv"

@@ -43,9 +41,7 @@ def test_venv_not_installed_unix():
     assert run_process_called is True


-@pytest.mark.skipif(
-    sys.platform != "win32", reason="Windows does not have microvenv fallback."
-)
+@pytest.mark.skipif(sys.platform != "win32", reason="Windows does not have microvenv fallback.")
 def test_venv_not_installed_windows():
     importlib.reload(create_venv)
     create_venv.is_installed = lambda module: module != "venv"

@@ -106,9 +102,7 @@ def test_create_env(env_exists, git_ignore, install):
     assert run_process_called == (env_exists == "noEnv")

     # add_gitignore is called when new venv is created and git_ignore is True
-    assert add_gitignore_called == (
-        (env_exists == "noEnv") and (git_ignore == "useGitIgnore")
-    )
+    assert add_gitignore_called == ((env_exists == "noEnv") and (git_ignore == "useGitIgnore"))


 @pytest.mark.parametrize("install_type", ["requirements", "pyproject", "both"])

@@ -238,9 +232,7 @@ def test_create_venv_missing_pip():
         if "install" in args and "pip" in args:
             nonlocal run_process_called
             run_process_called = True
-            pip_pyz_path = os.fspath(
-                create_venv.CWD / create_venv.VENV_NAME / "pip.pyz"
-            )
+            pip_pyz_path = os.fspath(create_venv.CWD / create_venv.VENV_NAME / "pip.pyz")
             assert args[1:] == [pip_pyz_path, "install", "pip"]
             assert error_message == "CREATE_VENV.INSTALL_PIP_FAILED"
@@ -224,7 +224,7 @@ def generate_parse_item(pathsep):
             # dependency injection
             _normcase=normcase,
             _pathsep=pathsep,
-        )
+        ),
     )

     def _normalize_test_id(*args):

@@ -234,7 +234,7 @@ def generate_parse_item(pathsep):
             # dependency injection
             _fix_fileid=_fix_fileid,
             _pathsep=pathsep,
-        )
+        ),
     )

     def _iter_nodes(*args):

@@ -245,7 +245,7 @@ def generate_parse_item(pathsep):
             _normalize_test_id=_normalize_test_id,
             _normcase=normcase,
             _pathsep=pathsep,
-        )
+        ),
     )

     def _parse_node_id(*args):

@@ -254,7 +254,7 @@ def generate_parse_item(pathsep):
         **dict(
             # dependency injection
             _iter_nodes=_iter_nodes,
-        )
+        ),
     )

     ##########

@@ -264,7 +264,7 @@ def generate_parse_item(pathsep):
         **dict(
             # dependency injection
             _normcase=normcase,
-        )
+        ),
     )

     ##########

@@ -275,7 +275,7 @@ def generate_parse_item(pathsep):
             # dependency injection
             _normcase=normcase,
             _pathsep=pathsep,
-        )
+        ),
     )

     def _is_legacy_wrapper(*args):

@@ -284,7 +284,7 @@ def generate_parse_item(pathsep):
         **dict(
             # dependency injection
             _pathsep=pathsep,
-        )
+        ),
     )

     def _get_location(*args):

@@ -295,7 +295,7 @@ def generate_parse_item(pathsep):
             _matches_relfile=_matches_relfile,
             _is_legacy_wrapper=_is_legacy_wrapper,
             _pathsep=pathsep,
-        )
+        ),
     )

     ##########

@@ -307,7 +307,7 @@ def generate_parse_item(pathsep):
             _parse_node_id=_parse_node_id,
             _split_fspath=_split_fspath,
             _get_location=_get_location,
-        )
+        ),
     )

     return _parse_item
@@ -347,9 +347,7 @@ class DiscoverTests(unittest.TestCase):
             ("discovered.__getitem__", (0,), None),
         ]

-        parents, tests = _discovery.discover(
-            [], _pytest_main=stubpytest.main, _plugin=plugin
-        )
+        parents, tests = _discovery.discover([], _pytest_main=stubpytest.main, _plugin=plugin)

         actual_calls = unique(stub.calls, lambda k: k[0])
         expected_calls = unique(calls, lambda k: k[0])

@@ -389,9 +387,7 @@ class DiscoverTests(unittest.TestCase):
             ("discovered.__getitem__", (0,), None),
         ]

-        parents, tests = _discovery.discover(
-            [], _pytest_main=pytest.main, _plugin=plugin
-        )
+        parents, tests = _discovery.discover([], _pytest_main=pytest.main, _plugin=plugin)

         actual_calls = unique(stub.calls, lambda k: k[0])
         expected_calls = unique(calls, lambda k: k[0])

@@ -414,9 +410,7 @@ class DiscoverTests(unittest.TestCase):
             ("discovered.__getitem__", (0,), None),
         ]

-        parents, tests = _discovery.discover(
-            [], _pytest_main=pytest.main, _plugin=plugin
-        )
+        parents, tests = _discovery.discover([], _pytest_main=pytest.main, _plugin=plugin)

         actual_calls = unique(stub.calls, lambda k: k[0])
         expected_calls = unique(calls, lambda k: k[0])
@@ -845,9 +839,7 @@ class CollectorTests(unittest.TestCase):
                     func="SpamTests.test_spam",
                     sub=None,
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(relfile), 13
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(relfile), 13),
                 markers=None,
                 parentid="./x/y/z/test_eggs.py::SpamTests",
             ),

@@ -920,9 +912,7 @@ class CollectorTests(unittest.TestCase):
                     relfile=adapter_util.fix_relpath(doctestfile),
                     func=None,
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(doctestfile), 1
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(doctestfile), 1),
                 markers=[],
                 parentid="./x/test_doctest.txt",
             ),

@@ -972,9 +962,7 @@ class CollectorTests(unittest.TestCase):
                     relfile=adapter_util.fix_relpath(relfile),
                     func=None,
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(relfile), 13
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(relfile), 13),
                 markers=[],
                 parentid="./x/y/z/test_eggs.py",
             ),

@@ -999,9 +987,7 @@ class CollectorTests(unittest.TestCase):
                     relfile=adapter_util.fix_relpath(relfile),
                     func=None,
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(relfile), 28
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(relfile), 28),
                 markers=[],
                 parentid="./x/y/z/test_eggs.py",
             ),

@@ -1062,9 +1048,7 @@ class CollectorTests(unittest.TestCase):
                     func="SpamTests.test_spam",
                     sub=["[a-[b]-c]"],
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(relfile), 13
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(relfile), 13),
                 markers=None,
                 parentid="./x/y/z/test_eggs.py::SpamTests::test_spam",
             ),

@@ -1126,9 +1110,7 @@ class CollectorTests(unittest.TestCase):
                     func="SpamTests.Ham.Eggs.test_spam",
                     sub=None,
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(relfile), 13
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(relfile), 13),
                 markers=None,
                 parentid="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs",
             ),

@@ -1479,9 +1461,7 @@ class CollectorTests(unittest.TestCase):
                     func="SpamTests.test_spam",
                     sub=[],
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(relfile), 13
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(relfile), 13),
                 markers=None,
                 parentid="./x/y/z/test_eggs.py::SpamTests",
             ),

@@ -1537,9 +1517,7 @@ class CollectorTests(unittest.TestCase):
                     func="SpamTests.test_spam",
                     sub=[],
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(relfile), 13
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(relfile), 13),
                 markers=None,
                 parentid="./x/y/z/test_eggs.py::SpamTests",
             ),

@@ -1607,9 +1585,7 @@ class CollectorTests(unittest.TestCase):
                     func="SpamTests.test_spam",
                     sub=None,
                 ),
-                source="{}:{}".format(
-                    adapter_util.fix_relpath(srcfile), 13
-                ),
+                source="{}:{}".format(adapter_util.fix_relpath(srcfile), 13),
                 markers=None,
                 parentid="./x/y/z/test_eggs.py::SpamTests",
             ),
@@ -252,9 +252,7 @@ class DiscoveredTestsTests(unittest.TestCase):
             # missing "./":
             parentid=relfile,
         )
-        expected = test._replace(
-            id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)
-        )
+        expected = test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid))
         discovered = DiscoveredTests()

         before = list(discovered), discovered.parents
@@ -48,9 +48,7 @@ def _run_adapter(cmd, tool, *cliargs, **kwargs):
         argv.insert(4, "--no-hide-stdio")
         kwds["stderr"] = subprocess.STDOUT
     argv.append("--cache-clear")
-    print(
-        "running {!r}".format(" ".join(arg.rpartition(CWD + "/")[-1] for arg in argv))
-    )
+    print("running {!r}".format(" ".join(arg.rpartition(CWD + "/")[-1] for arg in argv)))
     output = subprocess.check_output(argv, universal_newlines=True, **kwds)
     return output

@@ -259,9 +259,7 @@ class FilePathTests(unittest.TestCase):
         )
         for fileid, rootdir, _os_path, expected in tests:
             pathsep = _os_path.sep
-            with self.subTest(
-                r"for {} (with rootdir {!r}): {!r}".format(pathsep, rootdir, fileid)
-            ):
+            with self.subTest(r"for {} (with rootdir {!r}): {!r}".format(pathsep, rootdir, fileid)):
                 fixed = fix_fileid(
                     fileid,
                     rootdir,
@@ -7,7 +7,6 @@ import unittest
 # and the two tests with their outcome as "success".


-
 class DiscoveryA(unittest.TestCase):
     """Test class for the two file pattern test. It is pattern *test.py

@@ -19,4 +18,4 @@ class DiscoveryA(unittest.TestCase):
         self.assertGreater(2, 1)

     def test_two_a(self) -> None:
-        self.assertNotEqual(2, 1)
+        self.assertNotEqual(2, 1)
@@ -6,10 +6,10 @@ import unittest
 # The test_ids_multiple_runs function should return a dictionary with a "success" status,
 # and the two tests with their outcome as "success".

-class DiscoveryB(unittest.TestCase):

+class DiscoveryB(unittest.TestCase):
     def test_one_b(self) -> None:
         self.assertGreater(2, 1)

     def test_two_b(self) -> None:
-        self.assertNotEqual(2, 1)
+        self.assertNotEqual(2, 1)
@@ -6,17 +6,16 @@ import unittest
 # files in the same folder. The cwd is set to the parent folder. This should return
 # a dictionary with a "success" status and the two tests with their outcome as "success".

+
 def add(a, b):
     return a + b


 class TestAddFunction(unittest.TestCase):
-
-    def test_add_positive_numbers(self):
+    def test_add_positive_numbers(self):
         result = add(2, 3)
         self.assertEqual(result, 5)

-
-    def test_add_negative_numbers(self):
+    def test_add_negative_numbers(self):
         result = add(-2, -3)
-        self.assertEqual(result, -5)
+        self.assertEqual(result, -5)
@@ -6,6 +6,7 @@ import unittest
 # files in the same folder. The cwd is set to the parent folder. This should return
 # a dictionary with a "success" status and the two tests with their outcome as "success".

+
 def subtract(a, b):
     return a - b


@@ -15,7 +16,6 @@ class TestSubtractFunction(unittest.TestCase):
         result = subtract(5, 3)
         self.assertEqual(result, 2)

-
     def test_subtract_negative_numbers(self):
         result = subtract(-2, -3)
-        self.assertEqual(result, 1)
+        self.assertEqual(result, 1)
@@ -14,20 +14,14 @@ skip_unittest_folder_discovery_output = {
     "type_": TestNodeTypeEnum.folder,
     "children": [
         {
-            "path": os.fspath(
-                TEST_DATA_PATH / "unittest_skip" / "unittest_skip_file.py"
-            ),
+            "path": os.fspath(TEST_DATA_PATH / "unittest_skip" / "unittest_skip_file.py"),
             "name": "unittest_skip_file.py",
             "type_": TestNodeTypeEnum.file,
             "children": [],
-            "id_": os.fspath(
-                TEST_DATA_PATH / "unittest_skip" / "unittest_skip_file.py"
-            ),
+            "id_": os.fspath(TEST_DATA_PATH / "unittest_skip" / "unittest_skip_file.py"),
         },
         {
-            "path": os.fspath(
-                TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py"
-            ),
+            "path": os.fspath(TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py"),
             "name": "unittest_skip_function.py",
             "type_": TestNodeTypeEnum.file,
             "children": [

@@ -41,30 +35,22 @@ skip_unittest_folder_discovery_output = {
                 {
                     "name": "testadd1",
                     "path": os.fspath(
-                        TEST_DATA_PATH
-                        / "unittest_skip"
-                        / "unittest_skip_function.py"
+                        TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py"
                     ),
                     "lineno": "13",
                     "type_": TestNodeTypeEnum.test,
                     "id_": os.fspath(
-                        TEST_DATA_PATH
-                        / "unittest_skip"
-                        / "unittest_skip_function.py"
+                        TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py"
                     )
                     + "\\SimpleTest\\testadd1",
                     "runID": "unittest_skip_function.SimpleTest.testadd1",
                 }
             ],
-            "id_": os.fspath(
-                TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py"
-            )
+            "id_": os.fspath(TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py")
             + "\\SimpleTest",
         }
     ],
-    "id_": os.fspath(
-        TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py"
-    ),
+    "id_": os.fspath(TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py"),
     },
 ],
 "id_": os.fspath(TEST_DATA_PATH / "unittest_skip"),
@@ -88,9 +74,7 @@ complex_tree_expected_output = {
             "name": "test_outer_folder",
             "type_": TestNodeTypeEnum.folder,
             "path": os.fsdecode(
-                pathlib.PurePath(
-                    TEST_DATA_PATH, "utils_complex_tree", "test_outer_folder"
-                )
+                pathlib.PurePath(TEST_DATA_PATH, "utils_complex_tree", "test_outer_folder")
             ),
             "children": [
                 {

@@ -145,9 +129,7 @@ complex_tree_expected_output = {
                 },
             ],
             "id_": os.fsdecode(
-                pathlib.PurePath(
-                    TEST_DATA_PATH, "utils_complex_tree", "test_outer_folder"
-                )
+                pathlib.PurePath(TEST_DATA_PATH, "utils_complex_tree", "test_outer_folder")
             ),
         }
     ],
@@ -107,22 +107,14 @@ def test_simple_discovery() -> None:
                 "path": file_path,
                 "type_": TestNodeTypeEnum.test,
                 "lineno": "14",
-                "id_": file_path
-                + "\\"
-                + "DiscoverySimple"
-                + "\\"
-                + "test_one",
+                "id_": file_path + "\\" + "DiscoverySimple" + "\\" + "test_one",
             },
             {
                 "name": "test_two",
                 "path": file_path,
                 "type_": TestNodeTypeEnum.test,
                 "lineno": "17",
-                "id_": file_path
-                + "\\"
-                + "DiscoverySimple"
-                + "\\"
-                + "test_two",
+                "id_": file_path + "\\" + "DiscoverySimple" + "\\" + "test_two",
             },
         ],
         "id_": file_path + "\\" + "DiscoverySimple",

@@ -170,22 +162,14 @@ def test_simple_discovery_with_top_dir_calculated() -> None:
                 "path": file_path,
                 "type_": TestNodeTypeEnum.test,
                 "lineno": "14",
-                "id_": file_path
-                + "\\"
-                + "DiscoverySimple"
-                + "\\"
-                + "test_one",
+                "id_": file_path + "\\" + "DiscoverySimple" + "\\" + "test_one",
             },
             {
                 "name": "test_two",
                 "path": file_path,
                 "type_": TestNodeTypeEnum.test,
                 "lineno": "17",
-                "id_": file_path
-                + "\\"
-                + "DiscoverySimple"
-                + "\\"
-                + "test_two",
+                "id_": file_path + "\\" + "DiscoverySimple" + "\\" + "test_two",
             },
         ],
         "id_": file_path + "\\" + "DiscoverySimple",

@@ -253,22 +237,14 @@ def test_error_discovery() -> None:
                 "path": file_path,
                 "type_": TestNodeTypeEnum.test,
                 "lineno": "14",
-                "id_": file_path
-                + "\\"
-                + "DiscoveryErrorTwo"
-                + "\\"
-                + "test_one",
+                "id_": file_path + "\\" + "DiscoveryErrorTwo" + "\\" + "test_one",
             },
             {
                 "name": "test_two",
                 "path": file_path,
                 "type_": TestNodeTypeEnum.test,
                 "lineno": "17",
-                "id_": file_path
-                + "\\"
-                + "DiscoveryErrorTwo"
-                + "\\"
-                + "test_two",
+                "id_": file_path + "\\" + "DiscoveryErrorTwo" + "\\" + "test_two",
             },
         ],
         "id_": file_path + "\\" + "DiscoveryErrorTwo",
@@ -98,9 +98,7 @@ def discover_tests(
         # Get abspath of top level directory for build_test_tree.
         top_level_dir = os.path.abspath(top_level_dir)

-        tests, error = build_test_tree(
-            suite, top_level_dir
-        )  # test tree built successfully here.
+        tests, error = build_test_tree(suite, top_level_dir)  # test tree built successfully here.

     except Exception:
         error.append(traceback.format_exc())

@@ -116,9 +114,7 @@ def discover_tests(
     return payload


-def post_response(
-    payload: Union[PayloadDict, EOTPayloadDict], port: int, uuid: str
-) -> None:
+def post_response(payload: Union[PayloadDict, EOTPayloadDict], port: int, uuid: str) -> None:
     # Build the request data (it has to be a POST request or the Node side will not process it), and send it.
     addr = ("localhost", port)
     data = json.dumps(payload)
@@ -22,9 +22,7 @@ from typing_extensions import Literal, NotRequired, TypeAlias, TypedDict  # noqa
 from testing_tools import process_json_util, socket_manager  # noqa: E402
 from unittestadapter.pvsc_utils import parse_unittest_args  # noqa: E402

-ErrorType = Union[
-    Tuple[Type[BaseException], BaseException, TracebackType], Tuple[None, None, None]
-]
+ErrorType = Union[Tuple[Type[BaseException], BaseException, TracebackType], Tuple[None, None, None]]
 testPort = 0
 testUuid = 0
 START_DIR = ""

@@ -244,9 +242,7 @@ def send_run_data(raw_data, port, uuid):
     post_response(payload, port, uuid)


-def post_response(
-    payload: Union[PayloadDict, EOTPayloadDict], port: int, uuid: str
-) -> None:
+def post_response(payload: Union[PayloadDict, EOTPayloadDict], port: int, uuid: str) -> None:
     # Build the request data (it has to be a POST request or the Node side will not process it), and send it.
     addr = ("localhost", port)
     global __socket

@@ -286,9 +282,7 @@ if __name__ == "__main__":
     ) = parse_unittest_args(argv[index + 1 :])

     run_test_ids_port = os.environ.get("RUN_TEST_IDS_PORT")
-    run_test_ids_port_int = (
-        int(run_test_ids_port) if run_test_ids_port is not None else 0
-    )
+    run_test_ids_port_int = int(run_test_ids_port) if run_test_ids_port is not None else 0
     if run_test_ids_port_int == 0:
         print("Error[vscode-unittest]: RUN_TEST_IDS_PORT env var is not set.")
     # get data from socket

@@ -309,9 +303,7 @@ if __name__ == "__main__":

         try:
             # Try to parse the buffer as JSON
-            test_ids_from_buffer = process_json_util.process_rpc_json(
-                buffer.decode("utf-8")
-            )
+            test_ids_from_buffer = process_json_util.process_rpc_json(buffer.decode("utf-8"))
             # Clear the buffer as complete JSON object is received
             buffer = b""
             break
@@ -91,9 +91,7 @@ def build_test_node(path: str, name: str, type_: TestNodeTypeEnum) -> TestNode:
     return {"path": path, "name": name, "type_": type_, "children": [], "id_": id_gen}


-def get_child_node(
-    name: str, path: str, type_: TestNodeTypeEnum, root: TestNode
-) -> TestNode:
+def get_child_node(name: str, path: str, type_: TestNodeTypeEnum, root: TestNode) -> TestNode:
     """Find a child node in a test tree given its name, type and path. If the node doesn't exist, create it.
     Path is required to distinguish between nodes with the same name and type."""
     try:

@@ -154,9 +152,7 @@ def build_test_tree(
     """
     error = []
     directory_path = pathlib.PurePath(top_level_directory)
-    root = build_test_node(
-        top_level_directory, directory_path.name, TestNodeTypeEnum.folder
-    )
+    root = build_test_node(top_level_directory, directory_path.name, TestNodeTypeEnum.folder)

     for test_case in get_test_case(suite):
         test_id = test_case.id()

@@ -167,9 +163,7 @@ def build_test_tree(
             class_name = f"{components[-1]}.py"
             # Find/build class node.
             file_path = os.fsdecode(os.path.join(directory_path, class_name))
-            current_node = get_child_node(
-                class_name, file_path, TestNodeTypeEnum.file, root
-            )
+            current_node = get_child_node(class_name, file_path, TestNodeTypeEnum.file, root)
         else:
             # Get the static test path components: filename, class name and function name.
             components = test_id.split(".")
@@ -170,9 +170,7 @@ class VsTestResult(unittest.TextTestResult):

     def addSubTest(self, test, subtest, err):
         super(VsTestResult, self).addSubTest(test, subtest, err)
-        self.sendResult(
-            test, "subtest-passed" if err is None else "subtest-failed", err, subtest
-        )
+        self.sendResult(test, "subtest-passed" if err is None else "subtest-failed", err, subtest)

     def sendResult(self, test, outcome, trace=None, subtest=None):
         if _channel is not None:

@@ -224,9 +222,7 @@ def main():
         prog="visualstudio_py_testlauncher",
         usage="Usage: %prog [<option>] <test names>... ",
     )
-    parser.add_option(
-        "--debug", action="store_true", help="Whether debugging the unit tests"
-    )
+    parser.add_option("--debug", action="store_true", help="Whether debugging the unit tests")
     parser.add_option(
         "-x",
         "--mixed-mode",

@@ -241,9 +237,7 @@ def main():
         action="append",
         help="specifies a test to run",
     )
-    parser.add_option(
-        "--testFile", type="str", help="Fully qualitified path to file name"
-    )
+    parser.add_option("--testFile", type="str", help="Fully qualitified path to file name")
     parser.add_option(
         "-c", "--coverage", type="str", help="enable code coverage and specify filename"
     )

@@ -269,9 +263,7 @@ def main():
         help="Verbose output (0 none, 1 (no -v) simple, 2 (-v) full)",
     )
     parser.add_option("--uf", "--failfast", type="str", help="Stop on first failure")
-    parser.add_option(
-        "--uc", "--catch", type="str", help="Catch control-C and display results"
-    )
+    parser.add_option("--uc", "--catch", type="str", help="Catch control-C and display results")
     (opts, _) = parser.parse_args()

     sys.path[0] = os.getcwd()

@@ -281,9 +273,7 @@ def main():
     except Exception:
         with contextlib.suppress(Exception):
             signal.signal(signal.SIGTERM, signal_handler)
-    _channel = _IpcChannel(
-        socket.create_connection(("127.0.0.1", opts.result_port)), stopTests
-    )
+    _channel = _IpcChannel(socket.create_connection(("127.0.0.1", opts.result_port)), stopTests)
     sys.stdout = _TestOutput(sys.stdout, is_stdout=True)
     sys.stderr = _TestOutput(sys.stderr, is_stdout=False)

@@ -366,9 +356,7 @@ def main():
             verbosity=opts.uvInt, resultclass=VsTestResult, failfast=True
         )
     else:
-        runner = unittest.TextTestRunner(
-            verbosity=opts.uvInt, resultclass=VsTestResult
-        )
+        runner = unittest.TextTestRunner(verbosity=opts.uvInt, resultclass=VsTestResult)
     result = runner.run(tests)
     if _channel is not None:
         _channel.close()
@@ -9,9 +9,7 @@ os.system("color")

 parser = argparse.ArgumentParser(description="Parse a test log into its parts")
 parser.add_argument("testlog", type=str, nargs=1, help="Log to parse")
-parser.add_argument(
-    "--testoutput", action="store_true", help="Show all failures and passes"
-)
+parser.add_argument("--testoutput", action="store_true", help="Show all failures and passes")
 parser.add_argument(
     "--split",
     action="store_true",

@@ -39,7 +37,7 @@ def printTestOutput(testlog):
     with p.open() as f:
         for line in readStripLines(f):
             stripped = line.strip()
-            if len(stripped) > 2 and stripped[0] == "\x1B" and stripped[1] == "[":
+            if len(stripped) > 2 and stripped[0] == "\x1b" and stripped[1] == "[":
                 print(line.rstrip())  # Should be a test line as it has color encoding

@@ -118,13 +118,9 @@ def pytest_exception_interact(node, call, report):
         if call.excinfo and call.excinfo.typename != "AssertionError":
             if report.outcome == "skipped" and "SkipTest" in str(call):
                 return
-            ERRORS.append(
-                call.excinfo.exconly() + "\n Check Python Test Logs for more details."
-            )
+            ERRORS.append(call.excinfo.exconly() + "\n Check Python Test Logs for more details.")
         else:
-            ERRORS.append(
-                report.longreprtext + "\n Check Python Test Logs for more details."
-            )
+            ERRORS.append(report.longreprtext + "\n Check Python Test Logs for more details.")
     else:
         # If during execution, send this data that the given node failed.
         report_value = "error"

@@ -445,10 +441,7 @@ def build_test_tree(session: pytest.Session) -> TestNode:
                 test_file_node = create_file_node(parent_module)
                 file_nodes_dict[parent_module] = test_file_node
             # Check if the class is already a child of the file node.
-            if (
-                test_class_node is not None
-                and test_class_node not in test_file_node["children"]
-            ):
+            if test_class_node is not None and test_class_node not in test_file_node["children"]:
                 test_file_node["children"].append(test_class_node)
         elif hasattr(test_case, "callspec"):  # This means it is a parameterized test.
             function_name: str = ""

@@ -463,9 +456,7 @@ def build_test_tree(session: pytest.Session) -> TestNode:
                 ERRORS.append(
                     f"unable to find original name for {test_case.name} with parameterization detected."
                 )
-                raise VSCodePytestError(
-                    "Unable to find original name for parameterized test case"
-                )
+                raise VSCodePytestError("Unable to find original name for parameterized test case")
             except KeyError:
                 function_test_case: TestNode = create_parameterized_function_node(
                     function_name, get_node_path(test_case), test_case.nodeid

@@ -522,13 +513,9 @@ def build_nested_folders(
     while iterator_path != get_node_path(session):
         curr_folder_name = iterator_path.name
         try:
-            curr_folder_node: TestNode = created_files_folders_dict[
-                os.fspath(iterator_path)
-            ]
+            curr_folder_node: TestNode = created_files_folders_dict[os.fspath(iterator_path)]
         except KeyError:
-            curr_folder_node: TestNode = create_folder_node(
-                curr_folder_name, iterator_path
-            )
+            curr_folder_node: TestNode = create_folder_node(curr_folder_name, iterator_path)
         created_files_folders_dict[os.fspath(iterator_path)] = curr_folder_node
         if prev_folder_node not in curr_folder_node["children"]:
             curr_folder_node["children"].append(prev_folder_node)
@@ -25,9 +25,7 @@ if __name__ == "__main__":
     # Get the rest of the args to run with pytest.
     args = sys.argv[1:]
     run_test_ids_port = os.environ.get("RUN_TEST_IDS_PORT")
-    run_test_ids_port_int = (
-        int(run_test_ids_port) if run_test_ids_port is not None else 0
-    )
+    run_test_ids_port_int = int(run_test_ids_port) if run_test_ids_port is not None else 0
     if run_test_ids_port_int == 0:
         print("Error[vscode-pytest]: RUN_TEST_IDS_PORT env var is not set.")
     test_ids_from_buffer = []

@@ -48,9 +46,7 @@ if __name__ == "__main__":

         try:
             # Try to parse the buffer as JSON
-            test_ids_from_buffer = process_json_util.process_rpc_json(
-                buffer.decode("utf-8")
-            )
+            test_ids_from_buffer = process_json_util.process_rpc_json(buffer.decode("utf-8"))
             # Clear the buffer as complete JSON object is received
             buffer = b""
             print("Received JSON data in run script")