Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1770705 [wpt PR 34162] - BiDi `script.evaluate`, a=testonly
Automatic update from web-platform-tests
BiDi script.evaluate (#34162)

* BiDi `script` module.
* `script.evaluate` BiDi method.
* Tests for happy case scenarios.
* Tests for invalid params.

Not included:
* Switching tests to BiDi command `script.evaluate`.
--
wpt-commits: d4dd7c0f23325fd8533aa78d294d5bf47244b32d
wpt-pr: 34162
This commit is contained in:
Parent: 8e38901023
Commit: f83e3932e1
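For reference, a minimal sketch of how the new command is driven from a test, assuming the wpt-provided `bidi_session` and `top_context` fixtures (the same names the tests below use); it mirrors the happy-case test added in this commit:

    import pytest

    from webdriver.bidi.modules.script import ContextTarget

    # A minimal sketch, assuming the wpt `bidi_session` / `top_context` fixtures.
    @pytest.mark.asyncio
    async def test_evaluate_sketch(bidi_session, top_context):
        result = await bidi_session.script.evaluate(
            expression="1 + 2",
            target=ContextTarget(top_context["context"]))
        # The remote end returns a typed value.
        assert result == {"type": "number", "value": 3}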
@@ -92,6 +92,7 @@ class BidiSession:
         # For each module, have a property representing that module
         self.session = modules.Session(self)
         self.browsing_context = modules.BrowsingContext(self)
+        self.script = modules.Script(self)

     @property
     def event_loop(self):
@@ -2,3 +2,4 @@
 from .session import Session
 from .browsing_context import BrowsingContext
+from .script import Script
@@ -0,0 +1,56 @@
from enum import Enum
from typing import Any, Optional, Mapping, MutableMapping, Union, Dict

from ._module import BidiModule, command


class ScriptEvaluateResultException(Exception):
    def __init__(self, result: Mapping[str, Any]):
        self.result = result
        super().__init__("Script execution failed.")


class OwnershipModel(Enum):
    NONE = "none"
    ROOT = "root"


class RealmTarget(Dict[str, Any]):
    def __init__(self, realm: str):
        dict.__init__(self, realm=realm)


class ContextTarget(Dict[str, Any]):
    def __init__(self, context: str, sandbox: Optional[str] = None):
        if sandbox is None:
            dict.__init__(self, context=context)
        else:
            dict.__init__(self, context=context, sandbox=sandbox)


Target = Union[RealmTarget, ContextTarget]


class Script(BidiModule):
    @command
    def evaluate(self,
                 expression: str,
                 target: Target,
                 await_promise: Optional[bool] = None,
                 result_ownership: Optional[OwnershipModel] = None) -> Mapping[str, Any]:
        params: MutableMapping[str, Any] = {
            "expression": expression,
            "target": target,
        }

        if await_promise is not None:
            params["awaitPromise"] = await_promise
        if result_ownership is not None:
            params["resultOwnership"] = result_ownership
        return params

    @evaluate.result
    def _evaluate(self, result: Mapping[str, Any]) -> Any:
        if "result" not in result:
            raise ScriptEvaluateResultException(result)
        return result["result"]
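In this module, `evaluate` only assembles the `script.evaluate` command parameters and the `@evaluate.result` hook unpacks the response. A sketch of that mapping, with illustrative values that are not part of the commit:

    # Illustrative only; the context id is a placeholder and the optional
    # fields are included only when the corresponding argument is passed.
    params = {
        "expression": "1 + 2",
        "target": {"context": "<context id>"},  # ContextTarget
        "awaitPromise": True,
    }
    # A successful response carries a "result" entry, which _evaluate returns;
    # anything else raises ScriptEvaluateResultException.
    response = {"result": {"type": "number", "value": 3}}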
@@ -0,0 +1,35 @@
from typing import Any


# Compares 2 objects recursively.
# Actual value can have more keys as part of the forwards-compat design.
# Expected value can be a callable delegate, asserting the value.
def recursive_compare(expected: Any, actual: Any) -> None:
    if callable(expected):
        expected(actual)
        return

    assert type(expected) == type(actual)
    if type(expected) is list:
        assert len(expected) == len(actual)
        for index, _ in enumerate(expected):
            recursive_compare(expected[index], actual[index])
        return

    if type(expected) is dict:
        # Actual dict can have more keys as part of the forwards-compat design.
        assert expected.keys() <= actual.keys(), \
            f"Key set should be present: {set(expected.keys()) - set(actual.keys())}"
        for key in expected.keys():
            recursive_compare(expected[key], actual[key])
        return

    assert expected == actual


def any_string(actual: Any) -> None:
    assert isinstance(actual, str)


def any_int(actual: Any) -> None:
    assert isinstance(actual, int)
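Because `recursive_compare` accepts callable delegates in the expected value, exact matches and shape checks can be mixed in one assertion. A small illustrative example (values are made up, not from the commit):

    # Passes: the callable delegate checks the type, the extra key on the
    # actual dict is tolerated by the forwards-compat rule above.
    recursive_compare(
        {"type": "string", "value": any_string},
        {"type": "string", "value": "anything", "extraKey": "ignored"})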
@@ -6,7 +6,7 @@ pytestmark = pytest.mark.asyncio

 @pytest.mark.parametrize("value", ["tab", "window"])
-async def test_type(bidi_session, current_session, wait_for_event, value):
+async def test_type(bidi_session, current_session, value):
     contexts = await bidi_session.browsing_context.get_tree(max_depth=0)
     assert len(contexts) == 1
@@ -0,0 +1,27 @@
from typing import Any

from .. import any_int, any_string


def any_stack_trace(actual: Any) -> None:
    assert type(actual) is dict
    assert "callFrames" in actual
    assert type(actual["callFrames"]) is list
    for actual_frame in actual["callFrames"]:
        any_stack_frame(actual_frame)


def any_stack_frame(actual: Any) -> None:
    assert type(actual) is dict

    assert "columnNumber" in actual
    any_int(actual["columnNumber"])

    assert "functionName" in actual
    any_string(actual["functionName"])

    assert "lineNumber" in actual
    any_int(actual["lineNumber"])

    assert "url" in actual
    any_string(actual["url"])
@@ -0,0 +1,160 @@
import pytest
from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException

from ... import any_int, any_string, recursive_compare
from .. import any_stack_trace


@pytest.mark.asyncio
async def test_eval(bidi_session, top_context):
    result = await bidi_session.script.evaluate(
        expression="1 + 2",
        target=ContextTarget(top_context["context"]))

    assert result == {
        "type": "number",
        "value": 3}


@pytest.mark.asyncio
async def test_params_expression_invalid_script(bidi_session, top_context):
    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.evaluate(
            expression='))) !!@@## some invalid JS script (((',
            target=ContextTarget(top_context["context"]))
    recursive_compare({
        'realm': any_string,
        'exceptionDetails': {
            'columnNumber': any_int,
            'exception': {
                'handle': any_string,
                'type': 'error'},
            'lineNumber': any_int,
            'stackTrace': any_stack_trace,
            'text': any_string}},
        exception.value.result)


@pytest.mark.asyncio
async def test_exception(bidi_session, top_context):
    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.evaluate(
            expression="throw Error('SOME_ERROR_MESSAGE')",
            target=ContextTarget(top_context["context"]))

    recursive_compare({
        'realm': any_string,
        'exceptionDetails': {
            'columnNumber': any_int,
            'exception': {
                'handle': any_string,
                'type': 'error'},
            'lineNumber': any_int,
            'stackTrace': any_stack_trace,
            'text': any_string}},
        exception.value.result)


@pytest.mark.asyncio
async def test_interact_with_dom(bidi_session, top_context):
    result = await bidi_session.script.evaluate(
        expression="'window.location.href: ' + window.location.href",
        target=ContextTarget(top_context["context"]))

    assert result == {
        "type": "string",
        "value": "window.location.href: about:blank"}


@pytest.mark.asyncio
async def test_resolved_promise_with_await_promise_false(bidi_session,
                                                         top_context):
    result = await bidi_session.script.evaluate(
        expression="Promise.resolve('SOME_RESOLVED_RESULT')",
        target=ContextTarget(top_context["context"]),
        await_promise=False)

    recursive_compare({
        "type": "promise",
        "handle": any_string},
        result)


@pytest.mark.asyncio
async def test_resolved_promise_with_await_promise_true(bidi_session,
                                                        top_context):
    result = await bidi_session.script.evaluate(
        expression="Promise.resolve('SOME_RESOLVED_RESULT')",
        target=ContextTarget(top_context["context"]),
        await_promise=True)

    assert result == {
        "type": "string",
        "value": "SOME_RESOLVED_RESULT"}


@pytest.mark.asyncio
async def test_resolved_promise_with_await_promise_omitted(bidi_session,
                                                           top_context):
    result = await bidi_session.script.evaluate(
        expression="Promise.resolve('SOME_RESOLVED_RESULT')",
        target=ContextTarget(top_context["context"]))

    assert result == {
        "type": "string",
        "value": "SOME_RESOLVED_RESULT"}


@pytest.mark.asyncio
async def test_rejected_promise_with_await_promise_false(bidi_session,
                                                         top_context):
    result = await bidi_session.script.evaluate(
        expression="Promise.reject('SOME_REJECTED_RESULT')",
        target=ContextTarget(top_context["context"]),
        await_promise=False)

    recursive_compare({
        "type": "promise",
        "handle": any_string},
        result)


@pytest.mark.asyncio
async def test_rejected_promise_with_await_promise_true(bidi_session,
                                                        top_context):
    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.evaluate(
            expression="Promise.reject('SOME_REJECTED_RESULT')",
            target=ContextTarget(top_context["context"]),
            await_promise=True)

    recursive_compare({
        'realm': any_string,
        'exceptionDetails': {
            'columnNumber': any_int,
            'exception': {'type': 'string',
                          'value': 'SOME_REJECTED_RESULT'},
            'lineNumber': any_int,
            'stackTrace': any_stack_trace,
            'text': any_string}},
        exception.value.result)


@pytest.mark.asyncio
async def test_rejected_promise_with_await_promise_omitted(bidi_session,
                                                           top_context):
    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.evaluate(
            expression="Promise.reject('SOME_REJECTED_RESULT')",
            target=ContextTarget(top_context["context"]))

    recursive_compare({
        'realm': any_string,
        'exceptionDetails': {
            'columnNumber': any_int,
            'exception': {'type': 'string',
                          'value': 'SOME_REJECTED_RESULT'},
            'lineNumber': any_int,
            'stackTrace': any_stack_trace,
            'text': any_string}},
        exception.value.result)
@@ -0,0 +1,78 @@
import pytest
import webdriver.bidi.error as error

from webdriver.bidi.modules.script import ContextTarget, RealmTarget

pytestmark = pytest.mark.asyncio


@pytest.mark.parametrize("target", [None, False, 42, {}, []])
async def test_params_target_invalid_type(bidi_session, target):
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.evaluate(
            expression="1 + 2",
            target=target)


@pytest.mark.parametrize("context", [None, False, 42, {}, []])
async def test_params_context_invalid_type(bidi_session, context):
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.evaluate(
            expression="1 + 2",
            target=ContextTarget(context))


@pytest.mark.parametrize("sandbox", [False, 42, {}, []])
async def test_params_sandbox_invalid_type(bidi_session, top_context, sandbox):
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.evaluate(
            expression="1 + 2",
            target=ContextTarget(top_context["context"], sandbox))


async def test_params_context_unknown(bidi_session):
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.script.evaluate(
            expression="1 + 2",
            target=ContextTarget("_UNKNOWN_"))


@pytest.mark.parametrize("realm", [None, False, 42, {}, []])
async def test_params_realm_invalid_type(bidi_session, realm):
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.evaluate(
            expression="1 + 2",
            target=RealmTarget(realm))


async def test_params_realm_unknown(bidi_session):
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.script.evaluate(
            expression="1 + 2",
            target=RealmTarget("_UNKNOWN_"))


@pytest.mark.parametrize("expression", [None, False, 42, {}, []])
async def test_params_expression_invalid_type(bidi_session, top_context, expression):
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.evaluate(
            expression=expression,
            target=ContextTarget(top_context["context"]))


@pytest.mark.parametrize("await_promise", ["False", 0, 42, {}, []])
async def test_params_await_promise_invalid_type(bidi_session, top_context, await_promise):
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.evaluate(
            expression="1 + 2",
            await_promise=await_promise,
            target=ContextTarget(top_context["context"]))


@pytest.mark.parametrize("result_ownership", [False, "_UNKNOWN_", 42, {}, []])
async def test_params_result_ownership_invalid_value(bidi_session, top_context, result_ownership):
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.evaluate(
            expression="1 + 2",
            result_ownership=result_ownership,
            target=ContextTarget(top_context["context"]))