thunderbolt: Changes for v5.12 merge window
This includes the following Thunderbolt/USB4 changes for the v5.12 merge
window:

* Start lane initialization after sleep for Thunderbolt 3 compatible devices
* Add support for de-authorizing PCIe tunnels (software based connection manager only)
* Add support for new ACPI 6.4 USB4 _OSC
* Allow disabling XDomain protocol
* Add support for new SL5 security level
* Clean up kernel-docs to pass W=1 builds
* A couple of cleanups and minor fixes

All these have been in linux-next without reported issues.

-----BEGIN PGP SIGNATURE-----

iQJUBAABCgA+FiEEVTdhRGBbNzLrSUBaAP2fSd+ZWKAFAmAib9IgHG1pa2Eud2Vz
dGVyYmVyZ0BsaW51eC5pbnRlbC5jb20ACgkQAP2fSd+ZWKAykw/+JfXClHYVlqRh
IH7kFkD4nA7g7359PpmSLxRBzvkivz7w66BqNtp6GhIF1oGRtDJ5t5ufgwdYX3Ld
tqH2Glhw9gV5EmeqDzq3TbLzAU+zm9a5bVE3vwQbxpgPGtDigKpDjqUobGFooDaB
F8EX3H3rwI3i+1/S1vBAZbuJqND0cuDM2yQNYJ//Dch2wnZ/Vy6darzY+GpJYSWi
ZlzWsvhfbo1V3n1PphzRkvzcMbVcJriVfwJihtY1h3H6RnnD4pqQJ7e1ec+4Pgdv
fql3lJg0M02rzTSYLyLwuMOojZA+gPqLWb3Fj5pkfjcKoneq1BNTsl4xRzDpqwP3
/DeOPE+TO6SAsAkRksyH5W9nvaYGmLy0r9JREkkzdhH41RML4ueEjstaJyfWn1N+
bX2Ba+5A9pIWKYbwfbNTZPyHk9r94IdDDXzjXFLSeOTcCwMf97XX4sgrbPHedxh6
VAjIrYxuSCTRRN1G6xtPI9aer5wh7e01W71WK3SJhvpqs092E0m0p4gN/nm6yW1Z
SSJg36uqp0egjU/NxsyNvB1ePQFiUkO7WIow9pVBU+US7vSuW9IXbC59lAx0XUE5
tonIF7zAiz1hLp6DZRYfTX7R9xbyshep0uhESceCcIsF5winsgUXL76QEMN0fEjf
wmUG/HWrOd2O/IH8704rfnu7ajxATGM=
=pzr4
-----END PGP SIGNATURE-----

Merge tag 'thunderbolt-for-v5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v5.12 merge window

This includes the following Thunderbolt/USB4 changes for the v5.12 merge
window:

* Start lane initialization after sleep for Thunderbolt 3 compatible devices
* Add support for de-authorizing PCIe tunnels (software based connection manager only)
* Add support for new ACPI 6.4 USB4 _OSC
* Allow disabling XDomain protocol
* Add support for new SL5 security level
* Clean up kernel-docs to pass W=1 builds
* A couple of cleanups and minor fixes

All these have been in linux-next without reported issues.

* tag 'thunderbolt-for-v5.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (27 commits)
  thunderbolt: Add support for native USB4 _OSC
  ACPI: Add support for native USB4 control _OSC
  ACPI: Execute platform _OSC also with query bit clear
  thunderbolt: Allow disabling XDomain protocol
  thunderbolt: Add support for PCIe tunneling disabled (SL5)
  thunderbolt: dma_test: Drop unnecessary include
  thunderbolt: Add clarifying comments about USB4 terms router and adapter
  thunderbolt: switch: Fix kernel-doc descriptions of non-static functions
  thunderbolt: nhi: Fix kernel-doc descriptions of non-static functions
  thunderbolt: path: Fix kernel-doc descriptions of non-static functions
  thunderbolt: eeprom: Fix kernel-doc descriptions of non-static functions
  thunderbolt: ctl: Fix kernel-doc descriptions of non-static functions
  thunderbolt: switch: Fix function name in the header
  thunderbolt: tunnel: Fix misspelling of 'receive_path'
  thunderbolt: icm: Fix a couple of formatting issues
  thunderbolt: switch: Demote a bunch of non-conformant kernel-doc headers
  thunderbolt: tb: Kernel-doc function headers should document their parameters
  thunderbolt: nhi: Demote some non-conformant kernel-doc headers
  thunderbolt: xdomain: Fix 'tb_unregister_service_driver()'s 'drv' param
  thunderbolt: eeprom: Demote non-conformant kernel-doc headers to standard comment blocks
  ...
Commit 0a25669ba8
@@ -49,6 +49,15 @@ Description:    Holds a comma separated list of device unique_ids that
                If a device is authorized automatically during boot its
                boot attribute is set to 1.

What:           /sys/bus/thunderbolt/devices/.../domainX/deauthorization
Date:           May 2021
KernelVersion:  5.12
Contact:        Mika Westerberg <mika.westerberg@linux.intel.com>
Description:    This attribute tells whether the system supports
                de-authorization of devices. Value of 1 means user can
                de-authorize PCIe tunnel by writing 0 to authorized
                attribute under each device.

What:           /sys/bus/thunderbolt/devices/.../domainX/iommu_dma_protection
Date:           Mar 2019
KernelVersion:  4.21
@@ -76,6 +85,8 @@ Description:    This attribute holds current Thunderbolt security level
                usbonly  Automatically tunnel USB controller of the
                         connected Thunderbolt dock (and Display Port). All
                         PCIe links downstream of the dock are removed.
                nopcie   USB4 system where PCIe tunneling is disabled from
                         the BIOS.
                =======  ==================================================

What:           /sys/bus/thunderbolt/devices/.../authorized
@@ -84,22 +95,25 @@ KernelVersion:  4.13
Contact:        thunderbolt-software@lists.01.org
Description:    This attribute is used to authorize Thunderbolt devices
                after they have been connected. If the device is not
                authorized, no devices such as PCIe and Display port are
                available to the system.
                authorized, no PCIe devices are available to the system.

                Contents of this attribute will be 0 when the device is not
                yet authorized.

                Possible values are supported:

                ==  ===========================================
                ==  ===================================================
                0   The device will be de-authorized (only supported if
                    deauthorization attribute under domain contains 1)
                1   The device will be authorized and connected
                ==  ===========================================
                ==  ===================================================

                When key attribute contains 32 byte hex string the possible
                values are:

                ==  ========================================================
                0   The device will be de-authorized (only supported if
                    deauthorization attribute under domain contains 1)
                1   The 32 byte hex string is added to the device NVM and
                    the device is authorized.
                2   Send a challenge based on the 32 byte hex string. If the
@@ -47,6 +47,9 @@ be DMA masters and thus read contents of the host memory without CPU and OS
knowing about it. There are ways to prevent this by setting up an IOMMU but
it is not always available for various reasons.

Some USB4 systems have a BIOS setting to disable PCIe tunneling. This is
treated as another security level (nopcie).

The security levels are as follows:

none
@@ -77,6 +80,10 @@ The security levels are as follows:
  Display Port in a dock. All PCIe links downstream of the dock are
  removed.

nopcie
  PCIe tunneling is disabled/forbidden from the BIOS. Available in some
  USB4 systems.

The current security level can be read from
``/sys/bus/thunderbolt/devices/domainX/security`` where ``domainX`` is
the Thunderbolt domain the host controller manages. There is typically
@@ -153,6 +160,22 @@ If the user still wants to connect the device they can either approve
the device without a key or write a new key and write 1 to the
``authorized`` file to get the new key stored on the device NVM.

De-authorizing devices
----------------------
It is possible to de-authorize devices by writing ``0`` to their
``authorized`` attribute. This requires support from the connection
manager implementation and can be checked by reading domain
``deauthorization`` attribute. If it reads ``1`` then the feature is
supported.

When a device is de-authorized the PCIe tunnel from the parent device
PCIe downstream (or root) port to the device PCIe upstream port is torn
down. This is essentially the same thing as PCIe hot-remove and the PCIe
topology in question will not be accessible anymore until the device is
authorized again. If there is storage such as NVMe or similar involved,
there is a risk for data loss if the filesystem on that storage is not
properly shut down. You have been warned!

DMA protection utilizing IOMMU
------------------------------
Recent systems from 2018 and forward with Thunderbolt ports may natively
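For illustration only, a minimal user-space sketch of the de-authorization flow described above (not part of the patch; the domain "domain0" and device "0-1" paths are examples and error handling is kept minimal):

#include <stdio.h>

int main(void)
{
        FILE *f;
        int supported = 0;

        /* Does the connection manager support de-authorization? */
        f = fopen("/sys/bus/thunderbolt/devices/domain0/deauthorization", "r");
        if (!f)
                return 1;
        fscanf(f, "%d", &supported);
        fclose(f);

        if (!supported) {
                fprintf(stderr, "de-authorization not supported\n");
                return 1;
        }

        /* Tear down the PCIe tunnel of device 0-1 (acts like PCIe hot-remove). */
        f = fopen("/sys/bus/thunderbolt/devices/0-1/authorized", "w");
        if (!f)
                return 1;
        fputs("0", f);
        fclose(f);
        return 0;
}

As the documentation warns, any filesystem on storage behind the tunnel should be unmounted first.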
@@ -281,10 +281,16 @@ bool osc_sb_apei_support_acked;
bool osc_pc_lpi_support_confirmed;
EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);

/*
 * ACPI 6.4 Operating System Capabilities for USB.
 */
bool osc_sb_native_usb4_support_confirmed;
EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);

static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_support(void)
static void acpi_bus_osc_negotiate_platform_control(void)
{
        u32 capbuf[2];
        u32 capbuf[2], *capbuf_ret;
        struct acpi_osc_context context = {
                .uuid_str = sb_uuid_str,
                .rev = 1,
@@ -317,21 +323,109 @@ static void acpi_bus_osc_support(void)
        if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
                capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;

        if (IS_ENABLED(CONFIG_USB4))
                capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_NATIVE_USB4_SUPPORT;

        if (!ghes_disable)
                capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
                return;
        if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
                u32 *capbuf_ret = context.ret.pointer;
                if (context.ret.length > OSC_SUPPORT_DWORD) {
                        osc_sb_apei_support_acked =
                                capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
                        osc_pc_lpi_support_confirmed =
                                capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
                }

        if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
                return;

        capbuf_ret = context.ret.pointer;
        if (context.ret.length <= OSC_SUPPORT_DWORD) {
                kfree(context.ret.pointer);
                return;
        }
        /* do we need to check other returned cap? Sounds no */

        /*
         * Now run _OSC again with query flag clear and with the caps
         * supported by both the OS and the platform.
         */
        capbuf[OSC_QUERY_DWORD] = 0;
        capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
        kfree(context.ret.pointer);

        if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
                return;

        capbuf_ret = context.ret.pointer;
        if (context.ret.length > OSC_SUPPORT_DWORD) {
                osc_sb_apei_support_acked =
                        capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
                osc_pc_lpi_support_confirmed =
                        capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
                osc_sb_native_usb4_support_confirmed =
                        capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
        }

        kfree(context.ret.pointer);
}

/*
 * Native control of USB4 capabilities. If any of the tunneling bits is
 * set it means OS is in control and we use software based connection
 * manager.
 */
u32 osc_sb_native_usb4_control;
EXPORT_SYMBOL_GPL(osc_sb_native_usb4_control);

static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
{
        printk(KERN_INFO PREFIX "%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
               (bits & OSC_USB_USB3_TUNNELING) ? '+' : '-',
               (bits & OSC_USB_DP_TUNNELING) ? '+' : '-',
               (bits & OSC_USB_PCIE_TUNNELING) ? '+' : '-',
               (bits & OSC_USB_XDOMAIN) ? '+' : '-');
}

static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";
static void acpi_bus_osc_negotiate_usb_control(void)
{
        u32 capbuf[3];
        struct acpi_osc_context context = {
                .uuid_str = sb_usb_uuid_str,
                .rev = 1,
                .cap.length = sizeof(capbuf),
                .cap.pointer = capbuf,
        };
        acpi_handle handle;
        acpi_status status;
        u32 control;

        if (!osc_sb_native_usb4_support_confirmed)
                return;

        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
                return;

        control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
                  OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN;

        capbuf[OSC_QUERY_DWORD] = 0;
        capbuf[OSC_SUPPORT_DWORD] = 0;
        capbuf[OSC_CONTROL_DWORD] = control;

        status = acpi_run_osc(handle, &context);
        if (ACPI_FAILURE(status))
                return;

        if (context.ret.length != sizeof(capbuf)) {
                printk(KERN_INFO PREFIX "USB4 _OSC: returned invalid length buffer\n");
                goto out_free;
        }

        osc_sb_native_usb4_control =
                control & ((u32 *)context.ret.pointer)[OSC_CONTROL_DWORD];

        acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
        acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
                                osc_sb_native_usb4_control);

out_free:
        kfree(context.ret.pointer);
}

/* --------------------------------------------------------------------------
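As a side note, a minimal sketch of how the bits negotiated above can be consumed; the function name below is hypothetical, and the thunderbolt driver's tb_acpi_*() helpers added later in this merge are the real consumers:

#include <linux/acpi.h>

/* Hypothetical helper: true only if the OS was granted PCIe tunneling control. */
static bool example_usb4_pcie_native(void)
{
        /* Both the platform-wide USB4 support bit and the PCIe control bit matter. */
        return osc_sb_native_usb4_support_confirmed &&
               (osc_sb_native_usb4_control & OSC_USB_PCIE_TUNNELING);
}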
@@ -1168,7 +1262,8 @@ static int __init acpi_bus_init(void)
         * _OSC method may exist in module level code,
         * so it must be run after ACPI_FULL_INITIALIZATION
         */
        acpi_bus_osc_support();
        acpi_bus_osc_negotiate_platform_control();
        acpi_bus_osc_negotiate_usb_control();

        /*
         * _PDC control method may load dynamic SSDT tables,
@@ -115,3 +115,68 @@ void tb_acpi_add_links(struct tb_nhi *nhi)
        if (ACPI_FAILURE(status))
                dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
}

/**
 * tb_acpi_is_native() - Did the platform grant native TBT/USB4 control
 *
 * Returns %true if the platform granted OS native control over
 * TBT/USB4. In this case software based connection manager can be used,
 * otherwise there is firmware based connection manager running.
 */
bool tb_acpi_is_native(void)
{
        return osc_sb_native_usb4_support_confirmed &&
               osc_sb_native_usb4_control;
}

/**
 * tb_acpi_may_tunnel_usb3() - Is USB3 tunneling allowed by the platform
 *
 * When software based connection manager is used, this function
 * returns %true if platform allows native USB3 tunneling.
 */
bool tb_acpi_may_tunnel_usb3(void)
{
        if (tb_acpi_is_native())
                return osc_sb_native_usb4_control & OSC_USB_USB3_TUNNELING;
        return true;
}

/**
 * tb_acpi_may_tunnel_dp() - Is DisplayPort tunneling allowed by the platform
 *
 * When software based connection manager is used, this function
 * returns %true if platform allows native DP tunneling.
 */
bool tb_acpi_may_tunnel_dp(void)
{
        if (tb_acpi_is_native())
                return osc_sb_native_usb4_control & OSC_USB_DP_TUNNELING;
        return true;
}

/**
 * tb_acpi_may_tunnel_pcie() - Is PCIe tunneling allowed by the platform
 *
 * When software based connection manager is used, this function
 * returns %true if platform allows native PCIe tunneling.
 */
bool tb_acpi_may_tunnel_pcie(void)
{
        if (tb_acpi_is_native())
                return osc_sb_native_usb4_control & OSC_USB_PCIE_TUNNELING;
        return true;
}

/**
 * tb_acpi_is_xdomain_allowed() - Are XDomain connections allowed
 *
 * When software based connection manager is used, this function
 * returns %true if platform allows XDomain connections.
 */
bool tb_acpi_is_xdomain_allowed(void)
{
        if (tb_acpi_is_native())
                return osc_sb_native_usb4_control & OSC_USB_XDOMAIN;
        return true;
}
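A sketch of how a software connection manager is expected to consult these helpers before setting up a tunnel; this mirrors the tb.c and usb4.c changes further down in this merge, and the function below is illustrative only:

/* Illustrative only: skip DP tunnel creation when the platform forbids it. */
static int example_maybe_tunnel_dp(struct tb *tb)
{
        if (!tb_acpi_may_tunnel_dp()) {
                tb_dbg(tb, "DP tunneling disabled by the platform\n");
                return 0;
        }
        /* ... find a DP IN/OUT adapter pair and create the tunnel here ... */
        return 0;
}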
@ -178,7 +178,7 @@ int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
|
|||
|
||||
/**
|
||||
* tb_switch_find_cap() - Find switch capability
|
||||
* @sw Switch to find the capability for
|
||||
* @sw: Switch to find the capability for
|
||||
* @cap: Capability to look
|
||||
*
|
||||
* Returns offset to start of capability or %-ENOENT if no such
|
||||
|
|
|
@ -20,7 +20,17 @@
|
|||
#define TB_CTL_RETRIES 4
|
||||
|
||||
/**
|
||||
* struct tb_cfg - thunderbolt control channel
|
||||
* struct tb_ctl - Thunderbolt control channel
|
||||
* @nhi: Pointer to the NHI structure
|
||||
* @tx: Transmit ring
|
||||
* @rx: Receive ring
|
||||
* @frame_pool: DMA pool for control messages
|
||||
* @rx_packets: Received control messages
|
||||
* @request_queue_lock: Lock protecting @request_queue
|
||||
* @request_queue: List of outstanding requests
|
||||
* @running: Is the control channel running at the moment
|
||||
* @callback: Callback called when hotplug message is received
|
||||
* @callback_data: Data passed to @callback
|
||||
*/
|
||||
struct tb_ctl {
|
||||
struct tb_nhi *nhi;
|
||||
|
@ -338,7 +348,7 @@ static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
|
|||
tb_ctl_pkg_free(pkg);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_cfg_tx() - transmit a packet on the control channel
|
||||
*
|
||||
* len must be a multiple of four.
|
||||
|
@ -375,7 +385,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
|
|||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
|
||||
*/
|
||||
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
|
||||
|
@ -602,6 +612,9 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
|
|||
|
||||
/**
|
||||
* tb_ctl_alloc() - allocate a control channel
|
||||
* @nhi: Pointer to NHI
|
||||
* @cb: Callback called for plug events
|
||||
* @cb_data: Data passed to @cb
|
||||
*
|
||||
* cb will be invoked once for every hot plug event.
|
||||
*
|
||||
|
@ -649,6 +662,7 @@ err:
|
|||
|
||||
/**
|
||||
* tb_ctl_free() - free a control channel
|
||||
* @ctl: Control channel to free
|
||||
*
|
||||
* Must be called after tb_ctl_stop.
|
||||
*
|
||||
|
@ -677,6 +691,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
|
|||
|
||||
/**
|
||||
* tb_cfg_start() - start/resume the control channel
|
||||
* @ctl: Control channel to start
|
||||
*/
|
||||
void tb_ctl_start(struct tb_ctl *ctl)
|
||||
{
|
||||
|
@ -691,7 +706,8 @@ void tb_ctl_start(struct tb_ctl *ctl)
|
|||
}
|
||||
|
||||
/**
|
||||
* control() - pause the control channel
|
||||
* tb_ctrl_stop() - pause the control channel
|
||||
* @ctl: Control channel to stop
|
||||
*
|
||||
* All invocations of ctl->callback will have finished after this method
|
||||
* returns.
|
||||
|
@ -784,6 +800,9 @@ static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
|
|||
|
||||
/**
|
||||
* tb_cfg_reset() - send a reset packet and wait for a response
|
||||
* @ctl: Control channel pointer
|
||||
* @route: Router string for the router to send reset
|
||||
* @timeout_msec: Timeout in ms how long to wait for the response
|
||||
*
|
||||
* If the switch at route is incorrectly configured then we will not receive a
|
||||
* reply (even though the switch will reset). The caller should check for
|
||||
|
@ -820,9 +839,17 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
|
|||
}
|
||||
|
||||
/**
|
||||
* tb_cfg_read() - read from config space into buffer
|
||||
* tb_cfg_read_raw() - read from config space into buffer
|
||||
* @ctl: Pointer to the control channel
|
||||
* @buffer: Buffer where the data is read
|
||||
* @route: Route string of the router
|
||||
* @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
|
||||
* @space: Config space selector
|
||||
* @offset: Dword word offset of the register to start reading
|
||||
* @length: Number of dwords to read
|
||||
* @timeout_msec: Timeout in ms how long to wait for the response
|
||||
*
|
||||
* Offset and length are in dwords.
|
||||
* Reads from router config space without translating the possible error.
|
||||
*/
|
||||
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
|
||||
u64 route, u32 port, enum tb_cfg_space space,
|
||||
|
@ -884,8 +911,16 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
|
|||
|
||||
/**
|
||||
* tb_cfg_write() - write from buffer into config space
|
||||
* @ctl: Pointer to the control channel
|
||||
* @buffer: Data to write
|
||||
* @route: Route string of the router
|
||||
* @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
|
||||
* @space: Config space selector
|
||||
* @offset: Dword word offset of the register to start writing
|
||||
* @length: Number of dwords to write
|
||||
* @timeout_msec: Timeout in ms how long to wait for the response
|
||||
*
|
||||
* Offset and length are in dwords.
|
||||
* Writes to router config space without translating the possible error.
|
||||
*/
|
||||
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
|
||||
u64 route, u32 port, enum tb_cfg_space space,
|
||||
|
@ -1022,6 +1057,8 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
|
|||
|
||||
/**
|
||||
* tb_cfg_get_upstream_port() - get upstream port number of switch at route
|
||||
* @ctl: Pointer to the control channel
|
||||
* @route: Route string of the router
|
||||
*
|
||||
* Reads the first dword from the switches TB_CFG_SWITCH config area and
|
||||
* returns the port number from which the reply originated.
|
||||
|
|
|
@ -335,6 +335,8 @@ static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
|
|||
/* Write the block to MAIL_DATA registers */
|
||||
ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
|
||||
dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
|
||||
|
||||
|
|
|
@ -7,7 +7,6 @@
|
|||
* Mika Westerberg <mika.westerberg@linux.intel.com>
|
||||
*/
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -299,14 +298,12 @@ static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
|
|||
tf->frame.size = 0; /* means 4096 */
|
||||
tf->dma_test = dt;
|
||||
|
||||
tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
|
||||
tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE, GFP_KERNEL);
|
||||
if (!tf->data) {
|
||||
kfree(tf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memcpy(tf->data, dma_test_pattern, DMA_TEST_FRAME_SIZE);
|
||||
|
||||
dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dma_dev, dma_addr)) {
|
||||
|
|
|
@ -118,6 +118,7 @@ static const char * const tb_security_names[] = {
|
|||
[TB_SECURITY_SECURE] = "secure",
|
||||
[TB_SECURITY_DPONLY] = "dponly",
|
||||
[TB_SECURITY_USBONLY] = "usbonly",
|
||||
[TB_SECURITY_NOPCIE] = "nopcie",
|
||||
};
|
||||
|
||||
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
|
||||
|
@ -238,6 +239,22 @@ err_free_str:
|
|||
}
|
||||
static DEVICE_ATTR_RW(boot_acl);
|
||||
|
||||
static ssize_t deauthorization_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
const struct tb *tb = container_of(dev, struct tb, dev);
|
||||
bool deauthorization = false;
|
||||
|
||||
/* Only meaningful if authorization is supported */
|
||||
if (tb->security_level == TB_SECURITY_USER ||
|
||||
tb->security_level == TB_SECURITY_SECURE)
|
||||
deauthorization = !!tb->cm_ops->disapprove_switch;
|
||||
|
||||
return sprintf(buf, "%d\n", deauthorization);
|
||||
}
|
||||
static DEVICE_ATTR_RO(deauthorization);
|
||||
|
||||
static ssize_t iommu_dma_protection_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
|
@ -267,6 +284,7 @@ static DEVICE_ATTR_RO(security);
|
|||
|
||||
static struct attribute *domain_attrs[] = {
|
||||
&dev_attr_boot_acl.attr,
|
||||
&dev_attr_deauthorization.attr,
|
||||
&dev_attr_iommu_dma_protection.attr,
|
||||
&dev_attr_security.attr,
|
||||
NULL,
|
||||
|
@ -289,7 +307,7 @@ static umode_t domain_attr_is_visible(struct kobject *kobj,
|
|||
return attr->mode;
|
||||
}
|
||||
|
||||
static struct attribute_group domain_attr_group = {
|
||||
static const struct attribute_group domain_attr_group = {
|
||||
.is_visible = domain_attr_is_visible,
|
||||
.attrs = domain_attrs,
|
||||
};
|
||||
|
@ -394,7 +412,9 @@ static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
|
|||
switch (type) {
|
||||
case TB_CFG_PKG_XDOMAIN_REQ:
|
||||
case TB_CFG_PKG_XDOMAIN_RESP:
|
||||
return tb_xdomain_handle_request(tb, type, buf, size);
|
||||
if (tb_is_xdomain_enabled())
|
||||
return tb_xdomain_handle_request(tb, type, buf, size);
|
||||
break;
|
||||
|
||||
default:
|
||||
tb->cm_ops->handle_event(tb, type, buf, size);
|
||||
|
@ -441,6 +461,9 @@ int tb_domain_add(struct tb *tb)
|
|||
goto err_ctl_stop;
|
||||
}
|
||||
|
||||
tb_dbg(tb, "security level set to %s\n",
|
||||
tb_security_names[tb->security_level]);
|
||||
|
||||
ret = device_add(&tb->dev);
|
||||
if (ret)
|
||||
goto err_ctl_stop;
|
||||
|
@ -601,14 +624,31 @@ int tb_domain_runtime_resume(struct tb *tb)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_domain_disapprove_switch() - Disapprove switch
|
||||
* @tb: Domain the switch belongs to
|
||||
* @sw: Switch to disapprove
|
||||
*
|
||||
* This will disconnect PCIe tunnel from parent to this @sw.
|
||||
*
|
||||
* Return: %0 on success and negative errno in case of failure.
|
||||
*/
|
||||
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
|
||||
{
|
||||
if (!tb->cm_ops->disapprove_switch)
|
||||
return -EPERM;
|
||||
|
||||
return tb->cm_ops->disapprove_switch(tb, sw);
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_domain_approve_switch() - Approve switch
|
||||
* @tb: Domain the switch belongs to
|
||||
* @sw: Switch to approve
|
||||
*
|
||||
* This will approve switch by connection manager specific means. In
|
||||
* case of success the connection manager will create tunnels for all
|
||||
* supported protocols.
|
||||
* case of success the connection manager will create PCIe tunnel from
|
||||
* parent to @sw.
|
||||
*/
|
||||
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
|
||||
{
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include "tb.h"
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_eeprom_ctl_write() - write control word
|
||||
*/
|
||||
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
|
||||
|
@ -20,7 +20,7 @@ static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
|
|||
return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_eeprom_ctl_write() - read control word
|
||||
*/
|
||||
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
|
||||
|
@ -33,7 +33,7 @@ enum tb_eeprom_transfer {
|
|||
TB_EEPROM_OUT,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_eeprom_active - enable rom access
|
||||
*
|
||||
* WARNING: Always disable access after usage. Otherwise the controller will
|
||||
|
@ -62,7 +62,7 @@ static int tb_eeprom_active(struct tb_switch *sw, bool enable)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_eeprom_transfer - transfer one bit
|
||||
*
|
||||
* If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
|
||||
|
@ -90,7 +90,7 @@ static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
|
|||
return tb_eeprom_ctl_write(sw, ctl);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_eeprom_out - write one byte to the bus
|
||||
*/
|
||||
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
|
||||
|
@ -110,7 +110,7 @@ static int tb_eeprom_out(struct tb_switch *sw, u8 val)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_eeprom_in - read one byte from the bus
|
||||
*/
|
||||
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
|
||||
|
@ -131,7 +131,7 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_eeprom_get_drom_offset - get drom offset within eeprom
|
||||
*/
|
||||
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
|
||||
|
@ -162,7 +162,7 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_eeprom_read_n - read count bytes from offset into val
|
||||
*/
|
||||
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
|
||||
|
@ -279,7 +279,9 @@ struct tb_drom_entry_port {
|
|||
|
||||
|
||||
/**
|
||||
* tb_drom_read_uid_only - read uid directly from drom
|
||||
* tb_drom_read_uid_only() - Read UID directly from DROM
|
||||
* @sw: Router whose UID to read
|
||||
* @uid: UID is placed here
|
||||
*
|
||||
* Does not use the cached copy in sw->drom. Used during resume to check switch
|
||||
* identity.
|
||||
|
@ -374,7 +376,7 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_drom_parse_entries - parse the linked list of drom entries
|
||||
*
|
||||
* Drom must have been copied to sw->drom.
|
||||
|
@ -410,7 +412,7 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
|
||||
*/
|
||||
static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
|
||||
|
@ -520,7 +522,14 @@ static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
|
|||
}
|
||||
|
||||
/**
|
||||
* tb_drom_read - copy drom to sw->drom and parse it
|
||||
* tb_drom_read() - Copy DROM to sw->drom and parse it
|
||||
* @sw: Router whose DROM to read and parse
|
||||
*
|
||||
* This function reads router DROM and if successful parses the entries and
|
||||
* populates the fields in @sw accordingly. Can be called for any router
|
||||
* generation.
|
||||
*
|
||||
* Returns %0 in case of success and negative errno otherwise.
|
||||
*/
|
||||
int tb_drom_read(struct tb_switch *sw)
|
||||
{
|
||||
|
|
|
@ -85,8 +85,8 @@ struct usb4_switch_nvm_auth {
|
|||
* @set_uuid: Set UUID for the root switch (optional)
|
||||
* @device_connected: Handle device connected ICM message
|
||||
* @device_disconnected: Handle device disconnected ICM message
|
||||
* @xdomain_connected - Handle XDomain connected ICM message
|
||||
* @xdomain_disconnected - Handle XDomain disconnected ICM message
|
||||
* @xdomain_connected: Handle XDomain connected ICM message
|
||||
* @xdomain_disconnected: Handle XDomain disconnected ICM message
|
||||
* @rtd3_veto: Handle RTD3 veto notification ICM message
|
||||
*/
|
||||
struct icm {
|
||||
|
@ -1701,10 +1701,12 @@ static void icm_handle_notification(struct work_struct *work)
|
|||
icm->device_disconnected(tb, n->pkg);
|
||||
break;
|
||||
case ICM_EVENT_XDOMAIN_CONNECTED:
|
||||
icm->xdomain_connected(tb, n->pkg);
|
||||
if (tb_is_xdomain_enabled())
|
||||
icm->xdomain_connected(tb, n->pkg);
|
||||
break;
|
||||
case ICM_EVENT_XDOMAIN_DISCONNECTED:
|
||||
icm->xdomain_disconnected(tb, n->pkg);
|
||||
if (tb_is_xdomain_enabled())
|
||||
icm->xdomain_disconnected(tb, n->pkg);
|
||||
break;
|
||||
case ICM_EVENT_RTD3_VETO:
|
||||
icm->rtd3_veto(tb, n->pkg);
|
||||
|
|
|
@ -158,6 +158,41 @@ void tb_lc_unconfigure_xdomain(struct tb_port *port)
|
|||
tb_lc_set_xdomain_configured(port, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_lc_start_lane_initialization() - Start lane initialization
|
||||
* @port: Device router lane 0 adapter
|
||||
*
|
||||
* Starts lane initialization for @port after the router resumed from
|
||||
* sleep. Should be called for those downstream lane adapters that were
|
||||
* not connected (tb_lc_configure_port() was not called) before sleep.
|
||||
*
|
||||
* Returns %0 in success and negative errno in case of failure.
|
||||
*/
|
||||
int tb_lc_start_lane_initialization(struct tb_port *port)
|
||||
{
|
||||
struct tb_switch *sw = port->sw;
|
||||
int ret, cap;
|
||||
u32 ctrl;
|
||||
|
||||
if (!tb_route(sw))
|
||||
return 0;
|
||||
|
||||
if (sw->generation < 2)
|
||||
return 0;
|
||||
|
||||
cap = find_port_lc_cap(port);
|
||||
if (cap < 0)
|
||||
return cap;
|
||||
|
||||
ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ctrl |= TB_LC_SX_CTRL_SLI;
|
||||
|
||||
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
|
||||
}
|
||||
|
||||
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
|
||||
unsigned int flags)
|
||||
{
|
||||
|
|
|
@ -44,7 +44,7 @@ static int ring_interrupt_index(struct tb_ring *ring)
|
|||
return bit;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* ring_interrupt_active() - activate/deactivate interrupts for a single ring
|
||||
*
|
||||
* ring->nhi->lock must be held.
|
||||
|
@ -105,7 +105,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
|
|||
iowrite32(new, ring->nhi->iobase + reg);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* nhi_disable_interrupts() - disable interrupts for all rings
|
||||
*
|
||||
* Use only during init and shutdown.
|
||||
|
@ -182,7 +182,7 @@ static bool ring_empty(struct tb_ring *ring)
|
|||
return ring->head == ring->tail;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* ring_write_descriptors() - post frames from ring->queue to the controller
|
||||
*
|
||||
* ring->lock is held.
|
||||
|
@ -212,7 +212,7 @@ static void ring_write_descriptors(struct tb_ring *ring)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* ring_work() - progress completed frames
|
||||
*
|
||||
* If the ring is shutting down then all frames are marked as canceled and
|
||||
|
@ -592,6 +592,7 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
|
|||
|
||||
/**
|
||||
* tb_ring_start() - enable a ring
|
||||
* @ring: Ring to start
|
||||
*
|
||||
* Must not be invoked in parallel with tb_ring_stop().
|
||||
*/
|
||||
|
@ -667,6 +668,7 @@ EXPORT_SYMBOL_GPL(tb_ring_start);
|
|||
|
||||
/**
|
||||
* tb_ring_stop() - shutdown a ring
|
||||
* @ring: Ring to stop
|
||||
*
|
||||
* Must not be invoked from a callback.
|
||||
*
|
||||
|
@ -754,7 +756,7 @@ void tb_ring_free(struct tb_ring *ring)
|
|||
dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
|
||||
ring->hop);
|
||||
|
||||
/**
|
||||
/*
|
||||
* ring->work can no longer be scheduled (it is scheduled only
|
||||
* by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
|
||||
* to finish before freeing the ring.
|
||||
|
@ -1188,6 +1190,29 @@ static void tb_apple_add_links(struct tb_nhi *nhi)
|
|||
}
|
||||
}
|
||||
|
||||
static struct tb *nhi_select_cm(struct tb_nhi *nhi)
|
||||
{
|
||||
struct tb *tb;
|
||||
|
||||
/*
|
||||
* USB4 case is simple. If we got control of any of the
|
||||
* capabilities, we use software CM.
|
||||
*/
|
||||
if (tb_acpi_is_native())
|
||||
return tb_probe(nhi);
|
||||
|
||||
/*
|
||||
* Either firmware based CM is running (we did not get control
|
||||
* from the firmware) or this is pre-USB4 PC so try first
|
||||
* firmware CM and then fallback to software CM.
|
||||
*/
|
||||
tb = icm_probe(nhi);
|
||||
if (!tb)
|
||||
tb = tb_probe(nhi);
|
||||
|
||||
return tb;
|
||||
}
|
||||
|
||||
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
{
|
||||
struct tb_nhi *nhi;
|
||||
|
@ -1256,9 +1281,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
tb_apple_add_links(nhi);
|
||||
tb_acpi_add_links(nhi);
|
||||
|
||||
tb = icm_probe(nhi);
|
||||
if (!tb)
|
||||
tb = tb_probe(nhi);
|
||||
tb = nhi_select_cm(nhi);
|
||||
if (!tb) {
|
||||
dev_err(&nhi->pdev->dev,
|
||||
"failed to determine connection manager, aborting\n");
|
||||
|
|
|
@ -466,6 +466,7 @@ void tb_path_deactivate(struct tb_path *path)
|
|||
|
||||
/**
|
||||
* tb_path_activate() - activate a path
|
||||
* @path: Path to activate
|
||||
*
|
||||
* Activate a path starting with the last hop and iterating backwards. The
|
||||
* caller must fill path->hops before calling tb_path_activate().
|
||||
|
@ -561,6 +562,7 @@ err:
|
|||
|
||||
/**
|
||||
* tb_path_is_invalid() - check whether any ports on the path are invalid
|
||||
* @path: Path to check
|
||||
*
|
||||
* Return: Returns true if the path is invalid, false otherwise.
|
||||
*/
|
||||
|
|
|
@ -525,6 +525,8 @@ int tb_port_state(struct tb_port *port)
|
|||
|
||||
/**
|
||||
* tb_wait_for_port() - wait for a port to become ready
|
||||
* @port: Port to wait
|
||||
* @wait_if_unplugged: Wait also when port is unplugged
|
||||
*
|
||||
* Wait up to 1 second for a port to reach state TB_PORT_UP. If
|
||||
* wait_if_unplugged is set then we also wait if the port is in state
|
||||
|
@ -589,6 +591,8 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
|
|||
|
||||
/**
|
||||
* tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
|
||||
* @port: Port to add/remove NFC credits
|
||||
* @credits: Credits to add/remove
|
||||
*
|
||||
* Change the number of NFC credits allocated to @port by @credits. To remove
|
||||
* NFC credits pass a negative amount of credits.
|
||||
|
@ -646,6 +650,8 @@ int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
|
|||
|
||||
/**
|
||||
* tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
|
||||
* @port: Port whose counters to clear
|
||||
* @counter: Counter index to clear
|
||||
*
|
||||
* Return: Returns 0 on success or an error code on failure.
|
||||
*/
|
||||
|
@ -718,7 +724,7 @@ int tb_port_disable(struct tb_port *port)
|
|||
return __tb_port_enable(port, false);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_init_port() - initialize a port
|
||||
*
|
||||
* This is a helper method for tb_switch_alloc. Does not check or initialize
|
||||
|
@ -1065,6 +1071,17 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
|
|||
tb_port_set_link_width(port, 1);
|
||||
}
|
||||
|
||||
static int tb_port_start_lane_initialization(struct tb_port *port)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (tb_switch_is_usb4(port->sw))
|
||||
return 0;
|
||||
|
||||
ret = tb_lc_start_lane_initialization(port);
|
||||
return ret == -EINVAL ? 0 : ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_port_is_enabled() - Is the adapter port enabled
|
||||
* @port: Port to check
|
||||
|
@ -1302,7 +1319,7 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
|
|||
}
|
||||
|
||||
/**
|
||||
* reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET
|
||||
* tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
|
||||
* @sw: Switch to reset
|
||||
*
|
||||
* Return: Returns 0 on success or an error code on failure.
|
||||
|
@ -1326,7 +1343,7 @@ int tb_switch_reset(struct tb_switch *sw)
|
|||
return res.err;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_plug_events_active() - enable/disable plug events on a switch
|
||||
*
|
||||
* Also configures a sane plug_events_delay of 255ms.
|
||||
|
@ -1376,6 +1393,30 @@ static ssize_t authorized_show(struct device *dev,
|
|||
return sprintf(buf, "%u\n", sw->authorized);
|
||||
}
|
||||
|
||||
static int disapprove_switch(struct device *dev, void *not_used)
|
||||
{
|
||||
struct tb_switch *sw;
|
||||
|
||||
sw = tb_to_switch(dev);
|
||||
if (sw && sw->authorized) {
|
||||
int ret;
|
||||
|
||||
/* First children */
|
||||
ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = tb_domain_disapprove_switch(sw->tb, sw);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
sw->authorized = 0;
|
||||
kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
@ -1383,10 +1424,18 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
|
|||
if (!mutex_trylock(&sw->tb->lock))
|
||||
return restart_syscall();
|
||||
|
||||
if (sw->authorized)
|
||||
if (!!sw->authorized == !!val)
|
||||
goto unlock;
|
||||
|
||||
switch (val) {
|
||||
/* Disapprove switch */
|
||||
case 0:
|
||||
if (tb_route(sw)) {
|
||||
ret = disapprove_switch(&sw->dev, NULL);
|
||||
goto unlock;
|
||||
}
|
||||
break;
|
||||
|
||||
/* Approve switch */
|
||||
case 1:
|
||||
if (sw->key)
|
||||
|
@ -1725,7 +1774,11 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
|
|||
struct device *dev = kobj_to_dev(kobj);
|
||||
struct tb_switch *sw = tb_to_switch(dev);
|
||||
|
||||
if (attr == &dev_attr_device.attr) {
|
||||
if (attr == &dev_attr_authorized.attr) {
|
||||
if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
|
||||
sw->tb->security_level == TB_SECURITY_DPONLY)
|
||||
return 0;
|
||||
} else if (attr == &dev_attr_device.attr) {
|
||||
if (!sw->device)
|
||||
return 0;
|
||||
} else if (attr == &dev_attr_device_name.attr) {
|
||||
|
@ -1771,7 +1824,7 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
|
|||
return sw->safe_mode ? 0 : attr->mode;
|
||||
}
|
||||
|
||||
static struct attribute_group switch_group = {
|
||||
static const struct attribute_group switch_group = {
|
||||
.is_visible = switch_attr_is_visible,
|
||||
.attrs = switch_attrs,
|
||||
};
|
||||
|
@ -2606,6 +2659,7 @@ void tb_switch_remove(struct tb_switch *sw)
|
|||
|
||||
/**
|
||||
* tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
|
||||
* @sw: Router to mark unplugged
|
||||
*/
|
||||
void tb_sw_set_unplugged(struct tb_switch *sw)
|
||||
{
|
||||
|
@ -2694,8 +2748,22 @@ int tb_switch_resume(struct tb_switch *sw)
|
|||
|
||||
/* check for surviving downstream switches */
|
||||
tb_switch_for_each_port(sw, port) {
|
||||
if (!tb_port_has_remote(port) && !port->xdomain)
|
||||
if (!tb_port_has_remote(port) && !port->xdomain) {
|
||||
/*
|
||||
* For disconnected downstream lane adapters
|
||||
* start lane initialization now so we detect
|
||||
* future connects.
|
||||
*/
|
||||
if (!tb_is_upstream_port(port) && tb_port_is_null(port))
|
||||
tb_port_start_lane_initialization(port);
|
||||
continue;
|
||||
} else if (port->xdomain) {
|
||||
/*
|
||||
* Start lane initialization for XDomain so the
|
||||
* link gets re-established.
|
||||
*/
|
||||
tb_port_start_lane_initialization(port);
|
||||
}
|
||||
|
||||
if (tb_wait_for_port(port, true) <= 0) {
|
||||
tb_port_warn(port,
|
||||
|
|
|
@ -179,6 +179,9 @@ static void tb_scan_xdomain(struct tb_port *port)
|
|||
struct tb_xdomain *xd;
|
||||
u64 route;
|
||||
|
||||
if (!tb_is_xdomain_enabled())
|
||||
return;
|
||||
|
||||
route = tb_downstream_route(port);
|
||||
xd = tb_xdomain_find_by_route(tb, route);
|
||||
if (xd) {
|
||||
|
@ -434,6 +437,11 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
|
|||
struct tb_cm *tcm = tb_priv(tb);
|
||||
struct tb_tunnel *tunnel;
|
||||
|
||||
if (!tb_acpi_may_tunnel_usb3()) {
|
||||
tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
|
||||
if (!up)
|
||||
return 0;
|
||||
|
@ -509,6 +517,9 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
|
|||
struct tb_port *port;
|
||||
int ret;
|
||||
|
||||
if (!tb_acpi_may_tunnel_usb3())
|
||||
return 0;
|
||||
|
||||
if (tb_route(sw)) {
|
||||
ret = tb_tunnel_usb3(sw->tb, sw);
|
||||
if (ret)
|
||||
|
@ -528,7 +539,7 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
|
|||
|
||||
static void tb_scan_port(struct tb_port *port);
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_scan_switch() - scan for and initialize downstream switches
|
||||
*/
|
||||
static void tb_scan_switch(struct tb_switch *sw)
|
||||
|
@ -544,7 +555,7 @@ static void tb_scan_switch(struct tb_switch *sw)
|
|||
pm_runtime_put_autosuspend(&sw->dev);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_scan_port() - check for and initialize switches below port
|
||||
*/
|
||||
static void tb_scan_port(struct tb_port *port)
|
||||
|
@ -704,7 +715,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
|
|||
tb_tunnel_free(tunnel);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
|
||||
*/
|
||||
static void tb_free_invalid_tunnels(struct tb *tb)
|
||||
|
@ -719,7 +730,7 @@ static void tb_free_invalid_tunnels(struct tb *tb)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
|
||||
*/
|
||||
static void tb_free_unplugged_children(struct tb_switch *sw)
|
||||
|
@ -838,6 +849,11 @@ static void tb_tunnel_dp(struct tb *tb)
|
|||
struct tb_port *port, *in, *out;
|
||||
struct tb_tunnel *tunnel;
|
||||
|
||||
if (!tb_acpi_may_tunnel_dp()) {
|
||||
tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find pair of inactive DP IN and DP OUT adapters and then
|
||||
* establish a DP tunnel between them.
|
||||
|
@ -1002,6 +1018,25 @@ static void tb_disconnect_and_release_dp(struct tb *tb)
|
|||
}
|
||||
}
|
||||
|
||||
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
|
||||
{
|
||||
struct tb_tunnel *tunnel;
|
||||
struct tb_port *up;
|
||||
|
||||
up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
|
||||
if (WARN_ON(!up))
|
||||
return -ENODEV;
|
||||
|
||||
tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
|
||||
if (WARN_ON(!tunnel))
|
||||
return -ENODEV;
|
||||
|
||||
tb_tunnel_deactivate(tunnel);
|
||||
list_del(&tunnel->list);
|
||||
tb_tunnel_free(tunnel);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
|
||||
{
|
||||
struct tb_port *up, *down, *port;
|
||||
|
@ -1101,7 +1136,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
|
|||
|
||||
/* hotplug handling */
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_handle_hotplug() - handle hotplug event
|
||||
*
|
||||
* Executes on tb->wq.
|
||||
|
@ -1210,7 +1245,7 @@ out:
|
|||
kfree(ev);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* tb_schedule_hotplug_handler() - callback function for the control channel
|
||||
*
|
||||
* Delegates to tb_handle_hotplug.
|
||||
|
@ -1512,6 +1547,7 @@ static const struct tb_cm_ops tb_cm_ops = {
|
|||
.runtime_suspend = tb_runtime_suspend,
|
||||
.runtime_resume = tb_runtime_resume,
|
||||
.handle_event = tb_handle_event,
|
||||
.disapprove_switch = tb_disconnect_pci,
|
||||
.approve_switch = tb_tunnel_pci,
|
||||
.approve_xdomain_paths = tb_approve_xdomain_paths,
|
||||
.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
|
||||
|
@ -1526,7 +1562,11 @@ struct tb *tb_probe(struct tb_nhi *nhi)
|
|||
if (!tb)
|
||||
return NULL;
|
||||
|
||||
tb->security_level = TB_SECURITY_USER;
|
||||
if (tb_acpi_may_tunnel_pcie())
|
||||
tb->security_level = TB_SECURITY_USER;
|
||||
else
|
||||
tb->security_level = TB_SECURITY_NOPCIE;
|
||||
|
||||
tb->cm_ops = &tb_cm_ops;
|
||||
|
||||
tcm = tb_priv(tb);
|
||||
|
|
|
@ -138,6 +138,8 @@ struct tb_switch_tmu {
|
|||
*
|
||||
* When the switch is being added or removed to the domain (other
|
||||
* switches) you need to have domain lock held.
|
||||
*
|
||||
* In USB4 terminology this structure represents a router.
|
||||
*/
|
||||
struct tb_switch {
|
||||
struct device dev;
|
||||
|
@ -196,6 +198,9 @@ struct tb_switch {
|
|||
* @in_hopids: Currently allocated input HopIDs
|
||||
* @out_hopids: Currently allocated output HopIDs
|
||||
* @list: Used to link ports to DP resources list
|
||||
*
|
||||
* In USB4 terminology this structure represents an adapter (protocol or
|
||||
* lane adapter).
|
||||
*/
|
||||
struct tb_port {
|
||||
struct tb_regs_port_header config;
|
||||
|
@ -361,6 +366,7 @@ struct tb_path {
|
|||
* @handle_event: Handle thunderbolt event
|
||||
* @get_boot_acl: Get boot ACL list
|
||||
* @set_boot_acl: Set boot ACL list
|
||||
* @disapprove_switch: Disapprove switch (disconnect PCIe tunnel)
|
||||
* @approve_switch: Approve switch
|
||||
* @add_switch_key: Add key to switch
|
||||
* @challenge_switch_key: Challenge switch using key
|
||||
|
@ -394,6 +400,7 @@ struct tb_cm_ops {
|
|||
const void *buf, size_t size);
|
||||
int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
|
||||
int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
|
||||
int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw);
|
||||
int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
|
||||
int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
|
||||
int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
|
||||
|
@ -629,6 +636,7 @@ int tb_domain_thaw_noirq(struct tb *tb);
|
|||
void tb_domain_complete(struct tb *tb);
|
||||
int tb_domain_runtime_suspend(struct tb *tb);
|
||||
int tb_domain_runtime_resume(struct tb *tb);
|
||||
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw);
|
||||
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
|
||||
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
|
||||
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
|
||||
|
@ -923,6 +931,7 @@ int tb_lc_configure_port(struct tb_port *port);
|
|||
void tb_lc_unconfigure_port(struct tb_port *port);
|
||||
int tb_lc_configure_xdomain(struct tb_port *port);
|
||||
void tb_lc_unconfigure_xdomain(struct tb_port *port);
|
||||
int tb_lc_start_lane_initialization(struct tb_port *port);
|
||||
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
|
||||
int tb_lc_set_sleep(struct tb_switch *sw);
|
||||
bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
|
||||
|
@ -949,6 +958,7 @@ static inline u64 tb_downstream_route(struct tb_port *port)
|
|||
| ((u64) port->port << (port->sw->config.depth * 8));
|
||||
}
|
||||
|
||||
bool tb_is_xdomain_enabled(void);
|
||||
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
|
||||
const void *buf, size_t size);
|
||||
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
|
||||
|
@ -1034,8 +1044,20 @@ void tb_check_quirks(struct tb_switch *sw);
|
|||
|
||||
#ifdef CONFIG_ACPI
|
||||
void tb_acpi_add_links(struct tb_nhi *nhi);
|
||||
|
||||
bool tb_acpi_is_native(void);
|
||||
bool tb_acpi_may_tunnel_usb3(void);
|
||||
bool tb_acpi_may_tunnel_dp(void);
|
||||
bool tb_acpi_may_tunnel_pcie(void);
|
||||
bool tb_acpi_is_xdomain_allowed(void);
|
||||
#else
|
||||
static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }
|
||||
|
||||
static inline bool tb_acpi_is_native(void) { return true; }
|
||||
static inline bool tb_acpi_may_tunnel_usb3(void) { return true; }
|
||||
static inline bool tb_acpi_may_tunnel_dp(void) { return true; }
|
||||
static inline bool tb_acpi_may_tunnel_pcie(void) { return true; }
|
||||
static inline bool tb_acpi_is_xdomain_allowed(void) { return true; }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
|
|
@ -464,6 +464,7 @@ struct tb_regs_hop {
|
|||
#define TB_LC_SX_CTRL_L1D BIT(17)
|
||||
#define TB_LC_SX_CTRL_L2C BIT(20)
|
||||
#define TB_LC_SX_CTRL_L2D BIT(21)
|
||||
#define TB_LC_SX_CTRL_SLI BIT(29)
|
||||
#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
|
||||
#define TB_LC_SX_CTRL_SLP BIT(31)
|
||||
|
||||
|
|
|
@ -830,7 +830,7 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
|
|||
* @transmit_path: HopID used for transmitting packets
|
||||
* @receive_ring: NHI ring number used to receive packets from the
|
||||
* other domain. Set to %0 if RX path is not needed.
|
||||
* @reveive_path: HopID used for receiving packets
|
||||
* @receive_path: HopID used for receiving packets
|
||||
*
|
||||
* Return: Returns a tb_tunnel on success or NULL on failure.
|
||||
*/
|
||||
|
@ -932,12 +932,14 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
|
|||
static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
|
||||
int *consumed_up, int *consumed_down)
|
||||
{
|
||||
int pcie_enabled = tb_acpi_may_tunnel_pcie();
|
||||
|
||||
/*
|
||||
* PCIe tunneling affects the USB3 bandwidth so take that it
|
||||
* into account here.
|
||||
* PCIe tunneling, if enabled, affects the USB3 bandwidth so
|
||||
* take that it into account here.
|
||||
*/
|
||||
*consumed_up = tunnel->allocated_up * (3 + 1) / 3;
|
||||
*consumed_down = tunnel->allocated_down * (3 + 1) / 3;
|
||||
*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
|
||||
*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -331,13 +331,18 @@ int usb4_switch_setup(struct tb_switch *sw)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
|
||||
if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
|
||||
tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
|
||||
val |= ROUTER_CS_5_UTO;
|
||||
xhci = false;
|
||||
}
|
||||
|
||||
/* Only enable PCIe tunneling if the parent router supports it */
|
||||
if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
|
||||
/*
|
||||
* Only enable PCIe tunneling if the parent router supports it
|
||||
* and it is not disabled.
|
||||
*/
|
||||
if (tb_acpi_may_tunnel_pcie() &&
|
||||
tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
|
||||
val |= ROUTER_CS_5_PTO;
|
||||
/*
|
||||
* xHCI can be enabled if PCIe tunneling is supported
|
||||
|
|
|
@@ -30,6 +30,10 @@ struct xdomain_request_work {
        struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

@@ -47,6 +51,11 @@ static const uuid_t tb_xdp_uuid =
        UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
                  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
        return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
                             const struct ctl_pkg *pkg)
{
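For reference, the new parameter is registered with 0444 permissions, so it is read-only at runtime and has to be set at load time. Assuming the XDomain code is built into the thunderbolt module, disabling the protocol would look something like this (illustrative usage, not from the patch):

  thunderbolt.xdomain=0          # on the kernel command line
  modprobe thunderbolt xdomain=0 # when loading the module manually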
@ -670,7 +679,7 @@ EXPORT_SYMBOL_GPL(tb_register_service_driver);
|
|||
|
||||
/**
|
||||
* tb_unregister_service_driver() - Unregister XDomain service driver
|
||||
* @xdrv: Driver to unregister
|
||||
* @drv: Driver to unregister
|
||||
*
|
||||
* Unregisters XDomain service driver from the bus.
|
||||
*/
|
||||
|
@ -756,7 +765,7 @@ static struct attribute *tb_service_attrs[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group tb_service_attr_group = {
|
||||
static const struct attribute_group tb_service_attr_group = {
|
||||
.attrs = tb_service_attrs,
|
||||
};
|
||||
|
||||
|
@ -1239,7 +1248,7 @@ static struct attribute *xdomain_attrs[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group xdomain_attr_group = {
|
||||
static const struct attribute_group xdomain_attr_group = {
|
||||
.attrs = xdomain_attrs,
|
||||
};
|
||||
|
||||
|
|
|
@@ -546,9 +546,19 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_OSLPI_SUPPORT                    0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT         0x00001000
#define OSC_SB_GENERIC_INITIATOR_SUPPORT        0x00002000
#define OSC_SB_NATIVE_USB4_SUPPORT              0x00040000

extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
extern bool osc_sb_native_usb4_support_confirmed;

/* USB4 Capabilities */
#define OSC_USB_USB3_TUNNELING                  0x00000001
#define OSC_USB_DP_TUNNELING                    0x00000002
#define OSC_USB_PCIE_TUNNELING                  0x00000004
#define OSC_USB_XDOMAIN                         0x00000008

extern u32 osc_sb_native_usb4_control;

/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
#define OSC_PCI_EXT_CONFIG_SUPPORT              0x00000001
@@ -45,6 +45,8 @@ enum tb_cfg_pkg_type {
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *                       Thunderbolt dock (and Display Port). All PCIe
 *                       links downstream of the dock are removed.
 * @TB_SECURITY_NOPCIE:  For USB4 systems this level is used when the
 *                       PCIe tunneling is disabled from the BIOS.
 */
enum tb_security_level {
        TB_SECURITY_NONE,

@@ -52,6 +54,7 @@ enum tb_security_level {
        TB_SECURITY_SECURE,
        TB_SECURITY_DPONLY,
        TB_SECURITY_USBONLY,
        TB_SECURITY_NOPCIE,
};

/**