Merge branches 'acpi-ec', 'acpi-dma', 'acpi-processor' and 'acpi-cppc'
* acpi-ec:
  ACPI / EC: Clean up EC GPE mask flag
  ACPI: EC: Fix possible issues related to EC initialization order

* acpi-dma:
  ACPI/IORT: Add IORT named component memory address limits
  ACPI: Make acpi_dma_configure() DMA regions aware
  ACPI: Introduce DMA ranges parsing
  ACPI: Make acpi_dev_get_resources() method agnostic

* acpi-processor:
  ACPI / processor: make function acpi_processor_check_duplicates() static
  ACPI: processor: use dev_dbg() instead of dev_warn() when CPPC probe failed

* acpi-cppc:
  mailbox: pcc: Drop uninformative output during boot
Commit f928a49027
@@ -670,7 +670,7 @@ err:
 }
 
-void __init acpi_processor_check_duplicates(void)
+static void __init acpi_processor_check_duplicates(void)
 {
         /* check the correctness for all processors in ACPI namespace */
         acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
@@ -680,13 +680,36 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
         return ret ? NULL : ops;
 }
 
+static int nc_dma_get_range(struct device *dev, u64 *size)
+{
+        struct acpi_iort_node *node;
+        struct acpi_iort_named_component *ncomp;
+
+        node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+                              iort_match_node_callback, dev);
+        if (!node)
+                return -ENODEV;
+
+        ncomp = (struct acpi_iort_named_component *)node->node_data;
+
+        *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
+                        1ULL<<ncomp->memory_address_limit;
+
+        return 0;
+}
+
 /**
- * iort_set_dma_mask - Set-up dma mask for a device.
+ * iort_dma_setup() - Set-up device DMA parameters.
  *
  * @dev: device to configure
+ * @dma_addr: device DMA address result pointer
+ * @size: DMA range size result pointer
  */
-void iort_set_dma_mask(struct device *dev)
+void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
 {
+        u64 mask, dmaaddr = 0, size = 0, offset = 0;
+        int ret, msb;
+
         /*
          * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
          * setup the correct supported mask.
@@ -700,6 +723,36 @@ void iort_set_dma_mask(struct device *dev)
          */
         if (!dev->dma_mask)
                 dev->dma_mask = &dev->coherent_dma_mask;
+
+        size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+
+        if (dev_is_pci(dev))
+                ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+        else
+                ret = nc_dma_get_range(dev, &size);
+
+        if (!ret) {
+                msb = fls64(dmaaddr + size - 1);
+                /*
+                 * Round-up to the power-of-two mask or set
+                 * the mask to the whole 64-bit address space
+                 * in case the DMA region covers the full
+                 * memory window.
+                 */
+                mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
+                /*
+                 * Limit coherent and dma mask based on size
+                 * retrieved from firmware.
+                 */
+                dev->coherent_dma_mask = mask;
+                *dev->dma_mask = mask;
+        }
+
+        *dma_addr = dmaaddr;
+        *dma_size = size;
+
+        dev->dma_pfn_offset = PFN_DOWN(offset);
+        dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
 }
 
 /**
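The two hunks above add the named-component address limit lookup and fold it into the device mask computation. As a hedged illustration of that arithmetic only (not kernel code), the standalone sketch below approximates fls64() with __builtin_clzll(), substitutes UINT64_MAX for U64_MAX, and assumes a hypothetical 40-bit memory_address_limit:

/*
 * Standalone illustration only: mirrors the rounding done in
 * iort_dma_setup()/nc_dma_get_range() above.
 */
#include <stdint.h>
#include <stdio.h>

static int fls64_approx(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

static uint64_t dma_mask_for_region(uint64_t dmaaddr, uint64_t size)
{
        int msb = fls64_approx(dmaaddr + size - 1);

        /* Round up to a power-of-two mask, or cover the whole 64-bit
         * space when the region spans the full memory window. */
        return msb == 64 ? UINT64_MAX : (1ULL << msb) - 1;
}

int main(void)
{
        /* Hypothetical named component reporting a 40-bit limit:
         * nc_dma_get_range() would then yield size = 1ULL << 40. */
        uint64_t size = 1ULL << 40;

        printf("mask = %#llx\n",
               (unsigned long long)dma_mask_for_region(0, size));
        return 0;
}

With those assumptions the program prints mask = 0xffffffffff, i.e. both the coherent and the streaming DMA masks end up clamped to the limit reported by firmware.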
@@ -112,8 +112,7 @@ enum {
         EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
         EC_FLAGS_STARTED,               /* Driver is started */
         EC_FLAGS_STOPPED,               /* Driver is stopped */
-        EC_FLAGS_COMMAND_STORM,         /* GPE storms occurred to the
-                                         * current command processing */
+        EC_FLAGS_GPE_MASKED,            /* GPE masked */
 };
 
 #define ACPI_EC_COMMAND_POLL            0x01 /* Available for command byte */
@@ -425,19 +424,19 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
         wake_up(&ec->wait);
 }
 
-static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_mask_gpe(struct acpi_ec *ec)
 {
-        if (!test_bit(flag, &ec->flags)) {
+        if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
                 acpi_ec_disable_gpe(ec, false);
                 ec_dbg_drv("Polling enabled");
-                set_bit(flag, &ec->flags);
+                set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
         }
 }
 
-static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
 {
-        if (test_bit(flag, &ec->flags)) {
-                clear_bit(flag, &ec->flags);
+        if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
+                clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
                 acpi_ec_enable_gpe(ec, false);
                 ec_dbg_drv("Polling disabled");
         }
@@ -464,7 +463,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
 
 static void acpi_ec_submit_query(struct acpi_ec *ec)
 {
-        acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+        acpi_ec_mask_gpe(ec);
         if (!acpi_ec_event_enabled(ec))
                 return;
         if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
@@ -480,7 +479,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
         if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
                 ec_dbg_evt("Command(%s) unblocked",
                            acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
-        acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+        acpi_ec_unmask_gpe(ec);
 }
 
 static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -700,7 +699,7 @@ err:
                         ++t->irq_count;
                         /* Allow triggering on 0 threshold */
                         if (t->irq_count == ec_storm_threshold)
-                                acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+                                acpi_ec_mask_gpe(ec);
                 }
         }
 out:
@@ -798,7 +797,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 
         spin_lock_irqsave(&ec->lock, tmp);
         if (t->irq_count == ec_storm_threshold)
-                acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+                acpi_ec_unmask_gpe(ec);
         ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
         ec->curr = NULL;
         /* Disable GPE for command processing (IBF=0/OBF=1) */
@@ -1586,9 +1585,7 @@ static bool acpi_is_boot_ec(struct acpi_ec *ec)
 {
         if (!boot_ec)
                 return false;
-        if (ec->handle == boot_ec->handle &&
-            ec->gpe == boot_ec->gpe &&
-            ec->command_addr == boot_ec->command_addr &&
+        if (ec->command_addr == boot_ec->command_addr &&
             ec->data_addr == boot_ec->data_addr)
                 return true;
         return false;
@@ -1613,6 +1610,13 @@ static int acpi_ec_add(struct acpi_device *device)
 
         if (acpi_is_boot_ec(ec)) {
                 boot_ec_is_ecdt = false;
+                /*
+                 * Trust PNP0C09 namespace location rather than ECDT ID.
+                 *
+                 * But trust ECDT GPE rather than _GPE because of ASUS quirks,
+                 * so do not change boot_ec->gpe to ec->gpe.
+                 */
+                boot_ec->handle = ec->handle;
                 acpi_handle_debug(ec->handle, "duplicated.\n");
                 acpi_ec_free(ec);
                 ec = boot_ec;
@@ -1747,18 +1751,20 @@ static int __init acpi_ec_ecdt_start(void)
 
         if (!boot_ec)
                 return -ENODEV;
-        /*
-         * The DSDT EC should have already been started in
-         * acpi_ec_add().
-         */
+        /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
         if (!boot_ec_is_ecdt)
                 return -ENODEV;
 
         /*
          * At this point, the namespace and the GPE is initialized, so
          * start to find the namespace objects and handle the events.
+         *
+         * Note: ec->handle can be valid if this function is called after
+         * acpi_ec_add(), hence the fast path.
          */
-        if (!acpi_ec_ecdt_get_handle(&handle))
+        if (boot_ec->handle != ACPI_ROOT_OBJECT)
+                handle = boot_ec->handle;
+        else if (!acpi_ec_ecdt_get_handle(&handle))
                 return -ENODEV;
         return acpi_config_boot_ec(boot_ec, handle, true, true);
 }
@@ -2011,8 +2017,8 @@ int __init acpi_ec_init(void)
                 return result;
 
         /* Drivers must be started after acpi_ec_query_init() */
-        ecdt_fail = acpi_ec_ecdt_start();
         dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+        ecdt_fail = acpi_ec_ecdt_start();
         return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
 
@@ -237,7 +237,7 @@ static int __acpi_processor_start(struct acpi_device *device)
 
         result = acpi_cppc_processor_probe(pr);
         if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
-                dev_warn(&device->dev, "CPPC data invalid or not present\n");
+                dev_dbg(&device->dev, "CPPC data invalid or not present\n");
 
         if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
                 acpi_processor_power_init(pr);
@@ -573,6 +573,35 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
         return AE_OK;
 }
 
+static int __acpi_dev_get_resources(struct acpi_device *adev,
+                                    struct list_head *list,
+                                    int (*preproc)(struct acpi_resource *, void *),
+                                    void *preproc_data, char *method)
+{
+        struct res_proc_context c;
+        acpi_status status;
+
+        if (!adev || !adev->handle || !list_empty(list))
+                return -EINVAL;
+
+        if (!acpi_has_method(adev->handle, method))
+                return 0;
+
+        c.list = list;
+        c.preproc = preproc;
+        c.preproc_data = preproc_data;
+        c.count = 0;
+        c.error = 0;
+        status = acpi_walk_resources(adev->handle, method,
+                                     acpi_dev_process_resource, &c);
+        if (ACPI_FAILURE(status)) {
+                acpi_dev_free_resource_list(list);
+                return c.error ? c.error : -EIO;
+        }
+
+        return c.count;
+}
+
 /**
  * acpi_dev_get_resources - Get current resources of a device.
  * @adev: ACPI device node to get the resources for.
@@ -601,31 +630,46 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
                            int (*preproc)(struct acpi_resource *, void *),
                            void *preproc_data)
 {
-        struct res_proc_context c;
-        acpi_status status;
-
-        if (!adev || !adev->handle || !list_empty(list))
-                return -EINVAL;
-
-        if (!acpi_has_method(adev->handle, METHOD_NAME__CRS))
-                return 0;
-
-        c.list = list;
-        c.preproc = preproc;
-        c.preproc_data = preproc_data;
-        c.count = 0;
-        c.error = 0;
-        status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
-                                     acpi_dev_process_resource, &c);
-        if (ACPI_FAILURE(status)) {
-                acpi_dev_free_resource_list(list);
-                return c.error ? c.error : -EIO;
-        }
-
-        return c.count;
+        return __acpi_dev_get_resources(adev, list, preproc, preproc_data,
+                                        METHOD_NAME__CRS);
 }
 EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
 
+static int is_memory(struct acpi_resource *ares, void *not_used)
+{
+        struct resource_win win;
+        struct resource *res = &win.res;
+
+        memset(&win, 0, sizeof(win));
+
+        return !(acpi_dev_resource_memory(ares, res)
+               || acpi_dev_resource_address_space(ares, &win)
+               || acpi_dev_resource_ext_address_space(ares, &win));
+}
+
+/**
+ * acpi_dev_get_dma_resources - Get current DMA resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ *
+ * Evaluate the _DMA method for the given device node and process its
+ * output.
+ *
+ * The resultant struct resource objects are put on the list pointed to
+ * by @list, that must be empty initially, as members of struct
+ * resource_entry objects.  Callers of this routine should use
+ * %acpi_dev_free_resource_list() to free that list.
+ *
+ * The number of resources in the output list is returned on success,
+ * an error code reflecting the error condition is returned otherwise.
+ */
+int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
+{
+        return __acpi_dev_get_resources(adev, list, is_memory, NULL,
+                                        METHOD_NAME__DMA);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
+
 /**
  * acpi_dev_filter_resource_type - Filter ACPI resource according to resource
  *                                 types
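Usage note: a consumer of the new helper follows the same pattern this series adds in acpi_dma_get_range() further down: build the list, walk the resource_entry items, then free the list. The fragment below is only a sketch of that calling convention; the surrounding function and the adev pointer are assumed, not part of the patch.

        /* Sketch only: assumes a valid struct acpi_device *adev in scope. */
        LIST_HEAD(list);
        struct resource_entry *rentry;
        int ret;

        ret = acpi_dev_get_dma_resources(adev, &list);
        if (ret > 0) {
                list_for_each_entry(rentry, &list, node) {
                        /* rentry->res is the host address range,
                         * rentry->offset the translation offset. */
                }
        }
        acpi_dev_free_resource_list(&list);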
@@ -1359,6 +1359,85 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
         return DEV_DMA_NON_COHERENT;
 }
 
+/**
+ * acpi_dma_get_range() - Get device DMA parameters.
+ *
+ * @dev: device to configure
+ * @dma_addr: pointer device DMA address result
+ * @offset: pointer to the DMA offset result
+ * @size: pointer to DMA range size result
+ *
+ * Evaluate DMA regions and return respectively DMA region start, offset
+ * and size in dma_addr, offset and size on parsing success; it does not
+ * update the passed in values on failure.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
+                       u64 *size)
+{
+        struct acpi_device *adev;
+        LIST_HEAD(list);
+        struct resource_entry *rentry;
+        int ret;
+        struct device *dma_dev = dev;
+        u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
+
+        /*
+         * Walk the device tree chasing an ACPI companion with a _DMA
+         * object while we go. Stop if we find a device with an ACPI
+         * companion containing a _DMA method.
+         */
+        do {
+                adev = ACPI_COMPANION(dma_dev);
+                if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
+                        break;
+
+                dma_dev = dma_dev->parent;
+        } while (dma_dev);
+
+        if (!dma_dev)
+                return -ENODEV;
+
+        if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
+                acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
+                return -EINVAL;
+        }
+
+        ret = acpi_dev_get_dma_resources(adev, &list);
+        if (ret > 0) {
+                list_for_each_entry(rentry, &list, node) {
+                        if (dma_offset && rentry->offset != dma_offset) {
+                                ret = -EINVAL;
+                                dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
+                                goto out;
+                        }
+                        dma_offset = rentry->offset;
+
+                        /* Take lower and upper limits */
+                        if (rentry->res->start < dma_start)
+                                dma_start = rentry->res->start;
+                        if (rentry->res->end > dma_end)
+                                dma_end = rentry->res->end;
+                }
+
+                if (dma_start >= dma_end) {
+                        ret = -EINVAL;
+                        dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
+                        goto out;
+                }
+
+                *dma_addr = dma_start - dma_offset;
+                len = dma_end - dma_start;
+                *size = max(len, len + 1);
+                *offset = dma_offset;
+        }
+ out:
+        acpi_dev_free_resource_list(&list);
+
+        return ret >= 0 ? 0 : ret;
+}
+
 /**
  * acpi_dma_configure - Set-up DMA configuration for the device.
  * @dev: The pointer to the device
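To make the window reduction in acpi_dma_get_range() concrete, the standalone sketch below merges two hypothetical _DMA windows (host addresses plus a translation offset; none of these values come from the patch) into the (dma_addr, offset, size) triple using the same arithmetic:

/*
 * Standalone illustration of the range merge in acpi_dma_get_range().
 * The windows are hypothetical; start/end are host addresses and
 * host = device address + offset, as with struct resource_entry.
 */
#include <stdint.h>
#include <stdio.h>

struct dma_window {
        uint64_t start;   /* host start address */
        uint64_t end;     /* host end address, inclusive */
        uint64_t offset;  /* translation offset */
};

int main(void)
{
        const struct dma_window win[] = {
                { 0x80000000, 0xbfffffff, 0x80000000 },
                { 0xc0000000, 0xffffffff, 0x80000000 },
        };
        uint64_t dma_start = UINT64_MAX, dma_end = 0, dma_offset = 0;
        uint64_t len, dma_addr, size;

        for (unsigned int i = 0; i < sizeof(win) / sizeof(win[0]); i++) {
                /* acpi_dma_get_range() bails out if offsets differ;
                 * assume they all match here. */
                dma_offset = win[i].offset;
                if (win[i].start < dma_start)
                        dma_start = win[i].start;
                if (win[i].end > dma_end)
                        dma_end = win[i].end;
        }

        dma_addr = dma_start - dma_offset;      /* device-visible base */
        len = dma_end - dma_start;
        size = len + 1 > len ? len + 1 : len;   /* max(len, len + 1) */

        printf("dma_addr=%#llx offset=%#llx size=%#llx\n",
               (unsigned long long)dma_addr,
               (unsigned long long)dma_offset,
               (unsigned long long)size);
        return 0;
}

Here device addresses 0x0-0x7fffffff map to host 0x80000000-0xffffffff, so iort_dma_setup()/acpi_dma_configure() would end up with dma_addr = 0, size = 0x80000000 and a matching dma_pfn_offset.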
@@ -1367,20 +1446,16 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
 int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
 {
         const struct iommu_ops *iommu;
-        u64 size;
+        u64 dma_addr = 0, size = 0;
 
-        iort_set_dma_mask(dev);
+        iort_dma_setup(dev, &dma_addr, &size);
 
         iommu = iort_iommu_configure(dev);
         if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
                 return -EPROBE_DEFER;
 
-        size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
-        /*
-         * Assume dma valid range starts at 0 and covers the whole
-         * coherent_dma_mask.
-         */
-        arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);
+        arch_setup_dma_ops(dev, dma_addr, size,
+                           iommu, attr == DEV_DMA_COHERENT);
 
         return 0;
 }
@@ -457,10 +457,8 @@ static int __init acpi_pcc_probe(void)
         /* Search for PCCT */
         status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
 
-        if (ACPI_FAILURE(status) || !pcct_tbl) {
-                pr_warn("PCCT header not found.\n");
+        if (ACPI_FAILURE(status) || !pcct_tbl)
                 return -ENODEV;
-        }
 
         count = acpi_table_parse_entries(ACPI_SIG_PCCT,
                         sizeof(struct acpi_table_pcct),
@@ -578,6 +578,8 @@ struct acpi_pci_root {
 
 bool acpi_dma_supported(struct acpi_device *adev);
 enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
+int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
+                       u64 *size);
 int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
 void acpi_dma_deconfigure(struct device *dev);
 
@@ -427,6 +427,8 @@ void acpi_dev_free_resource_list(struct list_head *list);
 int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
                            int (*preproc)(struct acpi_resource *, void *),
                            void *preproc_data);
+int acpi_dev_get_dma_resources(struct acpi_device *adev,
+                               struct list_head *list);
 int acpi_dev_filter_resource_type(struct acpi_resource *ares,
                                   unsigned long types);
 
@@ -774,6 +776,12 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
         return DEV_DMA_NOT_SUPPORTED;
 }
 
+static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr,
+                                     u64 *offset, u64 *size)
+{
+        return -ENODEV;
+}
+
 static inline int acpi_dma_configure(struct device *dev,
                                      enum dev_dma_attr attr)
 {
@@ -36,7 +36,7 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
 void acpi_configure_pmsi_domain(struct device *dev);
 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
 /* IOMMU interface */
-void iort_set_dma_mask(struct device *dev);
+void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
 const struct iommu_ops *iort_iommu_configure(struct device *dev);
 #else
 static inline void acpi_iort_init(void) { }
@@ -47,7 +47,8 @@ static inline struct irq_domain *iort_get_device_domain(struct device *dev,
 { return NULL; }
 static inline void acpi_configure_pmsi_domain(struct device *dev) { }
 /* IOMMU interface */
-static inline void iort_set_dma_mask(struct device *dev) { }
+static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
+                                  u64 *size) { }
 static inline
 const struct iommu_ops *iort_iommu_configure(struct device *dev)
 { return NULL; }