cxl/bus: Populate the target list at decoder create

As found by cxl_test, while the implementation populated the target_list
for the single-dport exceptional case, it missed populating the
target_list for the typical multi-dport case. Root decoders always know their target
list at the beginning of time, and even switch-level decoders should
have a target list of one or more zeros by default, depending on the
interleave-ways setting.

Walk the hosting port's dport list and populate based on the passed in
map.

Move devm_cxl_add_passthrough_decoder() out of line now that it does the
work of generating a target_map.

Before:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0

0

After:
$ cat /sys/bus/cxl/devices/root2/decoder*/target_list
0
0,1,2,3
0
0,1,2,3

Where root2 is a CXL topology root object generated by 'cxl_test'.

Acked-by: Ben Widawsky <ben.widawsky@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/163116439000.2460985.11713777051267946018.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
Dan Williams 2021-09-08 22:13:10 -07:00
Родитель 67dcdd4d3b
Коммит a5c2580216
3 изменённых файлов: 91 добавлений и 27 удалений

Просмотреть файл

@@ -52,6 +52,12 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
return -EINVAL; return -EINVAL;
} }
if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
CFMWS_INTERLEAVE_WAYS(cfmws));
return -EINVAL;
}
expected_len = struct_size((cfmws), interleave_targets, expected_len = struct_size((cfmws), interleave_targets,
CFMWS_INTERLEAVE_WAYS(cfmws)); CFMWS_INTERLEAVE_WAYS(cfmws));
@@ -71,6 +77,7 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
static void cxl_add_cfmws_decoders(struct device *dev, static void cxl_add_cfmws_decoders(struct device *dev,
struct cxl_port *root_port) struct cxl_port *root_port)
{ {
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct acpi_cedt_cfmws *cfmws; struct acpi_cedt_cfmws *cfmws;
struct cxl_decoder *cxld; struct cxl_decoder *cxld;
acpi_size len, cur = 0; acpi_size len, cur = 0;
@@ -83,6 +90,7 @@ static void cxl_add_cfmws_decoders(struct device *dev,
while (cur < len) { while (cur < len) {
struct acpi_cedt_header *c = cedt_subtable + cur; struct acpi_cedt_header *c = cedt_subtable + cur;
int i;
if (c->type != ACPI_CEDT_TYPE_CFMWS) { if (c->type != ACPI_CEDT_TYPE_CFMWS) {
cur += c->length; cur += c->length;
@@ -108,6 +116,9 @@ static void cxl_add_cfmws_decoders(struct device *dev,
continue; continue;
} }
for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
target_map[i] = cfmws->interleave_targets[i];
flags = cfmws_to_decoder_flags(cfmws->restrictions); flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld = devm_cxl_add_decoder(dev, root_port, cxld = devm_cxl_add_decoder(dev, root_port,
CFMWS_INTERLEAVE_WAYS(cfmws), CFMWS_INTERLEAVE_WAYS(cfmws),
@@ -115,7 +126,7 @@ static void cxl_add_cfmws_decoders(struct device *dev,
CFMWS_INTERLEAVE_WAYS(cfmws), CFMWS_INTERLEAVE_WAYS(cfmws),
CFMWS_INTERLEAVE_GRANULARITY(cfmws), CFMWS_INTERLEAVE_GRANULARITY(cfmws),
CXL_DECODER_EXPANDER, CXL_DECODER_EXPANDER,
flags); flags, target_map);
if (IS_ERR(cxld)) { if (IS_ERR(cxld)) {
dev_err(dev, "Failed to add decoder for %#llx-%#llx\n", dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",

Просмотреть файл

@@ -453,11 +453,38 @@ err:
} }
EXPORT_SYMBOL_GPL(cxl_add_dport); EXPORT_SYMBOL_GPL(cxl_add_dport);
/*
 * decoder_populate_targets() - fill a decoder's target array from a target map
 * @host: device used for debug logging context
 * @cxld: decoder whose ->target[] array is being populated
 * @port: hosting port whose dport list is searched
 * @target_map: array of dport ids, one per target slot (may be NULL)
 * @nr_targets: number of entries in @target_map to resolve
 *
 * Each target_map[] entry is a dport id (e.g. as recorded from
 * dport->port_id by the passthrough-decoder path); resolve each id to
 * its struct cxl_dport and record it in cxld->target[].
 *
 * Returns 0 on success, or -ENXIO if any id has no matching dport.
 */
static int decoder_populate_targets(struct device *host,
				    struct cxl_decoder *cxld,
				    struct cxl_port *port, int *target_map,
				    int nr_targets)
{
	int rc = 0, i;

	/* No map supplied: nothing to resolve, leave targets as-is */
	if (!target_map)
		return 0;

	/* Hold the port device lock while walking its dport list */
	device_lock(&port->dev);
	for (i = 0; i < nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			/* Requested id not present in this port's dports */
			rc = -ENXIO;
			break;
		}
		dev_dbg(host, "%s: target: %d\n", dev_name(dport->dport), i);
		cxld->target[i] = dport;
	}
	device_unlock(&port->dev);

	return rc;
}
static struct cxl_decoder * static struct cxl_decoder *
cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base, cxl_decoder_alloc(struct device *host, struct cxl_port *port, int nr_targets,
resource_size_t len, int interleave_ways, resource_size_t base, resource_size_t len,
int interleave_granularity, enum cxl_decoder_type type, int interleave_ways, int interleave_granularity,
unsigned long flags) enum cxl_decoder_type type, unsigned long flags,
int *target_map)
{ {
struct cxl_decoder *cxld; struct cxl_decoder *cxld;
struct device *dev; struct device *dev;
@@ -493,10 +520,10 @@ cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
.target_type = type, .target_type = type,
}; };
/* handle implied target_list */ rc = decoder_populate_targets(host, cxld, port, target_map, nr_targets);
if (interleave_ways == 1) if (rc)
cxld->target[0] = goto err;
list_first_entry(&port->dports, struct cxl_dport, list);
dev = &cxld->dev; dev = &cxld->dev;
device_initialize(dev); device_initialize(dev);
device_set_pm_not_required(dev); device_set_pm_not_required(dev);
@@ -519,14 +546,19 @@ struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
resource_size_t base, resource_size_t len, resource_size_t base, resource_size_t len,
int interleave_ways, int interleave_granularity, int interleave_ways, int interleave_granularity,
enum cxl_decoder_type type, unsigned long flags) enum cxl_decoder_type type, unsigned long flags,
int *target_map)
{ {
struct cxl_decoder *cxld; struct cxl_decoder *cxld;
struct device *dev; struct device *dev;
int rc; int rc;
cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways, if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
interleave_granularity, type, flags); return ERR_PTR(-EINVAL);
cxld = cxl_decoder_alloc(host, port, nr_targets, base, len,
interleave_ways, interleave_granularity, type,
flags, target_map);
if (IS_ERR(cxld)) if (IS_ERR(cxld))
return cxld; return cxld;
@@ -550,6 +582,32 @@ err:
} }
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder); EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Default the range to a 0-base,
 * 0-length extent until the first CXL region is activated.
 */
struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host,
						     struct cxl_port *port)
{
	struct cxl_dport *dport;
	int target_map[1];

	/* Snapshot the first dport under the port lock (expected to be the
	 * only one for a passthrough-capable host bridge). */
	device_lock(&port->dev);
	dport = list_first_entry_or_null(&port->dports, typeof(*dport), list);
	device_unlock(&port->dev);

	if (!dport)
		return ERR_PTR(-ENXIO);

	/* Single-target, 1-way interleave: route everything to this dport.
	 * Args: nr_targets=1, base=0, len=0, ways=1, granularity=PAGE_SIZE,
	 * flags=0. */
	target_map[0] = dport->port_id;
	return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
				    CXL_DECODER_EXPANDER, 0, target_map);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_passthrough_decoder);
/** /**
* __cxl_driver_register - register a driver for the cxl bus * __cxl_driver_register - register a driver for the cxl bus
* @cxl_drv: cxl driver structure to attach * @cxl_drv: cxl driver structure to attach

Просмотреть файл

@@ -180,6 +180,12 @@ enum cxl_decoder_type {
CXL_DECODER_EXPANDER = 3, CXL_DECODER_EXPANDER = 3,
}; };
/*
* Current specification goes up to 8, double that seems a reasonable
* software max for the foreseeable future
*/
#define CXL_DECODER_MAX_INTERLEAVE 16
/** /**
* struct cxl_decoder - CXL address range decode configuration * struct cxl_decoder - CXL address range decode configuration
* @dev: this decoder's device * @dev: this decoder's device
@@ -284,22 +290,11 @@ struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
resource_size_t base, resource_size_t len, resource_size_t base, resource_size_t len,
int interleave_ways, int interleave_granularity, int interleave_ways, int interleave_granularity,
enum cxl_decoder_type type, unsigned long flags); enum cxl_decoder_type type, unsigned long flags,
int *target_map);
/*
* Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
* single ported host-bridges need not publish a decoder capability when a
* passthrough decode can be assumed, i.e. all transactions that the uport sees
* are claimed and passed to the single dport. Default the range a 0-base
* 0-length until the first CXL region is activated.
*/
static inline struct cxl_decoder *
devm_cxl_add_passthrough_decoder(struct device *host, struct cxl_port *port)
{
return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
CXL_DECODER_EXPANDER, 0);
}
struct cxl_decoder *devm_cxl_add_passthrough_decoder(struct device *host,
struct cxl_port *port);
extern struct bus_type cxl_bus_type; extern struct bus_type cxl_bus_type;
struct cxl_driver { struct cxl_driver {