dmaengine: idxd: add support for configurable max wq xfer size
Add a sysfs attribute, max_xfer_size, to the wq so that the max xfer size can be configured on a per-wq basis. Add support code to program the validated user input when the wq is enabled. This is a performance tuning parameter.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/159865265404.29141.3049399618578194052.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Parent: b0ef489e2a
Commit: d7aad5550e
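As a usage illustration only (not part of the patch): the new attribute is written from userspace while the wq is still disabled. A minimal sketch follows; the wq0.0 device name and the 2 MB value are assumptions for the example, and the errno comments reflect the checks in wq_max_transfer_size_store() further down.

/* Usage sketch (illustrative): set wq0.0's max transfer size to 2 MB
 * before enabling the wq. Device name and value are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *attr = "/sys/bus/dsa/devices/wq0.0/max_transfer_size";
        const char *val = "2097152";            /* 2 MB, already a power of two */
        int fd = open(attr, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, val, strlen(val)) < 0)
                perror("write");        /* EPERM if wq enabled, EINVAL if value too large */
        close(fd);
        return 0;
}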
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -170,6 +170,13 @@ Contact:        dmaengine@vger.kernel.org
 Description:    The number of entries in this work queue that may be filled
                 via a limited portal.
 
+What:           /sys/bus/dsa/devices/wq<m>.<n>/max_transfer_size
+Date:           Aug 28, 2020
+KernelVersion:  5.10.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The max transfer size for this workqueue. Cannot exceed device
+                max transfer size. Configurable parameter.
+
 What:           /sys/bus/dsa/devices/engine<m>.<n>/group_id
 Date:           Oct 25, 2019
 KernelVersion:  5.6.0
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -529,7 +529,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
         wq->wqcfg.priority = wq->priority;
 
         /* bytes 12-15 */
-        wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+        wq->wqcfg.max_xfer_shift = ilog2(wq->max_xfer_bytes);
         wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
 
         dev_dbg(dev, "WQ %d CFGs\n", wq->id);
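The WQCFG field holds a power-of-two shift rather than a byte count, so the per-wq byte limit is converted with ilog2() when it is programmed; the sysfs store path below rounds user input up to a power of two so this conversion is exact. A standalone sketch of that round-trip follows; the two helpers reimplement the semantics of the kernel's roundup_pow_of_two() and ilog2() purely for illustration.

/* Illustration of the shift encoding used by WQCFG: a byte limit is
 * stored as log2(bytes), so only power-of-two limits are representable. */
#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u64(uint64_t v)               /* mirrors ilog2() for v > 0 */
{
        unsigned int shift = 0;

        while (v >>= 1)
                shift++;
        return shift;
}

static uint64_t roundup_pow_of_two_u64(uint64_t v)      /* mirrors roundup_pow_of_two() */
{
        uint64_t p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

int main(void)
{
        uint64_t requested = 3000000;                   /* arbitrary user input */
        uint64_t bytes = roundup_pow_of_two_u64(requested);
        unsigned int shift = ilog2_u64(bytes);

        /* prints: requested 3000000 -> 4194304 bytes, max_xfer_shift 22 */
        printf("requested %llu -> %llu bytes, max_xfer_shift %u\n",
               (unsigned long long)requested, (unsigned long long)bytes, shift);
        return 0;
}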
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -114,6 +114,7 @@ struct idxd_wq {
         struct sbitmap_queue sbq;
         struct dma_chan dma_chan;
         char name[WQ_NAME_SIZE + 1];
+        u64 max_xfer_bytes;
 };
 
 struct idxd_engine {
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -176,6 +176,7 @@ static int idxd_setup_internals(struct idxd_device *idxd)
                 wq->idxd = idxd;
                 mutex_init(&wq->wq_lock);
                 wq->idxd_cdev.minor = -1;
+                wq->max_xfer_bytes = idxd->max_xfer_bytes;
         }
 
         for (i = 0; i < idxd->max_engines; i++) {
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1064,6 +1064,45 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
 static struct device_attribute dev_attr_wq_cdev_minor =
                 __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
 
+static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
+                                         char *buf)
+{
+        struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+        return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
+}
+
+static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+        struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+        struct idxd_device *idxd = wq->idxd;
+        u64 xfer_size;
+        int rc;
+
+        if (wq->state != IDXD_WQ_DISABLED)
+                return -EPERM;
+
+        rc = kstrtou64(buf, 0, &xfer_size);
+        if (rc < 0)
+                return -EINVAL;
+
+        if (xfer_size == 0)
+                return -EINVAL;
+
+        xfer_size = roundup_pow_of_two(xfer_size);
+        if (xfer_size > idxd->max_xfer_bytes)
+                return -EINVAL;
+
+        wq->max_xfer_bytes = xfer_size;
+
+        return count;
+}
+
+static struct device_attribute dev_attr_wq_max_transfer_size =
+                __ATTR(max_transfer_size, 0644,
+                       wq_max_transfer_size_show, wq_max_transfer_size_store);
+
 static struct attribute *idxd_wq_attributes[] = {
         &dev_attr_wq_clients.attr,
         &dev_attr_wq_state.attr,
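For reference, the acceptance rules enforced by wq_max_transfer_size_store() above can be restated as a small pure function. The function name, the errno stand-ins, and the 2 MB device limit used in the example results are hypothetical and for illustration only; this is not kernel code.

/* Illustrative restatement of the store policy: returns the byte value
 * that would be recorded, or a negative stand-in code when rejected. */
#include <stdint.h>

#define EX_EPERM        -1              /* stand-ins for -EPERM / -EINVAL */
#define EX_EINVAL       -2

static int64_t max_xfer_store_policy(int wq_enabled, uint64_t requested,
                                     uint64_t device_max)
{
        uint64_t size = 1;

        if (wq_enabled)
                return EX_EPERM;        /* only a disabled wq may be reconfigured */
        if (requested == 0)
                return EX_EINVAL;
        while (size < requested)        /* round up to a power of two */
                size <<= 1;
        if (size > device_max)
                return EX_EINVAL;       /* cannot exceed the device limit */
        return (int64_t)size;           /* this is what max_xfer_bytes becomes */
}

/*
 * With a hypothetical 2 MB device limit (device_max = 2097152):
 *   max_xfer_store_policy(0, 1000, 2097152)    -> 1024
 *   max_xfer_store_policy(0, 3000000, 2097152) -> EX_EINVAL (rounds to 4 MB)
 *   max_xfer_store_policy(1, 4096, 2097152)    -> EX_EPERM  (wq is enabled)
 */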
@@ -1074,6 +1113,7 @@ static struct attribute *idxd_wq_attributes[] = {
         &dev_attr_wq_type.attr,
         &dev_attr_wq_name.attr,
         &dev_attr_wq_cdev_minor.attr,
+        &dev_attr_wq_max_transfer_size.attr,
         NULL,
 };