Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit 4c4de7d3c8
@@ -32,8 +32,6 @@ cpufreq-stats.txt - General description of sysfs cpufreq stats.
index.txt - File index, Mailing list and Links (this document)
intel-pstate.txt - Intel pstate cpufreq driver specific file.
pcc-cpufreq.txt - PCC cpufreq driver specific file.

@@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes)
}
ghes_do_proc(ghes, ghes->estatus);

out:
ghes_clear_estatus(ghes);

if (rc == -ENOENT)
return rc;

/*
* GHESv2 type HEST entries introduce support for error acknowledgment,
* so only acknowledge the error if this support is present.
*/
if (is_hest_type_generic_v2(ghes)) {
rc = ghes_ack_error(ghes->generic_v2);
if (rc)
return rc;
}
out:
ghes_clear_estatus(ghes);
if (is_hest_type_generic_v2(ghes))
return ghes_ack_error(ghes->generic_v2);

return rc;
}

@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
opp->available = availability_req;

dev_pm_opp_get(opp);
mutex_unlock(&opp_table->lock);

/* Notify the change of the OPP availability */
if (availability_req)
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
blocking_notifier_call_chain(&opp_table->head,
OPP_EVENT_DISABLE, opp);

dev_pm_opp_put(opp);
goto put_table;

unlock:
mutex_unlock(&opp_table->lock);
put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}

@@ -118,6 +118,10 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "sigma,tango4", },

{ .compatible = "ti,am33xx", },
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },

{ }
};

@@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
atomic_set(&qp->qp_sec->error_list_count, 0);
init_completion(&qp->qp_sec->error_complete);
ret = security_ib_alloc_security(&qp->qp_sec->security);
if (ret)
if (ret) {
kfree(qp->qp_sec);
qp->qp_sec = NULL;
}

return ret;
}

@@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.raw_packet_caps = attr.raw_packet_caps;
resp.response_length += sizeof(resp.raw_packet_caps);

if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps))
if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
goto end;

resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size;
resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags;
resp.xrq_caps.max_ops = attr.xrq_caps.max_ops;
resp.xrq_caps.max_sge = attr.xrq_caps.max_sge;
resp.xrq_caps.flags = attr.xrq_caps.flags;
resp.response_length += sizeof(resp.xrq_caps);
resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
resp.tm_caps.max_ops = attr.tm_caps.max_ops;
resp.tm_caps.max_sge = attr.tm_caps.max_sge;
resp.tm_caps.flags = attr.tm_caps.flags;
resp.response_length += sizeof(resp.tm_caps);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;

@@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
@@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
u64 regs[CCE_NUM_INT_CSRS];
u32 bit;
int i;
irqreturn_t handled = IRQ_NONE;

this_cpu_inc(*dd->int_counter);
@@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data)
for_each_set_bit(bit, (unsigned long *)&regs[0],
CCE_NUM_INT_CSRS * 64) {
is_interrupt(dd, bit);
handled = IRQ_HANDLED;
}

return IRQ_HANDLED;
return handled;
}

static irqreturn_t sdma_interrupt(int irq, void *data)
@@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}

void reset_qsfp(struct hfi1_pportdata *ppd)
int reset_qsfp(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
u64 mask, qsfp_mask;
@@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
* for alarms and warnings
*/
set_qsfp_int_n(ppd, 1);

/*
* After the reset, AOC transmitters are enabled by default. They need
* to be turned off to complete the QSFP setup before they can be
* enabled again.
*/
return set_qsfp_tx(ppd, 0);
}

static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
@@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
struct hfi1_devdata *dd = ppd->dd;
u32 previous_state;
int offline_state_ret;
int ret;

update_lcb_cache(dd);
@@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);

/*
* Wait for offline transition. It can take a while for
* the link to go down.
*/
ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
if (ret < 0)
return ret;

/*
* Now in charge of LCB - must be after the physical state is
* offline.quiet and before host_link_state is changed.
*/
set_host_lcb_access(dd);
write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

/* make sure the logical state is also down */
ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
if (ret)
force_logical_link_state_down(ppd);

ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
if (offline_state_ret < 0)
return offline_state_ret;

/* Disabling AOC transmitters */
if (ppd->port_type == PORT_TYPE_QSFP &&
ppd->qsfp_info.limiting_active &&
qsfp_mod_present(ppd)) {
@@ -10364,6 +10359,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
}
}

/*
* Wait for the offline.Quiet transition if it hasn't happened yet. It
* can take a while for the link to go down.
*/
if (offline_state_ret != PLS_OFFLINE_QUIET) {
ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
if (ret < 0)
return ret;
}

/*
* Now in charge of LCB - must be after the physical state is
* offline.quiet and before host_link_state is changed.
*/
set_host_lcb_access(dd);
write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

/* make sure the logical state is also down */
ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
if (ret)
force_logical_link_state_down(ppd);

ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */

/*
* The LNI has a mandatory wait time after the physical state
* moves to Offline.Quiet. The wait time may be different
@@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
/* went down while attempting link up */
check_lni_states(ppd);

/* The QSFP doesn't need to be reset on LNI failure */
ppd->qsfp_info.reset_needed = 0;
}

/* the active link width (downgrade) is 0 on link down */
@@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
return 0;
}

/*
* wait_phys_link_offline_quiet_substates - wait for any offline substate
* @ppd: port device
* @msecs: the number of milliseconds to wait
*
* Wait up to msecs milliseconds for any offline physical link
* state change to occur.
* Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
*/
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
int msecs)
{
u32 read_state;
unsigned long timeout;

timeout = jiffies + msecs_to_jiffies(msecs);
while (1) {
read_state = read_physical_state(ppd->dd);
if ((read_state & 0xF0) == PLS_OFFLINE)
break;
if (time_after(jiffies, timeout)) {
dd_dev_err(ppd->dd,
"timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
read_state, msecs);
return -ETIMEDOUT;
}
usleep_range(1950, 2050); /* sleep 2ms-ish */
}

log_state_transition(ppd, read_state);
return read_state;
}

#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

@@ -204,6 +204,7 @@
#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92
#define PLS_OFFLINE_REPORT_FAILURE 0x93
#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94
#define PLS_OFFLINE_QUIET_DURATION 0x95
#define PLS_POLLING 0x20
#define PLS_POLLING_QUIET 0x20
#define PLS_POLLING_ACTIVE 0x21
@@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work);
void handle_link_bounce(struct work_struct *work);
void handle_start_link(struct work_struct *work);
void handle_sma_message(struct work_struct *work);
void reset_qsfp(struct hfi1_pportdata *ppd);
int reset_qsfp(struct hfi1_pportdata *ppd);
void qsfp_event(struct work_struct *work);
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
int send_idle_sma(struct hfi1_devdata *dd, u64 message);

@@ -204,7 +204,10 @@ done_asic:
return ret;
}

/* magic character sequence that trails an image */
/* magic character sequence that begins an image */
#define IMAGE_START_MAGIC "APO="

/* magic character sequence that might trail an image */
#define IMAGE_TRAIL_MAGIC "egamiAPO"

/* EPROM file types */
@@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
{
void *buffer;
void *p;
u32 length;
int ret;

buffer = kmalloc(P1_SIZE, GFP_KERNEL);
@@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
return ret;
}

/* scan for image magic that may trail the actual data */
p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
if (!p) {
/* config partition is valid only if it starts with IMAGE_START_MAGIC */
if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
kfree(buffer);
return -ENOENT;
}

/* scan for image magic that may trail the actual data */
p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
if (p)
length = p - buffer;
else
length = P1_SIZE;

*data = buffer;
*size = p - buffer;
*size = length;
return 0;
}

@@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
switch (ret) {
case 0:
ret = setup_base_ctxt(fd, uctxt);
if (uctxt->subctxt_cnt) {
/*
* Base context is done (successfully or not), notify
* anybody using a sub-context that is waiting for
* this completion.
*/
clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
wake_up(&uctxt->wait);
}
if (ret)
deallocate_ctxt(uctxt);
break;
case 1:
ret = complete_subctxt(fd);
@@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
/* Now allocate the RcvHdr queue and eager buffers. */
ret = hfi1_create_rcvhdrq(dd, uctxt);
if (ret)
return ret;
goto done;

ret = hfi1_setup_eagerbufs(uctxt);
if (ret)
goto setup_failed;
goto done;

/* If sub-contexts are enabled, do the appropriate setup */
if (uctxt->subctxt_cnt)
ret = setup_subctxt(uctxt);
if (ret)
goto setup_failed;
goto done;

ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
if (ret)
goto setup_failed;
goto done;

ret = init_user_ctxt(fd, uctxt);
if (ret)
goto setup_failed;
goto done;

user_init(uctxt);

@@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
fd->uctxt = uctxt;
hfi1_rcd_get(uctxt);

return 0;
done:
if (uctxt->subctxt_cnt) {
/*
* On error, set the failed bit so sub-contexts will clean up
* correctly.
*/
if (ret)
set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);

setup_failed:
/* Set the failed bit so sub-context init can do the right thing */
set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
deallocate_ctxt(uctxt);
/*
* Base context is done (successfully or not), notify anybody
* using a sub-context that is waiting for this completion.
*/
clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
wake_up(&uctxt->wait);
}

return ret;
}

@@ -68,7 +68,7 @@
/*
* Code to adjust PCIe capabilities.
*/
static int tune_pcie_caps(struct hfi1_devdata *);
static void tune_pcie_caps(struct hfi1_devdata *);

/*
* Do all the common PCIe setup and initialization.
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
*/
int request_msix(struct hfi1_devdata *dd, u32 msireq)
{
int nvec, ret;
int nvec;

nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
@@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
return nvec;
}

ret = tune_pcie_caps(dd);
if (ret) {
dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
pci_free_irq_vectors(dd->pcidev);
return ret;
}
tune_pcie_caps(dd);

/* check for legacy IRQ */
if (nvec == 1 && !dd->pcidev->msix_enabled)
@@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED;
module_param_named(aspm, aspm_mode, uint, S_IRUGO);
MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");

static int tune_pcie_caps(struct hfi1_devdata *dd)
static void tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
* Turn on extended tags in DevCtl in case the BIOS has turned it off
* to improve WFR SDMA bandwidth
*/
ret = pcie_capability_read_word(dd->pcidev,
PCI_EXP_DEVCTL, &ectl);
if (ret) {
dd_dev_err(dd, "Unable to read from PCI config\n");
return ret;
}

if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
dd_dev_info(dd, "Enabling PCIe extended tags\n");
ectl |= PCI_EXP_DEVCTL_EXT_TAG;
ret = pcie_capability_write_word(dd->pcidev,
PCI_EXP_DEVCTL, ectl);
if (ret) {
dd_dev_err(dd, "Unable to write to PCI config\n");
return ret;
}
if (ret)
dd_dev_info(dd, "Unable to write to PCI config\n");
}
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
@@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
* The driver cannot perform the tuning if it does not have
* access to the upstream component.
*/
if (!parent)
return -EINVAL;
if (!parent) {
dd_dev_info(dd, "Parent not found\n");
return;
}
if (!pci_is_root_bus(parent->bus)) {
dd_dev_info(dd, "Parent not root\n");
return -EINVAL;
return;
}
if (!pci_is_pcie(parent)) {
dd_dev_info(dd, "Parent is not PCI Express capable\n");
return;
}
if (!pci_is_pcie(dd->pcidev)) {
dd_dev_info(dd, "PCI device is not PCI Express capable\n");
return;
}

if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
return -EINVAL;
rc_mpss = parent->pcie_mpss;
rc_mps = ffs(pcie_get_mps(parent)) - 8;
/* Find out supported and configured values for endpoint (us) */
@@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
ep_mrrs = max_mrrs;
pcie_set_readrq(dd->pcidev, ep_mrrs);
}

return 0;
}

/* End of PCIe capability tuning */

@@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
* reuse of stale settings established in our previous pass through.
*/
if (ppd->qsfp_info.reset_needed) {
reset_qsfp(ppd);
ret = reset_qsfp(ppd);
if (ret)
return ret;
refresh_qsfp_cache(ppd, &ppd->qsfp_info);
} else {
ppd->qsfp_info.reset_needed = 1;

@@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}

if (MLX5_CAP_GEN(mdev, tag_matching)) {
props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
props->xrq_caps.max_num_tags =
props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
props->tm_caps.max_num_tags =
(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
props->xrq_caps.flags = IB_TM_CAP_RC;
props->xrq_caps.max_ops =
props->tm_caps.flags = IB_TM_CAP_RC;
props->tm_caps.max_ops =
1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
props->xrq_caps.max_sge = MLX5_TM_MAX_SGE;
props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
}

if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {

@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
{
unsigned long tmp;
unsigned long m;
int i, k;
u64 base = 0;
int p = 0;
int skip;
int mask;
u64 len;
u64 pfn;
u64 base = ~0, p = 0;
u64 len, pfn;
int i = 0;
struct scatterlist *sg;
int entry;
unsigned long page_shift = umem->page_shift;
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
m = find_first_bit(&tmp, BITS_PER_LONG);
if (max_page_shift)
m = min_t(unsigned long, max_page_shift - page_shift, m);
skip = 1 << m;
mask = skip - 1;
i = 0;

for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
len = sg_dma_len(sg) >> page_shift;
pfn = sg_dma_address(sg) >> page_shift;
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
skip = 1 << m;
mask = skip - 1;
base = pfn;
p = 0;
} else {
if (base + p != pfn) {
tmp = (unsigned long)p;
m = find_first_bit(&tmp, BITS_PER_LONG);
skip = 1 << m;
mask = skip - 1;
base = pfn;
p = 0;
}
}
p++;
i++;
if (base + p != pfn) {
/* If either the offset or the new
* base are unaligned update m
*/
tmp = (unsigned long)(pfn | p);
if (!IS_ALIGNED(tmp, 1 << m))
m = find_first_bit(&tmp, BITS_PER_LONG);

base = pfn;
p = 0;
}

p += len;
i += len;
}

if (i) {

@@ -47,7 +47,8 @@ enum {

#define MLX5_UMR_ALIGN 2048

static int clean_mr(struct mlx5_ib_mr *mr);
static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
@@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,

err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
update_xlt_flags);

if (err) {
mlx5_ib_dereg_mr(&mr->ibmr);
dereg_mr(dev, mr);
return ERR_PTR(err);
}
}
@@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err < 0) {
clean_mr(mr);
clean_mr(dev, mr);
return err;
}
}
@@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
if (err) {
mlx5_ib_warn(dev, "Failed to rereg UMR\n");
ib_umem_release(mr->umem);
clean_mr(mr);
clean_mr(dev, mr);
return err;
}
}
@@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
}
}

static int clean_mr(struct mlx5_ib_mr *mr)
static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
int allocated_from_cache = mr->allocated_from_cache;
int err;
@@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
return 0;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int npages = mr->npages;
struct ib_umem *umem = mr->umem;
@@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
}
#endif

clean_mr(mr);
clean_mr(dev, mr);

if (umem) {
ib_umem_release(umem);
@@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
struct mlx5_ib_mr *mr = to_mmr(ibmr);

return dereg_mr(dev, mr);
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg)

@@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
mr->ibmr.iova);
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
mr->ibmr.length);
lower_32_bits(mr->ibmr.length));
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
set_wqe_32bit_value(wqe->wqe_words,
@@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
mr->npages * 8);

nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, "
"length: %d, rkey: %0x, pgl_paddr: %llx, "
"length: %lld, rkey: %0x, pgl_paddr: %llx, "
"page_list_len: %u, wqe_misc: %x\n",
(unsigned long long) mr->ibmr.iova,
mr->ibmr.length,

@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
*/
priv->dev->broadcast[8] = priv->pkey >> 8;
priv->dev->broadcast[9] = priv->pkey & 0xff;

/*
* Update the broadcast address in the priv->broadcast object,
* in case it already exists, otherwise no one will do that.
*/
if (priv->broadcast) {
spin_lock_irq(&priv->lock);
memcpy(priv->broadcast->mcmember.mgid.raw,
priv->dev->broadcast + 4,
sizeof(union ib_gid));
spin_unlock_irq(&priv->lock);
}

return 0;
}

@@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format,
{
struct ipoib_dev_priv *priv;
struct ib_port_attr attr;
struct rdma_netdev *rn;
int result = -ENOMEM;

priv = ipoib_intf_alloc(hca, port, format);
@@ -2279,7 +2280,8 @@ register_failed:
ipoib_dev_cleanup(priv->dev);

device_init_failed:
free_netdev(priv->dev);
rn = netdev_priv(priv->dev);
rn->free_rdma_netdev(priv->dev);
kfree(priv);

alloc_mem_failed:
@@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
return;

list_for_each_entry_safe(priv, tmp, dev_list, list) {
struct rdma_netdev *rn = netdev_priv(priv->dev);
struct rdma_netdev *parent_rn = netdev_priv(priv->dev);

ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
@@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
unregister_netdev(priv->dev);
mutex_unlock(&priv->sysfs_mutex);

rn->free_rdma_netdev(priv->dev);
parent_rn->free_rdma_netdev(priv->dev);

list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
struct rdma_netdev *child_rn;

child_rn = netdev_priv(cpriv->dev);
child_rn->free_rdma_netdev(cpriv->dev);
kfree(cpriv);
}

kfree(priv);
}

@@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
return restart_syscall();
}

priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (!priv) {
if (!down_write_trylock(&ppriv->vlan_rwsem)) {
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
return -ENOMEM;
return restart_syscall();
}

down_write(&ppriv->vlan_rwsem);
priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (!priv) {
result = -ENOMEM;
goto out;
}

/*
* First ensure this isn't a duplicate. We check the parent device and
@@ -175,8 +178,11 @@ out:
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);

if (result) {
free_netdev(priv->dev);
if (result && priv) {
struct rdma_netdev *rn;

rn = netdev_priv(priv->dev);
rn->free_rdma_netdev(priv->dev);
kfree(priv);
}

@@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
return restart_syscall();
}

down_write(&ppriv->vlan_rwsem);
if (!down_write_trylock(&ppriv->vlan_rwsem)) {
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
return restart_syscall();
}

list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
mutex_unlock(&ppriv->sysfs_mutex);

if (dev) {
free_netdev(dev);
struct rdma_netdev *rn;

rn = netdev_priv(dev);
rn->free_rdma_netdev(priv->dev);
kfree(priv);
return 0;
}

@@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
int i;

iser_err("page vec npages %d data length %d\n",
iser_err("page vec npages %d data length %lld\n",
page_vec->npages, page_vec->fake_mr.length);
for (i = 0; i < page_vec->npages; i++)
iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);

fs/iomap.c
@@ -1009,6 +1009,13 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
WARN_ON_ONCE(ret);
ret = 0;

if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
!inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb);
if (ret < 0)
goto out_free_dio;
}

inode_dio_begin(inode);

blk_start_plug(&plug);
@@ -1031,13 +1038,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (ret < 0)
iomap_dio_set_error(dio, ret);

if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
!inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb);
if (ret < 0)
iomap_dio_set_error(dio, ret);
}

if (!atomic_dec_and_test(&dio->ref)) {
if (!is_sync_kiocb(iocb))
return -EIOCBQUEUED;

@@ -156,7 +156,8 @@ __xfs_ag_resv_free(
trace_xfs_ag_resv_free(pag, type, 0);

resv = xfs_perag_resv(pag, type);
pag->pag_mount->m_ag_max_usable += resv->ar_asked;
if (pag->pag_agno == 0)
pag->pag_mount->m_ag_max_usable += resv->ar_asked;
/*
* AGFL blocks are always considered "free", so whatever
* was reserved at mount time must be given back at umount.
@@ -216,7 +217,14 @@ __xfs_ag_resv_init(
return error;
}

mp->m_ag_max_usable -= ask;
/*
* Reduce the maximum per-AG allocation length by however much we're
* trying to reserve for an AG. Since this is a filesystem-wide
* counter, we only make the adjustment for AG 0. This assumes that
* there aren't any AGs hungrier for per-AG reservation than AG 0.
*/
if (pag->pag_agno == 0)
mp->m_ag_max_usable -= ask;

resv = xfs_perag_resv(pag, type);
resv->ar_asked = ask;

@@ -49,7 +49,6 @@
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rmap_btree.h"
#include "xfs_icache.h"

@@ -192,12 +191,8 @@ xfs_bmap_worst_indlen(
int maxrecs; /* maximum record count at this level */
xfs_mount_t *mp; /* mount structure */
xfs_filblks_t rval; /* return value */
xfs_filblks_t orig_len;

mp = ip->i_mount;

/* Calculate the worst-case size of the bmbt. */
orig_len = len;
maxrecs = mp->m_bmap_dmxr[0];
for (level = 0, rval = 0;
level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
@@ -205,20 +200,12 @@ xfs_bmap_worst_indlen(
len += maxrecs - 1;
do_div(len, maxrecs);
rval += len;
if (len == 1) {
rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
if (len == 1)
return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
level - 1;
break;
}
if (level == 0)
maxrecs = mp->m_bmap_dmxr[1];
}

/* Calculate the worst-case size of the rmapbt. */
if (xfs_sb_version_hasrmapbt(&mp->m_sb))
rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
mp->m_rmap_maxlevels;

return rval;
}

@@ -343,7 +343,8 @@ xfs_end_io(
error = xfs_reflink_end_cow(ip, offset, size);
break;
case XFS_IO_UNWRITTEN:
error = xfs_iomap_write_unwritten(ip, offset, size);
/* writeback should never update isize */
error = xfs_iomap_write_unwritten(ip, offset, size, false);
break;
default:
ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);

@@ -1459,7 +1459,19 @@ xfs_shift_file_space(
return error;

/*
* The extent shiting code works on extent granularity. So, if
* Clean out anything hanging around in the cow fork now that
* we've flushed all the dirty data out to disk to avoid having
* CoW extents at the wrong offsets.
*/
if (xfs_is_reflink_inode(ip)) {
error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
true);
if (error)
return error;
}

/*
* The extent shifting code works on extent granularity. So, if
* stop_fsb is not the starting block of extent, we need to split
* the extent at stop_fsb.
*/

@@ -1258,8 +1258,6 @@ xfs_buf_ioapply_map(
int size;
int offset;

total_nr_pages = bp->b_page_count;

/* skip the pages in the buffer before the start offset */
page_index = 0;
offset = *buf_offset;

@@ -347,7 +347,7 @@ xfs_verifier_error(
{
struct xfs_mount *mp = bp->b_target->bt_mount;

xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
__return_address, bp->b_ops->name, bp->b_bn);

@@ -58,7 +58,7 @@ xfs_zero_range(
xfs_off_t count,
bool *did_zero)
{
return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
}

int
@@ -377,8 +377,6 @@ restart:
*/
spin_lock(&ip->i_flags_lock);
if (iocb->ki_pos > i_size_read(inode)) {
bool zero = false;

spin_unlock(&ip->i_flags_lock);
if (!drained_dio) {
if (*iolock == XFS_IOLOCK_SHARED) {
@@ -399,7 +397,7 @@ restart:
drained_dio = true;
goto restart;
}
error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), NULL);
if (error)
return error;
} else
@@ -436,7 +434,6 @@ xfs_dio_write_end_io(
struct inode *inode = file_inode(iocb->ki_filp);
struct xfs_inode *ip = XFS_I(inode);
loff_t offset = iocb->ki_pos;
bool update_size = false;
int error = 0;

trace_xfs_end_io_direct_write(ip, offset, size);
@@ -447,6 +444,21 @@ xfs_dio_write_end_io(
if (size <= 0)
return size;

if (flags & IOMAP_DIO_COW) {
error = xfs_reflink_end_cow(ip, offset, size);
if (error)
return error;
}

/*
* Unwritten conversion updates the in-core isize after extent
* conversion but before updating the on-disk size. Updating isize any
* earlier allows a racing dio read to find unwritten extents before
* they are converted.
*/
if (flags & IOMAP_DIO_UNWRITTEN)
return xfs_iomap_write_unwritten(ip, offset, size, true);

/*
* We need to update the in-core inode size here so that we don't end up
* with the on-disk inode size being outside the in-core inode size. We
@@ -461,20 +473,11 @@ xfs_dio_write_end_io(
spin_lock(&ip->i_flags_lock);
if (offset + size > i_size_read(inode)) {
i_size_write(inode, offset + size);
update_size = true;
}
spin_unlock(&ip->i_flags_lock);

if (flags & IOMAP_DIO_COW) {
error = xfs_reflink_end_cow(ip, offset, size);
if (error)
return error;
}

if (flags & IOMAP_DIO_UNWRITTEN)
error = xfs_iomap_write_unwritten(ip, offset, size);
else if (update_size)
spin_unlock(&ip->i_flags_lock);
error = xfs_setfilesize(ip, offset, size);
} else {
spin_unlock(&ip->i_flags_lock);
}

return error;
}

@@ -1624,10 +1624,12 @@ xfs_itruncate_extents(
goto out;

/*
* Clear the reflink flag if we truncated everything.
* Clear the reflink flag if there are no data fork blocks and
* there are no extents staged in the cow fork.
*/
if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
if (ip->i_d.di_nblocks == 0)
ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
xfs_inode_clear_cowblocks_tag(ip);
}

@@ -745,7 +745,7 @@ xfs_iflush_done(
*/
iip = INODE_ITEM(blip);
if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
lip->li_flags & XFS_LI_FAILED)
(blip->li_flags & XFS_LI_FAILED))
need_ail++;

blip = next;

@@ -1088,6 +1088,7 @@ xfs_ioctl_setattr_dax_invalidate(
int *join_flags)
{
struct inode *inode = VFS_I(ip);
struct super_block *sb = inode->i_sb;
int error;

*join_flags = 0;
@@ -1100,7 +1101,7 @@ xfs_ioctl_setattr_dax_invalidate(
if (fa->fsx_xflags & FS_XFLAG_DAX) {
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
return -EINVAL;
if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
return -EINVAL;
}

@@ -829,7 +829,8 @@ int
xfs_iomap_write_unwritten(
xfs_inode_t *ip,
xfs_off_t offset,
xfs_off_t count)
xfs_off_t count,
bool update_isize)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb;
@@ -840,6 +841,7 @@ xfs_iomap_write_unwritten(
xfs_trans_t *tp;
xfs_bmbt_irec_t imap;
struct xfs_defer_ops dfops;
struct inode *inode = VFS_I(ip);
xfs_fsize_t i_size;
uint resblks;
int error;
@@ -899,7 +901,8 @@ xfs_iomap_write_unwritten(
i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
if (i_size > offset + count)
i_size = offset + count;

if (update_isize && i_size > i_size_read(inode))
i_size_write(inode, i_size);
i_size = xfs_new_eof(ip, i_size);
if (i_size) {
ip->i_d.di_size = i_size;

@@ -27,7 +27,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *, int);
int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
struct xfs_bmbt_irec *);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);

void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
struct xfs_bmbt_irec *);

@@ -274,7 +274,7 @@ xfs_fs_commit_blocks(
(end - 1) >> PAGE_SHIFT);
WARN_ON_ONCE(error);

error = xfs_iomap_write_unwritten(ip, start, length);
error = xfs_iomap_write_unwritten(ip, start, length, false);
if (error)
goto out_drop_iolock;
}

@@ -1654,6 +1654,16 @@ xfs_fs_fill_super(
"DAX and reflink have not been tested together!");
}

if (mp->m_flags & XFS_MOUNT_DISCARD) {
struct request_queue *q = bdev_get_queue(sb->s_bdev);

if (!blk_queue_discard(q)) {
xfs_warn(mp, "mounting with \"discard\" option, but "
"the device does not support discard");
mp->m_flags &= ~XFS_MOUNT_DISCARD;
}
}

if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
if (mp->m_sb.sb_rblocks) {
xfs_alert(mp,

@@ -285,7 +285,7 @@ enum ib_tm_cap_flags {
IB_TM_CAP_RC = 1 << 0,
};

struct ib_xrq_caps {
struct ib_tm_caps {
/* Max size of RNDV header */
u32 max_rndv_hdr_size;
/* Max number of entries in tag matching list */
@@ -358,7 +358,7 @@ struct ib_device_attr {
struct ib_rss_caps rss_caps;
u32 max_wq_type_rq;
u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
struct ib_xrq_caps xrq_caps;
struct ib_tm_caps tm_caps;
};

enum ib_mtu {
@@ -1739,7 +1739,7 @@ struct ib_mr {
u32 lkey;
u32 rkey;
u64 iova;
u32 length;
u64 length;
unsigned int page_size;
bool need_inval;
union {

@@ -261,7 +261,7 @@ struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_rss_caps rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
struct ib_uverbs_tm_caps xrq_caps;
struct ib_uverbs_tm_caps tm_caps;
};

struct ib_uverbs_query_port {

@@ -412,6 +412,19 @@ err:
return NULL;
}

static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
{
if (rb->aux_overwrite)
return false;

if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
return true;
}

return false;
}

/*
* Commit the data written by hardware into the ring buffer by adjusting
* aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
@@ -451,10 +464,8 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
}

rb->user_page->aux_head = rb->aux_head;
if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
if (rb_need_aux_wakeup(rb))
wakeup = true;
rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
}

if (wakeup) {
if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
@@ -484,9 +495,8 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
rb->aux_head += size;

rb->user_page->aux_head = rb->aux_head;
if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
if (rb_need_aux_wakeup(rb)) {
perf_output_wakeup(handle);
rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
}

@@ -473,14 +473,19 @@ static long seccomp_attach_filter(unsigned int flags,
return 0;
}

void __get_seccomp_filter(struct seccomp_filter *filter)
{
/* Reference count is bounded by the number of total processes. */
refcount_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
if (!orig)
return;
/* Reference count is bounded by the number of total processes. */
refcount_inc(&orig->usage);
__get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
@@ -491,10 +496,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
static void __put_seccomp_filter(struct seccomp_filter *orig)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
/* Clean up single-reference branches iteratively. */
while (orig && refcount_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig;
@@ -503,6 +506,12 @@ void put_seccomp_filter(struct task_struct *tsk)
}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
__put_seccomp_filter(tsk->seccomp.filter);
}

static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
memset(info, 0, sizeof(*info));
@@ -1025,13 +1034,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
if (!data)
goto out;

get_seccomp_filter(task);
__get_seccomp_filter(filter);
spin_unlock_irq(&task->sighand->siglock);

if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;

put_seccomp_filter(task);
__put_seccomp_filter(filter);
return ret;

out:

@@ -126,14 +126,4 @@ config BT_DEBUGFS
Provide extensive information about internal Bluetooth states
in debugfs.

config BT_LEGACY_IOCTL
bool "Enable legacy ioctl interfaces"
depends on BT && BT_BREDR
default y
help
Enable support for legacy ioctl interfaces. This is only needed
for old and deprecated applications using direct ioctl calls for
controller management. Since Linux 3.4 all configuration and
setup is done via mgmt interface and this is no longer needed.

source "drivers/bluetooth/Kconfig"

@@ -878,7 +878,6 @@ static int hci_sock_release(struct socket *sock)
return 0;
}

#ifdef CONFIG_BT_LEGACY_IOCTL
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
@@ -1050,7 +1049,6 @@ done:
release_sock(sk);
return err;
}
#endif

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
@@ -1971,11 +1969,7 @@ static const struct proto_ops hci_sock_ops = {
.getname = hci_sock_getname,
.sendmsg = hci_sock_sendmsg,
.recvmsg = hci_sock_recvmsg,
#ifdef CONFIG_BT_LEGACY_IOCTL
.ioctl = hci_sock_ioctl,
#else
.ioctl = sock_no_ioctl,
#endif
.poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,

@@ -401,7 +401,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
if (unlikely(n != mw->mw_nents))
goto out_mapmr_err;

dprintk("RPC:       %s: Using frmr %p to map %u segments (%u bytes)\n",
dprintk("RPC:       %s: Using frmr %p to map %u segments (%llu bytes)\n",
__func__, frmr, mw->mw_nents, mr->length);

key = (u8)(mr->rkey & 0x000000FF);