Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Completely minor snmp doc conflict.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-01-21 14:41:32 -08:00
Parents: 28f9d1a3d4 49a57857ae
Commit: fa7f3a8d56
331 changed files: 3354 additions and 1444 deletions


@ -72,6 +72,10 @@ ForEachMacros:
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
- '__ata_qc_for_each'
- 'ata_qc_for_each'
- 'ata_qc_for_each_raw'
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- 'bio_for_each_integrity_vec'
@ -85,6 +89,7 @@ ForEachMacros:
- 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
- 'bpf_for_each_spilled_reg'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
@ -103,6 +108,8 @@ ForEachMacros:
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_encoder'
@ -121,11 +128,21 @@ ForEachMacros:
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
- 'for_each_card_components'
- 'for_each_card_links'
- 'for_each_card_links_safe'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_cmsghdr'
- 'for_each_compatible_node'
- 'for_each_component_dais'
- 'for_each_component_dais_safe'
- 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
@ -133,6 +150,10 @@ ForEachMacros:
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
- 'for_each_dpcm_be_safe'
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
@ -149,6 +170,7 @@ ForEachMacros:
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
@ -160,6 +182,7 @@ ForEachMacros:
- 'for_each_mem_range_rev'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_net'
- 'for_each_netdev'
- 'for_each_netdev_continue'
@ -183,12 +206,14 @@ ForEachMacros:
- 'for_each_node_with_property'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
@ -206,14 +231,17 @@ ForEachMacros:
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
- 'for_each_reserved_mem_region'
- 'for_each_resv_unavail_range'
- 'for_each_rtd_codec_dai'
- 'for_each_rtd_codec_dai_rollback'
- 'for_each_rtdcom'
- 'for_each_rtdcom_safe'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_sg'
- 'for_each_sg_page'
- 'for_each_sibling_event'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_zone'
@ -251,6 +279,8 @@ ForEachMacros:
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
- 'i3c_bus_for_each_i2cdev'
- 'i3c_bus_for_each_i3cdev'
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
@ -267,11 +297,14 @@ ForEachMacros:
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
- 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
@ -291,6 +324,7 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
@ -357,12 +391,14 @@ ForEachMacros:
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
- 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'tb_property_for_each'
- 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@ -371,6 +407,11 @@ ForEachMacros:
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'xa_for_each'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
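(The ForEachMacros list above tells clang-format to treat these kernel macros as foreach-style loops, so a brace-delimited body after them is indented like a for loop rather than a wrapped function call. Below is a minimal, self-contained C sketch of the kind of construct this affects; the macro is an illustrative stand-in, not one of the kernel helpers.)

  #include <stdio.h>

  /* Illustrative for_each-style macro; clang-format only indents its
   * body as a loop body when the macro name is listed in ForEachMacros. */
  #define for_each_index(i, n) for ((i) = 0; (i) < (n); (i)++)

  int main(void)
  {
          int i;

          for_each_index(i, 4) {
                  printf("item %d\n", i);
          }
          return 0;
  }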


@ -235,4 +235,4 @@ cpus {
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/arm/cpus.yaml


@ -684,7 +684,7 @@ cpus {
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/arm/cpus.yaml
[2] ARM Linux Kernel documentation - PSCI bindings
Documentation/devicetree/bindings/arm/psci.txt


@ -4,7 +4,7 @@ SP810 System Controller
Required properties:
- compatible: standard compatible string for a Primecell peripheral,
see Documentation/devicetree/bindings/arm/primecell.txt
see Documentation/devicetree/bindings/arm/primecell.yaml
for more details
should be: "arm,sp810", "arm,primecell"


@ -472,4 +472,4 @@ cpus {
===============================================================================
[1] ARM Linux kernel documentation
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/arm/cpus.yaml


@ -18,4 +18,4 @@ Required Properties:
Each clock is assigned an identifier and client nodes use this identifier
to specify the clock which they consume.
All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>.
All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.


@ -1,6 +1,6 @@
* ARM PrimeCell Color LCD Controller PL110/PL111
See also Documentation/devicetree/bindings/arm/primecell.txt
See also Documentation/devicetree/bindings/arm/primecell.yaml
Required properties:


@ -14,8 +14,6 @@ Required properties:
"marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
SoCs (either from AP or CP), see
Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
and
Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
for specific details about the offset property.


@ -78,7 +78,7 @@ Sub-nodes:
PPI affinity can be expressed as a single "ppi-partitions" node,
containing a set of sub-nodes, each with the following property:
- affinity: Should be a list of phandles to CPU nodes (as described in
Documentation/devicetree/bindings/arm/cpus.txt).
Documentation/devicetree/bindings/arm/cpus.yaml).
GICv3 has one or more Interrupt Translation Services (ITS) that are
used to route Message Signalled Interrupts (MSI) to the CPUs.


@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function
= EXAMPLE
The following example represents the GLINK RPM node on a MSM8996 device, with
the function for the "rpm_request" channel defined, which is used for
regualtors and root clocks.
regulators and root clocks.
apcs_glb: mailbox@9820000 {
compatible = "qcom,msm8996-apcs-hmss-global";


@ -41,12 +41,12 @@ processor ID) and a string identifier.
- qcom,local-pid:
Usage: required
Value type: <u32>
Definition: specifies the identfier of the local endpoint of this edge
Definition: specifies the identifier of the local endpoint of this edge
- qcom,remote-pid:
Usage: required
Value type: <u32>
Definition: specifies the identfier of the remote endpoint of this edge
Definition: specifies the identifier of the remote endpoint of this edge
= SUBNODES
Each SMP2P pair contain a set of inbound and outbound entries, these are


@ -163,6 +163,14 @@ C. Boot options
be preserved until there actually is some text output to the console.
This option causes fbcon to bind immediately to the fbdev device.
7. fbcon=logo-pos:<location>
The only possible 'location' is 'center' (without quotes), and when
given, the bootup logo is moved from the default top-left corner
location to the center of the framebuffer. If more than one logo is
displayed due to multiple CPUs, the collected line of logos is moved
as a whole.
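For example, appending the following to the kernel command line centers the boot logo (only the relevant option is shown):
    fbcon=logo-pos:center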
C. Attaching, Detaching and Unloading
Before going on to how to attach, detach and unload the framebuffer console, an


@ -11,19 +11,19 @@ Contents:
batman-adv
can
can_ucan_protocol
dpaa2/index
e100
e1000
e1000e
fm10k
igb
igbvf
ixgb
ixgbe
ixgbevf
i40e
iavf
ice
device_drivers/freescale/dpaa2/index
device_drivers/intel/e100
device_drivers/intel/e1000
device_drivers/intel/e1000e
device_drivers/intel/fm10k
device_drivers/intel/igb
device_drivers/intel/igbvf
device_drivers/intel/ixgb
device_drivers/intel/ixgbe
device_drivers/intel/ixgbevf
device_drivers/intel/i40e
device_drivers/intel/iavf
device_drivers/intel/ice
kapi
z8530book
msg_zerocopy


@ -1000,51 +1000,6 @@ The kernel interface functions are as follows:
size should be set when the call is begun. tx_total_len may not be less
than zero.
(*) Check to see the completion state of a call so that the caller can assess
whether it needs to be retried.
enum rxrpc_call_completion {
RXRPC_CALL_SUCCEEDED,
RXRPC_CALL_REMOTELY_ABORTED,
RXRPC_CALL_LOCALLY_ABORTED,
RXRPC_CALL_LOCAL_ERROR,
RXRPC_CALL_NETWORK_ERROR,
};
int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
enum rxrpc_call_completion *_compl,
u32 *_abort_code);
On return, -EINPROGRESS will be returned if the call is still ongoing; if
it is finished, *_compl will be set to indicate the manner of completion,
*_abort_code will be set to any abort code that occurred. 0 will be
returned on a successful completion, -ECONNABORTED will be returned if the
client failed due to a remote abort and anything else will return an
appropriate error code.
The caller should look at this information to decide if it's worth
retrying the call.
(*) Retry a client call.
int rxrpc_kernel_retry_call(struct socket *sock,
struct rxrpc_call *call,
struct sockaddr_rxrpc *srx,
struct key *key);
This attempts to partially reinitialise a call and submit it again while
reusing the original call's Tx queue to avoid the need to repackage and
re-encrypt the data to be sent. call indicates the call to retry, srx the
new address to send it to and key the encryption key to use for signing or
encrypting the packets.
For this to work, the first Tx data packet must still be in the transmit
queue, and currently this is only permitted for local and network errors
and the call must not have been aborted. Any partially constructed Tx
packet is left as is and can continue being filled afterwards.
It returns 0 if the call was requeued and an error otherwise.
(*) Get call RTT.
u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);


@ -366,6 +366,27 @@ to the accept queue.
TCP Fast Open
=============
* TcpEstabResets
Defined in `RFC1213 tcpEstabResets`_.
.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48
* TcpAttemptFails
Defined in `RFC1213 tcpAttemptFails`_.
.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48
* TcpOutRsts
Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
the 'segments sent containing the RST flag', but in the Linux kernel, this
counter indicates the segments the kernel tried to send. The sending
process might fail due to some errors (e.g. memory allocation failure).
.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
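A quick way to watch these counters is to read /proc/net/snmp (where the
Tcp* counters above appear, named AttemptFails, EstabResets and OutRsts)
and /proc/net/netstat (the TcpExt* counters below). A minimal userspace
sketch, assuming the usual procfs layout::

  #include <stdio.h>
  #include <string.h>

  /* Dump the "Tcp:" header and value lines from /proc/net/snmp. */
  int main(void)
  {
          char line[4096];
          FILE *f = fopen("/proc/net/snmp", "r");

          if (!f)
                  return 1;
          while (fgets(line, sizeof(line), f))
                  if (!strncmp(line, "Tcp:", 4))
                          fputs(line, stdout);
          fclose(f);
          return 0;
  }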
TCP Fast Path
=============
When the kernel receives a TCP packet, it has two paths to handle the
packet: one is the fast path, the other is the slow path. The comment in
the kernel code provides a good explanation of them; it is pasted below::
@ -413,7 +434,6 @@ increase 1.
TCP abort
=========
* TcpExtTCPAbortOnData
It means TCP layer has data in flight, but need to close the
@ -589,7 +609,6 @@ packet yet, the sender would know packet 4 is out of order. The TCP
stack of kernel will increase TcpExtTCPSACKReorder for both of the
above scenarios.
DSACK
=====
The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report
@ -612,8 +631,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
DSACK to the sender.
* TcpExtTCPDSACKRecv
The TCP stack receives a DSACK, which indicate an acknowledged
The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
* TcpExtTCPDSACKOfoRecv
@ -621,6 +639,56 @@ duplicate packet is received.
The TCP stack receives a DSACK, which indicates an out of order
duplicate packet is received.
invalid SACK and DSACK
======================
When a SACK (or DSACK) block is invalid, a corresponding counter would
be updated. The validation method is based on the start/end sequence
number of the SACK block. For more details, please refer to the comment
of the function tcp_is_sackblock_valid in the kernel source code. A
SACK option could have up to 4 blocks; they are checked
individually. E.g., if 3 blocks of a SACK are invalid, the
corresponding counter would be updated 3 times. The comment of the
`Add counters for discarded SACK blocks`_ patch has additional
explanation:
.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
* TcpExtTCPSACKDiscard
This counter indicates how many SACK blocks are invalid. If the invalid
SACK block is caused by ACK recording, the TCP stack will only ignore
it and won't update this counter.
* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
When a DSACK block is invalid, one of these two counters would be
updated. Which counter will be updated depends on the undo_marker flag
of the TCP socket. If the undo_marker is not set, the TCP stack isn't
likely to have re-transmitted any packets; if we still receive an invalid
DSACK block, the reason might be that the packet was duplicated in the
middle of the network. In such a scenario, TcpExtTCPDSACKIgnoredNoUndo
will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
will be updated. As implied in its name, it might be an old packet.
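To make the start/end sequence validation concrete, below is a small
self-contained C sketch of wraparound-safe sequence comparison of the
kind such a check relies on. The helper names are illustrative, and the
real tcp_is_sackblock_valid handles more cases (e.g. DSACK blocks below
snd_una)::

  #include <stdint.h>
  #include <stdio.h>

  /* Wraparound-safe "a is before b" for 32-bit TCP sequence numbers. */
  static int seq_before(uint32_t a, uint32_t b)
  {
          return (int32_t)(a - b) < 0;
  }

  /* A SACK block [start, end) is only plausible if start is before end
   * and the block does not reach beyond what has actually been sent. */
  static int sack_block_plausible(uint32_t start, uint32_t end,
                                  uint32_t snd_una, uint32_t snd_nxt)
  {
          return seq_before(start, end) &&
                 !seq_before(start, snd_una) &&
                 !seq_before(snd_nxt, end);
  }

  int main(void)
  {
          /* snd_una = 1000, snd_nxt = 2000: block 1100..1200 is plausible,
           * block 2100..2200 is not. */
          printf("%d %d\n",
                 sack_block_plausible(1100, 1200, 1000, 2000),
                 sack_block_plausible(2100, 2200, 1000, 2000));
          return 0;
  }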
SACK shift
==========
The Linux networking stack stores data in sk_buff structs (skb for
short). If a SACK block spans multiple skbs, the TCP stack will try
to re-arrange the data in these skbs. E.g. if a SACK block acknowledges
seq 10 to 15, skb1 has seq 10 to 13, and skb2 has seq 14 to 20, the seq
14 and 15 in skb2 would be moved to skb1. This operation is a 'shift'.
If a SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, and
skb2 has seq 14 to 20, all data in skb2 will be moved to skb1, and skb2
will be discarded; this operation is a 'merge'.
* TcpExtTCPSackShifted
A skb is shifted
* TcpExtTCPSackMerged
A skb is merged
* TcpExtTCPSackShiftFallback
A skb should be shifted or merged, but the TCP stack doesn't do it for
some reason.
TCP out of order
================
* TcpExtTCPOFOQueue
@ -721,6 +789,60 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_).
.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9
.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11
TCP receive window
==================
* TcpExtTCPWantZeroWindowAdv
Depending on current memory usage, the TCP stack tries to set the
receive window to zero. But the receive window might still be a non-zero
value. For example, if the previous window size is 10, and the TCP
stack receives 3 bytes, the current window size would be 7 even if the
window size calculated from the memory usage is zero.
* TcpExtTCPToZeroWindowAdv
The TCP receive window is set to zero from a non-zero value.
* TcpExtTCPFromZeroWindowAdv
The TCP receive window is set to a non-zero value from zero.
Delayed ACK
===========
The TCP Delayed ACK is a technique used for reducing the
packet count in the network. For more details, please refer to the
`Delayed ACK wiki`_.
.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment
* TcpExtDelayedACKs
A delayed ACK timer expires. The TCP stack will send a pure ACK packet
and exit the delayed ACK mode.
* TcpExtDelayedACKLocked
A delayed ACK timer expires, but the TCP stack can't send an ACK
immediately because the socket is locked by a userspace program. The
TCP stack will send a pure ACK later (after the userspace program
unlocks the socket). When the TCP stack sends the pure ACK later, the
TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK
mode.
* TcpExtDelayedACKLost
It will be updated when the TCP stack receives a packet which has
already been ACKed. A Delayed ACK loss might cause this issue, but it
could also be triggered by other reasons, such as a packet being
duplicated in the network.
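Userspace can nudge this behaviour with the TCP_QUICKACK socket option;
below is a minimal C sketch (an illustration only, not part of this
document) of asking the stack to bypass delayed ACK mode::

  #include <netinet/in.h>
  #include <netinet/tcp.h>
  #include <sys/socket.h>

  /* Ask the stack to ACK immediately instead of delaying.  The kernel
   * may re-enter delayed ACK mode on its own, so this is only a hint. */
  static int disable_delayed_ack(int sock)
  {
          int one = 1;

          return setsockopt(sock, IPPROTO_TCP, TCP_QUICKACK,
                            &one, sizeof(one));
  }

  int main(void)
  {
          int sock = socket(AF_INET, SOCK_STREAM, 0);

          if (sock < 0 || disable_delayed_ack(sock) < 0)
                  return 1;
          return 0;
  }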
Tail Loss Probe (TLP)
=====================
TLP is an algorithm used to detect TCP packet loss. For more
details, please refer to the `TLP paper`_.
.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01
* TcpExtTCPLossProbes
A TLP probe packet is sent.
* TcpExtTCPLossProbeRecovery
A packet loss is detected and recovered by TLP.
examples
========


@ -417,7 +417,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set.
Hardware time stamping must also be initialized for each device driver
that is expected to do hardware time stamping. The parameter is defined in
/include/linux/net_tstamp.h as:
include/uapi/linux/net_tstamp.h as:
struct hwtstamp_config {
int flags; /* no flags defined right now, must be zero */
@ -487,7 +487,7 @@ enum {
HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
/* for the complete list of values, please check
* the include file /include/linux/net_tstamp.h
* the include file include/uapi/linux/net_tstamp.h
*/
};
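For reference, a minimal userspace sketch of programming this structure
through the SIOCSHWTSTAMP ioctl; the interface name and filter choice
below are only examples:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <net/if.h>
  #include <linux/net_tstamp.h>
  #include <linux/sockios.h>

  /* Enable TX and RX hardware timestamping on one interface. */
  static int enable_hw_tstamp(int sock, const char *ifname)
  {
          struct hwtstamp_config cfg;
          struct ifreq ifr;

          memset(&cfg, 0, sizeof(cfg));
          cfg.tx_type = HWTSTAMP_TX_ON;
          cfg.rx_filter = HWTSTAMP_FILTER_ALL;

          memset(&ifr, 0, sizeof(ifr));
          strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
          ifr.ifr_data = (void *)&cfg;

          return ioctl(sock, SIOCSHWTSTAMP, &ifr);
  }

  int main(void)
  {
          int sock = socket(AF_INET, SOCK_DGRAM, 0);

          return (sock >= 0 && enable_hw_tstamp(sock, "eth0") == 0) ? 0 : 1;
  }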


@ -3471,10 +3471,9 @@ F: drivers/i2c/busses/i2c-octeon*
F: drivers/i2c/busses/i2c-thunderx*
CAVIUM LIQUIDIO NETWORK DRIVER
M: Derek Chickles <derek.chickles@caviumnetworks.com>
M: Satanand Burla <satananda.burla@caviumnetworks.com>
M: Felix Manlunas <felix.manlunas@caviumnetworks.com>
M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
M: Derek Chickles <dchickles@marvell.com>
M: Satanand Burla <sburla@marvell.com>
M: Felix Manlunas <fmanlunas@marvell.com>
L: netdev@vger.kernel.org
W: http://www.cavium.com
S: Supported


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Shy Crocodile
# *DOCUMENTATION*
@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION
endif
endif
PHONY += prepare0
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc
# archprepare is used in arch Makefiles and when processed asm symlink,
# version.h and scripts_basic is processed / created.
# Listed in dependency order
PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
PHONY += prepare archprepare prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
mrproper-dirs := $(addprefix _mrproper_,scripts)
PHONY += $(mrproper-dirs) mrproper archmrproper
PHONY += $(mrproper-dirs) mrproper
$(mrproper-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
mrproper: clean archmrproper $(mrproper-dirs)
mrproper: clean $(mrproper-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)


@ -60,8 +60,6 @@
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#else
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
#ifndef __ASSEMBLY__


@ -60,8 +60,11 @@ static inline bool arm64_kernel_use_ng_mappings(void)
* later determine that kpti is required, then
* kpti_install_ng_mappings() will make them non-global.
*/
if (arm64_kernel_unmapped_at_el0())
return true;
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return arm64_kernel_unmapped_at_el0();
return false;
/*
* KASLR is enabled so we're going to be enabling kpti on non-broken


@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
return ret;
}
static __init const u8 *get_cmdline(void *fdt)
static __init const u8 *kaslr_get_cmdline(void *fdt)
{
static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case.
*/
cmdline = get_cmdline(fdt);
cmdline = kaslr_get_cmdline(fdt);
str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
return 0;
@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
return offset;
}


@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/
boot := arch/h8300/boot
archmrproper:
archclean:
$(Q)$(MAKE) $(clean)=$(boot)


@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig
NM := $(CROSS_COMPILE)nm -B
READELF := $(CROSS_COMPILE)readelf
export AWK
CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
OBJCOPYFLAGS := --strip-all


@ -3155,6 +3155,7 @@ config MIPS32_O32
config MIPS32_N32
bool "Kernel support for n32 binaries"
depends on 64BIT
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC


@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
pm_power_off = bcm47xx_machine_halt;
}
#ifdef CONFIG_BCM47XX_BCMA
static struct device * __init bcm47xx_setup_device(void)
{
struct device *dev;
int err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
err = dev_set_name(dev, "bcm47xx_soc");
if (err) {
pr_err("Failed to set SoC device name: %d\n", err);
kfree(dev);
return NULL;
}
err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err)
pr_err("Failed to set SoC DMA mask: %d\n", err);
return dev;
}
#endif
/*
* This finishes bus initialization doing things that were not possible without
* kmalloc. Make sure to call it late enough (after mm_init).
@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
int err;
bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
if (!bcm47xx_bus.bcma.dev)
panic("Failed to setup SoC device\n");
err = bcma_host_soc_init(&bcm47xx_bus.bcma);
if (err)
panic("Failed to initialize BCMA bus (err %d)", err);
@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
if (device_register(bcm47xx_bus.bcma.dev))
pr_err("Failed to register SoC device\n");
bcma_bus_register(&bcm47xx_bus.bcma.bus);
break;
#endif


@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored)
" sync \n"
" synci ($0) \n");
relocated_kexec_smp_wait(NULL);
kexec_reboot();
}
#endif


@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set


@ -18,8 +18,6 @@
#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
#define MIPS_CPU_TIMER_IRQ 7
#define MAX_IM 5
#endif /* _FALCON_IRQ__ */


@ -19,8 +19,6 @@
#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
#define MIPS_CPU_TIMER_IRQ 7
#define MAX_IM 5
#endif


@ -74,14 +74,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);


@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
.irq_set_type = ltq_eiu_settype,
};
static void ltq_hw_irqdispatch(int module)
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
int module = irq_desc_get_irq(desc) - 2;
u32 irq;
int hwirq;
irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
if (irq == 0)
@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
* other bits might be bogus
*/
irq = __fls(irq);
do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is a EBU irq, we need to ack it or get a deadlock */
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
LTQ_EBU_PCC_ISTAT);
}
#define DEFINE_HWx_IRQDISPATCH(x) \
static void ltq_hw ## x ## _irqdispatch(void) \
{ \
ltq_hw_irqdispatch(x); \
}
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)
#if MIPS_CPU_TIMER_IRQ == 7
static void ltq_hw5_irqdispatch(void)
{
do_IRQ(MIPS_CPU_TIMER_IRQ);
}
#else
DEFINE_HWx_IRQDISPATCH(5)
#endif
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
int irq;
if (!pending) {
spurious_interrupt();
return;
}
pending >>= CAUSEB_IP;
while (pending) {
irq = fls(pending) - 1;
do_IRQ(MIPS_CPU_IRQ_BASE + irq);
pending &= ~BIT(irq);
}
}
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct irq_chip *chip = &ltq_irq_type;
@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
for (i = 0; i < MAX_IM; i++)
irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
if (cpu_has_vint) {
pr_info("Setting up vectored interrupts\n");
set_vi_handler(2, ltq_hw0_irqdispatch);
set_vi_handler(3, ltq_hw1_irqdispatch);
set_vi_handler(4, ltq_hw2_irqdispatch);
set_vi_handler(5, ltq_hw3_irqdispatch);
set_vi_handler(6, ltq_hw4_irqdispatch);
set_vi_handler(7, ltq_hw5_irqdispatch);
}
ltq_domain = irq_domain_add_linear(node,
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
#ifndef CONFIG_MIPS_MT_SMP
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif
/* tell oprofile which irq to use */
ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
/*
* if the timer irq is not one of the mips irqs we need to
* create a mapping
*/
if (MIPS_CPU_TIMER_IRQ != 7)
irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
/* the external interrupts are optional and xway only */
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
return MIPS_CPU_TIMER_IRQ;
return CP0_LEGACY_COMPARE_IRQ;
}
static struct of_device_id __initdata of_irq_ids[] = {


@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
int irq;
struct irq_chip *msi;
if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
return 0;
} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;


@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
KBUILD_DEFCONFIG := defconfig
comma = ,
ifdef CONFIG_FUNCTION_TRACER
arch-y += -malways-save-lp -mno-relax
endif
@ -54,8 +51,6 @@ endif
boot := arch/nds32/boot
core-y += $(boot)/dts/
.PHONY: FORCE
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@ -68,9 +63,6 @@ prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
CLEAN_FILES += include/asm-nds32/constants.h*
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)


@ -20,7 +20,6 @@
KBUILD_DEFCONFIG := or1ksim_defconfig
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
@ -50,5 +49,3 @@ else
BUILTIN_DTB := n
endif
core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
all: vmlinux


@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */


@ -852,11 +852,12 @@ start_here:
/* set up the PTE pointers for the Abatron bdiGDB.
*/
tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5)
/* Now turn on the MMU for real! */


@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
}
} else
#endif
/* Fall through, for non-TM restore */
if (!MSR_TM_ACTIVE(msr)) {
{
/*
* Fall through, for non-TM restore
*
* Unset MSR[TS] on the thread regs since MSR from user
* context does not have MSR active, and recheckpoint was
* not called since restore_tm_sigcontexts() was not called


@ -967,13 +967,6 @@ out:
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
#ifdef PPC64_ELF_ABI_v1
char *arch_ftrace_match_adjust(char *str, const char *search)
{


@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
u64 perf_reg_value(struct pt_regs *regs, int idx)
@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
!is_sier_available()))
return 0;
if (idx == PERF_REG_POWERPC_MMCRA &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
IS_ENABLED(CONFIG_PPC32)))
return 0;
return regs_get_register(regs, pt_regs_offset[idx]);
}


@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
continue;
seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys));
seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys));
seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal);
seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys));
seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys));
seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
blk->size, blk->owner);
}
seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys));
seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys));
seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);


@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void)
/* see if there is a keyboard in the device tree
with a parent of type "adb" */
for_each_node_by_name(kbd, "keyboard")
if (kbd->parent && kbd->parent->type
&& strcmp(kbd->parent->type, "adb") == 0)
if (of_node_is_type(kbd->parent, "adb"))
break;
of_node_put(kbd);
if (kbd)


@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
}
} else {
/* Create a group for 1 GPU and attached NPUs for POWER8 */
pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL);
pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
table_group = &pe->npucomp->table_group;
table_group->ops = &pnv_npu_peers_ops;
iommu_register_group(table_group, hose->global_number,


@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void)
list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
if (phb->type == PNV_PHB_NPU_NVLINK)
if (phb->type == PNV_PHB_NPU_NVLINK ||
phb->type == PNV_PHB_NPU_OCAPI)
continue;
list_for_each_entry(pe, &phb->ioda.pe_list, list) {


@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void)
if (!of_device_is_compatible(nvdn->parent,
"ibm,power9-npu"))
continue;
#ifdef CONFIG_PPC_POWERNV
WARN_ON_ONCE(pnv_npu2_init(hose));
#endif
break;
}
}


@ -617,7 +617,7 @@ config X86_INTEL_QUARK
config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
depends on X86 && ACPI
depends on X86 && ACPI && PCI
select COMMON_CLK
select PINCTRL
select IOSF_MBI


@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
{
if (unlikely(!access_ok(ptr,len)))
return 0;
__uaccess_begin();
__uaccess_begin_nospec();
return 1;
}
#define user_access_begin(a,b) user_access_begin(a,b)


@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
val &= ~X2APIC_ENABLE;
val &= ~X2APIC_ENABLE;
break;
}
return val;


@ -361,8 +361,6 @@ void xen_timer_resume(void)
{
int cpu;
pvclock_resume();
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
};
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
static u64 xen_clock_value_saved;
void xen_save_time_memory_area(void)
{
struct vcpu_register_time_memory_area t;
int ret;
xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
if (!xen_clock)
return;
@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
int ret;
if (!xen_clock)
return;
goto out;
t.addr.v = &xen_clock->pvti;
@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
if (ret != 0)
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
ret);
out:
/* Need pvclock_resume() before using xen_clocksource_read(). */
pvclock_resume();
xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
}
static void xen_setup_vsyscall_time_info(void)


@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
}
/**
* __bfq_deactivate_entity - deactivate an entity from its service tree.
* @entity: the entity to deactivate.
* __bfq_deactivate_entity - update sched_data and service trees for
* entity, so as to represent entity as inactive
* @entity: the entity being deactivated.
* @ins_into_idle_tree: if false, the entity will not be put into the
* idle tree.
*
* Deactivates an entity, independently of its previous state. Must
* be invoked only if entity is on a service tree. Extracts the entity
* from that tree, and if necessary and allowed, puts it into the idle
* tree.
* If necessary and allowed, puts entity into the idle tree. NOTE:
* entity may be on no tree if in service.
*/
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{


@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
* This file is released under the GPL.
*/
#include <linux/blkdev.h>


@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
struct blk_mq_alloc_data data = { .flags = 0};
struct request *rq;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq_qos_throttle(q, bio);
data.cmd_flags = bio->bi_opf;
rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);


@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
ictx = skcipher_instance_ctx(inst);
/* Stream cipher, e.g. "xchacha12" */
crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
skcipher_crypto_instance(inst));
err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
0, crypto_requires_sync(algt->type,
algt->mask));
@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
crypto_set_spawn(&ictx->blockcipher_spawn,
skcipher_crypto_instance(inst));
err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
if (err)


@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
/*
* RTA_OK() didn't align the rtattr's payload when validating that it
* fits in the buffer. Yet, the keys should start on the next 4-byte
* aligned boundary. To avoid confusion, require that the rtattr
* payload be exactly the param struct, which has a 4-byte aligned size.
*/
if (RTA_PAYLOAD(rta) != sizeof(*param))
return -EINVAL;
BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
param = RTA_DATA(rta);
keys->enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
key += rta->rta_len;
keylen -= rta->rta_len;
if (keylen < keys->enckeylen)
return -EINVAL;


@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
struct aead_request *req = areq->data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
aead_request_complete(req, err);
authenc_esn_request_complete(req, err);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)


@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
for (i = 0; i <= 63; i++) {
ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
ss2 = ss1 ^ rol32(a, 12);


@ -41,7 +41,8 @@ acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
acpi-y += acpi_lpss.o acpi_apd.o
acpi-$(CONFIG_PCI) += acpi_lpss.o
acpi-y += acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o

Просмотреть файл

@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
goto error0;
}
/*
* ACPI 2.0 requires the EC driver to be loaded and work before
* the EC device is found in the namespace (i.e. before
* acpi_load_tables() is called).
*
* This is accomplished by looking for the ECDT table, and getting
* the EC parameters out of that.
*
* Ignore the result. Not having an ECDT is not fatal.
*/
status = acpi_ec_ecdt_probe();
#ifdef CONFIG_X86
if (!acpi_ioapic) {
/* compatible (0) means level (3) */
@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
goto error1;
}
/*
* ACPI 2.0 requires the EC driver to be loaded and work before the EC
* device is found in the namespace.
*
* This is accomplished by looking for the ECDT table and getting the EC
* parameters out of that.
*
* Do that before calling acpi_initialize_objects() which may trigger EC
* address space accesses.
*/
acpi_ec_ecdt_probe();
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX


@ -81,7 +81,11 @@ void acpi_debugfs_init(void);
#else
static inline void acpi_debugfs_init(void) { return; }
#endif
#ifdef CONFIG_PCI
void acpi_lpss_init(void);
#else
static inline void acpi_lpss_init(void) {}
#endif
void acpi_apd_init(void);


@ -26,7 +26,6 @@
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"
#include "intel.h"
/*
* For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
}
EXPORT_SYMBOL(to_nfit_uuid);
static struct acpi_nfit_desc *to_acpi_nfit_desc(
struct nvdimm_bus_descriptor *nd_desc)
{
return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@ -419,7 +412,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
union acpi_object in_obj, in_buf, *out_obj;
const struct nd_cmd_desc *desc = NULL;
@ -721,6 +714,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_desc *acpi_desc;
struct nfit_mem *nfit_mem;
u16 physical_id;
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
@ -728,10 +722,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
memdev = __to_nfit_memdev(nfit_mem);
if (memdev->device_handle == device_handle) {
*flags = memdev->flags;
physical_id = memdev->physical_id;
mutex_unlock(&acpi_desc->init_mutex);
mutex_unlock(&acpi_desc_lock);
*flags = memdev->flags;
return memdev->physical_id;
return physical_id;
}
}
mutex_unlock(&acpi_desc->init_mutex);
@ -2231,7 +2226,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
if (!nd_set)
return -ENOMEM;
ndr_desc->nd_set = nd_set;
guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
@ -3367,7 +3361,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init);
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct device *dev = acpi_desc->dev;
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
@ -3384,7 +3378,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
if (nvdimm)
return 0;


@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
static void nvdimm_invalidate_cache(void);
static int intel_security_unlock(struct nvdimm *nvdimm,
static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm,
return 0;
}
static int intel_security_erase(struct nvdimm *nvdimm,
static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key,
enum nvdimm_passphrase_type ptype)
{
@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm,
return 0;
}
static int intel_security_query_overwrite(struct nvdimm *nvdimm)
static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm)
return 0;
}
static int intel_security_overwrite(struct nvdimm *nvdimm,
static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
const struct nvdimm_key_data *nkey)
{
int rc;


@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers"
config PATA_ACPI
tristate "ACPI firmware driver for PATA"
depends on ATA_ACPI && ATA_BMDMA
depends on ATA_ACPI && ATA_BMDMA && PCI
help
This option enables an ACPI method driver which drives
motherboard PATA controller interfaces through the ACPI


@ -718,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */


@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* suppress pointless writes.
*/
for (i = 0; i < d->chip->num_regs; i++) {
if (!d->chip->mask_base)
continue;
reg = d->chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
if (d->chip->mask_invert) {
@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
const struct regmap_irq_type *t = &irq_data->type;
if ((t->types_supported & type) != type)
return -ENOTSUPP;
return 0;
reg = t->type_reg_offset / map->reg_stride;
@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
/* Mask all the interrupts by default */
for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i];
if (!chip->mask_base)
continue;
reg = chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
if (chip->mask_invert)


@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
set_capacity(nbd->disk, config->bytesize >> 9);
if (bdev) {
if (bdev->bd_disk)
if (bdev->bd_disk) {
bd_set_size(bdev, config->bytesize);
else
set_blocksize(bdev, config->blksize);
} else
bdev->bd_invalidated = 1;
bdput(bdev);
}


@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
depends on ARCH_BCM_IPROC
depends on MAILBOX
default m
select CRYPTO_AUTHENC
select CRYPTO_DES
select CRYPTO_MD5
select CRYPTO_SHA1


@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
struct spu_hw *spu = &iproc_priv.spu;
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
const u8 *origkey = key;
const unsigned int origkeylen = keylen;
int ret = 0;
struct crypto_authenc_keys keys;
int ret;
flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
keylen);
flow_dump(" key: ", key, keylen);
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
ret = crypto_authenc_extractkeys(&keys, key, keylen);
if (ret)
goto badkey;
param = RTA_DATA(rta);
ctx->enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < ctx->enckeylen)
goto badkey;
if (ctx->enckeylen > MAX_KEY_SIZE)
if (keys.enckeylen > MAX_KEY_SIZE ||
keys.authkeylen > MAX_KEY_SIZE)
goto badkey;
ctx->authkeylen = keylen - ctx->enckeylen;
ctx->enckeylen = keys.enckeylen;
ctx->authkeylen = keys.authkeylen;
if (ctx->authkeylen > MAX_KEY_SIZE)
goto badkey;
memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
/* May end up padding auth key. So make sure it's zeroed. */
memset(ctx->authkey, 0, sizeof(ctx->authkey));
memcpy(ctx->authkey, key, ctx->authkeylen);
memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
switch (ctx->alg->cipher_info.alg) {
case CIPHER_ALG_DES:
@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
u32 tmp[DES_EXPKEY_WORDS];
u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
if (des_ekey(tmp, key) == 0) {
if (des_ekey(tmp, keys.enckey) == 0) {
if (crypto_aead_get_flags(cipher) &
CRYPTO_TFM_REQ_WEAK_KEY) {
crypto_aead_set_flags(cipher, flags);
@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
break;
case CIPHER_ALG_3DES:
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
const u32 *K = (const u32 *)key;
const u32 *K = (const u32 *)keys.enckey;
u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret =
crypto_aead_setkey(ctx->fallback_cipher, origkey,
origkeylen);
ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
if (ret) {
flow_log(" fallback setkey() returned:%d\n", ret);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;


@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void)
* Skip algorithms requiring message digests
* if MD or MD size is not supported by device.
*/
if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
if (is_mdha(c2_alg_sel) &&
(!md_inst || t_alg->aead.maxauthsize > md_limit))
continue;


@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
desc = edesc->hw_desc;
state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map src\n");
goto unmap;
}
if (buflen) {
state->buf_dma = dma_map_single(jrdev, buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map src\n");
goto unmap;
}
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
}
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);


@ -1155,6 +1155,7 @@
#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)


@ -7,6 +7,9 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#include "desc.h"
#define CAAM_ERROR_STR_MAX 302
void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii);
static inline bool is_mdha(u32 algtype)
{
return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
OP_ALG_CHA_MDHA;
}
#endif /* CAAM_ERROR_H */


@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
/* ORH error code */
err = READ_ONCE(*sr->resp.orh) & 0xff;
softreq_destroy(sr);
if (sr->callback)
sr->callback(sr->cb_arg, err);
softreq_destroy(sr);
req_completed++;
}


@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct rtattr *rta = (struct rtattr *)key;
struct cc_crypto_req cc_req = {};
struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
int rc = -EINVAL;
unsigned int seq_len = 0;
struct device *dev = drvdata_to_dev(ctx->drvdata);
const u8 *enckey, *authkey;
int rc;
dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
if (!RTA_OK(rta, keylen))
struct crypto_authenc_keys keys;
rc = crypto_authenc_extractkeys(&keys, key, keylen);
if (rc)
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
ctx->enc_keylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < ctx->enc_keylen)
goto badkey;
ctx->auth_keylen = keylen - ctx->enc_keylen;
enckey = keys.enckey;
authkey = keys.authkey;
ctx->enc_keylen = keys.enckeylen;
ctx->auth_keylen = keys.authkeylen;
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
CTR_RFC3686_NONCE_SIZE);
memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
/* Set CTR key size */
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
}
} else { /* non-authenc - has just one key */
enckey = key;
authkey = NULL;
ctx->enc_keylen = keylen;
ctx->auth_keylen = 0;
}
@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
/* STAT_PHASE_1: Copy key to ctx */
/* Get key material */
memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
memcpy(ctx->enckey, enckey, ctx->enc_keylen);
if (ctx->enc_keylen == 24)
memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
goto badkey;
}


@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
void *err;
if (cryptlen + authsize > max_len) {
dev_err(dev, "length exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
if (ivsize)
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
if (!dst || dst == src) {
src_len = assoclen + cryptlen + authsize;
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
err = ERR_PTR(-EINVAL);
goto error_sg;
return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
err = ERR_PTR(-EINVAL);
goto error_sg;
return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
dst_nents = sg_nents_for_len(dst, dst_len);
if (dst_nents < 0) {
dev_err(dev, "Invalid number of dst SG.\n");
err = ERR_PTR(-EINVAL);
goto error_sg;
return ERR_PTR(-EINVAL);
}
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
/* if it's an ahash, add space for a second desc next to the first one */
if (is_sec1 && !dst)
alloc_len += sizeof(struct talitos_desc);
alloc_len += ivsize;
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
err = ERR_PTR(-ENOMEM);
goto error_sg;
if (!edesc)
return ERR_PTR(-ENOMEM);
if (ivsize) {
iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
}
memset(&edesc->desc, 0, sizeof(edesc->desc));
@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
DMA_BIDIRECTIONAL);
}
return edesc;
error_sg:
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
return err;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,


@ -531,17 +531,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
int ret;
int height;
struct amdgpu_device *adev = dev->dev_private;
int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
int pitch = mode_cmd->pitches[0] / cpp;
pitch = amdgpu_align_pitch(adev, pitch, cpp, false);
if (mode_cmd->pitches[0] != pitch) {
DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n",
pitch, mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
}
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
@ -556,13 +545,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
height = ALIGN(mode_cmd->height, 8);
if (obj->size < pitch * height) {
DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n",
pitch * height, obj->size);
return ERR_PTR(-EINVAL);
}
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_put_unlocked(obj);


@ -4,8 +4,8 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
depends on DRM_AMDGPU && X86_64
imply AMD_IOMMU_V2
depends on DRM_AMDGPU && (X86_64 || ARM64)
imply AMD_IOMMU_V2 if X86_64
select MMU_NOTIFIER
help
Enable this if you want to use HSA features on AMD GPU devices.


@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
return 0;
}
#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
uint32_t *num_entries,
struct crat_subtype_iolink *sub_type_hdr)
@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
return 0;
}
#endif
/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
*
@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
struct crat_subtype_generic *sub_type_hdr;
int avail_size = *size;
int numa_node_id;
#ifdef CONFIG_X86_64
uint32_t entries = 0;
#endif
int ret = 0;
if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
sub_type_hdr->length);
/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
&entries,
(struct crat_subtype_iolink *)sub_type_hdr);
@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length * entries);
#else
pr_info("IO link not available for non x86 platforms\n");
#endif
crat_table->num_domains++;
}


@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
* TODO: Rather than assiging @gpu to first topology device withtout
* gpu attached, it will better to have more stringent check.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *out_dev = NULL;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list)
list_for_each_entry(dev, &topology_device_list, list) {
/* Discrete GPUs need their own topology device list
* entries. Don't assign them to CPU/APU nodes.
*/
if (!gpu->device_info->needs_iommu_device &&
dev->node_props.cpu_cores_count)
continue;
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
}
up_write(&topology_lock);
return out_dev;
}
@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
{
const struct cpuinfo_x86 *cpuinfo;
int first_cpu_of_numa_node;
if (!cpumask || cpumask == cpu_none_mask)
@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
first_cpu_of_numa_node = cpumask_first(cpumask);
if (first_cpu_of_numa_node >= nr_cpu_ids)
return -1;
cpuinfo = &cpu_data(first_cpu_of_numa_node);
return cpuinfo->apicid;
#ifdef CONFIG_X86_64
return cpu_data(first_cpu_of_numa_node).apicid;
#else
return first_cpu_of_numa_node;
#endif
}
/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor


@ -1772,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+ caps.min_input_signal * 0x101;
if (dc_link_set_backlight_level(dm->backlight_link,
brightness, 0, 0))
brightness, 0))
return 0;
else
return 1;
@ -5933,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
!new_crtc_state->vrr_enabled)
old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
continue;
if (!new_crtc_state->enable)


@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
bool dc_link_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp,
const struct dc_stream_state *stream)
uint32_t frame_ramp)
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
(abm->funcs->set_backlight_level_pwm == NULL))
return false;
if (stream)
((struct dc_stream_state *)stream)->bl_pwm_level =
backlight_pwm_u16_16;
use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@ -2637,11 +2632,6 @@ void core_link_enable_stream(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
pipe_ctx->stream->bl_pwm_level,
0,
pipe_ctx->stream);
}
}


@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
*/
bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp,
const struct dc_stream_state *stream);
uint32_t frame_ramp);
int dc_link_get_backlight_level(const struct dc_link *dc_link);


@ -91,7 +91,6 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
unsigned int bl_pwm_level;
/* from core_stream struct */
struct dc_context *ctx;


@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* un-mute audio */
@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
if (option != KEEP_ACQUIRED_RESOURCE ||
!dc->debug.az_endpoint_mute_only) {
/* only disable az_endpoint if power down or free */
@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */


@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
if (src_y_offset < 0)
if (src_y_offset + (int)height <= 0)
cur_en = 0; /* not visible beyond top edge*/
REG_UPDATE(CURSOR0_CONTROL,


@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position(
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
if (src_y_offset + (int)hubp->curs_attr.height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)


@ -2355,30 +2355,23 @@ static void dcn10_apply_ctx_for_surface(
top_pipe_to_program->plane_state->update_flags.bits.full_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
if (!pipe_ctx->stream || pipe_ctx->stream == stream
|| !pipe_ctx->plane_state)
|| !pipe_ctx->plane_state
|| !tg->funcs->is_tg_enabled(tg))
continue;
pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
tg->funcs->lock(tg);
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
tg->funcs->unlock(tg);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx->stream || pipe_ctx->stream == stream
|| !pipe_ctx->plane_state)
continue;
dcn10_pipe_control_lock(dc, pipe_ctx, false);
}
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);


@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
#define NUM_POWER_FN_SEGS 8
#define NUM_BL_CURVE_SEGS 16
#pragma pack(push, 1)
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
@ -100,6 +101,7 @@ struct iram_table_v_2 {
uint8_t dummy8; /* 0xfe */
uint8_t dummy9; /* 0xff */
};
#pragma pack(pop)
static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
{


@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
{
uint32_t result;
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
"[Run_ACG_BTC] Attempt to run ACG BTC failed!",
return -EINVAL);
result = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(result == 1,
"Failed to run ACG BTC!", return -EINVAL);
return 0;
}
static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to initialize SMC table!",
result = tmp_result);
tmp_result = vega12_run_acg_btc(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to run ACG BTC!",
result = tmp_result);
result = vega12_enable_all_smu_features(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to enable all smu features!",


@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}


@ -41,7 +41,7 @@ struct intel_gvt_mpt {
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(unsigned long handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*enable_page_track)(unsigned long handle, u64 gfn);


@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
unsigned int index;
u64 virtaddr;
unsigned long req_size, pgoff = 0;
unsigned long req_size, pgoff, req_start;
pgprot_t pg_prot;
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
pg_prot = vma->vm_page_prot;
virtaddr = vma->vm_start;
req_size = vma->vm_end - vma->vm_start;
pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
pgoff = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
req_start = pgoff << PAGE_SHIFT;
if (!intel_vgpu_in_aperture(vgpu, req_start))
return -EINVAL;
if (req_start + req_size >
vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
return -EINVAL;
pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}
@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
return 0;
}
static void kvmgt_detach_vgpu(unsigned long handle)
static void kvmgt_detach_vgpu(void *p_vgpu)
{
/* nothing to do here */
int i;
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
if (!vgpu->vdev.region)
return;
for (i = 0; i < vgpu->vdev.num_regions; i++)
if (vgpu->vdev.region[i].ops->release)
vgpu->vdev.region[i].ops->release(vgpu,
&vgpu->vdev.region[i]);
vgpu->vdev.num_regions = 0;
kfree(vgpu->vdev.region);
vgpu->vdev.region = NULL;
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)


@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
if (!intel_gvt_host.mpt->detach_vgpu)
return;
intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
intel_gvt_host.mpt->detach_vgpu(vgpu);
}
#define MSI_CAP_CONTROL(offset) (offset + 2)


@ -46,7 +46,6 @@ struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
bool enabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
static void meson_crtc_enable(struct drm_crtc *crtc)
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
meson_crtc->enabled = true;
}
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
DRM_DEBUG_DRIVER("\n");
if (!meson_crtc->enabled)
meson_crtc_enable(crtc);
priv->viu.osd1_enabled = true;
}
@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
meson_crtc->enabled = false;
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
if (crtc->state->enable && !meson_crtc->enabled)
meson_crtc_enable(crtc);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);


@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
};
static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static irqreturn_t meson_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
drm->mode_config.helper_private = &meson_mode_config_helpers;
/* Hardware Initialization */
@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev,
remote_node = of_graph_get_remote_port_parent(ep);
if (!remote_node ||
remote_node == parent || /* Ignore parent endpoint */
!of_device_is_available(remote_node))
!of_device_is_available(remote_node)) {
of_node_put(remote_node);
continue;
}
count += meson_probe_remote(pdev, match, remote, remote_node);
@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev)
for_each_endpoint_of_node(np, ep) {
remote = of_graph_get_remote_port_parent(ep);
if (!remote || !of_device_is_available(remote))
if (!remote || !of_device_is_available(remote)) {
of_node_put(remote);
continue;
}
count += meson_probe_remote(pdev, &match, np, remote);
of_node_put(remote);
}
if (count && !match)


@ -2434,6 +2434,38 @@ nv140_chipset = {
.sec2 = gp102_sec2_new,
};
static const struct nvkm_device_chip
nv162_chipset = {
.name = "TU102",
.bar = tu104_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = tu104_devinit_new,
.fault = tu104_fault_new,
.fb = gv100_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.ltc = gp102_ltc_new,
.mc = tu104_mc_new,
.mmu = tu104_mmu_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.therm = gp100_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[0] = tu104_ce_new,
.ce[1] = tu104_ce_new,
.ce[2] = tu104_ce_new,
.ce[3] = tu104_ce_new,
.ce[4] = tu104_ce_new,
.disp = tu104_disp_new,
.dma = gv100_dma_new,
.fifo = tu104_fifo_new,
};
static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
case 0x140: device->chip = &nv140_chipset; break;
case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
default:


@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = qxl_gem_prime_pin,
.gem_prime_unpin = qxl_gem_prime_unpin,
.gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,


@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
return ERR_PTR(-ENOSYS);
}
struct drm_gem_object *qxl_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *table)
{
WARN_ONCE(1, "not implemented");
return ERR_PTR(-ENOSYS);
}
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");


@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
child_count++;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
&panel, &bridge);
if (!ret)
if (!ret) {
of_node_put(endpoint);
break;
}
}
of_node_put(port);


@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
remote = of_graph_get_remote_port_parent(ep);
if (!remote)
continue;
of_node_put(remote);
/* does this node match any registered engines? */
list_for_each_entry(frontend, &drv->frontend_list, list) {
if (remote == frontend->node) {
of_node_put(remote);
of_node_put(port);
of_node_put(ep);
return frontend;
}
}
}
of_node_put(port);
return ERR_PTR(-EINVAL);
}


@ -127,14 +127,10 @@ static struct drm_driver driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = virtgpu_gem_prime_pin,
.gem_prime_unpin = virtgpu_gem_prime_unpin,
.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,


@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,

Some files were not shown because too many files changed in this diff.