Char/Misc driver patches for 4.3-rc1
Here's the "big" char/misc driver update for 4.3-rc1. Not much really interesting here, just a number of little changes all over the place, and some nice consolidation of the nvmem drivers to a common framework. As usual, the mei drivers stand out as the largest "churn" to handle new devices and features in their hardware. All have been in linux-next for a while with no issues. Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> -----BEGIN PGP SIGNATURE----- Version: GnuPG v2 iEYEABECAAYFAlXV844ACgkQMUfUDdst+ymYfQCgmDKjq3fsVHCxNZPxnukFYzvb xZkAnRb8fuub5gVQFP29A+rhyiuWD13v =Bq9K -----END PGP SIGNATURE----- Merge tag 'char-misc-4.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc Pull char/misc driver patches from Greg KH: "Here's the "big" char/misc driver update for 4.3-rc1. Not much really interesting here, just a number of little changes all over the place, and some nice consolidation of the nvmem drivers to a common framework. As usual, the mei drivers stand out as the largest "churn" to handle new devices and features in their hardware. All have been in linux-next for a while with no issues" * tag 'char-misc-4.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (136 commits) auxdisplay: ks0108: initialize local parport variable extcon: palmas: Fix build break due to devm_gpiod_get_optional API change extcon: palmas: Support GPIO based USB ID detection extcon: Fix signedness bugs about break error handling extcon: Drop owner assignment from i2c_driver extcon: arizona: Simplify pdata symantics for micd_dbtime extcon: arizona: Declare 3-pole jack if we detect open circuit on mic extcon: Add exception handling to prevent the NULL pointer access extcon: arizona: Ensure variables are set for headphone detection extcon: arizona: Use gpiod inteface to handle micd_pol_gpio gpio extcon: arizona: Add basic microphone detection DT/ACPI bindings extcon: arizona: Update to use the new device properties API extcon: palmas: Remove the mutually_exclusive array extcon: Remove optional print_state() function pointer of struct extcon_dev extcon: Remove duplicate header file in extcon.h extcon: max77843: Clear IRQ bits state before request IRQ toshiba laptop: replace ioremap_cache with ioremap misc: eeprom: max6875: clean up max6875_read() misc: eeprom: clean up eeprom_read() misc: eeprom: 93xx46: clean up eeprom_93xx46_bin_read/write ...
Commit 1c00038c76
@@ -0,0 +1,29 @@
What:		/sys/bus/vmbus/devices/vmbus_*/id
Date:		Jul 2009
KernelVersion:	2.6.31
Contact:	K. Y. Srinivasan <kys@microsoft.com>
Description:	The VMBus child_relid of the device's primary channel
Users:		tools/hv/lsvmbus

What:		/sys/bus/vmbus/devices/vmbus_*/class_id
Date:		Jul 2009
KernelVersion:	2.6.31
Contact:	K. Y. Srinivasan <kys@microsoft.com>
Description:	The VMBus interface type GUID of the device
Users:		tools/hv/lsvmbus

What:		/sys/bus/vmbus/devices/vmbus_*/device_id
Date:		Jul 2009
KernelVersion:	2.6.31
Contact:	K. Y. Srinivasan <kys@microsoft.com>
Description:	The VMBus interface instance GUID of the device
Users:		tools/hv/lsvmbus

What:		/sys/bus/vmbus/devices/vmbus_*/channel_vp_mapping
Date:		Jul 2015
KernelVersion:	4.2.0
Contact:	K. Y. Srinivasan <kys@microsoft.com>
Description:	The mapping of which primary/sub channels are bound to which
		Virtual Processors.
		Format: <channel's child_relid:the bound cpu's number>
Users:		tools/hv/lsvmbus
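
For reference, a minimal user-space sketch (not part of the patch above) that
walks these attributes much as tools/hv/lsvmbus does; the attribute names come
from the ABI entries, everything else here is illustrative:

    /* list_vmbus.c - hypothetical example reading the vmbus sysfs attributes */
    #include <stdio.h>
    #include <glob.h>

    static void print_attr(const char *dir, const char *attr)
    {
        char path[256], buf[256];
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", dir, attr);
        f = fopen(path, "r");
        if (!f)
            return;
        if (fgets(buf, sizeof(buf), f))
            printf("  %-20s %s", attr, buf);
        fclose(f);
    }

    int main(void)
    {
        glob_t g;
        size_t i;

        if (glob("/sys/bus/vmbus/devices/vmbus_*", 0, NULL, &g))
            return 1;
        for (i = 0; i < g.gl_pathc; i++) {
            printf("%s\n", g.gl_pathv[i]);
            print_attr(g.gl_pathv[i], "id");
            print_attr(g.gl_pathv[i], "class_id");
            print_attr(g.gl_pathv[i], "device_id");
            print_attr(g.gl_pathv[i], "channel_vp_mapping");
        }
        globfree(&g);
        return 0;
    }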
@@ -112,7 +112,7 @@ KernelVersion:	3.19
Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
Description:	(RW) Mask to apply to all the context ID comparator.

What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_val
What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_pid
Date:		November 2014
KernelVersion:	3.19
Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>

@@ -249,7 +249,7 @@ KernelVersion:	4.01
Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
Description:	(RW) Select which context ID comparator to work with.

What:		/sys/bus/coresight/devices/<memory_map>.etm/ctxid_val
What:		/sys/bus/coresight/devices/<memory_map>.etm/ctxid_pid
Date:		April 2015
KernelVersion:	4.01
Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
@@ -1,22 +0,0 @@
What:		/sys/devices/*/<our-device>/eeprom
Date:		August 2013
Contact:	Oliver Schinagl <oliver@schinagl.nl>
Description:	read-only access to the SID (Security-ID) on current
		A-series SoC's from Allwinner. Currently supports A10, A10s, A13
		and A20 CPU's. The earlier A1x series of SoCs exports 16 bytes,
		whereas the newer A20 SoC exposes 512 bytes split into sections.
		Besides the 16 bytes of SID, there's also an SJTAG area,
		HDMI-HDCP key and some custom keys. Below a quick overview, for
		details see the user manual:
		0x000  128 bit root-key (sun[457]i)
		0x010  128 bit boot-key (sun7i)
		0x020   64 bit security-jtag-key (sun7i)
		0x028   16 bit key configuration (sun7i)
		0x02b   16 bit custom-vendor-key (sun7i)
		0x02c  320 bit low general key (sun7i)
		0x040   32 bit read-control access (sun7i)
		0x064  224 bit low general key (sun7i)
		0x080 2304 bit HDCP-key (sun7i)
		0x1a0  768 bit high general key (sun7i)
Users:		any user space application which wants to read the SID on
		Allwinner's A-series of CPU's.
@@ -17,6 +17,7 @@ its hardware characteristcs.
	- "arm,coresight-tmc", "arm,primecell";
	- "arm,coresight-funnel", "arm,primecell";
	- "arm,coresight-etm3x", "arm,primecell";
	- "arm,coresight-etm4x", "arm,primecell";
	- "qcom,coresight-replicator1x", "arm,primecell";

* reg: physical base address and length of the register
@@ -10,8 +10,11 @@ Required Properties:

Optional Properties:
- ti,wakeup : To enable the wakeup comparator in probe
- ti,enable-id-detection: Perform ID detection.
- ti,enable-id-detection: Perform ID detection. If id-gpio is specified
  it performs id-detection using GPIO else using OTG core.
- ti,enable-vbus-detection: Perform VBUS detection.
- id-gpio: gpio for GPIO ID detection. See gpio binding.
- debounce-delay-ms: debounce delay for GPIO ID pin in milliseconds.

palmas-usb {
	compatible = "ti,twl6035-usb", "ti,palmas-usb";
@@ -4,6 +4,10 @@ Required properties:
- compatible: "allwinner,sun4i-a10-sid" or "allwinner,sun7i-a20-sid"
- reg: Should contain registers location and length

= Data cells =
Are child nodes of qfprom, bindings of which as described in
bindings/nvmem/nvmem.txt

Example for sun4i:
	sid@01c23800 {
		compatible = "allwinner,sun4i-a10-sid";
@@ -0,0 +1,80 @@
= NVMEM(Non Volatile Memory) Data Device Tree Bindings =

This binding is intended to represent the location of hardware
configuration data stored in NVMEMs like eeprom, efuses and so on.

On a significant proportion of boards, the manufacturer has stored
some data on NVMEM, for the OS to be able to retrieve these information
and act upon it. Obviously, the OS has to know about where to retrieve
these data from, and where they are stored on the storage device.

This document is here to document this.

= Data providers =
Contains bindings specific to provider drivers and data cells as children
of this node.

Optional properties:
 read-only: Mark the provider as read only.

= Data cells =
These are the child nodes of the provider which contain data cell
information like offset and size in nvmem provider.

Required properties:
reg:	specifies the offset in byte within the storage device.

Optional properties:

bits:	Is pair of bit location and number of bits, which specifies offset
	in bit and number of bits within the address range specified by reg property.
	Offset takes values from 0-7.

For example:

	/* Provider */
	qfprom: qfprom@00700000 {
		...

		/* Data cells */
		tsens_calibration: calib@404 {
			reg = <0x404 0x10>;
		};

		tsens_calibration_bckp: calib_bckp@504 {
			reg = <0x504 0x11>;
			bits = <6 128>
		};

		pvs_version: pvs-version@6 {
			reg = <0x6 0x2>
			bits = <7 2>
		};

		speed_bin: speed-bin@c{
			reg = <0xc 0x1>;
			bits = <2 3>;

		};
		...
	};

= Data consumers =
Are device nodes which consume nvmem data cells/providers.

Required-properties:
nvmem-cells: list of phandle to the nvmem data cells.
nvmem-cell-names: names for the each nvmem-cells specified. Required if
	nvmem-cells is used.

Optional-properties:
nvmem	: list of phandles to nvmem providers.
nvmem-names: names for the each nvmem provider. required if nvmem is used.

For example:

	tsens {
		...
		nvmem-cells = <&tsens_calibration>;
		nvmem-cell-names = "calibration";
	};
@@ -0,0 +1,35 @@
= Qualcomm QFPROM device tree bindings =

This binding is intended to represent QFPROM which is found in most QCOM SOCs.

Required properties:
- compatible: should be "qcom,qfprom"
- reg: Should contain registers location and length

= Data cells =
Are child nodes of qfprom, bindings of which as described in
bindings/nvmem/nvmem.txt

Example:

	qfprom: qfprom@00700000 {
		compatible	= "qcom,qfprom";
		reg		= <0x00700000 0x8000>;
		...
		/* Data cells */
		tsens_calibration: calib@404 {
			reg = <0x4404 0x10>;
		};
	};


= Data consumers =
Are device nodes which consume nvmem data cells.

For example:

	tsens {
		...
		nvmem-cells = <&tsens_calibration>;
		nvmem-cell-names = "calibration";
	};
@@ -0,0 +1,48 @@
Qualcomm Coincell Charger:

The hardware block controls charging for a coincell or capacitor that is
used to provide power backup for certain features of the power management
IC (PMIC)

- compatible:
	Usage: required
	Value type: <string>
	Definition: must be: "qcom,pm8941-coincell"

- reg:
	Usage: required
	Value type: <u32>
	Definition: base address of the coincell charger registers

- qcom,rset-ohms:
	Usage: required
	Value type: <u32>
	Definition: resistance (in ohms) for current-limiting resistor
		must be one of: 800, 1200, 1700, 2100

- qcom,vset-millivolts:
	Usage: required
	Value type: <u32>
	Definition: voltage (in millivolts) to apply for charging
		must be one of: 2500, 3000, 3100, 3200

- qcom,charger-disable:
	Usage: optional
	Value type: <boolean>
	Definition: definining this property disables charging

This charger is a sub-node of one of the 8941 PMIC blocks, and is specified
as a child node in DTS of that node.  See ../mfd/qcom,spmi-pmic.txt and
../mfd/qcom-pm8xxx.txt

Example:

	pm8941@0 {
		coincell@2800 {
			compatible = "qcom,pm8941-coincell";
			reg = <0x2800>;

			qcom,rset-ohms = <2100>;
			qcom,vset-millivolts = <3000>;
		};
	};
@@ -124,6 +124,8 @@ Code  Seq#(hex)	Include File		Comments
'H'	00-7F	linux/hiddev.h		conflict!
'H'	00-0F	linux/hidraw.h		conflict!
'H'	01	linux/mei.h		conflict!
'H'	02	linux/mei.h		conflict!
'H'	03	linux/mei.h		conflict!
'H'	00-0F	sound/asound.h		conflict!
'H'	20-40	sound/asound_fm.h	conflict!
'H'	80-8F	sound/sfnt_info.h	conflict!
@@ -96,7 +96,7 @@ A code snippet for an application communicating with Intel AMTHI client:
IOCTL
=====

The Intel MEI Driver supports the following IOCTL command:
The Intel MEI Driver supports the following IOCTL commands:
	IOCTL_MEI_CONNECT_CLIENT	Connect to firmware Feature (client).

	usage:
@@ -125,6 +125,49 @@ The Intel MEI Driver supports the following IOCTL command:
	data that can be sent or received. (e.g. if MTU=2K, can send
	requests up to bytes 2k and received responses up to 2k bytes).

	IOCTL_MEI_NOTIFY_SET: enable or disable event notifications

	Usage:
		uint32_t enable;
		ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable);

	Inputs:
		uint32_t enable = 1;
		or
		uint32_t enable[disable] = 0;

	Error returns:
		EINVAL	Wrong IOCTL Number
		ENODEV	Device is not initialized or the client not connected
		ENOMEM	Unable to allocate memory to client internal data.
		EFAULT	Fatal Error (e.g. Unable to access user input data)
		EOPNOTSUPP if the device doesn't support the feature

	Notes:
	The client must be connected in order to enable notification events


	IOCTL_MEI_NOTIFY_GET : retrieve event

	Usage:
		uint32_t event;
		ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event);

	Outputs:
		1 - if an event is pending
		0 - if there is no even pending

	Error returns:
		EINVAL	Wrong IOCTL Number
		ENODEV	Device is not initialized or the client not connected
		ENOMEM	Unable to allocate memory to client internal data.
		EFAULT	Fatal Error (e.g. Unable to access user input data)
		EOPNOTSUPP if the device doesn't support the feature

	Notes:
	The client must be connected and event notification has to be enabled
	in order to receive an event

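Putting the two notification IOCTLs together, a user-space flow might look
like the following sketch; it is illustrative only, the device node path and
the error handling are assumptions, while the IOCTL names come from
linux/mei.h as documented above:

    /* Hypothetical sketch of the MEI notification flow described above. */
    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/mei.h>

    int main(void)
    {
        uint32_t enable = 1;    /* 1 = enable notifications, 0 = disable */
        uint32_t event = 0;
        int fd = open("/dev/mei0", O_RDWR);    /* device node is an assumption */

        if (fd < 0)
            return 1;

        /* ... IOCTL_MEI_CONNECT_CLIENT would normally be issued first ... */

        if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) < 0)
            perror("IOCTL_MEI_NOTIFY_SET");

        if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event) < 0)
            perror("IOCTL_MEI_NOTIFY_GET");
        else
            printf("event %s\n", event ? "pending" : "not pending");

        close(fd);
        return 0;
    }
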
Intel ME Applications
=====================

@@ -0,0 +1,152 @@
			NVMEM SUBSYSTEM
	  Srinivas Kandagatla <srinivas.kandagatla@linaro.org>

This document explains the NVMEM Framework along with the APIs provided,
and how to use it.

1. Introduction
===============
*NVMEM* is the abbreviation for Non Volatile Memory layer. It is used to
retrieve configuration of SOC or Device specific data from non volatile
memories like eeprom, efuses and so on.

Before this framework existed, NVMEM drivers like eeprom were stored in
drivers/misc, where they all had to duplicate pretty much the same code to
register a sysfs file, allow in-kernel users to access the content of the
devices they were driving, etc.

This was also a problem as far as other in-kernel users were involved, since
the solutions used were pretty much different from one driver to another, there
was a rather big abstraction leak.

This framework aims at solve these problems. It also introduces DT
representation for consumer devices to go get the data they require (MAC
Addresses, SoC/Revision ID, part numbers, and so on) from the NVMEMs. This
framework is based on regmap, so that most of the abstraction available in
regmap can be reused, across multiple types of buses.

NVMEM Providers
+++++++++++++++

NVMEM provider refers to an entity that implements methods to initialize, read
and write the non-volatile memory.

2. Registering/Unregistering the NVMEM provider
===============================================

A NVMEM provider can register with NVMEM core by supplying relevant
nvmem configuration to nvmem_register(), on success core would return a valid
nvmem_device pointer.

nvmem_unregister(nvmem) is used to unregister a previously registered provider.

For example, a simple qfprom case:

static struct nvmem_config econfig = {
	.name = "qfprom",
	.owner = THIS_MODULE,
};

static int qfprom_probe(struct platform_device *pdev)
{
	...
	econfig.dev = &pdev->dev;
	nvmem = nvmem_register(&econfig);
	...
}

It is mandatory that the NVMEM provider has a regmap associated with its
struct device. Failure to do would return error code from nvmem_register().

NVMEM Consumers
+++++++++++++++

NVMEM consumers are the entities which make use of the NVMEM provider to
read from and to NVMEM.

3. NVMEM cell based consumer APIs
=================================

NVMEM cells are the data entries/fields in the NVMEM.
The NVMEM framework provides 3 APIs to read/write NVMEM cells.

struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);

void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);

void *nvmem_cell_read(struct nvmem_cell *cell, ssize_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, ssize_t len);

*nvmem_cell_get() apis will get a reference to nvmem cell for a given id,
and nvmem_cell_read/write() can then read or write to the cell.
Once the usage of the cell is finished the consumer should call *nvmem_cell_put()
to free all the allocation memory for the cell.
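
As a quick illustration of the cell based API above, a consumer probe() might
look like the following sketch; the cell name "calibration" and the
surrounding driver are assumptions:

    #include <linux/nvmem-consumer.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    /* Hypothetical consumer reading the "calibration" cell from its probe(). */
    static int foo_probe(struct platform_device *pdev)
    {
        struct nvmem_cell *cell;
        ssize_t len;
        u8 *data;

        cell = devm_nvmem_cell_get(&pdev->dev, "calibration");
        if (IS_ERR(cell))
            return PTR_ERR(cell);

        data = nvmem_cell_read(cell, &len);    /* returns a kmalloc'ed buffer */
        if (IS_ERR(data))
            return PTR_ERR(data);

        /* ... use the len bytes of calibration data ... */

        kfree(data);
        return 0;
    }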
4. Direct NVMEM device based consumer APIs
==========================================

In some instances it is necessary to directly read/write the NVMEM.
To facilitate such consumers NVMEM framework provides below apis.

struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
struct nvmem_device *devm_nvmem_device_get(struct device *dev,
					    const char *name);
void nvmem_device_put(struct nvmem_device *nvmem);
int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
		      size_t bytes, void *buf);
int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
		       size_t bytes, void *buf);
int nvmem_device_cell_read(struct nvmem_device *nvmem,
			   struct nvmem_cell_info *info, void *buf);
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf);

Before the consumers can read/write NVMEM directly, it should get hold
of nvmem_controller from one of the *nvmem_device_get() api.

The difference between these apis and cell based apis is that these apis always
take nvmem_device as parameter.
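
A corresponding sketch for the device based API; the provider name "qfprom",
the offset and the length are assumptions made for the example:

    #include <linux/nvmem-consumer.h>
    #include <linux/platform_device.h>

    /* Hypothetical consumer reading 16 bytes at offset 0x404 from "qfprom". */
    static int bar_probe(struct platform_device *pdev)
    {
        struct nvmem_device *nvmem;
        u8 buf[16];
        int ret;

        nvmem = devm_nvmem_device_get(&pdev->dev, "qfprom");
        if (IS_ERR(nvmem))
            return PTR_ERR(nvmem);

        ret = nvmem_device_read(nvmem, 0x404, sizeof(buf), buf);
        if (ret < 0)
            return ret;

        /* ... ret holds the number of bytes actually read ... */
        return 0;
    }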
5. Releasing a reference to the NVMEM
=====================================

When a consumers no longer needs the NVMEM, it has to release the reference
to the NVMEM it has obtained using the APIs mentioned in the above section.
The NVMEM framework provides 2 APIs to release a reference to the NVMEM.

void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void nvmem_device_put(struct nvmem_device *nvmem);
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);

Both these APIs are used to release a reference to the NVMEM and
devm_nvmem_cell_put and devm_nvmem_device_put destroys the devres associated
with this NVMEM.

Userspace
+++++++++

6. Userspace binary interface
==============================

Userspace can read/write the raw NVMEM file located at
/sys/bus/nvmem/devices/*/nvmem

ex:

hexdump /sys/bus/nvmem/devices/qfprom0/nvmem

0000000 0000 0000 0000 0000 0000 0000 0000 0000
*
00000a0 db10 2240 0000 e000 0c00 0c00 0000 0c00
0000000 0000 0000 0000 0000 0000 0000 0000 0000
...
*
0001000

7. DeviceTree Binding
=====================

See Documentation/devicetree/bindings/nvmem/nvmem.txt
@@ -72,7 +72,7 @@ More details follow:
                                          |
                                          v
                               Disable regular cpu hotplug
                            by setting cpu_hotplug_disabled=1
                            by increasing cpu_hotplug_disabled
                                          |
                                          v
                               Release cpu_add_remove_lock

@@ -89,7 +89,7 @@ Resuming back is likewise, with the counterparts being (in the order of
execution during resume):
* enable_nonboot_cpus() which involves:
   |  Acquire cpu_add_remove_lock
   |  Reset cpu_hotplug_disabled to 0, thereby enabling regular cpu hotplug
   |  Decrease cpu_hotplug_disabled, thereby enabling regular cpu hotplug
   |  Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
   |  Release cpu_add_remove_lock
   v

@@ -120,7 +120,7 @@ after the entire cycle is complete (i.e., suspend + resume).
                               Acquire cpu_add_remove_lock
                                          |
                                          v
                              If cpu_hotplug_disabled is 1
                              If cpu_hotplug_disabled > 0
                                   return gracefully
                                          |
                                          v
@@ -15,7 +15,7 @@ HW assisted tracing is becoming increasingly useful when dealing with systems
that have many SoCs and other components like GPU and DMA engines. ARM has
developed a HW assisted tracing solution by means of different components, each
being added to a design at synthesis time to cater to specific tracing needs.
Compoments are generally categorised as source, link and sinks and are
Components are generally categorised as source, link and sinks and are
(usually) discovered using the AMBA bus.

"Sources" generate a compressed stream representing the processor instruction

@@ -138,7 +138,7 @@ void coresight_unregister(struct coresight_device *csdev);

The registering function is taking a "struct coresight_device *csdev" and
register the device with the core framework.  The unregister function takes
a reference to a "strut coresight_device", obtained at registration time.
a reference to a "struct coresight_device", obtained at registration time.

If everything goes well during the registration process the new devices will
show up under /sys/bus/coresight/devices, as showns here for a TC2 platform:
MAINTAINERS

@@ -4966,6 +4966,7 @@ F:	drivers/scsi/storvsc_drv.c
F:	drivers/video/fbdev/hyperv_fb.c
F:	include/linux/hyperv.h
F:	tools/hv/
F:	Documentation/ABI/stable/sysfs-bus-vmbus

I2C OVER PARALLEL PORT
M:	Jean Delvare <jdelvare@suse.com>

@@ -7298,6 +7299,15 @@ S:	Supported
F:	drivers/block/nvme*
F:	include/linux/nvme.h

NVMEM FRAMEWORK
M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M:	Maxime Ripard <maxime.ripard@free-electrons.com>
S:	Maintained
F:	drivers/nvmem/
F:	Documentation/devicetree/bindings/nvmem/
F:	include/linux/nvmem-consumer.h
F:	include/linux/nvmem-provider.h

NXP-NCI NFC DRIVER
M:	Clément Perrochaud <clement.perrochaud@effinnov.com>
R:	Charles Gorand <charles.gorand@effinnov.com>
@ -17,3 +17,13 @@
|
|||
status = "ok";
|
||||
};
|
||||
};
|
||||
|
||||
&spmi_bus {
|
||||
pm8941@0 {
|
||||
coincell@2800 {
|
||||
status = "ok";
|
||||
qcom,rset-ohms = <2100>;
|
||||
qcom,vset-millivolts = <3000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -125,6 +125,12 @@
|
|||
interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
|
||||
qcom,external-resistor-micro-ohms = <10000>;
|
||||
};
|
||||
|
||||
coincell@2800 {
|
||||
compatible = "qcom,pm8941-coincell";
|
||||
reg = <0x2800>;
|
||||
status = "disabled";
|
||||
};
|
||||
};
|
||||
|
||||
usid1: pm8941@1 {
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
|
||||
struct ms_hyperv_info {
|
||||
u32 features;
|
||||
u32 misc_features;
|
||||
u32 hints;
|
||||
};
|
||||
|
||||
|
@ -20,4 +21,8 @@ void hyperv_vector_handler(struct pt_regs *regs);
|
|||
void hv_setup_vmbus_irq(void (*handler)(void));
|
||||
void hv_remove_vmbus_irq(void);
|
||||
|
||||
void hv_setup_kexec_handler(void (*handler)(void));
|
||||
void hv_remove_kexec_handler(void);
|
||||
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
|
||||
void hv_remove_crash_handler(void);
|
||||
#endif
|
||||
|
|
|
@ -27,6 +27,8 @@
|
|||
#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
|
||||
/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
|
||||
#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
|
||||
/* Partition reference TSC MSR is available */
|
||||
#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE (1 << 9)
|
||||
|
||||
/* A partition's reference time stamp counter (TSC) page */
|
||||
#define HV_X64_MSR_REFERENCE_TSC 0x40000021
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <linux/efi.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/kexec.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/hypervisor.h>
|
||||
#include <asm/hyperv.h>
|
||||
|
@ -28,10 +29,14 @@
|
|||
#include <asm/i8259.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/timer.h>
|
||||
#include <asm/reboot.h>
|
||||
|
||||
struct ms_hyperv_info ms_hyperv;
|
||||
EXPORT_SYMBOL_GPL(ms_hyperv);
|
||||
|
||||
static void (*hv_kexec_handler)(void);
|
||||
static void (*hv_crash_handler)(struct pt_regs *regs);
|
||||
|
||||
#if IS_ENABLED(CONFIG_HYPERV)
|
||||
static void (*vmbus_handler)(void);
|
||||
|
||||
|
@ -67,8 +72,47 @@ void hv_remove_vmbus_irq(void)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
|
||||
EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
|
||||
|
||||
void hv_setup_kexec_handler(void (*handler)(void))
|
||||
{
|
||||
hv_kexec_handler = handler;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);
|
||||
|
||||
void hv_remove_kexec_handler(void)
|
||||
{
|
||||
hv_kexec_handler = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);
|
||||
|
||||
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
|
||||
{
|
||||
hv_crash_handler = handler;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hv_setup_crash_handler);
|
||||
|
||||
void hv_remove_crash_handler(void)
|
||||
{
|
||||
hv_crash_handler = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
|
||||
#endif
|
||||
|
||||
static void hv_machine_shutdown(void)
|
||||
{
|
||||
if (kexec_in_progress && hv_kexec_handler)
|
||||
hv_kexec_handler();
|
||||
native_machine_shutdown();
|
||||
}
|
||||
|
||||
static void hv_machine_crash_shutdown(struct pt_regs *regs)
|
||||
{
|
||||
if (hv_crash_handler)
|
||||
hv_crash_handler(regs);
|
||||
native_machine_crash_shutdown(regs);
|
||||
}
|
||||
|
||||
|
||||
static uint32_t __init ms_hyperv_platform(void)
|
||||
{
|
||||
u32 eax;
|
||||
|
@ -114,6 +158,7 @@ static void __init ms_hyperv_init_platform(void)
|
|||
* Extract the features and hints
|
||||
*/
|
||||
ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
|
||||
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
|
||||
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
|
||||
|
||||
printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
|
||||
|
@ -141,6 +186,8 @@ static void __init ms_hyperv_init_platform(void)
|
|||
no_timer_check = 1;
|
||||
#endif
|
||||
|
||||
machine_ops.shutdown = hv_machine_shutdown;
|
||||
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
|
||||
}
|
||||
|
||||
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
|
||||
|
|
|
@ -184,4 +184,6 @@ source "drivers/android/Kconfig"
|
|||
|
||||
source "drivers/nvdimm/Kconfig"
|
||||
|
||||
source "drivers/nvmem/Kconfig"
|
||||
|
||||
endmenu
|
||||
|
|
|
@ -165,3 +165,4 @@ obj-$(CONFIG_RAS) += ras/
|
|||
obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
|
||||
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
|
||||
obj-$(CONFIG_ANDROID) += android/
|
||||
obj-$(CONFIG_NVMEM) += nvmem/
|
||||
|
|
|
@ -23,6 +23,8 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
|
@ -90,17 +92,19 @@ void ks0108_displaystate(unsigned char state)
|
|||
|
||||
void ks0108_startline(unsigned char startline)
|
||||
{
|
||||
ks0108_writedata(min(startline,(unsigned char)63) | bit(6) | bit(7));
|
||||
ks0108_writedata(min_t(unsigned char, startline, 63) | bit(6) |
|
||||
bit(7));
|
||||
}
|
||||
|
||||
void ks0108_address(unsigned char address)
|
||||
{
|
||||
ks0108_writedata(min(address,(unsigned char)63) | bit(6));
|
||||
ks0108_writedata(min_t(unsigned char, address, 63) | bit(6));
|
||||
}
|
||||
|
||||
void ks0108_page(unsigned char page)
|
||||
{
|
||||
ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7));
|
||||
ks0108_writedata(min_t(unsigned char, page, 7) | bit(3) | bit(4) |
|
||||
bit(5) | bit(7));
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(ks0108_writedata);
|
||||
|
@ -121,52 +125,71 @@ unsigned char ks0108_isinited(void)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(ks0108_isinited);
|
||||
|
||||
static void ks0108_parport_attach(struct parport *port)
|
||||
{
|
||||
struct pardev_cb ks0108_cb;
|
||||
|
||||
if (port->base != ks0108_port)
|
||||
return;
|
||||
|
||||
memset(&ks0108_cb, 0, sizeof(ks0108_cb));
|
||||
ks0108_cb.flags = PARPORT_DEV_EXCL;
|
||||
ks0108_pardevice = parport_register_dev_model(port, KS0108_NAME,
|
||||
&ks0108_cb, 0);
|
||||
if (!ks0108_pardevice) {
|
||||
pr_err("ERROR: parport didn't register new device\n");
|
||||
return;
|
||||
}
|
||||
if (parport_claim(ks0108_pardevice)) {
|
||||
pr_err("could not claim access to parport %i. Aborting.\n",
|
||||
ks0108_port);
|
||||
goto err_unreg_device;
|
||||
}
|
||||
|
||||
ks0108_parport = port;
|
||||
ks0108_inited = 1;
|
||||
return;
|
||||
|
||||
err_unreg_device:
|
||||
parport_unregister_device(ks0108_pardevice);
|
||||
ks0108_pardevice = NULL;
|
||||
}
|
||||
|
||||
static void ks0108_parport_detach(struct parport *port)
|
||||
{
|
||||
if (port->base != ks0108_port)
|
||||
return;
|
||||
|
||||
if (!ks0108_pardevice) {
|
||||
pr_err("%s: already unregistered.\n", KS0108_NAME);
|
||||
return;
|
||||
}
|
||||
|
||||
parport_release(ks0108_pardevice);
|
||||
parport_unregister_device(ks0108_pardevice);
|
||||
ks0108_pardevice = NULL;
|
||||
ks0108_parport = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Module Init & Exit
|
||||
*/
|
||||
|
||||
static struct parport_driver ks0108_parport_driver = {
|
||||
.name = "ks0108",
|
||||
.match_port = ks0108_parport_attach,
|
||||
.detach = ks0108_parport_detach,
|
||||
.devmodel = true,
|
||||
};
|
||||
|
||||
static int __init ks0108_init(void)
|
||||
{
|
||||
int result;
|
||||
int ret = -EINVAL;
|
||||
|
||||
ks0108_parport = parport_find_base(ks0108_port);
|
||||
if (ks0108_parport == NULL) {
|
||||
printk(KERN_ERR KS0108_NAME ": ERROR: "
|
||||
"parport didn't find %i port\n", ks0108_port);
|
||||
goto none;
|
||||
}
|
||||
|
||||
ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
|
||||
NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
|
||||
if (ks0108_pardevice == NULL) {
|
||||
printk(KERN_ERR KS0108_NAME ": ERROR: "
|
||||
"parport didn't register new device\n");
|
||||
goto none;
|
||||
}
|
||||
|
||||
result = parport_claim(ks0108_pardevice);
|
||||
if (result != 0) {
|
||||
printk(KERN_ERR KS0108_NAME ": ERROR: "
|
||||
"can't claim %i parport, maybe in use\n", ks0108_port);
|
||||
ret = result;
|
||||
goto registered;
|
||||
}
|
||||
|
||||
ks0108_inited = 1;
|
||||
return 0;
|
||||
|
||||
registered:
|
||||
parport_unregister_device(ks0108_pardevice);
|
||||
|
||||
none:
|
||||
return ret;
|
||||
return parport_register_driver(&ks0108_parport_driver);
|
||||
}
|
||||
|
||||
static void __exit ks0108_exit(void)
|
||||
{
|
||||
parport_release(ks0108_pardevice);
|
||||
parport_unregister_device(ks0108_pardevice);
|
||||
parport_unregister_driver(&ks0108_parport_driver);
|
||||
}
|
||||
|
||||
module_init(ks0108_init);
|
||||
|
|
|
@ -243,17 +243,15 @@ int misc_register(struct miscdevice * misc)
|
|||
* @misc: device to unregister
|
||||
*
|
||||
* Unregister a miscellaneous device that was previously
|
||||
* successfully registered with misc_register(). Success
|
||||
* is indicated by a zero return, a negative errno code
|
||||
* indicates an error.
|
||||
* successfully registered with misc_register().
|
||||
*/
|
||||
|
||||
int misc_deregister(struct miscdevice *misc)
|
||||
void misc_deregister(struct miscdevice *misc)
|
||||
{
|
||||
int i = DYNAMIC_MINORS - misc->minor - 1;
|
||||
|
||||
if (WARN_ON(list_empty(&misc->list)))
|
||||
return -EINVAL;
|
||||
return;
|
||||
|
||||
mutex_lock(&misc_mtx);
|
||||
list_del(&misc->list);
|
||||
|
@ -261,7 +259,6 @@ int misc_deregister(struct miscdevice *misc)
|
|||
if (i < DYNAMIC_MINORS && i >= 0)
|
||||
clear_bit(i, misc_minors);
|
||||
mutex_unlock(&misc_mtx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(misc_register);
|
||||
|
@ -281,10 +278,9 @@ static char *misc_devnode(struct device *dev, umode_t *mode)
|
|||
static int __init misc_init(void)
|
||||
{
|
||||
int err;
|
||||
struct proc_dir_entry *ret;
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
proc_create("misc", 0, NULL, &misc_proc_fops);
|
||||
#endif
|
||||
ret = proc_create("misc", 0, NULL, &misc_proc_fops);
|
||||
misc_class = class_create(THIS_MODULE, "misc");
|
||||
err = PTR_ERR(misc_class);
|
||||
if (IS_ERR(misc_class))
|
||||
|
@ -300,7 +296,8 @@ fail_printk:
|
|||
printk("unable to get major %d for misc devices\n", MISC_MAJOR);
|
||||
class_destroy(misc_class);
|
||||
fail_remove:
|
||||
remove_proc_entry("misc", NULL);
|
||||
if (ret)
|
||||
remove_proc_entry("misc", NULL);
|
||||
return err;
|
||||
}
|
||||
subsys_initcall(misc_init);
|
||||
|
|
|
@ -702,7 +702,7 @@ static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq,
|
|||
seq_printf(seq, "%ds%s\n", nvram[10],
|
||||
nvram[10] < 8 ? ", no memory test" : "");
|
||||
|
||||
vmode = (nvram[14] << 8) || nvram[15];
|
||||
vmode = (nvram[14] << 8) | nvram[15];
|
||||
seq_printf(seq,
|
||||
"Video mode : %s colors, %d columns, %s %s monitor\n",
|
||||
colors[vmode & 7],
|
||||
|
|
|
@ -430,7 +430,7 @@ static int tosh_probe(void)
|
|||
int i,major,minor,day,year,month,flag;
|
||||
unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
|
||||
SMMRegisters regs;
|
||||
void __iomem *bios = ioremap_cache(0xf0000, 0x10000);
|
||||
void __iomem *bios = ioremap(0xf0000, 0x10000);
|
||||
|
||||
if (!bios)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -193,14 +193,16 @@ static int xilly_probe(struct pci_dev *pdev,
|
|||
}
|
||||
|
||||
/*
|
||||
* In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1
|
||||
* is the right thing. But some unclever PCIe drivers report it's OK
|
||||
* when the hardware drops those 64-bit PCIe packets. So trust
|
||||
* nobody and use 32 bits DMA addressing in any case.
|
||||
* Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
|
||||
* even when the PCIe driver claims that a 64-bit mask is OK. On the
|
||||
* other hand, on some architectures, 64-bit addressing is mandatory.
|
||||
* So go for the 64-bit mask only when failing is the other option.
|
||||
*/
|
||||
|
||||
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
|
||||
endpoint->dma_using_dac = 0;
|
||||
} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
|
||||
endpoint->dma_using_dac = 1;
|
||||
} else {
|
||||
dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
|
||||
return -ENODEV;
|
||||
|
|
|
@ -20,10 +20,12 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/extcon.h>
|
||||
|
||||
|
@ -46,6 +48,9 @@
|
|||
#define HPDET_DEBOUNCE 500
|
||||
#define DEFAULT_MICD_TIMEOUT 2000
|
||||
|
||||
#define MICD_DBTIME_TWO_READINGS 2
|
||||
#define MICD_DBTIME_FOUR_READINGS 4
|
||||
|
||||
#define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \
|
||||
ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \
|
||||
ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \
|
||||
|
@ -94,6 +99,8 @@ struct arizona_extcon_info {
|
|||
int hpdet_ip_version;
|
||||
|
||||
struct extcon_dev *edev;
|
||||
|
||||
struct gpio_desc *micd_pol_gpio;
|
||||
};
|
||||
|
||||
static const struct arizona_micd_config micd_default_modes[] = {
|
||||
|
@ -204,6 +211,10 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
|
|||
if (arizona->pdata.micd_pol_gpio > 0)
|
||||
gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
|
||||
info->micd_modes[mode].gpio);
|
||||
else
|
||||
gpiod_set_value_cansleep(info->micd_pol_gpio,
|
||||
info->micd_modes[mode].gpio);
|
||||
|
||||
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
|
||||
ARIZONA_MICD_BIAS_SRC_MASK,
|
||||
info->micd_modes[mode].bias <<
|
||||
|
@ -757,10 +768,11 @@ static void arizona_micd_timeout_work(struct work_struct *work)
|
|||
mutex_lock(&info->lock);
|
||||
|
||||
dev_dbg(info->arizona->dev, "MICD timed out, reporting HP\n");
|
||||
arizona_identify_headphone(info);
|
||||
|
||||
info->detecting = false;
|
||||
|
||||
arizona_identify_headphone(info);
|
||||
|
||||
arizona_stop_mic(info);
|
||||
|
||||
mutex_unlock(&info->lock);
|
||||
|
@ -820,12 +832,18 @@ static void arizona_micd_detect(struct work_struct *work)
|
|||
/* Due to jack detect this should never happen */
|
||||
if (!(val & ARIZONA_MICD_STS)) {
|
||||
dev_warn(arizona->dev, "Detected open circuit\n");
|
||||
info->mic = false;
|
||||
arizona_stop_mic(info);
|
||||
info->detecting = false;
|
||||
arizona_identify_headphone(info);
|
||||
goto handled;
|
||||
}
|
||||
|
||||
/* If we got a high impedence we should have a headset, report it. */
|
||||
if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
|
||||
info->mic = true;
|
||||
info->detecting = false;
|
||||
|
||||
arizona_identify_headphone(info);
|
||||
|
||||
ret = extcon_set_cable_state_(info->edev,
|
||||
|
@ -841,8 +859,6 @@ static void arizona_micd_detect(struct work_struct *work)
|
|||
ret);
|
||||
}
|
||||
|
||||
info->mic = true;
|
||||
info->detecting = false;
|
||||
goto handled;
|
||||
}
|
||||
|
||||
|
@ -855,10 +871,11 @@ static void arizona_micd_detect(struct work_struct *work)
|
|||
if (info->detecting && (val & MICD_LVL_1_TO_7)) {
|
||||
if (info->jack_flips >= info->micd_num_modes * 10) {
|
||||
dev_dbg(arizona->dev, "Detected HP/line\n");
|
||||
arizona_identify_headphone(info);
|
||||
|
||||
info->detecting = false;
|
||||
|
||||
arizona_identify_headphone(info);
|
||||
|
||||
arizona_stop_mic(info);
|
||||
} else {
|
||||
info->micd_mode++;
|
||||
|
@ -1110,12 +1127,12 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
|
|||
regmap_update_bits(arizona->regmap, reg, mask, level);
|
||||
}
|
||||
|
||||
static int arizona_extcon_of_get_pdata(struct arizona *arizona)
|
||||
static int arizona_extcon_device_get_pdata(struct arizona *arizona)
|
||||
{
|
||||
struct arizona_pdata *pdata = &arizona->pdata;
|
||||
unsigned int val = ARIZONA_ACCDET_MODE_HPL;
|
||||
|
||||
of_property_read_u32(arizona->dev->of_node, "wlf,hpdet-channel", &val);
|
||||
device_property_read_u32(arizona->dev, "wlf,hpdet-channel", &val);
|
||||
switch (val) {
|
||||
case ARIZONA_ACCDET_MODE_HPL:
|
||||
case ARIZONA_ACCDET_MODE_HPR:
|
||||
|
@ -1127,6 +1144,24 @@ static int arizona_extcon_of_get_pdata(struct arizona *arizona)
|
|||
pdata->hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
|
||||
}
|
||||
|
||||
device_property_read_u32(arizona->dev, "wlf,micd-detect-debounce",
|
||||
&pdata->micd_detect_debounce);
|
||||
|
||||
device_property_read_u32(arizona->dev, "wlf,micd-bias-start-time",
|
||||
&pdata->micd_bias_start_time);
|
||||
|
||||
device_property_read_u32(arizona->dev, "wlf,micd-rate",
|
||||
&pdata->micd_rate);
|
||||
|
||||
device_property_read_u32(arizona->dev, "wlf,micd-dbtime",
|
||||
&pdata->micd_dbtime);
|
||||
|
||||
device_property_read_u32(arizona->dev, "wlf,micd-timeout",
|
||||
&pdata->micd_timeout);
|
||||
|
||||
pdata->micd_force_micbias = device_property_read_bool(arizona->dev,
|
||||
"wlf,micd-force-micbias");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1147,10 +1182,8 @@ static int arizona_extcon_probe(struct platform_device *pdev)
|
|||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
if (IS_ENABLED(CONFIG_OF)) {
|
||||
if (!dev_get_platdata(arizona->dev))
|
||||
arizona_extcon_of_get_pdata(arizona);
|
||||
}
|
||||
if (!dev_get_platdata(arizona->dev))
|
||||
arizona_extcon_device_get_pdata(arizona);
|
||||
|
||||
info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
|
||||
if (IS_ERR(info->micvdd)) {
|
||||
|
@ -1241,6 +1274,27 @@ static int arizona_extcon_probe(struct platform_device *pdev)
|
|||
arizona->pdata.micd_pol_gpio, ret);
|
||||
goto err_register;
|
||||
}
|
||||
} else {
|
||||
if (info->micd_modes[0].gpio)
|
||||
mode = GPIOD_OUT_HIGH;
|
||||
else
|
||||
mode = GPIOD_OUT_LOW;
|
||||
|
||||
/* We can't use devm here because we need to do the get
|
||||
* against the MFD device, as that is where the of_node
|
||||
* will reside, but if we devm against that the GPIO
|
||||
* will not be freed if the extcon driver is unloaded.
|
||||
*/
|
||||
info->micd_pol_gpio = gpiod_get_optional(arizona->dev,
|
||||
"wlf,micd-pol",
|
||||
GPIOD_OUT_LOW);
|
||||
if (IS_ERR(info->micd_pol_gpio)) {
|
||||
ret = PTR_ERR(info->micd_pol_gpio);
|
||||
dev_err(arizona->dev,
|
||||
"Failed to get microphone polarity GPIO: %d\n",
|
||||
ret);
|
||||
goto err_register;
|
||||
}
|
||||
}
|
||||
|
||||
if (arizona->pdata.hpdet_id_gpio > 0) {
|
||||
|
@ -1251,7 +1305,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
|
|||
if (ret != 0) {
|
||||
dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
|
||||
arizona->pdata.hpdet_id_gpio, ret);
|
||||
goto err_register;
|
||||
goto err_gpio;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1267,11 +1321,19 @@ static int arizona_extcon_probe(struct platform_device *pdev)
|
|||
arizona->pdata.micd_rate
|
||||
<< ARIZONA_MICD_RATE_SHIFT);
|
||||
|
||||
if (arizona->pdata.micd_dbtime)
|
||||
switch (arizona->pdata.micd_dbtime) {
|
||||
case MICD_DBTIME_FOUR_READINGS:
|
||||
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
|
||||
ARIZONA_MICD_DBTIME_MASK,
|
||||
arizona->pdata.micd_dbtime
|
||||
<< ARIZONA_MICD_DBTIME_SHIFT);
|
||||
ARIZONA_MICD_DBTIME);
|
||||
break;
|
||||
case MICD_DBTIME_TWO_READINGS:
|
||||
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
|
||||
ARIZONA_MICD_DBTIME_MASK, 0);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(arizona_micd_levels) != 0x40);
|
||||
|
||||
|
@ -1295,7 +1357,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
|
|||
dev_err(arizona->dev,
|
||||
"MICD ranges must be sorted\n");
|
||||
ret = -EINVAL;
|
||||
goto err_input;
|
||||
goto err_gpio;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1314,7 +1376,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
|
|||
dev_err(arizona->dev, "Unsupported MICD level %d\n",
|
||||
info->micd_ranges[i].max);
|
||||
ret = -EINVAL;
|
||||
goto err_input;
|
||||
goto err_gpio;
|
||||
}
|
||||
|
||||
dev_dbg(arizona->dev, "%d ohms for MICD threshold %d\n",
|
||||
|
@ -1387,7 +1449,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
|
|||
if (ret != 0) {
|
||||
dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
|
||||
ret);
|
||||
goto err_input;
|
||||
goto err_gpio;
|
||||
}
|
||||
|
||||
ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
|
||||
|
@ -1458,7 +1520,8 @@ err_rise_wake:
|
|||
arizona_set_irq_wake(arizona, jack_irq_rise, 0);
|
||||
err_rise:
|
||||
arizona_free_irq(arizona, jack_irq_rise, info);
|
||||
err_input:
|
||||
err_gpio:
|
||||
gpiod_put(info->micd_pol_gpio);
|
||||
err_register:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
return ret;
|
||||
|
@ -1470,6 +1533,8 @@ static int arizona_extcon_remove(struct platform_device *pdev)
|
|||
struct arizona *arizona = info->arizona;
|
||||
int jack_irq_rise, jack_irq_fall;
|
||||
|
||||
gpiod_put(info->micd_pol_gpio);
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
regmap_update_bits(arizona->regmap,
|
||||
|
|
|
@ -65,22 +65,6 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf)
|
||||
{
|
||||
struct device *dev = edev->dev.parent;
|
||||
struct gpio_extcon_data *extcon_data = dev_get_drvdata(dev);
|
||||
const char *state;
|
||||
|
||||
if (extcon_get_state(edev))
|
||||
state = extcon_data->state_on;
|
||||
else
|
||||
state = extcon_data->state_off;
|
||||
|
||||
if (state)
|
||||
return sprintf(buf, "%s\n", state);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int gpio_extcon_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev);
|
||||
|
@ -110,8 +94,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
|
|||
extcon_data->state_on = pdata->state_on;
|
||||
extcon_data->state_off = pdata->state_off;
|
||||
extcon_data->check_on_resume = pdata->check_on_resume;
|
||||
if (pdata->state_on && pdata->state_off)
|
||||
extcon_data->edev->print_state = extcon_gpio_print_state;
|
||||
|
||||
ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
|
||||
pdev->name);
|
||||
|
|
|
@ -781,6 +781,15 @@ static int max77843_muic_probe(struct platform_device *pdev)
|
|||
/* Support virtual irq domain for max77843 MUIC device */
|
||||
INIT_WORK(&info->irq_work, max77843_muic_irq_work);
|
||||
|
||||
/* Clear IRQ bits before request IRQs */
|
||||
ret = regmap_bulk_read(max77843->regmap_muic,
|
||||
MAX77843_MUIC_REG_INT1, info->status,
|
||||
MAX77843_MUIC_IRQ_NUM);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
|
||||
goto err_muic_irq;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) {
|
||||
struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i];
|
||||
unsigned int virq = 0;
|
||||
|
|
|
@ -28,6 +28,11 @@
|
|||
#include <linux/mfd/palmas.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
|
||||
|
||||
static const unsigned int palmas_extcon_cable[] = {
|
||||
EXTCON_USB,
|
||||
|
@ -35,8 +40,6 @@ static const unsigned int palmas_extcon_cable[] = {
|
|||
EXTCON_NONE,
|
||||
};
|
||||
|
||||
static const int mutually_exclusive[] = {0x3, 0x0};
|
||||
|
||||
static void palmas_usb_wakeup(struct palmas *palmas, int enable)
|
||||
{
|
||||
if (enable)
|
||||
|
@ -120,19 +123,54 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void palmas_gpio_id_detect(struct work_struct *work)
|
||||
{
|
||||
int id;
|
||||
struct palmas_usb *palmas_usb = container_of(to_delayed_work(work),
|
||||
struct palmas_usb,
|
||||
wq_detectid);
|
||||
struct extcon_dev *edev = palmas_usb->edev;
|
||||
|
||||
if (!palmas_usb->id_gpiod)
|
||||
return;
|
||||
|
||||
id = gpiod_get_value_cansleep(palmas_usb->id_gpiod);
|
||||
|
||||
if (id) {
|
||||
extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
|
||||
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
|
||||
} else {
|
||||
extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
|
||||
dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
|
||||
}
|
||||
}
|
||||
|
||||
static irqreturn_t palmas_gpio_id_irq_handler(int irq, void *_palmas_usb)
|
||||
{
|
||||
struct palmas_usb *palmas_usb = _palmas_usb;
|
||||
|
||||
queue_delayed_work(system_power_efficient_wq, &palmas_usb->wq_detectid,
|
||||
palmas_usb->sw_debounce_jiffies);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void palmas_enable_irq(struct palmas_usb *palmas_usb)
|
||||
{
|
||||
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
|
||||
PALMAS_USB_VBUS_CTRL_SET,
|
||||
PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP);
|
||||
|
||||
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
|
||||
PALMAS_USB_ID_CTRL_SET, PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
|
||||
if (palmas_usb->enable_id_detection) {
|
||||
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
|
||||
PALMAS_USB_ID_CTRL_SET,
|
||||
PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
|
||||
|
||||
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
|
||||
PALMAS_USB_ID_INT_EN_HI_SET,
|
||||
PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
|
||||
PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
|
||||
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
|
||||
PALMAS_USB_ID_INT_EN_HI_SET,
|
||||
PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
|
||||
PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
|
||||
}
|
||||
|
||||
if (palmas_usb->enable_vbus_detection)
|
||||
palmas_vbus_irq_handler(palmas_usb->vbus_irq, palmas_usb);
|
||||
|
@ -171,20 +209,37 @@ static int palmas_usb_probe(struct platform_device *pdev)
|
|||
palmas_usb->wakeup = pdata->wakeup;
|
||||
}
|
||||
|
||||
palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id",
|
||||
GPIOD_IN);
|
||||
if (IS_ERR(palmas_usb->id_gpiod)) {
|
||||
dev_err(&pdev->dev, "failed to get id gpio\n");
|
||||
return PTR_ERR(palmas_usb->id_gpiod);
|
||||
}
|
||||
|
||||
if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
|
||||
palmas_usb->enable_id_detection = false;
|
||||
palmas_usb->enable_gpio_id_detection = true;
|
||||
}
|
||||
|
||||
if (palmas_usb->enable_gpio_id_detection) {
|
||||
u32 debounce;
|
||||
|
||||
if (of_property_read_u32(node, "debounce-delay-ms", &debounce))
|
||||
debounce = USB_GPIO_DEBOUNCE_MS;
|
||||
|
||||
status = gpiod_set_debounce(palmas_usb->id_gpiod,
|
||||
debounce * 1000);
|
||||
if (status < 0)
|
||||
palmas_usb->sw_debounce_jiffies = msecs_to_jiffies(debounce);
|
||||
}
|
||||
|
||||
INIT_DELAYED_WORK(&palmas_usb->wq_detectid, palmas_gpio_id_detect);
|
||||
|
||||
palmas->usb = palmas_usb;
|
||||
palmas_usb->palmas = palmas;
|
||||
|
||||
palmas_usb->dev = &pdev->dev;
|
||||
|
||||
palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
|
||||
PALMAS_ID_OTG_IRQ);
|
||||
palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
|
||||
PALMAS_ID_IRQ);
|
||||
palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
|
||||
PALMAS_VBUS_OTG_IRQ);
|
||||
palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
|
||||
PALMAS_VBUS_IRQ);
|
||||
|
||||
palmas_usb_wakeup(palmas, palmas_usb->wakeup);
|
||||
|
||||
platform_set_drvdata(pdev, palmas_usb);
|
||||
|
@ -195,7 +250,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
|
|||
dev_err(&pdev->dev, "failed to allocate extcon device\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
palmas_usb->edev->mutually_exclusive = mutually_exclusive;
|
||||
|
||||
status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
|
||||
if (status) {
|
||||
|
@ -204,6 +258,10 @@ static int palmas_usb_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
if (palmas_usb->enable_id_detection) {
|
||||
palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
|
||||
PALMAS_ID_OTG_IRQ);
|
||||
palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
|
||||
PALMAS_ID_IRQ);
|
||||
status = devm_request_threaded_irq(palmas_usb->dev,
|
||||
palmas_usb->id_irq,
|
||||
NULL, palmas_id_irq_handler,
|
||||
|
@ -215,9 +273,33 @@ static int palmas_usb_probe(struct platform_device *pdev)
|
|||
palmas_usb->id_irq, status);
|
||||
return status;
|
||||
}
|
||||
} else if (palmas_usb->enable_gpio_id_detection) {
|
||||
palmas_usb->gpio_id_irq = gpiod_to_irq(palmas_usb->id_gpiod);
|
||||
if (palmas_usb->gpio_id_irq < 0) {
|
||||
dev_err(&pdev->dev, "failed to get id irq\n");
|
||||
return palmas_usb->gpio_id_irq;
|
||||
}
|
||||
status = devm_request_threaded_irq(&pdev->dev,
|
||||
palmas_usb->gpio_id_irq,
|
||||
NULL,
|
||||
palmas_gpio_id_irq_handler,
|
||||
IRQF_TRIGGER_RISING |
|
||||
IRQF_TRIGGER_FALLING |
|
||||
IRQF_ONESHOT,
|
||||
"palmas_usb_id",
|
||||
palmas_usb);
|
||||
if (status < 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"failed to request handler for id irq\n");
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
if (palmas_usb->enable_vbus_detection) {
|
||||
palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
|
||||
PALMAS_VBUS_OTG_IRQ);
|
||||
palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
|
||||
PALMAS_VBUS_IRQ);
|
||||
status = devm_request_threaded_irq(palmas_usb->dev,
|
||||
palmas_usb->vbus_irq, NULL,
|
||||
palmas_vbus_irq_handler,
|
||||
|
@ -232,10 +314,21 @@ static int palmas_usb_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
palmas_enable_irq(palmas_usb);
|
||||
/* perform initial detection */
|
||||
palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
|
||||
device_set_wakeup_capable(&pdev->dev, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int palmas_usb_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
|
||||
|
||||
cancel_delayed_work_sync(&palmas_usb->wq_detectid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int palmas_usb_suspend(struct device *dev)
|
||||
{
|
||||
|
@ -246,6 +339,8 @@ static int palmas_usb_suspend(struct device *dev)
|
|||
enable_irq_wake(palmas_usb->vbus_irq);
|
||||
if (palmas_usb->enable_id_detection)
|
||||
enable_irq_wake(palmas_usb->id_irq);
|
||||
if (palmas_usb->enable_gpio_id_detection)
|
||||
enable_irq_wake(palmas_usb->gpio_id_irq);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -259,6 +354,8 @@ static int palmas_usb_resume(struct device *dev)
|
|||
disable_irq_wake(palmas_usb->vbus_irq);
|
||||
if (palmas_usb->enable_id_detection)
|
||||
disable_irq_wake(palmas_usb->id_irq);
|
||||
if (palmas_usb->enable_gpio_id_detection)
|
||||
disable_irq_wake(palmas_usb->gpio_id_irq);
|
||||
}
|
||||
return 0;
|
||||
};
|
||||
|
@ -276,6 +373,7 @@ static const struct of_device_id of_palmas_match_tbl[] = {
|
|||
|
||||
static struct platform_driver palmas_usb_driver = {
|
||||
.probe = palmas_usb_probe,
|
||||
.remove = palmas_usb_remove,
|
||||
.driver = {
|
||||
.name = "palmas-usb",
|
||||
.of_match_table = of_palmas_match_tbl,
|
||||
|
|
|
@ -693,7 +693,6 @@ MODULE_DEVICE_TABLE(i2c, rt8973a_i2c_id);
|
|||
static struct i2c_driver rt8973a_muic_i2c_driver = {
|
||||
.driver = {
|
||||
.name = "rt8973a",
|
||||
.owner = THIS_MODULE,
|
||||
.pm = &rt8973a_muic_pm_ops,
|
||||
.of_match_table = rt8973a_dt_match,
|
||||
},
|
||||
|
|
|
@ -685,7 +685,6 @@ MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
|
|||
static struct i2c_driver sm5502_muic_i2c_driver = {
|
||||
.driver = {
|
||||
.name = "sm5502",
|
||||
.owner = THIS_MODULE,
|
||||
.pm = &sm5502_muic_pm_ops,
|
||||
.of_match_table = sm5502_dt_match,
|
||||
},
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/extcon.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
|
|
@ -126,7 +126,7 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
|
|||
|
||||
static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
|
||||
{
|
||||
unsigned int id = -EINVAL;
|
||||
int id = -EINVAL;
|
||||
int i = 0;
|
||||
|
||||
/* Find the id of extcon cable */
|
||||
|
@ -143,7 +143,7 @@ static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
|
|||
|
||||
static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
|
||||
{
|
||||
unsigned int id;
|
||||
int id;
|
||||
|
||||
if (edev->max_supported == 0)
|
||||
return -EINVAL;
|
||||
@@ -172,14 +172,6 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
	int i, count = 0;
	struct extcon_dev *edev = dev_get_drvdata(dev);

	if (edev->print_state) {
		int ret = edev->print_state(edev, buf);

		if (ret >= 0)
			return ret;
		/* Use default if failed */
	}

	if (edev->max_supported == 0)
		return sprintf(buf, "%u\n", edev->state);
@ -272,6 +264,9 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
|
|||
unsigned long flags;
|
||||
bool attached;
|
||||
|
||||
if (!edev)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&edev->lock, flags);
|
||||
|
||||
if (edev->state != ((edev->state & ~mask) | (state & mask))) {
|
||||
|
@ -345,6 +340,9 @@ EXPORT_SYMBOL_GPL(extcon_update_state);
|
|||
*/
|
||||
int extcon_set_state(struct extcon_dev *edev, u32 state)
|
||||
{
|
||||
if (!edev)
|
||||
return -EINVAL;
|
||||
|
||||
return extcon_update_state(edev, 0xffffffff, state);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(extcon_set_state);
|
||||
|
@ -358,6 +356,9 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
|
|||
{
|
||||
int index;
|
||||
|
||||
if (!edev)
|
||||
return -EINVAL;
|
||||
|
||||
index = find_cable_index_by_id(edev, id);
|
||||
if (index < 0)
|
||||
return index;
|
||||
|
@ -378,7 +379,7 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
|
|||
*/
|
||||
int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
|
||||
{
|
||||
unsigned int id;
|
||||
int id;
|
||||
|
||||
id = find_cable_id_by_name(edev, cable_name);
|
||||
if (id < 0)
|
||||
|
@ -402,6 +403,9 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
|
|||
u32 state;
|
||||
int index;
|
||||
|
||||
if (!edev)
|
||||
return -EINVAL;
|
||||
|
||||
index = find_cable_index_by_id(edev, id);
|
||||
if (index < 0)
|
||||
return index;
|
||||
|
@ -426,7 +430,7 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
|
|||
int extcon_set_cable_state(struct extcon_dev *edev,
|
||||
const char *cable_name, bool cable_state)
|
||||
{
|
||||
unsigned int id;
|
||||
int id;
|
||||
|
||||
id = find_cable_id_by_name(edev, cable_name);
|
||||
if (id < 0)
|
||||
|
@ -444,6 +448,9 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
|
|||
{
|
||||
struct extcon_dev *sd;
|
||||
|
||||
if (!extcon_name)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
mutex_lock(&extcon_dev_list_lock);
|
||||
list_for_each_entry(sd, &extcon_dev_list, entry) {
|
||||
if (!strcmp(sd->name, extcon_name))
|
||||
|
@ -572,6 +579,9 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
|
|||
unsigned long flags;
|
||||
int ret, idx;
|
||||
|
||||
if (!edev || !nb)
|
||||
return -EINVAL;
|
||||
|
||||
idx = find_cable_index_by_id(edev, id);
|
||||
|
||||
spin_lock_irqsave(&edev->lock, flags);
|
||||
|
@ -594,6 +604,9 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
|
|||
unsigned long flags;
|
||||
int ret, idx;
|
||||
|
||||
if (!edev || !nb)
|
||||
return -EINVAL;
|
||||
|
||||
idx = find_cable_index_by_id(edev, id);
|
||||
|
||||
spin_lock_irqsave(&edev->lock, flags);
|
||||
|
@ -654,6 +667,9 @@ struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
|
|||
{
|
||||
struct extcon_dev *edev;
|
||||
|
||||
if (!supported_cable)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
|
||||
if (!edev)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
@ -754,7 +770,7 @@ int extcon_dev_register(struct extcon_dev *edev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
if (!edev->supported_cable)
|
||||
if (!edev || !edev->supported_cable)
|
||||
return -EINVAL;
|
||||
|
||||
for (; edev->supported_cable[index] != EXTCON_NONE; index++);
|
||||
|
@ -960,6 +976,9 @@ void extcon_dev_unregister(struct extcon_dev *edev)
|
|||
{
|
||||
int index;
|
||||
|
||||
if (!edev)
|
||||
return;
|
||||
|
||||
mutex_lock(&extcon_dev_list_lock);
|
||||
list_del(&edev->entry);
|
||||
mutex_unlock(&extcon_dev_list_lock);
|
||||
|
@ -1066,6 +1085,9 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
|
|||
struct device_node *node;
|
||||
struct extcon_dev *edev;
|
||||
|
||||
if (!dev)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (!dev->of_node) {
|
||||
dev_err(dev, "device does not have a device node entry\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
|
|
@@ -601,6 +601,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
	u64 aligned_data = 0;
	int ret;
	bool signal = false;
	int num_vecs = ((bufferlen != 0) ? 3 : 1);


	/* Setup the descriptor */

@@ -618,7 +619,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
				  &signal);

	/*
	 * Signalling the host is conditional on many factors:
@ -347,6 +347,7 @@ enum {
|
|||
IDE = 0,
|
||||
SCSI,
|
||||
NIC,
|
||||
ND_NIC,
|
||||
MAX_PERF_CHN,
|
||||
};
|
||||
|
||||
|
@ -391,6 +392,7 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
|
|||
struct vmbus_channel *primary = channel->primary_channel;
|
||||
int next_node;
|
||||
struct cpumask available_mask;
|
||||
struct cpumask *alloced_mask;
|
||||
|
||||
for (i = IDE; i < MAX_PERF_CHN; i++) {
|
||||
if (!memcmp(type_guid->b, hp_devs[i].guid,
|
||||
|
@ -408,7 +410,6 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
|
|||
* channel, bind it to cpu 0.
|
||||
*/
|
||||
channel->numa_node = 0;
|
||||
cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
|
||||
channel->target_cpu = 0;
|
||||
channel->target_vp = hv_context.vp_index[0];
|
||||
return;
|
||||
|
@@ -433,21 +434,38 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(&primary->alloced_cpus_in_node) ==
	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(&primary->alloced_cpus_in_node);
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = cpumask_next(-1, &available_mask);
	cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
	cur_cpu = -1;
	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (!cpumask_test_cpu(cur_cpu,
				&primary->alloced_cpus_in_node)) {
			cpumask_set_cpu(cur_cpu,
					&primary->alloced_cpus_in_node);
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
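The reworked loop above walks the node's CPUs round-robin, skipping CPUs this primary channel has already used, rather than always taking the first CPU left after the xor. A standalone illustration of the same round-robin idea over a plain bitmask (userspace, not the kernel cpumask API):

/* Userspace sketch of the round-robin CPU pick done above, using a plain
 * bitmask instead of cpumask_*(). */
#include <stdio.h>

#define NCPUS 8

/* Return the next set bit after 'prev', or NCPUS if none. */
static int next_cpu(int prev, unsigned int mask)
{
	for (int cpu = prev + 1; cpu < NCPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NCPUS;
}

static int pick_target_cpu(unsigned int node_mask, unsigned int *alloced)
{
	int cur = -1;

	/* Every CPU in the node used once: reset, like cpumask_clear() above. */
	if ((*alloced & node_mask) == node_mask)
		*alloced &= ~node_mask;

	for (;;) {
		cur = next_cpu(cur, node_mask);
		if (cur >= NCPUS) {		/* walked off the end: wrap around */
			cur = -1;
			continue;
		}
		if (!(*alloced & (1u << cur))) {
			*alloced |= 1u << cur;	/* remember this choice */
			return cur;
		}
	}
}

int main(void)
{
	unsigned int node_mask = 0x0f;	/* CPUs 0-3 make up the node */
	unsigned int alloced = 0;

	for (int i = 0; i < 6; i++)
		printf("channel %d -> cpu %d\n",
		       i, pick_target_cpu(node_mask, &alloced));
	return 0;
}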
@@ -469,6 +487,10 @@ void vmbus_initiate_unload(void)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
drivers/hv/hv.c (150 lines changed)
@ -93,11 +93,14 @@ static int query_hypervisor_info(void)
|
|||
*/
|
||||
static u64 do_hypercall(u64 control, void *input, void *output)
|
||||
{
|
||||
#ifdef CONFIG_X86_64
|
||||
u64 hv_status = 0;
|
||||
u64 input_address = (input) ? virt_to_phys(input) : 0;
|
||||
u64 output_address = (output) ? virt_to_phys(output) : 0;
|
||||
void *hypercall_page = hv_context.hypercall_page;
|
||||
#ifdef CONFIG_X86_64
|
||||
u64 hv_status = 0;
|
||||
|
||||
if (!hypercall_page)
|
||||
return (u64)ULLONG_MAX;
|
||||
|
||||
__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
|
||||
__asm__ __volatile__("call *%3" : "=a" (hv_status) :
|
||||
|
@ -112,13 +115,13 @@ static u64 do_hypercall(u64 control, void *input, void *output)
|
|||
u32 control_lo = control & 0xFFFFFFFF;
|
||||
u32 hv_status_hi = 1;
|
||||
u32 hv_status_lo = 1;
|
||||
u64 input_address = (input) ? virt_to_phys(input) : 0;
|
||||
u32 input_address_hi = input_address >> 32;
|
||||
u32 input_address_lo = input_address & 0xFFFFFFFF;
|
||||
u64 output_address = (output) ? virt_to_phys(output) : 0;
|
||||
u32 output_address_hi = output_address >> 32;
|
||||
u32 output_address_lo = output_address & 0xFFFFFFFF;
|
||||
void *hypercall_page = hv_context.hypercall_page;
|
||||
|
||||
if (!hypercall_page)
|
||||
return (u64)ULLONG_MAX;
|
||||
|
||||
__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
|
||||
"=a"(hv_status_lo) : "d" (control_hi),
|
||||
|
@ -130,6 +133,56 @@ static u64 do_hypercall(u64 control, void *input, void *output)
|
|||
#endif /* !x86_64 */
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
static cycle_t read_hv_clock_tsc(struct clocksource *arg)
{
	cycle_t current_tick;
	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;

	if (tsc_pg->tsc_sequence != -1) {
		/*
		 * Use the tsc page to compute the value.
		 */

		while (1) {
			cycle_t tmp;
			u32 sequence = tsc_pg->tsc_sequence;
			u64 cur_tsc;
			u64 scale = tsc_pg->tsc_scale;
			s64 offset = tsc_pg->tsc_offset;

			rdtscll(cur_tsc);
			/* current_tick = ((cur_tsc *scale) >> 64) + offset */
			asm("mulq %3"
				: "=d" (current_tick), "=a" (tmp)
				: "a" (cur_tsc), "r" (scale));

			current_tick += offset;
			if (tsc_pg->tsc_sequence == sequence)
				return current_tick;

			if (tsc_pg->tsc_sequence != -1)
				continue;
			/*
			 * Fallback using MSR method.
			 */
			break;
		}
	}
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_tsc = {
	.name		= "hyperv_clocksource_tsc_page",
	.rating		= 425,
	.read		= read_hv_clock_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif
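The inline "mulq" above keeps the high 64 bits of a 64x64-bit product, i.e. (cur_tsc * scale) >> 64, then adds the signed offset from the TSC page. A standalone, userspace equivalent of that arithmetic using the compiler's __int128 (values are made up; this is only to show the math, not how the kernel does it):

/* Userspace illustration of the TSC-page math: the reference time is the
 * high 64 bits of (tsc * scale) plus a signed offset. */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_tsc(uint64_t tsc, uint64_t scale, int64_t offset)
{
	/* Same result as the inline "mulq": ((tsc * scale) >> 64) + offset */
	return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + offset;
}

int main(void)
{
	uint64_t tsc = 123456789012ULL;
	uint64_t scale = 0x8000000000000000ULL;	/* i.e. multiply by 0.5 */
	int64_t offset = 1000;

	printf("%llu\n", (unsigned long long)scale_tsc(tsc, scale, offset));
	return 0;
}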
|
||||
|
||||
/*
|
||||
* hv_init - Main initialization routine.
|
||||
*
|
||||
|
@ -139,7 +192,9 @@ int hv_init(void)
|
|||
{
|
||||
int max_leaf;
|
||||
union hv_x64_msr_hypercall_contents hypercall_msr;
|
||||
union hv_x64_msr_hypercall_contents tsc_msr;
|
||||
void *virtaddr = NULL;
|
||||
void *va_tsc = NULL;
|
||||
|
||||
memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
|
||||
memset(hv_context.synic_message_page, 0,
|
||||
|
@ -183,6 +238,22 @@ int hv_init(void)
|
|||
|
||||
hv_context.hypercall_page = virtaddr;
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
|
||||
va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
|
||||
if (!va_tsc)
|
||||
goto cleanup;
|
||||
hv_context.tsc_page = va_tsc;
|
||||
|
||||
rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
|
||||
|
||||
tsc_msr.enable = 1;
|
||||
tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);
|
||||
|
||||
wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
|
||||
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
|
@ -216,6 +287,21 @@ void hv_cleanup(void)
|
|||
vfree(hv_context.hypercall_page);
|
||||
hv_context.hypercall_page = NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
* Cleanup the TSC page based CS.
|
||||
*/
|
||||
if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
|
||||
clocksource_change_rating(&hyperv_cs_tsc, 10);
|
||||
clocksource_unregister(&hyperv_cs_tsc);
|
||||
|
||||
hypercall_msr.as_uint64 = 0;
|
||||
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
|
||||
vfree(hv_context.tsc_page);
|
||||
hv_context.tsc_page = NULL;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -271,7 +357,7 @@ static int hv_ce_set_next_event(unsigned long delta,
|
|||
{
|
||||
cycle_t current_tick;
|
||||
|
||||
WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
|
||||
WARN_ON(!clockevent_state_oneshot(evt));
|
||||
|
||||
rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
|
||||
current_tick += delta;
|
||||
|
@ -279,31 +365,24 @@ static int hv_ce_set_next_event(unsigned long delta,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void hv_ce_setmode(enum clock_event_mode mode,
|
||||
struct clock_event_device *evt)
|
||||
static int hv_ce_shutdown(struct clock_event_device *evt)
|
||||
{
|
||||
wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
|
||||
wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hv_ce_set_oneshot(struct clock_event_device *evt)
|
||||
{
|
||||
union hv_timer_config timer_cfg;
|
||||
|
||||
switch (mode) {
|
||||
case CLOCK_EVT_MODE_PERIODIC:
|
||||
/* unsupported */
|
||||
break;
|
||||
timer_cfg.enable = 1;
|
||||
timer_cfg.auto_enable = 1;
|
||||
timer_cfg.sintx = VMBUS_MESSAGE_SINT;
|
||||
wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
|
||||
|
||||
case CLOCK_EVT_MODE_ONESHOT:
|
||||
timer_cfg.enable = 1;
|
||||
timer_cfg.auto_enable = 1;
|
||||
timer_cfg.sintx = VMBUS_MESSAGE_SINT;
|
||||
wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
|
||||
break;
|
||||
|
||||
case CLOCK_EVT_MODE_UNUSED:
|
||||
case CLOCK_EVT_MODE_SHUTDOWN:
|
||||
wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
|
||||
wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
|
||||
break;
|
||||
case CLOCK_EVT_MODE_RESUME:
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
|
||||
|
@ -318,7 +397,8 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
|
|||
* references to the hv_vmbus module making it impossible to unload.
|
||||
*/
|
||||
|
||||
dev->set_mode = hv_ce_setmode;
|
||||
dev->set_state_shutdown = hv_ce_shutdown;
|
||||
dev->set_state_oneshot = hv_ce_set_oneshot;
|
||||
dev->set_next_event = hv_ce_set_next_event;
|
||||
}
|
||||
|
||||
|
@ -329,6 +409,13 @@ int hv_synic_alloc(void)
|
|||
size_t ced_size = sizeof(struct clock_event_device);
|
||||
int cpu;
|
||||
|
||||
hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
|
||||
GFP_ATOMIC);
|
||||
if (hv_context.hv_numa_map == NULL) {
|
||||
pr_err("Unable to allocate NUMA map\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
|
||||
if (hv_context.event_dpc[cpu] == NULL) {
|
||||
|
@ -342,6 +429,7 @@ int hv_synic_alloc(void)
|
|||
pr_err("Unable to allocate clock event device\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
|
||||
|
||||
hv_context.synic_message_page[cpu] =
|
||||
|
@ -390,6 +478,7 @@ void hv_synic_free(void)
|
|||
{
|
||||
int cpu;
|
||||
|
||||
kfree(hv_context.hv_numa_map);
|
||||
for_each_online_cpu(cpu)
|
||||
hv_synic_free_cpu(cpu);
|
||||
}
|
||||
|
@ -503,8 +592,7 @@ void hv_synic_cleanup(void *arg)
|
|||
|
||||
/* Turn off clockevent device */
|
||||
if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
|
||||
hv_ce_setmode(CLOCK_EVT_MODE_SHUTDOWN,
|
||||
hv_context.clk_evt[cpu]);
|
||||
hv_ce_shutdown(hv_context.clk_evt[cpu]);
|
||||
|
||||
rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
|
||||
|
||||
|
@ -530,6 +618,4 @@ void hv_synic_cleanup(void *arg)
|
|||
rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
|
||||
sctrl.enable = 0;
|
||||
wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
|
||||
|
||||
hv_synic_free_cpu(cpu);
|
||||
}
|
||||
|
|
|
@ -62,11 +62,13 @@
|
|||
enum {
|
||||
DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
|
||||
DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
|
||||
DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
|
||||
|
||||
DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
|
||||
DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
|
||||
DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
|
||||
|
||||
DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
|
||||
DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
|
||||
};
|
||||
|
||||
|
||||
|
@@ -1296,13 +1298,25 @@ static void version_resp(struct hv_dynmem_device *dm,
	if (dm->next_version == 0)
		goto version_error;

	dm->next_version = 0;
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
	version_req.is_last_attempt = 1;
	version_req.version.version = dm->next_version;

	/*
	 * Set the next version to try in case current version fails.
	 * Win7 protocol ought to be the last one to try.
	 */
	switch (version_req.version.version) {
	case DYNMEM_PROTOCOL_VERSION_WIN8:
		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
		version_req.is_last_attempt = 0;
		break;
	default:
		dm->next_version = 0;
		version_req.is_last_attempt = 1;
	}

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
				sizeof(struct dm_version_request),
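With this change the balloon driver starts at the WIN10 protocol and steps down (WIN10 -> WIN8 -> WIN7) each time the host rejects the request, marking only the final candidate as the last attempt. A standalone sketch of that negotiation loop; host_accepts() is a made-up stand-in for the host's version response:

/* Userspace sketch of stepping down through protocol versions until the
 * host accepts one. host_accepts() is hypothetical. */
#include <stdbool.h>
#include <stdio.h>

enum { VER_WIN7 = 1, VER_WIN8 = 2, VER_WIN10 = 3 };

static bool host_accepts(int ver)
{
	return ver <= VER_WIN8;		/* pretend the host tops out at WIN8 */
}

int main(void)
{
	static const int candidates[] = { VER_WIN10, VER_WIN8, VER_WIN7 };
	int n = sizeof(candidates) / sizeof(candidates[0]);

	for (int i = 0; i < n; i++) {
		bool is_last_attempt = (i == n - 1);

		printf("request version %d (last attempt: %d)\n",
		       candidates[i], is_last_attempt);
		if (host_accepts(candidates[i])) {
			printf("negotiated version %d\n", candidates[i]);
			return 0;
		}
		if (is_last_attempt)
			break;
	}
	printf("version negotiation failed\n");
	return 1;
}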
@ -1442,7 +1456,7 @@ static int balloon_probe(struct hv_device *dev,
|
|||
|
||||
dm_device.dev = dev;
|
||||
dm_device.state = DM_INITIALIZING;
|
||||
dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
|
||||
dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
|
||||
init_completion(&dm_device.host_event);
|
||||
init_completion(&dm_device.config_event);
|
||||
INIT_LIST_HEAD(&dm_device.ha_region_list);
|
||||
|
@ -1474,7 +1488,7 @@ static int balloon_probe(struct hv_device *dev,
|
|||
version_req.hdr.type = DM_VERSION_REQUEST;
|
||||
version_req.hdr.size = sizeof(struct dm_version_request);
|
||||
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
|
||||
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
|
||||
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
|
||||
version_req.is_last_attempt = 0;
|
||||
|
||||
ret = vmbus_sendpacket(dev->channel, &version_req,
|
||||
|
|
|
@ -116,7 +116,7 @@ static int fcopy_handle_handshake(u32 version)
|
|||
|
||||
static void fcopy_send_data(struct work_struct *dummy)
|
||||
{
|
||||
struct hv_start_fcopy smsg_out;
|
||||
struct hv_start_fcopy *smsg_out = NULL;
|
||||
int operation = fcopy_transaction.fcopy_msg->operation;
|
||||
struct hv_start_fcopy *smsg_in;
|
||||
void *out_src;
|
||||
|
@ -136,21 +136,24 @@ static void fcopy_send_data(struct work_struct *dummy)
|
|||
switch (operation) {
|
||||
case START_FILE_COPY:
|
||||
out_len = sizeof(struct hv_start_fcopy);
|
||||
memset(&smsg_out, 0, out_len);
|
||||
smsg_out.hdr.operation = operation;
|
||||
smsg_out = kzalloc(sizeof(*smsg_out), GFP_KERNEL);
|
||||
if (!smsg_out)
|
||||
return;
|
||||
|
||||
smsg_out->hdr.operation = operation;
|
||||
smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
|
||||
|
||||
utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
|
||||
UTF16_LITTLE_ENDIAN,
|
||||
(__u8 *)&smsg_out.file_name, W_MAX_PATH - 1);
|
||||
(__u8 *)&smsg_out->file_name, W_MAX_PATH - 1);
|
||||
|
||||
utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
|
||||
UTF16_LITTLE_ENDIAN,
|
||||
(__u8 *)&smsg_out.path_name, W_MAX_PATH - 1);
|
||||
(__u8 *)&smsg_out->path_name, W_MAX_PATH - 1);
|
||||
|
||||
smsg_out.copy_flags = smsg_in->copy_flags;
|
||||
smsg_out.file_size = smsg_in->file_size;
|
||||
out_src = &smsg_out;
|
||||
smsg_out->copy_flags = smsg_in->copy_flags;
|
||||
smsg_out->file_size = smsg_in->file_size;
|
||||
out_src = smsg_out;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -168,6 +171,8 @@ static void fcopy_send_data(struct work_struct *dummy)
|
|||
fcopy_transaction.state = HVUTIL_READY;
|
||||
}
|
||||
}
|
||||
kfree(smsg_out);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -353,6 +353,9 @@ kvp_send_key(struct work_struct *dummy)
|
|||
return;
|
||||
|
||||
message = kzalloc(sizeof(*message), GFP_KERNEL);
|
||||
if (!message)
|
||||
return;
|
||||
|
||||
message->kvp_hdr.operation = operation;
|
||||
message->kvp_hdr.pool = pool;
|
||||
in_msg = kvp_transaction.kvp_msg;
|
||||
|
|
|
@ -186,7 +186,7 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
|
|||
return -EINVAL;
|
||||
} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
|
||||
cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
|
||||
if (!msg)
|
||||
if (!cn_msg)
|
||||
return -ENOMEM;
|
||||
cn_msg->id.idx = hvt->cn_id.idx;
|
||||
cn_msg->id.val = hvt->cn_id.val;
|
||||
|
|
|
@ -141,7 +141,7 @@ struct hv_port_info {
|
|||
struct {
|
||||
u32 target_sint;
|
||||
u32 target_vp;
|
||||
u16 base_flag_bumber;
|
||||
u16 base_flag_number;
|
||||
u16 flag_count;
|
||||
u32 rsvdz;
|
||||
} event_port_info;
|
||||
|
@ -517,6 +517,7 @@ struct hv_context {
|
|||
u64 guestid;
|
||||
|
||||
void *hypercall_page;
|
||||
void *tsc_page;
|
||||
|
||||
bool synic_initialized;
|
||||
|
||||
|
@ -551,10 +552,23 @@ struct hv_context {
|
|||
* Support PV clockevent device.
|
||||
*/
|
||||
struct clock_event_device *clk_evt[NR_CPUS];
|
||||
/*
|
||||
* To manage allocations in a NUMA node.
|
||||
* Array indexed by numa node ID.
|
||||
*/
|
||||
struct cpumask *hv_numa_map;
|
||||
};
|
||||
|
||||
extern struct hv_context hv_context;
|
||||
|
||||
struct ms_hyperv_tsc_page {
|
||||
volatile u32 tsc_sequence;
|
||||
u32 reserved1;
|
||||
volatile u64 tsc_scale;
|
||||
volatile s64 tsc_offset;
|
||||
u64 reserved2[509];
|
||||
};
|
||||
|
||||
struct hv_ring_buffer_debug_info {
|
||||
u32 current_interrupt_mask;
|
||||
u32 current_read_index;
|
||||
|
|
|
@ -103,10 +103,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
|
|||
* there is room for the producer to send the pending packet.
|
||||
*/
|
||||
|
||||
static bool hv_need_to_signal_on_read(u32 old_rd,
|
||||
struct hv_ring_buffer_info *rbi)
|
||||
static bool hv_need_to_signal_on_read(u32 prev_write_sz,
|
||||
struct hv_ring_buffer_info *rbi)
|
||||
{
|
||||
u32 prev_write_sz;
|
||||
u32 cur_write_sz;
|
||||
u32 r_size;
|
||||
u32 write_loc = rbi->ring_buffer->write_index;
|
||||
|
@ -123,10 +122,6 @@ static bool hv_need_to_signal_on_read(u32 old_rd,
|
|||
cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
|
||||
read_loc - write_loc;
|
||||
|
||||
prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
|
||||
old_rd - write_loc;
|
||||
|
||||
|
||||
if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
|
||||
return true;
|
||||
|
||||
|
@ -517,7 +512,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
|
|||
u32 next_read_location = 0;
|
||||
u64 prev_indices = 0;
|
||||
unsigned long flags;
|
||||
u32 old_read;
|
||||
|
||||
if (buflen <= 0)
|
||||
return -EINVAL;
|
||||
|
@ -528,8 +522,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
|
|||
&bytes_avail_toread,
|
||||
&bytes_avail_towrite);
|
||||
|
||||
old_read = bytes_avail_toread;
|
||||
|
||||
/* Make sure there is something to read */
|
||||
if (bytes_avail_toread < buflen) {
|
||||
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
|
||||
|
@ -560,7 +552,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
|
|||
|
||||
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
|
||||
|
||||
*signal = hv_need_to_signal_on_read(old_read, inring_info);
|
||||
*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -39,6 +39,8 @@
|
|||
#include <asm/mshyperv.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/screen_info.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include "hyperv_vmbus.h"
|
||||
|
||||
static struct acpi_device *hv_acpi_dev;
|
||||
|
@ -48,12 +50,18 @@ static struct completion probe_event;
|
|||
static int irq;
|
||||
|
||||
|
||||
static int hyperv_panic_event(struct notifier_block *nb,
|
||||
unsigned long event, void *ptr)
|
||||
static void hyperv_report_panic(struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
static bool panic_reported;
|
||||
|
||||
regs = current_pt_regs();
|
||||
/*
|
||||
* We prefer to report panic on 'die' chain as we have proper
|
||||
* registers to report, but if we miss it (e.g. on BUG()) we need
|
||||
* to report it on 'panic'.
|
||||
*/
|
||||
if (panic_reported)
|
||||
return;
|
||||
panic_reported = true;
|
||||
|
||||
wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
|
||||
wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
|
||||
|
@ -65,18 +73,37 @@ static int hyperv_panic_event(struct notifier_block *nb,
|
|||
* Let Hyper-V know there is crash data available
|
||||
*/
|
||||
wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
|
||||
}
|
||||
|
||||
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
|
||||
void *args)
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
|
||||
regs = current_pt_regs();
|
||||
|
||||
hyperv_report_panic(regs);
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
|
||||
void *args)
|
||||
{
|
||||
struct die_args *die = (struct die_args *)args;
|
||||
struct pt_regs *regs = die->regs;
|
||||
|
||||
hyperv_report_panic(regs);
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static struct notifier_block hyperv_die_block = {
|
||||
.notifier_call = hyperv_die_event,
|
||||
};
|
||||
static struct notifier_block hyperv_panic_block = {
|
||||
.notifier_call = hyperv_panic_event,
|
||||
};
|
||||
|
||||
struct resource hyperv_mmio = {
|
||||
.name = "hyperv mmio",
|
||||
.flags = IORESOURCE_MEM,
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(hyperv_mmio);
|
||||
struct resource *hyperv_mmio;
|
||||
|
||||
static int vmbus_exists(void)
|
||||
{
|
||||
|
@@ -414,6 +441,43 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				     buf_size - tot_written,
				     "%u:%u\n",
				     cur_sc->offermsg.child_relid,
				     cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
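This is the channel_vp_mapping attribute documented in the sysfs ABI update at the top of the pull: one "child_relid:target_cpu" pair per line, primary channel first, then any sub-channels. A small userspace reader (the device directory name below is a made-up example; real names vary per guest):

/* Userspace sketch: dump the channel-to-VP mapping exposed above. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/vmbus/devices/vmbus_0_1/channel_vp_mapping";
	char line[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int relid, cpu;

		if (sscanf(line, "%u:%u", &relid, &cpu) == 2)
			printf("channel %u is bound to CPU %u\n", relid, cpu);
	}
	fclose(f);
	return 0;
}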
||||
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
|
||||
static struct attribute *vmbus_attrs[] = {
|
||||
&dev_attr_id.attr,
|
||||
|
@ -438,6 +502,7 @@ static struct attribute *vmbus_attrs[] = {
|
|||
&dev_attr_in_write_index.attr,
|
||||
&dev_attr_in_read_bytes_avail.attr,
|
||||
&dev_attr_in_write_bytes_avail.attr,
|
||||
&dev_attr_channel_vp_mapping.attr,
|
||||
NULL,
|
||||
};
|
||||
ATTRIBUTE_GROUPS(vmbus);
|
||||
|
@ -763,38 +828,6 @@ static void vmbus_isr(void)
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static int hyperv_cpu_disable(void)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
|
||||
{
|
||||
static void *previous_cpu_disable;
|
||||
|
||||
/*
|
||||
* Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
|
||||
* ...) is not supported at this moment as channel interrupts are
|
||||
* distributed across all of them.
|
||||
*/
|
||||
|
||||
if ((vmbus_proto_version == VERSION_WS2008) ||
|
||||
(vmbus_proto_version == VERSION_WIN7))
|
||||
return;
|
||||
|
||||
if (vmbus_loaded) {
|
||||
previous_cpu_disable = smp_ops.cpu_disable;
|
||||
smp_ops.cpu_disable = hyperv_cpu_disable;
|
||||
pr_notice("CPU offlining is not supported by hypervisor\n");
|
||||
} else if (previous_cpu_disable)
|
||||
smp_ops.cpu_disable = previous_cpu_disable;
|
||||
}
|
||||
#else
|
||||
static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* vmbus_bus_init -Main vmbus driver initialization routine.
|
||||
|
@ -836,12 +869,14 @@ static int vmbus_bus_init(int irq)
|
|||
if (ret)
|
||||
goto err_alloc;
|
||||
|
||||
hv_cpu_hotplug_quirk(true);
|
||||
if (vmbus_proto_version > VERSION_WIN7)
|
||||
cpu_hotplug_disable();
|
||||
|
||||
/*
|
||||
* Only register if the crash MSRs are available
|
||||
*/
|
||||
if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
|
||||
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
|
||||
register_die_notifier(&hyperv_die_block);
|
||||
atomic_notifier_chain_register(&panic_notifier_list,
|
||||
&hyperv_panic_block);
|
||||
}
|
||||
|
@ -863,8 +898,8 @@ err_cleanup:
|
|||
}
|
||||
|
||||
/**
|
||||
* __vmbus_child_driver_register - Register a vmbus's driver
|
||||
* @drv: Pointer to driver structure you want to register
|
||||
* __vmbus_child_driver_register() - Register a vmbus's driver
|
||||
* @hv_driver: Pointer to driver structure you want to register
|
||||
* @owner: owner module of the drv
|
||||
* @mod_name: module name string
|
||||
*
|
||||
|
@ -896,7 +931,8 @@ EXPORT_SYMBOL_GPL(__vmbus_driver_register);
|
|||
|
||||
/**
|
||||
* vmbus_driver_unregister() - Unregister a vmbus's driver
|
||||
* @drv: Pointer to driver structure you want to un-register
|
||||
* @hv_driver: Pointer to driver structure you want to
|
||||
* un-register
|
||||
*
|
||||
* Un-register the given driver that was previous registered with a call to
|
||||
* vmbus_driver_register()
|
||||
|
@ -982,30 +1018,184 @@ void vmbus_device_unregister(struct hv_device *device_obj)
|
|||
|
||||
|
||||
/*
|
||||
* VMBUS is an acpi enumerated device. Get the the information we
|
||||
* VMBUS is an acpi enumerated device. Get the information we
|
||||
* need from DSDT.
|
||||
*/
|
||||
|
||||
#define VTPM_BASE_ADDRESS 0xfed40000
|
||||
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
|
||||
{
|
||||
resource_size_t start = 0;
|
||||
resource_size_t end = 0;
|
||||
struct resource *new_res;
|
||||
struct resource **old_res = &hyperv_mmio;
|
||||
struct resource **prev_res = NULL;
|
||||
|
||||
switch (res->type) {
|
||||
case ACPI_RESOURCE_TYPE_IRQ:
|
||||
irq = res->data.irq.interrupts[0];
|
||||
return AE_OK;
|
||||
|
||||
/*
|
||||
* "Address" descriptors are for bus windows. Ignore
|
||||
* "memory" descriptors, which are for registers on
|
||||
* devices.
|
||||
*/
|
||||
case ACPI_RESOURCE_TYPE_ADDRESS32:
|
||||
start = res->data.address32.address.minimum;
|
||||
end = res->data.address32.address.maximum;
|
||||
break;
|
||||
|
||||
case ACPI_RESOURCE_TYPE_ADDRESS64:
|
||||
hyperv_mmio.start = res->data.address64.address.minimum;
|
||||
hyperv_mmio.end = res->data.address64.address.maximum;
|
||||
start = res->data.address64.address.minimum;
|
||||
end = res->data.address64.address.maximum;
|
||||
break;
|
||||
|
||||
default:
|
||||
/* Unused resource type */
|
||||
return AE_OK;
|
||||
|
||||
}
|
||||
/*
|
||||
* Ignore ranges that are below 1MB, as they're not
|
||||
* necessary or useful here.
|
||||
*/
|
||||
if (end < 0x100000)
|
||||
return AE_OK;
|
||||
|
||||
new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
|
||||
if (!new_res)
|
||||
return AE_NO_MEMORY;
|
||||
|
||||
/* If this range overlaps the virtual TPM, truncate it. */
|
||||
if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
|
||||
end = VTPM_BASE_ADDRESS;
|
||||
|
||||
new_res->name = "hyperv mmio";
|
||||
new_res->flags = IORESOURCE_MEM;
|
||||
new_res->start = start;
|
||||
new_res->end = end;
|
||||
|
||||
do {
|
||||
if (!*old_res) {
|
||||
*old_res = new_res;
|
||||
break;
|
||||
}
|
||||
|
||||
if ((*old_res)->end < new_res->start) {
|
||||
new_res->sibling = *old_res;
|
||||
if (prev_res)
|
||||
(*prev_res)->sibling = new_res;
|
||||
*old_res = new_res;
|
||||
break;
|
||||
}
|
||||
|
||||
prev_res = old_res;
|
||||
old_res = &(*old_res)->sibling;
|
||||
|
||||
} while (1);
|
||||
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
static int vmbus_acpi_remove(struct acpi_device *device)
|
||||
{
|
||||
struct resource *cur_res;
|
||||
struct resource *next_res;
|
||||
|
||||
if (hyperv_mmio) {
|
||||
for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
|
||||
next_res = cur_res->sibling;
|
||||
kfree(cur_res);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
|
||||
* @new: If successful, supplied a pointer to the
|
||||
* allocated MMIO space.
|
||||
* @device_obj: Identifies the caller
|
||||
* @min: Minimum guest physical address of the
|
||||
* allocation
|
||||
* @max: Maximum guest physical address
|
||||
* @size: Size of the range to be allocated
|
||||
* @align: Alignment of the range to be allocated
|
||||
* @fb_overlap_ok: Whether this allocation can be allowed
|
||||
* to overlap the video frame buffer.
|
||||
*
|
||||
* This function walks the resources granted to VMBus by the
|
||||
* _CRS object in the ACPI namespace underneath the parent
|
||||
* "bridge" whether that's a root PCI bus in the Generation 1
|
||||
* case or a Module Device in the Generation 2 case. It then
|
||||
* attempts to allocate from the global MMIO pool in a way that
|
||||
* matches the constraints supplied in these parameters and by
|
||||
* that _CRS.
|
||||
*
|
||||
* Return: 0 on success, -errno on failure
|
||||
*/
|
||||
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
|
||||
resource_size_t min, resource_size_t max,
|
||||
resource_size_t size, resource_size_t align,
|
||||
bool fb_overlap_ok)
|
||||
{
|
||||
struct resource *iter;
|
||||
resource_size_t range_min, range_max, start, local_min, local_max;
|
||||
const char *dev_n = dev_name(&device_obj->device);
|
||||
u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
|
||||
int i;
|
||||
|
||||
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
|
||||
if ((iter->start >= max) || (iter->end <= min))
|
||||
continue;
|
||||
|
||||
range_min = iter->start;
|
||||
range_max = iter->end;
|
||||
|
||||
/* If this range overlaps the frame buffer, split it into
|
||||
two tries. */
|
||||
for (i = 0; i < 2; i++) {
|
||||
local_min = range_min;
|
||||
local_max = range_max;
|
||||
if (fb_overlap_ok || (range_min >= fb_end) ||
|
||||
(range_max <= screen_info.lfb_base)) {
|
||||
i++;
|
||||
} else {
|
||||
if ((range_min <= screen_info.lfb_base) &&
|
||||
(range_max >= screen_info.lfb_base)) {
|
||||
/*
|
||||
* The frame buffer is in this window,
|
||||
* so trim this into the part that
|
||||
* preceeds the frame buffer.
|
||||
*/
|
||||
local_max = screen_info.lfb_base - 1;
|
||||
range_min = fb_end;
|
||||
} else {
|
||||
range_min = fb_end;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
start = (local_min + align - 1) & ~(align - 1);
|
||||
for (; start + size - 1 <= local_max; start += align) {
|
||||
*new = request_mem_region_exclusive(start, size,
|
||||
dev_n);
|
||||
if (*new)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return -ENXIO;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
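vmbus_allocate_mmio() lets a VMBus child driver carve an MMIO window out of the ranges the _CRS walk above collected into hyperv_mmio. A hedged sketch of how a caller might use it; example_* names are hypothetical, the constraints are arbitrary, and error unwinding is trimmed, so this is not how any particular in-tree driver is written:

/* Sketch of a vmbus_allocate_mmio() caller (illustrative only). */
static int example_map_mmio(struct hv_device *example_dev, void __iomem **regs)
{
	struct resource *mmio;
	int ret;

	/* Ask for a 1 MiB window anywhere below 4 GiB, page aligned,
	 * refusing to overlap the firmware frame buffer. */
	ret = vmbus_allocate_mmio(&mmio, example_dev, 0, 0xffffffff,
				  0x100000, PAGE_SIZE, false);
	if (ret)
		return ret;

	*regs = ioremap(mmio->start, resource_size(mmio));
	if (!*regs)
		return -ENOMEM;
	return 0;
}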
|
||||
|
||||
static int vmbus_acpi_add(struct acpi_device *device)
|
||||
{
|
||||
acpi_status result;
|
||||
int ret_val = -ENODEV;
|
||||
struct acpi_device *ancestor;
|
||||
|
||||
hv_acpi_dev = device;
|
||||
|
||||
|
@ -1015,35 +1205,27 @@ static int vmbus_acpi_add(struct acpi_device *device)
|
|||
if (ACPI_FAILURE(result))
|
||||
goto acpi_walk_err;
|
||||
/*
|
||||
* The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
|
||||
* has the mmio ranges. Get that.
|
||||
* Some ancestor of the vmbus acpi device (Gen1 or Gen2
|
||||
* firmware) is the VMOD that has the mmio ranges. Get that.
|
||||
*/
|
||||
if (device->parent) {
|
||||
result = acpi_walk_resources(device->parent->handle,
|
||||
METHOD_NAME__CRS,
|
||||
vmbus_walk_resources, NULL);
|
||||
for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
|
||||
result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
|
||||
vmbus_walk_resources, NULL);
|
||||
|
||||
if (ACPI_FAILURE(result))
|
||||
goto acpi_walk_err;
|
||||
if (hyperv_mmio.start && hyperv_mmio.end)
|
||||
request_resource(&iomem_resource, &hyperv_mmio);
|
||||
continue;
|
||||
if (hyperv_mmio)
|
||||
break;
|
||||
}
|
||||
ret_val = 0;
|
||||
|
||||
acpi_walk_err:
|
||||
complete(&probe_event);
|
||||
if (ret_val)
|
||||
vmbus_acpi_remove(device);
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
static int vmbus_acpi_remove(struct acpi_device *device)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (hyperv_mmio.start && hyperv_mmio.end)
|
||||
ret = release_resource(&hyperv_mmio);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
|
||||
{"VMBUS", 0},
|
||||
{"VMBus", 0},
|
||||
|
@ -1060,6 +1242,29 @@ static struct acpi_driver vmbus_acpi_driver = {
|
|||
},
|
||||
};
|
||||
|
||||
static void hv_kexec_handler(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
hv_synic_clockevents_cleanup();
|
||||
vmbus_initiate_unload();
|
||||
for_each_online_cpu(cpu)
|
||||
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
|
||||
hv_cleanup();
|
||||
};
|
||||
|
||||
static void hv_crash_handler(struct pt_regs *regs)
|
||||
{
|
||||
vmbus_initiate_unload();
|
||||
/*
|
||||
* In crash handler we can't schedule synic cleanup for all CPUs,
|
||||
* doing the cleanup for current CPU only. This should be sufficient
|
||||
* for kdump.
|
||||
*/
|
||||
hv_synic_cleanup(NULL);
|
||||
hv_cleanup();
|
||||
};
|
||||
|
||||
static int __init hv_acpi_init(void)
|
||||
{
|
||||
int ret, t;
|
||||
|
@ -1092,6 +1297,9 @@ static int __init hv_acpi_init(void)
|
|||
if (ret)
|
||||
goto cleanup;
|
||||
|
||||
hv_setup_kexec_handler(hv_kexec_handler);
|
||||
hv_setup_crash_handler(hv_crash_handler);
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
|
@ -1104,13 +1312,16 @@ static void __exit vmbus_exit(void)
|
|||
{
|
||||
int cpu;
|
||||
|
||||
hv_remove_kexec_handler();
|
||||
hv_remove_crash_handler();
|
||||
vmbus_connection.conn_state = DISCONNECTED;
|
||||
hv_synic_clockevents_cleanup();
|
||||
vmbus_disconnect();
|
||||
hv_remove_vmbus_irq();
|
||||
tasklet_kill(&msg_dpc);
|
||||
vmbus_free_channels();
|
||||
if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
|
||||
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
|
||||
unregister_die_notifier(&hyperv_die_block);
|
||||
atomic_notifier_chain_unregister(&panic_notifier_list,
|
||||
&hyperv_panic_block);
|
||||
}
|
||||
|
@ -1120,8 +1331,10 @@ static void __exit vmbus_exit(void)
|
|||
tasklet_kill(hv_context.event_dpc[cpu]);
|
||||
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
|
||||
}
|
||||
hv_synic_free();
|
||||
acpi_bus_unregister_driver(&vmbus_acpi_driver);
|
||||
hv_cpu_hotplug_quirk(false);
|
||||
if (vmbus_proto_version > VERSION_WIN7)
|
||||
cpu_hotplug_enable();
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -183,7 +183,9 @@
|
|||
* @seq_13_event: event causing the transition from 1 to 3.
|
||||
* @seq_curr_state: current value of the sequencer register.
|
||||
* @ctxid_idx: index for the context ID registers.
|
||||
* @ctxid_val: value for the context ID to trigger on.
|
||||
* @ctxid_pid: value for the context ID to trigger on.
|
||||
* @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
|
||||
* the same value of ctxid_pid.
|
||||
* @ctxid_mask: mask applicable to all the context IDs.
|
||||
* @sync_freq: Synchronisation frequency.
|
||||
* @timestamp_event: Defines an event that requests the insertion
|
||||
|
@ -235,7 +237,8 @@ struct etm_drvdata {
|
|||
u32 seq_13_event;
|
||||
u32 seq_curr_state;
|
||||
u8 ctxid_idx;
|
||||
u32 ctxid_val[ETM_MAX_CTXID_CMP];
|
||||
u32 ctxid_pid[ETM_MAX_CTXID_CMP];
|
||||
u32 ctxid_vpid[ETM_MAX_CTXID_CMP];
|
||||
u32 ctxid_mask;
|
||||
u32 sync_freq;
|
||||
u32 timestamp_event;
|
||||
|
|
|
@ -237,8 +237,11 @@ static void etm_set_default(struct etm_drvdata *drvdata)
|
|||
|
||||
drvdata->seq_curr_state = 0x0;
|
||||
drvdata->ctxid_idx = 0x0;
|
||||
for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
|
||||
drvdata->ctxid_val[i] = 0x0;
|
||||
for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
|
||||
drvdata->ctxid_pid[i] = 0x0;
|
||||
drvdata->ctxid_vpid[i] = 0x0;
|
||||
}
|
||||
|
||||
drvdata->ctxid_mask = 0x0;
|
||||
}
|
||||
|
||||
|
@ -289,7 +292,7 @@ static void etm_enable_hw(void *info)
|
|||
for (i = 0; i < drvdata->nr_ext_out; i++)
|
||||
etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
|
||||
for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
|
||||
etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
|
||||
etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
|
||||
etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
|
||||
etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
|
||||
/* No external input selected */
|
||||
|
@ -1386,38 +1389,41 @@ static ssize_t ctxid_idx_store(struct device *dev,
|
|||
}
|
||||
static DEVICE_ATTR_RW(ctxid_idx);
|
||||
|
||||
static ssize_t ctxid_val_show(struct device *dev,
|
||||
static ssize_t ctxid_pid_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
unsigned long val;
|
||||
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
|
||||
|
||||
spin_lock(&drvdata->spinlock);
|
||||
val = drvdata->ctxid_val[drvdata->ctxid_idx];
|
||||
val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
|
||||
spin_unlock(&drvdata->spinlock);
|
||||
|
||||
return sprintf(buf, "%#lx\n", val);
|
||||
}
|
||||
|
||||
static ssize_t ctxid_val_store(struct device *dev,
|
||||
static ssize_t ctxid_pid_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t size)
|
||||
{
|
||||
int ret;
|
||||
unsigned long val;
|
||||
unsigned long vpid, pid;
|
||||
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
|
||||
|
||||
ret = kstrtoul(buf, 16, &val);
|
||||
ret = kstrtoul(buf, 16, &vpid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pid = coresight_vpid_to_pid(vpid);
|
||||
|
||||
spin_lock(&drvdata->spinlock);
|
||||
drvdata->ctxid_val[drvdata->ctxid_idx] = val;
|
||||
drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
|
||||
drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
|
||||
spin_unlock(&drvdata->spinlock);
|
||||
|
||||
return size;
|
||||
}
|
||||
static DEVICE_ATTR_RW(ctxid_val);
|
||||
static DEVICE_ATTR_RW(ctxid_pid);
|
||||
|
||||
static ssize_t ctxid_mask_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
|
@ -1609,7 +1615,7 @@ static struct attribute *coresight_etm_attrs[] = {
|
|||
&dev_attr_seq_13_event.attr,
|
||||
&dev_attr_seq_curr_state.attr,
|
||||
&dev_attr_ctxid_idx.attr,
|
||||
&dev_attr_ctxid_val.attr,
|
||||
&dev_attr_ctxid_pid.attr,
|
||||
&dev_attr_ctxid_mask.attr,
|
||||
&dev_attr_sync_freq.attr,
|
||||
&dev_attr_timestamp_event.attr,
|
||||
|
@ -1912,6 +1918,11 @@ static struct amba_id etm_ids[] = {
|
|||
.mask = 0x0003ffff,
|
||||
.data = "PTM 1.1",
|
||||
},
|
||||
{ /* PTM 1.1 Qualcomm */
|
||||
.id = 0x0003006f,
|
||||
.mask = 0x0003ffff,
|
||||
.data = "PTM 1.1",
|
||||
},
|
||||
{ 0, 0},
|
||||
};
|
||||
|
||||
|
|
|
@ -155,7 +155,7 @@ static void etm4_enable_hw(void *info)
|
|||
drvdata->base + TRCACATRn(i));
|
||||
}
|
||||
for (i = 0; i < drvdata->numcidc; i++)
|
||||
writeq_relaxed(drvdata->ctxid_val[i],
|
||||
writeq_relaxed(drvdata->ctxid_pid[i],
|
||||
drvdata->base + TRCCIDCVRn(i));
|
||||
writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
|
||||
writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
|
||||
|
@ -506,8 +506,11 @@ static ssize_t reset_store(struct device *dev,
|
|||
}
|
||||
|
||||
drvdata->ctxid_idx = 0x0;
|
||||
for (i = 0; i < drvdata->numcidc; i++)
|
||||
drvdata->ctxid_val[i] = 0x0;
|
||||
for (i = 0; i < drvdata->numcidc; i++) {
|
||||
drvdata->ctxid_pid[i] = 0x0;
|
||||
drvdata->ctxid_vpid[i] = 0x0;
|
||||
}
|
||||
|
||||
drvdata->ctxid_mask0 = 0x0;
|
||||
drvdata->ctxid_mask1 = 0x0;
|
||||
|
||||
|
@ -1815,7 +1818,7 @@ static ssize_t ctxid_idx_store(struct device *dev,
|
|||
}
|
||||
static DEVICE_ATTR_RW(ctxid_idx);
|
||||
|
||||
static ssize_t ctxid_val_show(struct device *dev,
|
||||
static ssize_t ctxid_pid_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
|
@ -1825,17 +1828,17 @@ static ssize_t ctxid_val_show(struct device *dev,
|
|||
|
||||
spin_lock(&drvdata->spinlock);
|
||||
idx = drvdata->ctxid_idx;
|
||||
val = (unsigned long)drvdata->ctxid_val[idx];
|
||||
val = (unsigned long)drvdata->ctxid_vpid[idx];
|
||||
spin_unlock(&drvdata->spinlock);
|
||||
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
|
||||
}
|
||||
|
||||
static ssize_t ctxid_val_store(struct device *dev,
|
||||
static ssize_t ctxid_pid_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t size)
|
||||
{
|
||||
u8 idx;
|
||||
unsigned long val;
|
||||
unsigned long vpid, pid;
|
||||
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
|
||||
|
||||
/*
|
||||
|
@ -1845,16 +1848,19 @@ static ssize_t ctxid_val_store(struct device *dev,
|
|||
*/
|
||||
if (!drvdata->ctxid_size || !drvdata->numcidc)
|
||||
return -EINVAL;
|
||||
if (kstrtoul(buf, 16, &val))
|
||||
if (kstrtoul(buf, 16, &vpid))
|
||||
return -EINVAL;
|
||||
|
||||
pid = coresight_vpid_to_pid(vpid);
|
||||
|
||||
spin_lock(&drvdata->spinlock);
|
||||
idx = drvdata->ctxid_idx;
|
||||
drvdata->ctxid_val[idx] = (u64)val;
|
||||
drvdata->ctxid_pid[idx] = (u64)pid;
|
||||
drvdata->ctxid_vpid[idx] = (u64)vpid;
|
||||
spin_unlock(&drvdata->spinlock);
|
||||
return size;
|
||||
}
|
||||
static DEVICE_ATTR_RW(ctxid_val);
|
||||
static DEVICE_ATTR_RW(ctxid_pid);
|
||||
|
||||
static ssize_t ctxid_masks_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
|
@ -1949,7 +1955,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
|
|||
*/
|
||||
for (j = 0; j < 8; j++) {
|
||||
if (maskbyte & 1)
|
||||
drvdata->ctxid_val[i] &= ~(0xFF << (j * 8));
|
||||
drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
|
||||
maskbyte >>= 1;
|
||||
}
|
||||
/* Select the next ctxid comparator mask value */
|
||||
|
@ -2193,7 +2199,7 @@ static struct attribute *coresight_etmv4_attrs[] = {
|
|||
&dev_attr_res_idx.attr,
|
||||
&dev_attr_res_ctrl.attr,
|
||||
&dev_attr_ctxid_idx.attr,
|
||||
&dev_attr_ctxid_val.attr,
|
||||
&dev_attr_ctxid_pid.attr,
|
||||
&dev_attr_ctxid_masks.attr,
|
||||
&dev_attr_vmid_idx.attr,
|
||||
&dev_attr_vmid_val.attr,
|
||||
|
@ -2513,8 +2519,11 @@ static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
|
|||
drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
|
||||
}
|
||||
|
||||
for (i = 0; i < drvdata->numcidc; i++)
|
||||
drvdata->ctxid_val[i] = 0x0;
|
||||
for (i = 0; i < drvdata->numcidc; i++) {
|
||||
drvdata->ctxid_pid[i] = 0x0;
|
||||
drvdata->ctxid_vpid[i] = 0x0;
|
||||
}
|
||||
|
||||
drvdata->ctxid_mask0 = 0x0;
|
||||
drvdata->ctxid_mask1 = 0x0;
|
||||
|
||||
|
|
|
@ -265,7 +265,9 @@
|
|||
* @addr_type: Current status of the comparator register.
|
||||
* @ctxid_idx: Context ID index selector.
|
||||
* @ctxid_size: Size of the context ID field to consider.
|
||||
* @ctxid_val: Value of the context ID comparator.
|
||||
* @ctxid_pid: Value of the context ID comparator.
|
||||
* @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
|
||||
* the same value of ctxid_pid.
|
||||
* @ctxid_mask0:Context ID comparator mask for comparator 0-3.
|
||||
* @ctxid_mask1:Context ID comparator mask for comparator 4-7.
|
||||
* @vmid_idx: VM ID index selector.
|
||||
|
@ -352,7 +354,8 @@ struct etmv4_drvdata {
|
|||
u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
|
||||
u8 ctxid_idx;
|
||||
u8 ctxid_size;
|
||||
u64 ctxid_val[ETMv4_MAX_CTXID_CMP];
|
||||
u64 ctxid_pid[ETMv4_MAX_CTXID_CMP];
|
||||
u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP];
|
||||
u32 ctxid_mask0;
|
||||
u32 ctxid_mask1;
|
||||
u8 vmid_idx;
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/io.h>
|
||||
|
@ -184,17 +183,7 @@ static struct platform_driver replicator_driver = {
|
|||
},
|
||||
};
|
||||
|
||||
static int __init replicator_init(void)
|
||||
{
|
||||
return platform_driver_register(&replicator_driver);
|
||||
}
|
||||
module_init(replicator_init);
|
||||
|
||||
static void __exit replicator_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&replicator_driver);
|
||||
}
|
||||
module_exit(replicator_exit);
|
||||
builtin_platform_driver(replicator_driver);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("CoreSight Replicator driver");
|
||||
|
|
|
@ -1919,9 +1919,7 @@ int __init dm_interface_init(void)
|
|||
|
||||
void dm_interface_exit(void)
|
||||
{
|
||||
if (misc_deregister(&_dm_misc) < 0)
|
||||
DMERR("misc_deregister failed for control device");
|
||||
|
||||
misc_deregister(&_dm_misc);
|
||||
dm_hash_exit();
|
||||
}
|
||||
|
||||
|
|
|
@ -271,6 +271,16 @@ config HP_ILO
|
|||
To compile this driver as a module, choose M here: the
|
||||
module will be called hpilo.
|
||||
|
||||
config QCOM_COINCELL
|
||||
tristate "Qualcomm coincell charger support"
|
||||
depends on MFD_SPMI_PMIC || COMPILE_TEST
|
||||
help
|
||||
This driver supports the coincell block found inside of
|
||||
Qualcomm PMICs. The coincell charger provides a means to
|
||||
charge a coincell battery or backup capacitor which is used
|
||||
to maintain PMIC register and RTC state in the absence of
|
||||
external power.
|
||||
|
||||
config SGI_GRU
|
||||
tristate "SGI GRU driver"
|
||||
depends on X86_UV && SMP
|
||||
|
|
|
@ -18,6 +18,7 @@ obj-$(CONFIG_LKDTM) += lkdtm.o
|
|||
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
|
||||
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
|
||||
obj-$(CONFIG_PHANTOM) += phantom.o
|
||||
obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
|
||||
obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
|
||||
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
|
||||
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
|
||||
|
|
|
@ -106,7 +106,6 @@ MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
|
|||
static struct i2c_driver ad_dpot_i2c_driver = {
|
||||
.driver = {
|
||||
.name = "ad_dpot",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
.probe = ad_dpot_i2c_probe,
|
||||
.remove = ad_dpot_i2c_remove,
|
||||
|
|
|
@ -1275,7 +1275,6 @@ static const struct dev_pm_ops apds990x_pm_ops = {
|
|||
static struct i2c_driver apds990x_driver = {
|
||||
.driver = {
|
||||
.name = "apds990x",
|
||||
.owner = THIS_MODULE,
|
||||
.pm = &apds990x_pm_ops,
|
||||
},
|
||||
.probe = apds990x_probe,
|
||||
|
|
|
@ -1396,7 +1396,6 @@ static const struct dev_pm_ops bh1770_pm_ops = {
|
|||
static struct i2c_driver bh1770_driver = {
|
||||
.driver = {
|
||||
.name = "bh1770glc",
|
||||
.owner = THIS_MODULE,
|
||||
.pm = &bh1770_pm_ops,
|
||||
},
|
||||
.probe = bh1770_probe,
|
||||
|
|
|
@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(i2c, bmp085_id);
|
|||
|
||||
static struct i2c_driver bmp085_i2c_driver = {
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = BMP085_NAME,
|
||||
},
|
||||
.id_table = bmp085_id,
|
||||
|
|
|
@ -443,12 +443,7 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
|
|||
struct afu_config_record *cr = to_cr(kobj);
|
||||
struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
|
||||
|
||||
u64 i, j, val, size = afu->crs_len;
|
||||
|
||||
if (off > size)
|
||||
return 0;
|
||||
if (off + count > size)
|
||||
count = size - off;
|
||||
u64 i, j, val;
|
||||
|
||||
for (i = 0; i < count;) {
|
||||
val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);
|
||||
|
|
|
@ -148,12 +148,6 @@ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
|
|||
dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n",
|
||||
buf, off, count);
|
||||
|
||||
if (off >= DS1682_EEPROM_SIZE)
|
||||
return 0;
|
||||
|
||||
if (off + count > DS1682_EEPROM_SIZE)
|
||||
count = DS1682_EEPROM_SIZE - off;
|
||||
|
||||
rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off,
|
||||
count, buf);
|
||||
if (rc < 0)
|
||||
|
@ -171,12 +165,6 @@ static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
|
|||
dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n",
|
||||
buf, off, count);
|
||||
|
||||
if (off >= DS1682_EEPROM_SIZE)
|
||||
return -ENOSPC;
|
||||
|
||||
if (off + count > DS1682_EEPROM_SIZE)
|
||||
count = DS1682_EEPROM_SIZE - off;
|
||||
|
||||
/* Write out to the device */
|
||||
if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off,
|
||||
count, buf) < 0)
|
||||
|
|
|
@ -96,17 +96,4 @@ config EEPROM_DIGSY_MTC_CFG
|
|||
|
||||
If unsure, say N.
|
||||
|
||||
config EEPROM_SUNXI_SID
|
||||
tristate "Allwinner sunxi security ID support"
|
||||
depends on ARCH_SUNXI && SYSFS
|
||||
help
|
||||
This is a driver for the 'security ID' available on various Allwinner
|
||||
devices.
|
||||
|
||||
Due to the potential risks involved with changing e-fuses,
|
||||
this driver is read-only.
|
||||
|
||||
This driver can also be built as a module. If so, the module
|
||||
will be called sunxi_sid.
|
||||
|
||||
endmenu
|
||||
|
|
|
@ -4,5 +4,4 @@ obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
|
|||
obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
|
||||
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
|
||||
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
|
||||
obj-$(CONFIG_EEPROM_SUNXI_SID) += sunxi_sid.o
|
||||
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
|
||||
|
|
|
@ -686,7 +686,6 @@ static int at24_remove(struct i2c_client *client)
|
|||
static struct i2c_driver at24_driver = {
|
||||
.driver = {
|
||||
.name = "at24",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
.probe = at24_probe,
|
||||
.remove = at24_remove,
|
||||
|
|
|
@ -88,11 +88,6 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
|
|||
struct eeprom_data *data = i2c_get_clientdata(client);
|
||||
u8 slice;
|
||||
|
||||
if (off > EEPROM_SIZE)
|
||||
return 0;
|
||||
if (off + count > EEPROM_SIZE)
|
||||
count = EEPROM_SIZE - off;
|
||||
|
||||
/* Only refresh slices which contain requested bytes */
|
||||
for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
|
||||
eeprom_update_client(client, slice);
|
||||
|
|
|
@ -48,13 +48,6 @@ eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
|
|||
dev = container_of(kobj, struct device, kobj);
|
||||
edev = dev_get_drvdata(dev);
|
||||
|
||||
if (unlikely(off >= edev->bin.size))
|
||||
return 0;
|
||||
if ((off + count) > edev->bin.size)
|
||||
count = edev->bin.size - off;
|
||||
if (unlikely(!count))
|
||||
return count;
|
||||
|
||||
cmd_addr = OP_READ << edev->addrlen;
|
||||
|
||||
if (edev->addrlen == 7) {
|
||||
|
@ -200,13 +193,6 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
|
|||
dev = container_of(kobj, struct device, kobj);
|
||||
edev = dev_get_drvdata(dev);
|
||||
|
||||
if (unlikely(off >= edev->bin.size))
|
||||
return -EFBIG;
|
||||
if ((off + count) > edev->bin.size)
|
||||
count = edev->bin.size - off;
|
||||
if (unlikely(!count))
|
||||
return count;
|
||||
|
||||
/* only write even number of bytes on 16-bit devices */
|
||||
if (edev->addrlen == 6) {
|
||||
step = 2;
|
||||
|
|
|
@@ -114,12 +114,6 @@ static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
|
|||
struct max6875_data *data = i2c_get_clientdata(client);
|
||||
int slice, max_slice;
|
||||
|
||||
if (off > USER_EEPROM_SIZE)
|
||||
return 0;
|
||||
|
||||
if (off + count > USER_EEPROM_SIZE)
|
||||
count = USER_EEPROM_SIZE - off;
|
||||
|
||||
/* refresh slices which contain requested bytes */
|
||||
max_slice = (off + count - 1) >> SLICE_BITS;
|
||||
for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++)
|
||||
|
|
|
@@ -1,156 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
|
||||
* http://www.linux-sunxi.org
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* This driver exposes the Allwinner security ID, efuses exported in byte-
|
||||
* sized chunks.
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define DRV_NAME "sunxi-sid"
|
||||
|
||||
struct sunxi_sid_data {
|
||||
void __iomem *reg_base;
|
||||
unsigned int keysize;
|
||||
};
|
||||
|
||||
/* We read the entire key, due to a 32 bit read alignment requirement. Since we
|
||||
* want to return the requested byte, this results in somewhat slower code and
|
||||
* uses 4 times more reads as needed but keeps code simpler. Since the SID is
|
||||
* only very rarely probed, this is not really an issue.
|
||||
*/
|
||||
static u8 sunxi_sid_read_byte(const struct sunxi_sid_data *sid_data,
|
||||
const unsigned int offset)
|
||||
{
|
||||
u32 sid_key;
|
||||
|
||||
if (offset >= sid_data->keysize)
|
||||
return 0;
|
||||
|
||||
sid_key = ioread32be(sid_data->reg_base + round_down(offset, 4));
|
||||
sid_key >>= (offset % 4) * 8;
|
||||
|
||||
return sid_key; /* Only return the last byte */
|
||||
}
|
||||
|
||||
static ssize_t sid_read(struct file *fd, struct kobject *kobj,
|
||||
struct bin_attribute *attr, char *buf,
|
||||
loff_t pos, size_t size)
|
||||
{
|
||||
struct platform_device *pdev;
|
||||
struct sunxi_sid_data *sid_data;
|
||||
int i;
|
||||
|
||||
pdev = to_platform_device(kobj_to_dev(kobj));
|
||||
sid_data = platform_get_drvdata(pdev);
|
||||
|
||||
if (pos < 0 || pos >= sid_data->keysize)
|
||||
return 0;
|
||||
if (size > sid_data->keysize - pos)
|
||||
size = sid_data->keysize - pos;
|
||||
|
||||
for (i = 0; i < size; i++)
|
||||
buf[i] = sunxi_sid_read_byte(sid_data, pos + i);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static struct bin_attribute sid_bin_attr = {
|
||||
.attr = { .name = "eeprom", .mode = S_IRUGO, },
|
||||
.read = sid_read,
|
||||
};
|
||||
|
||||
static int sunxi_sid_remove(struct platform_device *pdev)
|
||||
{
|
||||
device_remove_bin_file(&pdev->dev, &sid_bin_attr);
|
||||
dev_dbg(&pdev->dev, "driver unloaded\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id sunxi_sid_of_match[] = {
|
||||
{ .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16},
|
||||
{ .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512},
|
||||
{/* sentinel */},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
|
||||
|
||||
static int sunxi_sid_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct sunxi_sid_data *sid_data;
|
||||
struct resource *res;
|
||||
const struct of_device_id *of_dev_id;
|
||||
u8 *entropy;
|
||||
unsigned int i;
|
||||
|
||||
sid_data = devm_kzalloc(&pdev->dev, sizeof(struct sunxi_sid_data),
|
||||
GFP_KERNEL);
|
||||
if (!sid_data)
|
||||
return -ENOMEM;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
sid_data->reg_base = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(sid_data->reg_base))
|
||||
return PTR_ERR(sid_data->reg_base);
|
||||
|
||||
of_dev_id = of_match_device(sunxi_sid_of_match, &pdev->dev);
|
||||
if (!of_dev_id)
|
||||
return -ENODEV;
|
||||
sid_data->keysize = (int)of_dev_id->data;
|
||||
|
||||
platform_set_drvdata(pdev, sid_data);
|
||||
|
||||
sid_bin_attr.size = sid_data->keysize;
|
||||
if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
|
||||
return -ENODEV;
|
||||
|
||||
entropy = kzalloc(sizeof(u8) * sid_data->keysize, GFP_KERNEL);
|
||||
for (i = 0; i < sid_data->keysize; i++)
|
||||
entropy[i] = sunxi_sid_read_byte(sid_data, i);
|
||||
add_device_randomness(entropy, sid_data->keysize);
|
||||
kfree(entropy);
|
||||
|
||||
dev_dbg(&pdev->dev, "loaded\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver sunxi_sid_driver = {
|
||||
.probe = sunxi_sid_probe,
|
||||
.remove = sunxi_sid_remove,
|
||||
.driver = {
|
||||
.name = DRV_NAME,
|
||||
.of_match_table = sunxi_sid_of_match,
|
||||
},
|
||||
};
|
||||
module_platform_driver(sunxi_sid_driver);
|
||||
|
||||
MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
|
||||
MODULE_DESCRIPTION("Allwinner sunxi security id driver");
|
||||
MODULE_LICENSE("GPL");
|
|
@@ -465,7 +465,6 @@ MODULE_DEVICE_TABLE(i2c, isl29003_id);
|
|||
static struct i2c_driver isl29003_driver = {
|
||||
.driver = {
|
||||
.name = ISL29003_DRV_NAME,
|
||||
.owner = THIS_MODULE,
|
||||
.pm = ISL29003_PM_OPS,
|
||||
},
|
||||
.probe = isl29003_probe,
|
||||
|
|
|
@@ -274,7 +274,6 @@ static const struct dev_pm_ops lis3_pm_ops = {
|
|||
static struct i2c_driver lis3lv02d_i2c_driver = {
|
||||
.driver = {
|
||||
.name = DRV_NAME,
|
||||
.owner = THIS_MODULE,
|
||||
.pm = &lis3_pm_ops,
|
||||
.of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids),
|
||||
},
|
||||
|
|
|
@@ -11,7 +11,7 @@ mei-objs += main.o
|
|||
mei-objs += amthif.o
|
||||
mei-objs += wd.o
|
||||
mei-objs += bus.o
|
||||
mei-objs += nfc.o
|
||||
mei-objs += bus-fixup.o
|
||||
mei-$(CONFIG_DEBUG_FS) += debugfs.o
|
||||
|
||||
obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o
|
||||
|
|
|
@@ -0,0 +1,306 @@
|
|||
/*
|
||||
*
|
||||
* Intel Management Engine Interface (Intel MEI) Linux driver
|
||||
* Copyright (c) 2003-2013, Intel Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uuid.h>
|
||||
|
||||
#include <linux/mei_cl_bus.h>
|
||||
|
||||
#include "mei_dev.h"
|
||||
#include "client.h"
|
||||
|
||||
#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
|
||||
0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
|
||||
|
||||
static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
|
||||
|
||||
#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
|
||||
0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
|
||||
|
||||
#define MEI_UUID_ANY NULL_UUID_LE
|
||||
|
||||
/**
|
||||
* number_of_connections - determine whether a client can be on the bus
|
||||
* according to the number of connections
|
||||
* We support only clients:
|
||||
* 1. with single connection
|
||||
* 2. and fixed clients (max_number_of_connections == 0)
|
||||
*
|
||||
* @cldev: me clients device
|
||||
*/
|
||||
static void number_of_connections(struct mei_cl_device *cldev)
|
||||
{
|
||||
dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
|
||||
__func__, mei_me_cl_uuid(cldev->me_cl));
|
||||
|
||||
if (cldev->me_cl->props.max_number_of_connections > 1)
|
||||
cldev->do_match = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* blacklist - blacklist a client from the bus
|
||||
*
|
||||
* @cldev: me clients device
|
||||
*/
|
||||
static void blacklist(struct mei_cl_device *cldev)
|
||||
{
|
||||
dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
|
||||
__func__, mei_me_cl_uuid(cldev->me_cl));
|
||||
cldev->do_match = 0;
|
||||
}
|
||||
|
||||
struct mei_nfc_cmd {
|
||||
u8 command;
|
||||
u8 status;
|
||||
u16 req_id;
|
||||
u32 reserved;
|
||||
u16 data_size;
|
||||
u8 sub_command;
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct mei_nfc_reply {
|
||||
u8 command;
|
||||
u8 status;
|
||||
u16 req_id;
|
||||
u32 reserved;
|
||||
u16 data_size;
|
||||
u8 sub_command;
|
||||
u8 reply_status;
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct mei_nfc_if_version {
|
||||
u8 radio_version_sw[3];
|
||||
u8 reserved[3];
|
||||
u8 radio_version_hw[3];
|
||||
u8 i2c_addr;
|
||||
u8 fw_ivn;
|
||||
u8 vendor_id;
|
||||
u8 radio_type;
|
||||
} __packed;
|
||||
|
||||
|
||||
#define MEI_NFC_CMD_MAINTENANCE 0x00
|
||||
#define MEI_NFC_SUBCMD_IF_VERSION 0x01
|
||||
|
||||
/* Vendors */
|
||||
#define MEI_NFC_VENDOR_INSIDE 0x00
|
||||
#define MEI_NFC_VENDOR_NXP 0x01
|
||||
|
||||
/* Radio types */
|
||||
#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
|
||||
#define MEI_NFC_VENDOR_NXP_PN544 0x01
|
||||
|
||||
/**
|
||||
* mei_nfc_if_version - get NFC interface version
|
||||
*
|
||||
* @cl: host client (nfc info)
|
||||
* @ver: NFC interface version to be filled in
|
||||
*
|
||||
* Return: 0 on success; < 0 otherwise
|
||||
*/
|
||||
static int mei_nfc_if_version(struct mei_cl *cl,
|
||||
struct mei_nfc_if_version *ver)
|
||||
{
|
||||
struct mei_device *bus;
|
||||
struct mei_nfc_cmd cmd = {
|
||||
.command = MEI_NFC_CMD_MAINTENANCE,
|
||||
.data_size = 1,
|
||||
.sub_command = MEI_NFC_SUBCMD_IF_VERSION,
|
||||
};
|
||||
struct mei_nfc_reply *reply = NULL;
|
||||
size_t if_version_length;
|
||||
int bytes_recv, ret;
|
||||
|
||||
bus = cl->dev;
|
||||
|
||||
WARN_ON(mutex_is_locked(&bus->device_lock));
|
||||
|
||||
ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
|
||||
if (ret < 0) {
|
||||
dev_err(bus->dev, "Could not send IF version cmd\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* to be sure on the stack we alloc memory */
|
||||
if_version_length = sizeof(struct mei_nfc_reply) +
|
||||
sizeof(struct mei_nfc_if_version);
|
||||
|
||||
reply = kzalloc(if_version_length, GFP_KERNEL);
|
||||
if (!reply)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = 0;
|
||||
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
|
||||
if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
|
||||
dev_err(bus->dev, "Could not read IF version\n");
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version));
|
||||
|
||||
dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
|
||||
ver->fw_ivn, ver->vendor_id, ver->radio_type);
|
||||
|
||||
err:
|
||||
kfree(reply);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_nfc_radio_name - derive nfc radio name from the interface version
|
||||
*
|
||||
* @ver: NFC radio version
|
||||
*
|
||||
* Return: radio name string
|
||||
*/
|
||||
static const char *mei_nfc_radio_name(struct mei_nfc_if_version *ver)
|
||||
{
|
||||
|
||||
if (ver->vendor_id == MEI_NFC_VENDOR_INSIDE) {
|
||||
if (ver->radio_type == MEI_NFC_VENDOR_INSIDE_UREAD)
|
||||
return "microread";
|
||||
}
|
||||
|
||||
if (ver->vendor_id == MEI_NFC_VENDOR_NXP) {
|
||||
if (ver->radio_type == MEI_NFC_VENDOR_NXP_PN544)
|
||||
return "pn544";
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_nfc - The nfc fixup function. The function retrieves nfc radio
|
||||
* name and sets it as a device attribute so we can load
|
||||
* the proper device driver for it
|
||||
*
|
||||
* @cldev: me client device (nfc)
|
||||
*/
|
||||
static void mei_nfc(struct mei_cl_device *cldev)
|
||||
{
|
||||
struct mei_device *bus;
|
||||
struct mei_cl *cl;
|
||||
struct mei_me_client *me_cl = NULL;
|
||||
struct mei_nfc_if_version ver;
|
||||
const char *radio_name = NULL;
|
||||
int ret;
|
||||
|
||||
bus = cldev->bus;
|
||||
|
||||
dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
|
||||
__func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
|
||||
|
||||
mutex_lock(&bus->device_lock);
|
||||
/* we need to connect to INFO GUID */
|
||||
cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
|
||||
if (IS_ERR(cl)) {
|
||||
ret = PTR_ERR(cl);
|
||||
cl = NULL;
|
||||
dev_err(bus->dev, "nfc hook alloc failed %d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
me_cl = mei_me_cl_by_uuid(bus, &mei_nfc_info_guid);
|
||||
if (!me_cl) {
|
||||
ret = -ENOTTY;
|
||||
dev_err(bus->dev, "Cannot find nfc info %d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = mei_cl_connect(cl, me_cl, NULL);
|
||||
if (ret < 0) {
|
||||
dev_err(&cldev->dev, "Can't connect to the NFC INFO ME ret = %d\n",
|
||||
ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_unlock(&bus->device_lock);
|
||||
|
||||
ret = mei_nfc_if_version(cl, &ver);
|
||||
if (ret)
|
||||
goto disconnect;
|
||||
|
||||
radio_name = mei_nfc_radio_name(&ver);
|
||||
|
||||
if (!radio_name) {
|
||||
ret = -ENOENT;
|
||||
dev_err(&cldev->dev, "Can't get the NFC interface version ret = %d\n",
|
||||
ret);
|
||||
goto disconnect;
|
||||
}
|
||||
|
||||
dev_dbg(bus->dev, "nfc radio %s\n", radio_name);
|
||||
strlcpy(cldev->name, radio_name, sizeof(cldev->name));
|
||||
|
||||
disconnect:
|
||||
mutex_lock(&bus->device_lock);
|
||||
if (mei_cl_disconnect(cl) < 0)
|
||||
dev_err(bus->dev, "Can't disconnect the NFC INFO ME\n");
|
||||
|
||||
mei_cl_flush_queues(cl, NULL);
|
||||
|
||||
out:
|
||||
mei_cl_unlink(cl);
|
||||
mutex_unlock(&bus->device_lock);
|
||||
mei_me_cl_put(me_cl);
|
||||
kfree(cl);
|
||||
|
||||
if (ret)
|
||||
cldev->do_match = 0;
|
||||
|
||||
dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match);
|
||||
}
|
||||
|
||||
#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
|
||||
|
||||
static struct mei_fixup {
|
||||
|
||||
const uuid_le uuid;
|
||||
void (*hook)(struct mei_cl_device *cldev);
|
||||
} mei_fixups[] = {
|
||||
MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
|
||||
MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
|
||||
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
|
||||
};
|
||||
|
||||
/**
|
||||
* mei_cl_dev_fixup - run fixup handlers
|
||||
*
|
||||
* @cldev: me client device
|
||||
*/
|
||||
void mei_cl_dev_fixup(struct mei_cl_device *cldev)
|
||||
{
|
||||
struct mei_fixup *f;
|
||||
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) {
|
||||
|
||||
f = &mei_fixups[i];
|
||||
if (uuid_le_cmp(f->uuid, MEI_UUID_ANY) == 0 ||
|
||||
uuid_le_cmp(f->uuid, *uuid) == 0)
|
||||
f->hook(cldev);
|
||||
}
|
||||
}
|
||||
|
[diff for one file not shown because of its size]
|
@@ -555,10 +555,10 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
|
|||
init_waitqueue_head(&cl->wait);
|
||||
init_waitqueue_head(&cl->rx_wait);
|
||||
init_waitqueue_head(&cl->tx_wait);
|
||||
init_waitqueue_head(&cl->ev_wait);
|
||||
INIT_LIST_HEAD(&cl->rd_completed);
|
||||
INIT_LIST_HEAD(&cl->rd_pending);
|
||||
INIT_LIST_HEAD(&cl->link);
|
||||
INIT_LIST_HEAD(&cl->device_link);
|
||||
cl->writing_state = MEI_IDLE;
|
||||
cl->state = MEI_FILE_INITIALIZING;
|
||||
cl->dev = dev;
|
||||
|
@@ -690,16 +690,12 @@ void mei_host_client_init(struct work_struct *work)
|
|||
mei_wd_host_init(dev, me_cl);
|
||||
mei_me_cl_put(me_cl);
|
||||
|
||||
me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
|
||||
if (me_cl)
|
||||
mei_nfc_host_init(dev, me_cl);
|
||||
mei_me_cl_put(me_cl);
|
||||
|
||||
|
||||
dev->dev_state = MEI_DEV_ENABLED;
|
||||
dev->reset_count = 0;
|
||||
mutex_unlock(&dev->device_lock);
|
||||
|
||||
mei_cl_bus_rescan(dev);
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
dev_dbg(dev->dev, "rpm: autosuspend\n");
|
||||
pm_runtime_autosuspend(dev->dev);
|
||||
|
@@ -841,45 +837,22 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* mei_cl_disconnect - disconnect host client from the me one
|
||||
* __mei_cl_disconnect - disconnect host client from the me one
|
||||
* internal function runtime pm has to be already acquired
|
||||
*
|
||||
* @cl: host client
|
||||
*
|
||||
* Locking: called under "dev->device_lock" lock
|
||||
*
|
||||
* Return: 0 on success, <0 on failure.
|
||||
*/
|
||||
int mei_cl_disconnect(struct mei_cl *cl)
|
||||
static int __mei_cl_disconnect(struct mei_cl *cl)
|
||||
{
|
||||
struct mei_device *dev;
|
||||
struct mei_cl_cb *cb;
|
||||
int rets;
|
||||
|
||||
if (WARN_ON(!cl || !cl->dev))
|
||||
return -ENODEV;
|
||||
|
||||
dev = cl->dev;
|
||||
|
||||
cl_dbg(dev, cl, "disconnecting");
|
||||
|
||||
if (!mei_cl_is_connected(cl))
|
||||
return 0;
|
||||
|
||||
if (mei_cl_is_fixed_address(cl)) {
|
||||
mei_cl_set_disconnected(cl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
rets = pm_runtime_get(dev->dev);
|
||||
if (rets < 0 && rets != -EINPROGRESS) {
|
||||
pm_runtime_put_noidle(dev->dev);
|
||||
cl_err(dev, cl, "rpm: get failed %d\n", rets);
|
||||
return rets;
|
||||
}
|
||||
|
||||
cl->state = MEI_FILE_DISCONNECTING;
|
||||
|
||||
cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
|
||||
|
@@ -915,11 +888,52 @@ out:
|
|||
if (!rets)
|
||||
cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
|
||||
|
||||
mei_io_cb_free(cb);
|
||||
return rets;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_cl_disconnect - disconnect host client from the me one
|
||||
*
|
||||
* @cl: host client
|
||||
*
|
||||
* Locking: called under "dev->device_lock" lock
|
||||
*
|
||||
* Return: 0 on success, <0 on failure.
|
||||
*/
|
||||
int mei_cl_disconnect(struct mei_cl *cl)
|
||||
{
|
||||
struct mei_device *dev;
|
||||
int rets;
|
||||
|
||||
if (WARN_ON(!cl || !cl->dev))
|
||||
return -ENODEV;
|
||||
|
||||
dev = cl->dev;
|
||||
|
||||
cl_dbg(dev, cl, "disconnecting");
|
||||
|
||||
if (!mei_cl_is_connected(cl))
|
||||
return 0;
|
||||
|
||||
if (mei_cl_is_fixed_address(cl)) {
|
||||
mei_cl_set_disconnected(cl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
rets = pm_runtime_get(dev->dev);
|
||||
if (rets < 0 && rets != -EINPROGRESS) {
|
||||
pm_runtime_put_noidle(dev->dev);
|
||||
cl_err(dev, cl, "rpm: get failed %d\n", rets);
|
||||
return rets;
|
||||
}
|
||||
|
||||
rets = __mei_cl_disconnect(cl);
|
||||
|
||||
cl_dbg(dev, cl, "rpm: autosuspend\n");
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
mei_io_cb_free(cb);
|
||||
return rets;
|
||||
}
|
||||
|
||||
|
@@ -1064,11 +1078,23 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
|
|||
mutex_unlock(&dev->device_lock);
|
||||
wait_event_timeout(cl->wait,
|
||||
(cl->state == MEI_FILE_CONNECTED ||
|
||||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
|
||||
cl->state == MEI_FILE_DISCONNECT_REPLY),
|
||||
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
if (!mei_cl_is_connected(cl)) {
|
||||
if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
|
||||
mei_io_list_flush(&dev->ctrl_rd_list, cl);
|
||||
mei_io_list_flush(&dev->ctrl_wr_list, cl);
|
||||
/* ignore disconnect return value;
|
||||
* in case of failure reset will be invoked
|
||||
*/
|
||||
__mei_cl_disconnect(cl);
|
||||
rets = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* timeout or something went really wrong */
|
||||
if (!cl->status)
|
||||
cl->status = -EFAULT;
|
||||
|
@@ -1180,6 +1206,221 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_cl_notify_fop2req - convert fop to proper request
|
||||
*
|
||||
* @fop: client notification start response command
|
||||
*
|
||||
* Return: MEI_HBM_NOTIFICATION_START/STOP
|
||||
*/
|
||||
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
|
||||
{
|
||||
if (fop == MEI_FOP_NOTIFY_START)
|
||||
return MEI_HBM_NOTIFICATION_START;
|
||||
else
|
||||
return MEI_HBM_NOTIFICATION_STOP;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_cl_notify_req2fop - convert notification request to file operation type
|
||||
*
|
||||
* @req: hbm notification request type
|
||||
*
|
||||
* Return: MEI_FOP_NOTIFY_START/STOP
|
||||
*/
|
||||
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
|
||||
{
|
||||
if (req == MEI_HBM_NOTIFICATION_START)
|
||||
return MEI_FOP_NOTIFY_START;
|
||||
else
|
||||
return MEI_FOP_NOTIFY_STOP;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_cl_irq_notify - send notification request in irq_thread context
|
||||
*
|
||||
* @cl: client
|
||||
* @cb: callback block.
|
||||
* @cmpl_list: complete list.
|
||||
*
|
||||
* Return: 0 on success and error otherwise.
|
||||
*/
|
||||
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
|
||||
struct mei_cl_cb *cmpl_list)
|
||||
{
|
||||
struct mei_device *dev = cl->dev;
|
||||
u32 msg_slots;
|
||||
int slots;
|
||||
int ret;
|
||||
bool request;
|
||||
|
||||
msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
|
||||
slots = mei_hbuf_empty_slots(dev);
|
||||
|
||||
if (slots < msg_slots)
|
||||
return -EMSGSIZE;
|
||||
|
||||
request = mei_cl_notify_fop2req(cb->fop_type);
|
||||
ret = mei_hbm_cl_notify_req(dev, cl, request);
|
||||
if (ret) {
|
||||
cl->status = ret;
|
||||
list_move_tail(&cb->list, &cmpl_list->list);
|
||||
return ret;
|
||||
}
|
||||
|
||||
list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_cl_notify_request - send notification stop/start request
|
||||
*
|
||||
* @cl: host client
|
||||
* @file: associate request with file
|
||||
* @request: 1 for start or 0 for stop
|
||||
*
|
||||
* Locking: called under "dev->device_lock" lock
|
||||
*
|
||||
* Return: 0 on success and error otherwise.
|
||||
*/
|
||||
int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
|
||||
{
|
||||
struct mei_device *dev;
|
||||
struct mei_cl_cb *cb;
|
||||
enum mei_cb_file_ops fop_type;
|
||||
int rets;
|
||||
|
||||
if (WARN_ON(!cl || !cl->dev))
|
||||
return -ENODEV;
|
||||
|
||||
dev = cl->dev;
|
||||
|
||||
if (!dev->hbm_f_ev_supported) {
|
||||
cl_dbg(dev, cl, "notifications not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
rets = pm_runtime_get(dev->dev);
|
||||
if (rets < 0 && rets != -EINPROGRESS) {
|
||||
pm_runtime_put_noidle(dev->dev);
|
||||
cl_err(dev, cl, "rpm: get failed %d\n", rets);
|
||||
return rets;
|
||||
}
|
||||
|
||||
fop_type = mei_cl_notify_req2fop(request);
|
||||
cb = mei_io_cb_init(cl, fop_type, file);
|
||||
if (!cb) {
|
||||
rets = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mei_hbuf_acquire(dev)) {
|
||||
if (mei_hbm_cl_notify_req(dev, cl, request)) {
|
||||
rets = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
|
||||
} else {
|
||||
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->device_lock);
|
||||
wait_event_timeout(cl->wait, cl->notify_en == request,
|
||||
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
if (cl->notify_en != request) {
|
||||
mei_io_list_flush(&dev->ctrl_rd_list, cl);
|
||||
mei_io_list_flush(&dev->ctrl_wr_list, cl);
|
||||
if (!cl->status)
|
||||
cl->status = -EFAULT;
|
||||
}
|
||||
|
||||
rets = cl->status;
|
||||
|
||||
out:
|
||||
cl_dbg(dev, cl, "rpm: autosuspend\n");
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
mei_io_cb_free(cb);
|
||||
return rets;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_cl_notify - raise notification
|
||||
*
|
||||
* @cl: host client
|
||||
*
|
||||
* Locking: called under "dev->device_lock" lock
|
||||
*/
|
||||
void mei_cl_notify(struct mei_cl *cl)
|
||||
{
|
||||
struct mei_device *dev;
|
||||
|
||||
if (!cl || !cl->dev)
|
||||
return;
|
||||
|
||||
dev = cl->dev;
|
||||
|
||||
if (!cl->notify_en)
|
||||
return;
|
||||
|
||||
cl_dbg(dev, cl, "notify event");
|
||||
cl->notify_ev = true;
|
||||
wake_up_interruptible_all(&cl->ev_wait);
|
||||
|
||||
if (cl->ev_async)
|
||||
kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
|
||||
|
||||
mei_cl_bus_notify_event(cl);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_cl_notify_get - get or wait for notification event
|
||||
*
|
||||
* @cl: host client
|
||||
* @block: this request is blocking
|
||||
* @notify_ev: true if notification event was received
|
||||
*
|
||||
* Locking: called under "dev->device_lock" lock
|
||||
*
|
||||
* Return: 0 on success and error otherwise.
|
||||
*/
|
||||
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
|
||||
{
|
||||
struct mei_device *dev;
|
||||
int rets;
|
||||
|
||||
*notify_ev = false;
|
||||
|
||||
if (WARN_ON(!cl || !cl->dev))
|
||||
return -ENODEV;
|
||||
|
||||
dev = cl->dev;
|
||||
|
||||
if (!mei_cl_is_connected(cl))
|
||||
return -ENODEV;
|
||||
|
||||
if (cl->notify_ev)
|
||||
goto out;
|
||||
|
||||
if (!block)
|
||||
return -EAGAIN;
|
||||
|
||||
mutex_unlock(&dev->device_lock);
|
||||
rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
if (rets < 0)
|
||||
return rets;
|
||||
|
||||
out:
|
||||
*notify_ev = cl->notify_ev;
|
||||
cl->notify_ev = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_cl_read_start - the start read client message function.
|
||||
*
|
||||
|
@@ -1356,6 +1597,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
|
|||
struct mei_device *dev;
|
||||
struct mei_msg_data *buf;
|
||||
struct mei_msg_hdr mei_hdr;
|
||||
int size;
|
||||
int rets;
|
||||
|
||||
|
||||
|
@@ -1367,10 +1609,10 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
|
|||
|
||||
dev = cl->dev;
|
||||
|
||||
|
||||
buf = &cb->buf;
|
||||
size = buf->size;
|
||||
|
||||
cl_dbg(dev, cl, "size=%d\n", buf->size);
|
||||
cl_dbg(dev, cl, "size=%d\n", size);
|
||||
|
||||
rets = pm_runtime_get(dev->dev);
|
||||
if (rets < 0 && rets != -EINPROGRESS) {
|
||||
|
@@ -1394,21 +1636,21 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
|
|||
|
||||
if (rets == 0) {
|
||||
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
|
||||
rets = buf->size;
|
||||
rets = size;
|
||||
goto out;
|
||||
}
|
||||
if (!mei_hbuf_acquire(dev)) {
|
||||
cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
|
||||
rets = buf->size;
|
||||
rets = size;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Check for a maximum length */
|
||||
if (buf->size > mei_hbuf_max_len(dev)) {
|
||||
if (size > mei_hbuf_max_len(dev)) {
|
||||
mei_hdr.length = mei_hbuf_max_len(dev);
|
||||
mei_hdr.msg_complete = 0;
|
||||
} else {
|
||||
mei_hdr.length = buf->size;
|
||||
mei_hdr.length = size;
|
||||
mei_hdr.msg_complete = 1;
|
||||
}
|
||||
|
||||
|
@@ -1430,6 +1672,7 @@ out:
|
|||
else
|
||||
list_add_tail(&cb->list, &dev->write_list.list);
|
||||
|
||||
cb = NULL;
|
||||
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
|
||||
|
||||
mutex_unlock(&dev->device_lock);
|
||||
|
@@ -1444,7 +1687,7 @@ out:
|
|||
}
|
||||
}
|
||||
|
||||
rets = buf->size;
|
||||
rets = size;
|
||||
err:
|
||||
cl_dbg(dev, cl, "rpm: autosuspend\n");
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
|
@@ -1486,6 +1729,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
|
|||
|
||||
case MEI_FOP_CONNECT:
|
||||
case MEI_FOP_DISCONNECT:
|
||||
case MEI_FOP_NOTIFY_STOP:
|
||||
case MEI_FOP_NOTIFY_START:
|
||||
if (waitqueue_active(&cl->wait))
|
||||
wake_up(&cl->wait);
|
||||
|
||||
|
@@ -1528,6 +1773,12 @@ void mei_cl_all_wakeup(struct mei_device *dev)
|
|||
cl_dbg(dev, cl, "Waking up writing client!\n");
|
||||
wake_up_interruptible(&cl->tx_wait);
|
||||
}
|
||||
|
||||
/* synchronized under device mutex */
|
||||
if (waitqueue_active(&cl->ev_wait)) {
|
||||
cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
|
||||
wake_up_interruptible(&cl->ev_wait);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -219,6 +219,14 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
|
|||
|
||||
void mei_host_client_init(struct work_struct *work);
|
||||
|
||||
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
|
||||
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
|
||||
int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
|
||||
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
|
||||
struct mei_cl_cb *cmpl_list);
|
||||
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
|
||||
void mei_cl_notify(struct mei_cl *cl);
|
||||
|
||||
void mei_cl_all_disconnect(struct mei_device *dev);
|
||||
void mei_cl_all_wakeup(struct mei_device *dev);
|
||||
void mei_cl_all_write_clear(struct mei_device *dev);
|
||||
|
|
|
@@ -154,6 +154,12 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
|
|||
pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
|
||||
dev->hbm_f_pg_supported);
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
|
||||
dev->hbm_f_dc_supported);
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
|
||||
dev->hbm_f_dot_supported);
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
|
||||
dev->hbm_f_ev_supported);
|
||||
}
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
|
||||
|
|
|
@@ -52,6 +52,7 @@ static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status)
|
|||
MEI_CL_CS(ALREADY_STARTED);
|
||||
MEI_CL_CS(OUT_OF_RESOURCES);
|
||||
MEI_CL_CS(MESSAGE_SMALL);
|
||||
MEI_CL_CS(NOT_ALLOWED);
|
||||
default: return "unknown";
|
||||
}
|
||||
#undef MEI_CL_CCS
|
||||
|
@@ -89,6 +90,7 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
|
|||
case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY;
|
||||
case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
|
||||
case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL;
|
||||
case MEI_CL_CONN_NOT_ALLOWED: return -EBUSY;
|
||||
default: return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
@@ -299,6 +301,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
|
|||
enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
|
||||
memset(enum_req, 0, len);
|
||||
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
|
||||
enum_req->allow_add = dev->hbm_f_dc_supported;
|
||||
|
||||
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
|
||||
if (ret) {
|
||||
|
@@ -343,6 +346,180 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_add_cl_resp - send response to fw on client add request
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @addr: me address
|
||||
* @status: response status
|
||||
*
|
||||
* Return: 0 on success and < 0 on failure
|
||||
*/
|
||||
static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
|
||||
{
|
||||
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
|
||||
struct hbm_add_client_response *resp;
|
||||
const size_t len = sizeof(struct hbm_add_client_response);
|
||||
int ret;
|
||||
|
||||
dev_dbg(dev->dev, "adding client response\n");
|
||||
|
||||
resp = (struct hbm_add_client_response *)dev->wr_msg.data;
|
||||
|
||||
mei_hbm_hdr(mei_hdr, len);
|
||||
memset(resp, 0, sizeof(struct hbm_add_client_response));
|
||||
|
||||
resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
|
||||
resp->me_addr = addr;
|
||||
resp->status = status;
|
||||
|
||||
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
|
||||
if (ret)
|
||||
dev_err(dev->dev, "add client response write failed: ret = %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_fw_add_cl_req - request from the fw to add a client
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @req: add client request
|
||||
*
|
||||
* Return: 0 on success and < 0 on failure
|
||||
*/
|
||||
static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
|
||||
struct hbm_add_client_request *req)
|
||||
{
|
||||
int ret;
|
||||
u8 status = MEI_HBMS_SUCCESS;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct hbm_add_client_request) !=
|
||||
sizeof(struct hbm_props_response));
|
||||
|
||||
ret = mei_hbm_me_cl_add(dev, (struct hbm_props_response *)req);
|
||||
if (ret)
|
||||
status = !MEI_HBMS_SUCCESS;
|
||||
|
||||
return mei_hbm_add_cl_resp(dev, req->me_addr, status);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_cl_notify_req - send notification request
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @cl: a client to disconnect from
|
||||
* @start: true for start false for stop
|
||||
*
|
||||
* Return: 0 on success and -EIO on write failure
|
||||
*/
|
||||
int mei_hbm_cl_notify_req(struct mei_device *dev,
|
||||
struct mei_cl *cl, u8 start)
|
||||
{
|
||||
|
||||
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
|
||||
struct hbm_notification_request *req;
|
||||
const size_t len = sizeof(struct hbm_notification_request);
|
||||
int ret;
|
||||
|
||||
mei_hbm_hdr(mei_hdr, len);
|
||||
mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len);
|
||||
|
||||
req = (struct hbm_notification_request *)dev->wr_msg.data;
|
||||
req->start = start;
|
||||
|
||||
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
|
||||
if (ret)
|
||||
dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* notify_res_to_fop - convert notification response to the proper
|
||||
* notification FOP
|
||||
*
|
||||
* @cmd: client notification start response command
|
||||
*
|
||||
* Return: MEI_FOP_NOTIFY_START or MEI_FOP_NOTIFY_STOP;
|
||||
*/
|
||||
static inline enum mei_cb_file_ops notify_res_to_fop(struct mei_hbm_cl_cmd *cmd)
|
||||
{
|
||||
struct hbm_notification_response *rs =
|
||||
(struct hbm_notification_response *)cmd;
|
||||
|
||||
return mei_cl_notify_req2fop(rs->start);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_cl_notify_start_res - update the client state according to
|
||||
* notify start response
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @cl: mei host client
|
||||
* @cmd: client notification start response command
|
||||
*/
|
||||
static void mei_hbm_cl_notify_start_res(struct mei_device *dev,
|
||||
struct mei_cl *cl,
|
||||
struct mei_hbm_cl_cmd *cmd)
|
||||
{
|
||||
struct hbm_notification_response *rs =
|
||||
(struct hbm_notification_response *)cmd;
|
||||
|
||||
cl_dbg(dev, cl, "hbm: notify start response status=%d\n", rs->status);
|
||||
|
||||
if (rs->status == MEI_HBMS_SUCCESS ||
|
||||
rs->status == MEI_HBMS_ALREADY_STARTED) {
|
||||
cl->notify_en = true;
|
||||
cl->status = 0;
|
||||
} else {
|
||||
cl->status = -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_cl_notify_stop_res - update the client state according to
|
||||
* notify stop response
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @cl: mei host client
|
||||
* @cmd: client notification stop response command
|
||||
*/
|
||||
static void mei_hbm_cl_notify_stop_res(struct mei_device *dev,
|
||||
struct mei_cl *cl,
|
||||
struct mei_hbm_cl_cmd *cmd)
|
||||
{
|
||||
struct hbm_notification_response *rs =
|
||||
(struct hbm_notification_response *)cmd;
|
||||
|
||||
cl_dbg(dev, cl, "hbm: notify stop response status=%d\n", rs->status);
|
||||
|
||||
if (rs->status == MEI_HBMS_SUCCESS ||
|
||||
rs->status == MEI_HBMS_NOT_STARTED) {
|
||||
cl->notify_en = false;
|
||||
cl->status = 0;
|
||||
} else {
|
||||
/* TODO: spec is not clear yet about other possible issues */
|
||||
cl->status = -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_cl_notify - signal notification event
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @cmd: notification client message
|
||||
*/
|
||||
static void mei_hbm_cl_notify(struct mei_device *dev,
|
||||
struct mei_hbm_cl_cmd *cmd)
|
||||
{
|
||||
struct mei_cl *cl;
|
||||
|
||||
cl = mei_hbm_cl_find_by_cmd(dev, cmd);
|
||||
if (cl)
|
||||
mei_cl_notify(cl);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_prop_req - request property for a single client
|
||||
*
|
||||
|
@@ -610,8 +787,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
|
|||
|
||||
if (rs->status == MEI_CL_CONN_SUCCESS)
|
||||
cl->state = MEI_FILE_CONNECTED;
|
||||
else
|
||||
else {
|
||||
cl->state = MEI_FILE_DISCONNECT_REPLY;
|
||||
if (rs->status == MEI_CL_CONN_NOT_FOUND)
|
||||
mei_me_cl_del(dev, cl->me_cl);
|
||||
}
|
||||
cl->status = mei_cl_conn_status_to_errno(rs->status);
|
||||
}
|
||||
|
||||
|
@@ -654,6 +834,12 @@ static void mei_hbm_cl_res(struct mei_device *dev,
|
|||
case MEI_FOP_DISCONNECT:
|
||||
mei_hbm_cl_disconnect_res(dev, cl, rs);
|
||||
break;
|
||||
case MEI_FOP_NOTIFY_START:
|
||||
mei_hbm_cl_notify_start_res(dev, cl, rs);
|
||||
break;
|
||||
case MEI_FOP_NOTIFY_STOP:
|
||||
mei_hbm_cl_notify_stop_res(dev, cl, rs);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
@@ -693,6 +879,79 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_pg_enter_res - PG enter response received
|
||||
*
|
||||
* @dev: the device structure.
|
||||
*
|
||||
* Return: 0 on success, -EPROTO on state mismatch
|
||||
*/
|
||||
static int mei_hbm_pg_enter_res(struct mei_device *dev)
|
||||
{
|
||||
if (mei_pg_state(dev) != MEI_PG_OFF ||
|
||||
dev->pg_event != MEI_PG_EVENT_WAIT) {
|
||||
dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
|
||||
mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
dev->pg_event = MEI_PG_EVENT_RECEIVED;
|
||||
wake_up(&dev->wait_pg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_pg_resume - process with PG resume
|
||||
*
|
||||
* @dev: the device structure.
|
||||
*/
|
||||
void mei_hbm_pg_resume(struct mei_device *dev)
|
||||
{
|
||||
pm_request_resume(dev->dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mei_hbm_pg_resume);
|
||||
|
||||
/**
|
||||
* mei_hbm_pg_exit_res - PG exit response received
|
||||
*
|
||||
* @dev: the device structure.
|
||||
*
|
||||
* Return: 0 on success, -EPROTO on state mismatch
|
||||
*/
|
||||
static int mei_hbm_pg_exit_res(struct mei_device *dev)
|
||||
{
|
||||
if (mei_pg_state(dev) != MEI_PG_ON ||
|
||||
(dev->pg_event != MEI_PG_EVENT_WAIT &&
|
||||
dev->pg_event != MEI_PG_EVENT_IDLE)) {
|
||||
dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
|
||||
mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
switch (dev->pg_event) {
|
||||
case MEI_PG_EVENT_WAIT:
|
||||
dev->pg_event = MEI_PG_EVENT_RECEIVED;
|
||||
wake_up(&dev->wait_pg);
|
||||
break;
|
||||
case MEI_PG_EVENT_IDLE:
|
||||
/*
|
||||
* If the driver is not waiting on this then
|
||||
* this is HW initiated exit from PG.
|
||||
* Start runtime pm resume sequence to exit from PG.
|
||||
*/
|
||||
dev->pg_event = MEI_PG_EVENT_RECEIVED;
|
||||
mei_hbm_pg_resume(dev);
|
||||
break;
|
||||
default:
|
||||
WARN(1, "hbm: pg exit response: unexpected pg event = %d\n",
|
||||
dev->pg_event);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hbm_config_features - check what hbm features and commands
|
||||
* are supported by the fw
|
||||
|
@@ -709,6 +968,17 @@ static void mei_hbm_config_features(struct mei_device *dev)
|
|||
if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
|
||||
dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
|
||||
dev->hbm_f_pg_supported = 1;
|
||||
|
||||
if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
|
||||
dev->hbm_f_dc_supported = 1;
|
||||
|
||||
/* disconnect on connect timeout instead of link reset */
|
||||
if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
|
||||
dev->hbm_f_dot_supported = 1;
|
||||
|
||||
/* Notification Event Support */
|
||||
if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
|
||||
dev->hbm_f_ev_supported = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -740,6 +1010,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
|
|||
struct hbm_host_version_response *version_res;
|
||||
struct hbm_props_response *props_res;
|
||||
struct hbm_host_enum_response *enum_res;
|
||||
struct hbm_add_client_request *add_cl_req;
|
||||
int ret;
|
||||
|
||||
struct mei_hbm_cl_cmd *cl_cmd;
|
||||
struct hbm_client_connect_request *disconnect_req;
|
||||
|
@@ -828,24 +1100,17 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
|
|||
break;
|
||||
|
||||
case MEI_PG_ISOLATION_ENTRY_RES_CMD:
|
||||
dev_dbg(dev->dev, "power gate isolation entry response received\n");
|
||||
dev->pg_event = MEI_PG_EVENT_RECEIVED;
|
||||
if (waitqueue_active(&dev->wait_pg))
|
||||
wake_up(&dev->wait_pg);
|
||||
dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n");
|
||||
ret = mei_hbm_pg_enter_res(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
case MEI_PG_ISOLATION_EXIT_REQ_CMD:
|
||||
dev_dbg(dev->dev, "power gate isolation exit request received\n");
|
||||
dev->pg_event = MEI_PG_EVENT_RECEIVED;
|
||||
if (waitqueue_active(&dev->wait_pg))
|
||||
wake_up(&dev->wait_pg);
|
||||
else
|
||||
/*
|
||||
* If the driver is not waiting on this then
|
||||
* this is HW initiated exit from PG.
|
||||
* Start runtime pm resume sequence to exit from PG.
|
||||
*/
|
||||
pm_request_resume(dev->dev);
|
||||
dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n");
|
||||
ret = mei_hbm_pg_exit_res(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
case HOST_CLIENT_PROPERTIES_RES_CMD:
|
||||
|
@@ -937,6 +1202,39 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
|
|||
return -EIO;
|
||||
}
|
||||
break;
|
||||
|
||||
case MEI_HBM_ADD_CLIENT_REQ_CMD:
|
||||
dev_dbg(dev->dev, "hbm: add client request received\n");
|
||||
/*
|
||||
* after the host receives the enum_resp
|
||||
* message clients may be added or removed
|
||||
*/
|
||||
if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS &&
|
||||
dev->hbm_state >= MEI_HBM_STOPPED) {
|
||||
dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
|
||||
dev->dev_state, dev->hbm_state);
|
||||
return -EPROTO;
|
||||
}
|
||||
add_cl_req = (struct hbm_add_client_request *)mei_msg;
|
||||
ret = mei_hbm_fw_add_cl_req(dev, add_cl_req);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "hbm: add client: failed to send response %d\n",
|
||||
ret);
|
||||
return -EIO;
|
||||
}
|
||||
dev_dbg(dev->dev, "hbm: add client request processed\n");
|
||||
break;
|
||||
|
||||
case MEI_HBM_NOTIFY_RES_CMD:
|
||||
dev_dbg(dev->dev, "hbm: notify response received\n");
|
||||
mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd));
|
||||
break;
|
||||
|
||||
case MEI_HBM_NOTIFICATION_CMD:
|
||||
dev_dbg(dev->dev, "hbm: notification\n");
|
||||
mei_hbm_cl_notify(dev, cl_cmd);
|
||||
break;
|
||||
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
|
|
|
@@ -54,6 +54,9 @@ int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
|
|||
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
|
||||
bool mei_hbm_version_is_supported(struct mei_device *dev);
|
||||
int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
|
||||
void mei_hbm_pg_resume(struct mei_device *dev);
|
||||
int mei_hbm_cl_notify_req(struct mei_device *dev,
|
||||
struct mei_cl *cl, u8 request);
|
||||
|
||||
#endif /* _MEI_HBM_H_ */
|
||||
|
||||
|
|
|
@@ -117,12 +117,17 @@
|
|||
#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
|
||||
#define MEI_DEV_ID_WPT_LP_2 0x9CBB /* Wildcat Point LP 2 */
|
||||
|
||||
#define MEI_DEV_ID_SPT 0x9D3A /* Sunrise Point */
|
||||
#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */
|
||||
#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
|
||||
#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
|
||||
/*
|
||||
* MEI HW Section
|
||||
*/
|
||||
|
||||
/* Host Firmware Status Registers in PCI Config Space */
|
||||
#define PCI_CFG_HFS_1 0x40
|
||||
# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
|
||||
#define PCI_CFG_HFS_2 0x48
|
||||
#define PCI_CFG_HFS_3 0x60
|
||||
#define PCI_CFG_HFS_4 0x64
|
||||
|
@@ -140,7 +145,8 @@
|
|||
#define ME_CSR_HA 0xC
|
||||
/* H_HGC_CSR - PGI register */
|
||||
#define H_HPG_CSR 0x10
|
||||
|
||||
/* H_D0I3C - D0I3 Control */
|
||||
#define H_D0I3C 0x800
|
||||
|
||||
/* register bits of H_CSR (Host Control Status register) */
|
||||
/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
|
||||
|
@@ -159,7 +165,14 @@
|
|||
#define H_IS 0x00000002
|
||||
/* Host Interrupt Enable */
|
||||
#define H_IE 0x00000001
|
||||
/* Host D0I3 Interrupt Enable */
|
||||
#define H_D0I3C_IE 0x00000020
|
||||
/* Host D0I3 Interrupt Status */
|
||||
#define H_D0I3C_IS 0x00000040
|
||||
|
||||
/* H_CSR masks */
|
||||
#define H_CSR_IE_MASK (H_IE | H_D0I3C_IE)
|
||||
#define H_CSR_IS_MASK (H_IS | H_D0I3C_IS)
|
||||
|
||||
/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
|
||||
/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
|
||||
|
@@ -183,8 +196,14 @@ access to ME_CBD */
|
|||
#define ME_IE_HRA 0x00000001
|
||||
|
||||
|
||||
/* register bits - H_HPG_CSR */
|
||||
#define H_HPG_CSR_PGIHEXR 0x00000001
|
||||
#define H_HPG_CSR_PGI 0x00000002
|
||||
/* H_HPG_CSR register bits */
|
||||
#define H_HPG_CSR_PGIHEXR 0x00000001
|
||||
#define H_HPG_CSR_PGI 0x00000002
|
||||
|
||||
/* H_D0I3C register bits */
|
||||
#define H_D0I3C_CIP 0x00000001
|
||||
#define H_D0I3C_IR 0x00000002
|
||||
#define H_D0I3C_I3 0x00000004
|
||||
#define H_D0I3C_RR 0x00000008
|
||||
|
||||
#endif /* _MEI_HW_MEI_REGS_H_ */
|
||||
|
|
|
@@ -134,10 +134,39 @@ static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
|
|||
*/
|
||||
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
|
||||
{
|
||||
reg &= ~H_IS;
|
||||
reg &= ~H_CSR_IS_MASK;
|
||||
mei_hcsr_write(dev, reg);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
|
||||
*
|
||||
* @dev: the device structure
|
||||
*
|
||||
* Return: H_D0I3C register value (u32)
|
||||
*/
|
||||
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
|
||||
trace_mei_reg_read(dev->dev, "H_D0I3C", H_CSR, reg);
|
||||
|
||||
return reg;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_me_d0i3c_write - writes H_D0I3C register to device
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @reg: new register value
|
||||
*/
|
||||
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
|
||||
{
|
||||
trace_mei_reg_write(dev->dev, "H_D0I3C", H_CSR, reg);
|
||||
mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_me_fw_status - read fw status register from pci config space
|
||||
*
|
||||
|
@@ -176,12 +205,25 @@ static int mei_me_fw_status(struct mei_device *dev,
|
|||
*/
|
||||
static void mei_me_hw_config(struct mei_device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev->dev);
|
||||
struct mei_me_hw *hw = to_me_hw(dev);
|
||||
u32 hcsr = mei_hcsr_read(dev);
|
||||
u32 hcsr, reg;
|
||||
|
||||
/* Doesn't change in runtime */
|
||||
hcsr = mei_hcsr_read(dev);
|
||||
dev->hbuf_depth = (hcsr & H_CBD) >> 24;
|
||||
|
||||
reg = 0;
|
||||
pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®);
|
||||
hw->d0i3_supported =
|
||||
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
|
||||
|
||||
hw->pg_state = MEI_PG_OFF;
|
||||
if (hw->d0i3_supported) {
|
||||
reg = mei_me_d0i3c_read(dev);
|
||||
if (reg & H_D0I3C_I3)
|
||||
hw->pg_state = MEI_PG_ON;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -208,7 +250,7 @@ static void mei_me_intr_clear(struct mei_device *dev)
|
|||
{
|
||||
u32 hcsr = mei_hcsr_read(dev);
|
||||
|
||||
if ((hcsr & H_IS) == H_IS)
|
||||
if (hcsr & H_CSR_IS_MASK)
|
||||
mei_hcsr_write(dev, hcsr);
|
||||
}
|
||||
/**
|
||||
|
@@ -220,7 +262,7 @@ static void mei_me_intr_enable(struct mei_device *dev)
|
|||
{
|
||||
u32 hcsr = mei_hcsr_read(dev);
|
||||
|
||||
hcsr |= H_IE;
|
||||
hcsr |= H_CSR_IE_MASK;
|
||||
mei_hcsr_set(dev, hcsr);
|
||||
}
|
||||
|
||||
|
@@ -233,7 +275,7 @@ static void mei_me_intr_disable(struct mei_device *dev)
|
|||
{
|
||||
u32 hcsr = mei_hcsr_read(dev);
|
||||
|
||||
hcsr &= ~H_IE;
|
||||
hcsr &= ~H_CSR_IE_MASK;
|
||||
mei_hcsr_set(dev, hcsr);
|
||||
}
|
||||
|
||||
|
@@ -253,57 +295,6 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
|
|||
/* complete this write before we set host ready on another CPU */
|
||||
mmiowb();
|
||||
}
|
||||
/**
|
||||
* mei_me_hw_reset - resets fw via mei csr register.
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @intr_enable: if interrupt should be enabled after reset.
|
||||
*
|
||||
* Return: always 0
|
||||
*/
|
||||
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
|
||||
{
|
||||
u32 hcsr = mei_hcsr_read(dev);
|
||||
|
||||
/* H_RST may be found lit before reset is started,
|
||||
* for example if preceding reset flow hasn't completed.
|
||||
* In that case asserting H_RST will be ignored, therefore
|
||||
* we need to clean H_RST bit to start a successful reset sequence.
|
||||
*/
|
||||
if ((hcsr & H_RST) == H_RST) {
|
||||
dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
|
||||
hcsr &= ~H_RST;
|
||||
mei_hcsr_set(dev, hcsr);
|
||||
hcsr = mei_hcsr_read(dev);
|
||||
}
|
||||
|
||||
hcsr |= H_RST | H_IG | H_IS;
|
||||
|
||||
if (intr_enable)
|
||||
hcsr |= H_IE;
|
||||
else
|
||||
hcsr &= ~H_IE;
|
||||
|
||||
dev->recvd_hw_ready = false;
|
||||
mei_hcsr_write(dev, hcsr);
|
||||
|
||||
/*
|
||||
* Host reads the H_CSR once to ensure that the
|
||||
* posted write to H_CSR completes.
|
||||
*/
|
||||
hcsr = mei_hcsr_read(dev);
|
||||
|
||||
if ((hcsr & H_RST) == 0)
|
||||
dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
|
||||
|
||||
if ((hcsr & H_RDY) == H_RDY)
|
||||
dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
|
||||
|
||||
if (intr_enable == false)
|
||||
mei_me_hw_reset_release(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_me_host_set_ready - enable device
|
||||
|
@@ -314,7 +305,7 @@ static void mei_me_host_set_ready(struct mei_device *dev)
|
|||
{
|
||||
u32 hcsr = mei_hcsr_read(dev);
|
||||
|
||||
hcsr |= H_IE | H_IG | H_RDY;
|
||||
hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
|
||||
mei_hcsr_set(dev, hcsr);
|
||||
}
|
||||
|
||||
|
@@ -601,13 +592,13 @@ static void mei_me_pg_unset(struct mei_device *dev)
|
|||
}
|
||||
|
||||
/**
|
||||
* mei_me_pg_enter_sync - perform pg entry procedure
|
||||
* mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
|
||||
*
|
||||
* @dev: the device structure
|
||||
*
|
||||
* Return: 0 on success an error code otherwise
|
||||
*/
|
||||
int mei_me_pg_enter_sync(struct mei_device *dev)
|
||||
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
|
||||
{
|
||||
struct mei_me_hw *hw = to_me_hw(dev);
|
||||
unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
|
||||
|
@@ -638,13 +629,13 @@ int mei_me_pg_enter_sync(struct mei_device *dev)
|
|||
}
|
||||
|
||||
/**
|
||||
* mei_me_pg_exit_sync - perform pg exit procedure
|
||||
* mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
|
||||
*
|
||||
* @dev: the device structure
|
||||
*
|
||||
* Return: 0 on success an error code otherwise
|
||||
*/
|
||||
int mei_me_pg_exit_sync(struct mei_device *dev)
|
||||
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
|
||||
{
|
||||
struct mei_me_hw *hw = to_me_hw(dev);
|
||||
unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
|
||||
|
@@ -712,8 +703,12 @@ static bool mei_me_pg_in_transition(struct mei_device *dev)
|
|||
*/
|
||||
static bool mei_me_pg_is_enabled(struct mei_device *dev)
|
||||
{
|
||||
struct mei_me_hw *hw = to_me_hw(dev);
|
||||
u32 reg = mei_me_mecsr_read(dev);
|
||||
|
||||
if (hw->d0i3_supported)
|
||||
return true;
|
||||
|
||||
if ((reg & ME_PGIC_HRA) == 0)
|
||||
goto notsupported;
|
||||
|
||||
|
@@ -723,7 +718,8 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev)
|
|||
return true;
|
||||
|
||||
notsupported:
|
||||
dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
|
||||
dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
|
||||
hw->d0i3_supported,
|
||||
!!(reg & ME_PGIC_HRA),
|
||||
dev->version.major_version,
|
||||
dev->version.minor_version,
|
||||
|
@@ -734,11 +730,211 @@ notsupported:
|
|||
}
|
||||
|
||||
/**
|
||||
* mei_me_pg_intr - perform pg processing in interrupt thread handler
|
||||
* mei_me_d0i3_set - write d0i3 register bit on mei device.
|
||||
*
|
||||
* @dev: the device structure
|
||||
* @intr: ask for interrupt
|
||||
*
|
||||
* Return: D0I3C register value
|
||||
*/
|
||||
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
|
||||
{
|
||||
u32 reg = mei_me_d0i3c_read(dev);
|
||||
|
||||
reg |= H_D0I3C_I3;
|
||||
if (intr)
|
||||
reg |= H_D0I3C_IR;
|
||||
else
|
||||
reg &= ~H_D0I3C_IR;
|
||||
mei_me_d0i3c_write(dev, reg);
|
||||
/* read it to ensure HW consistency */
|
||||
reg = mei_me_d0i3c_read(dev);
|
||||
return reg;
|
||||
}
|
||||
|
||||
/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg &= ~H_D0I3C_I3;
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}

/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}

/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter\n");
	return 0;
}

/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}

/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *			   in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_intr(struct mei_device *dev)
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

@@ -751,6 +947,162 @@ static void mei_me_pg_intr(struct mei_device *dev)
		wake_up(&dev->wait_pg);
 }

/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_d0i3_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (hw->intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}

/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		mei_me_d0i3_intr(dev);
	else
		mei_me_pg_legacy_intr(dev);
}

/**
 * mei_me_pg_enter_sync - perform runtime pm entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
int mei_me_pg_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_enter_sync(dev);
	else
		return mei_me_pg_legacy_enter_sync(dev);
}

/**
 * mei_me_pg_exit_sync - perform runtime pm exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
int mei_me_pg_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_exit_sync(dev);
	else
		return mei_me_pg_legacy_exit_sync(dev);
}

/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
@@ -759,16 +1111,20 @@ static void mei_me_pg_intr(struct mei_device *dev)
 *
 * Return: irqreturn_t
 */

irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	u32 hcsr = mei_hcsr_read(dev);
	struct mei_device *dev = (struct mei_device *)dev_id;
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr;

	if ((hcsr & H_IS) != H_IS)
	hcsr = mei_hcsr_read(dev);
	if (!(hcsr & H_CSR_IS_MASK))
		return IRQ_NONE;

	/* clear H_IS bit in H_CSR */
	hw->intr_source = hcsr & H_CSR_IS_MASK;
	dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);

	/* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
	mei_hcsr_write(dev, hcsr);

	return IRQ_WAKE_THREAD;

@@ -796,11 +1152,6 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
		mei_clear_interrupts(dev);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");

@@ -50,13 +50,17 @@ struct mei_cfg {
 * struct mei_me_hw - me hw specific data
 *
 * @cfg: per device generation config and ops
 * @mem_addr: io memory address
 * @pg_state: power gating state
 * @mem_addr: io memory address
 * @intr_source: interrupt source
 * @pg_state: power gating state
 * @d0i3_supported: di03 support
 */
struct mei_me_hw {
	const struct mei_cfg *cfg;
	void __iomem *mem_addr;
	u32 intr_source;
	enum mei_pg_state pg_state;
	bool d0i3_supported;
};

#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)

@ -31,14 +31,15 @@
|
|||
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
|
||||
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
|
||||
|
||||
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
|
||||
#define MEI_HBM_TIMEOUT 1 /* 1 second */
|
||||
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
|
||||
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
|
||||
#define MEI_HBM_TIMEOUT 1 /* 1 second */
|
||||
|
||||
/*
|
||||
* MEI Version
|
||||
*/
|
||||
#define HBM_MINOR_VERSION 1
|
||||
#define HBM_MAJOR_VERSION 1
|
||||
#define HBM_MINOR_VERSION 0
|
||||
#define HBM_MAJOR_VERSION 2
|
||||
|
||||
/*
|
||||
* MEI version with PGI support
|
||||
|
@ -46,6 +47,24 @@
|
|||
#define HBM_MINOR_VERSION_PGI 1
|
||||
#define HBM_MAJOR_VERSION_PGI 1
|
||||
|
||||
/*
|
||||
* MEI version with Dynamic clients support
|
||||
*/
|
||||
#define HBM_MINOR_VERSION_DC 0
|
||||
#define HBM_MAJOR_VERSION_DC 2
|
||||
|
||||
/*
|
||||
* MEI version with disconnect on connection timeout support
|
||||
*/
|
||||
#define HBM_MINOR_VERSION_DOT 0
|
||||
#define HBM_MAJOR_VERSION_DOT 2
|
||||
|
||||
/*
|
||||
* MEI version with notifcation support
|
||||
*/
|
||||
#define HBM_MINOR_VERSION_EV 0
|
||||
#define HBM_MAJOR_VERSION_EV 2
|
||||
|
||||
/* Host bus message command opcode */
|
||||
#define MEI_HBM_CMD_OP_MSK 0x7f
|
||||
/* Host bus message command RESPONSE */
|
||||
|
@ -81,6 +100,13 @@
|
|||
#define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b
|
||||
#define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b
|
||||
|
||||
#define MEI_HBM_ADD_CLIENT_REQ_CMD 0x0f
|
||||
#define MEI_HBM_ADD_CLIENT_RES_CMD 0x8f
|
||||
|
||||
#define MEI_HBM_NOTIFY_REQ_CMD 0x10
|
||||
#define MEI_HBM_NOTIFY_RES_CMD 0x90
|
||||
#define MEI_HBM_NOTIFICATION_CMD 0x11
|
||||
|
||||
/*
|
||||
* MEI Stop Reason
|
||||
* used by hbm_host_stop_request.reason
|
||||
|
@ -136,6 +162,7 @@ enum mei_cl_connect_status {
|
|||
MEI_CL_CONN_ALREADY_STARTED = MEI_HBMS_ALREADY_EXISTS,
|
||||
MEI_CL_CONN_OUT_OF_RESOURCES = MEI_HBMS_REJECTED,
|
||||
MEI_CL_CONN_MESSAGE_SMALL = MEI_HBMS_INVALID_PARAMETER,
|
||||
MEI_CL_CONN_NOT_ALLOWED = MEI_HBMS_NOT_ALLOWED,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -213,9 +240,17 @@ struct hbm_me_stop_request {
|
|||
u8 reserved[2];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct hbm_host_enum_request - enumeration request from host to fw
|
||||
*
|
||||
* @hbm_cmd: bus message command header
|
||||
* @allow_add: allow dynamic clients add HBM version >= 2.0
|
||||
* @reserved: reserved
|
||||
*/
|
||||
struct hbm_host_enum_request {
|
||||
u8 hbm_cmd;
|
||||
u8 reserved[3];
|
||||
u8 allow_add;
|
||||
u8 reserved[2];
|
||||
} __packed;
|
||||
|
||||
struct hbm_host_enum_response {
|
||||
|
@ -247,6 +282,38 @@ struct hbm_props_response {
|
|||
struct mei_client_properties client_properties;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct hbm_add_client_request - request to add a client
|
||||
* might be sent by fw after enumeration has already completed
|
||||
*
|
||||
* @hbm_cmd: bus message command header
|
||||
* @me_addr: address of the client in ME
|
||||
* @reserved: reserved
|
||||
* @client_properties: client properties
|
||||
*/
|
||||
struct hbm_add_client_request {
|
||||
u8 hbm_cmd;
|
||||
u8 me_addr;
|
||||
u8 reserved[2];
|
||||
struct mei_client_properties client_properties;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct hbm_add_client_response - response to add a client
|
||||
* sent by the host to report client addition status to fw
|
||||
*
|
||||
* @hbm_cmd: bus message command header
|
||||
* @me_addr: address of the client in ME
|
||||
* @status: if HBMS_SUCCESS then the client can now accept connections.
|
||||
* @reserved: reserved
|
||||
*/
|
||||
struct hbm_add_client_response {
|
||||
u8 hbm_cmd;
|
||||
u8 me_addr;
|
||||
u8 status;
|
||||
u8 reserved[1];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct hbm_power_gate - power gate request/response
|
||||
*
|
||||
|
@ -298,5 +365,62 @@ struct hbm_flow_control {
|
|||
u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define MEI_HBM_NOTIFICATION_START 1
|
||||
#define MEI_HBM_NOTIFICATION_STOP 0
|
||||
/**
|
||||
* struct hbm_notification_request - start/stop notification request
|
||||
*
|
||||
* @hbm_cmd: bus message command header
|
||||
* @me_addr: address of the client in ME
|
||||
* @host_addr: address of the client in the driver
|
||||
* @start: start = 1 or stop = 0 asynchronous notifications
|
||||
*/
|
||||
struct hbm_notification_request {
|
||||
u8 hbm_cmd;
|
||||
u8 me_addr;
|
||||
u8 host_addr;
|
||||
u8 start;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct hbm_notification_response - start/stop notification response
|
||||
*
|
||||
* @hbm_cmd: bus message command header
|
||||
* @me_addr: address of the client in ME
|
||||
* @host_addr: - address of the client in the driver
|
||||
* @status: (mei_hbm_status) response status for the request
|
||||
* - MEI_HBMS_SUCCESS: successful stop/start
|
||||
* - MEI_HBMS_CLIENT_NOT_FOUND: if the connection could not be found.
|
||||
* - MEI_HBMS_ALREADY_STARTED: for start requests for a previously
|
||||
* started notification.
|
||||
* - MEI_HBMS_NOT_STARTED: for stop request for a connected client for whom
|
||||
* asynchronous notifications are currently disabled.
|
||||
*
|
||||
* @start: start = 1 or stop = 0 asynchronous notifications
|
||||
* @reserved: reserved
|
||||
*/
|
||||
struct hbm_notification_response {
|
||||
u8 hbm_cmd;
|
||||
u8 me_addr;
|
||||
u8 host_addr;
|
||||
u8 status;
|
||||
u8 start;
|
||||
u8 reserved[3];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct hbm_notification - notification event
|
||||
*
|
||||
* @hbm_cmd: bus message command header
|
||||
* @me_addr: address of the client in ME
|
||||
* @host_addr: address of the client in the driver
|
||||
* @reserved: reserved for alignment
|
||||
*/
|
||||
struct hbm_notification {
|
||||
u8 hbm_cmd;
|
||||
u8 me_addr;
|
||||
u8 host_addr;
|
||||
u8 reserved[1];
|
||||
} __packed;
|
||||
|
||||
#endif
|
||||
|
|
|
@ -331,7 +331,7 @@ void mei_stop(struct mei_device *dev)
|
|||
|
||||
mei_cancel_work(dev);
|
||||
|
||||
mei_nfc_host_exit(dev);
|
||||
mei_cl_bus_remove_devices(dev);
|
||||
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
|
@ -390,6 +390,7 @@ void mei_device_init(struct mei_device *dev,
|
|||
INIT_LIST_HEAD(&dev->me_clients);
|
||||
mutex_init(&dev->device_lock);
|
||||
init_rwsem(&dev->me_clients_rwsem);
|
||||
mutex_init(&dev->cl_bus_lock);
|
||||
init_waitqueue_head(&dev->wait_hw_ready);
|
||||
init_waitqueue_head(&dev->wait_pg);
|
||||
init_waitqueue_head(&dev->wait_hbm_start);
|
||||
|
|
|
@ -403,6 +403,13 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
|
|||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
case MEI_FOP_NOTIFY_START:
|
||||
case MEI_FOP_NOTIFY_STOP:
|
||||
ret = mei_cl_irq_notify(cl, cb, cmpl_list);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
@ -424,6 +431,24 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
|
|||
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
|
||||
|
||||
|
||||
/**
|
||||
* mei_connect_timeout - connect/disconnect timeouts
|
||||
*
|
||||
* @cl: host client
|
||||
*/
|
||||
static void mei_connect_timeout(struct mei_cl *cl)
|
||||
{
|
||||
struct mei_device *dev = cl->dev;
|
||||
|
||||
if (cl->state == MEI_FILE_CONNECTING) {
|
||||
if (dev->hbm_f_dot_supported) {
|
||||
cl->state = MEI_FILE_DISCONNECT_REQUIRED;
|
||||
wake_up(&cl->wait);
|
||||
return;
|
||||
}
|
||||
}
|
||||
mei_reset(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_timer - timer function.
|
||||
|
@ -464,7 +489,7 @@ void mei_timer(struct work_struct *work)
|
|||
if (cl->timer_count) {
|
||||
if (--cl->timer_count == 0) {
|
||||
dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
|
||||
mei_reset(dev);
|
||||
mei_connect_timeout(cl);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -445,6 +445,45 @@ end:
|
|||
return rets;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_ioctl_client_notify_request -
|
||||
* propagate event notification request to client
|
||||
*
|
||||
* @file: pointer to file structure
|
||||
* @request: 0 - disable, 1 - enable
|
||||
*
|
||||
* Return: 0 on success , <0 on error
|
||||
*/
|
||||
static int mei_ioctl_client_notify_request(struct file *file, u32 request)
|
||||
{
|
||||
struct mei_cl *cl = file->private_data;
|
||||
|
||||
return mei_cl_notify_request(cl, file, request);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_ioctl_client_notify_get - wait for notification request
|
||||
*
|
||||
* @file: pointer to file structure
|
||||
* @notify_get: 0 - disable, 1 - enable
|
||||
*
|
||||
* Return: 0 on success , <0 on error
|
||||
*/
|
||||
static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get)
|
||||
{
|
||||
struct mei_cl *cl = file->private_data;
|
||||
bool notify_ev;
|
||||
bool block = (file->f_flags & O_NONBLOCK) == 0;
|
||||
int rets;
|
||||
|
||||
rets = mei_cl_notify_get(cl, block, ¬ify_ev);
|
||||
if (rets)
|
||||
return rets;
|
||||
|
||||
*notify_get = notify_ev ? 1 : 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_ioctl - the IOCTL function
|
||||
*
|
||||
|
@ -459,6 +498,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
|
|||
struct mei_device *dev;
|
||||
struct mei_cl *cl = file->private_data;
|
||||
struct mei_connect_client_data connect_data;
|
||||
u32 notify_get, notify_req;
|
||||
int rets;
|
||||
|
||||
|
||||
|
@ -499,6 +539,33 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
|
|||
|
||||
break;
|
||||
|
||||
case IOCTL_MEI_NOTIFY_SET:
|
||||
dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
|
||||
if (copy_from_user(¬ify_req,
|
||||
(char __user *)data, sizeof(notify_req))) {
|
||||
dev_dbg(dev->dev, "failed to copy data from userland\n");
|
||||
rets = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
rets = mei_ioctl_client_notify_request(file, notify_req);
|
||||
break;
|
||||
|
||||
case IOCTL_MEI_NOTIFY_GET:
|
||||
dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
|
||||
rets = mei_ioctl_client_notify_get(file, ¬ify_get);
|
||||
if (rets)
|
||||
goto out;
|
||||
|
||||
dev_dbg(dev->dev, "copy connect data to user\n");
|
||||
if (copy_to_user((char __user *)data,
|
||||
¬ify_get, sizeof(notify_get))) {
|
||||
dev_dbg(dev->dev, "failed to copy data to userland\n");
|
||||
rets = -EFAULT;
|
||||
goto out;
|
||||
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
|
||||
rets = -ENOIOCTLCMD;
|
||||
|
@ -541,6 +608,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
|
|||
struct mei_cl *cl = file->private_data;
|
||||
struct mei_device *dev;
|
||||
unsigned int mask = 0;
|
||||
bool notify_en;
|
||||
|
||||
if (WARN_ON(!cl || !cl->dev))
|
||||
return POLLERR;
|
||||
|
@ -549,6 +617,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
|
|||
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
notify_en = cl->notify_en && (req_events & POLLPRI);
|
||||
|
||||
if (dev->dev_state != MEI_DEV_ENABLED ||
|
||||
!mei_cl_is_connected(cl)) {
|
||||
|
@ -561,6 +630,12 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (notify_en) {
|
||||
poll_wait(file, &cl->ev_wait, wait);
|
||||
if (cl->notify_ev)
|
||||
mask |= POLLPRI;
|
||||
}
|
||||
|
||||
if (req_events & (POLLIN | POLLRDNORM)) {
|
||||
poll_wait(file, &cl->rx_wait, wait);
|
||||
|
||||
|
@ -575,6 +650,26 @@ out:
|
|||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_fasync - asynchronous io support
|
||||
*
|
||||
* @fd: file descriptor
|
||||
* @file: pointer to file structure
|
||||
* @band: band bitmap
|
||||
*
|
||||
* Return: poll mask
|
||||
*/
|
||||
static int mei_fasync(int fd, struct file *file, int band)
|
||||
{
|
||||
|
||||
struct mei_cl *cl = file->private_data;
|
||||
|
||||
if (!mei_cl_is_connected(cl))
|
||||
return POLLERR;
|
||||
|
||||
return fasync_helper(fd, file, band, &cl->ev_async);
|
||||
}
|
||||
|
||||
/**
|
||||
* fw_status_show - mei device attribute show method
|
||||
*
|
||||
|
@ -627,6 +722,7 @@ static const struct file_operations mei_fops = {
|
|||
.release = mei_release,
|
||||
.write = mei_write,
|
||||
.poll = mei_poll,
|
||||
.fasync = mei_fasync,
|
||||
.llseek = no_llseek
|
||||
};
|
||||
|
||||
|
|
|
@ -89,6 +89,7 @@ enum file_state {
|
|||
MEI_FILE_CONNECTED,
|
||||
MEI_FILE_DISCONNECTING,
|
||||
MEI_FILE_DISCONNECT_REPLY,
|
||||
MEI_FILE_DISCONNECT_REQUIRED,
|
||||
MEI_FILE_DISCONNECTED,
|
||||
};
|
||||
|
||||
|
@ -135,6 +136,8 @@ enum mei_wd_states {
|
|||
* @MEI_FOP_CONNECT: connect
|
||||
* @MEI_FOP_DISCONNECT: disconnect
|
||||
* @MEI_FOP_DISCONNECT_RSP: disconnect response
|
||||
* @MEI_FOP_NOTIFY_START: start notification
|
||||
* @MEI_FOP_NOTIFY_STOP: stop notification
|
||||
*/
|
||||
enum mei_cb_file_ops {
|
||||
MEI_FOP_READ = 0,
|
||||
|
@ -142,6 +145,8 @@ enum mei_cb_file_ops {
|
|||
MEI_FOP_CONNECT,
|
||||
MEI_FOP_DISCONNECT,
|
||||
MEI_FOP_DISCONNECT_RSP,
|
||||
MEI_FOP_NOTIFY_START,
|
||||
MEI_FOP_NOTIFY_STOP,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -178,7 +183,7 @@ struct mei_fw_status {
|
|||
* @client_id: me client id
|
||||
* @mei_flow_ctrl_creds: flow control credits
|
||||
* @connect_count: number connections to this client
|
||||
* @reserved: reserved
|
||||
* @bus_added: added to bus
|
||||
*/
|
||||
struct mei_me_client {
|
||||
struct list_head list;
|
||||
|
@ -187,7 +192,7 @@ struct mei_me_client {
|
|||
u8 client_id;
|
||||
u8 mei_flow_ctrl_creds;
|
||||
u8 connect_count;
|
||||
u8 reserved;
|
||||
u8 bus_added;
|
||||
};
|
||||
|
||||
|
||||
|
@ -230,18 +235,21 @@ struct mei_cl_cb {
|
|||
* @tx_wait: wait queue for tx completion
|
||||
* @rx_wait: wait queue for rx completion
|
||||
* @wait: wait queue for management operation
|
||||
* @ev_wait: notification wait queue
|
||||
* @ev_async: event async notification
|
||||
* @status: connection status
|
||||
* @me_cl: fw client connected
|
||||
* @host_client_id: host id
|
||||
* @mei_flow_ctrl_creds: transmit flow credentials
|
||||
* @timer_count: watchdog timer for operation completion
|
||||
* @reserved: reserved for alignment
|
||||
* @notify_en: notification - enabled/disabled
|
||||
* @notify_ev: pending notification event
|
||||
* @writing_state: state of the tx
|
||||
* @rd_pending: pending read credits
|
||||
* @rd_completed: completed read
|
||||
*
|
||||
* @device: device on the mei client bus
|
||||
* @device_link: link to bus clients
|
||||
* @cldev: device on the mei client bus
|
||||
*/
|
||||
struct mei_cl {
|
||||
struct list_head link;
|
||||
|
@ -250,19 +258,21 @@ struct mei_cl {
|
|||
wait_queue_head_t tx_wait;
|
||||
wait_queue_head_t rx_wait;
|
||||
wait_queue_head_t wait;
|
||||
wait_queue_head_t ev_wait;
|
||||
struct fasync_struct *ev_async;
|
||||
int status;
|
||||
struct mei_me_client *me_cl;
|
||||
u8 host_client_id;
|
||||
u8 mei_flow_ctrl_creds;
|
||||
u8 timer_count;
|
||||
u8 reserved;
|
||||
u8 notify_en;
|
||||
u8 notify_ev;
|
||||
enum mei_file_transaction_states writing_state;
|
||||
struct list_head rd_pending;
|
||||
struct list_head rd_completed;
|
||||
|
||||
/* MEI CL bus data */
|
||||
struct mei_cl_device *device;
|
||||
struct list_head device_link;
|
||||
struct mei_cl_device *cldev;
|
||||
};
|
||||
|
||||
/** struct mei_hw_ops
|
||||
|
@ -329,21 +339,16 @@ struct mei_hw_ops {
|
|||
};
|
||||
|
||||
/* MEI bus API*/
|
||||
|
||||
struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
|
||||
struct mei_me_client *me_cl,
|
||||
struct mei_cl *cl,
|
||||
char *name);
|
||||
void mei_cl_remove_device(struct mei_cl_device *device);
|
||||
|
||||
void mei_cl_bus_rescan(struct mei_device *bus);
|
||||
void mei_cl_dev_fixup(struct mei_cl_device *dev);
|
||||
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
|
||||
bool blocking);
|
||||
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
|
||||
void mei_cl_bus_rx_event(struct mei_cl *cl);
|
||||
void mei_cl_bus_remove_devices(struct mei_device *dev);
|
||||
void mei_cl_bus_notify_event(struct mei_cl *cl);
|
||||
void mei_cl_bus_remove_devices(struct mei_device *bus);
|
||||
int mei_cl_bus_init(void);
|
||||
void mei_cl_bus_exit(void);
|
||||
struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
|
||||
|
||||
/**
|
||||
* enum mei_pg_event - power gating transition events
|
||||
|
@ -416,7 +421,10 @@ const char *mei_pg_state_str(enum mei_pg_state state);
|
|||
* @wr_msg : the buffer for hbm control messages
|
||||
*
|
||||
* @version : HBM protocol version in use
|
||||
* @hbm_f_pg_supported : hbm feature pgi protocol
|
||||
* @hbm_f_pg_supported : hbm feature pgi protocol
|
||||
* @hbm_f_dc_supported : hbm feature dynamic clients
|
||||
* @hbm_f_dot_supported : hbm feature disconnect on timeout
|
||||
* @hbm_f_ev_supported : hbm feature event notification
|
||||
*
|
||||
* @me_clients_rwsem: rw lock over me_clients list
|
||||
* @me_clients : list of FW clients
|
||||
|
@ -447,6 +455,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
|
|||
* @reset_work : work item for the device reset
|
||||
*
|
||||
* @device_list : mei client bus list
|
||||
* @cl_bus_lock : client bus list lock
|
||||
*
|
||||
* @dbgfs_dir : debugfs mei root directory
|
||||
*
|
||||
|
@ -509,6 +518,9 @@ struct mei_device {
|
|||
|
||||
struct hbm_version version;
|
||||
unsigned int hbm_f_pg_supported:1;
|
||||
unsigned int hbm_f_dc_supported:1;
|
||||
unsigned int hbm_f_dot_supported:1;
|
||||
unsigned int hbm_f_ev_supported:1;
|
||||
|
||||
struct rw_semaphore me_clients_rwsem;
|
||||
struct list_head me_clients;
|
||||
|
@ -543,6 +555,7 @@ struct mei_device {
|
|||
|
||||
/* List of bus devices */
|
||||
struct list_head device_list;
|
||||
struct mutex cl_bus_lock;
|
||||
|
||||
#if IS_ENABLED(CONFIG_DEBUG_FS)
|
||||
struct dentry *dbgfs_dir;
|
||||
|
|
|
@ -1,415 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Intel Management Engine Interface (Intel MEI) Linux driver
|
||||
* Copyright (c) 2003-2013, Intel Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <linux/mei_cl_bus.h>
|
||||
|
||||
#include "mei_dev.h"
|
||||
#include "client.h"
|
||||
|
||||
struct mei_nfc_cmd {
|
||||
u8 command;
|
||||
u8 status;
|
||||
u16 req_id;
|
||||
u32 reserved;
|
||||
u16 data_size;
|
||||
u8 sub_command;
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct mei_nfc_reply {
|
||||
u8 command;
|
||||
u8 status;
|
||||
u16 req_id;
|
||||
u32 reserved;
|
||||
u16 data_size;
|
||||
u8 sub_command;
|
||||
u8 reply_status;
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct mei_nfc_if_version {
|
||||
u8 radio_version_sw[3];
|
||||
u8 reserved[3];
|
||||
u8 radio_version_hw[3];
|
||||
u8 i2c_addr;
|
||||
u8 fw_ivn;
|
||||
u8 vendor_id;
|
||||
u8 radio_type;
|
||||
} __packed;
|
||||
|
||||
struct mei_nfc_connect {
|
||||
u8 fw_ivn;
|
||||
u8 vendor_id;
|
||||
} __packed;
|
||||
|
||||
struct mei_nfc_connect_resp {
|
||||
u8 fw_ivn;
|
||||
u8 vendor_id;
|
||||
u16 me_major;
|
||||
u16 me_minor;
|
||||
u16 me_hotfix;
|
||||
u16 me_build;
|
||||
} __packed;
|
||||
|
||||
struct mei_nfc_hci_hdr {
|
||||
u8 cmd;
|
||||
u8 status;
|
||||
u16 req_id;
|
||||
u32 reserved;
|
||||
u16 data_size;
|
||||
} __packed;
|
||||
|
||||
#define MEI_NFC_CMD_MAINTENANCE 0x00
|
||||
#define MEI_NFC_CMD_HCI_SEND 0x01
|
||||
#define MEI_NFC_CMD_HCI_RECV 0x02
|
||||
|
||||
#define MEI_NFC_SUBCMD_CONNECT 0x00
|
||||
#define MEI_NFC_SUBCMD_IF_VERSION 0x01
|
||||
|
||||
#define MEI_NFC_HEADER_SIZE 10
|
||||
|
||||
/**
|
||||
* struct mei_nfc_dev - NFC mei device
|
||||
*
|
||||
* @me_cl: NFC me client
|
||||
* @cl: NFC host client
|
||||
* @cl_info: NFC info host client
|
||||
* @init_work: perform connection to the info client
|
||||
* @fw_ivn: NFC Interface Version Number
|
||||
* @vendor_id: NFC manufacturer ID
|
||||
* @radio_type: NFC radio type
|
||||
* @bus_name: bus name
|
||||
*
|
||||
*/
|
||||
struct mei_nfc_dev {
|
||||
struct mei_me_client *me_cl;
|
||||
struct mei_cl *cl;
|
||||
struct mei_cl *cl_info;
|
||||
struct work_struct init_work;
|
||||
u8 fw_ivn;
|
||||
u8 vendor_id;
|
||||
u8 radio_type;
|
||||
char *bus_name;
|
||||
};
|
||||
|
||||
/* UUIDs for NFC F/W clients */
|
||||
const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50,
|
||||
0x94, 0xd4, 0x50, 0x26,
|
||||
0x67, 0x23, 0x77, 0x5c);
|
||||
|
||||
static const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d,
|
||||
0x48, 0xa4, 0xef, 0xab,
|
||||
0xba, 0x8a, 0x12, 0x06);
|
||||
|
||||
/* Vendors */
|
||||
#define MEI_NFC_VENDOR_INSIDE 0x00
|
||||
#define MEI_NFC_VENDOR_NXP 0x01
|
||||
|
||||
/* Radio types */
|
||||
#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
|
||||
#define MEI_NFC_VENDOR_NXP_PN544 0x01
|
||||
|
||||
static void mei_nfc_free(struct mei_nfc_dev *ndev)
|
||||
{
|
||||
if (!ndev)
|
||||
return;
|
||||
|
||||
if (ndev->cl) {
|
||||
list_del(&ndev->cl->device_link);
|
||||
mei_cl_unlink(ndev->cl);
|
||||
kfree(ndev->cl);
|
||||
}
|
||||
|
||||
if (ndev->cl_info) {
|
||||
list_del(&ndev->cl_info->device_link);
|
||||
mei_cl_unlink(ndev->cl_info);
|
||||
kfree(ndev->cl_info);
|
||||
}
|
||||
|
||||
mei_me_cl_put(ndev->me_cl);
|
||||
kfree(ndev);
|
||||
}
|
||||
|
||||
static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
|
||||
{
|
||||
struct mei_device *dev;
|
||||
|
||||
if (!ndev->cl)
|
||||
return -ENODEV;
|
||||
|
||||
dev = ndev->cl->dev;
|
||||
|
||||
switch (ndev->vendor_id) {
|
||||
case MEI_NFC_VENDOR_INSIDE:
|
||||
switch (ndev->radio_type) {
|
||||
case MEI_NFC_VENDOR_INSIDE_UREAD:
|
||||
ndev->bus_name = "microread";
|
||||
return 0;
|
||||
|
||||
default:
|
||||
dev_err(dev->dev, "Unknown radio type 0x%x\n",
|
||||
ndev->radio_type);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
case MEI_NFC_VENDOR_NXP:
|
||||
switch (ndev->radio_type) {
|
||||
case MEI_NFC_VENDOR_NXP_PN544:
|
||||
ndev->bus_name = "pn544";
|
||||
return 0;
|
||||
default:
|
||||
dev_err(dev->dev, "Unknown radio type 0x%x\n",
|
||||
ndev->radio_type);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
default:
|
||||
dev_err(dev->dev, "Unknown vendor ID 0x%x\n",
|
||||
ndev->vendor_id);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
|
||||
{
|
||||
struct mei_device *dev;
|
||||
struct mei_cl *cl;
|
||||
|
||||
struct mei_nfc_cmd cmd;
|
||||
struct mei_nfc_reply *reply = NULL;
|
||||
struct mei_nfc_if_version *version;
|
||||
size_t if_version_length;
|
||||
int bytes_recv, ret;
|
||||
|
||||
cl = ndev->cl_info;
|
||||
dev = cl->dev;
|
||||
|
||||
memset(&cmd, 0, sizeof(struct mei_nfc_cmd));
|
||||
cmd.command = MEI_NFC_CMD_MAINTENANCE;
|
||||
cmd.data_size = 1;
|
||||
cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
|
||||
|
||||
ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
|
||||
if (ret < 0) {
|
||||
dev_err(dev->dev, "Could not send IF version cmd\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* to be sure on the stack we alloc memory */
|
||||
if_version_length = sizeof(struct mei_nfc_reply) +
|
||||
sizeof(struct mei_nfc_if_version);
|
||||
|
||||
reply = kzalloc(if_version_length, GFP_KERNEL);
|
||||
if (!reply)
|
||||
return -ENOMEM;
|
||||
|
||||
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
|
||||
if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
|
||||
dev_err(dev->dev, "Could not read IF version\n");
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
version = (struct mei_nfc_if_version *)reply->data;
|
||||
|
||||
ndev->fw_ivn = version->fw_ivn;
|
||||
ndev->vendor_id = version->vendor_id;
|
||||
ndev->radio_type = version->radio_type;
|
||||
|
||||
err:
|
||||
kfree(reply);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mei_nfc_init(struct work_struct *work)
|
||||
{
|
||||
struct mei_device *dev;
|
||||
struct mei_cl_device *cldev;
|
||||
struct mei_nfc_dev *ndev;
|
||||
struct mei_cl *cl_info;
|
||||
struct mei_me_client *me_cl_info;
|
||||
|
||||
ndev = container_of(work, struct mei_nfc_dev, init_work);
|
||||
|
||||
cl_info = ndev->cl_info;
|
||||
dev = cl_info->dev;
|
||||
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
/* check for valid client id */
|
||||
me_cl_info = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
|
||||
if (!me_cl_info) {
|
||||
mutex_unlock(&dev->device_lock);
|
||||
dev_info(dev->dev, "nfc: failed to find the info client\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (mei_cl_connect(cl_info, me_cl_info, NULL) < 0) {
|
||||
mei_me_cl_put(me_cl_info);
|
||||
mutex_unlock(&dev->device_lock);
|
||||
dev_err(dev->dev, "Could not connect to the NFC INFO ME client");
|
||||
|
||||
goto err;
|
||||
}
|
||||
mei_me_cl_put(me_cl_info);
|
||||
mutex_unlock(&dev->device_lock);
|
||||
|
||||
if (mei_nfc_if_version(ndev) < 0) {
|
||||
dev_err(dev->dev, "Could not get the NFC interface version");
|
||||
|
||||
goto err;
|
||||
}
|
||||
|
||||
dev_info(dev->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
|
||||
ndev->fw_ivn, ndev->vendor_id, ndev->radio_type);
|
||||
|
||||
mutex_lock(&dev->device_lock);
|
||||
|
||||
if (mei_cl_disconnect(cl_info) < 0) {
|
||||
mutex_unlock(&dev->device_lock);
|
||||
dev_err(dev->dev, "Could not disconnect the NFC INFO ME client");
|
||||
|
||||
goto err;
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->device_lock);
|
||||
|
||||
if (mei_nfc_build_bus_name(ndev) < 0) {
|
||||
dev_err(dev->dev, "Could not build the bus ID name\n");
|
||||
return;
|
||||
}
|
||||
|
||||
cldev = mei_cl_add_device(dev, ndev->me_cl, ndev->cl,
|
||||
ndev->bus_name);
|
||||
if (!cldev) {
|
||||
dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n");
|
||||
|
||||
goto err;
|
||||
}
|
||||
|
||||
cldev->priv_data = ndev;
|
||||
|
||||
|
||||
return;
|
||||
|
||||
err:
|
||||
mutex_lock(&dev->device_lock);
|
||||
mei_nfc_free(ndev);
|
||||
mutex_unlock(&dev->device_lock);
|
||||
|
||||
}
|
||||
|
||||
|
||||
int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
|
||||
{
|
||||
struct mei_nfc_dev *ndev;
|
||||
struct mei_cl *cl_info, *cl;
|
||||
int ret;
|
||||
|
||||
|
||||
/* in case of internal reset bail out
|
||||
* as the device is already setup
|
||||
*/
|
||||
cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
|
||||
if (cl)
|
||||
return 0;
|
||||
|
||||
ndev = kzalloc(sizeof(struct mei_nfc_dev), GFP_KERNEL);
|
||||
if (!ndev) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
ndev->me_cl = mei_me_cl_get(me_cl);
|
||||
if (!ndev->me_cl) {
|
||||
ret = -ENODEV;
|
||||
goto err;
|
||||
}
|
||||
|
||||
cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
|
||||
if (IS_ERR(cl_info)) {
|
||||
ret = PTR_ERR(cl_info);
|
||||
goto err;
|
||||
}
|
||||
|
||||
list_add_tail(&cl_info->device_link, &dev->device_list);
|
||||
|
||||
ndev->cl_info = cl_info;
|
||||
|
||||
cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
|
||||
if (IS_ERR(cl)) {
|
||||
ret = PTR_ERR(cl);
|
||||
goto err;
|
||||
}
|
||||
|
||||
list_add_tail(&cl->device_link, &dev->device_list);
|
||||
|
||||
ndev->cl = cl;
|
||||
|
||||
INIT_WORK(&ndev->init_work, mei_nfc_init);
|
||||
schedule_work(&ndev->init_work);
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
mei_nfc_free(ndev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void mei_nfc_host_exit(struct mei_device *dev)
|
||||
{
|
||||
struct mei_nfc_dev *ndev;
|
||||
struct mei_cl *cl;
|
||||
struct mei_cl_device *cldev;
|
||||
|
||||
cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
|
||||
if (!cl)
|
||||
return;
|
||||
|
||||
cldev = cl->device;
|
||||
if (!cldev)
|
||||
return;
|
||||
|
||||
ndev = (struct mei_nfc_dev *)cldev->priv_data;
|
||||
if (ndev)
|
||||
cancel_work_sync(&ndev->init_work);
|
||||
|
||||
cldev->priv_data = NULL;
|
||||
|
||||
/* Need to remove the device here
|
||||
* since mei_nfc_free will unlink the clients
|
||||
*/
|
||||
mei_cl_remove_device(cldev);
|
||||
|
||||
mutex_lock(&dev->device_lock);
|
||||
mei_nfc_free(ndev);
|
||||
mutex_unlock(&dev->device_lock);
|
||||
}
|
||||
|
||||
|
|
@ -82,6 +82,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
|
|||
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},
|
||||
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
|
||||
|
||||
/* required last entry */
|
||||
{0, }
|
||||
};
|
||||
|
@ -128,6 +133,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
|
||||
struct mei_device *dev;
|
||||
struct mei_me_hw *hw;
|
||||
unsigned int irqflags;
|
||||
int err;
|
||||
|
||||
|
||||
|
@ -180,17 +186,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
pci_enable_msi(pdev);
|
||||
|
||||
/* request and enable interrupt */
|
||||
if (pci_dev_msi_enabled(pdev))
|
||||
err = request_threaded_irq(pdev->irq,
|
||||
NULL,
|
||||
mei_me_irq_thread_handler,
|
||||
IRQF_ONESHOT, KBUILD_MODNAME, dev);
|
||||
else
|
||||
err = request_threaded_irq(pdev->irq,
|
||||
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
|
||||
|
||||
err = request_threaded_irq(pdev->irq,
|
||||
mei_me_irq_quick_handler,
|
||||
mei_me_irq_thread_handler,
|
||||
IRQF_SHARED, KBUILD_MODNAME, dev);
|
||||
|
||||
irqflags, KBUILD_MODNAME, dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
|
||||
pdev->irq);
|
||||
|
@ -319,6 +320,7 @@ static int mei_me_pci_resume(struct device *device)
|
|||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct mei_device *dev;
|
||||
unsigned int irqflags;
|
||||
int err;
|
||||
|
||||
dev = pci_get_drvdata(pdev);
|
||||
|
@ -327,17 +329,13 @@ static int mei_me_pci_resume(struct device *device)
|
|||
|
||||
pci_enable_msi(pdev);
|
||||
|
||||
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
|
||||
|
||||
/* request and enable interrupt */
|
||||
if (pci_dev_msi_enabled(pdev))
|
||||
err = request_threaded_irq(pdev->irq,
|
||||
NULL,
|
||||
mei_me_irq_thread_handler,
|
||||
IRQF_ONESHOT, KBUILD_MODNAME, dev);
|
||||
else
|
||||
err = request_threaded_irq(pdev->irq,
|
||||
err = request_threaded_irq(pdev->irq,
|
||||
mei_me_irq_quick_handler,
|
||||
mei_me_irq_thread_handler,
|
||||
IRQF_SHARED, KBUILD_MODNAME, dev);
|
||||
irqflags, KBUILD_MODNAME, dev);
|
||||
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
|
||||
|
|
|
@ -0,0 +1,152 @@
|
|||
/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015, Sony Mobile Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
struct qcom_coincell {
|
||||
struct device *dev;
|
||||
struct regmap *regmap;
|
||||
u32 base_addr;
|
||||
};
|
||||
|
||||
#define QCOM_COINCELL_REG_RSET 0x44
|
||||
#define QCOM_COINCELL_REG_VSET 0x45
|
||||
#define QCOM_COINCELL_REG_ENABLE 0x46
|
||||
|
||||
#define QCOM_COINCELL_ENABLE BIT(7)
|
||||
|
||||
static const int qcom_rset_map[] = { 2100, 1700, 1200, 800 };
|
||||
static const int qcom_vset_map[] = { 2500, 3200, 3100, 3000 };
|
||||
/* NOTE: for pm8921 and others, voltage of 2500 is 16 (10000b), not 0 */
|
||||
|
||||
/* if enable==0, rset and vset are ignored */
|
||||
static int qcom_coincell_chgr_config(struct qcom_coincell *chgr, int rset,
|
||||
int vset, bool enable)
|
||||
{
|
||||
int i, j, rc;
|
||||
|
||||
/* if disabling, just do that and skip other operations */
|
||||
if (!enable)
|
||||
return regmap_write(chgr->regmap,
|
||||
chgr->base_addr + QCOM_COINCELL_REG_ENABLE, 0);
|
||||
|
||||
/* find index for current-limiting resistor */
|
||||
for (i = 0; i < ARRAY_SIZE(qcom_rset_map); i++)
|
||||
if (rset == qcom_rset_map[i])
|
||||
break;
|
||||
|
||||
if (i >= ARRAY_SIZE(qcom_rset_map)) {
|
||||
dev_err(chgr->dev, "invalid rset-ohms value %d\n", rset);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* find index for charge voltage */
|
||||
for (j = 0; j < ARRAY_SIZE(qcom_vset_map); j++)
|
||||
if (vset == qcom_vset_map[j])
|
||||
break;
|
||||
|
||||
if (j >= ARRAY_SIZE(qcom_vset_map)) {
|
||||
dev_err(chgr->dev, "invalid vset-millivolts value %d\n", vset);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = regmap_write(chgr->regmap,
|
||||
chgr->base_addr + QCOM_COINCELL_REG_RSET, i);
|
||||
if (rc) {
|
||||
/*
|
||||
* This is mainly to flag a bad base_addr (reg) from dts.
|
||||
* Other failures writing to the registers should be
|
||||
* extremely rare, or indicative of problems that
|
||||
* should be reported elsewhere (eg. spmi failure).
|
||||
*/
|
||||
dev_err(chgr->dev, "could not write to RSET register\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = regmap_write(chgr->regmap,
|
||||
chgr->base_addr + QCOM_COINCELL_REG_VSET, j);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* set 'enable' register */
|
||||
return regmap_write(chgr->regmap,
|
||||
chgr->base_addr + QCOM_COINCELL_REG_ENABLE,
|
||||
QCOM_COINCELL_ENABLE);
|
||||
}
|
||||
|
||||
static int qcom_coincell_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *node = pdev->dev.of_node;
|
||||
struct qcom_coincell chgr;
|
||||
u32 rset, vset;
|
||||
bool enable;
|
||||
int rc;
|
||||
|
||||
chgr.dev = &pdev->dev;
|
||||
|
||||
chgr.regmap = dev_get_regmap(pdev->dev.parent, NULL);
|
||||
if (!chgr.regmap) {
|
||||
dev_err(chgr.dev, "Unable to get regmap\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = of_property_read_u32(node, "reg", &chgr.base_addr);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
enable = !of_property_read_bool(node, "qcom,charger-disable");
|
||||
|
||||
if (enable) {
|
||||
rc = of_property_read_u32(node, "qcom,rset-ohms", &rset);
|
||||
if (rc) {
|
||||
dev_err(chgr.dev,
|
||||
"can't find 'qcom,rset-ohms' in DT block");
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = of_property_read_u32(node, "qcom,vset-millivolts", &vset);
|
||||
if (rc) {
|
||||
dev_err(chgr.dev,
|
||||
"can't find 'qcom,vset-millivolts' in DT block");
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
return qcom_coincell_chgr_config(&chgr, rset, vset, enable);
|
||||
}
|
||||
|
||||
static const struct of_device_id qcom_coincell_match_table[] = {
|
||||
{ .compatible = "qcom,pm8941-coincell", },
|
||||
{}
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(of, qcom_coincell_match_table);
|
||||
|
||||
static struct platform_driver qcom_coincell_driver = {
|
||||
.driver = {
|
||||
.name = "qcom-spmi-coincell",
|
||||
.of_match_table = qcom_coincell_match_table,
|
||||
},
|
||||
.probe = qcom_coincell_probe,
|
||||
};
|
||||
|
||||
module_platform_driver(qcom_coincell_driver);
|
||||
|
||||
MODULE_DESCRIPTION("Qualcomm PMIC coincell charger driver");
|
||||
MODULE_LICENSE("GPL v2");
|
|
@ -36,8 +36,6 @@
|
|||
#include <linux/skbuff.h>
|
||||
#include <linux/ti_wilink_st.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
|
||||
static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
|
||||
|
@ -45,9 +43,6 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
|
|||
/**********************************************************************/
|
||||
/* internal functions */
|
||||
|
||||
struct ti_st_plat_data *dt_pdata;
|
||||
static struct ti_st_plat_data *get_platform_data(struct device *dev);
|
||||
|
||||
/**
|
||||
* st_get_plat_device -
|
||||
* function which returns the reference to the platform device
|
||||
|
@ -469,12 +464,7 @@ long st_kim_start(void *kim_data)
|
|||
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
|
||||
|
||||
pr_info(" %s", __func__);
|
||||
if (kim_gdata->kim_pdev->dev.of_node) {
|
||||
pr_debug("use device tree data");
|
||||
pdata = dt_pdata;
|
||||
} else {
|
||||
pdata = kim_gdata->kim_pdev->dev.platform_data;
|
||||
}
|
||||
pdata = kim_gdata->kim_pdev->dev.platform_data;
|
||||
|
||||
do {
|
||||
/* platform specific enabling code here */
|
||||
|
@ -482,9 +472,9 @@ long st_kim_start(void *kim_data)
|
|||
pdata->chip_enable(kim_gdata);
|
||||
|
||||
/* Configure BT nShutdown to HIGH state */
|
||||
gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
|
||||
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
|
||||
mdelay(5); /* FIXME: a proper toggle */
|
||||
gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
|
||||
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
|
||||
mdelay(100);
|
||||
/* re-initialize the completion */
|
||||
reinit_completion(&kim_gdata->ldisc_installed);
|
||||
|
@ -534,18 +524,12 @@ long st_kim_stop(void *kim_data)
|
|||
{
|
||||
long err = 0;
|
||||
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
|
||||
struct ti_st_plat_data *pdata;
|
||||
struct ti_st_plat_data *pdata =
|
||||
kim_gdata->kim_pdev->dev.platform_data;
|
||||
struct tty_struct *tty = kim_gdata->core_data->tty;
|
||||
|
||||
reinit_completion(&kim_gdata->ldisc_installed);
|
||||
|
||||
if (kim_gdata->kim_pdev->dev.of_node) {
|
||||
pr_debug("use device tree data");
|
||||
pdata = dt_pdata;
|
||||
} else
|
||||
pdata = kim_gdata->kim_pdev->dev.platform_data;
|
||||
|
||||
|
||||
if (tty) { /* can be called before ldisc is installed */
|
||||
/* Flush any pending characters in the driver and discipline. */
|
||||
tty_ldisc_flush(tty);
|
||||
|
@ -566,11 +550,11 @@ long st_kim_stop(void *kim_data)
|
|||
}
|
||||
|
||||
/* By default configure BT nShutdown to LOW state */
|
||||
gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
|
||||
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
|
||||
mdelay(1);
|
||||
gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
|
||||
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
|
||||
mdelay(1);
|
||||
gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
|
||||
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
|
||||
|
||||
/* platform specific disable */
|
||||
if (pdata->chip_disable)
|
||||
|
@ -737,52 +721,13 @@ static const struct file_operations list_debugfs_fops = {
|
|||
* board-*.c file
|
||||
*/
|
||||
|
||||
static const struct of_device_id kim_of_match[] = {
|
||||
{
|
||||
.compatible = "kim",
|
||||
},
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, kim_of_match);
|
||||
|
||||
static struct ti_st_plat_data *get_platform_data(struct device *dev)
|
||||
{
|
||||
struct device_node *np = dev->of_node;
|
||||
const u32 *dt_property;
|
||||
int len;
|
||||
|
||||
dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
|
||||
if (!dt_pdata)
|
||||
return NULL;
|
||||
|
||||
dt_property = of_get_property(np, "dev_name", &len);
|
||||
if (dt_property)
|
||||
memcpy(&dt_pdata->dev_name, dt_property, len);
|
||||
of_property_read_u32(np, "nshutdown_gpio",
|
||||
&dt_pdata->nshutdown_gpio);
|
||||
of_property_read_u32(np, "flow_cntrl", &dt_pdata->flow_cntrl);
|
||||
of_property_read_u32(np, "baud_rate", &dt_pdata->baud_rate);
|
||||
|
||||
return dt_pdata;
|
||||
}
|
||||
|
||||
static struct dentry *kim_debugfs_dir;
|
||||
static int kim_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct kim_data_s *kim_gdata;
|
||||
struct ti_st_plat_data *pdata;
|
||||
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
|
||||
int err;
|
||||
|
||||
if (pdev->dev.of_node)
|
||||
pdata = get_platform_data(&pdev->dev);
|
||||
else
|
||||
pdata = pdev->dev.platform_data;
|
||||
|
||||
if (pdata == NULL) {
|
||||
dev_err(&pdev->dev, "Platform Data is missing\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
|
||||
/* multiple devices could exist */
|
||||
st_kim_devices[pdev->id] = pdev;
|
||||
|
@ -863,16 +808,9 @@ err_core_init:
|
|||
static int kim_remove(struct platform_device *pdev)
|
||||
{
|
||||
/* free the GPIOs requested */
|
||||
struct ti_st_plat_data *pdata;
|
||||
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
|
||||
struct kim_data_s *kim_gdata;
|
||||
|
||||
if (pdev->dev.of_node) {
|
||||
pr_debug("use device tree data");
|
||||
pdata = dt_pdata;
|
||||
} else {
|
||||
pdata = pdev->dev.platform_data;
|
||||
}
|
||||
|
||||
kim_gdata = platform_get_drvdata(pdev);
|
||||
|
||||
/* Free the Bluetooth/FM/GPIO
|
||||
|
@ -890,22 +828,12 @@ static int kim_remove(struct platform_device *pdev)
|
|||
|
||||
kfree(kim_gdata);
|
||||
kim_gdata = NULL;
|
||||
kfree(dt_pdata);
|
||||
dt_pdata = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kim_suspend(struct platform_device *pdev, pm_message_t state)
|
||||
{
|
||||
struct ti_st_plat_data *pdata;
|
||||
|
||||
if (pdev->dev.of_node) {
|
||||
pr_debug("use device tree data");
|
||||
pdata = dt_pdata;
|
||||
} else {
|
||||
pdata = pdev->dev.platform_data;
|
||||
}
|
||||
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
|
||||
|
||||
if (pdata->suspend)
|
||||
return pdata->suspend(pdev, state);
|
||||
|
@ -915,14 +843,7 @@ static int kim_suspend(struct platform_device *pdev, pm_message_t state)
|
|||
|
||||
static int kim_resume(struct platform_device *pdev)
|
||||
{
|
||||
struct ti_st_plat_data *pdata;
|
||||
|
||||
if (pdev->dev.of_node) {
|
||||
pr_debug("use device tree data");
|
||||
pdata = dt_pdata;
|
||||
} else {
|
||||
pdata = pdev->dev.platform_data;
|
||||
}
|
||||
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
|
||||
|
||||
if (pdata->resume)
|
||||
return pdata->resume(pdev);
|
||||
|
@ -939,8 +860,6 @@ static struct platform_driver kim_platform_driver = {
|
|||
.resume = kim_resume,
|
||||
.driver = {
|
||||
.name = "kim",
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = of_match_ptr(kim_of_match),
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
#include <linux/ti_wilink_st.h>
|
||||
|
||||
/**********************************************************************/
|
||||
|
||||
/* internal functions */
|
||||
static void send_ll_cmd(struct st_data_s *st_data,
|
||||
unsigned char cmd)
|
||||
|
@ -54,13 +53,7 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
|
|||
|
||||
/* communicate to platform about chip asleep */
|
||||
kim_data = st_data->kim_data;
|
||||
if (kim_data->kim_pdev->dev.of_node) {
|
||||
pr_debug("use device tree data");
|
||||
pdata = dt_pdata;
|
||||
} else {
|
||||
pdata = kim_data->kim_pdev->dev.platform_data;
|
||||
}
|
||||
|
||||
pdata = kim_data->kim_pdev->dev.platform_data;
|
||||
if (pdata->chip_asleep)
|
||||
pdata->chip_asleep(NULL);
|
||||
}
|
||||
|
@ -93,13 +86,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
|
|||
|
||||
/* communicate to platform about chip wakeup */
|
||||
kim_data = st_data->kim_data;
|
||||
if (kim_data->kim_pdev->dev.of_node) {
|
||||
pr_debug("use device tree data");
|
||||
pdata = dt_pdata;
|
||||
} else {
|
||||
pdata = kim_data->kim_pdev->dev.platform_data;
|
||||
}
|
||||
|
||||
pdata = kim_data->kim_pdev->dev.platform_data;
|
||||
if (pdata->chip_awake)
|
||||
pdata->chip_awake(NULL);
|
||||
}
|
||||
|
|
|
@ -446,7 +446,6 @@ MODULE_DEVICE_TABLE(i2c, tsl2550_id);
|
|||
static struct i2c_driver tsl2550_driver = {
|
||||
.driver = {
|
||||
.name = TSL2550_DRV_NAME,
|
||||
.owner = THIS_MODULE,
|
||||
.pm = TSL2550_PM_OPS,
|
||||
},
|
||||
.probe = tsl2550_probe,
|
||||
|
|
|
@ -46,7 +46,7 @@
|
|||
|
||||
MODULE_AUTHOR("VMware, Inc.");
|
||||
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
|
||||
MODULE_VERSION("1.2.1.3-k");
|
||||
MODULE_VERSION("1.3.0.0-k");
|
||||
MODULE_ALIAS("dmi:*:svnVMware*:*");
|
||||
MODULE_ALIAS("vmware_vmmemctl");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -110,9 +110,18 @@ MODULE_LICENSE("GPL");
|
|||
*/
|
||||
#define VMW_BALLOON_HV_PORT 0x5670
|
||||
#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
|
||||
#define VMW_BALLOON_PROTOCOL_VERSION 2
|
||||
#define VMW_BALLOON_GUEST_ID 1 /* Linux */
|
||||
|
||||
enum vmwballoon_capabilities {
|
||||
/*
|
||||
* Bit 0 is reserved and not associated to any capability.
|
||||
*/
|
||||
VMW_BALLOON_BASIC_CMDS = (1 << 1),
|
||||
VMW_BALLOON_BATCHED_CMDS = (1 << 2)
|
||||
};
|
||||
|
||||
#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS)
|
||||
|
||||
#define VMW_BALLOON_CMD_START 0
|
||||
#define VMW_BALLOON_CMD_GET_TARGET 1
|
||||
#define VMW_BALLOON_CMD_LOCK 2
|
||||
|
@@ -120,32 +129,36 @@ MODULE_LICENSE("GPL");
#define VMW_BALLOON_CMD_GUEST_ID 4

/* error codes */
#define VMW_BALLOON_SUCCESS 0
#define VMW_BALLOON_FAILURE -1
#define VMW_BALLOON_ERROR_CMD_INVALID 1
#define VMW_BALLOON_ERROR_PPN_INVALID 2
#define VMW_BALLOON_ERROR_PPN_LOCKED 3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
#define VMW_BALLOON_ERROR_PPN_PINNED 5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
#define VMW_BALLOON_ERROR_RESET 7
#define VMW_BALLOON_ERROR_BUSY 8
#define VMW_BALLOON_SUCCESS 0
#define VMW_BALLOON_FAILURE -1
#define VMW_BALLOON_ERROR_CMD_INVALID 1
#define VMW_BALLOON_ERROR_PPN_INVALID 2
#define VMW_BALLOON_ERROR_PPN_LOCKED 3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
#define VMW_BALLOON_ERROR_PPN_PINNED 5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
#define VMW_BALLOON_ERROR_RESET 7
#define VMW_BALLOON_ERROR_BUSY 8

#define VMWARE_BALLOON_CMD(cmd, data, result) \
({ \
	unsigned long __stat, __dummy1, __dummy2; \
	__asm__ __volatile__ ("inl %%dx" : \
		"=a"(__stat), \
		"=c"(__dummy1), \
		"=d"(__dummy2), \
		"=b"(result) : \
		"0"(VMW_BALLOON_HV_MAGIC), \
		"1"(VMW_BALLOON_CMD_##cmd), \
		"2"(VMW_BALLOON_HV_PORT), \
		"3"(data) : \
		"memory"); \
	result &= -1UL; \
	__stat & -1UL; \
#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)

#define VMWARE_BALLOON_CMD(cmd, data, result) \
({ \
	unsigned long __status, __dummy1, __dummy2; \
	__asm__ __volatile__ ("inl %%dx" : \
		"=a"(__status), \
		"=c"(__dummy1), \
		"=d"(__dummy2), \
		"=b"(result) : \
		"0"(VMW_BALLOON_HV_MAGIC), \
		"1"(VMW_BALLOON_CMD_##cmd), \
		"2"(VMW_BALLOON_HV_PORT), \
		"3"(data) : \
		"memory"); \
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \
		result = __dummy1; \
	result &= -1UL; \
	__status & -1UL; \
})

#ifdef CONFIG_DEBUG_FS

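As a quick orientation for the hunk above: the macro drives the hypervisor "backdoor" with an inl on port VMW_BALLOON_HV_PORT, putting the magic in %eax, the command number in %ecx and the data operand in %ebx; the status comes back in %eax and the command result in %ebx. A minimal sketch of a caller follows. The wrapper name and its parameters are hypothetical, written only to illustrate the macro's calling convention, not code taken from this merge:

/* Illustrative only: how VMWARE_BALLOON_CMD is meant to be invoked. */
static bool example_send_get_target(unsigned long limit, unsigned long *new_target)
{
	unsigned long status, target;

	/* "limit" goes to the host in %ebx; the reply also comes back in %ebx. */
	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
	if (status != VMW_BALLOON_SUCCESS)
		return false;

	*new_target = target;
	return true;
}

The only command the rewritten macro treats specially is START, for which the result is taken from %ecx instead, as the added branch above shows.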
@@ -223,11 +236,12 @@ static struct vmballoon balloon;
 */
static bool vmballoon_send_start(struct vmballoon *b)
{
	unsigned long status, dummy;
	unsigned long status, capabilities;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_CAPABILITIES,
				    capabilities);
	if (status == VMW_BALLOON_SUCCESS)
		return true;

@@ -402,55 +416,37 @@ static void vmballoon_reset(struct vmballoon *b)
}

/*
 * Allocate (or reserve) a page for the balloon and notify the host. If host
 * refuses the page put it on "refuse" list and allocate another one until host
 * is satisfied. "Refused" pages are released at the end of inflation cycle
 * (when we allocate b->rate_alloc pages).
 * Notify the host of a ballooned page. If host rejects the page put it on the
 * refuse list, those refused page are then released at the end of the
 * inflation cycle.
 */
static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
static int vmballoon_lock_page(struct vmballoon *b, struct page *page)
{
	struct page *page;
	gfp_t flags;
	unsigned int hv_status;
	int locked;
	flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
	int locked, hv_status;

	do {
		if (!can_sleep)
			STATS_INC(b->stats.alloc);
		else
			STATS_INC(b->stats.sleep_alloc);
	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
	if (locked > 0) {
		STATS_INC(b->stats.refused_alloc);

		page = alloc_page(flags);
		if (!page) {
			if (!can_sleep)
				STATS_INC(b->stats.alloc_fail);
			else
				STATS_INC(b->stats.sleep_alloc_fail);
			return -ENOMEM;
		if (hv_status == VMW_BALLOON_ERROR_RESET ||
		    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
			__free_page(page);
			return -EIO;
		}

		/* inform monitor */
		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
		if (locked > 0) {
			STATS_INC(b->stats.refused_alloc);

			if (hv_status == VMW_BALLOON_ERROR_RESET ||
			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
				__free_page(page);
				return -EIO;
			}

			/*
			 * Place page on the list of non-balloonable pages
			 * and retry allocation, unless we already accumulated
			 * too many of them, in which case take a breather.
			 */
		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
			if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
				b->n_refused_pages++;
				list_add(&page->lru, &b->refused_pages);
		if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
			return -EIO;
			} else {
				__free_page(page);
			}
	} while (locked != 0);
		return -EIO;
	}

	/* track allocated page */
	list_add(&page->lru, &b->pages);

@@ -512,7 +508,7 @@ static void vmballoon_inflate(struct vmballoon *b)
	unsigned int i;
	unsigned int allocations = 0;
	int error = 0;
	bool alloc_can_sleep = false;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

@@ -543,19 +539,16 @@ static void vmballoon_inflate(struct vmballoon *b)
		 __func__, goal, rate, b->rate_alloc);

	for (i = 0; i < goal; i++) {
		struct page *page;

		error = vmballoon_reserve_page(b, alloc_can_sleep);
		if (error) {
			if (error != -ENOMEM) {
				/*
				 * Not a page allocation failure, stop this
				 * cycle. Maybe we'll get new target from
				 * the host soon.
				 */
				break;
			}
		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc);
		else
			STATS_INC(b->stats.sleep_alloc);

			if (alloc_can_sleep) {
		page = alloc_page(flags);
		if (!page) {
			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly

@@ -563,8 +556,10 @@ static void vmballoon_inflate(struct vmballoon *b)
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						    VMW_BALLOON_RATE_ALLOC_MIN);
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}
			STATS_INC(b->stats.alloc_fail);

			/*
			 * NOSLEEP page allocation failed, so the guest is

@@ -579,11 +574,16 @@ static void vmballoon_inflate(struct vmballoon *b)
			if (i >= b->rate_alloc)
				break;

			alloc_can_sleep = true;
			flags = VMW_PAGE_ALLOC_CANSLEEP;
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
			continue;
		}

		error = vmballoon_lock_page(b, page);
		if (error)
			break;

		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
			cond_resched();
			allocations = 0;

@@ -1031,14 +1031,9 @@ int __init vmci_host_init(void)

void __exit vmci_host_exit(void)
{
	int error;

	vmci_host_device_initialized = false;

	error = misc_deregister(&vmci_host_miscdev);
	if (error)
		pr_warn("Error unregistering character device: %d\n", error);

	misc_deregister(&vmci_host_miscdev);
	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

@@ -355,7 +355,8 @@ static int nfc_mei_phy_enable(void *phy_id)
		goto err;
	}

	r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
	r = mei_cl_register_event_cb(phy->device, BIT(MEI_CL_EVENT_RX),
				     nfc_mei_event_cb, phy);
	if (r) {
		pr_err("Event cb registration failed %d\n", r);
		goto err;

@@ -0,0 +1,39 @@
menuconfig NVMEM
	tristate "NVMEM Support"
	select REGMAP
	help
	  Support for NVMEM (Non Volatile Memory) devices like EEPROM, EFUSES...

	  This framework is designed to provide a generic interface to NVMEM
	  from both the Linux kernel and userspace.

	  This driver can also be built as a module. If so, the module
	  will be called nvmem_core.

	  If unsure, say no.

if NVMEM

config QCOM_QFPROM
	tristate "QCOM QFPROM Support"
	depends on ARCH_QCOM || COMPILE_TEST
	select REGMAP_MMIO
	help
	  Say y here to enable QFPROM support. The QFPROM provides access
	  functions for QFPROM data to the rest of the drivers via the nvmem interface.

	  This driver can also be built as a module. If so, the module
	  will be called nvmem_qfprom.

config NVMEM_SUNXI_SID
	tristate "Allwinner SoCs SID support"
	depends on ARCH_SUNXI
	select REGMAP_MMIO
	help
	  This is a driver for the 'security ID' available on various Allwinner
	  devices.

	  This driver can also be built as a module. If so, the module
	  will be called nvmem_sunxi_sid.

endif
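The help text above only says in general terms that the framework gives the kernel a generic interface to NVMEM devices. For readers who want a concrete picture, here is a minimal consumer-side sketch. The probe helper and the cell name "calibration" are invented for the example; the calls themselves (nvmem_cell_get(), nvmem_cell_read(), nvmem_cell_put()) are from the nvmem consumer API added alongside this core, but their signatures here are quoted from memory and should be checked against linux/nvmem-consumer.h:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* Hypothetical consumer: read a calibration blob out of an NVMEM cell. */
static int example_read_calibration(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *buf;

	cell = nvmem_cell_get(dev, "calibration");	/* cell name is illustrative */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);		/* returns a kmalloc'ed copy */
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	dev_info(dev, "read %zu calibration bytes\n", len);
	kfree(buf);
	return 0;
}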
@@ -0,0 +1,12 @@
#
# Makefile for nvmem drivers.
#

obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o

# Devices
obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o

(The diff for the next file is not shown because of its large size.)

@@ -0,0 +1,85 @@
/*
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static struct regmap_config qfprom_regmap_config = {
	.reg_bits = 32,
	.val_bits = 8,
	.reg_stride = 1,
};

static struct nvmem_config econfig = {
	.name = "qfprom",
	.owner = THIS_MODULE,
};

static int qfprom_remove(struct platform_device *pdev)
{
	struct nvmem_device *nvmem = platform_get_drvdata(pdev);

	return nvmem_unregister(nvmem);
}

static int qfprom_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct nvmem_device *nvmem;
	struct regmap *regmap;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	qfprom_regmap_config.max_register = resource_size(res) - 1;

	regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config);
	if (IS_ERR(regmap)) {
		dev_err(dev, "regmap init failed\n");
		return PTR_ERR(regmap);
	}
	econfig.dev = dev;
	nvmem = nvmem_register(&econfig);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	platform_set_drvdata(pdev, nvmem);

	return 0;
}

static const struct of_device_id qfprom_of_match[] = {
	{ .compatible = "qcom,qfprom",},
	{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, qfprom_of_match);

static struct platform_driver qfprom_driver = {
	.probe = qfprom_probe,
	.remove = qfprom_remove,
	.driver = {
		.name = "qcom,qfprom",
		.of_match_table = qfprom_of_match,
	},
};
module_platform_driver(qfprom_driver);
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_DESCRIPTION("Qualcomm QFPROM driver");
MODULE_LICENSE("GPL v2");

@@ -0,0 +1,171 @@
/*
 * Allwinner sunXi SoCs Security ID support.
 *
 * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
 * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */


#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/random.h>


static struct nvmem_config econfig = {
	.name = "sunxi-sid",
	.read_only = true,
	.owner = THIS_MODULE,
};

struct sunxi_sid {
	void __iomem *base;
};

/* We read the entire key, due to a 32 bit read alignment requirement. Since we
 * want to return the requested byte, this results in somewhat slower code and
 * uses 4 times more reads as needed but keeps code simpler. Since the SID is
 * only very rarely probed, this is not really an issue.
 */
static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
			      const unsigned int offset)
{
	u32 sid_key;

	sid_key = ioread32be(sid->base + round_down(offset, 4));
	sid_key >>= (offset % 4) * 8;

	return sid_key; /* Only return the last byte */
}

static int sunxi_sid_read(void *context,
			  const void *reg, size_t reg_size,
			  void *val, size_t val_size)
{
	struct sunxi_sid *sid = context;
	unsigned int offset = *(u32 *)reg;
	u8 *buf = val;

	while (val_size) {
		*buf++ = sunxi_sid_read_byte(sid, offset);
		val_size--;
		offset++;
	}

	return 0;
}

static int sunxi_sid_write(void *context, const void *data, size_t count)
{
	/* Unimplemented, dummy to keep regmap core happy */
	return 0;
}

static struct regmap_bus sunxi_sid_bus = {
	.read = sunxi_sid_read,
	.write = sunxi_sid_write,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};

static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg)
{
	return false;
}

static struct regmap_config sunxi_sid_regmap_config = {
	.reg_bits = 32,
	.val_bits = 8,
	.reg_stride = 1,
	.writeable_reg = sunxi_sid_writeable_reg,
};

static int sunxi_sid_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct nvmem_device *nvmem;
	struct regmap *regmap;
	struct sunxi_sid *sid;
	int i, size;
	char *randomness;

	sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
	if (!sid)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sid->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(sid->base))
		return PTR_ERR(sid->base);

	size = resource_size(res) - 1;
	sunxi_sid_regmap_config.max_register = size;

	regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid,
				  &sunxi_sid_regmap_config);
	if (IS_ERR(regmap)) {
		dev_err(dev, "regmap init failed\n");
		return PTR_ERR(regmap);
	}

	econfig.dev = dev;
	nvmem = nvmem_register(&econfig);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
	for (i = 0; i < size; i++)
		randomness[i] = sunxi_sid_read_byte(sid, i);

	add_device_randomness(randomness, size);
	kfree(randomness);

	platform_set_drvdata(pdev, nvmem);

	return 0;
}

static int sunxi_sid_remove(struct platform_device *pdev)
{
	struct nvmem_device *nvmem = platform_get_drvdata(pdev);

	return nvmem_unregister(nvmem);
}

static const struct of_device_id sunxi_sid_of_match[] = {
	{ .compatible = "allwinner,sun4i-a10-sid" },
	{ .compatible = "allwinner,sun7i-a20-sid" },
	{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);

static struct platform_driver sunxi_sid_driver = {
	.probe = sunxi_sid_probe,
	.remove = sunxi_sid_remove,
	.driver = {
		.name = "eeprom-sunxi-sid",
		.of_match_table = sunxi_sid_of_match,
	},
};
module_platform_driver(sunxi_sid_driver);

MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
MODULE_DESCRIPTION("Allwinner sunxi security id driver");
MODULE_LICENSE("GPL");

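The comment above sunxi_sid_read_byte() in the file just shown explains the trade-off in words: every byte access is done with an aligned 32-bit read, then shifted down and truncated. The following standalone sketch only illustrates that shift-and-truncate arithmetic (the big-endian ioread32be() access of the real driver is set aside here); the function, the sample values, and the main() harness are invented for the demonstration:

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the byte extraction done by sunxi_sid_read_byte(). */
static uint8_t sid_byte_from_word(const uint32_t *words, unsigned int offset)
{
	uint32_t word = words[offset / 4];	/* aligned 32-bit load */

	word >>= (offset % 4) * 8;		/* move the wanted byte down */
	return (uint8_t)word;			/* keep only the last byte */
}

int main(void)
{
	/* Two sample 32-bit words; the values are invented for the demo. */
	uint32_t sid[2] = { 0x11223344, 0x55667788 };

	/* offset 6 -> word 1 (0x55667788) >> 16 = 0x5566, low byte 0x66 */
	printf("byte at offset 6 = 0x%02x\n", sid_byte_from_word(sid, 6));
	return 0;
}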
@@ -666,9 +666,8 @@ static int ds1374_remove(struct i2c_client *client)
#ifdef CONFIG_RTC_DRV_DS1374_WDT
	int res;

	res = misc_deregister(&ds1374_miscdev);
	if (!res)
		ds1374_miscdev.parent = NULL;
	misc_deregister(&ds1374_miscdev);
	ds1374_miscdev.parent = NULL;
	unregister_reboot_notifier(&ds1374_wdt_notifier);
#endif

(Some files in this merge are not shown because too many files changed.)