Merge branch 'perf/urgent' into perf/core

Conflicts:
	tools/perf/builtin-record.c
	tools/perf/builtin-top.c
	tools/perf/util/hist.h
Author: Ingo Molnar
Date:   2013-10-29 11:23:32 +01:00

406 changed files with 3510 additions and 2383 deletions


@@ -37,8 +37,8 @@ Description:
that the USB device has been connected to the machine. This
file is read-only.
Users:
-PowerTOP <power@bughost.org>
-http://www.lesswatts.org/projects/powertop/
+PowerTOP <powertop@lists.01.org>
+https://01.org/powertop/
What: /sys/bus/usb/device/.../power/active_duration
Date: January 2008
@@ -57,8 +57,8 @@ Description:
will give an integer percentage. Note that this does not
account for counter wrap.
Users:
-PowerTOP <power@bughost.org>
-http://www.lesswatts.org/projects/powertop/
+PowerTOP <powertop@lists.01.org>
+https://01.org/powertop/
What: /sys/bus/usb/devices/<busnum>-<port[.port]>...:<config num>-<interface num>/supports_autosuspend
Date: January 2008


@@ -1,6 +1,6 @@
What: /sys/devices/.../power/
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power directory contains attributes
allowing the user space to check and modify some power
@@ -8,7 +8,7 @@ Description:
What: /sys/devices/.../power/wakeup
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/wakeup attribute allows the user
space to check if the device is enabled to wake up the system
@@ -34,7 +34,7 @@ Description:
What: /sys/devices/.../power/control
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/control attribute allows the user
space to control the run-time power management of the device.
@@ -53,7 +53,7 @@ Description:
What: /sys/devices/.../power/async
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../async attribute allows the user space to
enable or diasble the device's suspend and resume callbacks to
@@ -79,7 +79,7 @@ Description:
What: /sys/devices/.../power/wakeup_count
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_count attribute contains the number
of signaled wakeup events associated with the device. This
@@ -88,7 +88,7 @@ Description:
What: /sys/devices/.../power/wakeup_active_count
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_active_count attribute contains the
number of times the processing of wakeup events associated with
@@ -98,7 +98,7 @@ Description:
What: /sys/devices/.../power/wakeup_abort_count
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_abort_count attribute contains the
number of times the processing of a wakeup event associated with
@@ -109,7 +109,7 @@ Description:
What: /sys/devices/.../power/wakeup_expire_count
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_expire_count attribute contains the
number of times a wakeup event associated with the device has
@@ -119,7 +119,7 @@ Description:
What: /sys/devices/.../power/wakeup_active
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_active attribute contains either 1,
or 0, depending on whether or not a wakeup event associated with
@@ -129,7 +129,7 @@ Description:
What: /sys/devices/.../power/wakeup_total_time_ms
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_total_time_ms attribute contains
the total time of processing wakeup events associated with the
@@ -139,7 +139,7 @@ Description:
What: /sys/devices/.../power/wakeup_max_time_ms
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_max_time_ms attribute contains
the maximum time of processing a single wakeup event associated
@@ -149,7 +149,7 @@ Description:
What: /sys/devices/.../power/wakeup_last_time_ms
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_last_time_ms attribute contains
the value of the monotonic clock corresponding to the time of
@@ -160,7 +160,7 @@ Description:
What: /sys/devices/.../power/wakeup_prevent_sleep_time_ms
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
contains the total time the device has been preventing
@@ -189,7 +189,7 @@ Description:
What: /sys/devices/.../power/pm_qos_latency_us
Date: March 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_resume_latency_us attribute
contains the PM QoS resume latency limit for the given device,
@@ -207,7 +207,7 @@ Description:
What: /sys/devices/.../power/pm_qos_no_power_off
Date: September 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_no_power_off attribute
is used for manipulating the PM QoS "no power off" flag. If
@@ -222,7 +222,7 @@ Description:
What: /sys/devices/.../power/pm_qos_remote_wakeup
Date: September 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_remote_wakeup attribute
is used for manipulating the PM QoS "remote wakeup required"


@@ -1,6 +1,6 @@
What: /sys/power/
Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power directory will contain files that will
provide a unified interface to the power management
@@ -8,7 +8,7 @@ Description:
What: /sys/power/state
Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/state file controls the system power state.
Reading from this file returns what states are supported,
@@ -22,7 +22,7 @@ Description:
What: /sys/power/disk
Date: September 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/disk file controls the operating mode of the
suspend-to-disk mechanism. Reading from this file returns
@@ -67,7 +67,7 @@ Description:
What: /sys/power/image_size
Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/image_size file controls the size of the image
created by the suspend-to-disk mechanism. It can be written a
@@ -84,7 +84,7 @@ Description:
What: /sys/power/pm_trace
Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/pm_trace file controls the code which saves the
last PM event point in the RTC across reboots, so that you can
@@ -133,7 +133,7 @@ Description:
What: /sys/power/pm_async
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/pm_async file controls the switch allowing the
user space to enable or disable asynchronous suspend and resume
@@ -146,7 +146,7 @@ Description:
What: /sys/power/wakeup_count
Date: July 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wakeup_count file allows user space to put the
system into a sleep state while taking into account the
@@ -161,7 +161,7 @@ Description:
What: /sys/power/reserved_size
Date: May 2011
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/reserved_size file allows user space to control
the amount of memory reserved for allocations made by device
@@ -175,7 +175,7 @@ Description:
What: /sys/power/autosleep
Date: April 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/autosleep file can be written one of the strings
returned by reads from /sys/power/state. If that happens, a
@@ -192,7 +192,7 @@ Description:
What: /sys/power/wake_lock
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wake_lock file allows user space to create
wakeup source objects and activate them on demand (if one of
@@ -219,7 +219,7 @@ Description:
What: /sys/power/wake_unlock
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wake_unlock file allows user space to deactivate
wakeup sources created with the help of /sys/power/wake_lock.


@@ -4,4 +4,4 @@ CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.
When to use this method is described in detail on the
Linux/ACPI home page:
-http://www.lesswatts.org/projects/acpi/overridingDSDT.php
+https://01.org/linux-acpi/documentation/overriding-dsdt


@@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg)
nlh->nlmsg_seq = seq++;
nlh->nlmsg_pid = getpid();
nlh->nlmsg_type = NLMSG_DONE;
-nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
+nlh->nlmsg_len = size;
nlh->nlmsg_flags = 0;
m = NLMSG_DATA(nlh);


@@ -1,168 +0,0 @@
*** Memory binding ***
The /memory node provides basic information about the address and size
of the physical memory. This node is usually filled or updated by the
bootloader, depending on the actual memory configuration of the given
hardware.
The memory layout is described by the following node:
/ {
#address-cells = <(n)>;
#size-cells = <(m)>;
memory {
device_type = "memory";
reg = <(baseaddr1) (size1)
(baseaddr2) (size2)
...
(baseaddrN) (sizeN)>;
};
...
};
A memory node follows the typical device tree rules for "reg" property:
n: number of cells used to store base address value
m: number of cells used to store size value
baseaddrX: defines a base address of the defined memory bank
sizeX: the size of the defined memory bank
More than one memory bank can be defined.
*** Reserved memory regions ***
In /memory/reserved-memory node one can create child nodes describing
particular reserved (excluded from normal use) memory regions. Such
memory regions are usually designed for the special usage by various
device drivers. A good example are contiguous memory allocations or
memory sharing with other operating system on the same hardware board.
Those special memory regions might depend on the board configuration and
devices used on the target system.
Parameters for each memory region can be encoded into the device tree
with the following convention:
[(label):] (name) {
compatible = "linux,contiguous-memory-region", "reserved-memory-region";
reg = <(address) (size)>;
(linux,default-contiguous-region);
};
compatible: one or more of:
- "linux,contiguous-memory-region" - enables binding of this
region to Contiguous Memory Allocator (special region for
contiguous memory allocations, shared with movable system
memory, Linux kernel-specific).
- "reserved-memory-region" - compatibility is defined, given
region is assigned for exclusive usage for by the respective
devices.
reg: standard property defining the base address and size of
the memory region
linux,default-contiguous-region: property indicating that the region
is the default region for all contiguous memory
allocations, Linux specific (optional)
It is optional to specify the base address, so if one wants to use
autoconfiguration of the base address, '0' can be specified as a base
address in the 'reg' property.
The /memory/reserved-memory node must contain the same #address-cells
and #size-cells value as the root node.
*** Device node's properties ***
Once regions in the /memory/reserved-memory node have been defined, they
may be referenced by other device nodes. Bindings that wish to reference
memory regions should explicitly document their use of the following
property:
memory-region = <&phandle_to_defined_region>;
This property indicates that the device driver should use the memory
region pointed by the given phandle.
*** Example ***
This example defines a memory consisting of 4 memory banks. 3 contiguous
regions are defined for Linux kernel, one default of all device drivers
(named contig_mem, placed at 0x72000000, 64MiB), one dedicated to the
framebuffer device (labelled display_mem, placed at 0x78000000, 8MiB)
and one for multimedia processing (labelled multimedia_mem, placed at
0x77000000, 64MiB). 'display_mem' region is then assigned to fb@12300000
device for DMA memory allocations (Linux kernel drivers will use CMA is
available or dma-exclusive usage otherwise). 'multimedia_mem' is
assigned to scaler@12500000 and codec@12600000 devices for contiguous
memory allocations when CMA driver is enabled.
The reason for creating a separate region for framebuffer device is to
match the framebuffer base address to the one configured by bootloader,
so once Linux kernel drivers starts no glitches on the displayed boot
logo appears. Scaller and codec drivers should share the memory
allocations.
/ {
#address-cells = <1>;
#size-cells = <1>;
/* ... */
memory {
reg = <0x40000000 0x10000000
0x50000000 0x10000000
0x60000000 0x10000000
0x70000000 0x10000000>;
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
/*
* global autoconfigured region for contiguous allocations
* (used only with Contiguous Memory Allocator)
*/
contig_region@0 {
compatible = "linux,contiguous-memory-region";
reg = <0x0 0x4000000>;
linux,default-contiguous-region;
};
/*
* special region for framebuffer
*/
display_region: region@78000000 {
compatible = "linux,contiguous-memory-region", "reserved-memory-region";
reg = <0x78000000 0x800000>;
};
/*
* special region for multimedia processing devices
*/
multimedia_region: region@77000000 {
compatible = "linux,contiguous-memory-region";
reg = <0x77000000 0x4000000>;
};
};
};
/* ... */
fb0: fb@12300000 {
status = "okay";
memory-region = <&display_region>;
};
scaler: scaler@12500000 {
status = "okay";
memory-region = <&multimedia_region>;
};
codec: codec@12600000 {
status = "okay";
memory-region = <&multimedia_region>;
};
};


@@ -237,11 +237,11 @@ F: drivers/platform/x86/acer-wmi.c
ACPI
M: Len Brown <lenb@kernel.org>
-M: Rafael J. Wysocki <rjw@sisk.pl>
+M: Rafael J. Wysocki <rjw@rjwysocki.net>
L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
+W: https://01.org/linux-acpi
-Q: http://patchwork.kernel.org/project/linux-acpi/list/
+Q: https://patchwork.kernel.org/project/linux-acpi/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
S: Supported
F: drivers/acpi/
F: drivers/pnp/pnpacpi/
@@ -256,21 +256,21 @@ F: drivers/pci/*/*/*acpi*
ACPI FAN DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
+W: https://01.org/linux-acpi
S: Supported
F: drivers/acpi/fan.c
ACPI THERMAL DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
+W: https://01.org/linux-acpi
S: Supported
F: drivers/acpi/*thermal*
ACPI VIDEO DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
+W: https://01.org/linux-acpi
S: Supported
F: drivers/acpi/video.c
@@ -1009,6 +1009,7 @@ ARM/Marvell Armada 370 and Armada XP SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
M: Gregory Clement <gregory.clement@free-electrons.com>
+M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mvebu/
@@ -1016,6 +1017,7 @@ F: arch/arm/mach-mvebu/
ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
+M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-dove/
@@ -1148,6 +1150,13 @@ F: drivers/net/ethernet/i825xx/ether1*
F: drivers/net/ethernet/seeq/ether3*
F: drivers/scsi/arm/
+ARM/Rockchip SoC support
+M: Heiko Stuebner <heiko@sntech.de>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-rockchip/
+F: drivers/*/*rockchip*
ARM/SHARK MACHINE SUPPORT
M: Alexander Schulz <alex@shark-linux.de>
W: http://www.shark-linux.de/shark.html
@@ -1791,6 +1800,7 @@ F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <fubar@us.ibm.com>
+M: Veaceslav Falico <vfalico@redhat.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
@@ -2300,7 +2310,7 @@ S: Maintained
F: drivers/net/ethernet/ti/cpmac.c
CPU FREQUENCY DRIVERS
-M: Rafael J. Wysocki <rjw@sisk.pl>
+M: Rafael J. Wysocki <rjw@rjwysocki.net>
M: Viresh Kumar <viresh.kumar@linaro.org>
L: cpufreq@vger.kernel.org
L: linux-pm@vger.kernel.org
@@ -2331,7 +2341,7 @@ S: Maintained
F: drivers/cpuidle/cpuidle-big_little.c
CPUIDLE DRIVERS
-M: Rafael J. Wysocki <rjw@sisk.pl>
+M: Rafael J. Wysocki <rjw@rjwysocki.net>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
@@ -2718,6 +2728,8 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
+L: dmaengine@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Supported
F: drivers/dma/
F: include/linux/dma*
@@ -2821,7 +2833,7 @@ M: Terje Bergström <tbergstrom@nvidia.com>
L: dri-devel@lists.freedesktop.org
L: linux-tegra@vger.kernel.org
T: git git://anongit.freedesktop.org/tegra/linux.git
-S: Maintained
+S: Supported
F: drivers/gpu/host1x/
F: include/uapi/drm/tegra_drm.h
F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -3553,7 +3565,7 @@ F: fs/freevxfs/
FREEZER
M: Pavel Machek <pavel@ucw.cz>
-M: "Rafael J. Wysocki" <rjw@sisk.pl>
+M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/power/freezing-of-tasks.txt
@@ -3624,6 +3636,12 @@ L: linux-scsi@vger.kernel.org
S: Odd Fixes (e.g., new signatures)
F: drivers/scsi/fdomain.*
+GCOV BASED KERNEL PROFILING
+M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+S: Maintained
+F: kernel/gcov/
+F: Documentation/gcov.txt
GDT SCSI DISK ARRAY CONTROLLER DRIVER
M: Achim Leubner <achim_leubner@adaptec.com>
L: linux-scsi@vger.kernel.org
@@ -3889,7 +3907,7 @@ F: drivers/video/hgafb.c
HIBERNATION (aka Software Suspend, aka swsusp)
M: Pavel Machek <pavel@ucw.cz>
-M: "Rafael J. Wysocki" <rjw@sisk.pl>
+M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
L: linux-pm@vger.kernel.org
S: Supported
F: arch/x86/power/
@@ -4339,7 +4357,7 @@ F: drivers/video/i810/
INTEL MENLOW THERMAL DRIVER
M: Sujith Thomas <sujith.thomas@intel.com>
L: platform-driver-x86@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
+W: https://01.org/linux-acpi
S: Supported
F: drivers/platform/x86/intel_menlow.c
@@ -4351,7 +4369,10 @@ F: arch/x86/kernel/microcode_intel.c
INTEL I/OAT DMA DRIVER
M: Dan Williams <dan.j.williams@intel.com>
-S: Maintained
+M: Dave Jiang <dave.jiang@intel.com>
+L: dmaengine@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
+S: Supported
F: drivers/dma/ioat*
INTEL IOMMU (VT-d)
@@ -7816,6 +7837,13 @@ F: Documentation/sound/alsa/soc/
F: sound/soc/
F: include/sound/soc*
+SOUND - DMAENGINE HELPERS
+M: Lars-Peter Clausen <lars@metafoo.de>
+S: Supported
+F: include/sound/dmaengine_pcm.h
+F: sound/core/pcm_dmaengine.c
+F: sound/soc/soc-generic-dmaengine-pcm.c
SPARC + UltraSPARC (sparc/sparc64)
M: "David S. Miller" <davem@davemloft.net>
L: sparclinux@vger.kernel.org
@@ -8095,7 +8123,7 @@ F: drivers/sh/
SUSPEND TO RAM
M: Len Brown <len.brown@intel.com>
M: Pavel Machek <pavel@ucw.cz>
-M: "Rafael J. Wysocki" <rjw@sisk.pl>
+M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/power/
@@ -8288,14 +8316,72 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/ttusbir.c
-TEGRA SUPPORT
+TEGRA ARCHITECTURE SUPPORT
M: Stephen Warren <swarren@wwwdotorg.org>
+M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
S: Supported
N: [^a-z]tegra
TEGRA ASOC DRIVER
M: Stephen Warren <swarren@wwwdotorg.org>
S: Supported
F: sound/soc/tegra/
TEGRA CLOCK DRIVER
M: Peter De Schrijver <pdeschrijver@nvidia.com>
M: Prashant Gaikwad <pgaikwad@nvidia.com>
S: Supported
F: drivers/clk/tegra/
TEGRA DMA DRIVER
M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/dma/tegra20-apb-dma.c
TEGRA GPIO DRIVER
M: Stephen Warren <swarren@wwwdotorg.org>
S: Supported
F: drivers/gpio/gpio-tegra.c
TEGRA I2C DRIVER
M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/i2c/busses/i2c-tegra.c
TEGRA IOMMU DRIVERS
M: Hiroshi Doyu <hdoyu@nvidia.com>
S: Supported
F: drivers/iommu/tegra*
TEGRA KBC DRIVER
M: Rakesh Iyer <riyer@nvidia.com>
M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/input/keyboard/tegra-kbc.c
TEGRA PINCTRL DRIVER
M: Stephen Warren <swarren@wwwdotorg.org>
S: Supported
F: drivers/pinctrl/pinctrl-tegra*
TEGRA PWM DRIVER
M: Thierry Reding <thierry.reding@gmail.com>
S: Supported
F: drivers/pwm/pwm-tegra.c
TEGRA SERIAL DRIVER
M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/tty/serial/serial-tegra.c
TEGRA SPI DRIVER
M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/spi/spi-tegra*
TEHUTI ETHERNET DRIVER
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org


@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = One Giant Leap for Frogkind
# *DOCUMENTATION*


@@ -9,11 +9,6 @@
model = "ARM Integrator/CP";
compatible = "arm,integrator-cp";
-aliases {
-arm,timer-primary = &timer2;
-arm,timer-secondary = &timer1;
-};
chosen {
bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
};
@@ -24,14 +19,18 @@
};
timer0: timer@13000000 {
+/* TIMER0 runs @ 25MHz */
compatible = "arm,integrator-cp-timer";
+status = "disabled";
};
timer1: timer@13000100 {
+/* TIMER1 runs @ 1MHz */
compatible = "arm,integrator-cp-timer";
};
timer2: timer@13000200 {
+/* TIMER2 runs @ 1MHz */
compatible = "arm,integrator-cp-timer";
};


@@ -51,7 +51,8 @@ void mcpm_cpu_power_down(void)
{
phys_reset_t phys_reset;
-BUG_ON(!platform_ops);
+if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+return;
BUG_ON(!irqs_disabled());
/*
@@ -93,7 +94,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
{
phys_reset_t phys_reset;
-BUG_ON(!platform_ops);
+if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+return;
BUG_ON(!irqs_disabled());
/* Very similar to mcpm_cpu_power_down() */


@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mach/sharpsl_param.h>
+#include <asm/memory.h>
/*
* Certain hardware parameters determined at the time of device manufacture,
@@ -25,8 +26,10 @@
*/
#ifdef CONFIG_ARCH_SA1100
#define PARAM_BASE 0xe8ffc000
+#define param_start(x) (void *)(x)
#else
#define PARAM_BASE 0xa0000a00
+#define param_start(x) __va(x)
#endif
#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
@@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
void sharpsl_save_param(void)
{
-memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
+memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
sharpsl_param.comadj=-1;


@@ -31,5 +31,4 @@ generic-y += termbits.h
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += unaligned.h


@@ -76,8 +76,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
*
* This must be called with interrupts disabled.
*
-* This does not return. Re-entry in the kernel is expected via
-* mcpm_entry_point.
+* On success this does not return. Re-entry in the kernel is expected
+* via mcpm_entry_point.
+*
+* This will return if mcpm_platform_register() has not been called
+* previously in which case the caller should take appropriate action.
*/
void mcpm_cpu_power_down(void);
@@ -98,8 +101,11 @@ void mcpm_cpu_power_down(void);
*
* This must be called with interrupts disabled.
*
-* This does not return. Re-entry in the kernel is expected via
-* mcpm_entry_point.
+* On success this does not return. Re-entry in the kernel is expected
+* via mcpm_entry_point.
+*
+* This will return if mcpm_platform_register() has not been called
+* previously in which case the caller should take appropriate action.
*/
void mcpm_cpu_suspend(u64 expected_residency);
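Editor's note, not part of the diff: the reworded kernel-doc above says both calls can now return when no MCPM platform ops were registered. A minimal sketch of how a caller might honour that, assuming a hypothetical CPU-offline path (the function name and the wfi() parking loop are illustrative, not taken from this commit):

#include <linux/kernel.h>
#include <asm/mcpm.h>
#include <asm/barrier.h>	/* ARM wfi() macro */

/* Hypothetical hotplug "die" path: park the CPU if MCPM cannot power it down. */
static void example_cpu_die(unsigned int cpu)
{
	mcpm_cpu_power_down();	/* does not return on success */

	/* Only reached if no MCPM platform ops were registered. */
	pr_warn("CPU%u: mcpm_cpu_power_down() returned, parking CPU\n", cpu);
	while (1)
		wfi();
}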


@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+if (n == 0)
+return;
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
+if (n == 0)
+return;
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);


@@ -487,7 +487,26 @@ __fixup_smp:
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
and r0, r0, #0xc0000000 @ multiprocessing extensions and
teq r0, #0x80000000 @ not part of a uniprocessor system?
-moveq pc, lr @ yes, assume SMP
+bne __fixup_smp_on_up @ no, assume UP
@ Core indicates it is SMP. Check for Aegis SOC where a single
@ Cortex-A9 CPU is present but SMP operations fault.
mov r4, #0x41000000
orr r4, r4, #0x0000c000
orr r4, r4, #0x00000090
teq r3, r4 @ Check for ARM Cortex-A9
movne pc, lr @ Not ARM Cortex-A9,
@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
@ below address check will need to be #ifdef'd or equivalent
@ for the Aegis platform.
mrc p15, 4, r0, c15, c0 @ get SCU base address
teq r0, #0x0 @ '0' on actual UP A9 hardware
beq __fixup_smp_on_up @ So its an A9 UP
ldr r0, [r0, #4] @ read SCU Config
and r0, r0, #0x3 @ number of CPUs
teq r0, #0x0 @ is 1?
movne pc, lr
__fixup_smp_on_up:
adr r0, 1f


@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
break;
len = (j - i) << PAGE_SHIFT;
-ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ret = iommu_map(mapping->domain, iova, phys, len,
+IOMMU_READ|IOMMU_WRITE);
if (ret < 0)
goto fail;
iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
GFP_KERNEL);
}
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
int prot;
switch (dir) {
case DMA_BIDIRECTIONAL:
prot = IOMMU_READ | IOMMU_WRITE;
break;
case DMA_TO_DEVICE:
prot = IOMMU_READ;
break;
case DMA_FROM_DEVICE:
prot = IOMMU_WRITE;
break;
default:
prot = 0;
}
return prot;
}
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
int ret = 0;
unsigned int count;
struct scatterlist *s;
+int prot;
size = PAGE_ALIGN(size);
*handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
-ret = iommu_map(mapping->domain, iova, phys, len, 0);
+prot = __dma_direction_to_prot(dir);
+ret = iommu_map(mapping->domain, iova, phys, len, prot);
if (ret < 0)
goto fail;
count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
-switch (dir) {
+prot = __dma_direction_to_prot(dir);
case DMA_BIDIRECTIONAL:
prot = IOMMU_READ | IOMMU_WRITE;
break;
case DMA_TO_DEVICE:
prot = IOMMU_READ;
break;
case DMA_FROM_DEVICE:
prot = IOMMU_WRITE;
break;
default:
prot = 0;
}
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)


@@ -17,7 +17,6 @@
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
@@ -379,8 +378,6 @@ void __init arm_memblock_init(struct meminfo *mi,
if (mdesc->reserve)
mdesc->reserve();
-early_init_dt_scan_reserved_mem();
/*
* reserve memory for DMA contigouos allocations,
* must come from DMA area inside low memory


@@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+kfree(fp);
}


@@ -40,6 +40,8 @@ CONFIG_IP_NF_QUEUE=m
CONFIG_LLC2=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_PARPORT=y


@@ -79,6 +79,8 @@ CONFIG_IP_DCCP=m
CONFIG_LLC2=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_BLK_DEV_UMEM=m


@@ -4,6 +4,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODVERSIONS=y
@@ -27,6 +28,8 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y


@@ -5,6 +5,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
@@ -39,6 +40,8 @@ CONFIG_NETFILTER_DEBUG=y
CONFIG_IP_NF_QUEUE=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_BLK_DEV_UMEM=m


@@ -62,6 +62,8 @@ CONFIG_TIPC=m
CONFIG_LLC2=m
CONFIG_DNS_RESOLVER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y


@@ -49,6 +49,8 @@ CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_LLC2=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_PARPORT=y


@@ -602,7 +602,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
-EXPORT_SYMBOL_GPL(flush_cache_page);
#ifdef CONFIG_PARISC_TMPALIAS


@@ -195,6 +195,8 @@ common_stext:
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
+mfctl %cr30,%r6 /* PCX-W2 firmware bug */
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -203,6 +205,8 @@ common_stext:
copy %r0,%arg3
stext_pdc_ret:
+mtctl %r6,%cr30 /* restore task thread info */
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10


@@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+kfree(fp);
}


@@ -748,7 +748,9 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
-if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
+if (!MACHINE_HAS_ESOP &&
+(pte_val(entry) & _PAGE_PRESENT) &&
+(pte_val(entry) & _PAGE_WRITE)) {
/*
* Without enhanced suppression-on-protection force
* the dirty bit on for all writable ptes.


@@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)
typedef unsigned long long cycles_t;
static inline unsigned long long get_tod_clock(void)
{
unsigned long long clk;
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
#else
asm volatile("stck %0" : "=Q" (clk) : : "cc");
#endif
return clk;
}
static inline void get_tod_clock_ext(char *clk)
{
asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}
-static inline unsigned long long get_tod_clock_xt(void)
+static inline unsigned long long get_tod_clock(void)
{
unsigned char clk[16];
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
static inline unsigned long long get_tod_clock_fast(void)
{
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
unsigned long long clk;
asm volatile("stckf %0" : "=Q" (clk) : : "cc");
return clk;
#else
return get_tod_clock();
#endif
}
static inline cycles_t get_cycles(void)
{
return (cycles_t) get_tod_clock() >> 2;
@@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
*/
static inline unsigned long long get_tod_clock_monotonic(void)
{
-return get_tod_clock_xt() - sched_clock_base_cc;
+return get_tod_clock() - sched_clock_base_cc;
}
/**


@@ -99,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
break;
}
}
-return err;
+return err ? -EFAULT : 0;
}
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
@@ -148,7 +148,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
break;
}
}
-return err;
+return err ? -EFAULT : 0;
}
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)


@@ -867,7 +867,7 @@ static inline void
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
int exception)
{
-active->id.stck = get_tod_clock();
+active->id.stck = get_tod_clock_fast();
active->id.fields.cpuid = smp_processor_id();
active->caller = __builtin_return_address(0);
active->id.fields.exception = exception;


@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
}
if ((!rc) && (vcpu->arch.sie_block->ckc <
-get_tod_clock() + vcpu->arch.sie_block->epoch)) {
+get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
if ((!psw_extint_disabled(vcpu)) &&
(vcpu->arch.sie_block->gcr[0] & 0x800ul))
rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
goto no_timer;
}
-now = get_tod_clock() + vcpu->arch.sie_block->epoch;
+now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
if (vcpu->arch.sie_block->ckc < now) {
__unset_cpu_idle(vcpu);
return 0;
@@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
}
if ((vcpu->arch.sie_block->ckc <
-get_tod_clock() + vcpu->arch.sie_block->epoch))
+get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
__try_deliver_ckc_interrupt(vcpu);
if (atomic_read(&fi->active)) {


@@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
do {
set_clock_comparator(end);
vtime_stop_cpu();
-} while (get_tod_clock() < end);
+} while (get_tod_clock_fast() < end);
lockdep_on();
__ctl_load(cr0, 0, 0);
__ctl_load(cr6, 6, 6);
@@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
{
u64 clock_saved, end;
-end = get_tod_clock() + (usecs << 12);
+end = get_tod_clock_fast() + (usecs << 12);
do {
clock_saved = 0;
if (end < S390_lowcore.clock_comparator) {
@@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
vtime_stop_cpu();
if (clock_saved)
local_tick_enable(clock_saved);
-} while (get_tod_clock() < end);
+} while (get_tod_clock_fast() < end);
}
/*
@@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
{
u64 end;
-end = get_tod_clock() + (usecs << 12);
-while (get_tod_clock() < end)
+end = get_tod_clock_fast() + (usecs << 12);
+while (get_tod_clock_fast() < end)
cpu_relax();
}
@@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
nsecs <<= 9;
do_div(nsecs, 125);
-end = get_tod_clock() + nsecs;
+end = get_tod_clock_fast() + nsecs;
if (nsecs & ~0xfffUL)
__udelay(nsecs >> 12);
-while (get_tod_clock() < end)
+while (get_tod_clock_fast() < end)
barrier();
}
EXPORT_SYMBOL(__ndelay);


@@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
struct bpf_binary_header *header = (void *)addr;
if (fp->bpf_func == sk_run_filter)
-return;
+goto free_filter;
set_memory_rw(addr, header->pages);
module_free(NULL, header);
+free_filter:
+kfree(fp);
}


@@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+kfree(fp);
}


@@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
config MICROCODE
tristate "CPU microcode loading support"
+depends on CPU_SUP_AMD || CPU_SUP_INTEL
select FW_LOADER
---help---


@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
break;
case UV3_HUB_PART_NUMBER:
case UV3_HUB_PART_NUMBER_X:
-uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
break;
}


@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
return;
-printk(KERN_INFO "KVM setup paravirtual spinlock\n");
-static_key_slow_inc(&paravirt_ticketlocks_enabled);
pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
pv_lock_ops.unlock_kick = kvm_unlock_kick;
}
static __init int kvm_spinlock_init_jump(void)
{
if (!kvm_para_available())
return 0;
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
return 0;
static_key_slow_inc(&paravirt_ticketlocks_enabled);
printk(KERN_INFO "KVM setup paravirtual spinlock\n");
return 0;
}
early_initcall(kvm_spinlock_init_jump);
#endif /* CONFIG_PARAVIRT_SPINLOCKS */


@@ -772,13 +772,21 @@ out:
return;
}
static void bpf_jit_free_deferred(struct work_struct *work)
{
struct sk_filter *fp = container_of(work, struct sk_filter, work);
unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
struct bpf_binary_header *header = (void *)addr;
set_memory_rw(addr, header->pages);
module_free(NULL, header);
kfree(fp);
}
void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter) {
-unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-struct bpf_binary_header *header = (void *)addr;
-set_memory_rw(addr, header->pages);
-module_free(NULL, header);
+INIT_WORK(&fp->work, bpf_jit_free_deferred);
+schedule_work(&fp->work);
}
}


@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
old memory can be recycled */
make_lowmem_page_readwrite(xen_initial_gdt);
#ifdef CONFIG_X86_32
/*
* Xen starts us with XEN_FLAT_RING1_DS, but linux code
* expects __USER_DS
*/
loadsegment(ds, __USER_DS);
loadsegment(es, __USER_DS);
#endif
xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
}


@@ -222,11 +222,16 @@ check_hybrid:
* the disk size.
*
* Hybrid MBRs do not necessarily comply with this.
+*
+* Consider a bad value here to be a warning to support dd'ing
+* an image from a smaller disk to a larger disk.
*/
if (ret == GPT_MBR_PROTECTIVE) {
sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
-ret = 0;
+pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
+sz, min_t(uint32_t,
+total_sectors - 1, 0xFFFFFFFF));
}
done:
return ret;
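Editor's note, not part of the diff: the check above accepts a protective MBR whose size_in_lba is either the whole disk minus one sector or the 0xFFFFFFFF cap, and now only logs other values instead of rejecting the GPT. A hedged, userspace-style sketch of the expected value (illustrative helper, not kernel code from this commit):

#include <stdint.h>

/* Size a protective MBR entry is expected to advertise for a disk of
 * total_sectors sectors: whole disk minus LBA 0, clamped to 32 bits. */
static uint32_t expected_protective_mbr_size(uint64_t total_sectors)
{
	uint64_t sz = total_sectors - 1;

	return sz > 0xFFFFFFFFULL ? 0xFFFFFFFFU : (uint32_t)sz;
}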


@@ -24,7 +24,7 @@ menuconfig ACPI
are configured, ACPI is used.
The project home page for the Linux ACPI subsystem is here:
-<http://www.lesswatts.org/projects/acpi/>
+<https://01.org/linux-acpi>
Linux support for ACPI is based on Intel Corporation's ACPI
Component Architecture (ACPI CA). For more information on the
@@ -123,9 +123,9 @@ config ACPI_BUTTON
default y
help
This driver handles events on the power, sleep, and lid buttons.
-A daemon reads /proc/acpi/event and perform user-defined actions
-such as shutting down the system. This is necessary for
-software-controlled poweroff.
+A daemon reads events from input devices or via netlink and
+performs user-defined actions such as shutting down the system.
+This is necessary for software-controlled poweroff.
To compile this driver as a module, choose M here:
the module will be called button.


@@ -1025,60 +1025,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
} }
} }
EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
/**
* acpi_dev_pm_add_dependent - Add physical device depending for PM.
* @handle: Handle of ACPI device node.
* @depdev: Device depending on that node for PM.
*/
void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
{
struct acpi_device_physical_node *dep;
struct acpi_device *adev;
if (!depdev || acpi_bus_get_device(handle, &adev))
return;
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(dep, &adev->power_dependent, node)
if (dep->dev == depdev)
goto out;
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (dep) {
dep->dev = depdev;
list_add_tail(&dep->node, &adev->power_dependent);
}
out:
mutex_unlock(&adev->physical_node_lock);
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
/**
* acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
* @handle: Handle of ACPI device node.
* @depdev: Device depending on that node for PM.
*/
void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
{
struct acpi_device_physical_node *dep;
struct acpi_device *adev;
if (!depdev || acpi_bus_get_device(handle, &adev))
return;
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(dep, &adev->power_dependent, node)
if (dep->dev == depdev) {
list_del(&dep->node);
kfree(dep);
break;
}
mutex_unlock(&adev->physical_node_lock);
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
#endif /* CONFIG_PM */ #endif /* CONFIG_PM */


@@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
#define ACPI_POWER_RESOURCE_STATE_ON 0x01 #define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
struct acpi_power_dependent_device {
struct list_head node;
struct acpi_device *adev;
struct work_struct work;
};
struct acpi_power_resource { struct acpi_power_resource {
struct acpi_device device; struct acpi_device device;
struct list_head list_node; struct list_head list_node;
struct list_head dependent;
char *name; char *name;
u32 system_level; u32 system_level;
u32 order; u32 order;
@@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
return 0; return 0;
} }
static void acpi_power_resume_dependent(struct work_struct *work)
{
struct acpi_power_dependent_device *dep;
struct acpi_device_physical_node *pn;
struct acpi_device *adev;
int state;
dep = container_of(work, struct acpi_power_dependent_device, work);
adev = dep->adev;
if (acpi_power_get_inferred_state(adev, &state))
return;
if (state > ACPI_STATE_D0)
return;
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn, &adev->physical_node_list, node)
pm_request_resume(pn->dev);
list_for_each_entry(pn, &adev->power_dependent, node)
pm_request_resume(pn->dev);
mutex_unlock(&adev->physical_node_lock);
}
static int __acpi_power_on(struct acpi_power_resource *resource) static int __acpi_power_on(struct acpi_power_resource *resource)
{ {
acpi_status status = AE_OK; acpi_status status = AE_OK;
@@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
resource->name)); resource->name));
} else { } else {
result = __acpi_power_on(resource); result = __acpi_power_on(resource);
if (result) { if (result)
resource->ref_count--; resource->ref_count--;
} else {
struct acpi_power_dependent_device *dep;
list_for_each_entry(dep, &resource->dependent, node)
schedule_work(&dep->work);
}
} }
return result; return result;
} }
@@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
return result; return result;
} }
static void acpi_power_add_dependent(struct acpi_power_resource *resource,
struct acpi_device *adev)
{
struct acpi_power_dependent_device *dep;
mutex_lock(&resource->resource_lock);
list_for_each_entry(dep, &resource->dependent, node)
if (dep->adev == adev)
goto out;
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
goto out;
dep->adev = adev;
INIT_WORK(&dep->work, acpi_power_resume_dependent);
list_add_tail(&dep->node, &resource->dependent);
out:
mutex_unlock(&resource->resource_lock);
}
static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
struct acpi_device *adev)
{
struct acpi_power_dependent_device *dep;
struct work_struct *work = NULL;
mutex_lock(&resource->resource_lock);
list_for_each_entry(dep, &resource->dependent, node)
if (dep->adev == adev) {
list_del(&dep->node);
work = &dep->work;
break;
}
mutex_unlock(&resource->resource_lock);
if (work) {
cancel_work_sync(work);
kfree(dep);
}
}
static struct attribute *attrs[] = { static struct attribute *attrs[] = {
NULL, NULL,
}; };
@@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,
void acpi_power_add_remove_device(struct acpi_device *adev, bool add) void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
{ {
struct acpi_device_power_state *ps;
struct acpi_power_resource_entry *entry;
int state; int state;
if (adev->wakeup.flags.valid) if (adev->wakeup.flags.valid)
@@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
if (!adev->power.flags.power_resources) if (!adev->power.flags.power_resources)
return; return;
ps = &adev->power.states[ACPI_STATE_D0];
list_for_each_entry(entry, &ps->resources, node) {
struct acpi_power_resource *resource = entry->resource;
if (add)
acpi_power_add_dependent(resource, adev);
else
acpi_power_remove_dependent(resource, adev);
}
for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
acpi_power_expose_hide(adev, acpi_power_expose_hide(adev,
&adev->power.states[state].resources, &adev->power.states[state].resources,
@@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER, acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
ACPI_STA_DEFAULT); ACPI_STA_DEFAULT);
mutex_init(&resource->resource_lock); mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->dependent);
INIT_LIST_HEAD(&resource->list_node); INIT_LIST_HEAD(&resource->list_node);
resource->name = device->pnp.bus_id; resource->name = device->pnp.bus_id;
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
@@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
mutex_lock(&resource->resource_lock); mutex_lock(&resource->resource_lock);
result = acpi_power_get_state(resource->device.handle, &state); result = acpi_power_get_state(resource->device.handle, &state);
if (result) if (result) {
mutex_unlock(&resource->resource_lock);
continue; continue;
}
if (state == ACPI_POWER_RESOURCE_STATE_OFF if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) { && resource->ref_count) {


@@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&device->wakeup_list); INIT_LIST_HEAD(&device->wakeup_list);
INIT_LIST_HEAD(&device->physical_node_list); INIT_LIST_HEAD(&device->physical_node_list);
mutex_init(&device->physical_node_lock); mutex_init(&device->physical_node_lock);
INIT_LIST_HEAD(&device->power_dependent);
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
if (!new_bus_id) { if (!new_bus_id) {


@@ -1343,7 +1343,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN; host->flags |= ATA_HOST_PARALLEL_SCAN;
else else
printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
if (pi.flags & ATA_FLAG_EM) if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host); ahci_reset_em(host);


@@ -184,7 +184,7 @@ static int ahci_probe(struct platform_device *pdev)
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN; host->flags |= ATA_HOST_PARALLEL_SCAN;
else else
printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
if (pi.flags & ATA_FLAG_EM) if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host); ahci_reset_em(host);


@@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
rc = ap->ops->transmit_led_message(ap, rc = ap->ops->transmit_led_message(ap,
emp->led_state, emp->led_state,
4); 4);
/*
* If busy, give a breather but do not
* release EH ownership by using msleep()
* instead of ata_msleep(). EM Transmit
* bit is busy for the whole host and
* releasing ownership will cause other
* ports to fail the same way.
*/
if (rc == -EBUSY) if (rc == -EBUSY)
ata_msleep(ap, 1); msleep(1);
else else
break; break;
} }
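
The comment added above explains why the loop keeps EH ownership and simply sleeps while the EM transmit bit is busy. A tiny self-contained sketch of the same retry-on-EBUSY shape; transmit() here is a stand-in for illustration, not the libata call.

#include <errno.h>
#include <time.h>

/* Hypothetical operation that reports the shared transmitter busy a few times. */
static int transmit(void)
{
    static int busy_left = 3;

    return busy_left-- > 0 ? -EBUSY : 0;
}

static int transmit_with_retry(int max_tries)
{
    struct timespec one_ms = { 0, 1000000L };
    int rc = -EBUSY;

    while (max_tries-- > 0) {
        rc = transmit();
        if (rc != -EBUSY)
            break;                  /* success or a real error: stop retrying */
        nanosleep(&one_ms, NULL);   /* short breather without giving anything up */
    }
    return rc;
}

int main(void)
{
    return transmit_with_retry(16) ? 1 : 0;
}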


@@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
{ {
ata_acpi_clear_gtf(dev); ata_acpi_clear_gtf(dev);
} }
void ata_scsi_acpi_bind(struct ata_device *dev)
{
acpi_handle handle = ata_dev_acpi_handle(dev);
if (handle)
acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
}
void ata_scsi_acpi_unbind(struct ata_device *dev)
{
acpi_handle handle = ata_dev_acpi_handle(dev);
if (handle)
acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
}


@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
* should be retried. To be used from EH. * should be retried. To be used from EH.
* *
* SCSI midlayer limits the number of retries to scmd->allowed. * SCSI midlayer limits the number of retries to scmd->allowed.
* scmd->retries is decremented for commands which get retried * scmd->allowed is incremented for commands which get retried
* due to unrelated failures (qc->err_mask is zero). * due to unrelated failures (qc->err_mask is zero).
*/ */
void ata_eh_qc_retry(struct ata_queued_cmd *qc) void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{ {
struct scsi_cmnd *scmd = qc->scsicmd; struct scsi_cmnd *scmd = qc->scsicmd;
if (!qc->err_mask && scmd->retries) if (!qc->err_mask)
scmd->retries--; scmd->allowed++;
__ata_eh_qc_complete(qc); __ata_eh_qc_complete(qc);
} }


@@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
if (!IS_ERR(sdev)) { if (!IS_ERR(sdev)) {
dev->sdev = sdev; dev->sdev = sdev;
scsi_device_put(sdev); scsi_device_put(sdev);
ata_scsi_acpi_bind(dev);
} else { } else {
dev->sdev = NULL; dev->sdev = NULL;
} }
@@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
struct scsi_device *sdev; struct scsi_device *sdev;
unsigned long flags; unsigned long flags;
ata_scsi_acpi_unbind(dev);
/* Alas, we need to grab scan_mutex to ensure SCSI device /* Alas, we need to grab scan_mutex to ensure SCSI device
* state doesn't change underneath us and thus * state doesn't change underneath us and thus
* scsi_device_get() always succeeds. The mutex locking can * scsi_device_get() always succeeds. The mutex locking can


@@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
extern void ata_acpi_bind_port(struct ata_port *ap); extern void ata_acpi_bind_port(struct ata_port *ap);
extern void ata_acpi_bind_dev(struct ata_device *dev); extern void ata_acpi_bind_dev(struct ata_device *dev);
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev); extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
extern void ata_scsi_acpi_bind(struct ata_device *dev);
extern void ata_scsi_acpi_unbind(struct ata_device *dev);
#else #else
static inline void ata_acpi_dissociate(struct ata_host *host) { } static inline void ata_acpi_dissociate(struct ata_host *host) { }
static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
pm_message_t state) { } pm_message_t state) { }
static inline void ata_acpi_bind_port(struct ata_port *ap) {} static inline void ata_acpi_bind_port(struct ata_port *ap) {}
static inline void ata_acpi_bind_dev(struct ata_device *dev) {} static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
#endif #endif
/* libata-scsi.c */ /* libata-scsi.c */


@@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
ap->ioaddr.cmd_addr = cmd_addr; ap->ioaddr.cmd_addr = cmd_addr;
if (pnp_port_valid(idev, 1) == 0) { if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev, ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1); pnp_port_start(idev, 1), 1);
ap->ioaddr.altstatus_addr = ctl_addr; ap->ioaddr.altstatus_addr = ctl_addr;


@@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
online_type = ONLINE_KEEP; online_type = ONLINE_KEEP;
else if (!strncmp(buf, "offline", min_t(int, count, 7))) else if (!strncmp(buf, "offline", min_t(int, count, 7)))
online_type = -1; online_type = -1;
else else {
return -EINVAL; ret = -EINVAL;
goto err;
}
switch (online_type) { switch (online_type) {
case ONLINE_KERNEL: case ONLINE_KERNEL:
@@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
ret = -EINVAL; /* should never happen */ ret = -EINVAL; /* should never happen */
} }
err:
unlock_device_hotplug(); unlock_device_hotplug();
if (ret) if (ret)
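
The store_mem_state change routes the invalid-input case through a goto so the hotplug lock is always released on one exit path. The same shape in a standalone form, with a pthread mutex standing in for the device-hotplug lock and purely illustrative string parsing.

#include <errno.h>
#include <pthread.h>
#include <string.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static int store_state(const char *buf)
{
    int ret = 0;

    pthread_mutex_lock(&state_lock);

    if (strcmp(buf, "online") == 0)
        ret = 0;                    /* ... apply the change under the lock ... */
    else if (strcmp(buf, "offline") == 0)
        ret = 0;
    else {
        ret = -EINVAL;
        goto err;                   /* skip the rest, but still drop the lock */
    }

    /* further work that must not run for invalid input */

err:
    pthread_mutex_unlock(&state_lock);
    return ret;
}

int main(void)
{
    return store_state("bogus") == -EINVAL ? 0 : 1;
}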


@@ -10,6 +10,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <xen/xen.h>
#include <xen/events.h> #include <xen/events.h>
#include <xen/interface/io/tpmif.h> #include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h> #include <xen/grant_table.h>


@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu); get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */ ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */ msg->ack = 0; /* not used */
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
/* If cn_netlink_send() failed, the data is not sent */ /* If cn_netlink_send() failed, the data is not sent */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu); get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */ ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */ msg->ack = 0; /* not used */
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->what = which_id; ev->what = which_id;
ev->event_data.id.process_pid = task->pid; ev->event_data.id.process_pid = task->pid;
ev->event_data.id.process_tgid = task->tgid; ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */ msg->ack = 0; /* not used */
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu); get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */ ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */ msg->ack = 0; /* not used */
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu); get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */ ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */ msg->ack = 0; /* not used */
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu); get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */ ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */ msg->ack = 0; /* not used */
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu); get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */ ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */ msg->ack = 0; /* not used */
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu); get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */ ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */ msg->ack = 0; /* not used */
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg = (struct cn_msg *)buffer; msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data; ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
msg->seq = rcvd_seq; msg->seq = rcvd_seq;
ktime_get_ts(&ts); /* get high res monotonic timestamp */ ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = rcvd_ack + 1; msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev); msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
} }
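
Each proc_*_connector hunk above gains a memset() of the event payload and an explicit msg->flags = 0 before the message goes to netlink, so padding and unused fields cannot carry stale kernel stack data to user space. A compact sketch of the same zero-before-fill habit; the struct and send_event() are invented for illustration.

#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Illustrative wire format: padding and unused fields must not leak garbage. */
struct event_msg {
    uint32_t seq;
    uint16_t len;
    uint16_t flags;                 /* not used yet, but still goes on the wire */
    char     data[32];
};

static void send_event(int fd, uint32_t seq, const char *comm)
{
    struct event_msg msg;

    memset(&msg, 0, sizeof(msg));   /* clear padding and spare fields first */
    msg.seq   = seq;
    msg.flags = 0;                  /* explicitly "not used" */
    msg.len   = (uint16_t)strnlen(comm, sizeof(msg.data) - 1);
    memcpy(msg.data, comm, msg.len);            /* data[] stays NUL-padded */

    write(fd, &msg, sizeof(msg));   /* stand-in for cn_netlink_send() */
}

int main(void)
{
    send_event(STDOUT_FILENO, 1, "bash");
    return 0;
}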


@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
data = nlmsg_data(nlh); data = nlmsg_data(nlh);
memcpy(data, msg, sizeof(*data) + msg->len); memcpy(data, msg, size);
NETLINK_CB(skb).dst_group = group; NETLINK_CB(skb).dst_group = group;
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
static void cn_rx_skb(struct sk_buff *__skb) static void cn_rx_skb(struct sk_buff *__skb)
{ {
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
int err;
struct sk_buff *skb; struct sk_buff *skb;
int len, err;
skb = skb_get(__skb); skb = skb_get(__skb);
if (skb->len >= NLMSG_HDRLEN) { if (skb->len >= NLMSG_HDRLEN) {
nlh = nlmsg_hdr(skb); nlh = nlmsg_hdr(skb);
len = nlmsg_len(nlh);
if (nlh->nlmsg_len < sizeof(struct cn_msg) || if (len < (int)sizeof(struct cn_msg) ||
skb->len < nlh->nlmsg_len || skb->len < nlh->nlmsg_len ||
nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) { len > CONNECTOR_MAX_MSG_SIZE) {
kfree_skb(skb); kfree_skb(skb);
return; return;
} }
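
The cn_rx_skb hunk validates the advertised payload length against both a lower bound (a full struct cn_msg) and an upper bound before trusting it. The same bounds check in a self-contained form, with an invented header struct rather than the netlink types.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAX_MSG_SIZE 16384

struct msg_hdr {
    uint32_t len;                   /* length claimed by the sender, incl. header */
};

/*
 * Return a pointer to the payload if the claimed length is sane, NULL
 * otherwise; buf_len is how many bytes were actually received.
 */
static const void *parse_msg(const void *buf, size_t buf_len)
{
    struct msg_hdr hdr;

    if (buf_len < sizeof(hdr))
        return NULL;                /* not even a complete header */
    memcpy(&hdr, buf, sizeof(hdr));

    if (hdr.len < sizeof(hdr) ||    /* shorter than the header itself */
        hdr.len > buf_len ||        /* claims more than was received */
        hdr.len > MAX_MSG_SIZE)     /* absurdly large */
        return NULL;

    return (const uint8_t *)buf + sizeof(hdr);
}

int main(void)
{
    uint8_t raw[8] = { 0 };
    struct msg_hdr lie = { 64 };    /* claims more data than the buffer holds */

    memcpy(raw, &lie, sizeof(lie));
    return parse_msg(raw, sizeof(raw)) == NULL ? 0 : 1;
}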


@@ -986,12 +986,12 @@ static int __init acpi_cpufreq_init(void)
{ {
int ret; int ret;
if (acpi_disabled)
return -ENODEV;
/* don't keep reloading if cpufreq_driver exists */ /* don't keep reloading if cpufreq_driver exists */
if (cpufreq_get_current_driver()) if (cpufreq_get_current_driver())
return 0; return -EEXIST;
if (acpi_disabled)
return 0;
pr_debug("acpi_cpufreq_init\n"); pr_debug("acpi_cpufreq_init\n");


@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
} }
struct sample { struct sample {
int core_pct_busy; int32_t core_pct_busy;
u64 aperf; u64 aperf;
u64 mperf; u64 mperf;
int freq; int freq;
@@ -68,7 +68,7 @@ struct _pid {
int32_t i_gain; int32_t i_gain;
int32_t d_gain; int32_t d_gain;
int deadband; int deadband;
int last_err; int32_t last_err;
}; };
struct cpudata { struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
} }
static signed int pid_calc(struct _pid *pid, int busy) static signed int pid_calc(struct _pid *pid, int32_t busy)
{ {
signed int err, result; signed int result;
int32_t pterm, dterm, fp_error; int32_t pterm, dterm, fp_error;
int32_t integral_limit; int32_t integral_limit;
err = pid->setpoint - busy; fp_error = int_tofp(pid->setpoint) - busy;
fp_error = int_tofp(err);
if (abs(err) <= pid->deadband) if (abs(fp_error) <= int_tofp(pid->deadband))
return 0; return 0;
pterm = mul_fp(pid->p_gain, fp_error); pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
if (pid->integral < -integral_limit) if (pid->integral < -integral_limit)
pid->integral = -integral_limit; pid->integral = -integral_limit;
dterm = mul_fp(pid->d_gain, (err - pid->last_err)); dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
pid->last_err = err; pid->last_err = fp_error;
result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{ {
int max_perf = cpu->pstate.turbo_pstate; int max_perf = cpu->pstate.turbo_pstate;
int max_perf_adj;
int min_perf; int min_perf;
if (limits.no_turbo) if (limits.no_turbo)
max_perf = cpu->pstate.max_pstate; max_perf = cpu->pstate.max_pstate;
max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
*max = clamp_t(int, max_perf, *max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{ {
int max_perf, min_perf; int max_perf, min_perf;
u64 val;
intel_pstate_get_min_max(cpu, &min_perf, &max_perf); intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
@@ -394,11 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
trace_cpu_frequency(pstate * 100000, cpu->cpu); trace_cpu_frequency(pstate * 100000, cpu->cpu);
cpu->pstate.current_pstate = pstate; cpu->pstate.current_pstate = pstate;
val = pstate << 8;
if (limits.no_turbo) if (limits.no_turbo)
wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8)); val |= (u64)1 << 32;
else
wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
wrmsrl(MSR_IA32_PERF_CTL, val);
} }
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -435,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample) struct sample *sample)
{ {
u64 core_pct; u64 core_pct;
core_pct = div64_u64(sample->aperf * 100, sample->mperf); core_pct = div64_u64(int_tofp(sample->aperf * 100),
sample->freq = cpu->pstate.max_pstate * core_pct * 1000; sample->mperf);
sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
sample->core_pct_busy = core_pct; sample->core_pct_busy = core_pct;
} }
@@ -468,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
mod_timer_pinned(&cpu->timer, jiffies + delay); mod_timer_pinned(&cpu->timer, jiffies + delay);
} }
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{ {
int32_t busy_scaled;
int32_t core_busy, max_pstate, current_pstate; int32_t core_busy, max_pstate, current_pstate;
core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate); max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate); current_pstate = int_tofp(cpu->pstate.current_pstate);
busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
return fp_toint(busy_scaled);
} }
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{ {
int busy_scaled; int32_t busy_scaled;
struct _pid *pid; struct _pid *pid;
signed int ctl = 0; signed int ctl = 0;
int steps; int steps;
@@ -637,8 +636,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
static int intel_pstate_cpu_init(struct cpufreq_policy *policy) static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{ {
int rc, min_pstate, max_pstate;
struct cpudata *cpu; struct cpudata *cpu;
int rc;
rc = intel_pstate_init_cpu(policy->cpu); rc = intel_pstate_init_cpu(policy->cpu);
if (rc) if (rc)
@@ -652,9 +651,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
else else
policy->policy = CPUFREQ_POLICY_POWERSAVE; policy->policy = CPUFREQ_POLICY_POWERSAVE;
intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); policy->min = cpu->pstate.min_pstate * 100000;
policy->min = min_pstate * 100000; policy->max = cpu->pstate.turbo_pstate * 100000;
policy->max = max_pstate * 100000;
/* cpuinfo and default policy values */ /* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
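
The intel_pstate changes above keep the PID error term in 32-bit fixed point (int_tofp/fp_toint/mul_fp/div_fp with an 8-bit fraction) instead of rounding it to an int first. A standalone sketch of that arithmetic using the same 24.8 layout; the gain and busy values below are made up, and only the proportional term is shown.

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8                             /* 24.8 fixed point, as in the driver */
#define int_tofp(x) ((int32_t)(x) << FRAC_BITS)
#define fp_toint(x) ((x) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
    return (int32_t)(((int64_t)x * (int64_t)y) >> FRAC_BITS);
}

static int32_t div_fp(int32_t x, int32_t y)
{
    return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

/*
 * Proportional slice of the PID step: keep the error in fixed point so
 * sub-integer busy percentages are not rounded away before multiplying.
 */
static int pid_p_term(int setpoint, int32_t busy_fp, int32_t p_gain_fp)
{
    int32_t fp_error = int_tofp(setpoint) - busy_fp;

    return fp_toint(mul_fp(p_gain_fp, fp_error));
}

int main(void)
{
    int32_t busy = div_fp(int_tofp(805), int_tofp(10));    /* 80.5% busy */
    int32_t gain = div_fp(int_tofp(20), int_tofp(100));    /* p gain 0.20 */

    printf("p term = %d\n", pid_p_term(97, busy, gain));   /* ~0.2 * 16.5 -> 3 */
    return 0;
}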


@@ -166,7 +166,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
if (freq->frequency == CPUFREQ_ENTRY_INVALID) if (freq->frequency == CPUFREQ_ENTRY_INVALID)
continue; continue;
dvfs = &s3c64xx_dvfs_table[freq->index]; dvfs = &s3c64xx_dvfs_table[freq->driver_data];
found = 0; found = 0;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {


@@ -305,6 +305,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edma_alloc_slot(EDMA_CTLR(echan->ch_num), edma_alloc_slot(EDMA_CTLR(echan->ch_num),
EDMA_SLOT_ANY); EDMA_SLOT_ANY);
if (echan->slot[i] < 0) { if (echan->slot[i] < 0) {
kfree(edesc);
dev_err(dev, "Failed to allocate slot\n"); dev_err(dev, "Failed to allocate slot\n");
kfree(edesc); kfree(edesc);
return NULL; return NULL;
@@ -346,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
ccnt = sg_dma_len(sg) / (acnt * bcnt); ccnt = sg_dma_len(sg) / (acnt * bcnt);
if (ccnt > (SZ_64K - 1)) { if (ccnt > (SZ_64K - 1)) {
dev_err(dev, "Exceeded max SG segment size\n"); dev_err(dev, "Exceeded max SG segment size\n");
kfree(edesc);
return NULL; return NULL;
} }
cidx = acnt * bcnt; cidx = acnt * bcnt;


@@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
struct lp_gpio *lg = irq_data_get_irq_handler_data(data); struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
struct irq_chip *chip = irq_data_get_irq_chip(data); struct irq_chip *chip = irq_data_get_irq_chip(data);
u32 base, pin, mask; u32 base, pin, mask;
unsigned long reg, pending; unsigned long reg, ena, pending;
unsigned virq; unsigned virq;
/* check from GPIO controller which pin triggered the interrupt */ /* check from GPIO controller which pin triggered the interrupt */
for (base = 0; base < lg->chip.ngpio; base += 32) { for (base = 0; base < lg->chip.ngpio; base += 32) {
reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT); reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
while ((pending = inl(reg))) { while ((pending = (inl(reg) & inl(ena)))) {
pin = __ffs(pending); pin = __ffs(pending);
mask = BIT(pin); mask = BIT(pin);
/* Clear before handling so we don't lose an edge */ /* Clear before handling so we don't lose an edge */
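
The lp_gpio handler above masks the raw status register with the enable register so only enabled pins are serviced, then walks the remaining set bits. The same walk in plain C with simulated registers; the GCC/Clang builtin __builtin_ctz() plays the role of __ffs() here.

#include <stdint.h>
#include <stdio.h>

/* Simulated hardware registers for one 32-pin bank. */
static uint32_t int_stat   = 0x0000A011;        /* pins that raised an interrupt */
static uint32_t int_enable = 0x0000001F;        /* pins that are actually enabled */

static void handle_pin(unsigned int pin)
{
    printf("servicing pin %u\n", pin);
}

static void irq_handler(void)
{
    uint32_t pending;

    /* Only look at enabled sources, as in the lp_gpio fix above. */
    while ((pending = (int_stat & int_enable)) != 0) {
        unsigned int pin = (unsigned int)__builtin_ctz(pending);

        int_stat &= ~(1u << pin);               /* "clear before handling" */
        handle_pin(pin);
    }
}

int main(void)
{
    irq_handler();      /* services pins 0 and 4; pins 13 and 15 stay masked off */
    return 0;
}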


@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
*/ */
static int desc_to_gpio(const struct gpio_desc *desc) static int desc_to_gpio(const struct gpio_desc *desc)
{ {
return desc->chip->base + gpio_chip_hwgpio(desc); return desc - &gpio_desc[0];
} }
@@ -1398,7 +1398,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
int status = -EPROBE_DEFER; int status = -EPROBE_DEFER;
unsigned long flags; unsigned long flags;
if (!desc || !desc->chip) { if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__); pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL; return -EINVAL;
} }
@@ -1406,6 +1406,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
spin_lock_irqsave(&gpio_lock, flags); spin_lock_irqsave(&gpio_lock, flags);
chip = desc->chip; chip = desc->chip;
if (chip == NULL)
goto done;
if (!try_module_get(chip->owner)) if (!try_module_get(chip->owner))
goto done; goto done;


@@ -402,9 +402,16 @@ long drm_ioctl(struct file *filp,
cmd = ioctl->cmd_drv; cmd = ioctl->cmd_drv;
} }
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
u32 drv_size;
ioctl = &drm_ioctls[nr]; ioctl = &drm_ioctls[nr];
cmd = ioctl->cmd;
drv_size = _IOC_SIZE(ioctl->cmd);
usize = asize = _IOC_SIZE(cmd); usize = asize = _IOC_SIZE(cmd);
if (drv_size > asize)
asize = drv_size;
cmd = ioctl->cmd;
} else } else
goto err_i1; goto err_i1;


@@ -505,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_modeset_suspend_hw(dev); intel_modeset_suspend_hw(dev);
} }
i915_gem_suspend_gtt_mappings(dev);
i915_save_state(dev); i915_save_state(dev);
intel_opregion_fini(dev); intel_opregion_fini(dev);
@@ -648,7 +650,8 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev); i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
} } else if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_check_and_clear_faults(dev);
__i915_drm_thaw(dev); __i915_drm_thaw(dev);


@@ -497,10 +497,12 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */ /* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level); enum i915_cache_level level,
bool valid); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm, void (*clear_range)(struct i915_address_space *vm,
unsigned int first_entry, unsigned int first_entry,
unsigned int num_entries); unsigned int num_entries,
bool use_scratch);
void (*insert_entries)(struct i915_address_space *vm, void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st, struct sg_table *st,
unsigned int first_entry, unsigned int first_entry,
@@ -2065,6 +2067,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj); struct drm_i915_gem_object *obj);
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,


@@ -58,9 +58,10 @@
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level) enum i915_cache_level level,
bool valid)
{ {
gen6_gtt_pte_t pte = GEN6_PTE_VALID; gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr); pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) { switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
} }
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level) enum i915_cache_level level,
bool valid)
{ {
gen6_gtt_pte_t pte = GEN6_PTE_VALID; gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr); pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) { switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level) enum i915_cache_level level,
bool valid)
{ {
gen6_gtt_pte_t pte = GEN6_PTE_VALID; gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr); pte |= GEN6_PTE_ADDR_ENCODE(addr);
/* Mark the page as writeable. Other platforms don't have a /* Mark the page as writeable. Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
} }
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level) enum i915_cache_level level,
bool valid)
{ {
gen6_gtt_pte_t pte = GEN6_PTE_VALID; gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr); pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE) if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
} }
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level) enum i915_cache_level level,
bool valid)
{ {
gen6_gtt_pte_t pte = GEN6_PTE_VALID; gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr); pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) { switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
/* PPGTT support for Sandybridge/Gen6 and later */ /* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm, static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry, unsigned first_entry,
unsigned num_entries) unsigned num_entries,
bool use_scratch)
{ {
struct i915_hw_ppgtt *ppgtt = struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base); container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i; unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
while (num_entries) { while (num_entries) {
last_pte = first_pte + num_entries; last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
dma_addr_t page_addr; dma_addr_t page_addr;
page_addr = sg_page_iter_dma_address(&sg_iter); page_addr = sg_page_iter_dma_address(&sg_iter);
pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
if (++act_pte == I915_PPGTT_PT_ENTRIES) { if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr); kunmap_atomic(pt_vaddr);
act_pt++; act_pt++;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
} }
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.clear_range(&ppgtt->base, 0,
ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
{ {
ppgtt->base.clear_range(&ppgtt->base, ppgtt->base.clear_range(&ppgtt->base,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT); obj->base.size >> PAGE_SHIFT,
true);
} }
extern int intel_iommu_gfx_mapped; extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible; dev_priv->mm.interruptible = interruptible;
} }
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
if (INTEL_INFO(dev)->gen < 6)
return;
for_each_ring(ring, dev_priv, i) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(ring));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
fault_reg & PAGE_MASK,
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault_reg),
RING_FAULT_FAULT_TYPE(fault_reg));
I915_WRITE(RING_FAULT_REG(ring),
fault_reg & ~RING_FAULT_VALID);
}
}
POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
*/
if (INTEL_INFO(dev)->gen < 6)
return;
i915_check_and_clear_faults(dev);
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
false);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev) void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
i915_check_and_clear_faults(dev);
/* First fill our portion of the GTT with scratch pages */ /* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE, dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE); dev_priv->gtt.base.total / PAGE_SIZE,
true);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
i915_gem_clflush_object(obj, obj->pin_display); i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter); addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]); iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
i++; i++;
} }
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/ */
if (i != 0) if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) != WARN_ON(readl(&gtt_entries[i-1]) !=
vm->pte_encode(addr, level)); vm->pte_encode(addr, level, true));
/* This next bit makes the above posting read even more important. We /* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates * want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
static void gen6_ggtt_clear_range(struct i915_address_space *vm, static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry, unsigned int first_entry,
unsigned int num_entries) unsigned int num_entries,
bool use_scratch)
{ {
struct drm_i915_private *dev_priv = vm->dev->dev_private; struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries)) first_entry, num_entries, max_entries))
num_entries = max_entries; num_entries = max_entries;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
for (i = 0; i < num_entries; i++) for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]); iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base); readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
static void i915_ggtt_clear_range(struct i915_address_space *vm, static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry, unsigned int first_entry,
unsigned int num_entries) unsigned int num_entries,
bool unused)
{ {
intel_gtt_clear_range(first_entry, num_entries); intel_gtt_clear_range(first_entry, num_entries);
} }
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
entry, entry,
obj->base.size >> PAGE_SHIFT); obj->base.size >> PAGE_SHIFT,
true);
obj->has_global_gtt_mapping = 0; obj->has_global_gtt_mapping = 0;
} }
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end); hole_start, hole_end);
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
} }
/* And finally clear the reserved guard page */ /* And finally clear the reserved guard page */
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
} }
static bool static bool

View File

@@ -604,6 +604,10 @@
#define ARB_MODE_SWIZZLE_IVB (1<<5) #define ARB_MODE_SWIZZLE_IVB (1<<5)
#define RENDER_HWS_PGA_GEN7 (0x04080) #define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define RING_FAULT_GTTSEL_MASK (1<<11)
#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
#define RING_FAULT_VALID (1<<0)
#define DONE_REG 0x40b0 #define DONE_REG 0x40b0
#define BSD_HWS_PGA_GEN7 (0x04180) #define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280) #define BLT_HWS_PGA_GEN7 (0x04280)
@@ -4279,7 +4283,9 @@
#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D 0xc2020 #define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) #define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */ /* CPU: FDI_TX */
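
The register hunk above adds mask/shift helpers for decoding a ring fault register (valid bit, fault type, source ID, GGTT/PPGTT select). A standalone decode using the same field layout; the macro names are shortened and the sample value is made up.

#include <stdint.h>
#include <stdio.h>

#define FAULT_VALID     (1u << 0)
#define FAULT_TYPE(x)   (((x) >> 1) & 0x3)
#define FAULT_SRCID(x)  (((x) >> 3) & 0xff)
#define FAULT_GTTSEL    (1u << 11)
#define FAULT_ADDR(x)   ((x) & ~0xFFFu)         /* page-aligned fault address */

static void decode_fault(uint32_t reg)
{
    if (!(reg & FAULT_VALID))
        return;                                 /* nothing latched */

    printf("fault addr=0x%08x space=%s srcid=%u type=%u\n",
           (unsigned)FAULT_ADDR(reg),
           (reg & FAULT_GTTSEL) ? "GGTT" : "PPGTT",
           (unsigned)FAULT_SRCID(reg),
           (unsigned)FAULT_TYPE(reg));
}

int main(void)
{
    /* address 0x00345000, GGTT, source 0x2a, type 1, valid */
    decode_fault(0x00345000u | FAULT_GTTSEL | (0x2au << 3) | (1u << 1) | FAULT_VALID);
    return 0;
}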


@@ -4759,7 +4759,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
* gating for the panel power sequencer or it will fail to * gating for the panel power sequencer or it will fail to
* start up when no ports are active. * start up when no ports are active.
*/ */
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
PCH_DPLUNIT_CLOCK_GATE_DISABLE |
PCH_CPUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS); DPLS_EDP_PPS_FIX_DIS);
/* The below fixes the weird display corruption, a few pixels shifted /* The below fixes the weird display corruption, a few pixels shifted


@@ -707,24 +707,37 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) { switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || if (radeon_audio != 0) {
(drm_detect_hdmi_monitor(radeon_connector->edid) && if (radeon_connector->use_digital &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))) (radeon_connector->audio == RADEON_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI; return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital) else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
} else if (radeon_connector->use_digital) {
return ATOM_ENCODER_MODE_DVI; return ATOM_ENCODER_MODE_DVI;
else } else {
return ATOM_ENCODER_MODE_CRT; return ATOM_ENCODER_MODE_CRT;
}
break; break;
case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIA:
default: default:
if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || if (radeon_audio != 0) {
(drm_detect_hdmi_monitor(radeon_connector->edid) && if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
(radeon_connector->audio == RADEON_AUDIO_AUTO))) return ATOM_ENCODER_MODE_HDMI;
return ATOM_ENCODER_MODE_HDMI; else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
else (radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
} else {
return ATOM_ENCODER_MODE_DVI; return ATOM_ENCODER_MODE_DVI;
}
break; break;
case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS; return ATOM_ENCODER_MODE_LVDS;
@@ -732,14 +745,19 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv; dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return ATOM_ENCODER_MODE_DP; return ATOM_ENCODER_MODE_DP;
else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || } else if (radeon_audio != 0) {
(drm_detect_hdmi_monitor(radeon_connector->edid) && if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
(radeon_connector->audio == RADEON_AUDIO_AUTO))) return ATOM_ENCODER_MODE_HDMI;
return ATOM_ENCODER_MODE_HDMI; else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
else (radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
} else {
return ATOM_ENCODER_MODE_DVI; return ATOM_ENCODER_MODE_DVI;
}
break; break;
case DRM_MODE_CONNECTOR_eDP: case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP; return ATOM_ENCODER_MODE_DP;
@@ -1655,7 +1673,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
* does the same thing and more. * does the same thing and more.
*/ */
if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) && if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
(rdev->family != CHIP_RS880)) (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
} }
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {


@@ -1694,6 +1694,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
fw_name); fw_name);
release_firmware(rdev->smc_fw); release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL; rdev->smc_fw = NULL;
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) { } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR printk(KERN_ERR
"cik_smc: Bogus length %zu in firmware \"%s\"\n", "cik_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3182,6 +3183,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) { if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r); DRM_ERROR("radeon: failed to get ib (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r; return r;
} }
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -3198,6 +3200,8 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
r = radeon_fence_wait(ib.fence, false); r = radeon_fence_wait(ib.fence, false);
if (r) { if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r); DRM_ERROR("radeon: fence wait failed (%d).\n", r);
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r; return r;
} }
for (i = 0; i < rdev->usec_timeout; i++) { for (i = 0; i < rdev->usec_timeout; i++) {


@@ -113,6 +113,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb; u8 *sadb;
int sad_count; int sad_count;
/* XXX: setting this register causes hangs on some asics */
return;
if (!dig->afmt->pin) if (!dig->afmt->pin)
return; return;


@@ -67,6 +67,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb; u8 *sadb;
int sad_count; int sad_count;
/* XXX: setting this register causes hangs on some asics */
return;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) if (connector->encoder == encoder)
radeon_connector = to_radeon_connector(connector); radeon_connector = to_radeon_connector(connector);


@@ -804,6 +804,7 @@ int ni_init_microcode(struct radeon_device *rdev)
fw_name); fw_name);
release_firmware(rdev->smc_fw); release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL; rdev->smc_fw = NULL;
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) { } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR printk(KERN_ERR
"ni_mc: Bogus length %zu in firmware \"%s\"\n", "ni_mc: Bogus length %zu in firmware \"%s\"\n",


@@ -2302,6 +2302,7 @@ int r600_init_microcode(struct radeon_device *rdev)
 fw_name);
 release_firmware(rdev->smc_fw);
 rdev->smc_fw = NULL;
+err = 0;
 } else if (rdev->smc_fw->size != smc_req_size) {
 printk(KERN_ERR
 "smc: Bogus length %zu in firmware \"%s\"\n",


@@ -309,6 +309,9 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 u8 *sadb;
 int sad_count;
+/* XXX: setting this register causes hangs on some asics */
+return;
 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 if (connector->encoder == encoder)
 radeon_connector = to_radeon_connector(connector);


@@ -1658,9 +1658,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 drm_object_attach_property(&radeon_connector->base.base,
 rdev->mode_info.underscan_vborder_property,
 0);
-drm_object_attach_property(&radeon_connector->base.base,
-rdev->mode_info.audio_property,
-RADEON_AUDIO_DISABLE);
+if (radeon_audio != 0)
+drm_object_attach_property(&radeon_connector->base.base,
+rdev->mode_info.audio_property,
+(radeon_audio == 1) ?
+RADEON_AUDIO_AUTO :
+RADEON_AUDIO_DISABLE);
 subpixel_order = SubPixelHorizontalRGB;
 connector->interlace_allowed = true;
 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1754,10 +1757,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 rdev->mode_info.underscan_vborder_property,
 0);
 }
-if (ASIC_IS_DCE2(rdev)) {
+if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 drm_object_attach_property(&radeon_connector->base.base,
 rdev->mode_info.audio_property,
-RADEON_AUDIO_DISABLE);
+(radeon_audio == 1) ?
+RADEON_AUDIO_AUTO :
+RADEON_AUDIO_DISABLE);
 }
 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 radeon_connector->dac_load_detect = true;
@@ -1799,10 +1804,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 rdev->mode_info.underscan_vborder_property,
 0);
 }
-if (ASIC_IS_DCE2(rdev)) {
+if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 drm_object_attach_property(&radeon_connector->base.base,
 rdev->mode_info.audio_property,
-RADEON_AUDIO_DISABLE);
+(radeon_audio == 1) ?
+RADEON_AUDIO_AUTO :
+RADEON_AUDIO_DISABLE);
 }
 subpixel_order = SubPixelHorizontalRGB;
 connector->interlace_allowed = true;
@@ -1843,10 +1850,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 rdev->mode_info.underscan_vborder_property,
 0);
 }
-if (ASIC_IS_DCE2(rdev)) {
+if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 drm_object_attach_property(&radeon_connector->base.base,
 rdev->mode_info.audio_property,
-RADEON_AUDIO_DISABLE);
+(radeon_audio == 1) ?
+RADEON_AUDIO_AUTO :
+RADEON_AUDIO_DISABLE);
 }
 connector->interlace_allowed = true;
 /* in theory with a DP to VGA converter... */


@@ -85,9 +85,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 VRAM, also but everything into VRAM on AGP cards to avoid
 image corruptions */
 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-p->rdev->family < CHIP_PALM &&
 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+/* TODO: is this still needed for NI+ ? */
 p->relocs[i].lobj.domain =
 RADEON_GEM_DOMAIN_VRAM;


@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = -1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = -1;
@@ -196,7 +196,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
-MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
 module_param_named(audio, radeon_audio, int, 0444);
 MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");


@@ -476,7 +476,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 return -EINVAL;
 }
-if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
+/* TODO: is this still necessary on NI+ ? */
+if ((cmd == 0 || cmd == 0x3) &&
 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 start, end);


@@ -1681,6 +1681,7 @@ static int si_init_microcode(struct radeon_device *rdev)
 fw_name);
 release_firmware(rdev->smc_fw);
 rdev->smc_fw = NULL;
+err = 0;
 } else if (rdev->smc_fw->size != smc_req_size) {
 printk(KERN_ERR
 "si_smc: Bogus length %zu in firmware \"%s\"\n",


@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
 /* enable VCPU clock */
 WREG32(UVD_VCPU_CNTL, 1 << 9);
-/* enable UMC and NC0 */
-WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
+/* enable UMC */
+WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
 /* boot up the VCPU */
 WREG32(UVD_SOFT_RESET, 0);


@@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
 struct vmw_fpriv *vmw_fp;
 vmw_fp = vmw_fpriv(file_priv);
-ttm_object_file_release(&vmw_fp->tfile);
-if (vmw_fp->locked_master)
+if (vmw_fp->locked_master) {
+struct vmw_master *vmaster =
+vmw_master(vmw_fp->locked_master);
+ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ttm_vt_unlock(&vmaster->lock);
 drm_master_put(&vmw_fp->locked_master);
+}
+ttm_object_file_release(&vmw_fp->tfile);
 kfree(vmw_fp);
 }
@@ -925,14 +933,13 @@ static void vmw_master_drop(struct drm_device *dev,
 vmw_fp->locked_master = drm_master_get(file_priv->master);
 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-vmw_execbuf_release_pinned_bo(dev_priv);
 if (unlikely((ret != 0))) {
 DRM_ERROR("Unable to lock TTM at VT switch.\n");
 drm_master_put(&vmw_fp->locked_master);
 }
-ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+vmw_execbuf_release_pinned_bo(dev_priv);
 if (!dev_priv->enable_fb) {
 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);


@@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 if (new_backup)
 res->backup_offset = new_backup_offset;
-if (!res->func->may_evict)
+if (!res->func->may_evict || res->id == -1)
 return;
 write_lock(&dev_priv->resource_lock);


@@ -319,7 +319,7 @@ static s32 item_sdata(struct hid_item *item)
 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 {
-__u32 raw_value;
+__s32 raw_value;
 switch (item->tag) {
 case HID_GLOBAL_ITEM_TAG_PUSH:
@@ -370,10 +370,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 return 0;
 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
-/* Units exponent negative numbers are given through a
-* two's complement.
-* See "6.2.2.7 Global Items" for more information. */
-raw_value = item_udata(item);
+/* Many devices provide unit exponent as a two's complement
+* nibble due to the common misunderstanding of HID
+* specification 1.11, 6.2.2.7 Global Items. Attempt to handle
+* both this and the standard encoding. */
+raw_value = item_sdata(item);
 if (!(raw_value & 0xfffffff0))
 parser->global.unit_exponent = hid_snto32(raw_value, 4);
 else
@@ -1870,6 +1871,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
 { }
 };


@@ -633,6 +633,7 @@
 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
 #define USB_VENDOR_ID_NINTENDO 0x057e
+#define USB_VENDOR_ID_NINTENDO2 0x054c
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
@@ -792,6 +793,8 @@
 #define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009
 #define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
 #define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
+#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
+#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
 #define USB_VENDOR_ID_THINGM 0x27b8
 #define USB_DEVICE_ID_BLINK1 0x01ed
@@ -919,4 +922,7 @@
 #define USB_VENDOR_ID_PRIMAX 0x0461
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+#define USB_VENDOR_ID_SIS 0x0457
+#define USB_DEVICE_ID_SIS_TS 0x1013
 #endif


@@ -192,6 +192,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
 return -EINVAL;
 }
 /**
 * hidinput_calc_abs_res - calculate an absolute axis resolution
 * @field: the HID report field to calculate resolution for
@@ -234,23 +235,17 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
 case ABS_MT_TOOL_Y:
 case ABS_MT_TOUCH_MAJOR:
 case ABS_MT_TOUCH_MINOR:
-if (field->unit & 0xffffff00) /* Not a length */
-return 0;
-unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
-switch (field->unit & 0xf) {
-case 0x1: /* If centimeters */
+if (field->unit == 0x11) { /* If centimeters */
 /* Convert to millimeters */
 unit_exponent += 1;
-break;
-case 0x3: /* If inches */
+} else if (field->unit == 0x13) { /* If inches */
 /* Convert to millimeters */
 prev = physical_extents;
 physical_extents *= 254;
 if (physical_extents < prev)
 return 0;
 unit_exponent -= 1;
-break;
-default:
+} else {
 return 0;
 }
 break;


@@ -834,7 +834,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
 goto done;
 }
-if (vendor == USB_VENDOR_ID_NINTENDO) {
+if (vendor == USB_VENDOR_ID_NINTENDO ||
+vendor == USB_VENDOR_ID_NINTENDO2) {
 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
 devtype = WIIMOTE_DEV_GEN10;
 goto done;
@@ -1855,6 +1856,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
 static const struct hid_device_id wiimote_hid_devices[] = {
 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
+USB_DEVICE_ID_NINTENDO_WIIMOTE) },
 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
 USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
 { }


@@ -110,6 +110,9 @@ static const struct hid_blacklist {
 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+{ USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
 { 0, 0 }
 };


@@ -525,8 +525,10 @@ static int adf4350_probe(struct spi_device *spi)
 }
 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
-if (indio_dev == NULL)
-return -ENOMEM;
+if (indio_dev == NULL) {
+ret = -ENOMEM;
+goto error_disable_clk;
+}
 st = iio_priv(indio_dev);


@@ -477,6 +477,9 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
 indio_dev->currentmode = INDIO_DIRECT_MODE;
 if (indio_dev->setup_ops->postdisable)
 indio_dev->setup_ops->postdisable(indio_dev);
+if (indio_dev->available_scan_masks == NULL)
+kfree(indio_dev->active_scan_mask);
 }
 int iio_update_buffers(struct iio_dev *indio_dev,


@@ -31,6 +31,17 @@ config INFINIBAND_USER_ACCESS
 libibverbs, libibcm and a hardware driver library from
 <http://www.openfabrics.org/git/>.
+config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
+bool "Experimental and unstable ABI for userspace access to flow steering verbs"
+depends on INFINIBAND_USER_ACCESS
+depends on STAGING
+---help---
+The final ABI for userspace access to flow steering verbs
+has not been defined. To use the current ABI, *WHICH WILL
+CHANGE IN THE FUTURE*, say Y here.
+If unsure, say N.
 config INFINIBAND_USER_MEM
 bool
 depends on INFINIBAND_USER_ACCESS != n


@@ -217,7 +217,9 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
 IB_UVERBS_DECLARE_CMD(create_xsrq);
 IB_UVERBS_DECLARE_CMD(open_xrcd);
 IB_UVERBS_DECLARE_CMD(close_xrcd);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 IB_UVERBS_DECLARE_CMD(create_flow);
 IB_UVERBS_DECLARE_CMD(destroy_flow);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 #endif /* UVERBS_H */


@@ -54,7 +54,9 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
 do { \
@@ -2599,6 +2601,7 @@ out_put:
 return ret ? ret : in_len;
 }
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
 union ib_flow_spec *ib_spec)
 {
@@ -2824,6 +2827,7 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
 return ret ? ret : in_len;
 }
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
 struct ib_uverbs_create_xsrq *cmd,


@@ -115,8 +115,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow,
 [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 };
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -605,6 +607,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
 return -ENOSYS;
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
 struct ib_uverbs_cmd_hdr_ex hdr_ex;
@@ -621,6 +624,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 (hdr_ex.out_words +
 hdr_ex.provider_out_words) * 4);
 } else {
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 if (hdr.in_words * 4 != count)
 return -EINVAL;
@@ -628,7 +632,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 buf + sizeof(hdr),
 hdr.in_words * 4,
 hdr.out_words * 4);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 }
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 }
 static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)


@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
 return "C2_QP_STATE_ERROR";
 default:
 return "<invalid QP state>";
-};
+}
 }
 void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)


@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 ibdev->ib_dev.uverbs_cmd_mask |=
 (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
 (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 }
 mlx4_ib_alloc_eqs(dev, ibdev);


@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+char name[MLX5_MAX_EQ_NAME];
 struct mlx5_eq *eq, *n;
 int ncomp_vec;
 int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 goto clean;
 }
-snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
 err = mlx5_create_map_eq(&dev->mdev, eq,
 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-eq->name,
-&dev->mdev.priv.uuari.uars[0]);
+name, &dev->mdev.priv.uuari.uars[0]);
 if (err) {
 kfree(eq);
 goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 props->max_srq_sge = max_rq_sg - 1;
 props->max_fast_reg_page_list_len = (unsigned int)-1;
 props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
-props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-props->masked_atomic_cap = IB_ATOMIC_HCA;
+props->atomic_cap = IB_ATOMIC_NONE;
+props->masked_atomic_cap = IB_ATOMIC_NONE;
 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
 props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
 props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 ibev.device = &ibdev->ib_dev;
 ibev.element.port_num = port;
+if (port < 1 || port > ibdev->num_ports) {
+mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+return;
+}
 if (ibdev->ib_active)
 ib_dispatch_event(&ibev);
 }


@@ -42,6 +42,10 @@ enum {
 DEF_CACHE_SIZE = 10,
 };
+enum {
+MLX5_UMR_ALIGN = 2048
+};
 static __be64 *mr_align(__be64 *ptr, int align)
 {
 unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-struct device *ddev = dev->ib_dev.dma_device;
 struct mlx5_mr_cache *cache = &dev->cache;
 struct mlx5_cache_ent *ent = &cache->ent[c];
 struct mlx5_create_mkey_mbox_in *in;
 struct mlx5_ib_mr *mr;
 int npages = 1 << ent->order;
-int size = sizeof(u64) * npages;
 int err = 0;
 int i;
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 }
 mr->order = ent->order;
 mr->umred = 1;
-mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-if (!mr->pas) {
-kfree(mr);
-err = -ENOMEM;
-goto out;
-}
-mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-DMA_TO_DEVICE);
-if (dma_mapping_error(ddev, mr->dma)) {
-kfree(mr->pas);
-kfree(mr);
-err = -ENOMEM;
-goto out;
-}
 in->seg.status = 1 << 6;
 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 sizeof(*in));
 if (err) {
 mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-kfree(mr->pas);
 kfree(mr);
 goto out;
 }
@@ -129,11 +114,9 @@ out:
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-struct device *ddev = dev->ib_dev.dma_device;
 struct mlx5_mr_cache *cache = &dev->cache;
 struct mlx5_cache_ent *ent = &cache->ent[c];
 struct mlx5_ib_mr *mr;
-int size;
 int err;
 int i;
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 ent->size--;
 spin_unlock(&ent->lock);
 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-if (err) {
+if (err)
 mlx5_ib_warn(dev, "failed destroy mkey\n");
-} else {
-size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-kfree(mr->pas);
+else
 kfree(mr);
-}
 }
 }
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-struct device *ddev = dev->ib_dev.dma_device;
 struct mlx5_mr_cache *cache = &dev->cache;
 struct mlx5_cache_ent *ent = &cache->ent[c];
 struct mlx5_ib_mr *mr;
-int size;
 int err;
+cancel_delayed_work(&ent->dwork);
 while (1) {
 spin_lock(&ent->lock);
 if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 ent->size--;
 spin_unlock(&ent->lock);
 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-if (err) {
+if (err)
 mlx5_ib_warn(dev, "failed destroy mkey\n");
-} else {
-size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-kfree(mr->pas);
+else
 kfree(mr);
-}
 }
 }
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 int i;
 dev->cache.stopped = 1;
-destroy_workqueue(dev->cache.wq);
+flush_workqueue(dev->cache.wq);
 mlx5_mr_cache_debugfs_cleanup(dev);
 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
 clean_keys(dev, i);
+destroy_workqueue(dev->cache.wq);
 return 0;
 }
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 int page_shift, int order, int access_flags)
 {
 struct mlx5_ib_dev *dev = to_mdev(pd->device);
+struct device *ddev = dev->ib_dev.dma_device;
 struct umr_common *umrc = &dev->umrc;
 struct ib_send_wr wr, *bad;
 struct mlx5_ib_mr *mr;
 struct ib_sge sg;
+int size = sizeof(u64) * npages;
 int err;
 int i;
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 if (!mr)
 return ERR_PTR(-EAGAIN);
-mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+if (!mr->pas) {
+err = -ENOMEM;
+goto error;
+}
+mlx5_ib_populate_pas(dev, umem, page_shift,
+mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+DMA_TO_DEVICE);
+if (dma_mapping_error(ddev, mr->dma)) {
+kfree(mr->pas);
+err = -ENOMEM;
+goto error;
+}
 memset(&wr, 0, sizeof(wr));
 wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 wait_for_completion(&mr->done);
 up(&umrc->sem);
+dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+kfree(mr->pas);
 if (mr->status != IB_WC_SUCCESS) {
 mlx5_ib_warn(dev, "reg umr failed\n");
 err = -EFAULT;


@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
 switch (qp_type) {
 case IB_QPT_XRC_INI:
-size = sizeof(struct mlx5_wqe_xrc_seg);
+size += sizeof(struct mlx5_wqe_xrc_seg);
 /* fall through */
 case IB_QPT_RC:
 size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
 sizeof(struct mlx5_wqe_raddr_seg);
 break;
+case IB_QPT_XRC_TGT:
+return 0;
 case IB_QPT_UC:
-size = sizeof(struct mlx5_wqe_ctrl_seg) +
+size += sizeof(struct mlx5_wqe_ctrl_seg) +
 sizeof(struct mlx5_wqe_raddr_seg);
 break;
 case IB_QPT_UD:
 case IB_QPT_SMI:
 case IB_QPT_GSI:
-size = sizeof(struct mlx5_wqe_ctrl_seg) +
+size += sizeof(struct mlx5_wqe_ctrl_seg) +
 sizeof(struct mlx5_wqe_datagram_seg);
 break;
 case MLX5_IB_QPT_REG_UMR:
-size = sizeof(struct mlx5_wqe_ctrl_seg) +
+size += sizeof(struct mlx5_wqe_ctrl_seg) +
 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
 sizeof(struct mlx5_mkey_seg);
 break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 return wqe_size;
 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-mlx5_ib_dbg(dev, "\n");
+mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+wqe_size, dev->mdev.caps.max_sq_desc_sz);
 return -EINVAL;
 }
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+return -ENOMEM;
+}
 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
 qp->sq.max_gs = attr->cap.max_send_sge;
-qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+qp->sq.max_post = wq_size / wqe_size;
+attr->cap.max_send_wr = qp->sq.max_post;
 return wq_size;
 }
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 MLX5_QP_OPTPAR_Q_KEY,
 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
 MLX5_QP_OPTPAR_Q_KEY,
+[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+MLX5_QP_OPTPAR_RRE |
+MLX5_QP_OPTPAR_RAE |
+MLX5_QP_OPTPAR_RWE |
+MLX5_QP_OPTPAR_PKEY_INDEX,
 },
 },
 [MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 [MLX5_QP_STATE_RTS] = {
 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+MLX5_QP_OPTPAR_RWE |
+MLX5_QP_OPTPAR_RAE |
+MLX5_QP_OPTPAR_RRE,
 },
 },
 };
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 rseg->reserved = 0;
 }
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-} else {
-aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-aseg->compare = 0;
-}
-}
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-struct ib_send_wr *wr)
-{
-aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 struct ib_send_wr *wr)
 {
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 case IB_WR_ATOMIC_CMP_AND_SWP:
 case IB_WR_ATOMIC_FETCH_AND_ADD:
-set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-wr->wr.atomic.rkey);
-seg += sizeof(struct mlx5_wqe_raddr_seg);
-set_atomic_seg(seg, wr);
-seg += sizeof(struct mlx5_wqe_atomic_seg);
-size += (sizeof(struct mlx5_wqe_raddr_seg) +
-sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-break;
 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-wr->wr.atomic.rkey);
-seg += sizeof(struct mlx5_wqe_raddr_seg);
-set_masked_atomic_seg(seg, wr);
-seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-size += (sizeof(struct mlx5_wqe_raddr_seg) +
-sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-break;
+mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+err = -ENOSYS;
+*bad_wr = wr;
+goto out;
 case IB_WR_LOCAL_INV:
 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
