Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Merge conflict of mlx5 resolved using instructions in merge
commit 9566e650bf.
Signed-off-by: David S. Miller <davem@davemloft.net>

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * kernel/configs.c
  * Echo the kernel .config file used to build the kernel
@@ -6,21 +7,6 @@
  * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
  * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
  * Copyright (C) 2002 Hewlett-Packard Company
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */

 #include <linux/kernel.h>
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
         u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

-        if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
-                max_dma = dev->bus_dma_mask;
-
         return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }

@@ -130,10 +127,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
         if (!page)
                 return NULL;

-        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+            !force_dma_unencrypted(dev)) {
                 /* remove any dirty cache lines on the kernel alias */
                 if (!PageHighMem(page))
                         arch_dma_prep_coherent(page, size);
                 *dma_handle = phys_to_dma(dev, page_to_phys(page));
                 /* return the page pointer as the opaque cookie */
                 return page;
         }
@@ -178,7 +177,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
         unsigned int page_order = get_order(size);

-        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+            !force_dma_unencrypted(dev)) {
                 /* cpu_addr is a struct page cookie, not a kernel address */
                 __dma_direct_free_pages(dev, size, cpu_addr);
                 return;
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 }
 EXPORT_SYMBOL(dma_get_sgtable_attrs);

+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+        if (dev_is_dma_coherent(dev) ||
+            (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+             (attrs & DMA_ATTR_NON_CONSISTENT)))
+                return prot;
+        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+                return arch_dma_mmap_pgprot(dev, prot, attrs);
+        return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
         unsigned long pfn;
         int ret = -ENXIO;

-        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

         if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                 return ret;
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,

         /* create a coherent mapping */
         ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
-                        arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+                        dma_pgprot(dev, PAGE_KERNEL, attrs),
                         __builtin_return_address(0));
         if (!ret) {
                 __dma_direct_free_pages(dev, size, page);
@@ -251,11 +251,9 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
          * Determine the number of vectors which need interrupt affinities
          * assigned. If the pre/post request exhausts the available vectors
          * then nothing to do here except for invoking the calc_sets()
-         * callback so the device driver can adjust to the situation. If there
-         * is only a single vector, then managing the queue is pointless as
-         * well.
+         * callback so the device driver can adjust to the situation.
          */
-        if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
+        if (nvecs > affd->pre_vectors + affd->post_vectors)
                 affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
         else
                 affvecs = 0;
@@ -40,6 +40,7 @@ struct sugov_policy {
         struct task_struct *thread;
         bool work_in_progress;

+        bool limits_changed;
         bool need_freq_update;
 };

@@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
             !cpufreq_this_cpu_can_update(sg_policy->policy))
                 return false;

-        if (unlikely(sg_policy->need_freq_update))
+        if (unlikely(sg_policy->limits_changed)) {
+                sg_policy->limits_changed = false;
+                sg_policy->need_freq_update = true;
                 return true;
+        }

         delta_ns = time - sg_policy->last_freq_update_time;

@@ -437,7 +441,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
         if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
-                sg_policy->need_freq_update = true;
+                sg_policy->limits_changed = true;
 }

 static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -457,7 +461,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
         if (!sugov_should_update_freq(sg_policy, time))
                 return;

-        busy = sugov_cpu_is_busy(sg_cpu);
+        /* Limits may have changed, don't skip frequency update */
+        busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);

         util = sugov_get_util(sg_cpu);
         max = sg_cpu->max;
@@ -831,6 +836,7 @@ static int sugov_start(struct cpufreq_policy *policy)
         sg_policy->last_freq_update_time = 0;
         sg_policy->next_freq = 0;
         sg_policy->work_in_progress = false;
+        sg_policy->limits_changed = false;
         sg_policy->need_freq_update = false;
         sg_policy->cached_raw_freq = 0;

@@ -879,7 +885,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
                 mutex_unlock(&sg_policy->work_lock);
         }

-        sg_policy->need_freq_update = true;
+        sg_policy->limits_changed = true;
 }

 struct cpufreq_governor schedutil_gov = {
@@ -2088,17 +2088,13 @@ retry:
         }

         deactivate_task(rq, next_task, 0);
-        sub_running_bw(&next_task->dl, &rq->dl);
-        sub_rq_bw(&next_task->dl, &rq->dl);
         set_task_cpu(next_task, later_rq->cpu);
-        add_rq_bw(&next_task->dl, &later_rq->dl);

         /*
          * Update the later_rq clock here, because the clock is used
          * by the cpufreq_update_util() inside __add_running_bw().
          */
         update_rq_clock(later_rq);
-        add_running_bw(&next_task->dl, &later_rq->dl);
         activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
         ret = 1;

@@ -2186,11 +2182,7 @@ static void pull_dl_task(struct rq *this_rq)
                         resched = true;

                         deactivate_task(src_rq, p, 0);
-                        sub_running_bw(&p->dl, &src_rq->dl);
-                        sub_rq_bw(&p->dl, &src_rq->dl);
                         set_task_cpu(p, this_cpu);
-                        add_rq_bw(&p->dl, &this_rq->dl);
-                        add_running_bw(&p->dl, &this_rq->dl);
                         activate_task(this_rq, p, 0);
                         dmin = p->dl.deadline;

@@ -1051,7 +1051,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,

         if (!rcu_access_pointer(group->poll_kworker)) {
                 struct sched_param param = {
-                        .sched_priority = MAX_RT_PRIO - 1,
+                        .sched_priority = 1,
                 };
                 struct kthread_worker *kworker;

@@ -1061,7 +1061,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
                         mutex_unlock(&group->trigger_lock);
                         return ERR_CAST(kworker);
                 }
-                sched_setscheduler(kworker->task, SCHED_FIFO, &param);
+                sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
                 kthread_init_delayed_work(&group->poll_work,
                                 psi_poll_work);
                 rcu_assign_pointer(group->poll_kworker, kworker);