Merge 47e61cadc7 ("MIPS: fw: Allow firmware to pass a empty env") into android12-5.10-lts
Steps on the way to 5.10.180 to help resolve some testing errors.

Change-Id: I291b51c58e5eeff603ad8bfa999b88c628b0fe8a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -499,6 +499,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	u64 val;
 	int wa_level;
 
+	if (KVM_REG_SIZE(reg->id) != sizeof(val))
+		return -ENOENT;
 	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
@@ -53,7 +53,7 @@ char *fw_getenv(char *envname)
 {
 	char *result = NULL;
 
-	if (_fw_envp != NULL) {
+	if (_fw_envp != NULL && fw_envp(0) != NULL) {
 		/*
 		 * Return a pointer to the given environment variable.
 		 * YAMON uses "name", "value" pairs, while U-Boot uses
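Note: fw_getenv() walks a NULL-terminated "name", "value" pair array handed over by the firmware, so the added fw_envp(0) check covers a non-NULL array whose very first slot is already NULL. A minimal user-space sketch of the same guard (env_get() and the sample array are hypothetical, not the kernel code):

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for the YAMON-style environment walk. */
	static char *env_get(char **envp, const char *name)
	{
		/* Both checks matter: a non-NULL array may still be empty. */
		if (envp == NULL || envp[0] == NULL)
			return NULL;
		for (int i = 0; envp[i] != NULL && envp[i + 1] != NULL; i += 2)
			if (strcmp(envp[i], name) == 0)
				return envp[i + 1];
		return NULL;
	}

	int main(void)
	{
		char *empty[] = { NULL };	/* firmware passed an empty env */
		char *v = env_get(empty, "memsize");
		printf("%s\n", v ? v : "(not set)");
		return 0;
	}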
@@ -25,17 +25,7 @@
  */
 union fpregs_state init_fpstate __read_mostly;
 
-/*
- * Track whether the kernel is using the FPU state
- * currently.
- *
- * This flag is used:
- *
- *   - by IRQ context code to potentially use the FPU
- *     if it's unused.
- *
- *   - to debug kernel_fpu_begin()/end() correctness
- */
+/* Track in-kernel FPU usage */
 static DEFINE_PER_CPU(bool, in_kernel_fpu);
 
 /*
@@ -43,42 +33,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
  */
 DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 
 static bool kernel_fpu_disabled(void)
 {
 	return this_cpu_read(in_kernel_fpu);
 }
 
-static bool interrupted_kernel_fpu_idle(void)
-{
-	return !kernel_fpu_disabled();
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static bool interrupted_user_mode(void)
-{
-	struct pt_regs *regs = get_irq_regs();
-	return regs && user_mode(regs);
-}
-
-/*
- * Can we use the FPU in kernel mode with the
- * whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
- */
 bool irq_fpu_usable(void)
 {
-	return !in_interrupt() ||
-		interrupted_user_mode() ||
-		interrupted_kernel_fpu_idle();
+	if (WARN_ON_ONCE(in_nmi()))
+		return false;
+
+	/* In kernel FPU usage already active? */
+	if (this_cpu_read(in_kernel_fpu))
+		return false;
+
+	/*
+	 * When not in NMI or hard interrupt context, FPU can be used in:
+	 *
+	 * - Task context except from within fpregs_lock()'ed critical
+	 *   regions.
+	 *
+	 * - Soft interrupt processing context which cannot happen
+	 *   while in a fpregs_lock()'ed critical region.
+	 */
+	if (!in_irq())
+		return true;
+
+	/*
+	 * In hard interrupt context it's safe when soft interrupts
+	 * are enabled, which means the interrupt did not hit in
+	 * a fpregs_lock()'ed critical region.
+	 */
+	return !softirq_count();
 }
 EXPORT_SYMBOL(irq_fpu_usable);
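Note: the rewritten predicate only reports whether kernel-mode FPU use is safe in the current context; callers still need a scalar fallback. A sketch of the usual calling pattern (copy_block() is made up for illustration):

	#include <linux/string.h>
	#include <asm/fpu/api.h>	/* irq_fpu_usable(), kernel_fpu_begin/end() */

	static void copy_block(void *dst, const void *src, size_t len)
	{
		if (!irq_fpu_usable()) {
			memcpy(dst, src, len);	/* scalar fallback */
			return;
		}
		kernel_fpu_begin();
		/* ...a SIMD-accelerated copy would go here... */
		memcpy(dst, src, len);
		kernel_fpu_end();
	}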
@@ -489,7 +489,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
 bool cpu_is_hotpluggable(unsigned cpu)
 {
 	struct device *dev = get_cpu_device(cpu);
-	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+	return dev && container_of(dev, struct cpu, dev)->hotpluggable
+		&& tick_nohz_cpu_hotpluggable(cpu);
 }
 EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
@@ -677,7 +677,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
 	calltime = ktime_get();
 	ret = really_probe(dev, drv);
 	rettime = ktime_get();
-	pr_debug("probe of %s returned %d after %lld usecs\n",
+	/*
+	 * Don't change this to pr_debug() because that requires
+	 * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the
+	 * kernel commandline to print this all the time at the debug level.
+	 */
+	printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
 		 dev_name(dev), ret, ktime_us_delta(rettime, calltime));
 	return ret;
 }
@@ -62,10 +62,6 @@ struct quad8_iio {
 #define QUAD8_REG_CHAN_OP		0x11
 #define QUAD8_REG_INDEX_INPUT_LEVELS	0x16
 #define QUAD8_DIFF_ENCODER_CABLE_STATUS	0x17
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
 /* Error flag */
 #define QUAD8_FLAG_E BIT(4)
 /* Up/Down flag */
@@ -104,9 +100,6 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
 {
 	struct quad8_iio *const priv = iio_priv(indio_dev);
 	const int base_offset = priv->base + 2 * chan->channel;
-	unsigned int flags;
-	unsigned int borrow;
-	unsigned int carry;
 	int i;
 
 	switch (mask) {
@@ -117,12 +110,7 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
 			return IIO_VAL_INT;
 		}
 
-		flags = inb(base_offset + 1);
-		borrow = flags & QUAD8_FLAG_BT;
-		carry = !!(flags & QUAD8_FLAG_CT);
-
-		/* Borrow XOR Carry effectively doubles count range */
-		*val = (borrow ^ carry) << 24;
+		*val = 0;
 
 		mutex_lock(&priv->lock);
@@ -643,17 +631,9 @@ static int quad8_count_read(struct counter_device *counter,
 {
 	struct quad8_iio *const priv = counter->priv;
 	const int base_offset = priv->base + 2 * count->id;
-	unsigned int flags;
-	unsigned int borrow;
-	unsigned int carry;
 	int i;
 
-	flags = inb(base_offset + 1);
-	borrow = flags & QUAD8_FLAG_BT;
-	carry = !!(flags & QUAD8_FLAG_CT);
-
-	/* Borrow XOR Carry effectively doubles count range */
-	*val = (unsigned long)(borrow ^ carry) << 24;
+	*val = 0;
 
 	mutex_lock(&priv->lock);
@@ -1198,8 +1178,8 @@ static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
 
 	mutex_unlock(&priv->lock);
 
-	/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
-	return sprintf(buf, "33554431\n");
+	/* By default 0xFFFFFF (24 bits unsigned) is maximum count */
+	return sprintf(buf, "16777215\n");
 }
 
 static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
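Note: the corrected ceiling string is simply the largest 24-bit count, 2^24 - 1:

	#include <stdio.h>

	int main(void)
	{
		/* 0xFFFFFF == 2^24 - 1; the old 25-bit string 33554431 was wrong */
		printf("%u\n", (1u << 24) - 1);	/* prints 16777215 */
		return 0;
	}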
@@ -1299,6 +1299,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 	}
 
+	var->xres_virtual = fb->width;
+	var->yres_virtual = fb->height;
+
 	/*
	 * Workaround for SDL 1.2, which is known to be setting all pixel format
	 * fields values to zero in some cases. We treat this situation as a
@@ -1515,9 +1515,9 @@ static int adt7475_set_pwm_polarity(struct i2c_client *client)
 	int ret, i;
 	u8 val;
 
-	ret = of_property_read_u32_array(client->dev.of_node,
-					 "adi,pwm-active-state", states,
-					 ARRAY_SIZE(states));
+	ret = device_property_read_u32_array(&client->dev,
+					     "adi,pwm-active-state", states,
+					     ARRAY_SIZE(states));
 	if (ret)
 		return ret;
@@ -74,6 +74,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 
 #define ZEN_CUR_TEMP_SHIFT			21
 #define ZEN_CUR_TEMP_RANGE_SEL_MASK		BIT(19)
+#define ZEN_CUR_TEMP_TJ_SEL_MASK		GENMASK(17, 16)
 
 #define ZEN_SVI_BASE				0x0005A000
@@ -173,7 +174,8 @@ static long get_raw_temp(struct k10temp_data *data)
 
 	data->read_tempreg(data->pdev, &regval);
 	temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;
-	if (regval & data->temp_adjust_mask)
+	if ((regval & data->temp_adjust_mask) ||
+	    (regval & ZEN_CUR_TEMP_TJ_SEL_MASK) == ZEN_CUR_TEMP_TJ_SEL_MASK)
 		temp -= 49000;
 	return temp;
 }
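Note: get_raw_temp() reads a raw field in 0.125 degC steps and subtracts a 49 degC offset for parts reporting in the offset range. A stand-alone restatement of that arithmetic (raw_to_millicelsius() is a hypothetical helper, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	static long raw_to_millicelsius(uint32_t regval, int offset_range)
	{
		long temp = (regval >> 21) * 125;	/* 0.125 degC -> millidegC */

		if (offset_range)
			temp -= 49000;	/* 49 degC offset range */
		return temp;
	}

	int main(void)
	{
		printf("%ld\n", raw_to_millicelsius(0x60000000u, 0)); /* 96000 */
		return 0;
	}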
@@ -628,7 +628,7 @@ out:
 
 static int palmas_gpadc_remove(struct platform_device *pdev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
 	struct palmas_gpadc *adc = iio_priv(indio_dev);
 
 	if (adc->wakeup1_enable || adc->wakeup2_enable)
@@ -46,7 +46,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
 	if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
 		pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
 			 peer->device->dev->name, peer->internal_id,
-			 &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2);
+			 &peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2);
 
 		del_timer(&peer->timer_send_keepalive);
 		/* We drop all packets without a keypair and don't try again,
@@ -64,7 +64,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
 		++peer->timer_handshake_attempts;
 		pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n",
 			 peer->device->dev->name, peer->internal_id,
-			 &peer->endpoint.addr, REKEY_TIMEOUT,
+			 &peer->endpoint.addr, (int)REKEY_TIMEOUT,
 			 peer->timer_handshake_attempts + 1);
 
 	/* We clear the endpoint address src address, in case this is
@@ -94,7 +94,7 @@ static void wg_expired_new_handshake(struct timer_list *timer)
 
 	pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
 		 peer->device->dev->name, peer->internal_id,
-		 &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT);
+		 &peer->endpoint.addr, (int)(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT));
 	/* We clear the endpoint address src address, in case this is the cause
	 * of trouble.
	 */
@@ -126,7 +126,7 @@ static void wg_queued_expired_zero_key_material(struct work_struct *work)
 
 	pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
		 peer->device->dev->name, peer->internal_id,
-		 &peer->endpoint.addr, REJECT_AFTER_TIME * 3);
+		 &peer->endpoint.addr, (int)REJECT_AFTER_TIME * 3);
 	wg_noise_handshake_clear(&peer->handshake);
 	wg_noise_keypairs_clear(&peer->keypairs);
 	wg_peer_put(peer);
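Note: all four WireGuard hunks are the same fix: the timeout constants are enumerators, and a %d conversion in a varargs call requires the argument to be exactly int, so the casts make the promoted type explicit. A user-space sketch of the pattern (the enum values here are illustrative):

	#include <stdio.h>

	enum { REKEY_TIMEOUT = 5, KEEPALIVE_TIMEOUT = 10 };

	int main(void)
	{
		/* The (int) cast pins the argument type that %d expects and
		 * silences -Wformat-style warnings about enum promotion. */
		printf("retrying after %d seconds\n",
		       (int)(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT));
		return 0;
	}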
@@ -5834,6 +5834,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
 		(struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
 	req_len = le32_to_cpu(assoc_info->req_len);
 	resp_len = le32_to_cpu(assoc_info->resp_len);
+	if (req_len > WL_EXTRA_BUF_MAX || resp_len > WL_EXTRA_BUF_MAX) {
+		bphy_err(drvr, "invalid lengths in assoc info: req %u resp %u\n",
+			 req_len, resp_len);
+		return -EINVAL;
+	}
 	if (req_len) {
 		err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
 					       cfg->extra_buf,
@@ -1210,11 +1210,9 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
 	val |= BIT(4);
 	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
 
-	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
-		val |= BIT(31);
-		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
-	}
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
 
 	return 0;
 err_disable_clocks:
@@ -63,7 +63,14 @@ int pciehp_configure_device(struct controller *ctrl)
 
 	pci_assign_unassigned_bridge_resources(bridge);
 	pcie_bus_configure_settings(parent);
+
+	/*
+	 * Release reset_lock during driver binding
+	 * to avoid AB-BA deadlock with device_lock.
+	 */
+	up_read(&ctrl->reset_lock);
 	pci_bus_add_devices(parent);
+	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 
 out:
 	pci_unlock_rescan_remove();
@@ -104,7 +111,15 @@ void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
 	list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
 					 bus_list) {
 		pci_dev_get(dev);
+
+		/*
+		 * Release reset_lock during driver unbinding
+		 * to avoid AB-BA deadlock with device_lock.
+		 */
+		up_read(&ctrl->reset_lock);
 		pci_stop_and_remove_bus_device(dev);
+		down_read_nested(&ctrl->reset_lock, ctrl->depth);
+
 		/*
 		 * Ensure that no new Requests will be generated from
 		 * the device.
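Note: the two hunks above break a classic AB-BA deadlock. Schematically (illustrative comment, not from the patch):

	/*
	 *   hotplug thread                   concurrent thread
	 *   --------------                   -----------------
	 *   down_read(&ctrl->reset_lock);    device_lock(dev);
	 *   device_lock(dev);   <- blocks    down_write(&ctrl->reset_lock);  <- blocks
	 *
	 * Dropping reset_lock around driver (un)binding removes the nested
	 * acquisition, so neither thread can end up waiting on the other.
	 */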
@@ -101,7 +101,7 @@ struct ad2s1210_state {
 static const int ad2s1210_mode_vals[4][2] = {
 	[MOD_POS] = { 0, 0 },
 	[MOD_VEL] = { 0, 1 },
-	[MOD_CONFIG] = { 1, 0 },
+	[MOD_CONFIG] = { 1, 1 },
 };
 
 static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
@@ -1627,13 +1627,11 @@ static int dwc3_probe(struct platform_device *pdev)
 	spin_lock_init(&dwc->lock);
 	mutex_init(&dwc->mutex);
 
+	pm_runtime_get_noresume(dev);
 	pm_runtime_set_active(dev);
 	pm_runtime_use_autosuspend(dev);
 	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
 	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0)
-		goto err1;
 
 	pm_runtime_forbid(dev);
@@ -1692,12 +1690,10 @@ err3:
 	dwc3_free_event_buffers(dwc);
 
 err2:
-	pm_runtime_allow(&pdev->dev);
-
-err1:
-	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
+	pm_runtime_allow(dev);
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_put_noidle(dev);
 disable_clks:
 	clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
 assert_reset:
@@ -1721,6 +1717,7 @@ static int dwc3_remove(struct platform_device *pdev)
 	dwc3_core_exit(dwc);
 	dwc3_ulpi_exit(dwc);
 
+	pm_runtime_allow(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_set_suspended(&pdev->dev);
@@ -133,6 +133,7 @@ static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
 	regset->regs = regs;
 	regset->nregs = nregs;
 	regset->base = hcd->regs + base;
+	regset->dev = hcd->self.controller;
 
 	debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
 }
@@ -595,6 +595,11 @@ static void option_instat_callback(struct urb *urb);
 #define SIERRA_VENDOR_ID			0x1199
 #define SIERRA_PRODUCT_EM9191			0x90d3
 
+/* UNISOC (Spreadtrum) products */
+#define UNISOC_VENDOR_ID			0x1782
+/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
+#define TOZED_PRODUCT_LT70C			0x4055
+
 /* Device flags */
 
 /* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -2225,6 +2230,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -242,7 +242,6 @@ retry:
 	handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
-	ext4_fc_start_update(inode);
 
 	if ((type == ACL_TYPE_ACCESS) && acl) {
 		error = posix_acl_update_mode(inode, &mode, &acl);
@@ -260,7 +259,6 @@ retry:
 	}
 out_stop:
 	ext4_journal_stop(handle);
-	ext4_fc_stop_update(inode);
 	if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
 	return error;
@@ -4694,7 +4694,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 			    FALLOC_FL_INSERT_RANGE))
 		return -EOPNOTSUPP;
 
-	ext4_fc_start_update(inode);
 	inode_lock(inode);
 	ret = ext4_convert_inline_data(inode);
 	inode_unlock(inode);
@@ -4764,7 +4763,6 @@ out:
 	inode_unlock(inode);
 	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
 exit:
-	ext4_fc_stop_update(inode);
 	return ret;
 }
@@ -262,7 +262,6 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
 	if (iocb->ki_flags & IOCB_NOWAIT)
 		return -EOPNOTSUPP;
 
-	ext4_fc_start_update(inode);
 	inode_lock(inode);
 	ret = ext4_write_checks(iocb, from);
 	if (ret <= 0)
@@ -274,7 +273,6 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
 
 out:
 	inode_unlock(inode);
-	ext4_fc_stop_update(inode);
 	if (likely(ret > 0)) {
 		iocb->ki_pos += ret;
 		ret = generic_write_sync(iocb, ret);
@@ -561,9 +559,7 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			goto out;
 		}
 
-		ext4_fc_start_update(inode);
 		ret = ext4_orphan_add(handle, inode);
-		ext4_fc_stop_update(inode);
 		if (ret) {
 			ext4_journal_stop(handle);
 			goto out;
@@ -5469,7 +5469,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 		if (error)
 			return error;
 	}
-	ext4_fc_start_update(inode);
+
 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
 		handle_t *handle;
@@ -5493,7 +5493,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 
 		if (error) {
 			ext4_journal_stop(handle);
-			ext4_fc_stop_update(inode);
 			return error;
 		}
 		/* Update corresponding info in inode so that everything is in
@@ -5505,7 +5504,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 		error = ext4_mark_inode_dirty(handle, inode);
 		ext4_journal_stop(handle);
 		if (unlikely(error)) {
-			ext4_fc_stop_update(inode);
 			return error;
 		}
 	}
@@ -5520,12 +5518,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 
 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
-				ext4_fc_stop_update(inode);
 				return -EFBIG;
 			}
 		}
 		if (!S_ISREG(inode->i_mode)) {
-			ext4_fc_stop_update(inode);
 			return -EINVAL;
 		}
 
@@ -5651,7 +5647,6 @@ err_out:
 	ext4_std_error(inode->i_sb, error);
 	if (!error)
 		error = rc;
-	ext4_fc_stop_update(inode);
 	return error;
 }
@@ -1328,13 +1328,7 @@ out:
 
 long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-	long ret;
-
-	ext4_fc_start_update(file_inode(filp));
-	ret = __ext4_ioctl(filp, cmd, arg);
-	ext4_fc_stop_update(file_inode(filp));
-
-	return ret;
+	return __ext4_ioctl(filp, cmd, arg);
 }
 
 #ifdef CONFIG_COMPAT
@@ -757,6 +757,7 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
 	}
 	journal->j_flags |= JBD2_FAST_COMMIT_ONGOING;
 	write_unlock(&journal->j_state_lock);
+	jbd2_journal_lock_updates(journal);
 
 	return 0;
 }
@@ -768,6 +769,7 @@ EXPORT_SYMBOL(jbd2_fc_begin_commit);
  */
 static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
 {
+	jbd2_journal_unlock_updates(journal);
 	if (journal->j_fc_cleanup_callback)
 		journal->j_fc_cleanup_callback(journal, 0);
 	write_lock(&journal->j_state_lock);
@@ -190,7 +190,7 @@ static inline u64 readq(const volatile void __iomem *addr)
 	u64 val;
 
 	__io_br();
-	val = __le64_to_cpu(__raw_readq(addr));
+	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
 	__io_ar(val);
 	return val;
 }
@@ -233,7 +233,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
 static inline void writeq(u64 value, volatile void __iomem *addr)
 {
 	__io_bw();
-	__raw_writeq(__cpu_to_le64(value), addr);
+	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
 	__io_aw();
 }
 #endif
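Note: the __force casts change nothing at runtime; they only tell sparse that the reinterpretation between the plain integer and the __bitwise little-endian type is intentional. A minimal sketch of that annotation pattern (le64_t is a hypothetical stand-in for __le64):

	#ifdef __CHECKER__
	#define __force		__attribute__((force))
	#define __bitwise	__attribute__((bitwise))
	#else
	#define __force
	#define __bitwise
	#endif

	typedef unsigned long long u64;
	typedef u64 __bitwise le64_t;

	static inline u64 identity(u64 raw)
	{
		/* without __force, sparse warns about mixing u64 and le64_t */
		le64_t v = (le64_t __force)raw;
		return (u64 __force)v;
	}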
@@ -4,6 +4,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/alarmtimer.h>
 #include <linux/timerqueue.h>
 #include <linux/task_work.h>
@@ -63,16 +64,18 @@ static inline int clockid_to_fd(const clockid_t clk)
  * cpu_timer - Posix CPU timer representation for k_itimer
 * @node:	timerqueue node to queue in the task/sig
 * @head:	timerqueue head on which this timer is queued
- * @task:	Pointer to target task
+ * @pid:	Pointer to target task PID
 * @elist:	List head for the expiry list
 * @firing:	Timer is currently firing
+ * @handling:	Pointer to the task which handles expiry
 */
 struct cpu_timer {
-	struct timerqueue_node	node;
-	struct timerqueue_head	*head;
-	struct pid		*pid;
-	struct list_head	elist;
-	int			firing;
+	struct timerqueue_node	node;
+	struct timerqueue_head	*head;
+	struct pid		*pid;
+	struct list_head	elist;
+	int			firing;
+	struct task_struct __rcu *handling;
 };
 
 static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
@@ -129,10 +132,12 @@ struct posix_cputimers {
 /**
 * posix_cputimers_work - Container for task work based posix CPU timer expiry
 * @work:	The task work to be scheduled
+ * @mutex:	Mutex held around expiry in context of this task work
 * @scheduled:	@work has been scheduled already, no further processing
 */
 struct posix_cputimers_work {
 	struct callback_head	work;
+	struct mutex		mutex;
 	unsigned int		scheduled;
 };
@@ -211,6 +211,7 @@ extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
 				     enum tick_dep_bits bit);
 extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
 				       enum tick_dep_bits bit);
+extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
 
 /*
 * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
@@ -275,6 +276,7 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
 
 static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
 static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
 
 static inline void tick_dep_set(enum tick_dep_bits bit) { }
 static inline void tick_dep_clear(enum tick_dep_bits bit) { }
@@ -448,6 +448,9 @@ EXPORT_SYMBOL_GPL(put_task_stack);
 
 void free_task(struct task_struct *tsk)
 {
+#ifdef CONFIG_SECCOMP
+	WARN_ON_ONCE(tsk->seccomp.filter);
+#endif
 	cpufreq_task_times_exit(tsk);
 	scs_release(tsk);
@@ -2307,12 +2310,6 @@ static __latent_entropy struct task_struct *copy_process(
 
 	spin_lock(&current->sighand->siglock);
 
-	/*
-	 * Copy seccomp details explicitly here, in case they were changed
-	 * before holding sighand lock.
-	 */
-	copy_seccomp(p);
-
 	rseq_fork(p, clone_flags);
 
 	/* Don't start children in a dying pid namespace */
@@ -2327,6 +2324,14 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cancel_cgroup;
 	}
 
+	/* No more failure paths after this point. */
+
+	/*
+	 * Copy seccomp details explicitly here, in case they were changed
+	 * before holding sighand lock.
+	 */
+	copy_seccomp(p);
+
 	init_task_pid_links(p);
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -782,6 +782,8 @@ static u64 collect_timerqueue(struct timerqueue_head *head,
 			return expires;
 
 		ctmr->firing = 1;
+		/* See posix_cpu_timer_wait_running() */
+		rcu_assign_pointer(ctmr->handling, current);
 		cpu_timer_dequeue(ctmr);
 		list_add_tail(&ctmr->elist, firing);
 	}
@@ -1097,7 +1099,49 @@ static void handle_posix_cpu_timers(struct task_struct *tsk);
 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
 static void posix_cpu_timers_work(struct callback_head *work)
 {
+	struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
+
+	mutex_lock(&cw->mutex);
 	handle_posix_cpu_timers(current);
+	mutex_unlock(&cw->mutex);
+}
+
+/*
+ * Invoked from the posix-timer core when a cancel operation failed because
+ * the timer is marked firing. The caller holds rcu_read_lock(), which
+ * protects the timer and the task which is expiring it from being freed.
+ */
+static void posix_cpu_timer_wait_running(struct k_itimer *timr)
+{
+	struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
+
+	/* Has the handling task completed expiry already? */
+	if (!tsk)
+		return;
+
+	/* Ensure that the task cannot go away */
+	get_task_struct(tsk);
+	/* Now drop the RCU protection so the mutex can be locked */
+	rcu_read_unlock();
+	/* Wait on the expiry mutex */
+	mutex_lock(&tsk->posix_cputimers_work.mutex);
+	/* Release it immediately again. */
+	mutex_unlock(&tsk->posix_cputimers_work.mutex);
+	/* Drop the task reference. */
+	put_task_struct(tsk);
+	/* Relock RCU so the callsite is balanced */
+	rcu_read_lock();
+}
+
+static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
+{
+	/* Ensure that timr->it.cpu.handling task cannot go away */
+	rcu_read_lock();
+	spin_unlock_irq(&timr->it_lock);
+	posix_cpu_timer_wait_running(timr);
+	rcu_read_unlock();
+	/* @timr is on stack and is valid */
+	spin_lock_irq(&timr->it_lock);
 }
 
 /*
@@ -1113,6 +1157,7 @@ void clear_posix_cputimers_work(struct task_struct *p)
 	       sizeof(p->posix_cputimers_work.work));
 	init_task_work(&p->posix_cputimers_work.work,
 		       posix_cpu_timers_work);
+	mutex_init(&p->posix_cputimers_work.mutex);
 	p->posix_cputimers_work.scheduled = false;
 }
@@ -1191,6 +1236,18 @@ static inline void __run_posix_cpu_timers(struct task_struct *tsk)
 	lockdep_posixtimer_exit();
 }
 
+static void posix_cpu_timer_wait_running(struct k_itimer *timr)
+{
+	cpu_relax();
+}
+
+static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
+{
+	spin_unlock_irq(&timr->it_lock);
+	cpu_relax();
+	spin_lock_irq(&timr->it_lock);
+}
+
 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
 {
 	return false;
@@ -1299,6 +1356,8 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
 			 */
 			if (likely(cpu_firing >= 0))
 				cpu_timer_fire(timer);
+			/* See posix_cpu_timer_wait_running() */
+			rcu_assign_pointer(timer->it.cpu.handling, NULL);
 			spin_unlock(&timer->it_lock);
 		}
 	}
@@ -1434,23 +1493,16 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		expires = cpu_timer_getexpires(&timer.it.cpu);
 		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
 		if (!error) {
-			/*
-			 * Timer is now unarmed, deletion can not fail.
-			 */
+			/* Timer is now unarmed, deletion can not fail. */
 			posix_cpu_timer_del(&timer);
+		} else {
+			while (error == TIMER_RETRY) {
+				posix_cpu_timer_wait_running_nsleep(&timer);
+				error = posix_cpu_timer_del(&timer);
+			}
 		}
-		spin_unlock_irq(&timer.it_lock);
 
-		while (error == TIMER_RETRY) {
-			/*
-			 * We need to handle case when timer was or is in the
-			 * middle of firing. In other cases we already freed
-			 * resources.
-			 */
-			spin_lock_irq(&timer.it_lock);
-			error = posix_cpu_timer_del(&timer);
-			spin_unlock_irq(&timer.it_lock);
-		}
+		spin_unlock_irq(&timer.it_lock);
 
 		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
 			/*
@@ -1560,6 +1612,7 @@ const struct k_clock clock_posix_cpu = {
 	.timer_del		= posix_cpu_timer_del,
 	.timer_get		= posix_cpu_timer_get,
 	.timer_rearm		= posix_cpu_timer_rearm,
+	.timer_wait_running	= posix_cpu_timer_wait_running,
 };
 
 const struct k_clock clock_process = {
@@ -846,6 +846,10 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
 	rcu_read_lock();
 	unlock_timer(timer, *flags);
 
+	/*
+	 * kc->timer_wait_running() might drop RCU lock. So @timer
+	 * cannot be touched anymore after the function returns!
+	 */
 	if (!WARN_ON_ONCE(!kc->timer_wait_running))
 		kc->timer_wait_running(timer);
 
@@ -429,7 +429,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
 	tick_nohz_full_running = true;
 }
 
-static int tick_nohz_cpu_down(unsigned int cpu)
+bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
 {
 	/*
 	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
@@ -437,8 +437,13 @@ static int tick_nohz_cpu_down(unsigned int cpu)
 	 * CPUs. It must remain online when nohz full is enabled.
 	 */
 	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
-		return -EBUSY;
-	return 0;
+		return false;
+	return true;
+}
+
+static int tick_nohz_cpu_down(unsigned int cpu)
+{
+	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
 }
 
 void __init tick_nohz_init(void)
@@ -996,7 +996,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 	if (hci_sock_gen_cookie(sk)) {
 		struct sk_buff *skb;
 
-		if (capable(CAP_NET_ADMIN))
+		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
+		 * flag. Make sure that not only the current task but also
+		 * the socket opener has the required capability, since
+		 * privileged programs can be tricked into making ioctl calls
+		 * on HCI sockets, and the socket should not be marked as
+		 * trusted simply because the ioctl caller is privileged.
+		 */
+		if (sk_capable(sk, CAP_NET_ADMIN))
 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 
 		/* Send event to monitor */
@@ -393,6 +393,18 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,
 
 /* Please keep this list alphabetically sorted */
 static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+	{	/* Acer Iconia One 7 B1-750 */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+		},
+		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+					BYT_RT5640_JD_SRC_JD1_IN4P |
+					BYT_RT5640_OVCD_TH_1500UA |
+					BYT_RT5640_OVCD_SF_0P75 |
+					BYT_RT5640_SSP0_AIF1 |
+					BYT_RT5640_MCLK_EN),
+	},
 	{	/* Acer Iconia Tab 8 W1-810 */
 		.matches = {
 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -670,7 +670,7 @@ static void create_tasks(struct perf_sched *sched)
 	err = pthread_attr_init(&attr);
 	BUG_ON(err);
 	err = pthread_attr_setstacksize(&attr,
-			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
+			(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
 	BUG_ON(err);
 	err = pthread_mutex_lock(&sched->start_work_mutex);
 	BUG_ON(err);
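Note: since glibc 2.34, PTHREAD_STACK_MIN may expand to sysconf(_SC_THREAD_STACK_MIN), a long, so perf's type-checked max() macro needs both operands to agree; the (int) cast restores that. A stand-alone illustration without perf's max() macro:

	#include <limits.h>
	#include <pthread.h>
	#include <stdio.h>

	int main(void)
	{
		/* PTHREAD_STACK_MIN may be a runtime long here, not a
		 * compile-time constant, hence the explicit cast to int. */
		int stack_min = (int)PTHREAD_STACK_MIN;
		int stacksize = 16 * 1024 > stack_min ? 16 * 1024 : stack_min;

		printf("%d\n", stacksize);
		return 0;
	}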