Merge 5.10.109 into android12-5.10-lts
Changes in 5.10.109
	nfc: st21nfca: Fix potential buffer overflows in EVT_TRANSACTION
	net: ipv6: fix skb_over_panic in __ip6_append_data
	exfat: avoid incorrectly releasing for root inode
	cgroup: Allocate cgroup_file_ctx for kernfs_open_file->priv
	cgroup: Use open-time cgroup namespace for process migration perm checks
	cgroup-v1: Correct privileges check in release_agent writes
	tpm: Fix error handling in async work
	staging: fbtft: fb_st7789v: reset display before initialization
	llc: fix netdevice reference leaks in llc_ui_bind()
	ASoC: sti: Fix deadlock via snd_pcm_stop_xrun() call
	ALSA: oss: Fix PCM OSS buffer allocation overflow
	ALSA: usb-audio: add mapping for new Corsair Virtuoso SE
	ALSA: hda/realtek: Add quirk for Clevo NP70PNJ
	ALSA: hda/realtek: Add quirk for Clevo NP50PNJ
	ALSA: hda/realtek - Fix headset mic problem for a HP machine with alc671
	ALSA: hda/realtek: Add quirk for ASUS GA402
	ALSA: pcm: Fix races among concurrent hw_params and hw_free calls
	ALSA: pcm: Fix races among concurrent read/write and buffer changes
	ALSA: pcm: Fix races among concurrent prepare and hw_params/hw_free calls
	ALSA: pcm: Fix races among concurrent prealloc proc writes
	ALSA: pcm: Add stream lock during PCM reset ioctl operations
	ALSA: usb-audio: Add mute TLV for playback volumes on RODE NT-USB
	ALSA: cmipci: Restore aux vol on suspend/resume
	ALSA: pci: fix reading of swapped values from pcmreg in AC97 codec
	drivers: net: xgene: Fix regression in CRC stripping
	netfilter: nf_tables: initialize registers in nft_do_chain()
	ACPI / x86: Work around broken XSDT on Advantech DAC-BJ01 board
	ACPI: battery: Add device HID and quirk for Microsoft Surface Go 3
	ACPI: video: Force backlight native for Clevo NL5xRU and NL5xNU
	crypto: qat - disable registration of algorithms
	Revert "ath: add support for special 0x0 regulatory domain"
	rcu: Don't deboost before reporting expedited quiescent state
	mac80211: fix potential double free on mesh join
	tpm: use try_get_ops() in tpm-space.c
	wcn36xx: Differentiate wcn3660 from wcn3620
	nds32: fix access_ok() checks in get/put_user
	llc: only change llc->dev when bind() succeeds
	Linux 5.10.109

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ifd757f0ec4ba643f7cbaf78aa899d3c159c4b877
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 108
+SUBLEVEL = 109
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -70,9 +70,7 @@ static inline void set_fs(mm_segment_t fs)
  * versions are void (ie, don't return a value as such).
  */
 
-#define get_user	__get_user				\
-
-#define __get_user(x, ptr)				\
+#define get_user(x, ptr)				\
 ({							\
 	long __gu_err = 0;				\
 	__get_user_check((x), (ptr), __gu_err);		\
@@ -85,6 +83,14 @@ static inline void set_fs(mm_segment_t fs)
 	(void)0;					\
 })
 
+#define __get_user(x, ptr)				\
+({							\
+	long __gu_err = 0;				\
+	const __typeof__(*(ptr)) __user *__p = (ptr);	\
+	__get_user_err((x), __p, (__gu_err));		\
+	__gu_err;					\
+})
+
 #define __get_user_check(x, ptr, err)			\
 ({							\
 	const __typeof__(*(ptr)) __user *__p = (ptr);	\
@@ -165,12 +171,18 @@ do { \
 		: "r"(addr), "i"(-EFAULT)		\
 		: "cc")
 
-#define put_user	__put_user			\
+#define put_user(x, ptr)				\
+({							\
+	long __pu_err = 0;				\
+	__put_user_check((x), (ptr), __pu_err);		\
+	__pu_err;					\
+})
 
 #define __put_user(x, ptr)				\
 ({							\
 	long __pu_err = 0;				\
-	__put_user_err((x), (ptr), __pu_err);		\
+	__typeof__(*(ptr)) __user *__p = (ptr);		\
+	__put_user_err((x), __p, __pu_err);		\
 	__pu_err;					\
 })
 
@@ -1340,6 +1340,17 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d)
 	return 0;
 }
 
+static int __init disable_acpi_xsdt(const struct dmi_system_id *d)
+{
+	if (!acpi_force) {
+		pr_notice("%s detected: force use of acpi=rsdt\n", d->ident);
+		acpi_gbl_do_not_use_xsdt = TRUE;
+	} else {
+		pr_notice("Warning: DMI blacklist says broken, but acpi XSDT forced\n");
+	}
+	return 0;
+}
+
 static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 {
 	if (!acpi_force) {
@@ -1464,6 +1475,19 @@ static const struct dmi_system_id acpi_dmi_table[] __initconst = {
 		     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
 		     },
 	 },
+	/*
+	 * Boxes that need ACPI XSDT use disabled due to corrupted tables
+	 */
+	{
+	 .callback = disable_acpi_xsdt,
+	 .ident = "Advantech DAC-BJ01",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Bearlake CRB Board"),
+		     DMI_MATCH(DMI_BIOS_VERSION, "V1.12"),
+		     DMI_MATCH(DMI_BIOS_DATE, "02/01/2011"),
+		     },
+	 },
 	{}
 };
 
@@ -66,6 +66,10 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
 static const struct acpi_device_id battery_device_ids[] = {
 	{"PNP0C0A", 0},
+
+	/* Microsoft Surface Go 3 */
+	{"MSHW0146", 0},
+
 	{"", 0},
 };
 
@@ -1171,6 +1175,14 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"),
 		},
 	},
+	{
+		/* Microsoft Surface Go 3 */
+		.callback = battery_notification_delay_quirk,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+		},
+	},
 	{},
 };
 
@@ -409,6 +409,81 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
 		},
 	},
+	/*
+	 * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a
+	 * working native and video interface. However the default detection
+	 * mechanism first registers the video interface before unregistering
+	 * it again and switching to the native interface during boot. This
+	 * results in a dangling SBIOS request for backlight change for some
+	 * reason, causing the backlight to switch to ~2% once per boot on the
+	 * first power cord connect or disconnect event. Setting the native
+	 * interface explicitly circumvents this buggy behaviour, by avoiding
+	 * the unregistering process.
+	 */
+	{
+	.callback = video_detect_force_native,
+	.ident = "Clevo NL5xRU",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+		DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+		},
+	},
+	{
+	.callback = video_detect_force_native,
+	.ident = "Clevo NL5xRU",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+		DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+		},
+	},
+	{
+	.callback = video_detect_force_native,
+	.ident = "Clevo NL5xRU",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+		DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+		},
+	},
+	{
+	.callback = video_detect_force_native,
+	.ident = "Clevo NL5xRU",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+		DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+		},
+	},
+	{
+	.callback = video_detect_force_native,
+	.ident = "Clevo NL5xRU",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+		DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+		},
+	},
+	{
+	.callback = video_detect_force_native,
+	.ident = "Clevo NL5xNU",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+		DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+		},
+	},
+	{
+	.callback = video_detect_force_native,
+	.ident = "Clevo NL5xNU",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+		DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+		},
+	},
+	{
+	.callback = video_detect_force_native,
+	.ident = "Clevo NL5xNU",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+		DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+		},
+	},
 
 	/*
 	 * Desktops which falsely report a backlight and which our heuristics
@@ -70,7 +70,13 @@ static void tpm_dev_async_work(struct work_struct *work)
 	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
 			       sizeof(priv->data_buffer));
 	tpm_put_ops(priv->chip);
-	if (ret > 0) {
+
+	/*
+	 * If ret is > 0 then tpm_dev_transmit returned the size of the
+	 * response. If ret is < 0 then tpm_dev_transmit failed and
+	 * returned an error code.
+	 */
+	if (ret != 0) {
 		priv->response_length = ret;
 		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
 	}
@@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
 
 void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
 {
-	mutex_lock(&chip->tpm_mutex);
-	if (!tpm_chip_start(chip)) {
+
+	if (tpm_try_get_ops(chip) == 0) {
 		tpm2_flush_sessions(chip, space);
-		tpm_chip_stop(chip);
+		tpm_put_ops(chip);
 	}
-	mutex_unlock(&chip->tpm_mutex);
+
 	kfree(space->context_buf);
 	kfree(space->session_buf);
 }
@@ -126,6 +126,14 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
 			goto err;
 	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
 		goto err;
+
+	/* Temporarily set the number of crypto instances to zero to avoid
+	 * registering the crypto algorithms.
+	 * This will be removed when the algorithms will support the
+	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag
+	 */
+	instances = 0;
+
 	for (i = 0; i < instances; i++) {
 		val = i;
 		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
@@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
 	buf_pool->rx_skb[skb_index] = NULL;
 
 	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+	/* strip off CRC as HW isn't doing this */
+	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+	if (!nv)
+		datalen -= 4;
+
 	skb_put(skb, datalen);
 	prefetch(skb->data - NET_IP_ALIGN);
 	skb->protocol = eth_type_trans(skb, ndev);
@@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
 		}
 	}
 
-	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
-	if (!nv) {
-		/* strip off CRC as HW isn't doing this */
-		datalen -= 4;
+	if (!nv)
 		goto skip_jumbo;
-	}
 
 	slots = page_pool->slots - 1;
 	head = page_pool->head;
@@ -666,14 +666,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
 
 /*
  * Some users have reported their EEPROM programmed with
- * 0x8000 or 0x0 set, this is not a supported regulatory
- * domain but since we have more than one user with it we
- * need a solution for them. We default to 0x64, which is
- * the default Atheros world regulatory domain.
+ * 0x8000 set, this is not a supported regulatory domain
+ * but since we have more than one user with it we need
+ * a solution for them. We default to 0x64, which is the
+ * default Atheros world regulatory domain.
  */
 static void ath_regd_sanitize(struct ath_regulatory *reg)
 {
-	if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
+	if (reg->current_rd != COUNTRY_ERD_FLAG)
 		return;
 	printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
 	reg->current_rd = 0x64;
@@ -1362,6 +1362,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
 	if (iris_node) {
 		if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
 			wcn->rf_id = RF_IRIS_WCN3620;
+		if (of_device_is_compatible(iris_node, "qcom,wcn3660") ||
+		    of_device_is_compatible(iris_node, "qcom,wcn3660b"))
+			wcn->rf_id = RF_IRIS_WCN3660;
 		if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
 			wcn->rf_id = RF_IRIS_WCN3680;
 		of_node_put(iris_node);
@@ -96,6 +96,7 @@ enum wcn36xx_ampdu_state {
 
 #define RF_UNKNOWN	0x0000
 #define RF_IRIS_WCN3620	0x3620
+#define RF_IRIS_WCN3660	0x3660
 #define RF_IRIS_WCN3680	0x3680
 
 static inline void buff_to_be(u32 *buf, size_t len)
@@ -320,6 +320,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
 			return -ENOMEM;
 
 		transaction->aid_len = skb->data[1];
+
+		/* Checking if the length of the AID is valid */
+		if (transaction->aid_len > sizeof(transaction->aid))
+			return -EINVAL;
+
 		memcpy(transaction->aid, &skb->data[2],
 		       transaction->aid_len);
 
@@ -329,6 +334,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
 			return -EPROTO;
 
 		transaction->params_len = skb->data[transaction->aid_len + 3];
+
+		/* Total size is allocated (skb->len - 2) minus fixed array members */
+		if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
+			return -EINVAL;
+
 		memcpy(transaction->params, skb->data +
 		       transaction->aid_len + 4, transaction->params_len);
 
@@ -82,6 +82,8 @@ enum st7789v_command {
  */
 static int init_display(struct fbtft_par *par)
 {
+	par->fbtftops.reset(par);
+
 	/* turn off sleep mode */
 	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
 	mdelay(120);
@@ -690,7 +690,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
 	if (!sb->s_root) {
 		exfat_err(sb, "failed to get the root dentry");
 		err = -ENOMEM;
-		goto put_inode;
+		goto free_table;
 	}
 
 	return 0;
@@ -398,6 +398,7 @@ struct snd_pcm_runtime {
 	wait_queue_head_t tsleep;	/* transfer sleep */
 	struct fasync_struct *fasync;
 	bool stop_operating;		/* sync_stop will be called */
+	struct mutex buffer_mutex;	/* protect for buffer changes */
 
 	/* -- private section -- */
 	void *private_data;
@@ -65,6 +65,25 @@ static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
 	return container_of(kfc, struct cgroup_fs_context, kfc);
 }
 
+struct cgroup_pidlist;
+
+struct cgroup_file_ctx {
+	struct cgroup_namespace	*ns;
+
+	struct {
+		void			*trigger;
+	} psi;
+
+	struct {
+		bool			started;
+		struct css_task_iter	iter;
+	} procs;
+
+	struct {
+		struct cgroup_pidlist	*pidlist;
+	} procs1;
+};
+
 /*
  * A cgroup can be associated with multiple css_sets as different tasks may
  * belong to different cgroups on different hierarchies. In the other
@@ -394,6 +394,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 	 * next pid to display, if any
 	 */
 	struct kernfs_open_file *of = s->private;
+	struct cgroup_file_ctx *ctx = of->priv;
 	struct cgroup *cgrp = seq_css(s)->cgroup;
 	struct cgroup_pidlist *l;
 	enum cgroup_filetype type = seq_cft(s)->private;
@@ -403,25 +404,24 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 	mutex_lock(&cgrp->pidlist_mutex);
 
 	/*
-	 * !NULL @of->priv indicates that this isn't the first start()
-	 * after open. If the matching pidlist is around, we can use that.
-	 * Look for it. Note that @of->priv can't be used directly. It
-	 * could already have been destroyed.
+	 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
+	 * start() after open. If the matching pidlist is around, we can use
+	 * that. Look for it. Note that @ctx->procs1.pidlist can't be used
+	 * directly. It could already have been destroyed.
 	 */
-	if (of->priv)
-		of->priv = cgroup_pidlist_find(cgrp, type);
+	if (ctx->procs1.pidlist)
+		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);
 
 	/*
 	 * Either this is the first start() after open or the matching
 	 * pidlist has been destroyed inbetween. Create a new one.
 	 */
-	if (!of->priv) {
-		ret = pidlist_array_load(cgrp, type,
-					 (struct cgroup_pidlist **)&of->priv);
+	if (!ctx->procs1.pidlist) {
+		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
 		if (ret)
 			return ERR_PTR(ret);
 	}
-	l = of->priv;
+	l = ctx->procs1.pidlist;
 
 	if (pid) {
 		int end = l->length;
@@ -449,7 +449,8 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 {
 	struct kernfs_open_file *of = s->private;
-	struct cgroup_pidlist *l = of->priv;
+	struct cgroup_file_ctx *ctx = of->priv;
+	struct cgroup_pidlist *l = ctx->procs1.pidlist;
 
 	if (l)
 		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
@@ -460,7 +461,8 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
 {
 	struct kernfs_open_file *of = s->private;
-	struct cgroup_pidlist *l = of->priv;
+	struct cgroup_file_ctx *ctx = of->priv;
+	struct cgroup_pidlist *l = ctx->procs1.pidlist;
 	pid_t *p = v;
 	pid_t *end = l->list + l->length;
 	/*
@@ -545,6 +547,7 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
 					  char *buf, size_t nbytes, loff_t off)
 {
 	struct cgroup *cgrp;
+	struct cgroup_file_ctx *ctx;
 
 	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
 
@@ -552,8 +555,9 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
 	 * Release agent gets called with all capabilities,
 	 * require capabilities to set release agent.
 	 */
-	if ((of->file->f_cred->user_ns != &init_user_ns) ||
-	    !capable(CAP_SYS_ADMIN))
+	ctx = of->priv;
+	if ((ctx->ns->user_ns != &init_user_ns) ||
+	    !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
 		return -EPERM;
 
 	cgrp = cgroup_kn_lock_live(of->kn, false);
@@ -3617,6 +3617,7 @@ static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
 static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
 					  size_t nbytes, enum psi_res res)
 {
+	struct cgroup_file_ctx *ctx = of->priv;
 	struct psi_trigger *new;
 	struct cgroup *cgrp;
 	struct psi_group *psi;
@@ -3629,7 +3630,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
 	cgroup_kn_unlock(of->kn);
 
 	/* Allow only one trigger per file descriptor */
-	if (of->priv) {
+	if (ctx->psi.trigger) {
 		cgroup_put(cgrp);
 		return -EBUSY;
 	}
@@ -3641,7 +3642,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
 		return PTR_ERR(new);
 	}
 
-	smp_store_release(&of->priv, new);
+	smp_store_release(&ctx->psi.trigger, new);
 	cgroup_put(cgrp);
 
 	return nbytes;
@@ -3671,12 +3672,15 @@ static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
 static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
 					  poll_table *pt)
 {
-	return psi_trigger_poll(&of->priv, of->file, pt);
+	struct cgroup_file_ctx *ctx = of->priv;
+	return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
 }
 
 static void cgroup_pressure_release(struct kernfs_open_file *of)
 {
-	psi_trigger_destroy(of->priv);
+	struct cgroup_file_ctx *ctx = of->priv;
+
+	psi_trigger_destroy(ctx->psi.trigger);
 }
 
 bool cgroup_psi_enabled(void)
@@ -3729,24 +3733,43 @@ static ssize_t cgroup_freeze_write(struct kernfs_open_file *of,
 static int cgroup_file_open(struct kernfs_open_file *of)
 {
 	struct cftype *cft = of->kn->priv;
+	struct cgroup_file_ctx *ctx;
+	int ret;
 
-	if (cft->open)
-		return cft->open(of);
-	return 0;
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->ns = current->nsproxy->cgroup_ns;
+	get_cgroup_ns(ctx->ns);
+	of->priv = ctx;
+
+	if (!cft->open)
+		return 0;
+
+	ret = cft->open(of);
+	if (ret) {
+		put_cgroup_ns(ctx->ns);
+		kfree(ctx);
+	}
+	return ret;
 }
 
 static void cgroup_file_release(struct kernfs_open_file *of)
 {
 	struct cftype *cft = of->kn->priv;
+	struct cgroup_file_ctx *ctx = of->priv;
 
 	if (cft->release)
 		cft->release(of);
+	put_cgroup_ns(ctx->ns);
+	kfree(ctx);
 }
 
 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
 				 size_t nbytes, loff_t off)
 {
-	struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
+	struct cgroup_file_ctx *ctx = of->priv;
 	struct cgroup *cgrp = of->kn->parent->priv;
 	struct cftype *cft = of->kn->priv;
 	struct cgroup_subsys_state *css;
@@ -3763,7 +3786,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
 	 */
 	if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
 	    !(cft->flags & CFTYPE_NS_DELEGATABLE) &&
-	    ns != &init_cgroup_ns && ns->root_cset->dfl_cgrp == cgrp)
+	    ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp)
 		return -EPERM;
 
 	if (cft->write)
@@ -4671,21 +4694,21 @@ void css_task_iter_end(struct css_task_iter *it)
 
 static void cgroup_procs_release(struct kernfs_open_file *of)
 {
-	if (of->priv) {
-		css_task_iter_end(of->priv);
-		kfree(of->priv);
-	}
+	struct cgroup_file_ctx *ctx = of->priv;
+
+	if (ctx->procs.started)
+		css_task_iter_end(&ctx->procs.iter);
 }
 
 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
 {
 	struct kernfs_open_file *of = s->private;
-	struct css_task_iter *it = of->priv;
+	struct cgroup_file_ctx *ctx = of->priv;
 
 	if (pos)
 		(*pos)++;
 
-	return css_task_iter_next(it);
+	return css_task_iter_next(&ctx->procs.iter);
 }
 
 static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
@@ -4693,21 +4716,18 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
 {
 	struct kernfs_open_file *of = s->private;
 	struct cgroup *cgrp = seq_css(s)->cgroup;
-	struct css_task_iter *it = of->priv;
+	struct cgroup_file_ctx *ctx = of->priv;
+	struct css_task_iter *it = &ctx->procs.iter;
 
 	/*
 	 * When a seq_file is seeked, it's always traversed sequentially
 	 * from position 0, so we can simply keep iterating on !0 *pos.
 	 */
-	if (!it) {
+	if (!ctx->procs.started) {
 		if (WARN_ON_ONCE((*pos)))
 			return ERR_PTR(-EINVAL);
-
-		it = kzalloc(sizeof(*it), GFP_KERNEL);
-		if (!it)
-			return ERR_PTR(-ENOMEM);
-		of->priv = it;
 		css_task_iter_start(&cgrp->self, iter_flags, it);
+		ctx->procs.started = true;
 	} else if (!(*pos)) {
 		css_task_iter_end(it);
 		css_task_iter_start(&cgrp->self, iter_flags, it);
@@ -4758,9 +4778,9 @@ static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
 
 static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
 					 struct cgroup *dst_cgrp,
-					 struct super_block *sb)
+					 struct super_block *sb,
+					 struct cgroup_namespace *ns)
 {
-	struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
 	struct cgroup *com_cgrp = src_cgrp;
 	int ret;
 
@@ -4789,11 +4809,12 @@ static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
 
 static int cgroup_attach_permissions(struct cgroup *src_cgrp,
 				     struct cgroup *dst_cgrp,
-				     struct super_block *sb, bool threadgroup)
+				     struct super_block *sb, bool threadgroup,
+				     struct cgroup_namespace *ns)
 {
 	int ret = 0;
 
-	ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb);
+	ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns);
 	if (ret)
 		return ret;
 
@@ -4810,6 +4831,7 @@ static int cgroup_attach_permissions(struct cgroup *src_cgrp,
 static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
 				  char *buf, size_t nbytes, loff_t off)
 {
+	struct cgroup_file_ctx *ctx = of->priv;
 	struct cgroup *src_cgrp, *dst_cgrp;
 	struct task_struct *task;
 	ssize_t ret;
@@ -4830,7 +4852,8 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
 	spin_unlock_irq(&css_set_lock);
 
 	ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
-					of->file->f_path.dentry->d_sb, true);
+					of->file->f_path.dentry->d_sb, true,
+					ctx->ns);
 	if (ret)
 		goto out_finish;
 
@@ -4852,6 +4875,7 @@ static void *cgroup_threads_start(struct seq_file *s, loff_t *pos)
 static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
 				    char *buf, size_t nbytes, loff_t off)
 {
+	struct cgroup_file_ctx *ctx = of->priv;
 	struct cgroup *src_cgrp, *dst_cgrp;
 	struct task_struct *task;
 	ssize_t ret;
@@ -4875,7 +4899,8 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
 
 	/* thread migrations follow the cgroup.procs delegation rule */
 	ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
-					of->file->f_path.dentry->d_sb, false);
+					of->file->f_path.dentry->d_sb, false,
+					ctx->ns);
 	if (ret)
 		goto out_finish;
 
@@ -6058,7 +6083,8 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
 		goto err;
 
 	ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
-					!(kargs->flags & CLONE_THREAD));
+					!(kargs->flags & CLONE_THREAD),
+					current->nsproxy->cgroup_ns);
 	if (ret)
 		goto err;
 
@@ -541,6 +541,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 		/* Unboost if we were boosted. */
 		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
 			rt_mutex_futex_unlock(&rnp->boost_mtx);
+
 	} else {
 		local_irq_restore(flags);
 	}
@@ -1500,8 +1500,8 @@ static int __ip6_append_data(struct sock *sk,
 			sizeof(struct frag_hdr) : 0) +
 			rt->rt6i_nfheader_len;
 
-	if (mtu < fragheaderlen ||
-	    ((mtu - fragheaderlen) & ~7) + fragheaderlen < sizeof(struct frag_hdr))
+	if (mtu <= fragheaderlen ||
+	    ((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr))
 		goto emsgsize;
 
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
@@ -276,6 +276,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 {
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
+	struct net_device *dev = NULL;
 	struct llc_sap *sap;
 	int rc = -EINVAL;
 
@@ -287,14 +288,14 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 		goto out;
 	rc = -ENODEV;
 	if (sk->sk_bound_dev_if) {
-		llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
-		if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
-			dev_put(llc->dev);
-			llc->dev = NULL;
+		dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+		if (dev && addr->sllc_arphrd != dev->type) {
+			dev_put(dev);
+			dev = NULL;
 		}
 	} else
-		llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
-	if (!llc->dev)
+		dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
+	if (!dev)
 		goto out;
 	rc = -EUSERS;
 	llc->laddr.lsap = llc_ui_autoport();
@@ -304,6 +305,11 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 	sap = llc_sap_open(llc->laddr.lsap, NULL);
 	if (!sap)
 		goto out;
+
+	/* Note: We do not expect errors from this point. */
+	llc->dev = dev;
+	dev = NULL;
+
 	memcpy(llc->laddr.mac, llc->dev->dev_addr, IFHWADDRLEN);
 	memcpy(&llc->addr, addr, sizeof(llc->addr));
 	/* assign new connection to its SAP */
@@ -311,6 +317,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 	sock_reset_flag(sk, SOCK_ZAPPED);
 	rc = 0;
 out:
+	dev_put(dev);
 	return rc;
 }
 
@@ -333,6 +340,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
+	struct net_device *dev = NULL;
 	struct llc_sap *sap;
 	int rc = -EINVAL;
 
@@ -348,25 +356,26 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	rc = -ENODEV;
 	rcu_read_lock();
 	if (sk->sk_bound_dev_if) {
-		llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
-		if (llc->dev) {
+		dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
+		if (dev) {
 			if (is_zero_ether_addr(addr->sllc_mac))
-				memcpy(addr->sllc_mac, llc->dev->dev_addr,
+				memcpy(addr->sllc_mac, dev->dev_addr,
 				       IFHWADDRLEN);
-			if (addr->sllc_arphrd != llc->dev->type ||
+			if (addr->sllc_arphrd != dev->type ||
 			    !ether_addr_equal(addr->sllc_mac,
-					      llc->dev->dev_addr)) {
+					      dev->dev_addr)) {
 				rc = -EINVAL;
-				llc->dev = NULL;
+				dev = NULL;
 			}
 		}
-	} else
-		llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
+	} else {
+		dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
 					   addr->sllc_mac);
-	if (llc->dev)
-		dev_hold(llc->dev);
+	}
+	if (dev)
+		dev_hold(dev);
 	rcu_read_unlock();
-	if (!llc->dev)
+	if (!dev)
 		goto out;
 	if (!addr->sllc_sap) {
 		rc = -EUSERS;
@@ -399,6 +408,11 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 			goto out_put;
 		}
 	}
+
+	/* Note: We do not expect errors from this point. */
+	llc->dev = dev;
+	dev = NULL;
+
 	llc->laddr.lsap = addr->sllc_sap;
 	memcpy(llc->laddr.mac, addr->sllc_mac, IFHWADDRLEN);
 	memcpy(&llc->addr, addr, sizeof(llc->addr));
@@ -409,6 +423,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 out_put:
 	llc_sap_put(sap);
 out:
+	dev_put(dev);
 	release_sock(sk);
 	return rc;
 }
|
@@ -2076,14 +2076,12 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
|
|||||||
const struct mesh_setup *setup)
|
const struct mesh_setup *setup)
|
||||||
{
|
{
|
||||||
u8 *new_ie;
|
u8 *new_ie;
|
||||||
const u8 *old_ie;
|
|
||||||
struct ieee80211_sub_if_data *sdata = container_of(ifmsh,
|
struct ieee80211_sub_if_data *sdata = container_of(ifmsh,
|
||||||
struct ieee80211_sub_if_data, u.mesh);
|
struct ieee80211_sub_if_data, u.mesh);
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
/* allocate information elements */
|
/* allocate information elements */
|
||||||
new_ie = NULL;
|
new_ie = NULL;
|
||||||
old_ie = ifmsh->ie;
|
|
||||||
|
|
||||||
if (setup->ie_len) {
|
if (setup->ie_len) {
|
||||||
new_ie = kmemdup(setup->ie, setup->ie_len,
|
new_ie = kmemdup(setup->ie, setup->ie_len,
|
||||||
@@ -2093,7 +2091,6 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
|
|||||||
}
|
}
|
||||||
ifmsh->ie_len = setup->ie_len;
|
ifmsh->ie_len = setup->ie_len;
|
||||||
ifmsh->ie = new_ie;
|
ifmsh->ie = new_ie;
|
||||||
kfree(old_ie);
|
|
||||||
|
|
||||||
/* now copy the rest of the setup parameters */
|
/* now copy the rest of the setup parameters */
|
||||||
ifmsh->mesh_id_len = setup->mesh_id_len;
|
ifmsh->mesh_id_len = setup->mesh_id_len;
|
||||||
|
@@ -162,7 +162,7 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
 	struct nft_rule *const *rules;
 	const struct nft_rule *rule;
 	const struct nft_expr *expr, *last;
-	struct nft_regs regs;
+	struct nft_regs regs = {};
 	unsigned int stackptr = 0;
 	struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
 	bool genbit = READ_ONCE(net->nft.gencursor);
@@ -774,6 +774,11 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
 	if (oss_period_size < 16)
 		return -EINVAL;
+
+	/* don't allocate too large period; 1MB period must be enough */
+	if (oss_period_size > 1024 * 1024)
+		return -ENOMEM;
+
 	runtime->oss.period_bytes = oss_period_size;
 	runtime->oss.period_frames = 1;
 	runtime->oss.periods = oss_periods;
@@ -1042,10 +1047,9 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
 			goto failure;
 	}
 #endif
-	oss_period_size *= oss_frame_size;
-
-	oss_buffer_size = oss_period_size * runtime->oss.periods;
-	if (oss_buffer_size < 0) {
+	oss_period_size = array_size(oss_period_size, oss_frame_size);
+	oss_buffer_size = array_size(oss_period_size, runtime->oss.periods);
+	if (oss_buffer_size <= 0) {
 		err = -EINVAL;
 		goto failure;
 	}
@@ -61,7 +61,10 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t
 	}
 	if ((width = snd_pcm_format_physical_width(format->format)) < 0)
 		return width;
-	size = frames * format->channels * width;
+	size = array3_size(frames, format->channels, width);
+	/* check for too large period size once again */
+	if (size > 1024 * 1024)
+		return -ENOMEM;
 	if (snd_BUG_ON(size % 8))
 		return -ENXIO;
 	size /= 8;
@@ -969,6 +969,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
 	init_waitqueue_head(&runtime->tsleep);
 
 	runtime->status->state = SNDRV_PCM_STATE_OPEN;
+	mutex_init(&runtime->buffer_mutex);
 
 	substream->runtime = runtime;
 	substream->private_data = pcm->private_data;
@@ -1002,6 +1003,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
 	} else {
 		substream->runtime = NULL;
 	}
+	mutex_destroy(&runtime->buffer_mutex);
 	kfree(runtime);
 	put_pid(substream->pid);
 	substream->pid = NULL;
|
@@ -1871,9 +1871,11 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
|
|||||||
if (avail >= runtime->twake)
|
if (avail >= runtime->twake)
|
||||||
break;
|
break;
|
||||||
snd_pcm_stream_unlock_irq(substream);
|
snd_pcm_stream_unlock_irq(substream);
|
||||||
|
mutex_unlock(&runtime->buffer_mutex);
|
||||||
|
|
||||||
tout = schedule_timeout(wait_time);
|
tout = schedule_timeout(wait_time);
|
||||||
|
|
||||||
|
mutex_lock(&runtime->buffer_mutex);
|
||||||
snd_pcm_stream_lock_irq(substream);
|
snd_pcm_stream_lock_irq(substream);
|
||||||
set_current_state(TASK_INTERRUPTIBLE);
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
switch (runtime->status->state) {
|
switch (runtime->status->state) {
|
||||||
@@ -2167,6 +2169,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
|
|||||||
|
|
||||||
nonblock = !!(substream->f_flags & O_NONBLOCK);
|
nonblock = !!(substream->f_flags & O_NONBLOCK);
|
||||||
|
|
||||||
|
mutex_lock(&runtime->buffer_mutex);
|
||||||
snd_pcm_stream_lock_irq(substream);
|
snd_pcm_stream_lock_irq(substream);
|
||||||
err = pcm_accessible_state(runtime);
|
err = pcm_accessible_state(runtime);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
@@ -2254,6 +2257,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
|
|||||||
if (xfer > 0 && err >= 0)
|
if (xfer > 0 && err >= 0)
|
||||||
snd_pcm_update_state(substream, runtime);
|
snd_pcm_update_state(substream, runtime);
|
||||||
snd_pcm_stream_unlock_irq(substream);
|
snd_pcm_stream_unlock_irq(substream);
|
||||||
|
mutex_unlock(&runtime->buffer_mutex);
|
||||||
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
|
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
|
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
|
||||||
|
@@ -164,19 +164,20 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
|
|||||||
size_t size;
|
size_t size;
|
||||||
struct snd_dma_buffer new_dmab;
|
struct snd_dma_buffer new_dmab;
|
||||||
|
|
||||||
|
mutex_lock(&substream->pcm->open_mutex);
|
||||||
if (substream->runtime) {
|
if (substream->runtime) {
|
||||||
buffer->error = -EBUSY;
|
buffer->error = -EBUSY;
|
||||||
return;
|
goto unlock;
|
||||||
}
|
}
|
||||||
if (!snd_info_get_line(buffer, line, sizeof(line))) {
|
if (!snd_info_get_line(buffer, line, sizeof(line))) {
|
||||||
snd_info_get_str(str, line, sizeof(str));
|
snd_info_get_str(str, line, sizeof(str));
|
||||||
size = simple_strtoul(str, NULL, 10) * 1024;
|
size = simple_strtoul(str, NULL, 10) * 1024;
|
||||||
if ((size != 0 && size < 8192) || size > substream->dma_max) {
|
if ((size != 0 && size < 8192) || size > substream->dma_max) {
|
||||||
buffer->error = -EINVAL;
|
buffer->error = -EINVAL;
|
||||||
return;
|
goto unlock;
|
||||||
}
|
}
|
||||||
if (substream->dma_buffer.bytes == size)
|
if (substream->dma_buffer.bytes == size)
|
||||||
return;
|
goto unlock;
|
||||||
memset(&new_dmab, 0, sizeof(new_dmab));
|
memset(&new_dmab, 0, sizeof(new_dmab));
|
||||||
new_dmab.dev = substream->dma_buffer.dev;
|
new_dmab.dev = substream->dma_buffer.dev;
|
||||||
if (size > 0) {
|
if (size > 0) {
|
||||||
@@ -185,7 +186,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
|
|||||||
substream->dma_buffer.dev.dev,
|
substream->dma_buffer.dev.dev,
|
||||||
size, &new_dmab) < 0) {
|
size, &new_dmab) < 0) {
|
||||||
buffer->error = -ENOMEM;
|
buffer->error = -ENOMEM;
|
||||||
return;
|
goto unlock;
|
||||||
}
|
}
|
||||||
substream->buffer_bytes_max = size;
|
substream->buffer_bytes_max = size;
|
||||||
} else {
|
} else {
|
||||||
@@ -197,6 +198,8 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
|
|||||||
} else {
|
} else {
|
||||||
buffer->error = -EINVAL;
|
buffer->error = -EINVAL;
|
||||||
}
|
}
|
||||||
|
unlock:
|
||||||
|
mutex_unlock(&substream->pcm->open_mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void preallocate_info_init(struct snd_pcm_substream *substream)
|
static inline void preallocate_info_init(struct snd_pcm_substream *substream)
|
||||||
|
@@ -667,33 +667,40 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
|
||||||
|
#define is_oss_stream(substream) ((substream)->oss.oss)
|
||||||
|
#else
|
||||||
|
#define is_oss_stream(substream) false
|
||||||
|
#endif
|
||||||
|
|
||||||
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
|
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
|
||||||
struct snd_pcm_hw_params *params)
|
struct snd_pcm_hw_params *params)
|
||||||
{
|
{
|
||||||
struct snd_pcm_runtime *runtime;
|
struct snd_pcm_runtime *runtime;
|
||||||
int err, usecs;
|
int err = 0, usecs;
|
||||||
unsigned int bits;
|
unsigned int bits;
|
||||||
snd_pcm_uframes_t frames;
|
snd_pcm_uframes_t frames;
|
||||||
|
|
||||||
if (PCM_RUNTIME_CHECK(substream))
|
if (PCM_RUNTIME_CHECK(substream))
|
||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
runtime = substream->runtime;
|
runtime = substream->runtime;
|
||||||
|
mutex_lock(&runtime->buffer_mutex);
|
||||||
snd_pcm_stream_lock_irq(substream);
|
snd_pcm_stream_lock_irq(substream);
|
||||||
switch (runtime->status->state) {
|
switch (runtime->status->state) {
|
||||||
case SNDRV_PCM_STATE_OPEN:
|
case SNDRV_PCM_STATE_OPEN:
|
||||||
case SNDRV_PCM_STATE_SETUP:
|
case SNDRV_PCM_STATE_SETUP:
|
||||||
case SNDRV_PCM_STATE_PREPARED:
|
case SNDRV_PCM_STATE_PREPARED:
|
||||||
|
if (!is_oss_stream(substream) &&
|
||||||
|
atomic_read(&substream->mmap_count))
|
||||||
|
err = -EBADFD;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
snd_pcm_stream_unlock_irq(substream);
|
err = -EBADFD;
|
||||||
return -EBADFD;
|
break;
|
||||||
}
|
}
|
||||||
snd_pcm_stream_unlock_irq(substream);
|
snd_pcm_stream_unlock_irq(substream);
|
||||||
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
|
if (err)
|
||||||
if (!substream->oss.oss)
|
goto unlock;
|
||||||
#endif
|
|
||||||
if (atomic_read(&substream->mmap_count))
|
|
||||||
return -EBADFD;
|
|
||||||
|
|
||||||
snd_pcm_sync_stop(substream, true);
|
snd_pcm_sync_stop(substream, true);
|
||||||
|
|
||||||
@@ -780,16 +787,21 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
 	if ((usecs = period_to_usecs(runtime)) >= 0)
 		cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
 					    usecs);
-	return 0;
+	err = 0;
  _error:
-	/* hardware might be unusable from this time,
-	   so we force application to retry to set
-	   the correct hardware parameter settings */
-	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
-	if (substream->ops->hw_free != NULL)
-		substream->ops->hw_free(substream);
-	if (substream->managed_buffer_alloc)
-		snd_pcm_lib_free_pages(substream);
+	if (err) {
+		/* hardware might be unusable from this time,
+		 * so we force application to retry to set
+		 * the correct hardware parameter settings
+		 */
+		snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+		if (substream->ops->hw_free != NULL)
+			substream->ops->hw_free(substream);
+		if (substream->managed_buffer_alloc)
+			snd_pcm_lib_free_pages(substream);
+	}
+ unlock:
+	mutex_unlock(&runtime->buffer_mutex);
 	return err;
 }
 
@@ -829,26 +841,31 @@ static int do_hw_free(struct snd_pcm_substream *substream)
 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime;
-	int result;
+	int result = 0;
 
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
+	mutex_lock(&runtime->buffer_mutex);
 	snd_pcm_stream_lock_irq(substream);
 	switch (runtime->status->state) {
 	case SNDRV_PCM_STATE_SETUP:
 	case SNDRV_PCM_STATE_PREPARED:
+		if (atomic_read(&substream->mmap_count))
+			result = -EBADFD;
 		break;
 	default:
-		snd_pcm_stream_unlock_irq(substream);
-		return -EBADFD;
+		result = -EBADFD;
+		break;
 	}
 	snd_pcm_stream_unlock_irq(substream);
-	if (atomic_read(&substream->mmap_count))
-		return -EBADFD;
+	if (result)
+		goto unlock;
 	result = do_hw_free(substream);
 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
 	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
+ unlock:
+	mutex_unlock(&runtime->buffer_mutex);
 	return result;
 }
 
@@ -1154,15 +1171,17 @@ struct action_ops {
 static int snd_pcm_action_group(const struct action_ops *ops,
 				struct snd_pcm_substream *substream,
 				snd_pcm_state_t state,
-				bool do_lock)
+				bool stream_lock)
 {
 	struct snd_pcm_substream *s = NULL;
 	struct snd_pcm_substream *s1;
 	int res = 0, depth = 1;
 
 	snd_pcm_group_for_each_entry(s, substream) {
-		if (do_lock && s != substream) {
-			if (s->pcm->nonatomic)
+		if (s != substream) {
+			if (!stream_lock)
+				mutex_lock_nested(&s->runtime->buffer_mutex, depth);
+			else if (s->pcm->nonatomic)
 				mutex_lock_nested(&s->self_group.mutex, depth);
 			else
 				spin_lock_nested(&s->self_group.lock, depth);
@@ -1190,18 +1209,18 @@ static int snd_pcm_action_group(const struct action_ops *ops,
 		ops->post_action(s, state);
 	}
  _unlock:
-	if (do_lock) {
-		/* unlock streams */
-		snd_pcm_group_for_each_entry(s1, substream) {
-			if (s1 != substream) {
-				if (s1->pcm->nonatomic)
-					mutex_unlock(&s1->self_group.mutex);
-				else
-					spin_unlock(&s1->self_group.lock);
-			}
-			if (s1 == s)	/* end */
-				break;
+	/* unlock streams */
+	snd_pcm_group_for_each_entry(s1, substream) {
+		if (s1 != substream) {
+			if (!stream_lock)
+				mutex_unlock(&s1->runtime->buffer_mutex);
+			else if (s1->pcm->nonatomic)
+				mutex_unlock(&s1->self_group.mutex);
+			else
+				spin_unlock(&s1->self_group.lock);
 		}
+		if (s1 == s)	/* end */
+			break;
 	}
 	return res;
 }
@@ -1331,10 +1350,12 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
 
 	/* Guarantee the group members won't change during non-atomic action */
 	down_read(&snd_pcm_link_rwsem);
+	mutex_lock(&substream->runtime->buffer_mutex);
 	if (snd_pcm_stream_linked(substream))
 		res = snd_pcm_action_group(ops, substream, state, false);
 	else
 		res = snd_pcm_action_single(ops, substream, state);
+	mutex_unlock(&substream->runtime->buffer_mutex);
 	up_read(&snd_pcm_link_rwsem);
 	return res;
 }
@@ -1829,11 +1850,13 @@ static int snd_pcm_do_reset(struct snd_pcm_substream *substream,
 	int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
 	if (err < 0)
 		return err;
+	snd_pcm_stream_lock_irq(substream);
 	runtime->hw_ptr_base = 0;
 	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
 		runtime->status->hw_ptr % runtime->period_size;
 	runtime->silence_start = runtime->status->hw_ptr;
 	runtime->silence_filled = 0;
+	snd_pcm_stream_unlock_irq(substream);
 	return 0;
 }
 
@@ -1841,10 +1864,12 @@ static void snd_pcm_post_reset(struct snd_pcm_substream *substream,
 			       snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	snd_pcm_stream_lock_irq(substream);
 	runtime->control->appl_ptr = runtime->status->hw_ptr;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
 	    runtime->silence_size > 0)
 		snd_pcm_playback_silence(substream, ULONG_MAX);
+	snd_pcm_stream_unlock_irq(substream);
 }
 
 static const struct action_ops snd_pcm_action_reset = {
@@ -938,8 +938,8 @@ static int snd_ac97_ad18xx_pcm_get_volume(struct snd_kcontrol *kcontrol, struct
 	int codec = kcontrol->private_value & 3;
 
 	mutex_lock(&ac97->page_mutex);
-	ucontrol->value.integer.value[0] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 0) & 31);
-	ucontrol->value.integer.value[1] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 8) & 31);
+	ucontrol->value.integer.value[0] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 8) & 31);
+	ucontrol->value.integer.value[1] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 0) & 31);
 	mutex_unlock(&ac97->page_mutex);
 	return 0;
 }
@@ -302,7 +302,6 @@ MODULE_PARM_DESC(joystick_port, "Joystick port address.");
 #define CM_MICGAINZ		0x01	/* mic boost */
 #define CM_MICGAINZ_SHIFT	0
 
-#define CM_REG_MIXER3		0x24
 #define CM_REG_AUX_VOL		0x26
 #define CM_VAUXL_MASK		0xf0
 #define CM_VAUXR_MASK		0x0f
@@ -3291,7 +3290,7 @@ static void snd_cmipci_remove(struct pci_dev *pci)
  */
 static const unsigned char saved_regs[] = {
 	CM_REG_FUNCTRL1, CM_REG_CHFORMAT, CM_REG_LEGACY_CTRL, CM_REG_MISC_CTRL,
-	CM_REG_MIXER0, CM_REG_MIXER1, CM_REG_MIXER2, CM_REG_MIXER3, CM_REG_PLL,
+	CM_REG_MIXER0, CM_REG_MIXER1, CM_REG_MIXER2, CM_REG_AUX_VOL, CM_REG_PLL,
 	CM_REG_CH0_FRAME1, CM_REG_CH0_FRAME2,
 	CM_REG_CH1_FRAME1, CM_REG_CH1_FRAME2, CM_REG_EXT_MISC,
 	CM_REG_INT_STATUS, CM_REG_INT_HLDCLR, CM_REG_FUNCTRL0,
@@ -8801,6 +8801,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
 	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
 	SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
@@ -8884,6 +8885,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
 	SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
 	SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -10839,6 +10842,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
 	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+	SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
 	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
 	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
@@ -91,7 +91,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
 		SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
 
 		/* Stop the player */
-		snd_pcm_stop_xrun(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
 	}
 
 	ret = IRQ_HANDLED;
@@ -105,7 +105,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
 		SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
 
 		/* Stop the player */
-		snd_pcm_stop_xrun(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
 
 		ret = IRQ_HANDLED;
 	}
@@ -138,7 +138,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
 			dev_err(player->dev, "Underflow recovery failed\n");
 
 		/* Stop the player */
-		snd_pcm_stop_xrun(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
 
 		ret = IRQ_HANDLED;
 	}
@@ -65,7 +65,7 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
 	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
 		dev_err(reader->dev, "FIFO error detected\n");
 
-		snd_pcm_stop_xrun(reader->substream);
+		snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
 
 		ret = IRQ_HANDLED;
 	}
@@ -542,6 +542,16 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
 		.id = USB_ID(0x25c4, 0x0003),
 		.map = scms_usb3318_map,
 	},
+	{
+		/* Corsair Virtuoso SE Latest (wired mode) */
+		.id = USB_ID(0x1b1c, 0x0a3f),
+		.map = corsair_virtuoso_map,
+	},
+	{
+		/* Corsair Virtuoso SE Latest (wireless mode) */
+		.id = USB_ID(0x1b1c, 0x0a40),
+		.map = corsair_virtuoso_map,
+	},
 	{
 		.id = USB_ID(0x30be, 0x0101),	/* Schiit Hel */
 		.ignore_ctl_error = 1,
@@ -3135,9 +3135,10 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
 		if (unitid == 7 && cval->control == UAC_FU_VOLUME)
 			snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
 		break;
-	/* lowest playback value is muted on C-Media devices */
-	case USB_ID(0x0d8c, 0x000c):
-	case USB_ID(0x0d8c, 0x0014):
+	/* lowest playback value is muted on some devices */
+	case USB_ID(0x0d8c, 0x000c): /* C-Media */
+	case USB_ID(0x0d8c, 0x0014): /* C-Media */
+	case USB_ID(0x19f7, 0x0003): /* RODE NT-USB */
 		if (strstr(kctl->id.name, "Playback"))
 			cval->min_mute = 1;
 		break;