Merge remote-tracking branch 'wireless-next/master' into HEAD
@@ -3695,7 +3695,7 @@ struct iwl_bt_uart_msg {
 	u8 frame5;
 	u8 frame6;
 	u8 frame7;
-} __attribute__((packed));
+} __packed;
 
 struct iwl_bt_coex_profile_notif {
 	struct iwl_bt_uart_msg last_bt_uart_msg;
@@ -3703,7 +3703,7 @@ struct iwl_bt_coex_profile_notif {
 	u8 bt_traffic_load; /* 0 .. 3? */
 	u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
 	u8 reserved;
-} __attribute__((packed));
+} __packed;
 
 #define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS	0
 #define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK	0x1
@@ -3752,7 +3752,7 @@ enum bt_coex_prio_table_priorities {
 
 struct iwl_bt_coex_prio_table_cmd {
 	u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
-} __attribute__((packed));
+} __packed;
 
 #define IWL_BT_COEX_ENV_CLOSE	0
 #define IWL_BT_COEX_ENV_OPEN	1
@@ -3764,7 +3764,7 @@ struct iwl_bt_coex_prot_env_cmd {
 	u8 action; /* 0 = closed, 1 = open */
 	u8 type; /* 0 .. 15 */
 	u8 reserved[2];
-} __attribute__((packed));
+} __packed;
 
 /*
  * REPLY_D3_CONFIG
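The four hunks above replace the open-coded GCC attribute with the kernel's __packed shorthand. As a quick illustration -- a self-contained userspace sketch, not the kernel's actual header (the macro definition below is assumed for the demo) -- both spellings yield the same padding-free layout:

#include <stdio.h>
#include <stdint.h>

/* In the kernel this comes from the compiler headers; defined here only
 * so the sketch builds in userspace. */
#define __packed __attribute__((packed))

struct with_attr {
	uint8_t  frame5;
	uint32_t data;
} __attribute__((packed));

struct with_macro {
	uint8_t  frame5;
	uint32_t data;
} __packed;

int main(void)
{
	/* both structs drop the 3 padding bytes: 5 bytes each */
	printf("%zu %zu\n", sizeof(struct with_attr),
	       sizeof(struct with_macro));
	return 0;
}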
@@ -157,7 +157,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 	sram = priv->dbgfs_sram_offset & ~0x3;
 
 	/* read the first u32 from sram */
-	val = iwl_read_targ_mem(priv->trans, sram);
+	val = iwl_trans_read_mem32(priv->trans, sram);
 
 	for (; len; len--) {
 		/* put the address at the start of every line */
@@ -176,7 +176,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 		if (++offset == 4) {
 			sram += 4;
 			offset = 0;
-			val = iwl_read_targ_mem(priv->trans, sram);
+			val = iwl_trans_read_mem32(priv->trans, sram);
 		}
 
 		/* put in extra spaces and split lines for human readability */
@@ -69,7 +69,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
 /* Set led register off */
 void iwlagn_led_enable(struct iwl_priv *priv)
 {
-	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
 }
 
 /*
@@ -206,7 +206,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
 
 #ifdef CONFIG_PM_SLEEP
 	if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
-	    priv->trans->ops->wowlan_suspend &&
+	    priv->trans->ops->d3_suspend &&
+	    priv->trans->ops->d3_resume &&
 	    device_can_wakeup(priv->trans->dev)) {
 		hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
 					  WIPHY_WOWLAN_DISCONNECT |
@@ -426,7 +427,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
 	if (ret)
 		goto error;
 
-	iwl_trans_wowlan_suspend(priv->trans);
+	iwl_trans_d3_suspend(priv->trans);
 
 	goto out;
 
@@ -459,11 +460,11 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
 	base = priv->device_pointers.error_event_table;
 	if (iwlagn_hw_valid_rtc_data_addr(base)) {
 		spin_lock_irqsave(&priv->trans->reg_lock, flags);
-		ret = iwl_grab_nic_access_silent(priv->trans);
-		if (likely(ret == 0)) {
+		if (iwl_trans_grab_nic_access(priv->trans, true)) {
 			iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
 			status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
-			iwl_release_nic_access(priv->trans);
+			iwl_trans_release_nic_access(priv->trans);
+			ret = 0;
 		}
 		spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
 
@@ -479,7 +480,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
 	}
 
 	if (priv->wowlan_sram)
-		_iwl_read_targ_mem_dwords(
+		iwl_trans_read_mem(
 			priv->trans, 0x800000,
 			priv->wowlan_sram,
 			img->sec[IWL_UCODE_SECTION_DATA].len / 4);
@@ -520,9 +521,6 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw,
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
-	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
-		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
 	if (iwlagn_tx_skb(priv, control->sta, skb))
 		ieee80211_free_txskb(hw, skb);
 }
@@ -354,7 +354,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
 
 	/* Make sure device is powered up for SRAM reads */
 	spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
-	if (unlikely(!iwl_grab_nic_access(priv->trans))) {
+	if (!iwl_trans_grab_nic_access(priv->trans, false)) {
 		spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
 		return;
 	}
@@ -388,7 +388,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
 		}
 	}
 	/* Allow device to power down */
-	iwl_release_nic_access(priv->trans);
+	iwl_trans_release_nic_access(priv->trans);
 	spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
 }
 
@@ -408,7 +408,8 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
 
 	base = priv->device_pointers.log_event_table;
 	if (iwlagn_hw_valid_rtc_data_addr(base)) {
-		iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
+		iwl_trans_read_mem_bytes(priv->trans, base,
+					 &read, sizeof(read));
 		capacity = read.capacity;
 		mode = read.mode;
 		num_wraps = read.wrap_counter;
@@ -1627,7 +1628,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
 	}
 
 	/*TODO: Update dbgfs with ISR error stats obtained below */
-	iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
+	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
 
 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
 		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -1717,7 +1718,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
 
 	/* Make sure device is powered up for SRAM reads */
 	spin_lock_irqsave(&trans->reg_lock, reg_flags);
-	if (unlikely(!iwl_grab_nic_access(trans)))
+	if (!iwl_trans_grab_nic_access(trans, false))
 		goto out_unlock;
 
 	/* Set starting address; reads will auto-increment */
@@ -1756,7 +1757,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
 	}
 
 	/* Allow device to power down */
-	iwl_release_nic_access(trans);
+	iwl_trans_release_nic_access(trans);
 out_unlock:
 	spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
 	return pos;
@@ -1835,10 +1836,10 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
 	}
 
 	/* event log header */
-	capacity = iwl_read_targ_mem(trans, base);
-	mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
-	num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
-	next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
+	capacity = iwl_trans_read_mem32(trans, base);
+	mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32)));
+	num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32)));
+	next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32)));
 
 	if (capacity > logsize) {
 		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
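For reference, the event-log header parsed in the last hunk is simply four consecutive DWORDs at the log base. A minimal userspace mock of that layout (the array standing in for device SRAM is an assumption; the driver performs these reads through iwl_trans_read_mem32()):

#include <stdint.h>
#include <stdio.h>

/* mock SRAM contents: capacity, mode, num_wraps, next_entry */
static const uint32_t sram[] = { 256, 1, 3, 42 };

static uint32_t read_mem32(uint32_t addr)
{
	return sram[addr / sizeof(uint32_t)];
}

int main(void)
{
	uint32_t base = 0;
	/* event log header: four consecutive DWORDs, as in the hunk above */
	uint32_t capacity   = read_mem32(base);
	uint32_t mode       = read_mem32(base + 1 * sizeof(uint32_t));
	uint32_t num_wraps  = read_mem32(base + 2 * sizeof(uint32_t));
	uint32_t next_entry = read_mem32(base + 3 * sizeof(uint32_t));

	printf("capacity=%u mode=%u wraps=%u next=%u\n",
	       capacity, mode, num_wraps, next_entry);
	return 0;
}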
@@ -186,8 +186,8 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
 	}
 	iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
 	spin_lock_irqsave(&priv->trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(priv->trans)))
-		iwl_release_nic_access(priv->trans);
+	if (iwl_trans_grab_nic_access(priv->trans, false))
+		iwl_trans_release_nic_access(priv->trans);
 	spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
 
 	/* Reschedule the ct_kill timer to occur in
@@ -231,13 +231,11 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
 		if (info->flags & IEEE80211_TX_CTL_AMPDU)
 			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
-		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
 		break;
 
 	case WLAN_CIPHER_SUITE_TKIP:
 		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
 		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
-		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
 		break;
 
 	case WLAN_CIPHER_SUITE_WEP104:
@@ -355,8 +353,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 		}
 	}
 
-	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
 	if (sta)
 		sta_priv = (void *)sta->drv_priv;
 
@@ -472,6 +468,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 	WARN_ON_ONCE(is_agg &&
 		     priv->queue_to_mac80211[txq_id] != info->hw_queue);
 
+	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
+		     txq_id, seq_number);
+
 	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
 		goto drop_unlock_sta;
 
@@ -541,9 +540,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	spin_lock_bh(&priv->sta_lock);
 
 	tid_data = &priv->tid_data[sta_id][tid];
-	txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
+	txq_id = tid_data->agg.txq_id;
 
-	switch (priv->tid_data[sta_id][tid].agg.state) {
+	switch (tid_data->agg.state) {
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
 		/*
 		 * This can happen if the peer stops aggregation
@@ -563,9 +562,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	case IWL_AGG_ON:
 		break;
 	default:
-		IWL_WARN(priv, "Stopping AGG while state not ON "
-			 "or starting for %d on %d (%d)\n", sta_id, tid,
-			 priv->tid_data[sta_id][tid].agg.state);
+		IWL_WARN(priv,
+			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+			 sta_id, tid, tid_data->agg.state);
 		spin_unlock_bh(&priv->sta_lock);
 		return 0;
 	}
@@ -578,12 +577,11 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			    "stopping AGG on STA/TID %d/%d but hwq %d not used\n",
 			    sta_id, tid, txq_id);
 	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
-		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
-				    "next_recl = %d\n",
+		IWL_DEBUG_TX_QUEUES(priv,
+				    "Can't proceed: ssn %d, next_recl = %d\n",
 				    tid_data->agg.ssn,
 				    tid_data->next_reclaimed);
-		priv->tid_data[sta_id][tid].agg.state =
-			IWL_EMPTYING_HW_QUEUE_DELBA;
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
 		spin_unlock_bh(&priv->sta_lock);
 		return 0;
 	}
@@ -591,8 +589,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
 			    tid_data->agg.ssn);
 turn_off:
-	agg_state = priv->tid_data[sta_id][tid].agg.state;
-	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+	agg_state = tid_data->agg.state;
+	tid_data->agg.state = IWL_AGG_OFF;
 
 	spin_unlock_bh(&priv->sta_lock);
 
@@ -954,12 +952,6 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
 		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
 			      AGG_TX_STATE_ABORT_MSK))
 			continue;
-
-		IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
-				   "try-count (0x%08x)\n",
-				   iwl_get_agg_tx_fail_reason(fstatus),
-				   fstatus & AGG_TX_STATUS_MSK,
-				   fstatus & AGG_TX_TRY_MSK);
 	}
 }
 
@@ -1079,6 +1071,8 @@ static void iwlagn_set_tx_status(struct iwl_priv *priv,
 {
 	u16 status = le16_to_cpu(tx_resp->status.status);
 
+	info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
 	info->status.rates[0].count = tx_resp->failure_frame + 1;
 	info->flags |= iwl_tx_status_to_mac80211(status);
 	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
@@ -1151,13 +1145,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 			next_reclaimed = ssn;
 		}
 
-		if (tid != IWL_TID_NON_QOS) {
-			priv->tid_data[sta_id][tid].next_reclaimed =
-				next_reclaimed;
-			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
-					   next_reclaimed);
-		}
-
 		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
 		iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1208,21 +1195,49 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 			if (!is_agg)
 				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
+			/*
+			 * W/A for FW bug - the seq_ctl isn't updated when the
+			 * queues are flushed. Fetch it from the packet itself
+			 */
+			if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+				next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
+				next_reclaimed =
+					SEQ_TO_SN(next_reclaimed + 0x10);
+			}
+
 			is_offchannel_skb =
 				(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
 			freed++;
 		}
 
-		WARN_ON(!is_agg && freed != 1);
+		if (tid != IWL_TID_NON_QOS) {
+			priv->tid_data[sta_id][tid].next_reclaimed =
+				next_reclaimed;
+			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+					   next_reclaimed);
+		}
+
+		if (!is_agg && freed != 1)
+			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
 
 		/*
		 * An offchannel frame can be sent only on the AUX queue, where
		 * there is no aggregation (and no reordering), so only a single
		 * skb is expected to be processed.
 		 */
-		WARN_ON(is_offchannel_skb && freed != 1);
+		if (is_offchannel_skb && freed != 1)
+			IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
 	}
 
 	IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
 			   iwl_get_tx_fail_reason(status), status);
 
 	IWL_DEBUG_TX_REPLY(priv,
 			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
 			   le32_to_cpu(tx_resp->rate_n_flags),
 			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence), ssn,
 			   le16_to_cpu(tx_resp->seq_ctl));
 
 	iwl_check_abort_status(priv, tx_resp->frame_count, status);
 	spin_unlock(&priv->sta_lock);
@@ -286,89 +286,6 @@ static int iwl_alive_notify(struct iwl_priv *priv)
 	return iwl_send_calib_results(priv);
 }
 
-/**
- * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int iwl_verify_sec_sparse(struct iwl_priv *priv,
-				 const struct fw_desc *fw_desc)
-{
-	__le32 *image = (__le32 *)fw_desc->data;
-	u32 len = fw_desc->len;
-	u32 val;
-	u32 i;
-
-	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
-	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
-				   i + fw_desc->offset);
-		val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image))
-			return -EIO;
-	}
-
-	return 0;
-}
-
-static void iwl_print_mismatch_sec(struct iwl_priv *priv,
-				   const struct fw_desc *fw_desc)
-{
-	__le32 *image = (__le32 *)fw_desc->data;
-	u32 len = fw_desc->len;
-	u32 val;
-	u32 offs;
-	int errors = 0;
-
-	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
-	iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
-			   fw_desc->offset);
-
-	for (offs = 0;
-	     offs < len && errors < 20;
-	     offs += sizeof(u32), image++) {
-		/* read data comes through single port, auto-incr addr */
-		val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-			IWL_ERR(priv, "uCode INST section at "
-				"offset 0x%x, is 0x%x, s/b 0x%x\n",
-				offs, val, le32_to_cpu(*image));
-			errors++;
-		}
-	}
-}
-
-/**
- * iwl_verify_ucode - determine which instruction image is in SRAM,
- *   and verify its contents
- */
-static int iwl_verify_ucode(struct iwl_priv *priv,
-			    enum iwl_ucode_type ucode_type)
-{
-	const struct fw_img *img = iwl_get_ucode_image(priv, ucode_type);
-
-	if (!img) {
-		IWL_ERR(priv, "Invalid ucode requested (%d)\n", ucode_type);
-		return -EINVAL;
-	}
-
-	if (!iwl_verify_sec_sparse(priv, &img->sec[IWL_UCODE_SECTION_INST])) {
-		IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
-
-	iwl_print_mismatch_sec(priv, &img->sec[IWL_UCODE_SECTION_INST]);
-	return -EIO;
-}
-
 struct iwl_alive_data {
 	bool valid;
 	u8 subtype;
@@ -426,7 +343,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
 				   alive_cmd, ARRAY_SIZE(alive_cmd),
 				   iwl_alive_fn, &alive_data);
 
-	ret = iwl_trans_start_fw(priv->trans, fw);
+	ret = iwl_trans_start_fw(priv->trans, fw, false);
 	if (ret) {
 		priv->cur_ucode = old_type;
 		iwl_remove_notification(&priv->notif_wait, &alive_wait);
@@ -450,18 +367,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
 		return -EIO;
 	}
 
-	/*
-	 * This step takes a long time (60-80ms!!) and
-	 * WoWLAN image should be loaded quickly, so
-	 * skip it for WoWLAN.
-	 */
-	if (ucode_type != IWL_UCODE_WOWLAN) {
-		ret = iwl_verify_ucode(priv, ucode_type);
-		if (ret) {
-			priv->cur_ucode = old_type;
-			return ret;
-		}
-
-		/* delay a bit to give rfkill time to run */
-		msleep(5);
-	}
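The deleted iwl_verify_sec_sparse() compared the on-card image against the host copy by sampling one 32-bit word every 100 bytes. A self-contained sketch of the same sampling idea, with plain buffers standing in for device memory (the mock read below is an assumption, not the driver's HBUS auto-increment port):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint8_t device_img[4096];	/* mock on-card SRAM */
static uint8_t host_img[4096];		/* host copy of the image */

static int verify_sparse(const uint8_t *host, const uint8_t *dev, size_t len)
{
	/* sample one 32-bit word every 100 bytes; if the samples match,
	 * the stretches between them very likely match too */
	for (size_t i = 0; i + 4 <= len; i += 100) {
		uint32_t h, d;
		memcpy(&h, host + i, 4);
		memcpy(&d, dev + i, 4);
		if (h != d)
			return -1;	/* -EIO in the driver */
	}
	return 0;
}

int main(void)
{
	memset(device_img, 0xab, sizeof(device_img));
	memcpy(host_img, device_img, sizeof(host_img));
	printf("verify: %d\n", verify_sparse(host_img, device_img,
					     sizeof(host_img)));
	return 0;
}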
@@ -381,8 +381,8 @@
 
 /* LED */
 #define CSR_LED_BSM_CTRL_MSK	(0xFFFFFFDF)
-#define CSR_LED_REG_TRUN_ON	(0x78)
-#define CSR_LED_REG_TRUN_OFF	(0x38)
+#define CSR_LED_REG_TURN_ON	(0x60)
+#define CSR_LED_REG_TURN_OFF	(0x20)
 
 /* ANA_PLL */
 #define CSR50_ANA_PLL_CFG_VAL	(0x00880300)
@@ -225,6 +225,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define FH_RSCSR_CHNL0_RBDCB_WPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x008)
 #define FH_RSCSR_CHNL0_WPTR		(FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
 
+#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x00c)
+#define FH_RSCSR_CHNL0_RDPTR		FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
+
 /**
  * Rx Config/Status Registers (RCSR)
@@ -257,6 +259,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define FH_MEM_RCSR_CHNL0		(FH_MEM_RCSR_LOWER_BOUND)
 
 #define FH_MEM_RCSR_CHNL0_CONFIG_REG	(FH_MEM_RCSR_CHNL0)
+#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR	(FH_MEM_RCSR_CHNL0 + 0x8)
+#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ	(FH_MEM_RCSR_CHNL0 + 0x10)
 
 #define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
 #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000) /* bits 12 */
@@ -35,12 +35,12 @@
 
 #define IWL_POLL_INTERVAL 10	/* microseconds */
 
-static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
 {
 	iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
 }
 
-static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
 {
 	iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
 }
@@ -99,86 +99,16 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
 }
 EXPORT_SYMBOL_GPL(iwl_poll_bit);
 
-int iwl_grab_nic_access_silent(struct iwl_trans *trans)
-{
-	int ret;
-
-	lockdep_assert_held(&trans->reg_lock);
-
-	/* this bit wakes up the NIC */
-	__iwl_set_bit(trans, CSR_GP_CNTRL,
-		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
-	/*
-	 * These bits say the device is running, and should keep running for
-	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
-	 * but they do not indicate that embedded SRAM is restored yet;
-	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
-	 * to/from host DRAM when sleeping/waking for power-saving.
-	 * Each direction takes approximately 1/4 millisecond; with this
-	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
-	 * series of register accesses are expected (e.g. reading Event Log),
-	 * to keep device from sleeping.
-	 *
-	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
-	 * SRAM is okay/restored.  We don't check that here because this call
-	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
-	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
-	 *
-	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
-	 * and do not save/restore SRAM when power cycling.
-	 */
-	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
-			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
-			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
-			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
-	if (ret < 0) {
-		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
-		return -EIO;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
-
-bool iwl_grab_nic_access(struct iwl_trans *trans)
-{
-	int ret = iwl_grab_nic_access_silent(trans);
-	if (unlikely(ret)) {
-		u32 val = iwl_read32(trans, CSR_GP_CNTRL);
-		WARN_ONCE(1, "Timeout waiting for hardware access "
-			     "(CSR_GP_CNTRL 0x%08x)\n", val);
-		return false;
-	}
-
-	return true;
-}
-EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
-
-void iwl_release_nic_access(struct iwl_trans *trans)
-{
-	lockdep_assert_held(&trans->reg_lock);
-	__iwl_clear_bit(trans, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-	/*
-	 * Above we read the CSR_GP_CNTRL register, which will flush
-	 * any previous writes, but we need the write that clears the
-	 * MAC_ACCESS_REQ bit to be performed before any other writes
-	 * scheduled on different CPUs (after we drop reg_lock).
-	 */
-	mmiowb();
-}
-EXPORT_SYMBOL_GPL(iwl_release_nic_access);
-
 u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
 {
-	u32 value;
+	u32 value = 0x5a5a5a5a;
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	iwl_grab_nic_access(trans);
-	value = iwl_read32(trans, reg);
-	iwl_release_nic_access(trans);
+	if (iwl_trans_grab_nic_access(trans, false)) {
+		value = iwl_read32(trans, reg);
+		iwl_trans_release_nic_access(trans);
+	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 
 	return value;
@@ -190,9 +120,9 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		iwl_write32(trans, reg, value);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
@@ -230,12 +160,13 @@ static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
 u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
 {
 	unsigned long flags;
-	u32 val;
+	u32 val = 0x5a5a5a5a;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	iwl_grab_nic_access(trans);
-	val = __iwl_read_prph(trans, ofs);
-	iwl_release_nic_access(trans);
+	if (iwl_trans_grab_nic_access(trans, false)) {
+		val = __iwl_read_prph(trans, ofs);
+		iwl_trans_release_nic_access(trans);
+	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 	return val;
 }
@@ -246,9 +177,9 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		__iwl_write_prph(trans, ofs, val);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
@@ -259,10 +190,10 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		__iwl_write_prph(trans, ofs,
 				 __iwl_read_prph(trans, ofs) | mask);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
@@ -274,10 +205,10 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		__iwl_write_prph(trans, ofs,
 				 (__iwl_read_prph(trans, ofs) & mask) | bits);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
@@ -289,66 +220,11 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 	u32 val;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		val = __iwl_read_prph(trans, ofs);
 		__iwl_write_prph(trans, ofs, (val & ~mask));
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
-
-void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       void *buf, int dwords)
-{
-	unsigned long flags;
-	int offs;
-	u32 *vals = buf;
-
-	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
-		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
-		for (offs = 0; offs < dwords; offs++)
-			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
-		iwl_release_nic_access(trans);
-	}
-	spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
-
-u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
-{
-	u32 value;
-
-	_iwl_read_targ_mem_dwords(trans, addr, &value, 1);
-
-	return value;
-}
-EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
-
-int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       const void *buf, int dwords)
-{
-	unsigned long flags;
-	int offs, result = 0;
-	const u32 *vals = buf;
-
-	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
-		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
-		for (offs = 0; offs < dwords; offs++)
-			iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
-		iwl_release_nic_access(trans);
-	} else
-		result = -EBUSY;
-	spin_unlock_irqrestore(&trans->reg_lock, flags);
-
-	return result;
-}
-EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
-
-int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
-{
-	return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
-}
-EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
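A pattern worth noting in the hunks above: the reworked readers pre-load a poison value (0x5a5a5a5a) and only overwrite it inside a successful grab/release window, so a NIC that fails to wake yields a recognizable result instead of garbage. A minimal userspace mock of that control flow (the lock and the hardware handshake are simulated; names here are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool nic_awake;	/* stands in for the MAC_ACCESS_REQ handshake */

static bool grab_nic_access(void) { return nic_awake; }
static void release_nic_access(void) { }
static uint32_t hw_read32(void) { return 0x12345678; }

static uint32_t read_direct32(void)
{
	uint32_t value = 0x5a5a5a5a;	/* poison: survives a failed grab */

	/* reg_lock would be taken here in the driver */
	if (grab_nic_access()) {
		value = hw_read32();
		release_nic_access();
	}
	/* ... and dropped here */
	return value;
}

int main(void)
{
	nic_awake = false;
	printf("asleep: 0x%08x\n", read_direct32());	/* 0x5a5a5a5a */
	nic_awake = true;
	printf("awake:  0x%08x\n", read_direct32());	/* 0x12345678 */
	return 0;
}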
@@ -53,6 +53,8 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
 
 void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
 void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
 
 void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
 
@@ -61,10 +63,6 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
 int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
 			int timeout);
 
-int iwl_grab_nic_access_silent(struct iwl_trans *trans);
-bool iwl_grab_nic_access(struct iwl_trans *trans);
-void iwl_release_nic_access(struct iwl_trans *trans);
-
 u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
 void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
 
@@ -76,19 +74,4 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
 			    u32 bits, u32 mask);
 void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
 
-void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       void *buf, int dwords);
-
-#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize)	\
-	do {							\
-		BUILD_BUG_ON((bufsize) % sizeof(u32));		\
-		_iwl_read_targ_mem_dwords(trans, addr, buf,	\
-					  (bufsize) / sizeof(u32));\
-	} while (0)
-
-int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       const void *buf, int dwords);
-
-u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
-int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
 #endif
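The removed iwl_read_targ_mem_bytes() macro rejected, at compile time, buffer sizes that are not a whole number of DWORDs; its replacement in iwl-trans.h keeps the check but guards it with __builtin_constant_p(). A standalone sketch of the idea -- the BUILD_BUG_ON() below is a simplified stand-in for the kernel's, and the helper names are hypothetical:

#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel's BUILD_BUG_ON(): a true condition
 * produces a negative array size and a compile error */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static void read_mem_dwords(unsigned addr, void *buf, int dwords)
{
	(void)addr;
	memset(buf, 0, dwords * 4);	/* mock device read */
}

#define read_mem_bytes(addr, buf, bufsize)				\
	do {								\
		BUILD_BUG_ON((bufsize) % sizeof(unsigned));		\
		read_mem_dwords(addr, buf, (bufsize) / sizeof(unsigned)); \
	} while (0)

int main(void)
{
	unsigned table[4];
	read_mem_bytes(0x800000, table, sizeof(table)); /* ok: 16 % 4 == 0 */
	/* read_mem_bytes(0x800000, table, 7); would fail to compile */
	printf("%u\n", table[0]);
	return 0;
}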
@@ -467,18 +467,20 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
 	if (IWL_ABS_PRPH_START <= addr &&
 	    addr < IWL_ABS_PRPH_START + PRPH_END) {
 		spin_lock_irqsave(&trans->reg_lock, flags);
-		iwl_grab_nic_access(trans);
+		if (!iwl_trans_grab_nic_access(trans, false)) {
+			spin_unlock_irqrestore(&trans->reg_lock, flags);
+			return -EIO;
+		}
 		iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
 			    addr | (3 << 24));
 		for (i = 0; i < size; i += 4)
 			*(u32 *)(tst->mem.addr + i) =
 				iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 		spin_unlock_irqrestore(&trans->reg_lock, flags);
 	} else { /* target memory (SRAM) */
-		_iwl_read_targ_mem_dwords(trans, addr,
-					  tst->mem.addr,
-					  tst->mem.size / 4);
+		iwl_trans_read_mem(trans, addr, tst->mem.addr,
+				   tst->mem.size / 4);
 	}
 
 	tst->mem.nchunks =
@@ -501,28 +503,31 @@ static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
 
 	if (IWL_ABS_PRPH_START <= addr &&
 	    addr < IWL_ABS_PRPH_START + PRPH_END) {
-		/* Periphery writes can be 1-3 bytes long, or DWORDs */
-		if (size < 4) {
-			memcpy(&val, buf, size);
-			spin_lock_irqsave(&trans->reg_lock, flags);
-			iwl_grab_nic_access(trans);
-			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
-				    (addr & 0x0000FFFF) |
-				    ((size - 1) << 24));
-			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
-			iwl_release_nic_access(trans);
-			/* needed after consecutive writes w/o read */
-			mmiowb();
+		/* Periphery writes can be 1-3 bytes long, or DWORDs */
+		if (size < 4) {
+			memcpy(&val, buf, size);
+			spin_lock_irqsave(&trans->reg_lock, flags);
+			if (!iwl_trans_grab_nic_access(trans, false)) {
+				spin_unlock_irqrestore(&trans->reg_lock, flags);
+				return -EIO;
+			}
+			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
+				    (addr & 0x0000FFFF) |
+				    ((size - 1) << 24));
+			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+			iwl_trans_release_nic_access(trans);
+			/* needed after consecutive writes w/o read */
+			mmiowb();
 			spin_unlock_irqrestore(&trans->reg_lock, flags);
 		} else {
 			if (size % 4)
 				return -EINVAL;
 			for (i = 0; i < size; i += 4)
 				iwl_write_prph(trans, addr+i,
 					       *(u32 *)(buf+i));
 		}
 	} else if (iwl_test_valid_hw_addr(tst, addr)) {
-		_iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
+		iwl_trans_write_mem(trans, addr, buf, size / 4);
 	} else {
 		return -EINVAL;
 	}
@@ -307,6 +307,16 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
 #define IWL_MAX_TID_COUNT	8
 #define IWL_FRAME_LIMIT	64
 
+/**
+ * enum iwl_d3_status - WoWLAN image/device status
+ * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
+ * @IWL_D3_STATUS_RESET: device was reset while suspended
+ */
+enum iwl_d3_status {
+	IWL_D3_STATUS_ALIVE,
+	IWL_D3_STATUS_RESET,
+};
+
 /**
  * struct iwl_trans_config - transport configuration
  *
@@ -321,6 +331,8 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
  * @n_no_reclaim_cmds: # of commands in list
  * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
  *	if unset 4k will be the RX buffer size
+ * @bc_table_dword: set to true if the BC table expects the byte count to be
+ *	in DWORD (as opposed to bytes)
  * @queue_watchdog_timeout: time (in ms) after which queues
  *	are considered stuck and will trigger device restart
  * @command_names: array of command names, must be 256 entries
@@ -335,6 +347,7 @@ struct iwl_trans_config {
 	int n_no_reclaim_cmds;
 
 	bool rx_buf_size_8k;
+	bool bc_table_dword;
 	unsigned int queue_watchdog_timeout;
 	const char **command_names;
 };
@@ -360,9 +373,12 @@ struct iwl_trans;
 *	May sleep
 * @stop_device:stops the whole device (embedded CPU put to reset)
 *	May sleep
- * @wowlan_suspend: put the device into the correct mode for WoWLAN during
+ * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
+ * @d3_resume: resume the device after WoWLAN, enabling the opmode to
+ *	talk to the WoWLAN image to get its status. This is optional, if not
+ *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
@@ -387,20 +403,27 @@ struct iwl_trans;
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
+ * @read_mem: read device's SRAM in DWORD
+ * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
+ *	will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
+ * @grab_nic_access: wake the NIC to be able to access non-HBUS regs
+ * @release_nic_access: let the NIC go to sleep
 */
 struct iwl_trans_ops {
 
 	int (*start_hw)(struct iwl_trans *iwl_trans);
 	void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
-	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
+	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
+			bool run_in_rfkill);
 	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
 	void (*stop_device)(struct iwl_trans *trans);
 
-	void (*wowlan_suspend)(struct iwl_trans *trans);
+	void (*d3_suspend)(struct iwl_trans *trans);
+	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status);
 
 	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
@@ -424,9 +447,15 @@ struct iwl_trans_ops {
 	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
 	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
 	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
+	int (*read_mem)(struct iwl_trans *trans, u32 addr,
+			void *buf, int dwords);
+	int (*write_mem)(struct iwl_trans *trans, u32 addr,
+			 void *buf, int dwords);
 	void (*configure)(struct iwl_trans *trans,
 			  const struct iwl_trans_config *trans_cfg);
 	void (*set_pmi)(struct iwl_trans *trans, bool state);
+	bool (*grab_nic_access)(struct iwl_trans *trans, bool silent);
+	void (*release_nic_access)(struct iwl_trans *trans);
 };
 
 /**
@@ -528,13 +557,14 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 }
 
 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
-				     const struct fw_img *fw)
+				     const struct fw_img *fw,
+				     bool run_in_rfkill)
 {
 	might_sleep();
 
 	WARN_ON_ONCE(!trans->rx_mpdu_cmd);
 
-	return trans->ops->start_fw(trans, fw);
+	return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }
 
 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
@@ -546,10 +576,17 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 	trans->state = IWL_TRANS_NO_FW;
 }
 
-static inline void iwl_trans_wowlan_suspend(struct iwl_trans *trans)
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans)
 {
 	might_sleep();
-	trans->ops->wowlan_suspend(trans);
+	trans->ops->d3_suspend(trans);
+}
+
+static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
+				      enum iwl_d3_status *status)
+{
+	might_sleep();
+	return trans->ops->d3_resume(trans, status);
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
@@ -636,7 +673,7 @@ static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
 }
 
 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
-					    struct dentry *dir)
+					   struct dentry *dir)
 {
 	return trans->ops->dbgfs_register(trans, dir);
 }
@@ -679,11 +716,57 @@ static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
 	return trans->ops->write_prph(trans, ofs, val);
 }
 
+static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+				     void *buf, int dwords)
+{
+	return trans->ops->read_mem(trans, addr, buf, dwords);
+}
+
+#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
+	do {								      \
+		if (__builtin_constant_p(bufsize))			      \
+			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
+		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
+	} while (0)
+
+static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
+{
+	u32 value;
+
+	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
+		return 0xa5a5a5a5;
+
+	return value;
+}
+
+static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+				      void *buf, int dwords)
+{
+	return trans->ops->write_mem(trans, addr, buf, dwords);
+}
+
+static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
+					u32 val)
+{
+	return iwl_trans_write_mem(trans, addr, &val, 1);
+}
+
 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
 {
 	trans->ops->set_pmi(trans, state);
 }
 
+#define iwl_trans_grab_nic_access(trans, silent)	\
+	__cond_lock(nic_access,				\
+		    likely((trans)->ops->grab_nic_access(trans, silent)))
+
+static inline void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans)
+{
+	trans->ops->release_nic_access(trans);
+	__release(nic_access);
+}
+
 /*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
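The iwl_trans_read_mem32() helper added above layers a DWORD read over the new read_mem op and returns the 0xa5a5a5a5 poison when the op fails. A compact, self-contained mock of this ops-table indirection (the names and mock values below are illustrative only, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct trans;

struct trans_ops {
	/* returns 0 on success, nonzero if the NIC could not be woken */
	int (*read_mem)(struct trans *t, uint32_t addr, void *buf, int dwords);
};

struct trans {
	const struct trans_ops *ops;
};

static uint32_t trans_read_mem32(struct trans *t, uint32_t addr)
{
	uint32_t value;

	if (t->ops->read_mem(t, addr, &value, 1))
		return 0xa5a5a5a5;	/* poison on failure, as in the hunk */
	return value;
}

static int pcie_read_mem(struct trans *t, uint32_t addr, void *buf, int dwords)
{
	uint32_t *vals = buf;
	for (int i = 0; i < dwords; i++)
		vals[i] = 0x1000 + addr + i;	/* mock SRAM contents */
	(void)t;
	return 0;
}

int main(void)
{
	struct trans_ops ops = { .read_mem = pcie_read_mem };
	struct trans t = { .ops = &ops };
	printf("0x%08x\n", trans_read_mem32(&t, 4));
	return 0;
}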
@@ -222,8 +222,6 @@ struct iwl_txq {
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @drv - pointer to iwl_drv
 * @trans: pointer to the generic transport area
- * @irq - the irq number for the device
- * @irq_requested: true when the irq has been requested
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
@@ -234,6 +232,7 @@ struct iwl_txq {
 * @status - transport specific status flags
 * @cmd_queue - command queue number
 * @rx_buf_size_8k: 8 kB RX buffer size
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @rx_page_order: page order for receive buffer size
 * @wd_timeout: queue watchdog timeout (jiffies)
 */
@@ -249,11 +248,9 @@ struct iwl_trans_pcie {
 	int ict_index;
 	u32 inta;
 	bool use_ict;
-	bool irq_requested;
 	struct tasklet_struct irq_tasklet;
 	struct isr_statistics isr_stats;
 
-	unsigned int irq;
 	spinlock_t irq_lock;
 	u32 inta_mask;
 	u32 scd_base_addr;
@@ -279,6 +276,7 @@ struct iwl_trans_pcie {
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
 
 	bool rx_buf_size_8k;
+	bool bc_table_dword;
 	u32 rx_page_order;
 
 	const char **command_names;
@@ -359,6 +357,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			    struct sk_buff_head *skbs);
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+
 /*****************************************************
 * Error handling
 ******************************************************/
@@ -436,7 +436,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 err_rb_stts:
 	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
 			  rxq->bd, rxq->bd_dma);
-	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+	rxq->bd_dma = 0;
 	rxq->bd = NULL;
 err_bd:
 	return -ENOMEM;
@@ -455,6 +455,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 
 	/* Stop Rx DMA */
 	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	/* reset and flush pointers */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
 
 	/* Reset driver's Rx queue write index */
 	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
@@ -491,7 +495,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-
 	int i, err;
 	unsigned long flags;
 
@@ -518,6 +521,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	rxq->read = rxq->write = 0;
 	rxq->write_actual = 0;
 	rxq->free_count = 0;
+	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock_irqrestore(&rxq->lock, flags);
 
 	iwl_pcie_rx_replenish(trans);
@@ -545,13 +549,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 		return;
 	}
 
+	cancel_work_sync(&trans_pcie->rx_replenish);
+
 	spin_lock_irqsave(&rxq->lock, flags);
 	iwl_pcie_rxq_free_rbs(trans);
 	spin_unlock_irqrestore(&rxq->lock, flags);
 
 	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
 			  rxq->bd, rxq->bd_dma);
-	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+	rxq->bd_dma = 0;
 	rxq->bd = NULL;
 
 	if (rxq->rb_stts)
@@ -560,7 +566,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 				  rxq->rb_stts, rxq->rb_stts_dma);
 	else
 		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
-	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+	rxq->rb_stts_dma = 0;
 	rxq->rb_stts = NULL;
 }
 
@@ -1166,6 +1172,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
 	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
 		 !trans_pcie->inta)
 		iwl_enable_interrupts(trans);
+	return IRQ_HANDLED;
 
 none:
 	/* re-enable interrupts here since we don't have anything to service. */
@@ -75,21 +75,16 @@
|
||||
#include "iwl-agn-hw.h"
|
||||
#include "internal.h"
|
||||
|
||||
static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
|
||||
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
|
||||
{
|
||||
/*
|
||||
* (for documentation purposes)
|
||||
* to set power to V_AUX, do:
|
||||
|
||||
if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
|
||||
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
|
||||
~APMG_PS_CTRL_MSK_PWR_SRC);
|
||||
*/
|
||||
|
||||
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
|
||||
~APMG_PS_CTRL_MSK_PWR_SRC);
|
||||
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
|
||||
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
|
||||
~APMG_PS_CTRL_MSK_PWR_SRC);
|
||||
else
|
||||
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
|
||||
~APMG_PS_CTRL_MSK_PWR_SRC);
|
||||
}
|
||||
|
||||
/* PCI registers */
|
||||
@@ -259,7 +254,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
|
||||
|
||||
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
|
||||
|
||||
iwl_pcie_set_pwr_vmain(trans);
|
||||
iwl_pcie_set_pwr(trans, false);
|
||||
|
||||
iwl_op_mode_nic_config(trans->op_mode);
|
||||
|
||||
@@ -435,7 +430,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
|
||||
}
|
||||
|
||||
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
|
||||
const struct fw_img *fw)
|
||||
const struct fw_img *fw, bool run_in_rfkill)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int ret;
|
||||
@@ -454,7 +449,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
|
||||
/* If platform's RF_KILL switch is NOT set to KILL */
|
||||
hw_rfkill = iwl_is_rfkill_set(trans);
|
||||
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
|
||||
if (hw_rfkill)
|
||||
if (hw_rfkill && !run_in_rfkill)
|
||||
return -ERFKILL;
|
||||
|
||||
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
|
||||
@@ -534,12 +529,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
|
||||
|
||||
iwl_enable_rfkill_int(trans);
|
||||
|
||||
/* wait to make sure we flush pending tasklet*/
|
||||
synchronize_irq(trans_pcie->irq);
|
||||
tasklet_kill(&trans_pcie->irq_tasklet);
|
||||
|
||||
cancel_work_sync(&trans_pcie->rx_replenish);
|
||||
|
||||
/* stop and reset the on-board processor */
|
||||
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
|
||||
|
||||
@@ -551,46 +540,87 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
|
||||
clear_bit(STATUS_RFKILL, &trans_pcie->status);
|
||||
}
|
||||
|
||||
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
|
||||
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
|
||||
{
|
||||
/* let the ucode operate on its own */
|
||||
iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
|
||||
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
|
||||
|
||||
iwl_disable_interrupts(trans);
|
||||
iwl_pcie_disable_ict(trans);
|
||||
|
||||
iwl_clear_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
iwl_clear_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
|
||||
/*
|
||||
* reset TX queues -- some of their registers reset during S3
|
||||
* so if we don't reset everything here the D3 image would try
|
||||
* to execute some invalid memory upon resume
|
||||
*/
|
||||
iwl_trans_pcie_tx_reset(trans);
|
||||
|
||||
iwl_pcie_set_pwr(trans, true);
|
||||
}
|
||||
|
||||
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
|
||||
enum iwl_d3_status *status)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
iwl_pcie_set_pwr(trans, false);
|
||||
|
||||
val = iwl_read32(trans, CSR_RESET);
|
||||
if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
|
||||
*status = IWL_D3_STATUS_RESET;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Also enables interrupts - none will happen as the device doesn't
|
||||
* know we're waking it up, only when the opmode actually tells it
|
||||
* after this call.
|
||||
*/
|
||||
iwl_pcie_reset_ict(trans);
|
||||
|
||||
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
|
||||
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
25000);
|
||||
if (ret) {
|
||||
IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
iwl_trans_pcie_tx_reset(trans);
|
||||
|
||||
ret = iwl_pcie_rx_init(trans);
|
||||
if (ret) {
|
||||
IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
|
||||
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
|
||||
|
||||
*status = IWL_D3_STATUS_ALIVE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int err;
|
||||
bool hw_rfkill;
|
||||
|
||||
trans_pcie->inta_mask = CSR_INI_SET_MASK;
|
||||
|
||||
if (!trans_pcie->irq_requested) {
|
||||
tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
|
||||
iwl_pcie_tasklet, (unsigned long)trans);
|
||||
|
||||
iwl_pcie_alloc_ict(trans);
|
||||
|
||||
err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
|
||||
IRQF_SHARED, DRV_NAME, trans);
|
||||
if (err) {
|
||||
IWL_ERR(trans, "Error allocating IRQ %d\n",
|
||||
trans_pcie->irq);
|
||||
goto error;
|
||||
}
|
||||
|
||||
trans_pcie->irq_requested = true;
|
||||
}
|
||||
int err;
|
||||
|
||||
err = iwl_pcie_prepare_card_hw(trans);
|
||||
if (err) {
|
||||
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
|
||||
goto err_free_irq;
|
||||
return err;
|
||||
}
|
||||
|
||||
iwl_pcie_apm_init(trans);
|
||||
@@ -601,15 +631,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
|
||||
hw_rfkill = iwl_is_rfkill_set(trans);
|
||||
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
|
||||
|
||||
return err;
|
||||
|
||||
err_free_irq:
|
||||
trans_pcie->irq_requested = false;
|
||||
free_irq(trans_pcie->irq, trans);
|
||||
error:
|
||||
iwl_pcie_free_ict(trans);
|
||||
tasklet_kill(&trans_pcie->irq_tasklet);
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
|
||||
@@ -703,19 +725,21 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
|
||||
msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
|
||||
|
||||
trans_pcie->command_names = trans_cfg->command_names;
|
||||
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
|
||||
}
|
||||
|
||||
void iwl_trans_pcie_free(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
synchronize_irq(trans_pcie->pci_dev->irq);
|
||||
tasklet_kill(&trans_pcie->irq_tasklet);
|
||||
|
||||
iwl_pcie_tx_free(trans);
|
||||
iwl_pcie_rx_free(trans);
|
||||
|
||||
if (trans_pcie->irq_requested == true) {
|
||||
free_irq(trans_pcie->irq, trans);
|
||||
iwl_pcie_free_ict(trans);
|
||||
}
|
||||
free_irq(trans_pcie->pci_dev->irq, trans);
|
||||
iwl_pcie_free_ict(trans);
|
||||
|
||||
pci_disable_msi(trans_pcie->pci_dev);
|
||||
iounmap(trans_pcie->hw_base);
|
||||
@@ -751,13 +775,112 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
|
||||
hw_rfkill = iwl_is_rfkill_set(trans);
|
||||
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
|
||||
|
||||
if (!hw_rfkill)
|
||||
iwl_enable_interrupts(trans);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&trans->reg_lock);
|
||||
|
||||
/* this bit wakes up the NIC */
|
||||
__iwl_set_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
|
||||
/*
|
||||
* These bits say the device is running, and should keep running for
|
||||
* at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
|
||||
* but they do not indicate that embedded SRAM is restored yet;
|
||||
* 3945 and 4965 have volatile SRAM, and must save/restore contents
|
||||
* to/from host DRAM when sleeping/waking for power-saving.
|
||||
* Each direction takes approximately 1/4 millisecond; with this
|
||||
* overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
|
||||
* series of register accesses are expected (e.g. reading Event Log),
|
||||
* to keep device from sleeping.
|
||||
*
|
||||
* CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
|
||||
* SRAM is okay/restored. We don't check that here because this call
|
||||
* is just for hardware register access; but GP1 MAC_SLEEP check is a
|
||||
* good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
|
||||
*
|
||||
* 5000 series and later (including 1000 series) have non-volatile SRAM,
|
||||
* and do not save/restore SRAM when power cycling.
|
||||
*/
|
||||
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
|
||||
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
|
||||
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
|
||||
if (unlikely(ret < 0)) {
|
||||
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
|
||||
if (!silent) {
|
||||
u32 val = iwl_read32(trans, CSR_GP_CNTRL);
|
||||
WARN_ONCE(1,
|
||||
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
|
||||
val);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
|
||||
{
|
||||
lockdep_assert_held(&trans->reg_lock);
|
||||
__iwl_clear_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
/*
|
||||
* Above we read the CSR_GP_CNTRL register, which will flush
|
||||
* any previous writes, but we need the write that clears the
|
||||
* MAC_ACCESS_REQ bit to be performed before any other writes
|
||||
* scheduled on different CPUs (after we drop reg_lock).
|
||||
*/
|
||||
mmiowb();
|
||||
}

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
                                   void *buf, int dwords)
{
        unsigned long flags;
        int offs, ret = 0;
        u32 *vals = buf;

        spin_lock_irqsave(&trans->reg_lock, flags);
        if (iwl_trans_grab_nic_access(trans, false)) {
                iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
                for (offs = 0; offs < dwords; offs++)
                        vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
                iwl_trans_release_nic_access(trans);
        } else {
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&trans->reg_lock, flags);
        return ret;
}
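
Call sites throughout this patch move from iwl_read_targ_mem() to iwl_trans_read_mem32(). A plausible sketch of that helper as a one-dword convenience wrapper over the read_mem op above, assuming a trans-layer iwl_trans_read_mem() dispatcher; the exact poison value returned on failure is an assumption here:

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
        u32 value;

        /* on failure (NIC access not granted) return a poison pattern */
        if (iwl_trans_read_mem(trans, addr, &value, 1))
                return 0x5a5a5a5a;      /* assumed poison value */

        return value;
}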

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
                                    void *buf, int dwords)
{
        unsigned long flags;
        int offs, ret = 0;
        u32 *vals = buf;

        spin_lock_irqsave(&trans->reg_lock, flags);
        if (iwl_trans_grab_nic_access(trans, false)) {
                iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
                for (offs = 0; offs < dwords; offs++)
                        iwl_write32(trans, HBUS_TARG_MEM_WDAT,
                                    vals ? vals[offs] : 0);
                iwl_trans_release_nic_access(trans);
        } else {
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&trans->reg_lock, flags);
        return ret;
}
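
Note that write_mem deliberately accepts a NULL buffer and writes zeroes in that case (vals ? vals[offs] : 0); the iwl_pcie_tx_start() hunk further down relies on this to clear the scheduler's context, TX status and translation memory in a single call. A hypothetical helper built on that behaviour, to make the idiom explicit:

/* Hypothetical convenience wrapper; assumes the NULL-means-zero
 * behaviour of iwl_trans_pcie_write_mem() shown above. */
static int example_clear_mem(struct iwl_trans *trans, u32 addr, int dwords)
{
        return iwl_trans_write_mem(trans, addr, NULL, dwords);
}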

#define IWL_FLUSH_WAIT_MS       2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
@@ -767,6 +890,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
        struct iwl_queue *q;
        int cnt;
        unsigned long now = jiffies;
        u32 scd_sram_addr;
        u8 buf[16];
        int ret = 0;

        /* waiting for all the tx frames complete might take a while */
@@ -780,11 +905,50 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
                msleep(1);

                if (q->read_ptr != q->write_ptr) {
                        IWL_ERR(trans, "fail to flush all tx fifo queues\n");
                        IWL_ERR(trans,
                                "fail to flush all tx fifo queues Q %d\n", cnt);
                        ret = -ETIMEDOUT;
                        break;
                }
        }

        if (!ret)
                return 0;

        IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
                txq->q.read_ptr, txq->q.write_ptr);

        scd_sram_addr = trans_pcie->scd_base_addr +
                        SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
        iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

        iwl_print_hex_error(trans, buf, sizeof(buf));

        for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
                IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
                        iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
                u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
                bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
                u32 tbl_dw =
                        iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
                                             SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

                if (cnt & 0x1)
                        tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
                else
                        tbl_dw = tbl_dw & 0x0000FFFF;

                IWL_ERR(trans,
                        "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
                        cnt, active ? "" : "in", fifo, tbl_dw,
                        iwl_read_prph(trans,
                                      SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
                        iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
        }

        return ret;
}

@@ -1212,7 +1376,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .start_fw = iwl_trans_pcie_start_fw,
        .stop_device = iwl_trans_pcie_stop_device,

        .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
        .d3_suspend = iwl_trans_pcie_d3_suspend,
        .d3_resume = iwl_trans_pcie_d3_resume,

        .send_cmd = iwl_trans_pcie_send_hcmd,

@@ -1235,8 +1400,12 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .read32 = iwl_trans_pcie_read32,
        .read_prph = iwl_trans_pcie_read_prph,
        .write_prph = iwl_trans_pcie_write_prph,
        .read_mem = iwl_trans_pcie_read_mem,
        .write_mem = iwl_trans_pcie_write_mem,
        .configure = iwl_trans_pcie_configure,
        .set_pmi = iwl_trans_pcie_set_pmi,
        .grab_nic_access = iwl_trans_pcie_grab_nic_access,
        .release_nic_access = iwl_trans_pcie_release_nic_access
};
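
The ops table above is what the transport-agnostic layer dispatches through; the d3_suspend/d3_resume entries added here replace the old single wowlan_suspend hook. A sketch of the kind of inline wrapper the op-mode presumably calls (the name follows the op added above; treat the exact signature as an assumption):

static inline void iwl_trans_d3_suspend(struct iwl_trans *trans)
{
        /* dispatch through the per-transport function-pointer table */
        trans->ops->d3_suspend(trans);
}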

struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1318,7 +1487,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        }

        trans->dev = &pdev->dev;
        trans_pcie->irq = pdev->irq;
        trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
@@ -1344,8 +1512,27 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        if (!trans->dev_cmd_pool)
                goto out_pci_disable_msi;

        trans_pcie->inta_mask = CSR_INI_SET_MASK;

        tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
                     iwl_pcie_tasklet, (unsigned long)trans);

        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;

        err = request_irq(pdev->irq, iwl_pcie_isr_ict,
                          IRQF_SHARED, DRV_NAME, trans);
        if (err) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }

        return trans;

out_free_ict:
        iwl_pcie_free_ict(trans);
out_free_cmd_pool:
        kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
        pci_disable_msi(pdev);
out_pci_release_regions:

@@ -160,7 +160,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
        IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
                txq->q.read_ptr, txq->q.write_ptr);

        iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
        iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

        iwl_print_hex_error(trans, buf, sizeof(buf));

@@ -173,9 +173,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
                u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
                bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
                u32 tbl_dw =
                        iwl_read_targ_mem(trans,
                                          trans_pcie->scd_base_addr +
                                          SCD_TRANS_TBL_OFFSET_QUEUE(i));
                        iwl_trans_read_mem32(trans,
                                             trans_pcie->scd_base_addr +
                                             SCD_TRANS_TBL_OFFSET_QUEUE(i));

                if (i & 0x1)
                        tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
@@ -237,7 +237,10 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
                break;
        }

        bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
        if (trans_pcie->bc_table_dword)
                len = DIV_ROUND_UP(len, 4);

        bc_ent = cpu_to_le16(len | (sta_id << 12));

        scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
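
For reference, the byte-count table entry written above packs a 12-bit frame length and a 4-bit station id into one 16-bit word; with bc_table_dword set, the length is stored in dwords instead of bytes. A hypothetical pure helper restating that packing:

/* Hypothetical helper mirroring the packing above; the 0xFFF mask
 * makes the 12-bit length field explicit. */
static __le16 example_bc_entry(u16 len, u8 sta_id, bool bc_table_dword)
{
        if (bc_table_dword)
                len = DIV_ROUND_UP(len, 4);     /* bytes -> dwords */

        return cpu_to_le16((len & 0xFFF) | (sta_id << 12));
}
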
@@ -306,6 +309,9 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
                return;
        }

        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
                     txq->q.write_ptr);

        iwl_write_direct32(trans, HBUS_TARG_WRPTR,
                           txq->q.write_ptr | (txq_id << 8));

@@ -612,7 +618,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
        if (txq->q.n_bd) {
                dma_free_coherent(dev, sizeof(struct iwl_tfd) *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
                memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
                txq->q.dma_addr = 0;
        }

        kfree(txq->entries);
@@ -638,9 +644,11 @@ static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 a;
        int nq = trans->cfg->base_params->num_of_queues;
        int chan;
        u32 reg_val;
        int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
                            SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

        /* make sure all queues are not stopped/used */
        memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
@@ -652,20 +660,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
        WARN_ON(scd_base_addr != 0 &&
                scd_base_addr != trans_pcie->scd_base_addr);

        a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
        /* reset context data memory */
        for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
             a += 4)
                iwl_write_targ_mem(trans, a, 0);
        /* reset tx status memory */
        for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
             a += 4)
                iwl_write_targ_mem(trans, a, 0);
        for (; a < trans_pcie->scd_base_addr +
               SCD_TRANS_TBL_OFFSET_QUEUE(
                        trans->cfg->base_params->num_of_queues);
             a += 4)
                iwl_write_targ_mem(trans, a, 0);
        /* reset context data, TX status and translation data */
        iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
                            SCD_CONTEXT_MEM_LOWER_BOUND,
                            NULL, clear_dwords);

        iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
                       trans_pcie->scd_bc_tbls.dma >> 10);
@@ -697,6 +695,29 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
                        APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int txq_id;

        for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
             txq_id++) {
                struct iwl_txq *txq = &trans_pcie->txq[txq_id];

                iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
                                   txq->q.dma_addr >> 8);
                iwl_pcie_txq_unmap(trans, txq_id);
                txq->q.read_ptr = 0;
                txq->q.write_ptr = 0;
        }

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
                           trans_pcie->kw.dma >> 4);

        iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}
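
iwl_trans_pcie_tx_reset() re-points the hardware at each queue's descriptor ring after a D3 cycle; the FH_MEM_CBBC_QUEUE register presumably stores the ring base in 256-byte units, which is why the DMA address is shifted right by 8 above. As a standalone sketch:

/* Hypothetical helper: point the FH at a queue's descriptor ring.
 * The 256-byte-unit interpretation of the register is an inference
 * from the >> 8 used above. */
static void example_set_queue_base(struct iwl_trans *trans, int txq_id,
                                   dma_addr_t ring_dma)
{
        iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), ring_dma >> 8);
}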

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
@@ -1002,14 +1023,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
        tbl_dw_addr = trans_pcie->scd_base_addr +
                        SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

        tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
        tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

        if (txq_id & 0x1)
                tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
        else
                tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

        iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
        iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

        return 0;
}
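
Each 32-bit word of the RA/TID translation table holds the mappings for two queues, so iwl_pcie_txq_set_ratid_map() must read-modify-write only its own 16-bit half. The same bit manipulation as a hypothetical pure helper:

/* Hypothetical helper: merge one queue's ra_tid into the shared dword. */
static u32 example_merge_ratid(u32 tbl_dw, u16 scd_q2ratid, int txq_id)
{
        if (txq_id & 0x1)       /* odd queues own the high half */
                return ((u32)scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);

        return scd_q2ratid | (tbl_dw & 0xFFFF0000);     /* even: low half */
}
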
@@ -1068,9 +1089,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
        iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

        /* Set up Tx window size and frame limit for this queue */
        iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
        iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
                        SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
        iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
        iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
                        SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
                        ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                                SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
@@ -1101,8 +1122,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)

        iwl_pcie_txq_set_inactive(trans, txq_id);

        _iwl_write_targ_mem_dwords(trans, stts_addr,
                                   zero_val, ARRAY_SIZE(zero_val));
        iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
                            ARRAY_SIZE(zero_val));

        iwl_pcie_txq_unmap(trans, txq_id);

@@ -1642,10 +1663,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
                     le16_to_cpu(dev_cmd->hdr.sequence));
        IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));