Merge branch 'for-linville' of git://github.com/kvalo/ath6kl

This commit is contained in:
John W. Linville 2012-03-13 14:45:40 -04:00
Parent: 5d6a1b069b 9df2a0b709
Commit: 843dc6644b
25 changed files with 1247 additions and 686 deletions

View File

@ -1,5 +1,6 @@
#------------------------------------------------------------------------------
# Copyright (c) 2004-2010 Atheros Communications Inc.
# Copyright (c) 2004-2011 Atheros Communications Inc.
# Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
# All rights reserved.
#
#

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -105,7 +106,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
}
ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
targ_info->version, targ_info->type);
targ_info->version, targ_info->type);
return 0;
}
@ -192,7 +193,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);
ath6kl_dbg(ATH6KL_DBG_BMI,
"bmi write memory: addr: 0x%x, len: %d\n", addr, len);
"bmi write memory: addr: 0x%x, len: %d\n", addr, len);
len_remain = len;
while (len_remain) {
@ -434,7 +435,7 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
offset += sizeof(tx_len);
memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
tx_len);
tx_len);
offset += tx_len;
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -222,6 +223,29 @@ struct ath6kl_bmi_target_info {
__le32 type; /* target type */
} __packed;
#define ath6kl_bmi_write_hi32(ar, item, val) \
({ \
u32 addr; \
__le32 v; \
\
addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
v = cpu_to_le32(val); \
ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v)); \
})
#define ath6kl_bmi_read_hi32(ar, item, val) \
({ \
u32 addr, *check_type = val; \
__le32 tmp; \
int ret; \
\
(void) (check_type == val); \
addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
ret = ath6kl_bmi_read(ar, addr, (u8 *) &tmp, 4); \
*val = le32_to_cpu(tmp); \
ret; \
})
int ath6kl_bmi_init(struct ath6kl *ar);
void ath6kl_bmi_cleanup(struct ath6kl *ar);
void ath6kl_bmi_reset(struct ath6kl *ar);
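A minimal usage sketch for the new hi32 helpers (the hi_board_data / hi_board_ext_data item names, the function name and the error handling are illustrative assumptions, not part of this commit). Note that the (check_type == val) comparison inside the read macro is only a compile-time check that val really is a u32 pointer.
/* Hypothetical example: fetch and mirror a host-interest word over BMI. */
static int ath6kl_hi_item_example(struct ath6kl *ar)
{
	u32 board_addr;
	int ret;

	/* read a 32-bit host-interest item from the target */
	ret = ath6kl_bmi_read_hi32(ar, hi_board_data, &board_addr);
	if (ret)
		return ret;

	/* write a 32-bit host-interest item back to the target */
	return ath6kl_bmi_write_hi32(ar, hi_board_ext_data, board_addr);
}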

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -68,6 +69,10 @@ static struct ieee80211_rate ath6kl_rates[] = {
#define ath6kl_g_rates (ath6kl_rates + 0)
#define ath6kl_g_rates_size 12
#define ath6kl_g_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
IEEE80211_HT_CAP_SGI_20 | \
IEEE80211_HT_CAP_SGI_40)
static struct ieee80211_channel ath6kl_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
@ -112,6 +117,8 @@ static struct ieee80211_supported_band ath6kl_band_2ghz = {
.channels = ath6kl_2ghz_channels,
.n_bitrates = ath6kl_g_rates_size,
.bitrates = ath6kl_g_rates,
.ht_cap.cap = ath6kl_g_htcap,
.ht_cap.ht_supported = true,
};
static struct ieee80211_supported_band ath6kl_band_5ghz = {
@ -119,6 +126,8 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
.channels = ath6kl_5ghz_a_channels,
.n_bitrates = ath6kl_a_rates_size,
.bitrates = ath6kl_a_rates,
.ht_cap.cap = ath6kl_g_htcap,
.ht_cap.ht_supported = true,
};
#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
@ -381,7 +390,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
return false;
if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
ar->num_vif))
ar->num_vif))
return false;
if (type == NL80211_IFTYPE_STATION ||
@ -407,6 +416,12 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
return false;
}
static bool ath6kl_is_tx_pending(struct ath6kl *ar)
{
return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0;
}
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
@ -414,6 +429,7 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct ath6kl_vif *vif = netdev_priv(dev);
int status;
u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE;
u16 interval;
ath6kl_cfg80211_sscan_disable(vif);
@ -450,8 +466,8 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
* sleep until the command queue drains
*/
wait_event_interruptible_timeout(ar->event_wq,
ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
WMI_TIMEOUT);
ath6kl_is_tx_pending(ar),
WMI_TIMEOUT);
if (signal_pending(current)) {
ath6kl_err("cmd queue drain timeout\n");
up(&ar->sem);
@ -546,7 +562,7 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
if (!ar->usr_bss_filter) {
clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
ALL_BSS_FILTER, 0) != 0) {
ALL_BSS_FILTER, 0) != 0) {
ath6kl_err("couldn't set bss filtering\n");
up(&ar->sem);
return -EIO;
@ -568,6 +584,20 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
vif->grp_crypto_len, vif->ch_hint);
vif->reconnect_flag = 0;
if (vif->nw_type == INFRA_NETWORK) {
interval = max_t(u16, vif->listen_intvl_t,
ATH6KL_MAX_WOW_LISTEN_INTL);
status = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
interval,
0);
if (status) {
ath6kl_err("couldn't set listen intervel\n");
up(&ar->sem);
return status;
}
}
status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
vif->dot11_auth_mode, vif->auth_mode,
vif->prwise_crypto,
@ -590,8 +620,8 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
((vif->auth_mode == WPA_PSK_AUTH)
|| (vif->auth_mode == WPA2_PSK_AUTH))) {
((vif->auth_mode == WPA_PSK_AUTH) ||
(vif->auth_mode == WPA2_PSK_AUTH))) {
mod_timer(&vif->disconnect_timer,
jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
}
@ -824,13 +854,13 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
if (vif->sme_state == SME_CONNECTING) {
cfg80211_connect_result(vif->ndev,
bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
} else if (vif->sme_state == SME_CONNECTED) {
cfg80211_disconnected(vif->ndev, reason,
NULL, 0, GFP_KERNEL);
NULL, 0, GFP_KERNEL);
}
vif->sme_state = SME_DISCONNECTED;
@ -876,19 +906,14 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
request->ssids[i].ssid);
}
/*
* FIXME: we should clear the IE in fw if it's not set so just
* remove the check altogether
*/
if (request->ie) {
ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
ath6kl_err("failed to set Probe Request appie for "
"scan");
return ret;
}
/* this also clears IE in fw if it's not set */
ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
ath6kl_err("failed to set Probe Request appie for "
"scan");
return ret;
}
/*
@ -917,7 +942,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
force_fg_scan = 1;
if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
ar->fw_capabilities)) {
ar->fw_capabilities)) {
/*
* If capable of doing P2P mgmt operations using
* station interface, send additional information like
@ -926,14 +951,17 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
*/
ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
WMI_LONG_SCAN, force_fg_scan,
false, 0, 0, n_channels,
channels, request->no_cck,
false, 0,
ATH6KL_FG_SCAN_INTERVAL,
n_channels, channels,
request->no_cck,
request->rates);
} else {
ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
WMI_LONG_SCAN, force_fg_scan,
false, 0, 0, n_channels,
channels);
false, 0,
ATH6KL_FG_SCAN_INTERVAL,
n_channels, channels);
}
if (ret)
ath6kl_err("wmi_startscan_cmd failed\n");
@ -1046,9 +1074,9 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOTSUPP;
}
if (((vif->auth_mode == WPA_PSK_AUTH)
|| (vif->auth_mode == WPA2_PSK_AUTH))
&& (key_usage & GROUP_USAGE))
if (((vif->auth_mode == WPA_PSK_AUTH) ||
(vif->auth_mode == WPA2_PSK_AUTH)) &&
(key_usage & GROUP_USAGE))
del_timer(&vif->disconnect_timer);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@ -1058,7 +1086,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (vif->nw_type == AP_NETWORK && !pairwise &&
(key_type == TKIP_CRYPT || key_type == AES_CRYPT ||
key_type == WAPI_CRYPT) && params) {
key_type == WAPI_CRYPT)) {
ar->ap_mode_bkey.valid = true;
ar->ap_mode_bkey.key_index = key_index;
ar->ap_mode_bkey.key_type = key_type;
@ -1263,7 +1291,6 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
struct ath6kl_vif *vif;
u8 ath6kl_dbm;
int dbm = MBM_TO_DBM(mbm);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
@ -1280,7 +1307,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
case NL80211_TX_POWER_AUTOMATIC:
return 0;
case NL80211_TX_POWER_LIMITED:
ar->tx_pwr = ath6kl_dbm = dbm;
ar->tx_pwr = dbm;
break;
default:
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
@ -1288,7 +1315,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm);
ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, dbm);
return 0;
}
@ -1349,7 +1376,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
}
if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
mode.pwr_mode) != 0) {
mode.pwr_mode) != 0) {
ath6kl_err("wmi_powermode_cmd failed\n");
return -EIO;
}
@ -1904,7 +1931,7 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
struct ath6kl_vif *vif;
int ret, left;
u32 filter = 0;
u16 i;
u16 i, bmiss_time;
u8 index = 0;
__be32 ips[MAX_IP_ADDRS];
@ -1941,6 +1968,34 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ret)
return ret;
netif_stop_queue(vif->ndev);
if (vif->nw_type != AP_NETWORK) {
ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_MAX_WOW_LISTEN_INTL,
0);
if (ret)
return ret;
/* Set listen interval x 15 times as bmiss time */
bmiss_time = ATH6KL_MAX_WOW_LISTEN_INTL * 15;
if (bmiss_time > ATH6KL_MAX_BMISS_TIME)
bmiss_time = ATH6KL_MAX_BMISS_TIME;
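/*
 * With the constants added in core.h below
 * (ATH6KL_MAX_WOW_LISTEN_INTL = 300 TU, ATH6KL_MAX_BMISS_TIME = 5000)
 * this works out to 300 * 15 = 4500, so the cap is not normally hit.
 */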
ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx,
bmiss_time, 0);
if (ret)
return ret;
ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
0xFFFF, 0, 0xFFFF, 0, 0, 0,
0, 0, 0, 0);
if (ret)
return ret;
}
ar->state = ATH6KL_STATE_SUSPENDING;
/* Setup own IP addr for ARP agent. */
in_dev = __in_dev_get_rtnl(vif->ndev);
if (!in_dev)
@ -2019,15 +2074,46 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
if (!vif)
return -EIO;
ar->state = ATH6KL_STATE_RESUMING;
ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_HOST_MODE_AWAKE);
return ret;
if (ret) {
ath6kl_warn("Failed to configure host sleep mode for "
"wow resume: %d\n", ret);
ar->state = ATH6KL_STATE_WOW;
return ret;
}
if (vif->nw_type != AP_NETWORK) {
ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
if (ret)
return ret;
ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
vif->listen_intvl_t, 0);
if (ret)
return ret;
ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx,
vif->bmiss_time_t, 0);
if (ret)
return ret;
}
ar->state = ATH6KL_STATE_ON;
netif_wake_queue(vif->ndev);
return 0;
}
int ath6kl_cfg80211_suspend(struct ath6kl *ar,
enum ath6kl_cfg_suspend_mode mode,
struct cfg80211_wowlan *wow)
{
enum ath6kl_state prev_state;
int ret;
switch (mode) {
@ -2038,11 +2124,14 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
/* Flush all non control pkts in TX path */
ath6kl_tx_data_cleanup(ar);
prev_state = ar->state;
ret = ath6kl_wow_suspend(ar, wow);
if (ret) {
ath6kl_err("wow suspend failed: %d\n", ret);
ar->state = prev_state;
return ret;
}
ar->state = ATH6KL_STATE_WOW;
break;
@ -2114,7 +2203,6 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)
return ret;
}
ar->state = ATH6KL_STATE_ON;
break;
case ATH6KL_STATE_DEEPSLEEP:
@ -2188,6 +2276,9 @@ static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
*/
void ath6kl_check_wow_status(struct ath6kl *ar)
{
if (ar->state == ATH6KL_STATE_SUSPENDING)
return;
if (ar->state == ATH6KL_STATE_WOW)
ath6kl_cfg80211_resume(ar);
}
@ -2275,30 +2366,27 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
struct ath6kl *ar = vif->ar;
int res;
if (info->beacon_ies) {
res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_BEACON,
info->beacon_ies,
info->beacon_ies_len);
if (res)
return res;
}
/* this also clears IE in fw if it's not set */
res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_BEACON,
info->beacon_ies,
info->beacon_ies_len);
if (res)
return res;
if (info->proberesp_ies) {
res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
info->proberesp_ies_len);
if (res)
return res;
}
/* this also clears IE in fw if it's not set */
res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
info->proberesp_ies_len);
if (res)
return res;
if (info->assocresp_ies) {
res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_ASSOC_RESP,
info->assocresp_ies,
info->assocresp_ies_len);
if (res)
return res;
}
/* this also clears IE in fw if it's not set */
res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_ASSOC_RESP,
info->assocresp_ies,
info->assocresp_ies_len);
if (res)
return res;
return 0;
}
@ -2309,6 +2397,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
struct ieee80211_mgmt *mgmt;
bool hidden = false;
u8 *ies;
int ies_len;
struct wmi_connect_cmd p;
@ -2345,7 +2434,11 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
memcpy(vif->ssid, info->ssid, info->ssid_len);
vif->ssid_len = info->ssid_len;
if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
return -EOPNOTSUPP; /* TODO */
hidden = true;
res = ath6kl_wmi_ap_hidden_ssid(ar->wmi, vif->fw_vif_idx, hidden);
if (res)
return res;
ret = ath6kl_set_auth_type(vif, info->auth_type);
if (ret)
@ -2584,6 +2677,76 @@ static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
return ret;
}
static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif,
u32 id,
u32 freq,
u32 wait,
const u8 *buf,
size_t len,
bool *more_data,
bool no_cck)
{
struct ieee80211_mgmt *mgmt;
struct ath6kl_sta *conn;
bool is_psq_empty = false;
struct ath6kl_mgmt_buff *mgmt_buf;
size_t mgmt_buf_size;
struct ath6kl *ar = vif->ar;
mgmt = (struct ieee80211_mgmt *) buf;
if (is_multicast_ether_addr(mgmt->da))
return false;
conn = ath6kl_find_sta(vif, mgmt->da);
if (!conn)
return false;
if (conn->sta_flags & STA_PS_SLEEP) {
if (!(conn->sta_flags & STA_PS_POLLED)) {
/* Queue the frames if the STA is sleeping */
mgmt_buf_size = len + sizeof(struct ath6kl_mgmt_buff);
mgmt_buf = kmalloc(mgmt_buf_size, GFP_KERNEL);
if (!mgmt_buf)
return false;
INIT_LIST_HEAD(&mgmt_buf->list);
mgmt_buf->id = id;
mgmt_buf->freq = freq;
mgmt_buf->wait = wait;
mgmt_buf->len = len;
mgmt_buf->no_cck = no_cck;
memcpy(mgmt_buf->buf, buf, len);
spin_lock_bh(&conn->psq_lock);
is_psq_empty = skb_queue_empty(&conn->psq) &&
(conn->mgmt_psq_len == 0);
list_add_tail(&mgmt_buf->list, &conn->mgmt_psq);
conn->mgmt_psq_len++;
spin_unlock_bh(&conn->psq_lock);
/*
* If this is the first pkt getting queued
* for this STA, update the PVB for this
* STA.
*/
if (is_psq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
conn->aid, 1);
return true;
}
/*
* This tx is because of a PsPoll.
* Determine if MoreData bit has to be set.
*/
spin_lock_bh(&conn->psq_lock);
if (!skb_queue_empty(&conn->psq) || (conn->mgmt_psq_len != 0))
*more_data = true;
spin_unlock_bh(&conn->psq_lock);
}
return false;
}
static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
@ -2595,6 +2758,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ath6kl_vif *vif = netdev_priv(dev);
u32 id;
const struct ieee80211_mgmt *mgmt;
bool more_data, queued;
mgmt = (const struct ieee80211_mgmt *) buf;
if (buf + len >= mgmt->u.probe_resp.variable &&
@ -2620,22 +2784,19 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
*cookie = id;
if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
ar->fw_capabilities)) {
/*
* If capable of doing P2P mgmt operations using
* station interface, send additional information like
* supported rates to advertise and xmit rates for
* probe requests
*/
return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
chan->center_freq, wait,
buf, len, no_cck);
} else {
return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id,
chan->center_freq, wait,
buf, len);
/* AP mode Power saving processing */
if (vif->nw_type == AP_NETWORK) {
queued = ath6kl_mgmt_powersave_ap(vif,
id, chan->center_freq,
wait, buf,
len, &more_data, no_cck);
if (queued)
return 0;
}
return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
chan->center_freq, wait,
buf, len, no_cck);
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
@ -2929,7 +3090,10 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
vif->wdev.netdev = ndev;
vif->wdev.iftype = type;
vif->fw_vif_idx = fw_vif_idx;
vif->nw_type = vif->next_mode = nw_type;
vif->nw_type = nw_type;
vif->next_mode = nw_type;
vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
if (fw_vif_idx != 0)
@ -3009,18 +3173,36 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->max_sched_scan_ssids = 10;
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
ar->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
ret = wiphy_register(wiphy);
if (ret < 0) {
ath6kl_err("couldn't register wiphy device\n");
return ret;
}
ar->wiphy_registered = true;
return 0;
}
void ath6kl_cfg80211_cleanup(struct ath6kl *ar)
{
wiphy_unregister(ar->wiphy);
ar->wiphy_registered = false;
}
struct ath6kl *ath6kl_cfg80211_create(void)

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -26,12 +27,14 @@
unsigned int debug_mask;
static unsigned int suspend_mode;
static unsigned int wow_mode;
static unsigned int uart_debug;
static unsigned int ath6kl_p2p;
static unsigned int testmode;
module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
module_param(wow_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
@ -97,14 +100,59 @@ int ath6kl_core_init(struct ath6kl *ar)
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
/* setup access class priority mappings */
ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
ar->ac_stream_pri_map[WMM_AC_BE] = 1;
ar->ac_stream_pri_map[WMM_AC_VI] = 2;
ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
/* allocate some buffers that handle larger AMSDU frames */
ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
ath6kl_cookie_init(ar);
ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
if (suspend_mode &&
suspend_mode >= WLAN_POWER_STATE_CUT_PWR &&
suspend_mode <= WLAN_POWER_STATE_WOW)
ar->suspend_mode = suspend_mode;
else
ar->suspend_mode = 0;
if (suspend_mode == WLAN_POWER_STATE_WOW &&
(wow_mode == WLAN_POWER_STATE_CUT_PWR ||
wow_mode == WLAN_POWER_STATE_DEEP_SLEEP))
ar->wow_suspend_mode = wow_mode;
else
ar->wow_suspend_mode = 0;
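/*
 * i.e. wow_mode is only honoured as a secondary suspend mode when
 * suspend_mode itself is WLAN_POWER_STATE_WOW and wow_mode names one of
 * the two deeper states; any other combination leaves wow_suspend_mode 0.
 */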
if (uart_debug)
ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
set_bit(FIRST_BOOT, &ar->flag);
ath6kl_debug_init(ar);
ret = ath6kl_init_hw_start(ar);
if (ret) {
ath6kl_err("Failed to start hardware: %d\n", ret);
goto err_rxbuf_cleanup;
}
/* give our connected endpoints some buffers */
ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
ret = ath6kl_cfg80211_init(ar);
if (ret)
goto err_node_cleanup;
goto err_rxbuf_cleanup;
ret = ath6kl_debug_init(ar);
ret = ath6kl_debug_init_fs(ar);
if (ret) {
wiphy_unregister(ar->wiphy);
goto err_node_cleanup;
goto err_rxbuf_cleanup;
}
for (i = 0; i < ar->vif_max; i++)
@ -122,83 +170,18 @@ int ath6kl_core_init(struct ath6kl *ar)
ath6kl_err("Failed to instantiate a network device\n");
ret = -ENOMEM;
wiphy_unregister(ar->wiphy);
goto err_debug_init;
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
__func__, ndev->name, ndev, ar);
/* setup access class priority mappings */
ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
ar->ac_stream_pri_map[WMM_AC_BE] = 1;
ar->ac_stream_pri_map[WMM_AC_VI] = 2;
ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
/* give our connected endpoints some buffers */
ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
/* allocate some buffers that handle larger AMSDU frames */
ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
ath6kl_cookie_init(ar);
ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
if (suspend_mode &&
suspend_mode >= WLAN_POWER_STATE_CUT_PWR &&
suspend_mode <= WLAN_POWER_STATE_WOW)
ar->suspend_mode = suspend_mode;
else
ar->suspend_mode = 0;
if (uart_debug)
ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
ar->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
set_bit(FIRST_BOOT, &ar->flag);
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
ret = ath6kl_init_hw_start(ar);
if (ret) {
ath6kl_err("Failed to start hardware: %d\n", ret);
goto err_rxbuf_cleanup;
}
/*
* Set mac address which is received in ready event
* FIXME: Move to ath6kl_interface_add()
*/
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
__func__, ndev->name, ndev, ar);
return ret;
err_rxbuf_cleanup:
ath6kl_debug_cleanup(ar);
ath6kl_htc_flush_rx_buf(ar->htc_target);
ath6kl_cleanup_amsdu_rxbufs(ar);
rtnl_lock();
ath6kl_cfg80211_vif_cleanup(netdev_priv(ndev));
rtnl_unlock();
wiphy_unregister(ar->wiphy);
err_debug_init:
ath6kl_debug_cleanup(ar);
err_node_cleanup:
ath6kl_wmi_shutdown(ar->wmi);
clear_bit(WMI_ENABLED, &ar->flag);
ar->wmi = NULL;
@ -245,9 +228,7 @@ struct ath6kl *ath6kl_core_create(struct device *dev)
clear_bit(SKIP_SCAN, &ar->flag);
clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
ar->listen_intvl_b = A_DEFAULT_LISTEN_INTERVAL;
ar->tx_pwr = 0;
ar->intra_bss = 1;
ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
@ -261,6 +242,8 @@ struct ath6kl *ath6kl_core_create(struct device *dev)
spin_lock_init(&ar->sta_list[ctr].psq_lock);
skb_queue_head_init(&ar->sta_list[ctr].psq);
skb_queue_head_init(&ar->sta_list[ctr].apsdq);
ar->sta_list[ctr].mgmt_psq_len = 0;
INIT_LIST_HEAD(&ar->sta_list[ctr].mgmt_psq);
ar->sta_list[ctr].aggr_conn =
kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
if (!ar->sta_list[ctr].aggr_conn) {

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -59,8 +60,9 @@
#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
#define DISCON_TIMER_INTVAL 10000 /* in msec */
#define A_DEFAULT_LISTEN_INTERVAL 1 /* beacon intervals */
#define A_MAX_WOW_LISTEN_INTERVAL 1000
/* Channel dwell time in fg scan */
#define ATH6KL_FG_SCAN_INTERVAL 50 /* in ms */
/* includes also the null byte */
#define ATH6KL_FIRMWARE_MAGIC "QCA-ATH6KL"
@ -183,6 +185,11 @@ struct ath6kl_fw_ie {
#define MBOX_YIELD_LIMIT 99
#define ATH6KL_DEFAULT_LISTEN_INTVAL 100 /* in TUs */
#define ATH6KL_DEFAULT_BMISS_TIME 1500
#define ATH6KL_MAX_WOW_LISTEN_INTL 300 /* in TUs */
#define ATH6KL_MAX_BMISS_TIME 5000
/* configuration flags */
/*
* ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the barker preamble in
@ -226,6 +233,12 @@ struct rxtid {
u32 hold_q_sz;
struct skb_hold_q *hold_q;
struct sk_buff_head q;
/*
* FIXME: No clue what this should protect. Apparently it should
* protect some of the fields above but they are also accessed
* without taking the lock.
*/
spinlock_t lock;
};
@ -285,6 +298,16 @@ struct ath6kl_cookie {
struct ath6kl_cookie *arc_list_next;
};
struct ath6kl_mgmt_buff {
struct list_head list;
u32 freq;
u32 wait;
u32 id;
bool no_cck;
size_t len;
u8 buf[0];
};
struct ath6kl_sta {
u16 sta_flags;
u8 mac[ETH_ALEN];
@ -294,7 +317,12 @@ struct ath6kl_sta {
u8 auth;
u8 wpa_ie[ATH6KL_MAX_IE];
struct sk_buff_head psq;
/* protects psq, mgmt_psq, apsdq, and mgmt_psq_len fields */
spinlock_t psq_lock;
struct list_head mgmt_psq;
size_t mgmt_psq_len;
u8 apsd_info;
struct sk_buff_head apsdq;
struct aggr_info_conn *aggr_conn;
@ -494,6 +522,8 @@ struct ath6kl_vif {
bool probe_req_report;
u16 next_chan;
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
u8 assoc_bss_dtim_period;
struct net_device_stats net_stats;
struct target_stats target_stats;
@ -521,6 +551,8 @@ enum ath6kl_dev_state {
enum ath6kl_state {
ATH6KL_STATE_OFF,
ATH6KL_STATE_ON,
ATH6KL_STATE_SUSPENDING,
ATH6KL_STATE_RESUMING,
ATH6KL_STATE_DEEPSLEEP,
ATH6KL_STATE_CUTPOWER,
ATH6KL_STATE_WOW,
@ -549,9 +581,14 @@ struct ath6kl {
unsigned int vif_max;
u8 max_norm_iface;
u8 avail_idx_map;
/*
* Protects at least amsdu_rx_buffer_queue, ath6kl_alloc_cookie()
* calls, tx_pending and total_tx_data_pend.
*/
spinlock_t lock;
struct semaphore sem;
u16 listen_intvl_b;
u8 lrssi_roam_threshold;
struct ath6kl_version version;
u32 target_type;
@ -577,7 +614,13 @@ struct ath6kl {
u8 sta_list_index;
struct ath6kl_req_key ap_mode_bkey;
struct sk_buff_head mcastpsq;
/*
* FIXME: protects access to mcastpsq but is actually useless as
* all skb_queue_*() functions provide serialisation themselves
*/
spinlock_t mcastpsq_lock;
u8 intra_bss;
struct wmi_ap_mode_stat ap_stats;
u8 ap_country_code[3];
@ -620,6 +663,7 @@ struct ath6kl {
u16 conf_flags;
u16 suspend_mode;
u16 wow_suspend_mode;
wait_queue_head_t event_wq;
struct ath6kl_mbox_info mbox_info;
@ -650,12 +694,16 @@ struct ath6kl {
bool p2p;
bool wiphy_registered;
#ifdef CONFIG_ATH6KL_DEBUG
struct {
struct circ_buf fwlog_buf;
spinlock_t fwlog_lock;
void *fwlog_tmp;
struct sk_buff_head fwlog_queue;
struct completion fwlog_completion;
bool fwlog_open;
u32 fwlog_mask;
unsigned int dbgfs_diag_reg;
u32 diag_reg_addr_wr;
u32 diag_reg_val_wr;
@ -727,10 +775,10 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
void aggr_module_destroy(struct aggr_info *aggr_info);
void aggr_reset_state(struct aggr_info_conn *aggr_conn);
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -16,7 +17,7 @@
#include "core.h"
#include <linux/circ_buf.h>
#include <linux/skbuff.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
@ -32,9 +33,8 @@ struct ath6kl_fwlog_slot {
u8 payload[0];
};
#define ATH6KL_FWLOG_SIZE 32768
#define ATH6KL_FWLOG_SLOT_SIZE (sizeof(struct ath6kl_fwlog_slot) + \
ATH6KL_FWLOG_PAYLOAD_SIZE)
#define ATH6KL_FWLOG_MAX_ENTRIES 20
#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
int ath6kl_printk(const char *level, const char *fmt, ...)
@ -119,29 +119,29 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
if (irq_proc_reg != NULL) {
ath6kl_dbg(ATH6KL_DBG_IRQ,
"Host Int status: 0x%x\n",
irq_proc_reg->host_int_status);
"Host Int status: 0x%x\n",
irq_proc_reg->host_int_status);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"CPU Int status: 0x%x\n",
irq_proc_reg->cpu_int_status);
irq_proc_reg->cpu_int_status);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"Error Int status: 0x%x\n",
irq_proc_reg->error_int_status);
irq_proc_reg->error_int_status);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"Counter Int status: 0x%x\n",
irq_proc_reg->counter_int_status);
irq_proc_reg->counter_int_status);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"Mbox Frame: 0x%x\n",
irq_proc_reg->mbox_frame);
irq_proc_reg->mbox_frame);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"Rx Lookahead Valid: 0x%x\n",
irq_proc_reg->rx_lkahd_valid);
irq_proc_reg->rx_lkahd_valid);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"Rx Lookahead 0: 0x%x\n",
irq_proc_reg->rx_lkahd[0]);
irq_proc_reg->rx_lkahd[0]);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"Rx Lookahead 1: 0x%x\n",
irq_proc_reg->rx_lkahd[1]);
irq_proc_reg->rx_lkahd[1]);
if (dev->ar->mbox_info.gmbox_addr != 0) {
/*
@ -149,27 +149,27 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
* additional state.
*/
ath6kl_dbg(ATH6KL_DBG_IRQ,
"GMBOX Host Int status 2: 0x%x\n",
irq_proc_reg->host_int_status2);
"GMBOX Host Int status 2: 0x%x\n",
irq_proc_reg->host_int_status2);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"GMBOX RX Avail: 0x%x\n",
irq_proc_reg->gmbox_rx_avail);
"GMBOX RX Avail: 0x%x\n",
irq_proc_reg->gmbox_rx_avail);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"GMBOX lookahead alias 0: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[0]);
"GMBOX lookahead alias 0: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[0]);
ath6kl_dbg(ATH6KL_DBG_IRQ,
"GMBOX lookahead alias 1: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[1]);
"GMBOX lookahead alias 1: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[1]);
}
}
if (irq_enable_reg != NULL) {
ath6kl_dbg(ATH6KL_DBG_IRQ,
"Int status Enable: 0x%x\n",
irq_enable_reg->int_status_en);
"Int status Enable: 0x%x\n",
irq_enable_reg->int_status_en);
ath6kl_dbg(ATH6KL_DBG_IRQ, "Counter Int status Enable: 0x%x\n",
irq_enable_reg->cntr_int_status_en);
irq_enable_reg->cntr_int_status_en);
}
ath6kl_dbg(ATH6KL_DBG_IRQ, "<------------------------------->\n");
}
@ -268,105 +268,103 @@ static const struct file_operations fops_war_stats = {
.llseek = default_llseek,
};
static void ath6kl_debug_fwlog_add(struct ath6kl *ar, const void *buf,
size_t buf_len)
{
struct circ_buf *fwlog = &ar->debug.fwlog_buf;
size_t space;
int i;
/* entries must all be equal size */
if (WARN_ON(buf_len != ATH6KL_FWLOG_SLOT_SIZE))
return;
space = CIRC_SPACE(fwlog->head, fwlog->tail, ATH6KL_FWLOG_SIZE);
if (space < buf_len)
/* discard oldest slot */
fwlog->tail = (fwlog->tail + ATH6KL_FWLOG_SLOT_SIZE) &
(ATH6KL_FWLOG_SIZE - 1);
for (i = 0; i < buf_len; i += space) {
space = CIRC_SPACE_TO_END(fwlog->head, fwlog->tail,
ATH6KL_FWLOG_SIZE);
if ((size_t) space > buf_len - i)
space = buf_len - i;
memcpy(&fwlog->buf[fwlog->head], buf, space);
fwlog->head = (fwlog->head + space) & (ATH6KL_FWLOG_SIZE - 1);
}
}
void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len)
{
struct ath6kl_fwlog_slot *slot = ar->debug.fwlog_tmp;
struct ath6kl_fwlog_slot *slot;
struct sk_buff *skb;
size_t slot_len;
if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
return;
spin_lock_bh(&ar->debug.fwlog_lock);
slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE;
skb = alloc_skb(slot_len, GFP_KERNEL);
if (!skb)
return;
slot = (struct ath6kl_fwlog_slot *) skb_put(skb, slot_len);
slot->timestamp = cpu_to_le32(jiffies);
slot->length = cpu_to_le32(len);
memcpy(slot->payload, buf, len);
slot_len = sizeof(*slot) + len;
/* Need to pad each record to fixed length ATH6KL_FWLOG_PAYLOAD_SIZE */
memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len);
if (slot_len < ATH6KL_FWLOG_SLOT_SIZE)
memset(slot->payload + len, 0,
ATH6KL_FWLOG_SLOT_SIZE - slot_len);
spin_lock(&ar->debug.fwlog_queue.lock);
ath6kl_debug_fwlog_add(ar, slot, ATH6KL_FWLOG_SLOT_SIZE);
__skb_queue_tail(&ar->debug.fwlog_queue, skb);
complete(&ar->debug.fwlog_completion);
spin_unlock_bh(&ar->debug.fwlog_lock);
/* drop oldest entries */
while (skb_queue_len(&ar->debug.fwlog_queue) >
ATH6KL_FWLOG_MAX_ENTRIES) {
skb = __skb_dequeue(&ar->debug.fwlog_queue);
kfree_skb(skb);
}
spin_unlock(&ar->debug.fwlog_queue.lock);
return;
}
static bool ath6kl_debug_fwlog_empty(struct ath6kl *ar)
static int ath6kl_fwlog_open(struct inode *inode, struct file *file)
{
return CIRC_CNT(ar->debug.fwlog_buf.head,
ar->debug.fwlog_buf.tail,
ATH6KL_FWLOG_SLOT_SIZE) == 0;
struct ath6kl *ar = inode->i_private;
if (ar->debug.fwlog_open)
return -EBUSY;
ar->debug.fwlog_open = true;
file->private_data = inode->i_private;
return 0;
}
static int ath6kl_fwlog_release(struct inode *inode, struct file *file)
{
struct ath6kl *ar = inode->i_private;
ar->debug.fwlog_open = false;
return 0;
}
static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct circ_buf *fwlog = &ar->debug.fwlog_buf;
size_t len = 0, buf_len = count;
struct sk_buff *skb;
ssize_t ret_cnt;
size_t len = 0;
char *buf;
int ccnt;
buf = vmalloc(buf_len);
buf = vmalloc(count);
if (!buf)
return -ENOMEM;
/* read undelivered logs from firmware */
ath6kl_read_fwlogs(ar);
spin_lock_bh(&ar->debug.fwlog_lock);
spin_lock(&ar->debug.fwlog_queue.lock);
while (len < buf_len && !ath6kl_debug_fwlog_empty(ar)) {
ccnt = CIRC_CNT_TO_END(fwlog->head, fwlog->tail,
ATH6KL_FWLOG_SIZE);
while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) {
if (skb->len > count - len) {
/* not enough space, put skb back and leave */
__skb_queue_head(&ar->debug.fwlog_queue, skb);
break;
}
if ((size_t) ccnt > buf_len - len)
ccnt = buf_len - len;
memcpy(buf + len, &fwlog->buf[fwlog->tail], ccnt);
len += ccnt;
memcpy(buf + len, skb->data, skb->len);
len += skb->len;
fwlog->tail = (fwlog->tail + ccnt) &
(ATH6KL_FWLOG_SIZE - 1);
kfree_skb(skb);
}
spin_unlock_bh(&ar->debug.fwlog_lock);
spin_unlock(&ar->debug.fwlog_queue.lock);
if (WARN_ON(len > buf_len))
len = buf_len;
/* FIXME: what to do if len == 0? */
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@ -376,12 +374,87 @@ static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf,
}
static const struct file_operations fops_fwlog = {
.open = ath6kl_debugfs_open,
.open = ath6kl_fwlog_open,
.release = ath6kl_fwlog_release,
.read = ath6kl_fwlog_read,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_fwlog_block_read(struct file *file,
char __user *user_buf,
size_t count,
loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct sk_buff *skb;
ssize_t ret_cnt;
size_t len = 0, not_copied;
char *buf;
int ret;
buf = vmalloc(count);
if (!buf)
return -ENOMEM;
spin_lock(&ar->debug.fwlog_queue.lock);
if (skb_queue_len(&ar->debug.fwlog_queue) == 0) {
/* we must init under queue lock */
init_completion(&ar->debug.fwlog_completion);
spin_unlock(&ar->debug.fwlog_queue.lock);
ret = wait_for_completion_interruptible(
&ar->debug.fwlog_completion);
if (ret == -ERESTARTSYS)
return ret;
spin_lock(&ar->debug.fwlog_queue.lock);
}
while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) {
if (skb->len > count - len) {
/* not enough space, put skb back and leave */
__skb_queue_head(&ar->debug.fwlog_queue, skb);
break;
}
memcpy(buf + len, skb->data, skb->len);
len += skb->len;
kfree_skb(skb);
}
spin_unlock(&ar->debug.fwlog_queue.lock);
/* FIXME: what to do if len == 0? */
not_copied = copy_to_user(user_buf, buf, len);
if (not_copied != 0) {
ret_cnt = -EFAULT;
goto out;
}
*ppos = *ppos + len;
ret_cnt = len;
out:
vfree(buf);
return ret_cnt;
}
static const struct file_operations fops_fwlog_block = {
.open = ath6kl_fwlog_open,
.release = ath6kl_fwlog_release,
.read = ath6kl_fwlog_block_read,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@ -667,9 +740,13 @@ static ssize_t ath6kl_endpoint_stats_read(struct file *file,
return -ENOMEM;
#define EPSTAT(name) \
len = print_endpoint_stat(target, buf, buf_len, len, \
offsetof(struct htc_endpoint_stats, name), \
#name)
do { \
len = print_endpoint_stat(target, buf, buf_len, len, \
offsetof(struct htc_endpoint_stats, \
name), \
#name); \
} while (0)
EPSTAT(cred_low_indicate);
EPSTAT(tx_issued);
EPSTAT(tx_pkt_bundled);
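The do { } while (0) wrapper added to EPSTAT is the standard idiom for making a macro expand to exactly one statement. A generic, stand-alone illustration of the if/else hazard the idiom guards against (hypothetical LOG_TWICE macro, not driver code):
#include <stdio.h>

#define LOG_TWICE(x) \
	do { \
		printf("%d\n", (x)); \
		printf("%d\n", (x)); \
	} while (0)

int main(void)
{
	int val = 3;

	/* Without the do/while wrapper the second printf would escape the
	 * if-branch and the trailing ';' would break the else. */
	if (val > 0)
		LOG_TWICE(val);
	else
		printf("non-positive\n");

	return 0;
}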
@ -779,17 +856,9 @@ static ssize_t ath6kl_regread_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
u8 buf[50];
unsigned int len;
unsigned long reg_addr;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (strict_strtoul(buf, 0, &reg_addr))
if (kstrtoul_from_user(user_buf, count, 0, &reg_addr))
return -EINVAL;
if ((reg_addr % 4) != 0)
@ -903,15 +972,8 @@ static ssize_t ath6kl_lrssi_roam_write(struct file *file,
{
struct ath6kl *ar = file->private_data;
unsigned long lrssi_roam_threshold;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (strict_strtoul(buf, 0, &lrssi_roam_threshold))
if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
return -EINVAL;
ar->lrssi_roam_threshold = lrssi_roam_threshold;
@ -1558,12 +1620,12 @@ static ssize_t ath6kl_listen_int_write(struct file *file,
if (kstrtou16(buf, 0, &listen_interval))
return -EINVAL;
if ((listen_interval < 1) || (listen_interval > 50))
if ((listen_interval < 15) || (listen_interval > 3000))
return -EINVAL;
ar->listen_intvl_b = listen_interval;
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, 0,
ar->listen_intvl_b);
vif->listen_intvl_t = listen_interval;
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
vif->listen_intvl_t, 0);
return count;
}
@ -1573,10 +1635,15 @@ static ssize_t ath6kl_listen_int_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[32];
int len;
len = scnprintf(buf, sizeof(buf), "%u\n", ar->listen_intvl_b);
vif = ath6kl_vif_first(ar);
if (!vif)
return -EIO;
len = scnprintf(buf, sizeof(buf), "%u\n", vif->listen_intvl_t);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@ -1649,33 +1716,29 @@ static const struct file_operations fops_power_params = {
.llseek = default_llseek,
};
int ath6kl_debug_init(struct ath6kl *ar)
void ath6kl_debug_init(struct ath6kl *ar)
{
ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
if (ar->debug.fwlog_buf.buf == NULL)
return -ENOMEM;
ar->debug.fwlog_tmp = kmalloc(ATH6KL_FWLOG_SLOT_SIZE, GFP_KERNEL);
if (ar->debug.fwlog_tmp == NULL) {
vfree(ar->debug.fwlog_buf.buf);
return -ENOMEM;
}
spin_lock_init(&ar->debug.fwlog_lock);
skb_queue_head_init(&ar->debug.fwlog_queue);
init_completion(&ar->debug.fwlog_completion);
/*
* Actually we are lying here but don't know how to read the mask
* value from the firmware.
*/
ar->debug.fwlog_mask = 0;
}
/*
* Initialisation needs to happen in two stages as fwlog events can come
* before cfg80211 is initialised, and debugfs depends on cfg80211
* initialisation.
*/
int ath6kl_debug_init_fs(struct ath6kl *ar)
{
ar->debugfs_phy = debugfs_create_dir("ath6kl",
ar->wiphy->debugfsdir);
if (!ar->debugfs_phy) {
vfree(ar->debug.fwlog_buf.buf);
kfree(ar->debug.fwlog_tmp);
if (!ar->debugfs_phy)
return -ENOMEM;
}
debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_tgt_stats);
@ -1689,6 +1752,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
&fops_fwlog);
debugfs_create_file("fwlog_block", S_IRUSR, ar->debugfs_phy, ar,
&fops_fwlog_block);
debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy,
ar, &fops_fwlog_mask);
@ -1723,27 +1789,26 @@ int ath6kl_debug_init(struct ath6kl *ar)
ar->debugfs_phy, ar, &fops_disconnect_timeout);
debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
&fops_create_qos);
&fops_create_qos);
debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
&fops_delete_qos);
&fops_delete_qos);
debugfs_create_file("bgscan_interval", S_IWUSR,
ar->debugfs_phy, ar, &fops_bgscan_int);
ar->debugfs_phy, ar, &fops_bgscan_int);
debugfs_create_file("listen_interval", S_IRUSR | S_IWUSR,
ar->debugfs_phy, ar, &fops_listen_int);
debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
&fops_power_params);
&fops_power_params);
return 0;
}
void ath6kl_debug_cleanup(struct ath6kl *ar)
{
vfree(ar->debug.fwlog_buf.buf);
kfree(ar->debug.fwlog_tmp);
skb_queue_purge(&ar->debug.fwlog_queue);
kfree(ar->debug.roam_tbl);
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -77,7 +78,8 @@ int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
size_t len);
void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
int ath6kl_debug_init(struct ath6kl *ar);
void ath6kl_debug_init(struct ath6kl *ar);
int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
@ -127,7 +129,11 @@ static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
{
}
static inline int ath6kl_debug_init(struct ath6kl *ar)
static inline void ath6kl_debug_init(struct ath6kl *ar)
{
}
static inline int ath6kl_debug_init_fs(struct ath6kl *ar)
{
return 0;
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -89,7 +90,7 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
}
ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
regdump_addr);
regdump_addr);
regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
/* fetch register dump data */
@ -106,9 +107,9 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) {
for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
4 * i,
i,
le32_to_cpu(regdump_val[i]),
le32_to_cpu(regdump_val[i + 1]),
le32_to_cpu(regdump_val[i + 2]),
@ -134,6 +135,7 @@ static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
ath6kl_hif_dump_fw_crash(dev->ar);
ath6kl_read_fwlogs(dev->ar);
return ret;
}
@ -283,7 +285,7 @@ static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
dev->irq_en_reg.cntr_int_status_en;
ath6kl_dbg(ATH6KL_DBG_IRQ,
"valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
"valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
counter_int_status);
/*
@ -358,7 +360,7 @@ static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
}
ath6kl_dbg(ATH6KL_DBG_IRQ,
"valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
"valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
cpu_int_status);
/* Clear the interrupt */

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -197,6 +198,8 @@ struct hif_scatter_req {
u8 *virt_dma_buf;
struct hif_scatter_item scat_list[1];
u32 scat_q_depth;
};
struct ath6kl_irq_proc_registers {
@ -220,6 +223,7 @@ struct ath6kl_irq_enable_reg {
} __packed;
struct ath6kl_device {
/* protects irq_proc_reg and irq_en_reg below */
spinlock_t lock;
struct ath6kl_irq_proc_registers irq_proc_reg;
struct ath6kl_irq_enable_reg irq_en_reg;

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -22,6 +23,9 @@
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
/* threshold to re-enable Tx bundling for an AC */
#define TX_RESUME_BUNDLE_THRESHOLD 1500
/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
struct htc_endpoint_credit_dist *ep_dist,
@ -168,31 +172,29 @@ static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
struct list_head *epdist_list)
{
struct htc_endpoint_credit_dist *cur_dist_list;
struct htc_endpoint_credit_dist *cur_list;
list_for_each_entry(cur_dist_list, epdist_list, list) {
if (cur_dist_list->endpoint == ENDPOINT_0)
list_for_each_entry(cur_list, epdist_list, list) {
if (cur_list->endpoint == ENDPOINT_0)
continue;
if (cur_dist_list->cred_to_dist > 0) {
cur_dist_list->credits +=
cur_dist_list->cred_to_dist;
cur_dist_list->cred_to_dist = 0;
if (cur_dist_list->credits >
cur_dist_list->cred_assngd)
if (cur_list->cred_to_dist > 0) {
cur_list->credits += cur_list->cred_to_dist;
cur_list->cred_to_dist = 0;
if (cur_list->credits > cur_list->cred_assngd)
ath6kl_credit_reduce(cred_info,
cur_dist_list,
cur_dist_list->cred_assngd);
cur_list,
cur_list->cred_assngd);
if (cur_dist_list->credits >
cur_dist_list->cred_norm)
ath6kl_credit_reduce(cred_info, cur_dist_list,
cur_dist_list->cred_norm);
if (cur_list->credits > cur_list->cred_norm)
ath6kl_credit_reduce(cred_info, cur_list,
cur_list->cred_norm);
if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
if (cur_dist_list->txq_depth == 0)
if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
if (cur_list->txq_depth == 0)
ath6kl_credit_reduce(cred_info,
cur_dist_list, 0);
cur_list, 0);
}
}
}
@ -460,8 +462,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target,
INIT_LIST_HEAD(&tx_compq);
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc tx scat complete len %d entries %d\n",
scat_req->len, scat_req->scat_entries);
"htc tx scat complete len %d entries %d\n",
scat_req->len, scat_req->scat_entries);
if (scat_req->status)
ath6kl_err("send scatter req failed: %d\n", scat_req->status);
@ -599,8 +601,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
list);
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc tx got packet 0x%p queue depth %d\n",
packet, get_queue_depth(&endpoint->txq));
"htc tx got packet 0x%p queue depth %d\n",
packet, get_queue_depth(&endpoint->txq));
len = CALC_TXRX_PADDED_LEN(target,
packet->act_len + HTC_HDR_LENGTH);
@ -670,6 +672,7 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
struct htc_packet *packet;
int i, len, rem_scat, cred_pad;
int status = 0;
u8 flags;
rem_scat = target->max_tx_bndl_sz;
@ -696,9 +699,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
scat_req->scat_list[i].packet = packet;
/* prepare packet and flag message as part of a send bundle */
ath6kl_htc_tx_prep_pkt(packet,
packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
cred_pad, packet->info.tx.seqno);
flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
ath6kl_htc_tx_prep_pkt(packet, flags,
cred_pad, packet->info.tx.seqno);
/* Make sure the buffer is 4-byte aligned */
ath6kl_htc_tx_buf_align(&packet->buf,
packet->act_len + HTC_HDR_LENGTH);
@ -744,6 +747,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
struct hif_scatter_req *scat_req = NULL;
int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
int status;
u32 txb_mask;
u8 ac = WMM_NUM_AC;
if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
status = 0;
@ -759,10 +768,35 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
if (!scat_req) {
/* no scatter resources */
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc tx no more scatter resources\n");
"htc tx no more scatter resources\n");
break;
}
if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
if (WMM_AC_BE == ac)
/*
* BE, BK have priorities and bit
* positions reversed
*/
txb_mask = (1 << WMM_AC_BK);
else
/*
* any AC with priority lower than
* itself
*/
txb_mask = ((1 << ac) - 1);
/*
* when the scatter request resources drop below a
* certain threshold, disable Tx bundling for all
ACs with priority lower than the current requesting
* AC. Otherwise re-enable Tx bundling for them
*/
if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
target->tx_bndl_mask &= ~txb_mask;
else
target->tx_bndl_mask |= txb_mask;
}
ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
n_scat);
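The per-AC mask arithmetic in the comments above can be read as a small helper (hypothetical name; WMM_AC_* constants as in the driver, and BK itself is already skipped by the caller): for BE the mask is just BK's bit, otherwise it is every bit below the requesting AC.
/* Hypothetical helper mirroring the mask computation above: the set of ACs
 * whose bundling is toggled when the requesting AC sees scatter-request
 * pressure. */
static u32 ath6kl_lower_prio_txb_mask(u8 ac)
{
	/* BE and BK have their priorities and bit positions reversed */
	if (ac == WMM_AC_BE)
		return 1 << WMM_AC_BK;

	/* every AC with priority lower than the requesting one */
	return (1 << ac) - 1;
}
When scat_req->scat_q_depth drops below ATH6KL_SCATTER_REQS the driver clears these bits in target->tx_bndl_mask, and sets them again once the depth recovers.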
@ -806,6 +840,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
struct htc_packet *packet;
int bundle_sent;
int n_pkts_bundle;
u8 ac = WMM_NUM_AC;
spin_lock_bh(&target->tx_lock);
@ -823,6 +858,10 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
*/
INIT_LIST_HEAD(&txq);
if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
if (list_empty(&endpoint->txq))
@ -840,15 +879,18 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
while (true) {
/* try to send a bundle on each pass */
if ((target->tx_bndl_enable) &&
if ((target->tx_bndl_mask) &&
(get_queue_depth(&txq) >=
HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
int temp1 = 0, temp2 = 0;
ath6kl_htc_tx_bundle(endpoint, &txq,
&temp1, &temp2);
bundle_sent += temp1;
n_pkts_bundle += temp2;
/* check if bundling is enabled for an AC */
if (target->tx_bndl_mask & (1 << ac)) {
ath6kl_htc_tx_bundle(endpoint, &txq,
&temp1, &temp2);
bundle_sent += temp1;
n_pkts_bundle += temp2;
}
}
if (list_empty(&txq))
@ -867,6 +909,26 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
endpoint->ep_st.tx_bundles += bundle_sent;
endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
/*
* if an AC has bundling disabled and no tx bundling
has occurred continuously for a certain number of TX,
* enable tx bundling for this AC
*/
if (!bundle_sent) {
if (!(target->tx_bndl_mask & (1 << ac)) &&
(ac < WMM_NUM_AC)) {
if (++target->ac_tx_count[ac] >=
TX_RESUME_BUNDLE_THRESHOLD) {
target->ac_tx_count[ac] = 0;
target->tx_bndl_mask |= (1 << ac);
}
}
} else {
/* tx bundling will reset the counter */
if (ac < WMM_NUM_AC)
target->ac_tx_count[ac] = 0;
}
}
endpoint->tx_proc_cnt = 0;
@ -979,8 +1041,8 @@ static int htc_setup_tx_complete(struct htc_target *target)
memcpy(&setup_comp_ext->flags, &flags,
sizeof(setup_comp_ext->flags));
set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
sizeof(struct htc_setup_comp_ext_msg),
ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
sizeof(struct htc_setup_comp_ext_msg),
ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
} else {
struct htc_setup_comp_msg *setup_comp;
@ -988,8 +1050,8 @@ static int htc_setup_tx_complete(struct htc_target *target)
memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
sizeof(struct htc_setup_comp_msg),
ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
sizeof(struct htc_setup_comp_msg),
ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
}
/* we want synchronous operation */
@ -1088,9 +1150,9 @@ void ath6kl_htc_flush_txep(struct htc_target *target,
packet->status = -ECANCELED;
list_del(&packet->list);
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
packet, packet->act_len,
packet->endpoint, packet->info.tx.tag);
"htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
packet, packet->act_len,
packet->endpoint, packet->info.tx.tag);
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
@ -1490,7 +1552,7 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
if (packets->act_len > 0) {
ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
packets->act_len + HTC_HDR_LENGTH);
packets->act_len + HTC_HDR_LENGTH);
ath6kl_dbg_dump(ATH6KL_DBG_HTC,
"htc rx unexpected endpoint 0 message", "",
@ -1609,8 +1671,8 @@ static int htc_parse_trailer(struct htc_target *target,
}
lk_ahd = (struct htc_lookahead_report *) record_buf;
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
&& next_lk_ahds) {
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
next_lk_ahds) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
@ -2038,13 +2100,13 @@ fail_rx:
list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
list_del(&packet->list);
htc_reclaim_rxbuf(target, packet,
&target->endpoint[packet->endpoint]);
&target->endpoint[packet->endpoint]);
}
list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
list_del(&packet->list);
htc_reclaim_rxbuf(target, packet,
&target->endpoint[packet->endpoint]);
&target->endpoint[packet->endpoint]);
}
return status;
@ -2176,11 +2238,11 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
u32 look_ahead;
if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
HTC_TARGET_RESPONSE_TIMEOUT))
HTC_TARGET_RESPONSE_TIMEOUT))
return NULL;
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
"htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
htc_hdr = (struct htc_frame_hdr *)&look_ahead;
@ -2245,7 +2307,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
depth = get_queue_depth(pkt_queue);
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx add multiple ep id %d cnt %d len %d\n",
"htc rx add multiple ep id %d cnt %d len %d\n",
first_pkt->endpoint, depth, first_pkt->buf_len);
endpoint = &target->endpoint[first_pkt->endpoint];
@ -2271,8 +2333,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
if (target->ep_waiting == first_pkt->endpoint) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx blocked on ep %d, unblocking\n",
target->ep_waiting);
"htc rx blocked on ep %d, unblocking\n",
target->ep_waiting);
target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
target->ep_waiting = ENDPOINT_MAX;
rx_unblock = true;
@ -2309,7 +2371,21 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
"htc rx flush pkt 0x%p len %d ep %d\n",
packet, packet->buf_len,
packet->endpoint);
dev_kfree_skb(packet->pkt_cntxt);
/*
* Packets in the rx_bufq of endpoint 0 were originally
* queued from target->free_ctrl_rxbuf, where the packet
* and packet->buf_start are allocated separately with
* kmalloc(). For the other endpoints' rx_bufq the packet
* is allocated as part of an skb, with the packet sitting
* at skb->head. Take this difference into account when
* freeing the memory.
*/
if (packet->endpoint == ENDPOINT_0) {
kfree(packet->buf_start);
kfree(packet);
} else {
dev_kfree_skb(packet->pkt_cntxt);
}
spin_lock_bh(&target->rx_lock);
}
spin_unlock_bh(&target->rx_lock);
@ -2328,6 +2404,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
unsigned int max_msg_sz = 0;
int status = 0;
u16 msg_id;
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc connect service target 0x%p service id 0x%x\n",
@ -2371,9 +2448,10 @@ int ath6kl_htc_conn_service(struct htc_target *target,
}
resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
msg_id = le16_to_cpu(resp_msg->msg_id);
if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
|| (rx_pkt->act_len < sizeof(*resp_msg))) {
if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
(rx_pkt->act_len < sizeof(*resp_msg))) {
status = -ENOMEM;
goto fail_tx;
}
@ -2420,6 +2498,15 @@ int ath6kl_htc_conn_service(struct htc_target *target,
endpoint->cred_dist.endpoint = assigned_ep;
endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
switch (endpoint->svc_id) {
case WMI_DATA_BK_SVC:
endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
break;
default:
endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
break;
}
if (conn_req->max_rxmsg_sz) {
/*
* Override cred_per_msg calculation, this optimizes
@ -2517,7 +2604,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->max_rx_bndl_sz, target->max_tx_bndl_sz);
if (target->max_tx_bndl_sz)
target->tx_bndl_enable = true;
/* tx_bndl_mask is enabled per AC, each has 1 bit */
target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
if (target->max_rx_bndl_sz)
target->rx_bndl_enable = true;
@ -2532,7 +2620,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
* padding will spill into the next credit buffer
* which is fatal.
*/
target->tx_bndl_enable = false;
target->tx_bndl_mask = 0;
}
}
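With this change the single tx_bndl_enable flag becomes the per-AC bitmask tx_bndl_mask: bit n stands for access class n, and all WMM_NUM_AC bits (the header hunk below defines WMM_NUM_AC as 4) start out set whenever the target supports TX bundling. A minimal standalone sketch of the bit handling, independent of the driver structures:

#define WMM_NUM_AC	4

/* enable bundling for every AC: bits 0..3 -> mask 0xf */
static unsigned int tx_bndl_mask_init(void)
{
	return (1u << WMM_NUM_AC) - 1;
}

/* is bundling currently allowed for this AC? */
static int tx_bndl_enabled(unsigned int mask, unsigned int ac)
{
	return !!(mask & (1u << ac));
}

/* turn bundling off for one AC without touching the others */
static unsigned int tx_bndl_disable(unsigned int mask, unsigned int ac)
{
	return mask & ~(1u << ac);
}

Clearing the whole mask, as in the scatter-gather fallback just above, is simply mask = 0.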
@ -2589,8 +2677,8 @@ int ath6kl_htc_wait_target(struct htc_target *target)
}
ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
(target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
target->htc_tgt_ver);
(target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
target->htc_tgt_ver);
if (target->msg_per_bndl_max > 0)
htc_setup_msg_bndl(target);
@ -2784,14 +2872,14 @@ void ath6kl_htc_cleanup(struct htc_target *target)
ath6kl_hif_cleanup_scatter(target->dev->ar);
list_for_each_entry_safe(packet, tmp_packet,
&target->free_ctrl_txbuf, list) {
&target->free_ctrl_txbuf, list) {
list_del(&packet->list);
kfree(packet->buf_start);
kfree(packet);
}
list_for_each_entry_safe(packet, tmp_packet,
&target->free_ctrl_rxbuf, list) {
&target->free_ctrl_rxbuf, list) {
list_del(&packet->list);
kfree(packet->buf_start);
kfree(packet);



@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -87,6 +88,8 @@
#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
#define WMI_MAX_SERVICES 5
#define WMM_NUM_AC 4
/* reserved and used to flush ALL packets */
#define HTC_TX_PACKET_TAG_ALL 0
#define HTC_SERVICE_TX_PACKET_TAG 1
@ -498,6 +501,7 @@ struct htc_endpoint {
u8 seqno;
u32 conn_flags;
struct htc_endpoint_stats ep_st;
u16 tx_drop_packet_threshold;
};
struct htc_control_buffer {
@ -519,9 +523,16 @@ struct htc_target {
struct ath6kl_htc_credit_info *credit_info;
int tgt_creds;
unsigned int tgt_cred_sz;
/* protects free_ctrl_txbuf and free_ctrl_rxbuf */
spinlock_t htc_lock;
/* FIXME: does this protect rx_bufq and endpoint structures or what? */
spinlock_t rx_lock;
/* protects endpoint->txq */
spinlock_t tx_lock;
struct ath6kl_device *dev;
u32 htc_flags;
u32 rx_st_flags;
@ -531,7 +542,7 @@ struct htc_target {
/* max messages per bundle for HTC */
int msg_per_bndl_max;
bool tx_bndl_enable;
u32 tx_bndl_mask;
int rx_bndl_enable;
int max_rx_bndl_sz;
int max_tx_bndl_sz;
@ -543,6 +554,9 @@ struct htc_target {
int max_xfer_szper_scatreq;
int chk_irq_status_cnt;
/* counts the number of consecutive Tx without bundling, per AC */
u32 ac_tx_count[WMM_NUM_AC];
};
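The spinlock comments added to struct htc_target above spell out the locking rules: htc_lock guards the two free control-buffer lists, tx_lock guards each endpoint's txq, and rx_lock is explicitly flagged as under-documented. As a hedged illustration of the first rule (the helper name and its exact shape are invented for this sketch, not taken from the driver):

/* illustrative only: take one control packet off free_ctrl_rxbuf,
 * holding htc_lock just for the list manipulation */
static struct htc_packet *get_free_ctrl_rxbuf(struct htc_target *target)
{
	struct htc_packet *packet = NULL;

	spin_lock_bh(&target->htc_lock);
	if (!list_empty(&target->free_ctrl_rxbuf)) {
		packet = list_first_entry(&target->free_ctrl_rxbuf,
					  struct htc_packet, list);
		list_del(&packet->list);
	}
	spin_unlock_bh(&target->htc_lock);

	return packet;
}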
void *ath6kl_htc_create(struct ath6kl *ar);


@ -1,6 +1,7 @@
/*
* Copyright (c) 2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -74,7 +75,7 @@ static const struct ath6kl_hw hw_list[] = {
},
.fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE,
.fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
.fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
},
{
.id = AR6004_HW_1_0_VERSION,
@ -351,11 +352,7 @@ static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
blk_size |= ((u32)htc_ctrl_buf) << 16;
/* set the host interest area for the block size */
status = ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_mbox_io_block_sz)),
(u8 *)&blk_size,
4);
status = ath6kl_bmi_write_hi32(ar, hi_mbox_io_block_sz, blk_size);
if (status) {
ath6kl_err("bmi_write_memory for IO block size failed\n");
goto out;
@ -367,11 +364,8 @@ static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
if (mbox_isr_yield_val) {
/* set the host interest area for the mbox ISR yield limit */
status = ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_mbox_isr_yield_limit)),
(u8 *)&mbox_isr_yield_val,
4);
status = ath6kl_bmi_write_hi32(ar, hi_mbox_isr_yield_limit,
mbox_isr_yield_val);
if (status) {
ath6kl_err("bmi_write_memory for yield limit failed\n");
goto out;
@ -384,7 +378,6 @@ out:
static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
{
int status = 0;
int ret;
/*
@ -392,43 +385,54 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
* default values. Required if checksum offload is needed. Set
* RxMetaVersion to 2.
*/
if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
ar->rx_meta_ver, 0, 0)) {
ath6kl_err("unable to set the rx frame format\n");
status = -EIO;
ret = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
ar->rx_meta_ver, 0, 0);
if (ret) {
ath6kl_err("unable to set the rx frame format: %d\n", ret);
return ret;
}
if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
ath6kl_err("unable to set power save fail event policy\n");
status = -EIO;
if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) {
ret = ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
IGNORE_PS_FAIL_DURING_SCAN);
if (ret) {
ath6kl_err("unable to set power save fail event policy: %d\n",
ret);
return ret;
}
if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
ath6kl_err("unable to set barker preamble policy\n");
status = -EIO;
}
if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
ath6kl_err("unable to set keep alive interval\n");
status = -EIO;
}
if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
ath6kl_err("unable to set disconnect timeout\n");
status = -EIO;
if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) {
ret = ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
WMI_FOLLOW_BARKER_IN_ERP);
if (ret) {
ath6kl_err("unable to set barker preamble policy: %d\n",
ret);
return ret;
}
}
if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) {
ath6kl_err("unable to set txop bursting\n");
status = -EIO;
ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
WLAN_CONFIG_KEEP_ALIVE_INTERVAL);
if (ret) {
ath6kl_err("unable to set keep alive interval: %d\n", ret);
return ret;
}
ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
WLAN_CONFIG_DISCONNECT_TIMEOUT);
if (ret) {
ath6kl_err("unable to set disconnect timeout: %d\n", ret);
return ret;
}
if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) {
ret = ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED);
if (ret) {
ath6kl_err("unable to set txop bursting: %d\n", ret);
return ret;
}
}
if (ar->p2p && (ar->vif_max == 1 || idx)) {
ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
@ -452,7 +456,7 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
}
}
return status;
return ret;
}
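The rewrite of ath6kl_target_config_wlan_params above changes its failure behaviour: instead of pressing on after a failed WMI command and returning a generic -EIO collected in `status`, it now stops at the first failure and returns that command's own error code, which also gets logged. Reduced to a skeleton (step_one/step_two are stand-ins, not driver functions):

static int step_one(void);
static int step_two(void);

static int configure_params(void)
{
	int ret;

	ret = step_one();
	if (ret) {
		pr_err("step one failed: %d\n", ret);
		return ret;	/* propagate the real error, don't continue */
	}

	ret = step_two();
	if (ret) {
		pr_err("step two failed: %d\n", ret);
		return ret;
	}

	return 0;
}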
int ath6kl_configure_target(struct ath6kl *ar)
@ -462,8 +466,7 @@ int ath6kl_configure_target(struct ath6kl *ar)
int i, status;
param = !!(ar->conf_flags & ATH6KL_CONF_UART_DEBUG);
if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_serial_enable)), (u8 *)&param, 4)) {
if (ath6kl_bmi_write_hi32(ar, hi_serial_enable, param)) {
ath6kl_err("bmi_write_memory for uart debug failed\n");
return -EIO;
}
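ath6kl_bmi_write_hi32() and ath6kl_bmi_read_hi32() fold the host-interest address lookup, the pointer cast and the hard-coded 4-byte length into a single call, which is what every hunk in this function is doing. For one representative item, the two equivalent forms side by side (a sketch; `param` is the u32 local used above):

int status;

/* open-coded form being removed */
status = ath6kl_bmi_write(ar,
			  ath6kl_get_hi_item_addr(ar,
						  HI_ITEM(hi_serial_enable)),
			  (u8 *)&param, 4);

/* the same 32-bit host-interest write through the helper */
status = ath6kl_bmi_write_hi32(ar, hi_serial_enable, param);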
@ -499,11 +502,8 @@ int ath6kl_configure_target(struct ath6kl *ar)
if (ar->p2p && ar->vif_max == 1)
fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
param = HTC_PROTOCOL_VERSION;
if (ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_app_host_interest)),
(u8 *)&param, 4) != 0) {
if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest,
HTC_PROTOCOL_VERSION) != 0) {
ath6kl_err("bmi_write_memory for htc version failed\n");
return -EIO;
}
@ -511,10 +511,7 @@ int ath6kl_configure_target(struct ath6kl *ar)
/* set the firmware mode to STA/IBSS/AP */
param = 0;
if (ath6kl_bmi_read(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_option_flag)),
(u8 *)&param, 4) != 0) {
if (ath6kl_bmi_read_hi32(ar, hi_option_flag, &param) != 0) {
ath6kl_err("bmi_read_memory for setting fwmode failed\n");
return -EIO;
}
@ -526,11 +523,7 @@ int ath6kl_configure_target(struct ath6kl *ar)
param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
if (ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_option_flag)),
(u8 *)&param,
4) != 0) {
if (ath6kl_bmi_write_hi32(ar, hi_option_flag, param) != 0) {
ath6kl_err("bmi_write_memory for setting fwmode failed\n");
return -EIO;
}
@ -549,16 +542,13 @@ int ath6kl_configure_target(struct ath6kl *ar)
param = ar->hw.board_ext_data_addr;
ram_reserved_size = ar->hw.reserved_ram_size;
if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_ext_data)),
(u8 *)&param, 4) != 0) {
if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
return -EIO;
}
if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_end_ram_reserve_sz)),
(u8 *)&ram_reserved_size, 4) != 0) {
if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
ram_reserved_size) != 0) {
ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
return -EIO;
}
@ -569,20 +559,13 @@ int ath6kl_configure_target(struct ath6kl *ar)
return -EIO;
/* Configure GPIO AR600x UART */
param = ar->hw.uarttx_pin;
status = ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_dbg_uart_txpin)),
(u8 *)&param, 4);
status = ath6kl_bmi_write_hi32(ar, hi_dbg_uart_txpin,
ar->hw.uarttx_pin);
if (status)
return status;
/* Configure target refclk_hz */
param = ar->hw.refclk_hz;
status = ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_refclk_hz)),
(u8 *)&param, 4);
status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz, ar->hw.refclk_hz);
if (status)
return status;
@ -832,13 +815,13 @@ static int ath6kl_fetch_testscript_file(struct ath6kl *ar)
return 0;
snprintf(filename, sizeof(filename), "%s/%s",
ar->hw.fw.dir, ar->hw.fw.testscript);
ar->hw.fw.dir, ar->hw.fw.testscript);
ret = ath6kl_get_fw(ar, filename, &ar->fw_testscript,
&ar->fw_testscript_len);
if (ret) {
ath6kl_err("Failed to get testscript file %s: %d\n",
filename, ret);
filename, ret);
return ret;
}
@ -922,7 +905,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
switch (ie_id) {
case ATH6KL_FW_IE_OTP_IMAGE:
ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
ie_len);
ie_len);
ar->fw_otp = kmemdup(data, ie_len, GFP_KERNEL);
@ -935,7 +918,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
break;
case ATH6KL_FW_IE_FW_IMAGE:
ath6kl_dbg(ATH6KL_DBG_BOOT, "found fw image ie (%zd B)\n",
ie_len);
ie_len);
/* in testmode we might already have a fw file */
if (ar->fw != NULL)
@ -952,7 +935,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
break;
case ATH6KL_FW_IE_PATCH_IMAGE:
ath6kl_dbg(ATH6KL_DBG_BOOT, "found patch image ie (%zd B)\n",
ie_len);
ie_len);
ar->fw_patch = kmemdup(data, ie_len, GFP_KERNEL);
@ -1096,22 +1079,14 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
*/
if (ar->hw.board_addr != 0) {
board_address = ar->hw.board_addr;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_data)),
(u8 *) &board_address, 4);
ath6kl_bmi_write_hi32(ar, hi_board_data,
board_address);
} else {
ath6kl_bmi_read(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_data)),
(u8 *) &board_address, 4);
ath6kl_bmi_read_hi32(ar, hi_board_data, &board_address);
}
/* determine where in target ram to write extended board data */
ath6kl_bmi_read(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_ext_data)),
(u8 *) &board_ext_address, 4);
ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &board_ext_address);
if (ar->target_type == TARGET_TYPE_AR6003 &&
board_ext_address == 0) {
@ -1123,6 +1098,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
case TARGET_TYPE_AR6003:
board_data_size = AR6003_BOARD_DATA_SZ;
board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ;
if (ar->fw_board_len > (board_data_size + board_ext_data_size))
board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ_V2;
break;
case TARGET_TYPE_AR6004:
board_data_size = AR6004_BOARD_DATA_SZ;
@ -1154,10 +1131,7 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
/* record that extended board data is initialized */
param = (board_ext_data_size << 16) | 1;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_ext_data_config)),
(unsigned char *) &param, 4);
ath6kl_bmi_write_hi32(ar, hi_board_ext_data_config, param);
}
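Per the "record that extended board data is initialized" comment above, the value written to hi_board_ext_data_config packs two fields into one 32-bit word: the extended board data size in bits 31:16 and an "initialized" flag in bit 0. A standalone arithmetic sketch using the AR6003 sizes defined in the header hunk further down:

#include <stdint.h>

#define AR6003_BOARD_EXT_DATA_SZ	768
#define AR6003_BOARD_EXT_DATA_SZ_V2	1024

static uint32_t board_ext_cfg(uint32_t ext_sz)
{
	/* size in bits 31:16, "extended data initialized" flag in bit 0 */
	return (ext_sz << 16) | 1;
}

/* board_ext_cfg(AR6003_BOARD_EXT_DATA_SZ)    == 0x03000001
 * board_ext_cfg(AR6003_BOARD_EXT_DATA_SZ_V2) == 0x04000001 */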
if (ar->fw_board_len < board_data_size) {
@ -1178,11 +1152,7 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
}
/* record the fact that Board Data IS initialized */
param = 1;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_data_initialized)),
(u8 *)&param, 4);
ath6kl_bmi_write_hi32(ar, hi_board_data_initialized, 1);
return ret;
}
@ -1209,10 +1179,7 @@ static int ath6kl_upload_otp(struct ath6kl *ar)
}
/* read firmware start address */
ret = ath6kl_bmi_read(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_app_start)),
(u8 *) &address, sizeof(address));
ret = ath6kl_bmi_read_hi32(ar, hi_app_start, &address);
if (ret) {
ath6kl_err("Failed to read hi_app_start: %d\n", ret);
@ -1270,7 +1237,7 @@ static int ath6kl_upload_firmware(struct ath6kl *ar)
static int ath6kl_upload_patch(struct ath6kl *ar)
{
u32 address, param;
u32 address;
int ret;
if (ar->fw_patch == NULL)
@ -1287,18 +1254,14 @@ static int ath6kl_upload_patch(struct ath6kl *ar)
return ret;
}
param = address;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_dset_list_head)),
(unsigned char *) &param, 4);
ath6kl_bmi_write_hi32(ar, hi_dset_list_head, address);
return 0;
}
static int ath6kl_upload_testscript(struct ath6kl *ar)
{
u32 address, param;
u32 address;
int ret;
if (ar->testmode != 2)
@ -1310,7 +1273,7 @@ static int ath6kl_upload_testscript(struct ath6kl *ar)
address = ar->hw.testscript_addr;
ath6kl_dbg(ATH6KL_DBG_BOOT, "writing testscript to 0x%x (%zd B)\n",
address, ar->fw_testscript_len);
address, ar->fw_testscript_len);
ret = ath6kl_bmi_write(ar, address, ar->fw_testscript,
ar->fw_testscript_len);
@ -1319,23 +1282,9 @@ static int ath6kl_upload_testscript(struct ath6kl *ar)
return ret;
}
param = address;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_ota_testscript)),
(unsigned char *) &param, 4);
param = 4096;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_end_ram_reserve_sz)),
(unsigned char *) &param, 4);
param = 1;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_test_apps_related)),
(unsigned char *) &param, 4);
ath6kl_bmi_write_hi32(ar, hi_ota_testscript, address);
ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 4096);
ath6kl_bmi_write_hi32(ar, hi_test_apps_related, 1);
return 0;
}
@ -1346,7 +1295,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
int status = 0;
if (ar->target_type != TARGET_TYPE_AR6003 &&
ar->target_type != TARGET_TYPE_AR6004)
ar->target_type != TARGET_TYPE_AR6004)
return -EINVAL;
/* temporarily disable system sleep */
@ -1403,7 +1352,8 @@ static int ath6kl_init_upload(struct ath6kl *ar)
return status;
/* WAR to avoid SDIO CRC err */
if (ar->version.target_ver == AR6003_HW_2_0_VERSION) {
if (ar->version.target_ver == AR6003_HW_2_0_VERSION ||
ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
ath6kl_err("temporary war to avoid sdio crc error\n");
param = 0x20;
@ -1726,9 +1676,11 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
* configure NOT to reset the target during a debug session.
*/
ath6kl_dbg(ATH6KL_DBG_TRC,
"attempting to reset target on instance destroy\n");
"attempting to reset target on instance destroy\n");
ath6kl_reset_device(ar, ar->target_type, true, true);
clear_bit(WLAN_ENABLED, &ar->flag);
up(&ar->sem);
}
EXPORT_SYMBOL(ath6kl_stop_txrx);


@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -80,11 +81,21 @@ static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid,
static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
{
struct ath6kl_sta *sta = &ar->sta_list[i];
struct ath6kl_mgmt_buff *entry, *tmp;
/* empty the queued pkts in the PS queue if any */
spin_lock_bh(&sta->psq_lock);
skb_queue_purge(&sta->psq);
skb_queue_purge(&sta->apsdq);
if (sta->mgmt_psq_len != 0) {
list_for_each_entry_safe(entry, tmp, &sta->mgmt_psq, list) {
kfree(entry);
}
INIT_LIST_HEAD(&sta->mgmt_psq);
sta->mgmt_psq_len = 0;
}
spin_unlock_bh(&sta->psq_lock);
memset(&ar->ap_stats.sta[sta->aid - 1], 0,
@ -339,7 +350,7 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
__le32 data;
if (target_type != TARGET_TYPE_AR6003 &&
target_type != TARGET_TYPE_AR6004)
target_type != TARGET_TYPE_AR6004)
return;
data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
@ -588,11 +599,9 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
memcpy(vif->bssid, bssid, sizeof(vif->bssid));
vif->bss_ch = channel;
if ((vif->nw_type == INFRA_NETWORK)) {
ar->listen_intvl_b = listen_int;
if ((vif->nw_type == INFRA_NETWORK))
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
0, ar->listen_intvl_b);
}
vif->listen_intvl_t, 0);
netif_wake_queue(vif->ndev);
@ -810,6 +819,7 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
struct sk_buff *skb;
bool psq_empty = false;
struct ath6kl *ar = vif->ar;
struct ath6kl_mgmt_buff *mgmt_buf;
conn = ath6kl_find_sta_by_aid(ar, aid);
@ -820,7 +830,7 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
* becomes empty update the PVB for this station.
*/
spin_lock_bh(&conn->psq_lock);
psq_empty = skb_queue_empty(&conn->psq);
psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
@ -828,15 +838,31 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
return;
spin_lock_bh(&conn->psq_lock);
skb = skb_dequeue(&conn->psq);
spin_unlock_bh(&conn->psq_lock);
if (conn->mgmt_psq_len > 0) {
mgmt_buf = list_first_entry(&conn->mgmt_psq,
struct ath6kl_mgmt_buff, list);
list_del(&mgmt_buf->list);
conn->mgmt_psq_len--;
spin_unlock_bh(&conn->psq_lock);
conn->sta_flags |= STA_PS_POLLED;
ath6kl_data_tx(skb, vif->ndev);
conn->sta_flags &= ~STA_PS_POLLED;
conn->sta_flags |= STA_PS_POLLED;
ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx,
mgmt_buf->id, mgmt_buf->freq,
mgmt_buf->wait, mgmt_buf->buf,
mgmt_buf->len, mgmt_buf->no_cck);
conn->sta_flags &= ~STA_PS_POLLED;
kfree(mgmt_buf);
} else {
skb = skb_dequeue(&conn->psq);
spin_unlock_bh(&conn->psq_lock);
conn->sta_flags |= STA_PS_POLLED;
ath6kl_data_tx(skb, vif->ndev);
conn->sta_flags &= ~STA_PS_POLLED;
}
spin_lock_bh(&conn->psq_lock);
psq_empty = skb_queue_empty(&conn->psq);
psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
@ -922,8 +948,8 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
}
ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
assoc_resp_len, assoc_info,
prot_reason_status);
assoc_resp_len, assoc_info,
prot_reason_status);
aggr_reset_state(vif->aggr_cntxt->aggr_conn);
@ -943,9 +969,9 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
} else {
set_bit(CONNECT_PEND, &vif->flags);
if (((reason == ASSOC_FAILED) &&
(prot_reason_status == 0x11)) ||
((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
&& (vif->reconnect_flag == 1))) {
(prot_reason_status == 0x11)) ||
((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) &&
(vif->reconnect_flag == 1))) {
set_bit(CONNECTED, &vif->flags);
return;
}
@ -1079,7 +1105,7 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
if (mc_all_on || mc_all_off) {
/* Enable/disable all multicast */
ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast filter\n",
mc_all_on ? "enabling" : "disabling");
mc_all_on ? "enabling" : "disabling");
ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
mc_all_on);
if (ret)
@ -1092,7 +1118,7 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
found = false;
netdev_for_each_mc_addr(ha, ndev) {
if (memcmp(ha->addr, mc_filter->hw_addr,
ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
found = true;
break;
}
@ -1111,7 +1137,7 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
false);
if (ret) {
ath6kl_warn("Failed to remove multicast filter:%pM\n",
mc_filter->hw_addr);
mc_filter->hw_addr);
return;
}
@ -1126,7 +1152,7 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
found = false;
list_for_each_entry(mc_filter, &vif->mc_filter, list) {
if (memcmp(ha->addr, mc_filter->hw_addr,
ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
found = true;
break;
}
@ -1151,7 +1177,7 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
true);
if (ret) {
ath6kl_warn("Failed to add multicast filter :%pM\n",
mc_filter->hw_addr);
mc_filter->hw_addr);
kfree(mc_filter);
goto out;
}
@ -1184,5 +1210,7 @@ void init_netdev(struct net_device *dev)
sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
+ WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
return;
}


@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -31,6 +32,7 @@
struct ath6kl_sdio {
struct sdio_func *func;
/* protects access to bus_req_freeq */
spinlock_t lock;
/* free list */
@ -49,16 +51,20 @@ struct ath6kl_sdio {
/* scatter request list head */
struct list_head scat_req;
/* avoids disabling the irq while an interrupt is being handled */
struct mutex mtx_irq;
atomic_t irq_handling;
wait_queue_head_t irq_wq;
/* protects access to scat_req */
spinlock_t scat_lock;
bool scatter_enabled;
bool is_disabled;
const struct sdio_device_id *id;
struct work_struct wr_async_work;
struct list_head wr_asyncq;
/* protects access to wr_asyncq */
spinlock_t wr_async_lock;
};
@ -404,7 +410,10 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
return -ENOMEM;
mutex_lock(&ar_sdio->dma_buffer_mutex);
tbuf = ar_sdio->dma_buffer;
memcpy(tbuf, buf, len);
if (request & HIF_WRITE)
memcpy(tbuf, buf, len);
bounced = true;
} else
tbuf = buf;
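This hunk makes the copy into the DMA bounce buffer conditional: staging data into dma_buffer is only needed when the request is a write, while for reads the bytes land in the bounce buffer during the transfer and are only useful to copy back out once the transfer has completed. A hedged, self-contained sketch of that round trip; sdio_xfer() is a stand-in for the real transfer, not a driver or kernel function:

/* illustrative only: sdio_xfer() is a placeholder for the actual SDIO op */
static int sdio_xfer(u8 *dma_safe, size_t len, bool write);

static int bounce_rw(u8 *dma_safe, u8 *buf, size_t len, bool write)
{
	int ret;

	if (write)
		memcpy(dma_safe, buf, len);	/* stage outgoing bytes only */

	ret = sdio_xfer(dma_safe, len, write);

	if (!write && !ret)
		memcpy(buf, dma_safe, len);	/* copy received bytes back */

	return ret;
}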
@ -462,7 +471,7 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
ar_sdio = sdio_get_drvdata(func);
mutex_lock(&ar_sdio->mtx_irq);
atomic_set(&ar_sdio->irq_handling, 1);
/*
* Release the host during interrupts so we can pick it back up when
* we process commands.
@ -471,7 +480,10 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
sdio_claim_host(ar_sdio->func);
mutex_unlock(&ar_sdio->mtx_irq);
atomic_set(&ar_sdio->irq_handling, 0);
wake_up(&ar_sdio->irq_wq);
WARN_ON(status && status != -ECANCELED);
}
@ -572,6 +584,13 @@ static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
sdio_release_host(ar_sdio->func);
}
static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
return !atomic_read(&ar_sdio->irq_handling);
}
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
@ -579,14 +598,21 @@ static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
sdio_claim_host(ar_sdio->func);
mutex_lock(&ar_sdio->mtx_irq);
if (atomic_read(&ar_sdio->irq_handling)) {
sdio_release_host(ar_sdio->func);
ret = wait_event_interruptible(ar_sdio->irq_wq,
ath6kl_sdio_is_on_irq(ar));
if (ret)
return;
sdio_claim_host(ar_sdio->func);
}
ret = sdio_release_irq(ar_sdio->func);
if (ret)
ath6kl_err("Failed to release sdio irq: %d\n", ret);
mutex_unlock(&ar_sdio->mtx_irq);
sdio_release_host(ar_sdio->func);
}
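The mtx_irq mutex is replaced by an atomic irq_handling flag plus a waitqueue: the handler raises the flag, runs the bottom half with the host released, then clears the flag and wakes irq_wq, while irq_disable drops the host and sleeps until the flag clears before releasing the SDIO irq. A condensed, hedged sketch of just that handshake (struct and function names are invented, fields reduced to the two involved):

struct irq_sync {
	/* callers must init_waitqueue_head(&s->wq) and start with handling == 0 */
	atomic_t		handling;
	wait_queue_head_t	wq;
};

static void irq_handler_enter(struct irq_sync *s)
{
	atomic_set(&s->handling, 1);
}

static void irq_handler_exit(struct irq_sync *s)
{
	atomic_set(&s->handling, 0);
	wake_up(&s->wq);
}

/* returns 0 once no irq is being handled, or -ERESTARTSYS on a signal */
static int irq_wait_idle(struct irq_sync *s)
{
	return wait_event_interruptible(s->wq, !atomic_read(&s->handling));
}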
@ -601,6 +627,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
node = list_first_entry(&ar_sdio->scat_req,
struct hif_scatter_req, list);
list_del(&node->list);
node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
}
spin_unlock_bh(&ar_sdio->scat_lock);
@ -633,8 +661,8 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
return -EINVAL;
ath6kl_dbg(ATH6KL_DBG_SCATTER,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
if (request & HIF_SYNCHRONOUS)
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
@ -813,6 +841,7 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
mmc_pm_flag_t flags;
bool try_deepsleep = false;
int ret;
if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
@ -839,14 +868,22 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
goto cut_pwr;
ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
if (ret)
goto cut_pwr;
if (ret && ret != -ENOTCONN)
ath6kl_err("wow suspend failed: %d\n", ret);
return 0;
if (ret &&
(!ar->wow_suspend_mode ||
ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
try_deepsleep = true;
else if (ret &&
ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
goto cut_pwr;
if (!ret)
return 0;
}
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
!ar->suspend_mode) {
!ar->suspend_mode || try_deepsleep) {
flags = sdio_get_host_pm_caps(func);
if (!(flags & MMC_PM_KEEP_POWER))
@ -901,8 +938,15 @@ static int ath6kl_sdio_resume(struct ath6kl *ar)
case ATH6KL_STATE_WOW:
break;
case ATH6KL_STATE_SCHED_SCAN:
break;
case ATH6KL_STATE_SUSPENDING:
break;
case ATH6KL_STATE_RESUMING:
break;
}
ath6kl_cfg80211_resume(ar);
@ -981,7 +1025,7 @@ static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
(u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
if (status) {
ath6kl_err("%s: failed to read from window data addr\n",
__func__);
__func__);
return status;
}
@ -1285,7 +1329,6 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
spin_lock_init(&ar_sdio->scat_lock);
spin_lock_init(&ar_sdio->wr_async_lock);
mutex_init(&ar_sdio->dma_buffer_mutex);
mutex_init(&ar_sdio->mtx_irq);
INIT_LIST_HEAD(&ar_sdio->scat_req);
INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
@ -1293,6 +1336,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
init_waitqueue_head(&ar_sdio->irq_wq);
for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);


@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2010 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -19,6 +20,7 @@
#define AR6003_BOARD_DATA_SZ 1024
#define AR6003_BOARD_EXT_DATA_SZ 768
#define AR6003_BOARD_EXT_DATA_SZ_V2 1024
#define AR6004_BOARD_DATA_SZ 6144
#define AR6004_BOARD_EXT_DATA_SZ 0


@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above


@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above


@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -158,8 +159,8 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
*/
if (is_apsdq_empty) {
ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
vif->fw_vif_idx,
conn->aid, 1, 0);
vif->fw_vif_idx,
conn->aid, 1, 0);
}
*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
@ -284,6 +285,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
int status = 0;
struct ath6kl_cookie *cookie = NULL;
if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
return -EACCES;
spin_lock_bh(&ar->lock);
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
@ -359,6 +363,11 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
return 0;
}
if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) {
dev_kfree_skb(skb);
return 0;
}
if (!test_bit(WMI_READY, &ar->flag))
goto fail_tx;
@ -370,7 +379,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
if (test_bit(WMI_ENABLED, &ar->flag)) {
if ((dev->features & NETIF_F_IP_CSUM) &&
(csum == CHECKSUM_PARTIAL)) {
(csum == CHECKSUM_PARTIAL)) {
csum_start = skb->csum_start -
(skb_network_header(skb) - skb->head) +
sizeof(struct ath6kl_llc_snap_hdr);
@ -394,7 +403,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
}
if ((dev->features & NETIF_F_IP_CSUM) &&
(csum == CHECKSUM_PARTIAL)) {
(csum == CHECKSUM_PARTIAL)) {
meta_v2.csum_start = csum_start;
meta_v2.csum_dest = csum_dest;
@ -419,7 +428,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
}
if ((vif->nw_type == ADHOC_NETWORK) &&
ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
chk_adhoc_ps_mapping = true;
else {
/* get the stream mapping */
@ -593,7 +602,8 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
*/
if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
ar->hiac_stream_active_pri &&
ar->cookie_count <= MAX_HI_COOKIE_NUM)
ar->cookie_count <=
target->endpoint[endpoint].tx_drop_packet_threshold)
/*
* Give preference to the highest priority stream by
* dropping the packets which overflowed.
@ -876,7 +886,7 @@ void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
if (!IS_ALIGNED((unsigned long) skb->data, 4))
skb->data = PTR_ALIGN(skb->data - 4, 4);
set_htc_rxpkt_info(packet, skb, skb->data,
ATH6KL_BUFFER_SIZE, endpoint);
ATH6KL_BUFFER_SIZE, endpoint);
list_add_tail(&packet->list, &queue);
}
@ -1256,8 +1266,8 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
flags = 0;
ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
vif->fw_vif_idx,
conn->aid, 0, flags);
vif->fw_vif_idx,
conn->aid, 0, flags);
}
return;
@ -1296,7 +1306,15 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
skb_pull(skb, HTC_HDR_LENGTH);
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
skb->data, skb->len);
if (ept == ar->ctrl_ep) {
if (test_bit(WMI_ENABLED, &ar->flag)) {
ath6kl_check_wow_status(ar);
ath6kl_wmi_control_rx(ar->wmi, skb);
return;
}
if_idx =
wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
} else {
@ -1321,10 +1339,6 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
spin_unlock_bh(&vif->if_lock);
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
skb->data, skb->len);
skb->dev = vif->ndev;
if (!test_bit(WMI_ENABLED, &ar->flag)) {
@ -1336,11 +1350,6 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
ath6kl_check_wow_status(ar);
if (ept == ar->ctrl_ep) {
ath6kl_wmi_control_rx(ar->wmi, skb);
return;
}
min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
sizeof(struct ath6kl_llc_snap_hdr);
@ -1416,8 +1425,33 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
if (!(conn->sta_flags & STA_PS_SLEEP)) {
struct sk_buff *skbuff = NULL;
bool is_apsdq_empty;
struct ath6kl_mgmt_buff *mgmt;
u8 idx;
spin_lock_bh(&conn->psq_lock);
while (conn->mgmt_psq_len > 0) {
mgmt = list_first_entry(
&conn->mgmt_psq,
struct ath6kl_mgmt_buff,
list);
list_del(&mgmt->list);
conn->mgmt_psq_len--;
spin_unlock_bh(&conn->psq_lock);
idx = vif->fw_vif_idx;
ath6kl_wmi_send_mgmt_cmd(ar->wmi,
idx,
mgmt->id,
mgmt->freq,
mgmt->wait,
mgmt->buf,
mgmt->len,
mgmt->no_cck);
kfree(mgmt);
spin_lock_bh(&conn->psq_lock);
}
conn->mgmt_psq_len = 0;
while ((skbuff = skb_dequeue(&conn->psq))) {
spin_unlock_bh(&conn->psq_lock);
ath6kl_data_tx(skbuff, vif->ndev);
@ -1541,7 +1575,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
aggr_conn = vif->aggr_cntxt->aggr_conn;
if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
is_amsdu, skb)) {
is_amsdu, skb)) {
/* aggregation code will handle the skb */
return;
}


@ -1,5 +1,6 @@
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above


@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -126,7 +127,7 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
if (!is_ethertype(be16_to_cpu(type))) {
ath6kl_dbg(ATH6KL_DBG_WMI,
"%s: pkt is already in 802.3 format\n", __func__);
"%s: pkt is already in 802.3 format\n", __func__);
return 0;
}
@ -827,8 +828,8 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
/* WMM OUT (00:50:F2) */
if (pie[1] > 5
&& pie[6] == WMM_PARAM_OUI_SUBTYPE)
if (pie[1] > 5 &&
pie[6] == WMM_PARAM_OUI_SUBTYPE)
wmi->is_wmm_enabled = true;
}
break;
@ -912,17 +913,17 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
regpair = ath6kl_get_regpair((u16) reg_code);
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
regpair->regDmnEnum);
regpair->regDmnEnum);
}
if (country) {
if (country && wmi->parent_dev->wiphy_registered) {
alpha2[0] = country->isoName[0];
alpha2[1] = country->isoName[1];
regulatory_hint(wmi->parent_dev->wiphy, alpha2);
ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
alpha2[0], alpha2[1]);
alpha2[0], alpha2[1]);
}
}
@ -1033,8 +1034,9 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
if (len < 8 + 2 + 2)
return -EINVAL;
if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags)
&& memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
if (bih->frame_type == BEACON_FTYPE &&
test_bit(CONNECTED, &vif->flags) &&
memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
const u8 *tim;
tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
len - 8 - 2 - 2);
@ -1366,8 +1368,8 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
/* Upper threshold breached */
if (rssi < sq_thresh->upper_threshold[0]) {
ath6kl_dbg(ATH6KL_DBG_WMI,
"spurious upper rssi threshold event: %d\n",
rssi);
"spurious upper rssi threshold event: %d\n",
rssi);
} else if ((rssi < sq_thresh->upper_threshold[1]) &&
(rssi >= sq_thresh->upper_threshold[0])) {
new_threshold = WMI_RSSI_THRESHOLD1_ABOVE;
@ -1390,7 +1392,7 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
/* Lower threshold breached */
if (rssi > sq_thresh->lower_threshold[0]) {
ath6kl_dbg(ATH6KL_DBG_WMI,
"spurious lower rssi threshold event: %d %d\n",
"spurious lower rssi threshold event: %d %d\n",
rssi, sq_thresh->lower_threshold[0]);
} else if ((rssi > sq_thresh->lower_threshold[1]) &&
(rssi <= sq_thresh->lower_threshold[0])) {
@ -1551,8 +1553,8 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
/* Upper threshold breached */
if (snr < sq_thresh->upper_threshold[0]) {
ath6kl_dbg(ATH6KL_DBG_WMI,
"spurious upper snr threshold event: %d\n",
snr);
"spurious upper snr threshold event: %d\n",
snr);
} else if ((snr < sq_thresh->upper_threshold[1]) &&
(snr >= sq_thresh->upper_threshold[0])) {
new_threshold = WMI_SNR_THRESHOLD1_ABOVE;
@ -1569,8 +1571,8 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
/* Lower threshold breached */
if (snr > sq_thresh->lower_threshold[0]) {
ath6kl_dbg(ATH6KL_DBG_WMI,
"spurious lower snr threshold event: %d\n",
sq_thresh->lower_threshold[0]);
"spurious lower snr threshold event: %d\n",
sq_thresh->lower_threshold[0]);
} else if ((snr > sq_thresh->lower_threshold[1]) &&
(snr <= sq_thresh->lower_threshold[0])) {
new_threshold = WMI_SNR_THRESHOLD4_BELOW;
@ -2028,6 +2030,26 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
return ret;
}
int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx,
u16 bmiss_time, u16 num_beacons)
{
struct sk_buff *skb;
struct wmi_bmiss_time_cmd *cmd;
int ret;
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_bmiss_time_cmd *) skb->data;
cmd->bmiss_time = cpu_to_le16(bmiss_time);
cmd->num_beacons = cpu_to_le16(num_beacons);
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BMISS_TIME_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
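ath6kl_wmi_bmisstime_cmd follows the same skb-based template as ath6kl_wmi_listeninterval_cmd just above it: allocate a command buffer, fill the little-endian fields, and hand it to ath6kl_wmi_cmd_send. A caller would look roughly like this (the numeric values are placeholders for illustration, not defaults taken from the driver):

int ret;

/* hypothetical tuning call; 1500 and 10 are illustrative only */
ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx, 1500, 10);
if (ret)
	ath6kl_err("failed to set beacon miss time: %d\n", ret);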
int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
{
struct sk_buff *skb;
@ -2613,7 +2635,7 @@ int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
int ret;
if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
wow_mode != ATH6KL_WOW_MODE_DISABLE) {
wow_mode != ATH6KL_WOW_MODE_DISABLE) {
ath6kl_err("invalid wow mode: %d\n", wow_mode);
return -EINVAL;
}
@ -3014,6 +3036,22 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
NO_SYNC_WMIFLAG);
}
int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable)
{
struct sk_buff *skb;
struct wmi_ap_hidden_ssid_cmd *cmd;
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_ap_hidden_ssid_cmd *) skb->data;
cmd->hidden_ssid = enable ? 1 : 0;
return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_HIDDEN_SSID_CMDID,
NO_SYNC_WMIFLAG);
}
/* This command will be used to enable/disable AP uAPSD feature */
int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable)
{
@ -3183,8 +3221,9 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
* ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P
* mgmt operations using station interface.
*/
int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
u32 wait, const u8 *data, u16 data_len)
static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
u32 freq, u32 wait, const u8 *data,
u16 data_len)
{
struct sk_buff *skb;
struct wmi_send_action_cmd *p;
@ -3220,9 +3259,9 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
NO_SYNC_WMIFLAG);
}
int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
u32 wait, const u8 *data, u16 data_len,
u32 no_cck)
static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
u32 freq, u32 wait, const u8 *data,
u16 data_len, u32 no_cck)
{
struct sk_buff *skb;
struct wmi_send_mgmt_cmd *p;
@ -3259,6 +3298,32 @@ int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
NO_SYNC_WMIFLAG);
}
int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
u32 wait, const u8 *data, u16 data_len,
u32 no_cck)
{
int status;
struct ath6kl *ar = wmi->parent_dev;
if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
ar->fw_capabilities)) {
/*
* If capable of doing P2P mgmt operations using
* station interface, send additional information like
* supported rates to advertise and xmit rates for
* probe requests
*/
status = __ath6kl_wmi_send_mgmt_cmd(ar->wmi, if_idx, id, freq,
wait, data, data_len,
no_cck);
} else {
status = ath6kl_wmi_send_action_cmd(ar->wmi, if_idx, id, freq,
wait, data, data_len);
}
return status;
}
int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
const u8 *dst, const u8 *data,
u16 data_len)
@ -3370,48 +3435,108 @@ static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
}
/* Control Path */
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
/* Process interface-specific wmi events; the caller frees datap */
static int ath6kl_wmi_proc_events_vif(struct wmi *wmi, u16 if_idx, u16 cmd_id,
u8 *datap, u32 len)
{
struct wmi_cmd_hdr *cmd;
struct ath6kl_vif *vif;
u32 len;
u16 id;
u8 if_idx;
u8 *datap;
int ret = 0;
if (WARN_ON(skb == NULL))
return -EINVAL;
if (skb->len < sizeof(struct wmi_cmd_hdr)) {
ath6kl_err("bad packet 1\n");
dev_kfree_skb(skb);
return -EINVAL;
}
cmd = (struct wmi_cmd_hdr *) skb->data;
id = le16_to_cpu(cmd->cmd_id);
if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
datap = skb->data;
len = skb->len;
ath6kl_dbg(ATH6KL_DBG_WMI, "wmi rx id %d len %d\n", id, len);
ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
datap, len);
vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
if (!vif) {
ath6kl_dbg(ATH6KL_DBG_WMI,
"Wmi event for unavailable vif, vif_index:%d\n",
if_idx);
dev_kfree_skb(skb);
return -EINVAL;
}
switch (cmd_id) {
case WMI_CONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
return ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
case WMI_DISCONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
return ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
case WMI_TKIP_MICERR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
return ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
case WMI_BSSINFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
return ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
case WMI_NEIGHBOR_REPORT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
return ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
vif);
case WMI_SCAN_COMPLETE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
return ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
case WMI_REPORT_STATISTICS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
return ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
case WMI_CAC_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
return ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
case WMI_PSPOLL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
return ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
case WMI_DTIMEXPIRY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
return ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
case WMI_ADDBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
return ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
case WMI_DELBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
return ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
case WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID");
return ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(wmi, vif);
case WMI_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
return ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
return ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
len, vif);
case WMI_TX_STATUS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
return ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
case WMI_RX_PROBE_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
return ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
case WMI_RX_ACTION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
return ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
default:
ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", cmd_id);
return -EINVAL;
}
return 0;
}
static int ath6kl_wmi_proc_events(struct wmi *wmi, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd;
int ret = 0;
u32 len;
u16 id;
u8 if_idx;
u8 *datap;
cmd = (struct wmi_cmd_hdr *) skb->data;
id = le16_to_cpu(cmd->cmd_id);
if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
datap = skb->data;
len = skb->len;
ath6kl_dbg(ATH6KL_DBG_WMI, "wmi rx id %d len %d\n", id, len);
ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
datap, len);
switch (id) {
case WMI_GET_BITRATE_CMDID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
@ -3429,26 +3554,10 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_READY_EVENTID\n");
ret = ath6kl_wmi_ready_event_rx(wmi, datap, len);
break;
case WMI_CONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
break;
case WMI_DISCONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
break;
case WMI_PEER_NODE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
ret = ath6kl_wmi_peer_node_event_rx(wmi, datap, len);
break;
case WMI_TKIP_MICERR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
break;
case WMI_BSSINFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
break;
case WMI_REGDOMAIN_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
ath6kl_wmi_regdomain_event(wmi, datap, len);
@ -3457,23 +3566,10 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n");
ret = ath6kl_wmi_pstream_timeout_event_rx(wmi, datap, len);
break;
case WMI_NEIGHBOR_REPORT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
vif);
break;
case WMI_SCAN_COMPLETE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
break;
case WMI_CMDERROR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
ret = ath6kl_wmi_error_event_rx(wmi, datap, len);
break;
case WMI_REPORT_STATISTICS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
break;
case WMI_RSSI_THRESHOLD_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
ret = ath6kl_wmi_rssi_threshold_event_rx(wmi, datap, len);
@ -3493,10 +3589,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
ret = ath6kl_wmi_control_rx_xtnd(wmi, skb);
break;
case WMI_CAC_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
break;
case WMI_CHANNEL_CHANGE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
break;
@ -3536,28 +3628,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
ret = ath6kl_wmi_get_pmkid_list_event_rx(wmi, datap, len);
break;
case WMI_PSPOLL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
break;
case WMI_DTIMEXPIRY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
break;
case WMI_SET_PARAMS_REPLY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
break;
case WMI_ADDBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
break;
case WMI_ADDBA_RESP_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
break;
case WMI_DELBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
break;
case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_REPORT_BTCOEX_CONFIG_EVENTID\n");
@ -3570,52 +3646,39 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n");
ret = ath6kl_wmi_tx_complete_event_rx(datap, len);
break;
case WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID");
ret = ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(wmi, vif);
break;
case WMI_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
break;
case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
len, vif);
break;
case WMI_TX_STATUS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
break;
case WMI_RX_PROBE_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
break;
case WMI_P2P_CAPABILITIES_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
ret = ath6kl_wmi_p2p_capabilities_event_rx(datap, len);
break;
case WMI_RX_ACTION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
break;
case WMI_P2P_INFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
ret = ath6kl_wmi_p2p_info_event_rx(datap, len);
break;
default:
ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
ret = -EINVAL;
/* maybe the event is interface-specific */
ret = ath6kl_wmi_proc_events_vif(wmi, if_idx, id, datap, len);
break;
}
dev_kfree_skb(skb);
return ret;
}
/* Control Path */
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
{
if (WARN_ON(skb == NULL))
return -EINVAL;
if (skb->len < sizeof(struct wmi_cmd_hdr)) {
ath6kl_err("bad packet 1\n");
dev_kfree_skb(skb);
return -EINVAL;
}
return ath6kl_wmi_proc_events(wmi, skb);
}
void ath6kl_wmi_reset(struct wmi *wmi)
{
spin_lock_bh(&wmi->lock);


@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -110,6 +111,8 @@ struct wmi {
u8 fat_pipe_exist;
struct ath6kl *parent_dev;
u8 pwr_mode;
/* protects fat_pipe_exist and stream_exist_for_ac */
spinlock_t lock;
enum htc_endpoint_id ep_id;
struct sq_threshold_params
@ -997,6 +1000,12 @@ struct wmi_listen_int_cmd {
__le16 num_beacons;
} __packed;
/* WMI_SET_BMISS_TIME_CMDID */
struct wmi_bmiss_time_cmd {
__le16 bmiss_time;
__le16 num_beacons;
};
/* WMI_SET_POWER_MODE_CMDID */
enum wmi_power_mode {
REC_POWER = 0x01,
@ -1014,7 +1023,7 @@ struct wmi_power_mode_cmd {
*/
enum power_save_fail_event_policy {
SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
IGNORE_PS_FAIL_DURING_SCAN = 2,
};
struct wmi_power_params_cmd {
@ -1212,7 +1221,7 @@ struct wmi_snr_threshold_params_cmd {
enum wmi_preamble_policy {
WMI_IGNORE_BARKER_IN_ERP = 0,
WMI_DONOT_IGNORE_BARKER_IN_ERP
WMI_FOLLOW_BARKER_IN_ERP,
};
struct wmi_set_lpreamble_cmd {
@ -2128,6 +2137,10 @@ struct wmi_rx_frame_format_cmd {
u8 reserved[1];
} __packed;
struct wmi_ap_hidden_ssid_cmd {
u8 hidden_ssid;
} __packed;
/* AP mode events */
struct wmi_ap_set_apsd_cmd {
u8 enable;
@ -2413,6 +2426,8 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
u16 listen_interval,
u16 listen_beacons);
int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx,
u16 bmiss_time, u16 num_beacons);
int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
@ -2484,6 +2499,7 @@ u8 ath6kl_wmi_get_traffic_class(u8 user_priority);
u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri);
/* AP mode */
int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable);
int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
struct wmi_connect_cmd *p);
@ -2505,9 +2521,6 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
u32 dur);
int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
u32 wait, const u8 *data, u16 data_len);
int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
u32 wait, const u8 *data, u16 data_len,
u32 no_cck);