Merge branch 'for-linville' of git://github.com/kvalo/ath

This commit is contained in:
John W. Linville 2014-08-28 14:36:38 -04:00
Parents 7bb75da171 0fdc14e42b
Commit 59e25676cf
30 changed files with 4136 additions and 2207 deletions

View file

@ -25,6 +25,7 @@ config ATH10K_DEBUG
config ATH10K_DEBUGFS
bool "Atheros ath10k debugfs support"
depends on ATH10K
select RELAY
---help---
Enabled debugfs support

View file

@ -10,6 +10,7 @@ ath10k_core-y += mac.o \
wmi.o \
bmi.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o

View file

@ -22,7 +22,7 @@
void ath10k_bmi_start(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
}
@ -33,10 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
if (ar->bmi.done_sent) {
ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
@ -45,7 +45,7 @@ int ath10k_bmi_done(struct ath10k *ar)
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn("unable to write to the device: %d\n", ret);
ath10k_warn(ar, "unable to write to the device: %d\n", ret);
return ret;
}
@ -61,10 +61,10 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
u32 resplen = sizeof(resp.get_target_info);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
return -EBUSY;
}
@ -72,12 +72,12 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
ath10k_warn("unable to get target info from device\n");
ath10k_warn(ar, "unable to get target info from device\n");
return ret;
}
if (resplen < sizeof(resp.get_target_info)) {
ath10k_warn("invalid get_target_info response length (%d)\n",
ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
resplen);
return -EIO;
}
@ -97,11 +97,11 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
u32 rxlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
@ -115,7 +115,7 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
&resp, &rxlen);
if (ret) {
ath10k_warn("unable to read from the device (%d)\n",
ath10k_warn(ar, "unable to read from the device (%d)\n",
ret);
return ret;
}
@ -137,11 +137,11 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
u32 txlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
@ -159,7 +159,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn("unable to write to the device (%d)\n",
ath10k_warn(ar, "unable to write to the device (%d)\n",
ret);
return ret;
}
@ -183,11 +183,11 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
u32 resplen = sizeof(resp.execute);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
address, param);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
@ -197,19 +197,19 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
ath10k_warn("unable to read from the device\n");
ath10k_warn(ar, "unable to read from the device\n");
return ret;
}
if (resplen < sizeof(resp.execute)) {
ath10k_warn("invalid execute response length (%d)\n",
ath10k_warn(ar, "invalid execute response length (%d)\n",
resplen);
return -EIO;
}
*result = __le32_to_cpu(resp.execute.result);
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
return 0;
}
@ -221,11 +221,11 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
@ -241,7 +241,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn("unable to write to the device\n");
ath10k_warn(ar, "unable to write to the device\n");
return ret;
}
@ -258,11 +258,11 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
address);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
@ -271,7 +271,7 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn("unable to Start LZ Stream to the device\n");
ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
return ret;
}
@ -286,7 +286,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
u32 trailer_len = length - head_len;
int ret;
ath10k_dbg(ATH10K_DBG_BMI,
ath10k_dbg(ar, ATH10K_DBG_BMI,
"bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
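
The bmi.c hunks above are part of a tree-wide conversion: ath10k_dbg(), ath10k_warn(), ath10k_err() and ath10k_info() now take the struct ath10k instance as their first argument so output is attributed to a specific device through the dev_* printk helpers. A minimal sketch of the new calling convention (example_bmi_op is a hypothetical helper, not part of the driver):

static int example_bmi_op(struct ath10k *ar)
{
        ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi example op\n");

        if (ar->bmi.done_sent) {
                /* ath10k_warn() is now rate-limited via dev_warn_ratelimited() */
                ath10k_warn(ar, "command disallowed\n");
                return -EBUSY;
        }

        return 0;
}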

View file

@ -284,13 +284,9 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
int ret = 0;
if (nbytes > ce_state->src_sz_max)
ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
ret = ath10k_pci_wake(ar);
if (ret)
return ret;
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
ret = -ENOSR;
@ -325,7 +321,6 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
src_ring->write_index = write_index;
exit:
ath10k_pci_sleep(ar);
return ret;
}
@ -390,49 +385,57 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
return delta;
}
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_recv_context,
u32 buffer)
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index;
unsigned int sw_index;
int ret;
unsigned int write_index = dest_ring->write_index;
unsigned int sw_index = dest_ring->sw_index;
spin_lock_bh(&ar_pci->ce_lock);
write_index = dest_ring->write_index;
sw_index = dest_ring->sw_index;
lockdep_assert_held(&ar_pci->ce_lock);
ret = ath10k_pci_wake(ar);
if (ret)
goto out;
return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index = dest_ring->write_index;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc *base = dest_ring->base_addr_owner_space;
struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
u32 ctrl_addr = pipe->ctrl_addr;
/* Update destination descriptor */
desc->addr = __cpu_to_le32(buffer);
lockdep_assert_held(&ar_pci->ce_lock);
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
return -EIO;
desc->addr = __cpu_to_le32(paddr);
desc->nbytes = 0;
dest_ring->per_transfer_context[write_index] =
per_recv_context;
/* Update Destination Ring Write Index */
dest_ring->per_transfer_context[write_index] = ctx;
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
dest_ring->write_index = write_index;
ret = 0;
} else {
ret = -EIO;
}
ath10k_pci_sleep(ar);
out:
return 0;
}
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
spin_lock_bh(&ar_pci->ce_lock);
ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@ -588,7 +591,6 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
unsigned int sw_index = src_ring->sw_index;
struct ce_desc *sdesc, *sbase;
unsigned int read_index;
int ret;
if (src_ring->hw_index == sw_index) {
/*
@ -599,18 +601,12 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
* value of the HW index has become stale.
*/
ret = ath10k_pci_wake(ar);
if (ret)
return ret;
read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
if (read_index == 0xffffffff)
return -ENODEV;
read_index &= nentries_mask;
src_ring->hw_index = read_index;
ath10k_pci_sleep(ar);
}
read_index = src_ring->hw_index;
@ -731,11 +727,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
int ret;
ret = ath10k_pci_wake(ar);
if (ret)
return;
spin_lock_bh(&ar_pci->ce_lock);
@ -760,7 +751,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
spin_unlock_bh(&ar_pci->ce_lock);
ath10k_pci_sleep(ar);
}
/*
@ -771,13 +761,9 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
int ce_id, ret;
int ce_id;
u32 intr_summary;
ret = ath10k_pci_wake(ar);
if (ret)
return;
intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
@ -789,8 +775,6 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
ath10k_ce_per_engine_service(ar, ce_id);
}
ath10k_pci_sleep(ar);
}
/*
@ -800,16 +784,11 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
*
* Called with ce_lock held.
*/
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
int disable_copy_compl_intr)
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
int ret;
ret = ath10k_pci_wake(ar);
if (ret)
return;
bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
if ((!disable_copy_compl_intr) &&
(ce_state->send_cb || ce_state->recv_cb))
@ -818,17 +797,11 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
ath10k_pci_sleep(ar);
}
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
int ce_id, ret;
ret = ath10k_pci_wake(ar);
if (ret)
return ret;
int ce_id;
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@ -838,34 +811,16 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
ath10k_pci_sleep(ar);
return 0;
}
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts)
void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id;
spin_lock_bh(&ar_pci->ce_lock);
ce_state->send_cb = send_cb;
ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
spin_unlock_bh(&ar_pci->ce_lock);
}
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
spin_lock_bh(&ar_pci->ce_lock);
ce_state->recv_cb = recv_cb;
ath10k_ce_per_engine_handler_adjust(ce_state, 0);
spin_unlock_bh(&ar_pci->ce_lock);
for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}
static int ath10k_ce_init_src_ring(struct ath10k *ar,
@ -898,7 +853,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot init ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
@ -932,7 +887,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
@ -1067,7 +1022,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
* initialized by software/firmware.
*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr)
const struct ce_attr *attr,
void (*send_cb)(struct ath10k_ce_pipe *),
void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
@ -1084,39 +1041,37 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ret = ath10k_pci_wake(ar);
if (ret)
return ret;
spin_lock_bh(&ar_pci->ce_lock);
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
if (attr->src_nentries)
ce_state->send_cb = send_cb;
if (attr->dest_nentries)
ce_state->recv_cb = recv_cb;
spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) {
ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
goto out;
return ret;
}
}
if (attr->dest_nentries) {
ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
goto out;
return ret;
}
}
out:
ath10k_pci_sleep(ar);
return ret;
return 0;
}
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
@ -1140,16 +1095,8 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
int ret;
ret = ath10k_pci_wake(ar);
if (ret)
return;
ath10k_ce_deinit_src_ring(ar, ce_id);
ath10k_ce_deinit_dest_ring(ar, ce_id);
ath10k_pci_sleep(ar);
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
@ -1163,7 +1110,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
ret = PTR_ERR(ce_state->src_ring);
ath10k_err("failed to allocate copy engine source ring %d: %d\n",
ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
ce_id, ret);
ce_state->src_ring = NULL;
return ret;
@ -1175,7 +1122,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
attr);
if (IS_ERR(ce_state->dest_ring)) {
ret = PTR_ERR(ce_state->dest_ring);
ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
ce_id, ret);
ce_state->dest_ring = NULL;
return ret;
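
The copy-engine receive path is reworked in ce.c: the ath10k_pci_wake()/ath10k_pci_sleep() bracketing disappears, and buffer posting is split into __ath10k_ce_rx_num_free_bufs()/__ath10k_ce_rx_post_buf(), which require ce_lock to be held, plus the locked wrapper ath10k_ce_rx_post_buf(). A rough, hypothetical refill loop under the new interface (buffer and DMA-address handling is elided):

static void example_rx_refill(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
        struct ath10k *ar = pipe->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int num;

        spin_lock_bh(&ar_pci->ce_lock);

        /* Both helpers below assert that ce_lock is held. */
        num = __ath10k_ce_rx_num_free_bufs(pipe);
        while (num--) {
                if (__ath10k_ce_rx_post_buf(pipe, ctx, paddr))
                        break; /* -EIO: destination ring is full */
        }

        spin_unlock_bh(&ar_pci->ce_lock);
}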

View file

@ -162,30 +162,13 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
/*
* Make a buffer available to receive. The buffer must be at least of a
* minimal size appropriate for this copy engine (src_sz_max attribute).
* ce - which copy engine to use
* per_transfer_recv_context - context passed back to caller's recv_cb
* buffer - address of buffer in CE space
* Returns 0 on success; otherwise an error status.
*
* Implemenation note: Pushes a buffer to Dest ring.
*/
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer);
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
void (*recv_cb)(struct ath10k_ce_pipe *));
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
/* recv flags */
/* Data is byte-swapped */
@ -214,7 +197,9 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Engine Initialization=======================*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr);
const struct ce_attr *attr,
void (*send_cb)(struct ath10k_ce_pipe *),
void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr);
@ -245,6 +230,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
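
With ath10k_ce_send_cb_register()/ath10k_ce_recv_cb_register() removed from this header, completion callbacks are handed to ath10k_ce_init_pipe() up front and per-engine interrupts are switched on in one pass with ath10k_ce_enable_interrupts(). A sketch of how a bus glue layer might use the new entry points (the example_* names are placeholders):

static void example_send_done(struct ath10k_ce_pipe *ce_state) { }
static void example_recv_done(struct ath10k_ce_pipe *ce_state) { }

static int example_init_pipes(struct ath10k *ar, const struct ce_attr *attrs)
{
        int i, ret;

        for (i = 0; i < CE_COUNT; i++) {
                ret = ath10k_ce_init_pipe(ar, i, &attrs[i],
                                          example_send_done,
                                          example_recv_done);
                if (ret)
                        return ret;
        }

        /* Interrupts for every copy engine are enabled in one call. */
        ath10k_ce_enable_interrupts(ar);
        return 0;
}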

View file

@ -53,7 +53,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
complete(&ar->target_suspend);
}
@ -67,14 +67,14 @@ static int ath10k_init_configure_target(struct ath10k *ar)
ret = ath10k_bmi_write32(ar, hi_app_host_interest,
HTC_PROTOCOL_VERSION);
if (ret) {
ath10k_err("settings HTC version failed\n");
ath10k_err(ar, "settings HTC version failed\n");
return ret;
}
/* set the firmware mode to STA/IBSS/AP */
ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
if (ret) {
ath10k_err("setting firmware mode (1/2) failed\n");
ath10k_err(ar, "setting firmware mode (1/2) failed\n");
return ret;
}
@ -93,14 +93,14 @@ static int ath10k_init_configure_target(struct ath10k *ar)
ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
if (ret) {
ath10k_err("setting firmware mode (2/2) failed\n");
ath10k_err(ar, "setting firmware mode (2/2) failed\n");
return ret;
}
/* We do all byte-swapping on the host */
ret = ath10k_bmi_write32(ar, hi_be, 0);
if (ret) {
ath10k_err("setting host CPU BE mode failed\n");
ath10k_err(ar, "setting host CPU BE mode failed\n");
return ret;
}
@ -108,7 +108,7 @@ static int ath10k_init_configure_target(struct ath10k *ar)
ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
if (ret) {
ath10k_err("setting FW data/desc swap flags failed\n");
ath10k_err(ar, "setting FW data/desc swap flags failed\n");
return ret;
}
@ -146,11 +146,12 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
if (ret) {
ath10k_err("could not read board ext data addr (%d)\n", ret);
ath10k_err(ar, "could not read board ext data addr (%d)\n",
ret);
return ret;
}
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot push board extended data addr 0x%x\n",
board_ext_data_addr);
@ -158,7 +159,7 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
return 0;
if (ar->board_len != (board_data_size + board_ext_data_size)) {
ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
ar->board_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
@ -167,14 +168,15 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
ar->board_data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err("could not write board ext data (%d)\n", ret);
ath10k_err(ar, "could not write board ext data (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
(board_ext_data_size << 16) | 1);
if (ret) {
ath10k_err("could not write board ext data bit (%d)\n", ret);
ath10k_err(ar, "could not write board ext data bit (%d)\n",
ret);
return ret;
}
@ -189,13 +191,13 @@ static int ath10k_download_board_data(struct ath10k *ar)
ret = ath10k_push_board_ext_data(ar);
if (ret) {
ath10k_err("could not push board ext data (%d)\n", ret);
ath10k_err(ar, "could not push board ext data (%d)\n", ret);
goto exit;
}
ret = ath10k_bmi_read32(ar, hi_board_data, &address);
if (ret) {
ath10k_err("could not read board data addr (%d)\n", ret);
ath10k_err(ar, "could not read board data addr (%d)\n", ret);
goto exit;
}
@ -203,13 +205,13 @@ static int ath10k_download_board_data(struct ath10k *ar)
min_t(u32, board_data_size,
ar->board_len));
if (ret) {
ath10k_err("could not write board data (%d)\n", ret);
ath10k_err(ar, "could not write board data (%d)\n", ret);
goto exit;
}
ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
if (ret) {
ath10k_err("could not write board data bit (%d)\n", ret);
ath10k_err(ar, "could not write board data bit (%d)\n", ret);
goto exit;
}
@ -225,30 +227,30 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
/* OTP is optional */
if (!ar->otp_data || !ar->otp_len) {
ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
ar->otp_data, ar->otp_len);
return 0;
}
ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
address, ar->otp_len);
ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
ath10k_err(ar, "could not write otp (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_execute(ar, address, 0, &result);
if (ret) {
ath10k_err("could not execute otp (%d)\n", ret);
ath10k_err(ar, "could not execute otp (%d)\n", ret);
return ret;
}
ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (result != 0) {
ath10k_err("otp calibration failed: %d", result);
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
@ -265,7 +267,7 @@ static int ath10k_download_fw(struct ath10k *ar)
ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
ar->firmware_len);
if (ret) {
ath10k_err("could not write fw (%d)\n", ret);
ath10k_err(ar, "could not write fw (%d)\n", ret);
goto exit;
}
@ -302,12 +304,12 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
int ret = 0;
if (ar->hw_params.fw.fw == NULL) {
ath10k_err("firmware file not defined\n");
ath10k_err(ar, "firmware file not defined\n");
return -EINVAL;
}
if (ar->hw_params.fw.board == NULL) {
ath10k_err("board data file not defined");
ath10k_err(ar, "board data file not defined");
return -EINVAL;
}
@ -316,7 +318,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data (%d)\n", ret);
ath10k_err(ar, "could not fetch board data (%d)\n", ret);
goto err;
}
@ -328,7 +330,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
ar->hw_params.fw.fw);
if (IS_ERR(ar->firmware)) {
ret = PTR_ERR(ar->firmware);
ath10k_err("could not fetch firmware (%d)\n", ret);
ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
goto err;
}
@ -344,7 +346,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
ar->hw_params.fw.otp);
if (IS_ERR(ar->otp)) {
ret = PTR_ERR(ar->otp);
ath10k_err("could not fetch otp (%d)\n", ret);
ath10k_err(ar, "could not fetch otp (%d)\n", ret);
goto err;
}
@ -369,7 +371,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
/* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
if (IS_ERR(ar->firmware)) {
ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
return PTR_ERR(ar->firmware);
}
@ -381,14 +383,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) {
ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
ar->hw_params.fw.dir, name, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
ath10k_err("invalid firmware magic\n");
ath10k_err(ar, "invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
@ -410,7 +412,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += sizeof(*hdr);
if (len < ie_len) {
ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
@ -424,7 +426,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
memcpy(ar->hw->wiphy->fw_version, data, ie_len);
ar->hw->wiphy->fw_version[ie_len] = '\0';
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw version %s\n",
ar->hw->wiphy->fw_version);
break;
@ -434,11 +436,11 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
timestamp = (__le32 *)data;
ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
le32_to_cpup(timestamp));
break;
case ATH10K_FW_IE_FEATURES:
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found firmware features ie (%zd B)\n",
ie_len);
@ -450,19 +452,19 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
break;
if (data[index] & (1 << bit)) {
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Enabling feature bit: %i\n",
i);
__set_bit(i, ar->fw_features);
}
}
ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
ar->fw_features,
sizeof(ar->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw image ie (%zd B)\n",
ie_len);
@ -471,7 +473,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
break;
case ATH10K_FW_IE_OTP_IMAGE:
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found otp image ie (%zd B)\n",
ie_len);
@ -480,7 +482,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
break;
default:
ath10k_warn("Unknown FW IE: %u\n",
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
break;
}
@ -493,15 +495,22 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
}
if (!ar->firmware_data || !ar->firmware_len) {
ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
goto err;
}
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
!test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
ret = -EINVAL;
goto err;
}
/* now fetch the board file */
if (ar->hw_params.fw.board == NULL) {
ath10k_err("board data file not defined");
ath10k_err(ar, "board data file not defined");
ret = -EINVAL;
goto err;
}
@ -511,7 +520,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data '%s/%s' (%d)\n",
ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
ar->hw_params.fw.dir, ar->hw_params.fw.board,
ret);
goto err;
@ -531,22 +540,29 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
ar->fw_api = 3;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
if (ret == 0)
goto success;
ar->fw_api = 2;
ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
if (ret == 0)
goto success;
ar->fw_api = 1;
ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_1(ar);
if (ret)
return ret;
success:
ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
return 0;
}
@ -557,19 +573,19 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
ret = ath10k_download_board_data(ar);
if (ret) {
ath10k_err("failed to download board data: %d\n", ret);
ath10k_err(ar, "failed to download board data: %d\n", ret);
return ret;
}
ret = ath10k_download_and_run_otp(ar);
if (ret) {
ath10k_err("failed to run otp: %d\n", ret);
ath10k_err(ar, "failed to run otp: %d\n", ret);
return ret;
}
ret = ath10k_download_fw(ar);
if (ret) {
ath10k_err("failed to download firmware: %d\n", ret);
ath10k_err(ar, "failed to download firmware: %d\n", ret);
return ret;
}
@ -586,7 +602,7 @@ static int ath10k_init_uart(struct ath10k *ar)
*/
ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
if (ret) {
ath10k_warn("could not disable UART prints (%d)\n", ret);
ath10k_warn(ar, "could not disable UART prints (%d)\n", ret);
return ret;
}
@ -595,24 +611,24 @@ static int ath10k_init_uart(struct ath10k *ar)
ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
if (ret) {
ath10k_warn("could not enable UART prints (%d)\n", ret);
ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
if (ret) {
ath10k_warn("could not enable UART prints (%d)\n", ret);
ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
return ret;
}
/* Set the UART baud rate to 19200. */
ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
if (ret) {
ath10k_warn("could not set the baud rate (%d)\n", ret);
ath10k_warn(ar, "could not set the baud rate (%d)\n", ret);
return ret;
}
ath10k_info("UART prints enabled\n");
ath10k_info(ar, "UART prints enabled\n");
return 0;
}
@ -629,14 +645,14 @@ static int ath10k_init_hw_params(struct ath10k *ar)
}
if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
ath10k_err("Unsupported hardware version: 0x%x\n",
ath10k_err(ar, "Unsupported hardware version: 0x%x\n",
ar->target_version);
return -EINVAL;
}
ar->hw_params = *hw_params;
ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
ar->hw_params.name, ar->target_version);
return 0;
@ -651,14 +667,14 @@ static void ath10k_core_restart(struct work_struct *work)
switch (ar->state) {
case ATH10K_STATE_ON:
ar->state = ATH10K_STATE_RESTARTING;
del_timer_sync(&ar->scan.timeout);
ath10k_reset_scan((unsigned long)ar);
ath10k_hif_stop(ar);
ath10k_scan_finish(ar);
ieee80211_restart_hw(ar->hw);
break;
case ATH10K_STATE_OFF:
/* this can happen if driver is being unloaded
* or if the crash happens during FW probing */
ath10k_warn("cannot restart a device that hasn't been started\n");
ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
/* hw restart might be requested from multiple places */
@ -667,7 +683,7 @@ static void ath10k_core_restart(struct work_struct *work)
ar->state = ATH10K_STATE_WEDGED;
/* fall through */
case ATH10K_STATE_WEDGED:
ath10k_warn("device is wedged, will not restart\n");
ath10k_warn(ar, "device is wedged, will not restart\n");
break;
}
@ -700,7 +716,7 @@ int ath10k_core_start(struct ath10k *ar)
status = ath10k_htc_init(ar);
if (status) {
ath10k_err("could not init HTC (%d)\n", status);
ath10k_err(ar, "could not init HTC (%d)\n", status);
goto err;
}
@ -710,90 +726,91 @@ int ath10k_core_start(struct ath10k *ar)
status = ath10k_wmi_attach(ar);
if (status) {
ath10k_err("WMI attach failed: %d\n", status);
ath10k_err(ar, "WMI attach failed: %d\n", status);
goto err;
}
status = ath10k_htt_init(ar);
if (status) {
ath10k_err("failed to init htt: %d\n", status);
ath10k_err(ar, "failed to init htt: %d\n", status);
goto err_wmi_detach;
}
status = ath10k_htt_tx_alloc(&ar->htt);
if (status) {
ath10k_err("failed to alloc htt tx: %d\n", status);
ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
goto err_wmi_detach;
}
status = ath10k_htt_rx_alloc(&ar->htt);
if (status) {
ath10k_err("failed to alloc htt rx: %d\n", status);
ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
goto err_htt_tx_detach;
}
status = ath10k_hif_start(ar);
if (status) {
ath10k_err("could not start HIF: %d\n", status);
ath10k_err(ar, "could not start HIF: %d\n", status);
goto err_htt_rx_detach;
}
status = ath10k_htc_wait_target(&ar->htc);
if (status) {
ath10k_err("failed to connect to HTC: %d\n", status);
ath10k_err(ar, "failed to connect to HTC: %d\n", status);
goto err_hif_stop;
}
status = ath10k_htt_connect(&ar->htt);
if (status) {
ath10k_err("failed to connect htt (%d)\n", status);
ath10k_err(ar, "failed to connect htt (%d)\n", status);
goto err_hif_stop;
}
status = ath10k_wmi_connect(ar);
if (status) {
ath10k_err("could not connect wmi: %d\n", status);
ath10k_err(ar, "could not connect wmi: %d\n", status);
goto err_hif_stop;
}
status = ath10k_htc_start(&ar->htc);
if (status) {
ath10k_err("failed to start htc: %d\n", status);
ath10k_err(ar, "failed to start htc: %d\n", status);
goto err_hif_stop;
}
status = ath10k_wmi_wait_for_service_ready(ar);
if (status <= 0) {
ath10k_warn("wmi service ready event not received");
ath10k_warn(ar, "wmi service ready event not received");
status = -ETIMEDOUT;
goto err_htc_stop;
goto err_hif_stop;
}
ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
goto err_htc_stop;
ath10k_err(ar, "could not send WMI init command (%d)\n",
status);
goto err_hif_stop;
}
status = ath10k_wmi_wait_for_unified_ready(ar);
if (status <= 0) {
ath10k_err("wmi unified ready event not received\n");
ath10k_err(ar, "wmi unified ready event not received\n");
status = -ETIMEDOUT;
goto err_htc_stop;
goto err_hif_stop;
}
status = ath10k_htt_setup(&ar->htt);
if (status) {
ath10k_err("failed to setup htt: %d\n", status);
goto err_htc_stop;
ath10k_err(ar, "failed to setup htt: %d\n", status);
goto err_hif_stop;
}
status = ath10k_debug_start(ar);
if (status)
goto err_htc_stop;
goto err_hif_stop;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
@ -802,28 +819,8 @@ int ath10k_core_start(struct ath10k *ar)
INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) {
ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor);
ath10k_info("debug %d debugfs %d tracing %d dfs %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),
config_enabled(CONFIG_ATH10K_TRACING),
config_enabled(CONFIG_ATH10K_DFS_CERTIFIED));
}
__set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags);
return 0;
err_htc_stop:
ath10k_htc_stop(&ar->htc);
err_hif_stop:
ath10k_hif_stop(ar);
err_htt_rx_detach:
@ -845,14 +842,14 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
if (ret) {
ath10k_warn("could not suspend target (%d)\n", ret);
ath10k_warn(ar, "could not suspend target (%d)\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
if (ret == 0) {
ath10k_warn("suspend timed out - target pause event never came\n");
ath10k_warn(ar, "suspend timed out - target pause event never came\n");
return -ETIMEDOUT;
}
@ -868,7 +865,6 @@ void ath10k_core_stop(struct ath10k *ar)
ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_hif_stop(ar);
ath10k_htt_tx_free(&ar->htt);
ath10k_htt_rx_free(&ar->htt);
@ -887,14 +883,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ret = ath10k_hif_power_up(ar);
if (ret) {
ath10k_err("could not start pci hif (%d)\n", ret);
ath10k_err(ar, "could not start pci hif (%d)\n", ret);
return ret;
}
memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info(ar, &target_info);
if (ret) {
ath10k_err("could not get target info (%d)\n", ret);
ath10k_err(ar, "could not get target info (%d)\n", ret);
ath10k_hif_power_down(ar);
return ret;
}
@ -904,14 +900,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ret = ath10k_init_hw_params(ar);
if (ret) {
ath10k_err("could not get hw params (%d)\n", ret);
ath10k_err(ar, "could not get hw params (%d)\n", ret);
ath10k_hif_power_down(ar);
return ret;
}
ret = ath10k_core_fetch_firmware_files(ar);
if (ret) {
ath10k_err("could not fetch firmware files (%d)\n", ret);
ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
ath10k_hif_power_down(ar);
return ret;
}
@ -920,13 +916,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ret = ath10k_core_start(ar);
if (ret) {
ath10k_err("could not init core (%d)\n", ret);
ath10k_err(ar, "could not init core (%d)\n", ret);
ath10k_core_free_firmware_files(ar);
ath10k_hif_power_down(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
ath10k_print_driver_info(ar);
ath10k_core_stop(ar);
mutex_unlock(&ar->conf_mutex);
@ -939,7 +936,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
{
u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
ar->chip_id, hw_revision);
/* Check that we are not using hw1.0 (some of them have same pci id
@ -947,7 +944,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
* due to missing hw1.0 workarounds. */
switch (hw_revision) {
case QCA988X_HW_1_0_CHIP_ID_REV:
ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
ath10k_err(ar, "ERROR: qca988x hw1.0 is not supported\n");
return -EOPNOTSUPP;
case QCA988X_HW_2_0_CHIP_ID_REV:
@ -955,7 +952,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
return 0;
default:
ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
ath10k_warn(ar, "Warning: hardware revision unknown (0x%x), expect problems\n",
ar->chip_id);
return 0;
}
@ -970,25 +967,33 @@ static void ath10k_core_register_work(struct work_struct *work)
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
ath10k_err(ar, "could not probe fw (%d)\n", status);
goto err;
}
status = ath10k_mac_register(ar);
if (status) {
ath10k_err("could not register to mac80211 (%d)\n", status);
ath10k_err(ar, "could not register to mac80211 (%d)\n", status);
goto err_release_fw;
}
status = ath10k_debug_create(ar);
if (status) {
ath10k_err("unable to initialize debugfs\n");
ath10k_err(ar, "unable to initialize debugfs\n");
goto err_unregister_mac;
}
status = ath10k_spectral_create(ar);
if (status) {
ath10k_err(ar, "failed to initialize spectral\n");
goto err_debug_destroy;
}
set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
return;
err_debug_destroy:
ath10k_debug_destroy(ar);
err_unregister_mac:
ath10k_mac_unregister(ar);
err_release_fw:
@ -1008,7 +1013,7 @@ int ath10k_core_register(struct ath10k *ar, u32 chip_id)
status = ath10k_core_check_chip_id(ar);
if (status) {
ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
ath10k_err(ar, "Unsupported chip id 0x%08x\n", ar->chip_id);
return status;
}
@ -1025,6 +1030,12 @@ void ath10k_core_unregister(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
return;
/* Stop spectral before unregistering from mac80211 to remove the
* relayfs debugfs file cleanly. Otherwise the parent debugfs tree
* would be already be free'd recursively, leading to a double free.
*/
ath10k_spectral_destroy(ar);
/* We must unregister from mac80211 before we stop HTC and HIF.
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
@ -1036,12 +1047,12 @@ void ath10k_core_unregister(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_core_unregister);
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
const struct ath10k_hif_ops *hif_ops)
{
struct ath10k *ar;
ar = ath10k_mac_create();
ar = ath10k_mac_create(priv_size);
if (!ar)
return NULL;
@ -1051,7 +1062,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
ar->p2p = !!ath10k_p2p;
ar->dev = dev;
ar->hif.priv = hif_priv;
ar->hif.ops = hif_ops;
init_completion(&ar->scan.started);
@ -1062,7 +1072,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
ar->workqueue = create_singlethread_workqueue("ath10k_wq");
if (!ar->workqueue)

View file

@ -22,6 +22,8 @@
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include "htt.h"
#include "htc.h"
@ -31,6 +33,7 @@
#include "../ath.h"
#include "../regd.h"
#include "../dfs_pattern_detector.h"
#include "spectral.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@ -237,6 +240,7 @@ struct ath10k_vif {
bool is_started;
bool is_up;
bool spectral_enabled;
u32 aid;
u8 bssid[ETH_ALEN];
@ -276,11 +280,20 @@ struct ath10k_vif_iter {
struct ath10k_vif *arvif;
};
/* used for crash-dump storage, protected by data-lock */
struct ath10k_fw_crash_data {
bool crashed_since_read;
uuid_le uuid;
struct timespec timestamp;
__le32 registers[REG_DUMP_COUNT_QCA988X];
};
struct ath10k_debug {
struct dentry *debugfs_phy;
struct ath10k_target_stats target_stats;
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_BM_SIZE);
struct completion event_stats_compl;
@ -293,6 +306,8 @@ struct ath10k_debug {
u8 htt_max_amsdu;
u8 htt_max_ampdu;
struct ath10k_fw_crash_data *fw_crash_data;
};
enum ath10k_state {
@ -330,6 +345,11 @@ enum ath10k_fw_features {
/* Firmware does not support P2P */
ATH10K_FW_FEATURE_NO_P2P = 3,
/* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature bit
* is required to be set as well.
*/
ATH10K_FW_FEATURE_WMI_10_2 = 4,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@ -337,10 +357,32 @@ enum ath10k_fw_features {
enum ath10k_dev_flags {
/* Indicates that ath10k device is during CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_FIRST_BOOT_DONE,
ATH10K_FLAG_CORE_REGISTERED,
};
enum ath10k_scan_state {
ATH10K_SCAN_IDLE,
ATH10K_SCAN_STARTING,
ATH10K_SCAN_RUNNING,
ATH10K_SCAN_ABORTING,
};
static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
{
switch (state) {
case ATH10K_SCAN_IDLE:
return "idle";
case ATH10K_SCAN_STARTING:
return "starting";
case ATH10K_SCAN_RUNNING:
return "running";
case ATH10K_SCAN_ABORTING:
return "aborting";
}
return "unknown";
}
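
The scan machinery moves from ad-hoc booleans (in_progress/aborting) to an explicit state enum, with ath10k_scan_state_str() for diagnostics and the scan timeout converted from a timer to a delayed work. A hypothetical transition helper showing how the string helper pairs with the converted debug call:

static void example_scan_set_state(struct ath10k *ar,
                                   enum ath10k_scan_state next)
{
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "scan state %s -> %s\n",
                   ath10k_scan_state_str(ar->scan.state),
                   ath10k_scan_state_str(next));
        ar->scan.state = next;
}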
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@ -368,7 +410,6 @@ struct ath10k {
bool p2p;
struct {
void *priv;
const struct ath10k_hif_ops *ops;
} hif;
@ -410,10 +451,9 @@ struct ath10k {
struct completion started;
struct completion completed;
struct completion on_channel;
struct timer_list timeout;
struct delayed_work timeout;
enum ath10k_scan_state state;
bool is_roc;
bool in_progress;
bool aborting;
int vdev_id;
int roc_freq;
} scan;
@ -494,9 +534,21 @@ struct ath10k {
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
struct {
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
/* spectral_mode and spec_config are protected by conf_mutex */
enum ath10k_spectral_mode mode;
struct ath10k_spec_scan config;
} spectral;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
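
struct ath10k now ends in a flexible drv_priv[] area and ath10k_core_create() takes the size of the bus-private structure instead of a hif_priv pointer, so the bus glue lives directly behind the core structure. A sketch of how a bus driver could allocate and reach its private data under this layout (the accessor body is an assumption based on the drv_priv field, mirroring what ath10k_pci_priv() is expected to do):

/* Assumed accessor: bus-private data sits in ar->drv_priv. */
static inline struct ath10k_pci *example_pci_priv(struct ath10k *ar)
{
        return (struct ath10k_pci *)ar->drv_priv;
}

static struct ath10k *example_create(struct device *dev,
                                     const struct ath10k_hif_ops *ops)
{
        /* The core allocates sizeof(struct ath10k_pci) extra bytes,
         * pointer-aligned, right after struct ath10k. */
        return ath10k_core_create(sizeof(struct ath10k_pci), dev, ops);
}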

View file

@ -17,6 +17,9 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/version.h>
#include <linux/vermagic.h>
#include <linux/vmalloc.h>
#include "core.h"
#include "debug.h"
@ -24,25 +27,86 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
int rtn;
#define ATH10K_FW_CRASH_DUMP_VERSION 1
va_start(args, fmt);
/**
* enum ath10k_fw_crash_dump_type - types of data in the dump file
* @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
*/
enum ath10k_fw_crash_dump_type {
ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
vaf.fmt = fmt;
vaf.va = &args;
ATH10K_FW_CRASH_DUMP_MAX,
};
rtn = printk("%sath10k: %pV", level, &vaf);
struct ath10k_tlv_dump_data {
/* see ath10k_fw_crash_dump_type above */
__le32 type;
va_end(args);
/* in bytes */
__le32 tlv_len;
return rtn;
}
/* pad to 32-bit boundaries as needed */
u8 tlv_data[];
} __packed;
int ath10k_info(const char *fmt, ...)
struct ath10k_dump_file_data {
/* dump file information */
/* "ATH10K-FW-DUMP" */
char df_magic[16];
__le32 len;
/* file dump version */
__le32 version;
/* some info we can get from ath10k struct that might help */
u8 uuid[16];
__le32 chip_id;
/* 0 for now, in place for later hardware */
__le32 bus_type;
__le32 target_version;
__le32 fw_version_major;
__le32 fw_version_minor;
__le32 fw_version_release;
__le32 fw_version_build;
__le32 phy_capability;
__le32 hw_min_tx_power;
__le32 hw_max_tx_power;
__le32 ht_cap_info;
__le32 vht_cap_info;
__le32 num_rf_chains;
/* firmware version string */
char fw_ver[ETHTOOL_FWVERS_LEN];
/* Kernel related information */
/* time-of-day stamp */
__le64 tv_sec;
/* time-of-day stamp, nano-seconds */
__le64 tv_nsec;
/* LINUX_VERSION_CODE */
__le32 kernel_ver_code;
/* VERMAGIC_STRING */
char kernel_ver[64];
/* room for growth w/out changing binary format */
u8 unused[128];
/* struct ath10k_tlv_dump_data + more */
u8 data[0];
} __packed;
int ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@ -52,7 +116,7 @@ int ath10k_info(const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
ret = dev_info(ar->dev, "%pV", &vaf);
trace_ath10k_log_info(&vaf);
va_end(args);
@ -60,7 +124,25 @@ int ath10k_info(const char *fmt, ...)
}
EXPORT_SYMBOL(ath10k_info);
int ath10k_err(const char *fmt, ...)
void ath10k_print_driver_info(struct ath10k *ar)
{
ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),
config_enabled(CONFIG_ATH10K_TRACING),
config_enabled(CONFIG_ATH10K_DFS_CERTIFIED));
}
EXPORT_SYMBOL(ath10k_print_driver_info);
int ath10k_err(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@ -70,7 +152,7 @@ int ath10k_err(const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
ret = dev_err(ar->dev, "%pV", &vaf);
trace_ath10k_log_err(&vaf);
va_end(args);
@ -78,25 +160,21 @@ int ath10k_err(const char *fmt, ...)
}
EXPORT_SYMBOL(ath10k_err);
int ath10k_warn(const char *fmt, ...)
int ath10k_warn(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret = 0;
va_start(args, fmt);
vaf.va = &args;
if (net_ratelimit())
ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
dev_warn_ratelimited(ar->dev, "%pV", &vaf);
trace_ath10k_log_warn(&vaf);
va_end(args);
return ret;
return 0;
}
EXPORT_SYMBOL(ath10k_warn);
@ -115,9 +193,10 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
{
struct ath10k *ar = file->private_data;
char *buf;
unsigned int len = 0, buf_len = 1500;
const char *status;
unsigned int len = 0, buf_len = 4096;
const char *name;
ssize_t ret_cnt;
bool enabled;
int i;
buf = kzalloc(buf_len, GFP_KERNEL);
@ -129,15 +208,22 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
if (len > buf_len)
len = buf_len;
for (i = 0; i < WMI_SERVICE_LAST; i++) {
if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
status = "enabled";
else
status = "disabled";
for (i = 0; i < WMI_MAX_SERVICE; i++) {
enabled = test_bit(i, ar->debug.wmi_service_bitmap);
name = wmi_service_name(i);
if (!name) {
if (enabled)
len += scnprintf(buf + len, buf_len - len,
"%-40s %s (bit %d)\n",
"unknown", "enabled", i);
continue;
}
len += scnprintf(buf + len, buf_len - len,
"0x%02x - %20s - %s\n",
i, wmi_service_name(i), status);
"%-40s %s\n",
name, enabled ? "enabled" : "-");
}
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@ -309,7 +395,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
if (ret) {
ath10k_warn("could not request stats (%d)\n", ret);
ath10k_warn(ar, "could not request stats (%d)\n", ret);
goto exit;
}
@ -527,11 +613,14 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
}
if (!strcmp(buf, "soft")) {
ath10k_info("simulating soft firmware crash\n");
ath10k_info(ar, "simulating soft firmware crash\n");
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
} else if (!strcmp(buf, "hard")) {
ath10k_info("simulating hard firmware crash\n");
ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
ath10k_info(ar, "simulating hard firmware crash\n");
/* 0x7fff is vdev id, and it is always out of range for all
* firmware variants in order to force a firmware crash.
*/
ret = ath10k_wmi_vdev_set_param(ar, 0x7fff,
ar->wmi.vdev_param->rts_threshold, 0);
} else {
ret = -EINVAL;
@ -539,7 +628,7 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
}
if (ret) {
ath10k_warn("failed to simulate firmware crash: %d\n", ret);
ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret);
goto exit;
}
@ -577,6 +666,138 @@ static const struct file_operations fops_chip_id = {
.llseek = default_llseek,
};
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
lockdep_assert_held(&ar->data_lock);
crash_data->crashed_since_read = true;
uuid_le_gen(&crash_data->uuid);
getnstimeofday(&crash_data->timestamp);
return crash_data;
}
EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
struct ath10k_dump_file_data *dump_data;
struct ath10k_tlv_dump_data *dump_tlv;
int hdr_len = sizeof(*dump_data);
unsigned int len, sofar = 0;
unsigned char *buf;
len = hdr_len;
len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
sofar += hdr_len;
/* This is going to get big when we start dumping FW RAM and such,
* so go ahead and use vmalloc.
*/
buf = vzalloc(len);
if (!buf)
return NULL;
spin_lock_bh(&ar->data_lock);
if (!crash_data->crashed_since_read) {
spin_unlock_bh(&ar->data_lock);
vfree(buf);
return NULL;
}
dump_data = (struct ath10k_dump_file_data *)(buf);
strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
sizeof(dump_data->df_magic));
dump_data->len = cpu_to_le32(len);
dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid));
dump_data->chip_id = cpu_to_le32(ar->chip_id);
dump_data->bus_type = cpu_to_le32(0);
dump_data->target_version = cpu_to_le32(ar->target_version);
dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
sizeof(dump_data->fw_ver));
dump_data->kernel_ver_code = cpu_to_le32(LINUX_VERSION_CODE);
strlcpy(dump_data->kernel_ver, VERMAGIC_STRING,
sizeof(dump_data->kernel_ver));
dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
/* Gather crash-dump */
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
memcpy(dump_tlv->tlv_data, &crash_data->registers,
sizeof(crash_data->registers));
sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
ar->debug.fw_crash_data->crashed_since_read = false;
spin_unlock_bh(&ar->data_lock);
return dump_data;
}
static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
{
struct ath10k *ar = inode->i_private;
struct ath10k_dump_file_data *dump;
dump = ath10k_build_dump_file(ar);
if (!dump)
return -ENODATA;
file->private_data = dump;
return 0;
}
static ssize_t ath10k_fw_crash_dump_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k_dump_file_data *dump_file = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos,
dump_file,
le32_to_cpu(dump_file->len));
}
static int ath10k_fw_crash_dump_release(struct inode *inode,
struct file *file)
{
vfree(file->private_data);
return 0;
}
static const struct file_operations fops_fw_crash_dump = {
.open = ath10k_fw_crash_dump_open,
.read = ath10k_fw_crash_dump_read,
.release = ath10k_fw_crash_dump_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
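
The fw_crash_dump file assembled above is a fixed header (struct ath10k_dump_file_data, little-endian fields) followed by one or more TLVs (struct ath10k_tlv_dump_data); only ATH10K_FW_CRASH_DUMP_REGISTERS is emitted so far, and a reader can skip types it does not recognize. A minimal, hypothetical walk over the TLV region could look like this:

static void example_walk_dump(const struct ath10k_dump_file_data *dump)
{
        const u8 *p = dump->data;
        const u8 *end = (const u8 *)dump + le32_to_cpu(dump->len);

        while (p + sizeof(struct ath10k_tlv_dump_data) <= end) {
                const struct ath10k_tlv_dump_data *tlv = (const void *)p;
                u32 len = le32_to_cpu(tlv->tlv_len);

                switch (le32_to_cpu(tlv->type)) {
                case ATH10K_FW_CRASH_DUMP_REGISTERS:
                        /* len == sizeof(__le32) * REG_DUMP_COUNT_QCA988X */
                        break;
                default:
                        /* skip unknown TLVs to stay forward compatible */
                        break;
                }

                p = tlv->tlv_data + len;
        }
}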
static int ath10k_debug_htt_stats_req(struct ath10k *ar)
{
u64 cookie;
@ -596,7 +817,7 @@ static int ath10k_debug_htt_stats_req(struct ath10k *ar)
ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
cookie);
if (ret) {
ath10k_warn("failed to send htt stats request: %d\n", ret);
ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
return ret;
}
@ -770,7 +991,7 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
if (ar->state == ATH10K_STATE_ON) {
ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
if (ret) {
ath10k_warn("dbglog cfg failed from debugfs: %d\n",
ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
ret);
goto exit;
}
@ -801,13 +1022,14 @@ int ath10k_debug_start(struct ath10k *ar)
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
/* continue normally anyway, this isn't serious */
ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
ath10k_warn(ar, "failed to start htt stats workqueue: %d\n",
ret);
if (ar->debug.fw_dbglog_mask) {
ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
if (ret)
/* not serious */
ath10k_warn("failed to enable dbglog during start: %d",
ath10k_warn(ar, "failed to enable dbglog during start: %d",
ret);
}
@ -910,11 +1132,20 @@ static const struct file_operations fops_dfs_stats = {
int ath10k_debug_create(struct ath10k *ar)
{
int ret;
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
if (!ar->debug.fw_crash_data) {
ret = -ENOMEM;
goto err;
}
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
ar->hw->wiphy->debugfsdir);
if (!ar->debug.debugfs_phy)
return -ENOMEM;
if (!ar->debug.debugfs_phy) {
ret = -ENOMEM;
goto err_free_fw_crash_data;
}
INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
ath10k_debug_htt_stats_dwork);
@ -930,6 +1161,9 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_simulate_fw_crash);
debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_crash_dump);
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_chip_id);
@ -958,17 +1192,25 @@ int ath10k_debug_create(struct ath10k *ar)
}
return 0;
err_free_fw_crash_data:
vfree(ar->debug.fw_crash_data);
err:
return ret;
}
void ath10k_debug_destroy(struct ath10k *ar)
{
vfree(ar->debug.fw_crash_data);
cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@ -979,7 +1221,7 @@ void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
vaf.va = &args;
if (ath10k_debug_mask & mask)
ath10k_printk(KERN_DEBUG, "%pV", &vaf);
dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);
trace_ath10k_log_dbg(mask, &vaf);
@ -987,13 +1229,14 @@ void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
}
EXPORT_SYMBOL(ath10k_dbg);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
if (ath10k_debug_mask & mask) {
if (msg)
ath10k_dbg(mask, "%s\n", msg);
ath10k_dbg(ar, mask, "%s\n", msg);
print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
}


@ -39,9 +39,10 @@ enum ath10k_debug_mask {
extern unsigned int ath10k_debug_mask;
__printf(1, 2) int ath10k_info(const char *fmt, ...);
__printf(1, 2) int ath10k_err(const char *fmt, ...);
__printf(1, 2) int ath10k_warn(const char *fmt, ...);
__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...);
void ath10k_print_driver_info(struct ath10k *ar);
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_debug_start(struct ath10k *ar);
@ -53,6 +54,10 @@ void ath10k_debug_read_service_map(struct ath10k *ar,
size_t map_size);
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
@ -86,25 +91,40 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
{
}
static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
int len)
{
}
static inline struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
{
return NULL;
}
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *fmt, ...);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
#else /* CONFIG_ATH10K_DEBUG */
static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask,
static inline int ath10k_dbg(struct ath10k *ar,
enum ath10k_debug_mask dbg_mask,
const char *fmt, ...)
{
return 0;
}
static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
static inline void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{


@ -46,7 +46,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
if (!skb) {
ath10k_warn("Unable to allocate ctrl skb\n");
ath10k_warn(ar, "Unable to allocate ctrl skb\n");
return NULL;
}
@ -56,7 +56,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
return skb;
}
@ -72,13 +72,15 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
struct ath10k *ar = ep->htc->ar;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
ath10k_warn("no tx handler for eid %d\n", ep->eid);
ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
dev_kfree_skb_any(skb);
return;
}
@ -89,12 +91,14 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
struct ath10k *ar = ep->htc->ar;
if (!ep->tx_credit_flow_enabled)
return false;
if (ep->tx_credits >= ep->tx_credits_per_max_message)
return false;
ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
ep->eid);
return true;
}
@ -123,6 +127,7 @@ int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
@ -134,18 +139,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return -ECOMM;
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn("Invalid endpoint id: %d\n", eid);
ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
/* FIXME: This looks ugly, can we fix it? */
spin_lock_bh(&htc->tx_lock);
if (htc->stopped) {
spin_unlock_bh(&htc->tx_lock);
return -ESHUTDOWN;
}
spin_unlock_bh(&htc->tx_lock);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
if (ep->tx_credit_flow_enabled) {
@ -157,7 +154,7 @@ int ath10k_htc_send(struct ath10k_htc *htc,
goto err_pull;
}
ep->tx_credits -= credits;
ath10k_dbg(ATH10K_DBG_HTC,
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
@ -188,7 +185,7 @@ err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath10k_dbg(ATH10K_DBG_HTC,
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
@ -227,11 +224,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
int len,
enum ath10k_htc_ep_id eid)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep;
int i, n_reports;
if (len % sizeof(*report))
ath10k_warn("Uneven credit report len %d", len);
ath10k_warn(ar, "Uneven credit report len %d", len);
n_reports = len / sizeof(*report);
@ -243,7 +241,7 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
@ -260,6 +258,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
int length,
enum ath10k_htc_ep_id src_eid)
{
struct ath10k *ar = htc->ar;
int status = 0;
struct ath10k_htc_record *record;
u8 *orig_buffer;
@ -279,7 +278,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
if (record->hdr.len > length) {
/* no room left in buffer for record */
ath10k_warn("Invalid record length: %d\n",
ath10k_warn(ar, "Invalid record length: %d\n",
record->hdr.len);
status = -EINVAL;
break;
@ -289,7 +288,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
case ATH10K_HTC_RECORD_CREDITS:
len = sizeof(struct ath10k_htc_credit_report);
if (record->hdr.len < len) {
ath10k_warn("Credit report too long\n");
ath10k_warn(ar, "Credit report too long\n");
status = -EINVAL;
break;
}
@ -299,7 +298,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
src_eid);
break;
default:
ath10k_warn("Unhandled record: id:%d length:%d\n",
ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
@ -313,7 +312,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
}
if (status)
ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
orig_buffer, orig_length);
return status;
@ -339,8 +338,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
eid = hdr->eid;
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn("HTC Rx: invalid eid %d\n", eid);
ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
@ -360,19 +359,19 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
payload_len = __le16_to_cpu(hdr->len);
if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
ath10k_warn("HTC rx frame too long, len: %zu\n",
ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
payload_len + sizeof(*hdr));
ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
}
if (skb->len < payload_len) {
ath10k_dbg(ATH10K_DBG_HTC,
ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Rx: insufficient length, got %d, expected %d\n",
skb->len, payload_len);
ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
"", hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
@ -388,7 +387,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
if ((trailer_len < min_len) ||
(trailer_len > payload_len)) {
ath10k_warn("Invalid trailer length: %d\n",
ath10k_warn(ar, "Invalid trailer length: %d\n",
trailer_len);
status = -EPROTO;
goto out;
@ -421,7 +420,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
* this is a fatal error, target should not be
* sending unsolicited messages on the ep 0
*/
ath10k_warn("HTC rx ctrl still processing\n");
ath10k_warn(ar, "HTC rx ctrl still processing\n");
status = -EINVAL;
complete(&htc->ctl_resp);
goto out;
@ -442,7 +441,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
goto out;
}
ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
@ -459,7 +458,7 @@ static void ath10k_htc_control_rx_complete(struct ath10k *ar,
{
/* This is unexpected. FW is not supposed to send regular rx on this
* endpoint. */
ath10k_warn("unexpected htc rx\n");
ath10k_warn(ar, "unexpected htc rx\n");
kfree_skb(skb);
}
@ -546,6 +545,7 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
int i, status = 0;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
@ -563,7 +563,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
* iomap writes unmasking PCI CE irqs aren't propagated
* properly in KVM PCI-passthrough sometimes.
*/
ath10k_warn("failed to receive control response completion, polling..\n");
ath10k_warn(ar, "failed to receive control response completion, polling..\n");
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
@ -576,12 +576,12 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
}
if (status < 0) {
ath10k_err("ctl_resp never came in (%d)\n", status);
ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
return status;
}
if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
ath10k_err("Invalid HTC ready msg len:%d\n",
ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
return -ECOMM;
}
@ -592,21 +592,21 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
credit_size = __le16_to_cpu(msg->ready.credit_size);
if (message_id != ATH10K_HTC_MSG_READY_ID) {
ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
htc->total_transmit_credits = credit_count;
htc->target_credit_size = credit_size;
ath10k_dbg(ATH10K_DBG_HTC,
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Target ready! transmit resources: %d size:%d\n",
htc->total_transmit_credits,
htc->target_credit_size);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
ath10k_err("Invalid credit size received\n");
ath10k_err(ar, "Invalid credit size received\n");
return -ECOMM;
}
@ -623,7 +623,8 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
/* connect fake service */
status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
if (status) {
ath10k_err("could not connect to htc service (%d)\n", status);
ath10k_err(ar, "could not connect to htc service (%d)\n",
status);
return status;
}
@ -634,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_msg *msg;
struct ath10k_htc_conn_svc *req_msg;
struct ath10k_htc_conn_svc_response resp_msg_dummy;
@ -659,13 +661,13 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb) {
ath10k_err("Failed to allocate HTC packet\n");
ath10k_err(ar, "Failed to allocate HTC packet\n");
return -ENOMEM;
}
@ -703,7 +705,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
if (status <= 0) {
if (status == 0)
status = -ETIMEDOUT;
ath10k_err("Service connect timeout: %d\n", status);
ath10k_err(ar, "Service connect timeout: %d\n", status);
return status;
}
@ -716,11 +718,11 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
(htc->control_resp_len < sizeof(msg->hdr) +
sizeof(msg->connect_service_response))) {
ath10k_err("Invalid resp message ID 0x%x", message_id);
ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
return -EPROTO;
}
ath10k_dbg(ATH10K_DBG_HTC,
ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
htc_service_name(service_id),
resp_msg->status, resp_msg->eid);
@ -729,7 +731,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
/* check response status */
if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
htc_service_name(service_id),
resp_msg->status);
return -EPROTO;
@ -780,18 +782,18 @@ setup:
if (status)
return status;
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath10k_dbg(ATH10K_DBG_BOOT,
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
@ -799,13 +801,13 @@ setup:
return status;
}
struct sk_buff *ath10k_htc_alloc_skb(int size)
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
struct sk_buff *skb;
skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
if (!skb) {
ath10k_warn("could not allocate HTC tx skb\n");
ath10k_warn(ar, "could not allocate HTC tx skb\n");
return NULL;
}
@ -813,13 +815,14 @@ struct sk_buff *ath10k_htc_alloc_skb(int size)
/* FW/HTC requires 4-byte aligned streams */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_warn("Unaligned HTC tx skb\n");
ath10k_warn(ar, "Unaligned HTC tx skb\n");
return skb;
}
int ath10k_htc_start(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
struct sk_buff *skb;
int status = 0;
struct ath10k_htc_msg *msg;
@ -835,7 +838,7 @@ int ath10k_htc_start(struct ath10k_htc *htc)
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
@ -846,13 +849,6 @@ int ath10k_htc_start(struct ath10k_htc *htc)
return 0;
}
void ath10k_htc_stop(struct ath10k_htc *htc)
{
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
}
/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
@ -862,7 +858,6 @@ int ath10k_htc_init(struct ath10k *ar)
spin_lock_init(&htc->tx_lock);
htc->stopped = false;
ath10k_htc_reset_endpoint_states(htc);
/* setup HIF layer callbacks */


@ -332,7 +332,7 @@ struct ath10k_htc {
struct ath10k *ar;
struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
/* protects endpoint and stopped fields */
/* protects endpoints */
spinlock_t tx_lock;
struct ath10k_htc_ops htc_ops;
@ -345,8 +345,6 @@ struct ath10k_htc {
int total_transmit_credits;
struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
int target_credit_size;
bool stopped;
};
int ath10k_htc_init(struct ath10k *ar);
@ -357,7 +355,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_resp *conn_resp);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
void ath10k_htc_stop(struct ath10k_htc *htc);
struct sk_buff *ath10k_htc_alloc_skb(int size);
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
#endif


@ -74,12 +74,14 @@ int ath10k_htt_init(struct ath10k *ar)
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n",
struct ath10k *ar = htt->ar;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
htt->target_version_major, htt->target_version_minor);
if (htt->target_version_major != 2 &&
htt->target_version_major != 3) {
ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
return -ENOTSUPP;
}
@ -89,6 +91,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
int ath10k_htt_setup(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int status;
init_completion(&htt->target_version_received);
@ -100,7 +103,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
status = wait_for_completion_timeout(&htt->target_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
if (status <= 0) {
ath10k_warn("htt version request timed out\n");
ath10k_warn(ar, "htt version request timed out\n");
return -ETIMEDOUT;
}


@ -271,13 +271,14 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int idx;
struct sk_buff *msdu;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_ring.fill_cnt == 0) {
ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
return NULL;
}
@ -311,6 +312,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff **tail_msdu,
u32 *attention)
{
struct ath10k *ar = htt->ar;
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
@ -318,7 +320,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n");
ath10k_warn(ar, "htt is confused. refusing rx\n");
return -1;
}
@ -331,7 +333,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
rx_desc = (struct htt_rx_desc *)msdu->data;
@ -354,7 +356,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
ath10k_htt_rx_free_msdu_chain(*head_msdu);
*head_msdu = NULL;
msdu = NULL;
ath10k_err("htt rx stopped. cannot recover\n");
ath10k_err(ar, "htt rx stopped. cannot recover\n");
htt->rx_confused = true;
break;
}
@ -429,7 +431,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
next->len + skb_tailroom(next),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"htt rx chained: ", next->data,
next->len + skb_tailroom(next));
@ -483,13 +485,14 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
dma_addr_t paddr;
void *vaddr;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn("htt rx ring size is not power of 2\n");
ath10k_warn(ar, "htt rx ring size is not power of 2\n");
return -EINVAL;
}
@ -550,7 +553,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt);
ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@ -572,7 +575,8 @@ err_netbuf:
return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_WEP40:
@ -588,11 +592,12 @@ static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
return 0;
}
ath10k_warn("unknown encryption type %d\n", type);
ath10k_warn(ar, "unknown encryption type %d\n", type);
return 0;
}
static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
@ -608,7 +613,7 @@ static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
return 8;
}
ath10k_warn("unknown encryption type %d\n", type);
ath10k_warn(ar, "unknown encryption type %d\n", type);
return 0;
}
@ -819,19 +824,55 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
return true;
}
static const char * const tid_to_ac[] = {
"BE",
"BK",
"BK",
"BE",
"VI",
"VI",
"VO",
"VO",
};
static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
u8 *qc;
int tid;
if (!ieee80211_is_data_qos(hdr->frame_control))
return "";
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
if (tid < 8)
snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
else
snprintf(out, size, "tid %d", tid);
return out;
}
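For reference, tid_to_ac[] above is the standard 802.11 TID to access category mapping (TIDs 0/3 best effort, 1/2 background, 4/5 video, 6/7 voice). A purely illustrative check of the decode performed by ath10k_get_tid(), with an invented QoS control value:

/* Illustration only: decode a TID the same way ath10k_get_tid() does.
 * A QoS control field of 0x0006 carries TID 6, labelled "VO" above.
 */
static void example_tid_decode(void)
{
	u8 qc0 = 0x06;					/* first QoS control octet */
	int tid = qc0 & IEEE80211_QOS_CTL_TID_MASK;	/* -> 6 */

	WARN_ON(strcmp(tid_to_ac[tid], "VO") != 0);
}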
static void ath10k_process_rx(struct ath10k *ar,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
struct ieee80211_rx_status *status;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
char tid[32];
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
ath10k_dbg(ATH10K_DBG_DATA,
"rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
ath10k_get_tid(hdr, tid, sizeof(tid)),
is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
"mcast" : "ucast",
(__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
@ -843,8 +884,9 @@ static void ath10k_process_rx(struct ath10k *ar,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
!!(status->flag & RX_FLAG_MMIC_ERROR));
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
@ -860,13 +902,14 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb_in)
{
struct ath10k *ar = htt->ar;
struct htt_rx_desc *rxd;
struct sk_buff *skb = skb_in;
struct sk_buff *first;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr;
u8 hdr_buf[64], addr[ETH_ALEN], *qos;
u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos;
unsigned int hdr_len;
rxd = (void *)skb->data - sizeof(*rxd);
@ -893,8 +936,8 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
/* First frame in an A-MSDU chain has more decapped data. */
if (skb == first) {
len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
4);
len += round_up(ath10k_htt_rx_crypto_param_len(ar,
enctype), 4);
decap_hdr += len;
}
@ -904,10 +947,11 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
/* pull decapped header and copy DA */
/* pull decapped header and copy SA & DA */
hdr = (struct ieee80211_hdr *)skb->data;
hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(da, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(sa, ieee80211_get_SA(hdr), ETH_ALEN);
skb_pull(skb, hdr_len);
/* push original 802.11 header */
@ -921,8 +965,11 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
qos = ieee80211_get_qos_ctl(hdr);
qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
/* original 802.11 header has a different DA */
memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
/* original 802.11 header has a different DA and in
* case of 4addr it may also have different SA
*/
memcpy(ieee80211_get_DA(hdr), da, ETH_ALEN);
memcpy(ieee80211_get_SA(hdr), sa, ETH_ALEN);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
/* strip ethernet header and insert decapped 802.11
@ -965,6 +1012,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
struct ath10k *ar = htt->ar;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
@ -974,7 +1022,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
	/* This shouldn't happen. If it does then it may be a FW bug. */
if (skb->next) {
ath10k_warn("htt rx received chained non A-MSDU frame\n");
ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n");
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
}
@ -1011,7 +1059,8 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
rfc1042 = hdr;
rfc1042 += roundup(hdr_len, 4);
rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
enctype), 4);
skb_pull(skb, sizeof(struct ethhdr));
memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
@ -1120,27 +1169,29 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
bool channel_set,
u32 attention)
{
struct ath10k *ar = htt->ar;
if (head->len == 0) {
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx dropping due to zero-len\n");
return false;
}
if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx dropping due to decrypt-err\n");
return false;
}
if (!channel_set) {
ath10k_warn("no channel configured; ignoring frame!\n");
ath10k_warn(ar, "no channel configured; ignoring frame!\n");
return false;
}
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
return false;
}
@ -1148,14 +1199,14 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_started) {
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
status);
return false;
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx CAC running\n");
return false;
}
@ -1166,6 +1217,7 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct htt_rx_desc *rxd;
@ -1211,7 +1263,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
rx_status);
}
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
@ -1233,7 +1285,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
&attention);
if (ret < 0) {
ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n",
ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
@ -1282,6 +1334,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag)
{
struct ath10k *ar = htt->ar;
struct sk_buff *msdu_head, *msdu_tail;
enum htt_rx_mpdu_encrypt_type enctype;
struct htt_rx_desc *rxd;
@ -1308,10 +1361,10 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
&attention);
spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
if (ret) {
ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
return;
@ -1328,7 +1381,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
RX_MSDU_START_INFO1_DECAP_FORMAT);
if (fmt != RX_MSDU_DECAP_RAW) {
ath10k_warn("we dont support non-raw fragmented rx yet\n");
ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
@ -1340,17 +1393,17 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
if (tkip_mic_err)
ath10k_warn("tkip mic error\n");
ath10k_warn(ar, "tkip mic error\n");
if (decrypt_err) {
ath10k_warn("decryption err in fragmented rx\n");
ath10k_warn(ar, "decryption err in fragmented rx\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
hdrlen = ieee80211_hdrlen(hdr->frame_control);
paramlen = ath10k_htt_rx_crypto_param_len(enctype);
paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);
/* It is more efficient to move the header than the payload */
memmove((void *)msdu_head->data + paramlen,
@ -1364,7 +1417,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
trim = 4;
/* remove crypto trailer */
trim += ath10k_htt_rx_crypto_tail_len(enctype);
trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);
/* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) &&
@ -1372,20 +1425,20 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
trim += 8;
if (trim > msdu_head->len) {
ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
skb_trim(msdu_head, msdu_head->len - trim);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
msdu_head->data, msdu_head->len);
ath10k_process_rx(htt->ar, rx_status, msdu_head);
end:
if (fw_desc_len > 0) {
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"expecting more fragmented rx in one indication %d\n",
fw_desc_len);
}
@ -1415,12 +1468,12 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
tx_done.discard = true;
break;
default:
ath10k_warn("unhandled tx completion status %d\n", status);
ath10k_warn(ar, "unhandled tx completion status %d\n", status);
tx_done.discard = true;
break;
}
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
@ -1441,14 +1494,14 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
tid = MS(info0, HTT_RX_BA_INFO0_TID);
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx addba tid %hu peer_id %hu size %hhu\n",
tid, peer_id, ev->window_size);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_warn("received addba event for invalid peer_id: %hu\n",
ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
@ -1456,13 +1509,13 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (!arvif) {
ath10k_warn("received addba event for invalid vdev_id: %u\n",
ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
peer->vdev_id);
spin_unlock_bh(&ar->data_lock);
return;
}
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx start rx ba session sta %pM tid %hu size %hhu\n",
peer->addr, tid, ev->window_size);
@ -1481,14 +1534,14 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
tid = MS(info0, HTT_RX_BA_INFO0_TID);
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx delba tid %hu peer_id %hu\n",
tid, peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_warn("received addba event for invalid peer_id: %hu\n",
ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
@ -1496,13 +1549,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (!arvif) {
ath10k_warn("received addba event for invalid vdev_id: %u\n",
ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
peer->vdev_id);
spin_unlock_bh(&ar->data_lock);
return;
}
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx stop rx ba session sta %pM tid %hu\n",
peer->addr, tid);
@ -1517,9 +1570,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
/* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_warn("unaligned htt message, expect trouble\n");
ath10k_warn(ar, "unaligned htt message, expect trouble\n");
ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
switch (resp->hdr.msg_type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
@ -1583,7 +1636,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"sec ind peer_id %d unicast %d type %d\n",
__le16_to_cpu(ev->peer_id),
!!(ev->flags & HTT_SECURITY_IS_UNICAST),
@ -1592,7 +1645,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
break;
@ -1609,7 +1662,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
* sends all tx frames as already inspected so this shouldn't
* happen unless fw has a bug.
*/
ath10k_warn("received an unexpected htt tx inspect event\n");
ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
break;
case HTT_T2H_MSG_TYPE_RX_ADDBA:
ath10k_htt_rx_addba(ar, resp);
@ -1624,9 +1677,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
default:
ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n",
resp->hdr.msg_type);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
};


@ -58,6 +58,7 @@ exit:
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int msdu_id;
lockdep_assert_held(&htt->tx_lock);
@ -67,24 +68,29 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
if (msdu_id == htt->max_num_pending_tx)
return -ENOBUFS;
ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
__set_bit(msdu_id, htt->used_msdu_ids);
return msdu_id;
}
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
struct ath10k *ar = htt->ar;
lockdep_assert_held(&htt->tx_lock);
if (!test_bit(msdu_id, htt->used_msdu_ids))
ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
msdu_id);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
__clear_bit(msdu_id, htt->used_msdu_ids);
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
@ -93,7 +99,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
else
htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@ -122,6 +128,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct htt_tx_done tx_done = {0};
int msdu_id;
@ -130,7 +137,7 @@ static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
tx_done.discard = 1;
@ -157,6 +164,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0;
@ -165,7 +173,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
len += sizeof(cmd->hdr);
len += sizeof(cmd->ver_req);
skb = ath10k_htc_alloc_skb(len);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
@ -184,6 +192,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
struct ath10k *ar = htt->ar;
struct htt_stats_req *req;
struct sk_buff *skb;
struct htt_cmd *cmd;
@ -192,7 +201,7 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
len += sizeof(cmd->hdr);
len += sizeof(cmd->stats_req);
skb = ath10k_htc_alloc_skb(len);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
@ -214,7 +223,8 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
ath10k_warn("failed to send htt type stats request: %d", ret);
ath10k_warn(ar, "failed to send htt type stats request: %d",
ret);
dev_kfree_skb_any(skb);
return ret;
}
@ -224,6 +234,7 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring *ring;
@ -242,7 +253,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(len);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
@ -311,6 +322,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu)
{
struct ath10k *ar = htt->ar;
struct htt_aggr_conf *aggr_conf;
struct sk_buff *skb;
struct htt_cmd *cmd;
@ -328,7 +340,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
len = sizeof(cmd->hdr);
len += sizeof(cmd->aggr_conf);
skb = ath10k_htc_alloc_skb(len);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
@ -340,7 +352,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
ath10k_dbg(ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
aggr_conf->max_num_amsdu_subframes,
aggr_conf->max_num_ampdu_subframes);
@ -355,7 +367,8 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
@ -382,7 +395,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(len);
txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
res = -ENOMEM;
goto err_free_msdu_id;
@ -429,7 +442,8 @@ err:
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
@ -545,11 +559,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
ath10k_dbg(ATH10K_DBG_HTT,
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
(u32)skb_cb->paddr, vdev_id, tid);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
sg_items[0].transfer_id = 0;


@ -28,16 +28,19 @@
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
/* includes also the null byte */
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
#define REG_DUMP_COUNT_QCA988X 60
struct ath10k_fw_ie {
__le32 id;
__le32 len;

Diff between files is not shown because of its large size.


@ -26,12 +26,14 @@ struct ath10k_generic_iter {
int ret;
};
struct ath10k *ath10k_mac_create(void);
struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
void ath10k_mac_unregister(struct ath10k *ar);
struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void __ath10k_scan_finish(struct ath10k *ar);
void ath10k_scan_finish(struct ath10k *ar);
void ath10k_scan_timeout_work(struct work_struct *work);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);

Diff between files is not shown because of its large size.


@ -23,9 +23,6 @@
#include "hw.h"
#include "ce.h"
/* FW dump area */
#define REG_DUMP_COUNT_QCA988X 60
/*
* maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
*/
@ -103,12 +100,12 @@ struct pcie_state {
* NOTE: Structure is shared between Host software and Target firmware!
*/
struct ce_pipe_config {
u32 pipenum;
u32 pipedir;
u32 nentries;
u32 nbytes_max;
u32 flags;
u32 reserved;
__le32 pipenum;
__le32 pipedir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
__le32 reserved;
};
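ce_pipe_config (like service_to_pipe below) is consumed by the target firmware as-is, so its members change from host-endian u32 to explicit __le32 and every writer now has to byte-swap. A hedged sketch of filling one entry after this change; the field values are invented for illustration:

/* Illustration only: populating one host/target shared pipe entry.
 * The numbers are made up; the point is the explicit __cpu_to_le32()
 * conversions the __le32 members now require.
 */
static void example_fill_pipe_config(struct ce_pipe_config *cfg)
{
	cfg->pipenum    = __cpu_to_le32(1);	/* CE pipe number */
	cfg->pipedir    = __cpu_to_le32(2);	/* e.g. host->target */
	cfg->nentries   = __cpu_to_le32(32);	/* ring entries */
	cfg->nbytes_max = __cpu_to_le32(2048);	/* max transfer size */
	cfg->flags      = __cpu_to_le32(0);
	cfg->reserved   = __cpu_to_le32(0);
}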
/*
@ -130,17 +127,9 @@ struct ce_pipe_config {
/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
u32 service_id;
u32 pipedir;
u32 pipenum;
};
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
__le32 service_id;
__le32 pipedir;
__le32 pipenum;
};
/* Per-pipe state. */
@ -169,8 +158,6 @@ struct ath10k_pci {
struct ath10k *ar;
void __iomem *mem;
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
/*
* Number of MSI interrupts granted, 0 --> using legacy PCI line
* interrupts.
@ -179,12 +166,6 @@ struct ath10k_pci {
struct tasklet_struct intr_tq;
struct tasklet_struct msi_fw_err;
struct tasklet_struct early_irq_tasklet;
int started;
atomic_t keep_awake_count;
bool verified_awake;
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
@ -198,27 +179,15 @@ struct ath10k_pci {
/* Map CE id to ce_state */
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct timer_list rx_post_retry;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
return ar->hif.priv;
}
static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
return (struct ath10k_pci *)ar->drv_priv;
}
#define ATH10K_PCI_RX_POST_RETRY_MS 50
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
@ -242,35 +211,17 @@ static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/*
* This API allows the Host to access Target registers directly
* and relatively efficiently over PCIe.
* This allows the Host to avoid extra overhead associated with
* sending a message to firmware and waiting for a response message
* from firmware, as is done on other interconnects.
/* Target exposes its registers for direct access. However before host can
* access them it needs to make sure the target is awake (ath10k_pci_wake,
* ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
* to sleep unless host tells it to (ath10k_pci_sleep).
*
* Yet there is some complexity with direct accesses because the
* Target's power state is not known a priori. The Host must issue
* special PCIe reads/writes in order to explicitly wake the Target
* and to verify that it is awake and will remain awake.
* If host tries to access target registers without waking it up it can
* scribble over host memory.
*
* Usage:
*
* Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
* These calls must be bracketed by ath10k_pci_wake and
* ath10k_pci_sleep. A single BEGIN/END pair is adequate for
* multiple READ/WRITE operations.
*
* Use ath10k_pci_wake to put the Target in a state in
* which it is legal for the Host to directly access it. This
* may involve waking the Target from a low power state, which
* may take up to 2Ms!
*
* Use ath10k_pci_sleep to tell the Target that as far as
* this code path is concerned, it no longer needs to remain
* directly accessible. BEGIN/END is under a reference counter;
* multiple code paths may issue BEGIN/END on a single targid.
* If target is asleep waking it up may take up to even 2ms.
*/
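The rewritten comment boils the old BEGIN/END description down to: wake the target, do the accesses, then let it sleep again. A rough sketch of that bracket follows; the ath10k_pci_wake()/ath10k_pci_sleep() signatures are assumptions based on the helpers the comment names, not code from this patch.

/* Hypothetical sketch of a bracketed register access; the wake/sleep
 * helper signatures are assumed, not taken from this patch.
 */
static int example_read_target_reg(struct ath10k *ar, u32 addr, u32 *val)
{
	int ret;

	ret = ath10k_pci_wake(ar);	/* make sure the target is awake */
	if (ret)
		return ret;

	*val = ath10k_pci_read32(ar, addr);	/* now safe to touch registers */

	ath10k_pci_sleep(ar);		/* let the target doze again */
	return 0;
}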
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
@ -296,25 +247,18 @@ static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}
int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
static inline int ath10k_pci_wake(struct ath10k *ar)
static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
return ath10k_do_pci_wake(ar);
return 0;
return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#endif /* _PCI_H_ */


@ -0,0 +1,561 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/relay.h>
#include "core.h"
#include "debug.h"
static void send_fft_sample(struct ath10k *ar,
const struct fft_sample_tlv *fft_sample_tlv)
{
int length;
if (!ar->spectral.rfs_chan_spec_scan)
return;
length = __be16_to_cpu(fft_sample_tlv->length) +
sizeof(*fft_sample_tlv);
relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
}
static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
u8 *data)
{
int dc_pos;
u8 max_exp;
dc_pos = bin_len / 2;
/* peak index outside of bins */
if (dc_pos < max_index || -dc_pos >= max_index)
return 0;
for (max_exp = 0; max_exp < 8; max_exp++) {
if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
break;
}
/* max_exp not found */
if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
return 0;
return max_exp;
}
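get_max_exp() works backwards from the fact that the stored 8-bit bins are the full-precision magnitudes right-shifted by max_exp: it tries each shift until the shifted peak magnitude matches the byte stored at the peak bin. A worked example with invented numbers:

/* Invented numbers: the report says the peak magnitude is 432 and the
 * stored peak bin holds 108.  432 >> 2 == 108, so max_exp is 2 and a
 * consumer can undo the scaling with bin << max_exp.
 */
static void example_max_exp_check(void)
{
	u16 max_magnitude = 432;	/* full-precision peak from the report */
	u8 stored_peak_bin = 108;	/* 8-bit value kept in the bin array */
	u8 max_exp = 2;

	WARN_ON((max_magnitude >> max_exp) != stored_peak_bin);
}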
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
struct fft_sample_ath10k *fft_sample;
u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
u32 reg0, reg1, nf_list1, nf_list2;
u8 chain_idx, *bins;
int dc_pos;
fft_sample = (struct fft_sample_ath10k *)&buf;
if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
fft_sample->tlv.length = __cpu_to_be16(length);
/* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
	* but the results/plots suggest that it's actually 22/44/88 MHz.
*/
switch (event->hdr.chan_width_mhz) {
case 20:
fft_sample->chan_width_mhz = 22;
break;
case 40:
fft_sample->chan_width_mhz = 44;
break;
case 80:
/* TODO: As experiments with an analogue sender and various
	* configurations (fft-sizes of 64/128/256 and 20/40/80 MHz)
	* show, the particular configuration of 80 MHz/64 bins does
	* not match with the other samples at all. Until the reason
* for that is found, don't report these samples.
*/
if (bin_len == 64)
return -EINVAL;
fft_sample->chan_width_mhz = 88;
break;
default:
fft_sample->chan_width_mhz = event->hdr.chan_width_mhz;
}
fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
fft_sample->rssi = event->hdr.rssi_combined;
total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
freq1 = __le16_to_cpu(event->hdr.freq1);
freq2 = __le16_to_cpu(event->hdr.freq2);
fft_sample->freq1 = __cpu_to_be16(freq1);
fft_sample->freq2 = __cpu_to_be16(freq2);
nf_list1 = __le32_to_cpu(event->hdr.nf_list_1);
nf_list2 = __le32_to_cpu(event->hdr.nf_list_2);
chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
switch (chain_idx) {
case 0:
fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu);
break;
case 1:
fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu);
break;
case 2:
fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu);
break;
case 3:
fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu);
break;
}
bins = (u8 *)fftr;
bins += sizeof(*fftr);
fft_sample->tsf = __cpu_to_be64(tsf);
/* max_exp has been directly reported by previous hardware (ath9k),
	* maybe it's possible to get it by other means?
*/
fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
bin_len, bins);
memcpy(fft_sample->data, bins, bin_len);
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
dc_pos = bin_len / 2;
fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
fft_sample->data[dc_pos - 1]) / 2;
send_fft_sample(ar, &fft_sample->tlv);
return 0;
}
static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
{
struct ath10k_vif *arvif;
lockdep_assert_held(&ar->conf_mutex);
if (list_empty(&ar->arvifs))
return NULL;
/* if there already is a vif doing spectral, return that. */
list_for_each_entry(arvif, &ar->arvifs, list)
if (arvif->spectral_enabled)
return arvif;
/* otherwise, return the first vif. */
return list_first_entry(&ar->arvifs, typeof(*arvif), list);
}
static int ath10k_spectral_scan_trigger(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int res;
int vdev_id;
lockdep_assert_held(&ar->conf_mutex);
arvif = ath10k_get_spectral_vdev(ar);
if (!arvif)
return -ENODEV;
vdev_id = arvif->vdev_id;
if (ar->spectral.mode == SPECTRAL_DISABLED)
return 0;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
WMI_SPECTRAL_ENABLE_CMD_ENABLE);
if (res < 0)
return res;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
WMI_SPECTRAL_ENABLE_CMD_ENABLE);
if (res < 0)
return res;
return 0;
}
static int ath10k_spectral_scan_config(struct ath10k *ar,
enum ath10k_spectral_mode mode)
{
struct wmi_vdev_spectral_conf_arg arg;
struct ath10k_vif *arvif;
int vdev_id, count, res = 0;
lockdep_assert_held(&ar->conf_mutex);
arvif = ath10k_get_spectral_vdev(ar);
if (!arvif)
return -ENODEV;
vdev_id = arvif->vdev_id;
arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
ar->spectral.mode = mode;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
WMI_SPECTRAL_ENABLE_CMD_DISABLE);
if (res < 0) {
ath10k_warn(ar, "failed to enable spectral scan: %d\n", res);
return res;
}
if (mode == SPECTRAL_DISABLED)
return 0;
if (mode == SPECTRAL_BACKGROUND)
count = WMI_SPECTRAL_COUNT_DEFAULT;
else
count = max_t(u8, 1, ar->spectral.config.count);
arg.vdev_id = vdev_id;
arg.scan_count = count;
arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
arg.scan_fft_size = ar->spectral.config.fft_size;
arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
if (res < 0) {
ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
return res;
}
return 0;
}
static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char *mode = "";
unsigned int len;
enum ath10k_spectral_mode spectral_mode;
mutex_lock(&ar->conf_mutex);
spectral_mode = ar->spectral.mode;
mutex_unlock(&ar->conf_mutex);
switch (spectral_mode) {
case SPECTRAL_DISABLED:
mode = "disable";
break;
case SPECTRAL_BACKGROUND:
mode = "background";
break;
case SPECTRAL_MANUAL:
mode = "manual";
break;
}
len = strlen(mode);
return simple_read_from_buffer(user_buf, count, ppos, mode, len);
}
static ssize_t write_file_spec_scan_ctl(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
ssize_t len;
int res;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
mutex_lock(&ar->conf_mutex);
if (strncmp("trigger", buf, 7) == 0) {
if (ar->spectral.mode == SPECTRAL_MANUAL ||
ar->spectral.mode == SPECTRAL_BACKGROUND) {
/* reset the configuration to adopt possibly changed
* debugfs parameters
*/
res = ath10k_spectral_scan_config(ar,
ar->spectral.mode);
if (res < 0) {
ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
res);
}
res = ath10k_spectral_scan_trigger(ar);
if (res < 0) {
ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
res);
}
} else {
res = -EINVAL;
}
} else if (strncmp("background", buf, 9) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
} else if (strncmp("manual", buf, 6) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
} else if (strncmp("disable", buf, 7) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
} else {
res = -EINVAL;
}
mutex_unlock(&ar->conf_mutex);
if (res < 0)
return res;
return count;
}
static const struct file_operations fops_spec_scan_ctl = {
.read = read_file_spec_scan_ctl,
.write = write_file_spec_scan_ctl,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t read_file_spectral_count(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len;
u8 spectral_count;
mutex_lock(&ar->conf_mutex);
spectral_count = ar->spectral.config.count;
mutex_unlock(&ar->conf_mutex);
len = sprintf(buf, "%d\n", spectral_count);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_spectral_count(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > 255)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ar->spectral.config.count = val;
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_spectral_count = {
.read = read_file_spectral_count,
.write = write_file_spectral_count,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t read_file_spectral_bins(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len, bins, fft_size, bin_scale;
mutex_lock(&ar->conf_mutex);
fft_size = ar->spectral.config.fft_size;
bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
bins = 1 << (fft_size - bin_scale);
mutex_unlock(&ar->conf_mutex);
len = sprintf(buf, "%d\n", bins);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_spectral_bins(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
if (!is_power_of_2(val))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ar->spectral.config.fft_size = ilog2(val);
ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_spectral_bins = {
.read = read_file_spectral_bins,
.write = write_file_spectral_bins,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
struct dentry *buf_file;
buf_file = debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
*is_global = 1;
return buf_file;
}
static int remove_buf_file_handler(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
static struct rchan_callbacks rfs_spec_scan_cb = {
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
int ath10k_spectral_start(struct ath10k *ar)
{
struct ath10k_vif *arvif;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list)
arvif->spectral_enabled = 0;
ar->spectral.mode = SPECTRAL_DISABLED;
ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
return 0;
}
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
{
if (!arvif->spectral_enabled)
return 0;
return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
}
int ath10k_spectral_create(struct ath10k *ar)
{
ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
ar->debug.debugfs_phy,
1024, 256,
&rfs_spec_scan_cb, NULL);
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spec_scan_ctl);
debugfs_create_file("spectral_count",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spectral_count);
debugfs_create_file("spectral_bins",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spectral_bins);
return 0;
}
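Everything user space needs is created here: the spectral_scan0 relay file for the samples plus the three control files. A minimal user-space sketch of driving this interface follows; the debugfs path is an assumption (it depends on where debugfs_phy lives, typically under /sys/kernel/debug/ieee80211/phyX/ath10k), not something this patch defines:

/* Illustrative user-space sketch; the directory below is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	write(fd, val, strlen(val));
	close(fd);
}

int main(void)
{
	const char *dir = "/sys/kernel/debug/ieee80211/phy0/ath10k";
	char path[256];

	snprintf(path, sizeof(path), "%s/spectral_bins", dir);
	write_str(path, "64");		/* request 64 FFT bins */

	snprintf(path, sizeof(path), "%s/spectral_scan_ctl", dir);
	write_str(path, "background");	/* arm background scanning */
	write_str(path, "trigger");	/* start emitting samples */

	/* FFT samples can now be read from spectral_scan0 (the relay file) */

	write_str(path, "disable");
	return 0;
}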
void ath10k_spectral_destroy(struct ath10k *ar)
{
if (ar->spectral.rfs_chan_spec_scan) {
relay_close(ar->spectral.rfs_chan_spec_scan);
ar->spectral.rfs_chan_spec_scan = NULL;
}
}

View file

@@ -0,0 +1,90 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef SPECTRAL_H
#define SPECTRAL_H
#include "../spectral_common.h"
/**
* struct ath10k_spec_scan - parameters for Atheros spectral scan
*
* @count: number of scan results requested for manual mode
* @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
*/
struct ath10k_spec_scan {
u8 count;
u8 fft_size;
};
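The formula above ties fft_size to the number of reported bins. With the defaults used by this patch (fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT = 7 and a bin scale of WMI_SPECTRAL_BIN_SCALE_DEFAULT = 1) that works out to 1 << (7 - 1) = 64 bins, the same computation read_file_spectral_bins() performs. A hypothetical helper (not part of the patch; it would also need the WMI defaults from wmi.h) expressing the relationship:

/* Hypothetical helper, for illustration only: number of bins a given
 * configuration requests, mirroring read_file_spectral_bins() in spectral.c.
 */
static inline unsigned int ath10k_spec_scan_num_bins(const struct ath10k_spec_scan *conf)
{
	return 1U << (conf->fft_size - WMI_SPECTRAL_BIN_SCALE_DEFAULT);
}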
/* enum ath10k_spectral_mode:
*
* @SPECTRAL_DISABLED: spectral mode is disabled
* @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
* something else.
* @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
* is performed manually.
*/
enum ath10k_spectral_mode {
SPECTRAL_DISABLED = 0,
SPECTRAL_BACKGROUND,
SPECTRAL_MANUAL,
};
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf);
int ath10k_spectral_start(struct ath10k *ar);
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
int ath10k_spectral_create(struct ath10k *ar);
void ath10k_spectral_destroy(struct ath10k *ar);
#else
static inline int
ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
return 0;
}
static inline int ath10k_spectral_start(struct ath10k *ar)
{
return 0;
}
static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
{
return 0;
}
static inline int ath10k_spectral_create(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_spectral_destroy(struct ath10k *ar)
{
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#endif /* SPECTRAL_H */

View file

@@ -32,14 +32,14 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
* offchan_tx_skb. */
spin_lock_bh(&ar->data_lock);
if (ar->offchan_tx_skb != skb) {
ath10k_warn("completed old offchannel frame\n");
ath10k_warn(ar, "completed old offchannel frame\n");
goto out;
}
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
@@ -47,18 +47,19 @@ out:
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct device *dev = htt->ar->dev;
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn("warning: msdu_id %d too big, ignoring\n",
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
}
@@ -182,7 +183,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
wake_up(&ar->peer_mapping_wq);
}
ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
set_bit(ev->peer_id, peer->peer_ids);
@@ -199,12 +200,12 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
ath10k_warn("peer-unmap-event: unknown peer id %d\n",
ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
ev->peer_id);
goto exit;
}
ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
clear_bit(ev->peer_id, peer->peer_ids);

The diff for this file is not shown because of its size. Load diff

View file

@@ -73,116 +73,279 @@ struct wmi_cmd_hdr {
#define HTC_PROTOCOL_VERSION 0x0002
#define WMI_PROTOCOL_VERSION 0x0002
enum wmi_service_id {
WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */
WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */
WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */
WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */
WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */
WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */
WMI_SERVICE_AP_UAPSD, /* uapsd on AP */
WMI_SERVICE_AP_DFS, /* DFS on AP */
WMI_SERVICE_11AC, /* supports 11ac */
WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/
WMI_SERVICE_PHYERR, /* PHY error */
WMI_SERVICE_BCN_FILTER, /* Beacon filter support */
WMI_SERVICE_RTT, /* RTT (round trip time) support */
WMI_SERVICE_RATECTRL, /* Rate-control */
WMI_SERVICE_WOW, /* WOW Support */
WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */
WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */
WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */
WMI_SERVICE_NLO, /* Network list offload service */
WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */
WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */
WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */
WMI_SERVICE_CHATTER, /* Chatter service */
WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */
WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */
WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */
WMI_SERVICE_GPIO, /* GPIO service */
WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */
WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */
WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */
WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */
WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */
enum wmi_service {
WMI_SERVICE_BEACON_OFFLOAD = 0,
WMI_SERVICE_SCAN_OFFLOAD,
WMI_SERVICE_ROAM_OFFLOAD,
WMI_SERVICE_BCN_MISS_OFFLOAD,
WMI_SERVICE_STA_PWRSAVE,
WMI_SERVICE_STA_ADVANCED_PWRSAVE,
WMI_SERVICE_AP_UAPSD,
WMI_SERVICE_AP_DFS,
WMI_SERVICE_11AC,
WMI_SERVICE_BLOCKACK,
WMI_SERVICE_PHYERR,
WMI_SERVICE_BCN_FILTER,
WMI_SERVICE_RTT,
WMI_SERVICE_RATECTRL,
WMI_SERVICE_WOW,
WMI_SERVICE_RATECTRL_CACHE,
WMI_SERVICE_IRAM_TIDS,
WMI_SERVICE_ARPNS_OFFLOAD,
WMI_SERVICE_NLO,
WMI_SERVICE_GTK_OFFLOAD,
WMI_SERVICE_SCAN_SCH,
WMI_SERVICE_CSA_OFFLOAD,
WMI_SERVICE_CHATTER,
WMI_SERVICE_COEX_FREQAVOID,
WMI_SERVICE_PACKET_POWER_SAVE,
WMI_SERVICE_FORCE_FW_HANG,
WMI_SERVICE_GPIO,
WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
WMI_SERVICE_STA_KEEP_ALIVE,
WMI_SERVICE_TX_ENCAP,
WMI_SERVICE_BURST,
WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
};
WMI_SERVICE_LAST,
WMI_MAX_SERVICE = 64 /* max service */
enum wmi_10x_service {
WMI_10X_SERVICE_BEACON_OFFLOAD = 0,
WMI_10X_SERVICE_SCAN_OFFLOAD,
WMI_10X_SERVICE_ROAM_OFFLOAD,
WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
WMI_10X_SERVICE_STA_PWRSAVE,
WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
WMI_10X_SERVICE_AP_UAPSD,
WMI_10X_SERVICE_AP_DFS,
WMI_10X_SERVICE_11AC,
WMI_10X_SERVICE_BLOCKACK,
WMI_10X_SERVICE_PHYERR,
WMI_10X_SERVICE_BCN_FILTER,
WMI_10X_SERVICE_RTT,
WMI_10X_SERVICE_RATECTRL,
WMI_10X_SERVICE_WOW,
WMI_10X_SERVICE_RATECTRL_CACHE,
WMI_10X_SERVICE_IRAM_TIDS,
WMI_10X_SERVICE_BURST,
/* introduced in 10.2 */
WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_10X_SERVICE_FORCE_FW_HANG,
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
};
enum wmi_main_service {
WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0,
WMI_MAIN_SERVICE_SCAN_OFFLOAD,
WMI_MAIN_SERVICE_ROAM_OFFLOAD,
WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
WMI_MAIN_SERVICE_STA_PWRSAVE,
WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
WMI_MAIN_SERVICE_AP_UAPSD,
WMI_MAIN_SERVICE_AP_DFS,
WMI_MAIN_SERVICE_11AC,
WMI_MAIN_SERVICE_BLOCKACK,
WMI_MAIN_SERVICE_PHYERR,
WMI_MAIN_SERVICE_BCN_FILTER,
WMI_MAIN_SERVICE_RTT,
WMI_MAIN_SERVICE_RATECTRL,
WMI_MAIN_SERVICE_WOW,
WMI_MAIN_SERVICE_RATECTRL_CACHE,
WMI_MAIN_SERVICE_IRAM_TIDS,
WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
WMI_MAIN_SERVICE_NLO,
WMI_MAIN_SERVICE_GTK_OFFLOAD,
WMI_MAIN_SERVICE_SCAN_SCH,
WMI_MAIN_SERVICE_CSA_OFFLOAD,
WMI_MAIN_SERVICE_CHATTER,
WMI_MAIN_SERVICE_COEX_FREQAVOID,
WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
WMI_MAIN_SERVICE_FORCE_FW_HANG,
WMI_MAIN_SERVICE_GPIO,
WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
WMI_MAIN_SERVICE_TX_ENCAP,
};
static inline char *wmi_service_name(int service_id)
{
#define SVCSTR(x) case x: return #x
switch (service_id) {
case WMI_SERVICE_BEACON_OFFLOAD:
return "BEACON_OFFLOAD";
case WMI_SERVICE_SCAN_OFFLOAD:
return "SCAN_OFFLOAD";
case WMI_SERVICE_ROAM_OFFLOAD:
return "ROAM_OFFLOAD";
case WMI_SERVICE_BCN_MISS_OFFLOAD:
return "BCN_MISS_OFFLOAD";
case WMI_SERVICE_STA_PWRSAVE:
return "STA_PWRSAVE";
case WMI_SERVICE_STA_ADVANCED_PWRSAVE:
return "STA_ADVANCED_PWRSAVE";
case WMI_SERVICE_AP_UAPSD:
return "AP_UAPSD";
case WMI_SERVICE_AP_DFS:
return "AP_DFS";
case WMI_SERVICE_11AC:
return "11AC";
case WMI_SERVICE_BLOCKACK:
return "BLOCKACK";
case WMI_SERVICE_PHYERR:
return "PHYERR";
case WMI_SERVICE_BCN_FILTER:
return "BCN_FILTER";
case WMI_SERVICE_RTT:
return "RTT";
case WMI_SERVICE_RATECTRL:
return "RATECTRL";
case WMI_SERVICE_WOW:
return "WOW";
case WMI_SERVICE_RATECTRL_CACHE:
return "RATECTRL CACHE";
case WMI_SERVICE_IRAM_TIDS:
return "IRAM TIDS";
case WMI_SERVICE_ARPNS_OFFLOAD:
return "ARPNS_OFFLOAD";
case WMI_SERVICE_NLO:
return "NLO";
case WMI_SERVICE_GTK_OFFLOAD:
return "GTK_OFFLOAD";
case WMI_SERVICE_SCAN_SCH:
return "SCAN_SCH";
case WMI_SERVICE_CSA_OFFLOAD:
return "CSA_OFFLOAD";
case WMI_SERVICE_CHATTER:
return "CHATTER";
case WMI_SERVICE_COEX_FREQAVOID:
return "COEX_FREQAVOID";
case WMI_SERVICE_PACKET_POWER_SAVE:
return "PACKET_POWER_SAVE";
case WMI_SERVICE_FORCE_FW_HANG:
return "FORCE FW HANG";
case WMI_SERVICE_GPIO:
return "GPIO";
case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM:
return "MODULATED DTIM";
case WMI_STA_UAPSD_BASIC_AUTO_TRIG:
return "BASIC UAPSD";
case WMI_STA_UAPSD_VAR_AUTO_TRIG:
return "VAR UAPSD";
case WMI_SERVICE_STA_KEEP_ALIVE:
return "STA KEEP ALIVE";
case WMI_SERVICE_TX_ENCAP:
return "TX ENCAP";
SVCSTR(WMI_SERVICE_BEACON_OFFLOAD);
SVCSTR(WMI_SERVICE_SCAN_OFFLOAD);
SVCSTR(WMI_SERVICE_ROAM_OFFLOAD);
SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD);
SVCSTR(WMI_SERVICE_STA_PWRSAVE);
SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE);
SVCSTR(WMI_SERVICE_AP_UAPSD);
SVCSTR(WMI_SERVICE_AP_DFS);
SVCSTR(WMI_SERVICE_11AC);
SVCSTR(WMI_SERVICE_BLOCKACK);
SVCSTR(WMI_SERVICE_PHYERR);
SVCSTR(WMI_SERVICE_BCN_FILTER);
SVCSTR(WMI_SERVICE_RTT);
SVCSTR(WMI_SERVICE_RATECTRL);
SVCSTR(WMI_SERVICE_WOW);
SVCSTR(WMI_SERVICE_RATECTRL_CACHE);
SVCSTR(WMI_SERVICE_IRAM_TIDS);
SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD);
SVCSTR(WMI_SERVICE_NLO);
SVCSTR(WMI_SERVICE_GTK_OFFLOAD);
SVCSTR(WMI_SERVICE_SCAN_SCH);
SVCSTR(WMI_SERVICE_CSA_OFFLOAD);
SVCSTR(WMI_SERVICE_CHATTER);
SVCSTR(WMI_SERVICE_COEX_FREQAVOID);
SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE);
SVCSTR(WMI_SERVICE_FORCE_FW_HANG);
SVCSTR(WMI_SERVICE_GPIO);
SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE);
SVCSTR(WMI_SERVICE_TX_ENCAP);
SVCSTR(WMI_SERVICE_BURST);
SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
default:
return "UNKNOWN SERVICE\n";
}
return NULL;
}
#undef SVCSTR
}
#define WMI_MAX_SERVICE 64
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
(__le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
BIT((svc_id)%(sizeof(u32))))
#define SVCMAP(x, y) \
do { \
if (WMI_SERVICE_IS_ENABLED((in), (x))) \
__set_bit(y, out); \
} while (0)
static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out)
{
SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
WMI_SERVICE_BEACON_OFFLOAD);
SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
WMI_SERVICE_SCAN_OFFLOAD);
SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
WMI_SERVICE_ROAM_OFFLOAD);
SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
WMI_SERVICE_BCN_MISS_OFFLOAD);
SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
WMI_SERVICE_STA_PWRSAVE);
SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
WMI_SERVICE_STA_ADVANCED_PWRSAVE);
SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
WMI_SERVICE_AP_UAPSD);
SVCMAP(WMI_10X_SERVICE_AP_DFS,
WMI_SERVICE_AP_DFS);
SVCMAP(WMI_10X_SERVICE_11AC,
WMI_SERVICE_11AC);
SVCMAP(WMI_10X_SERVICE_BLOCKACK,
WMI_SERVICE_BLOCKACK);
SVCMAP(WMI_10X_SERVICE_PHYERR,
WMI_SERVICE_PHYERR);
SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
WMI_SERVICE_BCN_FILTER);
SVCMAP(WMI_10X_SERVICE_RTT,
WMI_SERVICE_RTT);
SVCMAP(WMI_10X_SERVICE_RATECTRL,
WMI_SERVICE_RATECTRL);
SVCMAP(WMI_10X_SERVICE_WOW,
WMI_SERVICE_WOW);
SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
WMI_SERVICE_RATECTRL_CACHE);
SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
WMI_SERVICE_IRAM_TIDS);
SVCMAP(WMI_10X_SERVICE_BURST,
WMI_SERVICE_BURST);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
WMI_SERVICE_FORCE_FW_HANG);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out)
{
SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
WMI_SERVICE_BEACON_OFFLOAD);
SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
WMI_SERVICE_SCAN_OFFLOAD);
SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
WMI_SERVICE_ROAM_OFFLOAD);
SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
WMI_SERVICE_BCN_MISS_OFFLOAD);
SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
WMI_SERVICE_STA_PWRSAVE);
SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
WMI_SERVICE_STA_ADVANCED_PWRSAVE);
SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
WMI_SERVICE_AP_UAPSD);
SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
WMI_SERVICE_AP_DFS);
SVCMAP(WMI_MAIN_SERVICE_11AC,
WMI_SERVICE_11AC);
SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
WMI_SERVICE_BLOCKACK);
SVCMAP(WMI_MAIN_SERVICE_PHYERR,
WMI_SERVICE_PHYERR);
SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
WMI_SERVICE_BCN_FILTER);
SVCMAP(WMI_MAIN_SERVICE_RTT,
WMI_SERVICE_RTT);
SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
WMI_SERVICE_RATECTRL);
SVCMAP(WMI_MAIN_SERVICE_WOW,
WMI_SERVICE_WOW);
SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
WMI_SERVICE_RATECTRL_CACHE);
SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
WMI_SERVICE_IRAM_TIDS);
SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
WMI_SERVICE_ARPNS_OFFLOAD);
SVCMAP(WMI_MAIN_SERVICE_NLO,
WMI_SERVICE_NLO);
SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
WMI_SERVICE_GTK_OFFLOAD);
SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
WMI_SERVICE_SCAN_SCH);
SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
WMI_SERVICE_CSA_OFFLOAD);
SVCMAP(WMI_MAIN_SERVICE_CHATTER,
WMI_SERVICE_CHATTER);
SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
WMI_SERVICE_COEX_FREQAVOID);
SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
WMI_SERVICE_PACKET_POWER_SAVE);
SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
WMI_SERVICE_FORCE_FW_HANG);
SVCMAP(WMI_MAIN_SERVICE_GPIO,
WMI_SERVICE_GPIO);
SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
WMI_SERVICE_STA_KEEP_ALIVE);
SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
WMI_SERVICE_TX_ENCAP);
}
#undef SVCMAP
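Both *_svc_map() helpers are meant to translate the raw service bitmap carried in the firmware's service-ready event into the unified WMI_SERVICE_* bitmap that the rest of the driver tests against. A hedged usage sketch (the helper name is illustrative, not from this patch):

/* Illustrative only: map a 10.x firmware service bitmap into the unified
 * bitmap and test one of the abstracted services.
 */
static inline bool example_fw_supports_burst(const __le32 *fw_svc_bmap)
{
	DECLARE_BITMAP(svc_map, WMI_MAX_SERVICE) = { 0 };

	wmi_10x_svc_map(fw_svc_bmap, svc_map);

	return test_bit(WMI_SERVICE_BURST, svc_map);
}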
#define WMI_SERVICE_BM_SIZE \
((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32))
@@ -803,6 +966,159 @@ enum wmi_10x_event_id {
WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
};
enum wmi_10_2_cmd_id {
WMI_10_2_START_CMDID = 0x9000,
WMI_10_2_END_CMDID = 0x9FFF,
WMI_10_2_INIT_CMDID,
WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID,
WMI_10_2_STOP_SCAN_CMDID,
WMI_10_2_SCAN_CHAN_LIST_CMDID,
WMI_10_2_ECHO_CMDID,
WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
WMI_10_2_PDEV_SET_CHANNEL_CMDID,
WMI_10_2_PDEV_SET_PARAM_CMDID,
WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
WMI_10_2_VDEV_CREATE_CMDID,
WMI_10_2_VDEV_DELETE_CMDID,
WMI_10_2_VDEV_START_REQUEST_CMDID,
WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
WMI_10_2_VDEV_UP_CMDID,
WMI_10_2_VDEV_STOP_CMDID,
WMI_10_2_VDEV_DOWN_CMDID,
WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID,
WMI_10_2_VDEV_RESUME_RESPONSE_CMDID,
WMI_10_2_VDEV_SET_PARAM_CMDID,
WMI_10_2_VDEV_INSTALL_KEY_CMDID,
WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID,
WMI_10_2_PEER_CREATE_CMDID,
WMI_10_2_PEER_DELETE_CMDID,
WMI_10_2_PEER_FLUSH_TIDS_CMDID,
WMI_10_2_PEER_SET_PARAM_CMDID,
WMI_10_2_PEER_ASSOC_CMDID,
WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID,
WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
WMI_10_2_PEER_MCAST_GROUP_CMDID,
WMI_10_2_BCN_TX_CMDID,
WMI_10_2_BCN_PRB_TMPL_CMDID,
WMI_10_2_BCN_FILTER_RX_CMDID,
WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
WMI_10_2_MGMT_TX_CMDID,
WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
WMI_10_2_ADDBA_SEND_CMDID,
WMI_10_2_ADDBA_STATUS_CMDID,
WMI_10_2_DELBA_SEND_CMDID,
WMI_10_2_ADDBA_SET_RESP_CMDID,
WMI_10_2_SEND_SINGLEAMSDU_CMDID,
WMI_10_2_STA_POWERSAVE_MODE_CMDID,
WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
WMI_10_2_STA_MIMO_PS_MODE_CMDID,
WMI_10_2_DBGLOG_CFG_CMDID,
WMI_10_2_PDEV_DFS_ENABLE_CMDID,
WMI_10_2_PDEV_DFS_DISABLE_CMDID,
WMI_10_2_PDEV_QVIT_CMDID,
WMI_10_2_ROAM_SCAN_MODE,
WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
WMI_10_2_ROAM_SCAN_PERIOD,
WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
WMI_10_2_ROAM_AP_PROFILE,
WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
WMI_10_2_OFL_SCAN_PERIOD,
WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
WMI_10_2_P2P_GO_SET_BEACON_IE,
WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
WMI_10_2_AP_PS_PEER_PARAM_CMDID,
WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID,
WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
WMI_10_2_PDEV_SUSPEND_CMDID,
WMI_10_2_PDEV_RESUME_CMDID,
WMI_10_2_ADD_BCN_FILTER_CMDID,
WMI_10_2_RMV_BCN_FILTER_CMDID,
WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
WMI_10_2_WOW_ENABLE_CMDID,
WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
WMI_10_2_RTT_MEASREQ_CMDID,
WMI_10_2_RTT_TSF_CMDID,
WMI_10_2_RTT_KEEPALIVE_CMDID,
WMI_10_2_PDEV_SEND_BCN_CMDID,
WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
WMI_10_2_REQUEST_STATS_CMDID,
WMI_10_2_GPIO_CONFIG_CMDID,
WMI_10_2_GPIO_OUTPUT_CMDID,
WMI_10_2_VDEV_RATEMASK_CMDID,
WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID,
WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
WMI_10_2_FORCE_FW_HANG_CMDID,
WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
WMI_10_2_PDEV_SET_CTL_TABLE_CMDID,
WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
};
enum wmi_10_2_event_id {
WMI_10_2_SERVICE_READY_EVENTID = 0x8000,
WMI_10_2_READY_EVENTID,
WMI_10_2_DEBUG_MESG_EVENTID,
WMI_10_2_START_EVENTID = 0x9000,
WMI_10_2_END_EVENTID = 0x9FFF,
WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID,
WMI_10_2_ECHO_EVENTID,
WMI_10_2_UPDATE_STATS_EVENTID,
WMI_10_2_INST_RSSI_STATS_EVENTID,
WMI_10_2_VDEV_START_RESP_EVENTID,
WMI_10_2_VDEV_STANDBY_REQ_EVENTID,
WMI_10_2_VDEV_RESUME_REQ_EVENTID,
WMI_10_2_VDEV_STOPPED_EVENTID,
WMI_10_2_PEER_STA_KICKOUT_EVENTID,
WMI_10_2_HOST_SWBA_EVENTID,
WMI_10_2_TBTTOFFSET_UPDATE_EVENTID,
WMI_10_2_MGMT_RX_EVENTID,
WMI_10_2_CHAN_INFO_EVENTID,
WMI_10_2_PHYERR_EVENTID,
WMI_10_2_ROAM_EVENTID,
WMI_10_2_PROFILE_MATCH,
WMI_10_2_DEBUG_PRINT_EVENTID,
WMI_10_2_PDEV_QVIT_EVENTID,
WMI_10_2_WLAN_PROFILE_DATA_EVENTID,
WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID,
WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID,
WMI_10_2_RTT_ERROR_REPORT_EVENTID,
WMI_10_2_RTT_KEEPALIVE_EVENTID,
WMI_10_2_WOW_WAKEUP_HOST_EVENTID,
WMI_10_2_DCS_INTERFERENCE_EVENTID,
WMI_10_2_PDEV_TPC_CONFIG_EVENTID,
WMI_10_2_GPIO_INPUT_EVENTID,
WMI_10_2_PEER_RATECODE_LIST_EVENTID,
WMI_10_2_GENERIC_BUFFER_EVENTID,
WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
WMI_10_2_WDS_PEER_EVENTID,
WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
};
enum wmi_phy_mode {
MODE_11A = 0, /* 11a Mode */
MODE_11G = 1, /* 11b/g Mode */
@@ -1076,10 +1392,6 @@ struct wlan_host_mem_req {
__le32 num_units;
} __packed;
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
(1 << ((svc_id)%(sizeof(u32))))) != 0)
/*
* The following struct holds optional payload for
* wmi_service_ready_event,e.g., 11ac pass some of the
@@ -1551,6 +1863,16 @@ struct wmi_resource_config_10x {
__le32 max_frag_entries;
} __packed;
struct wmi_resource_config_10_2 {
struct wmi_resource_config_10x common;
__le32 max_peer_ext_stats;
__le32 smart_ant_cap; /* 0-disable, 1-enable */
__le32 bk_min_free;
__le32 be_min_free;
__le32 vi_min_free;
__le32 vo_min_free;
__le32 rx_batchmode; /* 0-disable, 1-enable */
} __packed;
#define NUM_UNITS_IS_NUM_VDEVS 0x1
#define NUM_UNITS_IS_NUM_PEERS 0x2
@@ -1588,11 +1910,28 @@ struct wmi_init_cmd_10x {
struct host_memory_chunk host_mem_chunks[1];
} __packed;
struct wmi_init_cmd_10_2 {
struct wmi_resource_config_10_2 resource_config;
__le32 num_host_mem_chunks;
/*
* variable number of host memory chunks.
* This should be the last element in the structure
*/
struct host_memory_chunk host_mem_chunks[1];
} __packed;
struct wmi_chan_list_entry {
__le16 freq;
u8 phy_mode; /* valid for 10.2 only */
u8 reserved;
} __packed;
/* TLV for channel list */
struct wmi_chan_list {
__le32 tag; /* WMI_CHAN_LIST_TAG */
__le32 num_chan;
__le32 channel_list[0];
struct wmi_chan_list_entry channel_list[0];
} __packed;
struct wmi_bssid_list {
@@ -1821,7 +2160,7 @@ struct wmi_start_scan_arg {
u32 n_bssids;
u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
u32 channels[64];
u16 channels[64];
struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
};
@@ -2067,6 +2406,7 @@ struct wmi_comb_phyerr_rx_event {
#define PHYERR_TLV_SIG 0xBB
#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9
struct phyerr_radar_report {
__le32 reg0; /* RADAR_REPORT_REG0_* */
@@ -2515,6 +2855,19 @@ enum wmi_10x_pdev_param {
WMI_10X_PDEV_PARAM_BURST_DUR,
/* Set Bursting Enable*/
WMI_10X_PDEV_PARAM_BURST_ENABLE,
/* following are available as of firmware 10.2 */
WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE,
WMI_10X_PDEV_PARAM_IGMPMLD_TID,
WMI_10X_PDEV_PARAM_ANTENNA_GAIN,
WMI_10X_PDEV_PARAM_RX_DECAP_MODE,
WMI_10X_PDEV_PARAM_RX_FILTER,
WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID,
WMI_10X_PDEV_PARAM_PROXY_STA_MODE,
WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
};
struct wmi_pdev_set_param_cmd {
@@ -3387,6 +3740,14 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
/* following are available as of firmware 10.2 */
WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE,
WMI_10X_VDEV_PARAM_CABQ_MAXDUR,
WMI_10X_VDEV_PARAM_MFPTEST_SET,
WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
WMI_10X_VDEV_PARAM_VHT_SGIMASK,
WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
};
/* slot time long */
@@ -3444,6 +3805,98 @@ struct wmi_vdev_simple_event {
/* unsupported VDEV combination */
#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2
/* TODO: please add more comments if you have in-depth information */
struct wmi_vdev_spectral_conf_cmd {
__le32 vdev_id;
/* number of fft samples to send (0 for infinite) */
__le32 scan_count;
__le32 scan_period;
__le32 scan_priority;
/* number of bins in the FFT: 2^(fft_size - bin_scale) */
__le32 scan_fft_size;
__le32 scan_gc_ena;
__le32 scan_restart_ena;
__le32 scan_noise_floor_ref;
__le32 scan_init_delay;
__le32 scan_nb_tone_thr;
__le32 scan_str_bin_thr;
__le32 scan_wb_rpt_mode;
__le32 scan_rssi_rpt_mode;
__le32 scan_rssi_thr;
__le32 scan_pwr_format;
/* rpt_mode: Format of FFT report to software for spectral scan
* triggered FFTs:
* 0: No FFT report (only spectral scan summary report)
* 1: 2-dword summary of metrics for each completed FFT + spectral
* scan summary report
* 2: 2-dword summary of metrics for each completed FFT +
* 1x-oversampled bins (in-band) per FFT + spectral scan summary
* report
* 3: 2-dword summary of metrics for each completed FFT +
* 2x-oversampled bins (all) per FFT + spectral scan summary
*/
__le32 scan_rpt_mode;
__le32 scan_bin_scale;
__le32 scan_dbm_adj;
__le32 scan_chn_mask;
} __packed;
struct wmi_vdev_spectral_conf_arg {
u32 vdev_id;
u32 scan_count;
u32 scan_period;
u32 scan_priority;
u32 scan_fft_size;
u32 scan_gc_ena;
u32 scan_restart_ena;
u32 scan_noise_floor_ref;
u32 scan_init_delay;
u32 scan_nb_tone_thr;
u32 scan_str_bin_thr;
u32 scan_wb_rpt_mode;
u32 scan_rssi_rpt_mode;
u32 scan_rssi_thr;
u32 scan_pwr_format;
u32 scan_rpt_mode;
u32 scan_bin_scale;
u32 scan_dbm_adj;
u32 scan_chn_mask;
};
#define WMI_SPECTRAL_ENABLE_DEFAULT 0
#define WMI_SPECTRAL_COUNT_DEFAULT 0
#define WMI_SPECTRAL_PERIOD_DEFAULT 35
#define WMI_SPECTRAL_PRIORITY_DEFAULT 1
#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
#define WMI_SPECTRAL_GC_ENA_DEFAULT 1
#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2
#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1
struct wmi_vdev_spectral_enable_cmd {
__le32 vdev_id;
__le32 trigger_cmd;
__le32 enable_cmd;
} __packed;
#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
/* Beacon processing related command and event structures */
struct wmi_bcn_tx_hdr {
__le32 vdev_id;
@@ -3470,6 +3923,11 @@ enum wmi_bcn_tx_ref_flags {
WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
};
/* TODO: It is unclear why "no antenna" works while any other seemingly valid
* chainmask yields no beacons on the air at all.
*/
#define WMI_BCN_TX_REF_DEF_ANTENNA 0
struct wmi_bcn_tx_ref_cmd {
__le32 vdev_id;
__le32 data_len;
@@ -3481,6 +3939,8 @@ struct wmi_bcn_tx_ref_cmd {
__le32 frame_control;
/* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
__le32 flags;
/* introduced in 10.2 */
__le32 antenna_mask;
} __packed;
/* Beacon filter */
@@ -4053,7 +4513,7 @@ struct wmi_peer_set_q_empty_callback_cmd {
/* Maximum listen interval supported by hw in units of beacon interval */
#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
struct wmi_peer_assoc_complete_cmd {
struct wmi_common_peer_assoc_complete_cmd {
struct wmi_mac_addr peer_macaddr;
__le32 vdev_id;
__le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
@@ -4071,11 +4531,30 @@ struct wmi_peer_assoc_complete_cmd {
__le32 peer_vht_caps;
__le32 peer_phymode;
struct wmi_vht_rate_set peer_vht_rates;
};
struct wmi_main_peer_assoc_complete_cmd {
struct wmi_common_peer_assoc_complete_cmd cmd;
/* HT Operation Element of the peer. Five bytes packed in a
* 2-element INT32 array and filled from LSB to MSB. */
__le32 peer_ht_info[2];
} __packed;
struct wmi_10_1_peer_assoc_complete_cmd {
struct wmi_common_peer_assoc_complete_cmd cmd;
} __packed;
#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0
#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f
#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4
#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0
struct wmi_10_2_peer_assoc_complete_cmd {
struct wmi_common_peer_assoc_complete_cmd cmd;
__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
} __packed;
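For 10.2 firmware the extra info0 word carries the peer's maximum MCS index and NSS, packed with the masks defined just above. A sketch of filling it (the helper name is illustrative, not from this patch):

/* Illustrative only: pack max MCS index and max NSS into info0 using the
 * WMI_PEER_ASSOC_INFO0_* masks above.
 */
static inline __le32 example_peer_assoc_10_2_info0(u32 max_mcs_idx, u32 max_nss)
{
	u32 info0 = 0;

	info0 |= (max_mcs_idx << WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB) &
		 WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK;
	info0 |= (max_nss << WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB) &
		 WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK;

	return __cpu_to_le32(info0);
}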
struct wmi_peer_assoc_complete_arg {
u8 addr[ETH_ALEN];
u32 vdev_id;
@@ -4290,6 +4769,10 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
u32 param_id, u32 param_value);
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
const struct wmi_vdev_install_key_arg *arg);
int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
const struct wmi_vdev_spectral_conf_arg *arg);
int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
u32 enable);
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]);
int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,

View file

@@ -1049,7 +1049,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
ar->hw.reserved_ram_size = le32_to_cpup(val);
ath6kl_dbg(ATH6KL_DBG_BOOT,
"found reserved ram size ie 0x%d\n",
"found reserved ram size ie %d\n",
ar->hw.reserved_ram_size);
break;
case ATH6KL_FW_IE_CAPABILITIES:

View file

@@ -225,7 +225,7 @@ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
ret = ath6kl_hif_diag_write32(ar, address, value);
if (ret) {
ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n",
ath6kl_err("failed to write 0x%x during diagnose window to 0x%x\n",
address, value);
return ret;
}

View file

@@ -1400,6 +1400,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
{},
};

View file

@@ -1229,26 +1229,7 @@ static struct usb_driver ath6kl_usb_driver = {
.disable_hub_initiated_lpm = 1,
};
static int ath6kl_usb_init(void)
{
int ret;
ret = usb_register(&ath6kl_usb_driver);
if (ret) {
ath6kl_err("usb registration failed: %d\n", ret);
return ret;
}
return 0;
}
static void ath6kl_usb_exit(void)
{
usb_deregister(&ath6kl_usb_driver);
}
module_init(ath6kl_usb_init);
module_exit(ath6kl_usb_exit);
module_usb_driver(ath6kl_usb_driver);
MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices");

View file

@@ -17,6 +17,8 @@
#ifndef SPECTRAL_H
#define SPECTRAL_H
#include "../spectral_common.h"
/* enum spectral_mode:
*
* @SPECTRAL_DISABLED: spectral mode is disabled
@@ -54,8 +56,6 @@ struct ath_ht20_mag_info {
u8 max_exp;
} __packed;
#define SPECTRAL_HT20_NUM_BINS 56
/* WARNING: don't actually use this struct! MAC may vary the amount of
* data by -1/+2. This struct is for reference only.
*/
@@ -83,8 +83,6 @@ struct ath_ht20_40_mag_info {
u8 max_exp;
} __packed;
#define SPECTRAL_HT20_40_NUM_BINS 128
/* WARNING: don't actually use this struct! MAC may vary the amount of
* data. This struct is for reference only.
*/
@@ -125,71 +123,6 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
return bins[0] & 0x3f;
}
/* FFT sample format given to userspace via debugfs.
*
* Please keep the type/length at the front position and change
* other fields after adding another sample type
*
* TODO: this might need rework when switching to nl80211-based
* interface.
*/
enum ath_fft_sample_type {
ATH_FFT_SAMPLE_HT20 = 1,
ATH_FFT_SAMPLE_HT20_40,
};
struct fft_sample_tlv {
u8 type; /* see ath_fft_sample */
__be16 length;
/* type dependent data follows */
} __packed;
struct fft_sample_ht20 {
struct fft_sample_tlv tlv;
u8 max_exp;
__be16 freq;
s8 rssi;
s8 noise;
__be16 max_magnitude;
u8 max_index;
u8 bitmap_weight;
__be64 tsf;
u8 data[SPECTRAL_HT20_NUM_BINS];
} __packed;
struct fft_sample_ht20_40 {
struct fft_sample_tlv tlv;
u8 channel_type;
__be16 freq;
s8 lower_rssi;
s8 upper_rssi;
__be64 tsf;
s8 lower_noise;
s8 upper_noise;
__be16 lower_max_magnitude;
__be16 upper_max_magnitude;
u8 lower_max_index;
u8 upper_max_index;
u8 lower_bitmap_weight;
u8 upper_bitmap_weight;
u8 max_exp;
u8 data[SPECTRAL_HT20_40_NUM_BINS];
} __packed;
void ath9k_spectral_init_debug(struct ath_softc *sc);
void ath9k_spectral_deinit_debug(struct ath_softc *sc);

View file

@@ -0,0 +1,113 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef SPECTRAL_COMMON_H
#define SPECTRAL_COMMON_H
#define SPECTRAL_HT20_NUM_BINS 56
#define SPECTRAL_HT20_40_NUM_BINS 128
/* TODO: could possibly be 512, but no samples this large
* could be acquired so far.
*/
#define SPECTRAL_ATH10K_MAX_NUM_BINS 256
/* FFT sample format given to userspace via debugfs.
*
* Please keep the type/length at the front position and change
* other fields after adding another sample type
*
* TODO: this might need rework when switching to nl80211-based
* interface.
*/
enum ath_fft_sample_type {
ATH_FFT_SAMPLE_HT20 = 1,
ATH_FFT_SAMPLE_HT20_40,
ATH_FFT_SAMPLE_ATH10K,
};
struct fft_sample_tlv {
u8 type; /* see ath_fft_sample */
__be16 length;
/* type dependent data follows */
} __packed;
struct fft_sample_ht20 {
struct fft_sample_tlv tlv;
u8 max_exp;
__be16 freq;
s8 rssi;
s8 noise;
__be16 max_magnitude;
u8 max_index;
u8 bitmap_weight;
__be64 tsf;
u8 data[SPECTRAL_HT20_NUM_BINS];
} __packed;
struct fft_sample_ht20_40 {
struct fft_sample_tlv tlv;
u8 channel_type;
__be16 freq;
s8 lower_rssi;
s8 upper_rssi;
__be64 tsf;
s8 lower_noise;
s8 upper_noise;
__be16 lower_max_magnitude;
__be16 upper_max_magnitude;
u8 lower_max_index;
u8 upper_max_index;
u8 lower_bitmap_weight;
u8 upper_bitmap_weight;
u8 max_exp;
u8 data[SPECTRAL_HT20_40_NUM_BINS];
} __packed;
struct fft_sample_ath10k {
struct fft_sample_tlv tlv;
u8 chan_width_mhz;
__be16 freq1;
__be16 freq2;
__be16 noise;
__be16 max_magnitude;
__be16 total_gain_db;
__be16 base_pwr_db;
__be64 tsf;
s8 max_index;
u8 rssi;
u8 relpwr_db;
u8 avgpwr_db;
u8 max_exp;
u8 data[0];
} __packed;
#endif /* SPECTRAL_COMMON_H */
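Samples of all three types arrive on the relay file as a stream of these TLVs, so a user-space reader only needs the type/length header to walk the stream. A minimal parsing sketch (user-space C, struct layout taken from the definitions above):

/* Illustrative user-space sketch: walk a buffer of fft_sample TLVs read from
 * the spectral_scan relay file. tlv.length counts the bytes that follow the
 * 3-byte type/length header.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint8_t type;		/* ath_fft_sample_type */
	uint16_t length;	/* big endian on the wire */
} __attribute__((packed));

static void walk_samples(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct tlv_hdr) <= len) {
		struct tlv_hdr hdr;
		uint16_t payload;

		memcpy(&hdr, buf + off, sizeof(hdr));
		payload = be16toh(hdr.length);

		if (off + sizeof(hdr) + payload > len)
			break;	/* truncated sample */

		if (hdr.type == 3)	/* ATH_FFT_SAMPLE_ATH10K */
			printf("ath10k sample, %u payload bytes\n", payload);

		off += sizeof(hdr) + payload;
	}
}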