net: ipa: introduce ipa_mem_find()

Introduce a new function that abstracts finding information about a
region in IPA-local memory, given its memory region ID.  For now it
simply uses the region ID as an index into the IPA memory array.
If the region is not defined, ipa_mem_find() returns a null pointer.

Update all code that accesses the ipa->mem[] array directly to use
ipa_mem_find() instead.  The return value must be checked for null
when optional memory regions are sought.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Alex Elder 2021-06-10 14:23:07 -05:00, committed by David S. Miller
Parent e9f5b2766e
Commit 5e3bc1e5d0
6 changed files with 56 additions and 35 deletions
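For reference, a minimal sketch of the caller pattern the commit message describes, based on the hunks below (the wrapper function name is hypothetical; ipa_mem_find() and the region IDs are those used in the patch). A required region's pointer is used directly, while an optional region's pointer is checked for NULL first:

/* Hypothetical illustration, not part of the patch: combine the modem
 * header region size with the optional AP header region size.
 */
static u32 example_header_size(struct ipa *ipa)
{
	const struct ipa_mem *mem;
	u32 size;

	/* Required region: assumed to be defined on every platform */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	size = mem->size;

	/* Optional region: may be absent, so check the return value */
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	return size;
}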


@@ -218,7 +218,7 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
/* The header memory area contains both the modem and AP header
* regions. The modem portion defines the address of the region.
*/
-mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
offset = mem->offset;
size = mem->size;
@@ -231,8 +231,10 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
return false;
}
-/* Add the size of the AP portion to the combined size */
-size += ipa->mem[IPA_MEM_AP_HEADER].size;
+/* Add the size of the AP portion (if defined) to the combined size */
+mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+if (mem)
+	size += mem->size;
/* Make sure the combined size fits in the IPA command */
if (size > size_max) {


@@ -26,12 +26,20 @@
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+	if (mem_id < IPA_MEM_COUNT)
+		return &ipa->mem[mem_id];
+	return NULL;
+}
/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
-const struct ipa_mem *mem = &ipa->mem[mem_id];
+const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr = ipa->zero_addr;
if (!mem->size)
@@ -61,6 +69,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
u16 size;
@@ -75,12 +84,16 @@ int ipa_mem_setup(struct ipa *ipa)
return -EBUSY;
}
-/* Initialize IPA-local header memory. The modem and AP header
- * regions are contiguous, and initialized together.
+/* Initialize IPA-local header memory. The AP header region, if
+ * present, is contiguous with and follows the modem header region,
+ * and they are initialized together.
 */
-offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
-size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
-size += ipa->mem[IPA_MEM_AP_HEADER].size;
+mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+offset = mem->offset;
+size = mem->size;
+mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+if (mem)
+	size += mem->size;
ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
@@ -91,7 +104,8 @@ int ipa_mem_setup(struct ipa *ipa)
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
-offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset;
+mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
+offset = ipa->mem_offset + mem->offset;
val = proc_cntxt_base_addr_encoded(ipa->version, offset);
iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
@@ -294,6 +308,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
void *virt;
@@ -334,11 +349,11 @@ int ipa_mem_config(struct ipa *ipa)
* space prior to the region's base address if indicated.
*/
for (i = 0; i < ipa->mem_count; i++) {
-const struct ipa_mem *mem = &ipa->mem[i];
u16 canary_count;
__le32 *canary;
/* Skip over undefined regions */
+mem = &ipa->mem[i];
if (!mem->offset && !mem->size)
continue;
@@ -361,8 +376,9 @@ int ipa_mem_config(struct ipa *ipa)
if (!ipa_cmd_data_valid(ipa))
goto err_dma_free;
/* Verify the microcontroller ring alignment (0 is OK too) */
if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
/* Verify the microcontroller ring alignment (if defined) */
mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
if (mem && mem->offset % 1024) {
dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
goto err_dma_free;
}
@@ -527,7 +543,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
* (in this case, the modem). An allocation from SMEM is persistent
* until the AP reboots; there is no way to free an allocated SMEM
* region. Allocation only reserves the space; to use it you need
* to "get" a pointer it (this implies no reference counting).
* to "get" a pointer it (this does not imply reference counting).
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/


@@ -90,6 +90,8 @@ struct ipa_mem {
u16 canary_count;
};
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id);
int ipa_mem_config(struct ipa *ipa);
void ipa_mem_deconfig(struct ipa *ipa);


@@ -298,32 +298,32 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.platform_type_valid = 1;
req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
-mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
if (mem->size) {
req.hdr_tbl_info_valid = 1;
req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
}
-mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
-mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
-mem = &ipa->mem[IPA_MEM_V4_FILTER];
+mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
-mem = &ipa->mem[IPA_MEM_V6_FILTER];
+mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER);
req.v6_filter_tbl_start_valid = 1;
req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
-mem = &ipa->mem[IPA_MEM_MODEM];
+mem = ipa_mem_find(ipa, IPA_MEM_MODEM);
if (mem->size) {
req.modem_mem_info_valid = 1;
req.modem_mem_info.start = ipa->mem_offset + mem->offset;
@@ -336,7 +336,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* skip_uc_load_valid and skip_uc_load are set above */
-mem = &ipa->mem[IPA_MEM_MODEM_PROC_CTX];
+mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
if (mem->size) {
req.hdr_proc_ctx_tbl_info_valid = 1;
req.hdr_proc_ctx_tbl_info.start =
@@ -347,7 +347,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* Nothing to report for the compression table (zip_tbl_info) */
-mem = &ipa->mem[IPA_MEM_V4_ROUTE_HASHED];
+mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE_HASHED);
if (mem->size) {
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
@@ -355,7 +355,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
-mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
if (mem->size) {
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
@@ -363,22 +363,21 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
-mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
if (mem->size) {
req.v4_hash_filter_tbl_start_valid = 1;
req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
-mem = &ipa->mem[IPA_MEM_V6_FILTER_HASHED];
+mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER_HASHED);
if (mem->size) {
req.v6_hash_filter_tbl_start_valid = 1;
req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
-/* None of the stats fields are valid (IPA v4.0 and above) */
+/* The stats fields are only valid for IPA v4.0+ */
if (ipa->version >= IPA_VERSION_4_0) {
-mem = &ipa->mem[IPA_MEM_STATS_QUOTA_MODEM];
+mem = ipa_mem_find(ipa, IPA_MEM_STATS_QUOTA_MODEM);
if (mem->size) {
req.hw_stats_quota_base_addr_valid = 1;
req.hw_stats_quota_base_addr =
@@ -387,8 +386,9 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.hw_stats_quota_size = ipa->mem_offset + mem->size;
}
-mem = &ipa->mem[IPA_MEM_STATS_DROP];
-if (mem->size) {
+/* If the DROP stats region is defined, include it */
+mem = ipa_mem_find(ipa, IPA_MEM_STATS_DROP);
+if (mem && mem->size) {
req.hw_stats_drop_base_addr_valid = 1;
req.hw_stats_drop_base_addr =
ipa->mem_offset + mem->offset;


@@ -152,7 +152,7 @@ static void ipa_table_validate_build(void)
static bool
ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
{
-const struct ipa_mem *mem = &ipa->mem[mem_id];
+const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
struct device *dev = &ipa->pdev->dev;
u32 size;
@@ -245,7 +245,7 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
u16 first, u16 count, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
-const struct ipa_mem *mem = &ipa->mem[mem_id];
+const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr;
u32 offset;
u16 size;
@@ -417,8 +417,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
enum ipa_mem_id hash_mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
-const struct ipa_mem *hash_mem = &ipa->mem[hash_mem_id];
-const struct ipa_mem *mem = &ipa->mem[mem_id];
+const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
+const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t hash_addr;
dma_addr_t addr;
u16 hash_count;


@@ -116,7 +116,8 @@ enum ipa_uc_event {
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
{
-u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
+const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_UC_SHARED);
+u32 offset = ipa->mem_offset + mem->offset;
return ipa->mem_virt + offset;
}