AU_LINUX_QSDK_FIG_TARGET_ALL.12.0.000.934

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iEYEABECAAYFAmHdvRUACgkQoUgPZYCpAfGJzwCg30nYvQrJq3E/xHKZYLH+douv
 FC4AnAtbviZk3saEpgeoeVI+NhiyHzwv
 =2sax
 -----END PGP SIGNATURE-----

Merge AU_LINUX_QSDK_FIG_TARGET_ALL.12.0.000.934 on remote branch

Change-Id: I744646b60004b5234404ea00137ec64335b1d36a
Signed-off-by: Linux Build Service Account <lnxbuild@localhost>
This commit is contained in:
Linux Build Service Account 2022-01-12 06:58:48 -07:00
commit 8146137ef7
23 changed files with 877 additions and 399 deletions

View file

@ -492,6 +492,28 @@ int qca_scm_part_info(void *cmd_buf,
return ret;
}
/*
 * qca_scm_dpr() - issue a Debug Policy Request (DPR) SCM call to the
 * secure world.
 *
 * @svc_id: SCM service identifier
 * @cmd_id: SCM command identifier
 * @buf:    caller-built argument block (see layout note below)
 * @len:    size of @buf, only used on the legacy (non-armv8) path
 *
 * Returns the SCM call status (0 on success).
 */
int qca_scm_dpr(u32 svc_id, u32 cmd_id, void *buf, size_t len)
{
	int ret = 0;
	uint32_t *status;

	if (is_scm_armv8())
	{
		struct qca_scm_desc desc = {0};

		/* One by-value argument: word 0 of @buf (the DPR blob
		 * address) is passed to TZ.
		 */
		desc.arginfo = QCA_SCM_ARGS(1, SCM_VAL);
		desc.args[0] = *((unsigned int *)buf);
		ret = scm_call_64(svc_id, cmd_id, &desc);
		/* Word 1 of @buf holds a pointer (stored as a u32) to a
		 * status word; write TZ's first return value through it.
		 * NOTE(review): the u32 -> pointer cast assumes a 32-bit
		 * build — confirm before reusing on 64-bit targets.
		 */
		status = (uint32_t *)(*(((uint32_t *)buf) + 1));
		*status = desc.ret[0];
	}
	else
	{
		/* Legacy SCM convention: pass the raw buffer through. */
		ret = scm_call(svc_id, cmd_id, buf, len, NULL, 0);
	}
	return ret;
}
int qca_scm_auth_kernel(void *cmd_buf,
size_t cmd_len)
{
@ -563,6 +585,8 @@ int qca_scm_secure_authenticate(void *cmd_buf, size_t cmd_len)
desc.args[2] = * (((unsigned long *)cmd_buf) + 2);
ret = scm_call_64(SCM_SVC_BOOT, SCM_CMD_SEC_AUTH, &desc);
if(!ret && desc.ret[0])
return SCM_ERROR;
}
else
{
@ -664,6 +688,10 @@ int qca_scm_part_info(void *cmd_buf, size_t cmd_len)
{
return 0;
}
/*
 * qca_scm_dpr() - stub for build configurations without the DPR SCM
 * implementation; reports success so callers proceed unchanged.
 * NOTE(review): presumably selected by an #ifdef outside this hunk —
 * confirm against the full file.
 */
int qca_scm_dpr(u32 svc_id, u32 cmd_id, void *buf, size_t len)
{
	return 0;
}
int qca_scm_auth_kernel(void *cmd_buf,
size_t cmd_len)
{

View file

@ -38,6 +38,11 @@
#include "fdt_info.h"
#include <ubi_uboot.h>
#include <command.h>
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
#include <sdhci.h>
#include <mmc.h>
#endif
#ifdef IPQ_UBI_VOL_WRITE_SUPPORT
static struct ubi_device *ubi;
@ -145,6 +150,12 @@ static struct smem *smem = (void *)(CONFIG_QCA_SMEM_BASE);
qca_smem_flash_info_t qca_smem_flash_info;
qca_smem_bootconfig_info_t qca_smem_bootconfig_info;
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
unsigned ipq_runtime_failsafe_status;
unsigned ipq_runtime_fs_skip_status_check = 0;
unsigned ipq_runtime_fs_feature_enabled = 0;
#endif
#ifdef CONFIG_SMEM_VERSION_C
#define SMEM_COMMON_HOST 0xFFFE
@ -496,10 +507,121 @@ int smem_bootconfig_info(void)
return 0;
}
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
/*
 * smem_runtime_failsafe_info() - read the runtime-failsafe status word
 * from SMEM and latch whether the feature is enabled.
 *
 * Sets ipq_runtime_fs_feature_enabled from the
 * IPQ_RUNTIME_FAILSAFE_ENABLED bit of the SMEM entry.  Always returns 0;
 * if the SMEM read fails the feature is simply disabled.
 */
int smem_runtime_failsafe_info(void)
{
	unsigned ret;

	ret = smem_read_alloc_entry(SMEM_RUNTIME_FAILSAFE_INFO,
		&ipq_runtime_failsafe_status, sizeof(ipq_runtime_failsafe_status));
	if (ret != 0) {
		printf("\nsmem: Failed to fetch the runtime failsafe status.." \
			"Disabling the feature.\n");
		ipq_runtime_fs_feature_enabled = 0;
		/* Fix: bail out here — the old code fell through and
		 * interpreted a status word that was never read.
		 */
		return 0;
	}

	if (ipq_runtime_failsafe_status & IPQ_RUNTIME_FAILSAFE_ENABLED) {
		printf("\nRuntime Failsafe Feature Enabled\n");
		ipq_runtime_fs_feature_enabled = 1;
	}
	return 0;
}
#endif
#ifndef CONFIG_SDHCI_SUPPORT
extern qca_mmc mmc_host;
#else
extern struct sdhci_host mmc_host;
#endif
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
/*
 * smem_update_bootconfig_to_flash() - persist the SMEM bootconfig table
 * to both BOOTCONFIG flash partitions.
 *
 * Runs from the init sequence when runtime failsafe is compiled in.
 * Writes only when the feature is enabled and either SBL flagged a
 * bootconfig update (IPQ_RUNTIME_FS_BOOTCONFIG_UPDATED) or U-Boot itself
 * modified the table (ipq_runtime_fs_skip_status_check set).
 *
 * Returns 0 when nothing needs writing, CMD_RET_SUCCESS on a successful
 * flash, a negative/CMD_RET_* code on failure.
 */
int smem_update_bootconfig_to_flash(void)
{
	unsigned i, j, len;
	uint32_t load_addr = 0;
	const char *part_name[] = {"0:BOOTCONFIG", "0:BOOTCONFIG1"};
	char runcmd[256];

	if (smem_runtime_failsafe_info() != 0)
		return -ENOMSG;

	if (ipq_runtime_fs_feature_enabled == 0)
		return 0;

	/* Update BOOTCONFIG in flash only if there is an update in SMEM by SBL */
	if (!ipq_runtime_fs_skip_status_check) {
		if (ipq_runtime_failsafe_status & IPQ_RUNTIME_FS_BOOTCONFIG_UPDATED) {
			printf("\nNonHLOS runtime hang detected: Partitions switched.\n");
		} else {
			return 0;
		}
	}

	/* Re-read the bootconfig table if it is not yet populated. */
	if (qca_smem_bootconfig_info.magic_start != _SMEM_DUAL_BOOTINFO_MAGIC_START) {
		if (smem_bootconfig_info() != 0)
			return -1;
	}

	fs_debug("\nFailsafe: SMEM bootinfo from SBL: ");
	for (j = 0; j < qca_smem_bootconfig_info.numaltpart; j++)
		fs_debug("\nPartition: %s primaryboot = %d\n",
			qca_smem_bootconfig_info.per_part_entry[j].name,
			qca_smem_bootconfig_info.per_part_entry[j].primaryboot);

	len = sizeof(part_name) / sizeof(part_name[0]);
	/* NOTE(review): pointer truncated to 32 bits — assumes a 32-bit
	 * U-Boot build; confirm before reusing on 64-bit targets.
	 */
	load_addr = (uint32_t)&qca_smem_bootconfig_info;

	for (i = 0; i < len; i++) {
		/* Fix: sizeof() yields size_t; print it via an explicit
		 * unsigned cast with %u instead of the mismatched %d.
		 */
		snprintf(runcmd, sizeof(runcmd),
			"setenv fileaddr 0x%x && setenv filesize %u && flash %s",
			load_addr, (unsigned int)sizeof(qca_smem_bootconfig_info),
			part_name[i]);
		if (run_command(runcmd, 0) != CMD_RET_SUCCESS)
			return CMD_RET_FAILURE;
	}
	return CMD_RET_SUCCESS;
}
/* Weak default: report "no HLOS crash"; SoC-specific board code
 * overrides this with a real TCSR cookie check.
 */
__weak int is_hlos_crashed(void)
{
	return 0;
}
/*
 * update_hlos_rootfs_primaryboot() - after an HLOS runtime hang, mark
 * the alternate "0:HLOS" and "rootfs" partitions as the boot source in
 * the SMEM bootconfig table (MMC/SPI flash only) and flag that U-Boot
 * itself changed the table.
 *
 * NOTE(review): primaryboot is forced to 1, not toggled, and SBL is said
 * to swap offsets for NAND — confirm intent for repeated crash cycles.
 */
void update_hlos_rootfs_primaryboot(void)
{
	unsigned int idx;
	qca_smem_flash_info_t *sfi = &qca_smem_flash_info;

	fs_debug("\nFailsafe: %s: HLOS bit is SET", __func__);
	printf("\nHLOS runtime hang detected: Switching Partitions.\n");

	for (idx = 0; idx < qca_smem_bootconfig_info.numaltpart; idx++) {
		/* Note: SBL swaps the offsets for NAND case */
		if (sfi->flash_type != SMEM_BOOT_MMC_FLASH &&
		    sfi->flash_type != SMEM_BOOT_SPI_FLASH)
			continue;

		if (strncmp("0:HLOS", qca_smem_bootconfig_info.per_part_entry[idx].name,
			    ALT_PART_NAME_LENGTH) == 0 ||
		    strncmp("rootfs", qca_smem_bootconfig_info.per_part_entry[idx].name,
			    ALT_PART_NAME_LENGTH) == 0)
			qca_smem_bootconfig_info.per_part_entry[idx].primaryboot = 1;
	}

	ipq_runtime_fs_skip_status_check = 1;
}
#endif
unsigned int get_rootfs_active_partition(void)
{
int i;
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
if (ipq_runtime_fs_feature_enabled && is_hlos_crashed()) {
update_hlos_rootfs_primaryboot();
smem_update_bootconfig_to_flash();
}
#endif
for (i = 0; i < qca_smem_bootconfig_info.numaltpart; i++) {
if (strncmp("rootfs", qca_smem_bootconfig_info.per_part_entry[i].name,
ALT_PART_NAME_LENGTH) == 0)

View file

@ -29,6 +29,7 @@
#define IPQ9574_EDMA_REG_RXDESC2FILL_MAP_0 0x14
#define IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1 0x18
#define IPQ9574_EDMA_REG_RXDESC2FILL_MAP_2 0x1c
#define IPQ9574_EDMA_REG_DMAR_CTRL 0x48
#define IPQ9574_EDMA_REG_MISC_INT_STAT 0x5c
#define IPQ9574_EDMA_REG_MISC_INT_MASK 0x60
#define IPQ9574_EDMA_REG_TXDESC2CMPL_MAP_0 0x8c
@ -50,6 +51,7 @@
#define IPQ9574_EDMA_REG_TXCMPL_CTRL(n) (0x79014 + (0x1000 * n))
#define IPQ9574_EDMA_REG_TX_INT_STAT(n) (0x99000 + (0x1000 * n))
#define IPQ9574_EDMA_REG_TX_INT_MASK(n) (0x99004 + (0x1000 * n))
#define IPQ9574_EDMA_REG_TX_MOD_TIMER(n) (0x9900c + (0x1000 * n))
#define IPQ9574_EDMA_REG_TX_INT_CTRL(n) (0x9900c + (0x1000 * n))
#define IPQ9574_EDMA_REG_RXFILL_BA(n) (0x29000 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RXFILL_PROD_IDX(n) (0x29004 + (0x1000 * n))
@ -66,13 +68,41 @@
#define IPQ9574_EDMA_REG_RXDESC_CTRL(n) (0x39018 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RXDESC_INT_STAT(n) (0x59000 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RXDESC_INT_MASK(n) (0x59004 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RX_MOD_TIMER(n) (0x59008 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RX_INT_CTRL(n) (0x5900c + (0x1000 * n))
#define IPQ9574_EDMA_QID2RID_TABLE_MEM(q) (0xb9000 + (0x4 * q))
/*
* EDMA_REG_DMAR_CTRL register
*/
#define IPQ9574_EDMA_DMAR_REQ_PRI_MASK 0x7
#define IPQ9574_EDMA_DMAR_REQ_PRI_SHIFT 0x0
#define IPQ9574_EDMA_DMAR_BURST_LEN_MASK 0x1
#define IPQ9574_EDMA_DMAR_BURST_LEN_SHIFT 3
#define IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK 0x1f
#define IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT 4
#define IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK 0x7
#define IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT 9
#define IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK 0x7
#define IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT 12
#define IPQ9574_EDMA_DMAR_REQ_PRI_SET(x) (((x) & IPQ9574_EDMA_DMAR_REQ_PRI_MASK) \
<< IPQ9574_EDMA_DMAR_REQ_PRI_SHIFT)
#define IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(x) (((x) & IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK) \
<< IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT)
#define IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(x) (((x) & IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK) \
<< IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT)
#define IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(x) (((x) & IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK) \
<< IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT)
#define IPQ9574_EDMA_DMAR_BURST_LEN_SET(x) (((x) & IPQ9574_EDMA_DMAR_BURST_LEN_MASK) \
<< IPQ9574_EDMA_DMAR_BURST_LEN_SHIFT)
#define IPQ9574_EDMA_BURST_LEN_ENABLE 0x0
/*
* EDMA_REG_PORT_CTRL register
*/
#define IPQ9574_EDMA_PORT_CTRL_EN 0x3
#define IPQ9574_EDMA_PORT_CTRL_EN 0x3
/*
* EDMA_REG_TXDESC_PROD_IDX register
@ -170,6 +200,18 @@
#define IPQ9574_EDMA_RXDESC_INT_MASK_PKT_INT 0x1
#define IPQ9574_EDMA_MASK_INT_DISABLE 0x0
/*
* EDMA_REG_RX_MOD_TIMER register
*/
#define IPQ9574_EDMA_RX_MOD_TIMER_INIT_MASK 0xffff
#define IPQ9574_EDMA_RX_MOD_TIMER_INIT_SHIFT 0
/*
* EDMA_REG_TX_MOD_TIMER register
*/
#define IPQ9574_EDMA_TX_MOD_TIMER_INIT_MASK 0xffff
#define IPQ9574_EDMA_TX_MOD_TIMER_INIT_SHIFT 0
/*
* TXDESC shift values
*/
@ -179,10 +221,27 @@
#define IPQ9574_EDMA_TXDESC_DATA_OFFSET_MASK 0xfff
#define IPQ9574_EDMA_TXDESC_DATA_LENGTH_SHIFT 0
#define IPQ9574_EDMA_TXDESC_DATA_LENGTH_MASK 0x3ffff
#define IPQ9574_EDMA_TXDESC_DATA_LENGTH_MASK 0x1ffff
#define IPQ9574_EDMA_TXDESC_SERVICE_CODE_SHIFT 16
#define IPQ9574_EDMA_TXDESC_SERVICE_CODE_MASK (0x1FF << IPQ9574_EDMA_TXDESC_SERVICE_CODE_SHIFT)
#define IPQ9574_EDMA_TXDESC_SERVICE_CODE_SET(x) (((x) << IPQ9574_EDMA_TXDESC_SERVICE_CODE_SHIFT) & IPQ9574_EDMA_TXDESC_SERVICE_CODE_MASK)
#define IPQ9574_EDMA_SC_BYPASS 1
#define IPQ9574_EDMA_DST_PORT_TYPE 2
#define IPQ9574_EDMA_DST_PORT_TYPE_SHIFT 28
#define IPQ9574_EDMA_DST_PORT_TYPE_MASK (0xf << IPQ9574_EDMA_DST_PORT_TYPE_SHIFT)
#define IPQ9574_EDMA_DST_PORT_ID_SHIFT 16
#define IPQ9574_EDMA_DST_PORT_ID_MASK (0xfff << IPQ9574_EDMA_DST_PORT_ID_SHIFT)
#define IPQ9574_EDMA_DST_PORT_TYPE_SET(x) (((x) << IPQ9574_EDMA_DST_PORT_TYPE_SHIFT) & IPQ9574_EDMA_DST_PORT_TYPE_MASK)
/* Fix: expand with the IPQ9574_-prefixed names defined above — the old
 * macro referenced unprefixed EDMA_DST_PORT_ID_SHIFT/MASK, which this
 * header (consistently IPQ9574_-prefixed) does not define.
 */
#define IPQ9574_EDMA_DST_PORT_ID_SET(x) (((x) << IPQ9574_EDMA_DST_PORT_ID_SHIFT) & IPQ9574_EDMA_DST_PORT_ID_MASK)
#define IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_PORTID 0x2000
#define IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_SHIFT 8
#define IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_MASK 0xf000
#define IPQ9574_EDMA_RXDESC_PORTNUM_BITS 0x0FFF
#define IPQ9574_EDMA_PREHDR_DSTINFO_PORTID_IND 0x20
#define IPQ9574_EDMA_PREHDR_PORTNUM_BITS 0x0fff
#define IPQ9574_EDMA_RING_DMA_MASK 0xffffffff
/*
@ -190,9 +249,14 @@
*/
#define IPQ9574_EDMA_RXDESC_PKT_SIZE_MASK 0x3ffff
#define IPQ9574_EDMA_RXDESC_PKT_SIZE_SHIFT 0
#define IPQ9574_EDMA_RXDESC_SRC_INFO_GET(x) (x & 0xFFFF)
#define IPQ9574_EDMA_RXDESC_RING_INT_STATUS_MASK 0x3
#define IPQ9574_EDMA_TXCMPL_RING_INT_STATUS_MASK 0x3
#define IPQ9574_EDMA_TXCMPL_RETMODE_OPAQUE 0x0
#define IPQ9574_EDMA_RXFILL_RING_INT_STATUS_MASK 0x1
#define IPQ9574_EDMA_TXCMPL_RING_INT_STATUS_MASK 0x3
#define IPQ9574_EDMA_TXCMPL_RETMODE_OPAQUE 0x0
#define IPQ9574_EDMA_TX_NE_INT_EN 0x2
#define IPQ9574_EDMA_RX_NE_INT_EN 0x2
#define IPQ9574_EDMA_TX_INITIAL_PROD_IDX 0x0
#endif /* __EDMA_REGS__ */

View file

@ -100,6 +100,8 @@ void dump_func(unsigned int dump_level);
int do_dumpqca_flash_data(const char *);
int do_dumpqca_usb_data(unsigned int dump_level);
int apps_iscrashed(void);
int is_hlos_crashed(void);
int ipq_read_tcsr_boot_misc(void);
int set_uuid_bootargs(char *boot_args, char *part_name, int buflen, bool gpt_flag);
int get_eth_mac_address(uchar *enetaddr, uint no_of_macs);

View file

@ -127,6 +127,7 @@ typedef struct
extern qca_smem_bootconfig_info_t qca_smem_bootconfig_info;
int smem_bootconfig_info(void);
int smem_update_bootconfig_to_flash(void);
unsigned int get_smem_spi_addr_len(void);
unsigned int get_rootfs_active_partition(void);
unsigned int get_mibib_active_partition(void);
@ -134,4 +135,6 @@ void qca_smem_part_to_mtdparts(char *mtdid, int len);
int ipq_smem_get_socinfo_cpu_type(uint32_t *cpu_type);
int ipq_smem_get_socinfo_version(uint32_t *version);
int ipq_smem_get_boot_flash(uint32_t *flash_type);
int write_to_flash(int flash_type, uint32_t address, uint32_t offset,
uint32_t part_size, uint32_t file_size, char *layout);
#endif

View file

@ -33,6 +33,7 @@
DECLARE_GLOBAL_DATA_PTR;
static struct tag *params;
extern unsigned ipq_runtime_fs_feature_enabled;
static ulong get_sp(void)
{
@ -277,6 +278,21 @@ struct aarch64_hdr {
/* Subcommand: GO */
static void boot_jump_linux(bootm_headers_t *images, int flag)
{
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
unsigned int cookie, ret;
if (ipq_runtime_fs_feature_enabled) {
cookie = ipq_read_tcsr_boot_misc();
cookie &= ~IPQ_FS_NONHLOS_BIT;
cookie |= IPQ_FS_HLOS_BIT;
fs_debug("\nFailsafe: %s: Clear NonHLOS bit and set HLOS bit\n", __func__);
ret = qca_scm_dload(cookie);
if (ret)
printf ("Error in setting HLOS failsafe bit\n");
}
#endif
#ifdef CONFIG_ARM64
void (*kernel_entry)(void *fdt_addr, void *res0, void *res1,
void *res2);

View file

@ -888,6 +888,11 @@ __weak void fdt_fixup_sdx65_gpio(void *blob)
return;
}
/* Weak default: no runtime-failsafe DT fixup; SoC-specific board code
 * overrides this when CONFIG_IPQ_RUNTIME_FAILSAFE support exists.
 */
__weak void fdt_fixup_runtime_failsafe(void *blob)
{
	return;
}
void set_mtdids(void)
{
char mtdids[256];
@ -1047,6 +1052,9 @@ int ft_board_setup(void *blob, bd_t *bd)
fdt_fixup_cpus_node(blob);
fdt_low_memory_fixup(blob);
fdt_fixup_qpic(blob);
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
fdt_fixup_runtime_failsafe(blob);
#endif
s = getenv("dload_warm_reset");
if (s)
fdt_fixup_set_dload_warm_reset(blob);

View file

@ -28,9 +28,11 @@
#include <i2c.h>
#include <dm.h>
#include <command.h>
#include <watchdog.h>
#define DLOAD_MAGIC_COOKIE 0x10
#define DLOAD_DISABLED 0x40
#define DLOAD_BITS 0xFF
#define TCSR_SOC_HW_VERSION_REG 0x194D000
@ -45,6 +47,8 @@ const char *del_node[] = {"uboot",
NULL};
const add_node_t add_fdt_node[] = {{}};
static int aq_phy_initialised;
extern unsigned ipq_runtime_fs_feature_enabled;
struct dumpinfo_t dumpinfo_n[] = {
/* TZ stores the DDR physical address at which it stores the
* APSS regs, UTCM copy dump. We will have the TZ IMEM
@ -133,6 +137,31 @@ void qca_serial_init(struct ipq_serial_platdata *plat)
return;
}
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
/*
 * fdt_fixup_runtime_failsafe() - tell HLOS, via the scm_restart_reason
 * device-tree node, whether the runtime failsafe feature is enabled.
 *
 * @blob: FDT blob being prepared for the kernel
 */
void fdt_fixup_runtime_failsafe(void *blob)
{
	int node_off, ret;
	const char *fs_node = {"/soc/qti,scm_restart_reason"};

	/* This fixup is for informing HLOS whether
	 * runtime failsafe feature is enabled or not
	 */
	node_off = fdt_path_offset(blob, fs_node);
	if (node_off < 0) {
		printf("%s: Failsafe: unable to find node '%s'\n",
			__func__, fs_node);
		return;
	}

	ret = fdt_setprop_u32(blob, node_off, "qti,runtime-failsafe",
				ipq_runtime_fs_feature_enabled);
	if (ret) {
		/* Fix: report the property actually being set — the old
		 * message named a non-existent 'ipq,runtime_failsafe'.
		 */
		printf("%s: Unable to set property 'qti,runtime-failsafe'\n",
			__func__);
		return;
	}
}
#endif
int do_pmic_reset()
{
struct udevice *bus, *dev;
@ -167,11 +196,26 @@ int do_pmic_reset()
return 0;
}
#ifdef CONFIG_HW_WATCHDOG
/* Service (pet) the hardware watchdog by writing the APCS WDT reset
 * register; called periodically by the U-Boot watchdog framework.
 */
void hw_watchdog_reset(void)
{
	writel(1, APCS_WDT_RST);
}
#endif
void reset_crashdump(void)
{
unsigned int ret = 0;
unsigned int cookie = 0;
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
cookie = ipq_read_tcsr_boot_misc();
fs_debug("\nFailsafe: %s: Clearing DLOAD and NonHLOS bits\n", __func__);
cookie &= ~(DLOAD_BITS);
cookie &= ~(IPQ_FS_NONHLOS_BIT);
#endif
qca_scm_sdi();
ret = qca_scm_dload(CLEAR_MAGIC);
ret = qca_scm_dload(cookie);
if (ret)
printf ("Error in reseting the Magic cookie\n");
return;
@ -803,11 +847,29 @@ __weak int ipq_get_tz_version(char *version_name, int buf_size)
return 1;
}
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
/* Return the raw boot-misc cookie word at CONFIG_IPQ6018_DMAGIC_ADDR
 * (dload magic / failsafe status bits shared with SBL and TZ).
 */
int ipq_read_tcsr_boot_misc(void)
{
	u32 *dmagic = (u32 *)CONFIG_IPQ6018_DMAGIC_ADDR;

	return *dmagic;
}
/* Return 1 when the HLOS-crash flag (IPQ_FS_HLOS_BIT) is set in the
 * boot-misc cookie word, 0 otherwise.
 */
int is_hlos_crashed(void)
{
	u32 *dmagic = (u32 *)CONFIG_IPQ6018_DMAGIC_ADDR;

	return (*dmagic & IPQ_FS_HLOS_BIT) ? 1 : 0;
}
#endif
int apps_iscrashed_crashdump_disabled(void)
{
u32 *dmagic = (u32 *)CONFIG_IPQ6018_DMAGIC_ADDR;
if (*dmagic == DLOAD_DISABLED)
if (*dmagic & DLOAD_DISABLED)
return 1;
return 0;
@ -817,7 +879,7 @@ int apps_iscrashed(void)
{
u32 *dmagic = (u32 *)CONFIG_IPQ6018_DMAGIC_ADDR;
if (*dmagic == DLOAD_MAGIC_COOKIE)
if (*dmagic & DLOAD_MAGIC_COOKIE)
return 1;
return 0;
@ -1383,8 +1445,12 @@ void fdt_fixup_set_qca_cold_reboot_enable(void *blob)
void fdt_fixup_wcss_rproc_for_atf(void *blob)
{
parse_fdt_fixup("/soc/qcom_q6v5_wcss@CD00000%qcom,nosecure%1", blob);
parse_fdt_fixup("/soc/qcom_q6v5_wcss@CD00000%qca,wcss-aon-reset-seq%1", blob);
if (fdt_path_offset(blob, "/soc/remoteproc@cd00000") >= 0)
parse_fdt_fixup("/soc/remoteproc@cd00000%qcom,nosecure%1", blob);
else {
parse_fdt_fixup("/soc/qcom_q6v5_wcss@CD00000%qcom,nosecure%1", blob);
parse_fdt_fixup("/soc/qcom_q6v5_wcss@CD00000%qca,wcss-aon-reset-seq%1", blob);
}
}
int get_soc_hw_version(void)

View file

@ -263,6 +263,8 @@
#define ARM_PSCI_TZ_FN_CPU_ON ARM_PSCI_TZ_FN(3)
#define ARM_PSCI_TZ_FN_AFFINITY_INFO ARM_PSCI_TZ_FN(4)
#define APCS_WDT_RST 0xB017004
unsigned int __invoke_psci_fn_smc(unsigned int, unsigned int,
unsigned int, unsigned int);
@ -365,9 +367,10 @@ typedef enum {
SMEM_BOOT_DUALPARTINFO = 503,
SMEM_PARTITION_TABLE_OFFSET = 504,
SMEM_SPI_FLASH_ADDR_LEN = 505,
SMEM_RUNTIME_FAILSAFE_INFO = 507,
SMEM_FIRST_VALID_TYPE = SMEM_SPINLOCK_ARRAY,
SMEM_LAST_VALID_TYPE = SMEM_SPI_FLASH_ADDR_LEN,
SMEM_MAX_SIZE = SMEM_SPI_FLASH_ADDR_LEN + 1,
SMEM_LAST_VALID_TYPE = SMEM_RUNTIME_FAILSAFE_INFO,
SMEM_MAX_SIZE = SMEM_RUNTIME_FAILSAFE_INFO + 1,
} smem_mem_type_t;
extern const char *rsvd_node;

View file

@ -1036,6 +1036,9 @@ int sdx65_attached(void)
void fdt_fixup_sdx65_gpio(void *blob)
{
unsigned int machid = gd->bd->bi_arch_number;
int offset, len;
u32 *data;
if (machid != 0x08010400)
return;
@ -1044,10 +1047,41 @@ void fdt_fixup_sdx65_gpio(void *blob)
parse_fdt_fixup("/soc/pci@20000000/%add%x65_attached", blob);
parse_fdt_fixup("/soc/pci@20000000/%x65_attached%1", blob);
parse_fdt_fixup("/soc/pci@20000000/pcie0_rp/qcom,mhi@0/%mdm2ap%21", blob);
parse_fdt_fixup("/soc/pci@20000000/pcie0_rp/qcom,mhi@0/%ap2mdm%45", blob);
parse_fdt_fixup("/soc/pinctrl@1000000/ap2mdm_status/%pins%?gpio45", blob);
parse_fdt_fixup("/soc/pinctrl@1000000/mdm2ap_e911_status/%pins%?gpio22", blob);
offset = fdt_path_offset(blob, "/soc/pci@20000000/pcie0_rp/qcom,mhi@0");
if(offset >= 0) {
data = (u32 *)fdt_getprop(blob, offset, "mdm2ap", &len);
if (data) {
parse_fdt_fixup("/soc/pci@20000000/pcie0_rp/qcom,mhi@0/%mdm2ap%21", blob);
} else {
data = (u32 *)fdt_getprop(blob, offset, "mdm2ap-gpio", &len);
if (data) {
data[1] = cpu_to_fdt32(21);
fdt_setprop_inplace(blob, offset, "mdm2ap-gpio", data, len);
}
}
data = (u32 *)fdt_getprop(blob, offset, "ap2mdm", &len);
if (data) {
parse_fdt_fixup("/soc/pci@20000000/pcie0_rp/qcom,mhi@0/%ap2mdm%45", blob);
} else {
data = (u32 *)fdt_getprop(blob, offset, "ap2mdm-gpio", &len);
if (data) {
data[1] = cpu_to_fdt32(45);
fdt_setprop_inplace(blob, offset, "ap2mdm-gpio", data, len);
}
}
}
if (fdt_path_offset(blob, "/soc/pinctrl@1000000/ap2mdm_status") >= 0)
parse_fdt_fixup("/soc/pinctrl@1000000/ap2mdm_status/%pins%?gpio45", blob);
else if (fdt_path_offset(blob, "/soc/pinctrl@1000000/pcie_sdx_pinmux/ap2mdm_status") >= 0)
parse_fdt_fixup("/soc/pinctrl@1000000/pcie_sdx_pinmux/ap2mdm_status/%pins%?gpio45", blob);
if (fdt_path_offset(blob, "/soc/pinctrl@1000000/mdm2ap_e911_status/") >= 0)
parse_fdt_fixup("/soc/pinctrl@1000000/mdm2ap_e911_status/%pins%?gpio22", blob);
else if (fdt_path_offset(blob, "/soc/pinctrl@1000000/pcie_sdx_pinmux/mdm2ap_e911_status") >= 0)
parse_fdt_fixup("/soc/pinctrl@1000000/pcie_sdx_pinmux/mdm2ap_e911_status/%pins%?gpio22", blob);
}
#ifdef CONFIG_USB_XHCI_IPQ

View file

@ -27,9 +27,11 @@
#include <mmc.h>
#include <sdhci.h>
#include <usb.h>
#include <watchdog.h>
#define DLOAD_MAGIC_COOKIE 0x10
#define DLOAD_DISABLED 0x40
#define DLOAD_BITS 0xFF
DECLARE_GLOBAL_DATA_PTR;
@ -39,6 +41,9 @@ extern int ipq_spi_init(u16);
unsigned int qpic_frequency = 0, qpic_phase = 0;
extern unsigned int qpic_training_offset;
extern unsigned ipq_runtime_fs_feature_enabled;
extern int qca_scm_dpr(u32, u32, void *, size_t);
void qca_serial_init(struct ipq_serial_platdata *plat)
{
@ -58,6 +63,31 @@ void qca_serial_init(struct ipq_serial_platdata *plat)
return;
}
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
/*
 * fdt_fixup_runtime_failsafe() - tell HLOS, via the scm_restart_reason
 * device-tree node, whether the runtime failsafe feature is enabled.
 *
 * @blob: FDT blob being prepared for the kernel
 */
void fdt_fixup_runtime_failsafe(void *blob)
{
	int node_off, ret;
	const char *fs_node = {"/soc/qti,scm_restart_reason"};

	/* This fixup is for informing HLOS whether
	 * runtime failsafe feature is enabled or not
	 */
	node_off = fdt_path_offset(blob, fs_node);
	if (node_off < 0) {
		printf("%s: Failsafe: unable to find node '%s'\n",
			__func__, fs_node);
		return;
	}

	ret = fdt_setprop_u32(blob, node_off, "qti,runtime-failsafe",
				ipq_runtime_fs_feature_enabled);
	if (ret) {
		/* Fix: report the property actually being set — the old
		 * message named a non-existent 'ipq,runtime_failsafe'.
		 */
		printf("%s: Unable to set property 'qti,runtime-failsafe'\n",
			__func__);
		return;
	}
}
#endif
void fdt_fixup_qpic(void *blob)
{
int node_off, ret;
@ -849,8 +879,16 @@ void set_function_select_as_mdc_mdio(void)
}
}
void nssnoc_init(void)
{
void nssnoc_init(void){
unsigned int gcc_nssnoc_memnoc_bfdcd_cmd_rcgr_addr = 0x1817004;
unsigned int gcc_qdss_at_cmd_rcgr_addr = 0x182D004;
writel(0x102, gcc_nssnoc_memnoc_bfdcd_cmd_rcgr_addr + 4);
writel(0x1, gcc_nssnoc_memnoc_bfdcd_cmd_rcgr_addr);
writel(0x109, gcc_qdss_at_cmd_rcgr_addr + 4);
writel(0x1, gcc_qdss_at_cmd_rcgr_addr);
/* Enable required NSSNOC clocks */
writel(readl(GCC_MEM_NOC_NSSNOC_CLK) | GCC_CBCR_CLK_ENABLE,
GCC_MEM_NOC_NSSNOC_CLK);
@ -1195,11 +1233,26 @@ unsigned long timer_read_counter(void)
return 0;
}
#ifdef CONFIG_HW_WATCHDOG
/* Service (pet) the hardware watchdog by writing the APCS WDT reset
 * register; called periodically by the U-Boot watchdog framework.
 */
void hw_watchdog_reset(void)
{
	writel(1, APCS_WDT_RST);
}
#endif
void reset_crashdump(void)
{
unsigned int ret = 0;
unsigned int cookie = 0;
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
cookie = ipq_read_tcsr_boot_misc();
fs_debug("\nFailsafe: %s: Clearing DLOAD and NonHLOS bits\n", __func__);
cookie &= ~(DLOAD_BITS);
cookie &= ~(IPQ_FS_NONHLOS_BIT);
#endif
qca_scm_sdi();
ret = qca_scm_dload(CLEAR_MAGIC);
ret = qca_scm_dload(cookie);
if (ret)
printf ("Error in reseting the Magic cookie\n");
return;
@ -1301,11 +1354,29 @@ struct dumpinfo_t dumpinfo_s[] = {
};
int dump_entries_s = ARRAY_SIZE(dumpinfo_s);
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
/* Return the raw boot-misc cookie word at CONFIG_IPQ9574_DMAGIC_ADDR
 * (dload magic / failsafe status bits shared with SBL and TZ).
 */
int ipq_read_tcsr_boot_misc(void)
{
	u32 *dmagic = (u32 *)CONFIG_IPQ9574_DMAGIC_ADDR;

	return *dmagic;
}
/* Return 1 when the HLOS-crash flag (IPQ_FS_HLOS_BIT) is set in the
 * boot-misc cookie word, 0 otherwise.
 */
int is_hlos_crashed(void)
{
	u32 *dmagic = (u32 *)CONFIG_IPQ9574_DMAGIC_ADDR;

	return (*dmagic & IPQ_FS_HLOS_BIT) ? 1 : 0;
}
#endif
int apps_iscrashed_crashdump_disabled(void)
{
u32 *dmagic = (u32 *)CONFIG_IPQ9574_DMAGIC_ADDR;
if (*dmagic == DLOAD_DISABLED)
if (*dmagic & DLOAD_DISABLED)
return 1;
return 0;
@ -1315,7 +1386,7 @@ int apps_iscrashed(void)
{
u32 *dmagic = (u32 *)CONFIG_IPQ9574_DMAGIC_ADDR;
if (*dmagic == DLOAD_MAGIC_COOKIE)
if (*dmagic & DLOAD_MAGIC_COOKIE)
return 1;
return 0;
@ -1448,3 +1519,49 @@ void ipq_uboot_fdt_fixup(void)
}
return;
}
/*
 * do_dpr() - 'dpr_execute' command handler: hand a Debug Policy Request
 * blob to the secure world via the DPR SCM call.
 *
 * Usage: dpr_execute [address].  With no address, the "fileaddr"
 * environment variable supplies the blob location.
 *
 * Returns the SCM status (0 on success) or CMD_RET_USAGE on bad input.
 */
int do_dpr(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[])
{
	int ret;
	char *loadaddr;
	uint32_t dpr_status = 0;
	struct dpr {
		uint32_t address;	/* DPR blob load address */
		uint32_t status;	/* pointer (stored as u32) to status word */
	} dpr;

	if (argc > 2)
		return CMD_RET_USAGE;

	if (argc == 2) {
		dpr.address = simple_strtoul(argv[1], NULL, 16);
	} else {
		loadaddr = getenv("fileaddr");
		if (loadaddr == NULL) {
			printf("No Arguments provided\n");
			printf("Command format: dpr_execute <address>\n");
			return CMD_RET_USAGE;
		}
		/* Fix: dropped the redundant second NULL test — the NULL
		 * case already returned above.
		 */
		dpr.address = simple_strtoul(loadaddr, NULL, 16);
	}

	/* NOTE(review): pointer truncated to 32 bits — assumes a 32-bit
	 * U-Boot build; confirm before reusing on 64-bit targets.
	 */
	dpr.status = (uint32_t)&dpr_status;

	ret = qca_scm_dpr(SCM_SVC_FUSE, TME_DPR_PROCESSING,
			&dpr, sizeof(dpr));

	if (ret || dpr_status) {
		/* Fix: dpr_status is unsigned — print with %u, not %d. */
		printf("%s: Error in DPR Processing (%d, %u)\n",
			__func__, ret, dpr_status);
	} else {
		/* Fix: spelling of "successful". */
		printf("DPR Process successful\n");
	}

	return ret;
}

U_BOOT_CMD(dpr_execute, 2, 0, do_dpr,
	"Debug Policy Request processing\n",
	"dpr_execute [address] - Processing dpr\n");

View file

@ -108,6 +108,7 @@
#define KERNEL_AUTH_CMD 0x1E
#define SCM_CMD_SEC_AUTH 0x1F
#define TME_DPR_PROCESSING 0x21
#ifdef CONFIG_SMEM_VERSION_C
#define RAM_PART_NAME_LENGTH 16
@ -122,6 +123,8 @@
#define ARM_PSCI_TZ_FN_CPU_ON ARM_PSCI_TZ_FN(3)
#define ARM_PSCI_TZ_FN_AFFINITY_INFO ARM_PSCI_TZ_FN(4)
#define APCS_WDT_RST 0xB017004
/*
* GCC-QPIC Registers
*/
@ -370,9 +373,10 @@ typedef enum {
SMEM_BOOT_DUALPARTINFO = 503,
SMEM_PARTITION_TABLE_OFFSET = 504,
SMEM_SPI_FLASH_ADDR_LEN = 505,
SMEM_RUNTIME_FAILSAFE_INFO = 507,
SMEM_FIRST_VALID_TYPE = SMEM_SPINLOCK_ARRAY,
SMEM_LAST_VALID_TYPE = SMEM_SPI_FLASH_ADDR_LEN,
SMEM_MAX_SIZE = SMEM_SPI_FLASH_ADDR_LEN + 1,
SMEM_LAST_VALID_TYPE = SMEM_RUNTIME_FAILSAFE_INFO,
SMEM_MAX_SIZE = SMEM_RUNTIME_FAILSAFE_INFO + 1,
} smem_mem_type_t;
#define MSM_SDC1_BASE 0x7800000

View file

@ -15,10 +15,12 @@
#include <post.h>
#include <u-boot/sha256.h>
#include <asm/arch-qca-common/qca_common.h>
#include <asm/arch-qca-common/scm.h>
DECLARE_GLOBAL_DATA_PTR;
extern int do_dumpqca_minimal_data(const char *offset);
extern unsigned ipq_runtime_fs_feature_enabled;
#define MAX_DELAY_STOP_STR 32
@ -225,6 +227,9 @@ static int abortboot_normal(int bootdelay)
{
int abort = 0;
unsigned long ts;
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
unsigned int cookie, ret;
#endif
#ifdef CONFIG_MENUPROMPT
printf(CONFIG_MENUPROMPT);
@ -255,6 +260,20 @@ static int abortboot_normal(int bootdelay)
if (tstc()) { /* we got a key press */
abort = 1; /* don't auto boot */
bootdelay = 0; /* no more delay */
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
if (ipq_runtime_fs_feature_enabled) {
cookie = ipq_read_tcsr_boot_misc();
cookie &= ~IPQ_FS_NONHLOS_BIT;
fs_debug("\nFailsafe: %s: Clear NonHLOS bit\n", __func__);
ret = qca_scm_dload(cookie);
if (ret)
printf ("Error in SCM to clear NonHLOS failsafe bit\n");
}
#endif
# ifdef CONFIG_MENUKEY
menukey = getc();
# else

View file

@ -64,6 +64,9 @@
#ifdef CONFIG_AVR32
#include <asm/arch/mmu.h>
#endif
#if defined(CONFIG_IPQ_RUNTIME_FAILSAFE)
#include <asm/arch-qca-common/smem.h>
#endif
DECLARE_GLOBAL_DATA_PTR;
@ -951,6 +954,9 @@ init_fnc_t init_sequence_r[] = {
#endif
#if defined(CONFIG_SPARC)
prom_init,
#endif
#if defined(CONFIG_IPQ_RUNTIME_FAILSAFE)
smem_update_bootconfig_to_flash,
#endif
run_main_loop,
};

View file

@ -33,7 +33,7 @@ extern struct sdhci_host mmc_host;
#define SMEM_PTN_NAME_MAX 16
static int write_to_flash(int flash_type, uint32_t address, uint32_t offset,
int write_to_flash(int flash_type, uint32_t address, uint32_t offset,
uint32_t part_size, uint32_t file_size, char *layout)
{

View file

@ -285,6 +285,22 @@ static struct qpic_serial_nand_params qpic_serial_nand_tbl[] = {
.check_quad_config = true,
.name = "GD5F2GQ5REYIG",
},
{
.id = { 0xc2, 0xa6 },
.page_size = 2048,
.erase_blk_size = 0x00020000,
.pgs_per_blk = 64,
.no_of_blocks = 2048,
.spare_size = 160,
.density = 0x08000000,
.otp_region = 0x2000,
.no_of_addr_cycle = 0x3,
.num_bits_ecc_correctability = 4,
.timing_mode_support = 0,
.quad_mode = true,
.check_quad_config = true,
.name = "MX35UF2GE4AD",
},
{
.id = { 0xc2, 0xb7 },
.page_size = 4096,

View file

@ -113,7 +113,6 @@ int ipq9574_edma_alloc_rx_buffer(struct ipq9574_edma_hw *ehw,
cons = reg_data & IPQ9574_EDMA_RXFILL_CONS_IDX_MASK;
while (1) {
counter = next;
if (++counter == rxfill_ring->count)
@ -141,8 +140,8 @@ int ipq9574_edma_alloc_rx_buffer(struct ipq9574_edma_hw *ehw,
* Save buffer size in RXFILL descriptor
*/
rxfill_desc->rdes1 |= (IPQ9574_EDMA_RX_BUFF_SIZE <<
IPQ9574_EDMA_RXFILL_BUF_SIZE_SHIFT) &
IPQ9574_EDMA_RXFILL_BUF_SIZE_MASK;
IPQ9574_EDMA_RXFILL_BUF_SIZE_SHIFT) &
IPQ9574_EDMA_RXFILL_BUF_SIZE_MASK;
num_alloc++;
next = counter;
}
@ -160,6 +159,8 @@ int ipq9574_edma_alloc_rx_buffer(struct ipq9574_edma_hw *ehw,
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXFILL_PROD_IDX(
rxfill_ring->id), reg_data);
rxfill_ring->prod_idx = reg_data;
pr_debug("%s: num_alloc = %d\n", __func__, num_alloc);
}
@ -272,13 +273,13 @@ uint32_t ipq9574_edma_clean_rx(struct ipq9574_edma_common_info *c_info,
/*
* Check src_info from Rx Descriptor
*/
if (IPQ9574_EDMA_RXPH_SRC_INFO_TYPE_GET(rxdesc_desc->rdes4) ==
IPQ9574_EDMA_PREHDR_DSTINFO_PORTID_IND) {
src_port_num = rxdesc_desc->rdes4 &
IPQ9574_EDMA_PREHDR_PORTNUM_BITS;
src_port_num = IPQ9574_EDMA_RXDESC_SRC_INFO_GET(rxdesc_desc->rdes4);
if ((src_port_num & IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_MASK) ==
IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_PORTID) {
src_port_num &= IPQ9574_EDMA_RXDESC_PORTNUM_BITS;
} else {
pr_warn("WARN: src_info_type:0x%x. Drop skb:%p\n",
IPQ9574_EDMA_RXPH_SRC_INFO_TYPE_GET(rxdesc_desc->rdes4),
(src_port_num & IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_MASK),
skb);
goto next_rx_desc;
}
@ -348,7 +349,7 @@ static int ipq9574_edma_rx_complete(struct ipq9574_edma_common_info *c_info)
}
/*
* Set RXDESC ring interrupt mask
* Enable RX EDMA interrupt masks
*/
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
@ -358,7 +359,7 @@ static int ipq9574_edma_rx_complete(struct ipq9574_edma_common_info *c_info)
}
/*
* Set TXCMPL ring interrupt mask
* Enable TX EDMA interrupt masks
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
@ -367,16 +368,6 @@ static int ipq9574_edma_rx_complete(struct ipq9574_edma_common_info *c_info)
ehw->txcmpl_intr_mask);
}
/*
* Set RXFILL ring interrupt mask
*/
for (i = 0; i < ehw->rxfill_rings; i++) {
rxfill_ring = &ehw->rxfill_ring[i];
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXFILL_INT_MASK(
rxfill_ring->id),
ehw->rxfill_intr_mask);
}
/*
* Read Misc intr status
*/
@ -450,8 +441,20 @@ static int ipq9574_eth_snd(struct eth_device *dev, void *packet, int length)
*/
txdesc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, hw_next_to_use);
txdesc->tdes1 = 0;
txdesc->tdes2 = 0;
txdesc->tdes3 = 0;
txdesc->tdes4 = 0;
txdesc->tdes5 = 0;
txdesc->tdes6 = 0;
txdesc->tdes7 = 0;
skb = (uchar *)txdesc->tdes0;
/*
* Set SC BYPASS
*/
txdesc->tdes1 |= IPQ9574_EDMA_TXDESC_SERVICE_CODE_SET(IPQ9574_EDMA_SC_BYPASS);
pr_debug("%s: txdesc->tdes0 (buffer addr) = 0x%x length = %d \
prod_idx = %d cons_idx = %d\n",
__func__, txdesc->tdes0, length,
@ -464,9 +467,15 @@ static int ipq9574_eth_snd(struct eth_device *dev, void *packet, int length)
#else
/*
* Populate Tx dst info, port id is macid in dp_dev
* We have separate netdev for each port in Kernel but that is not the
* case in U-Boot.
* This part needs to be fixed to support multiple ports in non bridged
* mode during when all the ports are currently under same netdev.
*
* Currently mac port no. is fixed as 3 for the purpose of testing
*/
txdesc->tdes4 |= (((IPQ9574_EDMA_PREHDR_DSTINFO_PORTID_IND << 8) |
(IPQ9574_EDMA_MAC_PORT_NO & 0x0fff)) << 16);
txdesc->tdes4 |= (IPQ9574_EDMA_DST_PORT_TYPE_SET(IPQ9574_EDMA_DST_PORT_TYPE) |
IPQ9574_EDMA_DST_PORT_ID_SET(IPQ9574_EDMA_MAC_PORT_NO));
#endif
/*
@ -600,11 +609,9 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
{
struct ipq9574_edma_txcmpl_ring *txcmpl_ring;
struct ipq9574_edma_txdesc_ring *txdesc_ring;
struct ipq9574_edma_sec_txdesc_ring *sec_txdesc_ring;
struct ipq9574_edma_rxfill_ring *rxfill_ring;
struct ipq9574_edma_rxdesc_ring *rxdesc_ring;
struct ipq9574_edma_sec_rxdesc_ring *sec_rxdesc_ring;
struct ipq9574_edma_txdesc_desc *tx_desc;
struct ipq9574_edma_txdesc_desc *txdesc_desc;
struct ipq9574_edma_rxfill_desc *rxfill_desc;
int i, j, index;
void *tx_buf;
@ -640,6 +647,9 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
return -ENOMEM;
}
/*
* Allocate buffers for each of the desc
*/
for (j = 0; j < rxfill_ring->count; j++) {
rxfill_desc = IPQ9574_EDMA_RXFILL_DESC(rxfill_ring, j);
rxfill_desc->rdes0 = virt_to_phys(rx_buf);
@ -652,26 +662,6 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
}
}
/*
* Allocate secondary RxDesc ring descriptors
*/
for (i = 0; i < ehw->sec_rxdesc_rings; i++) {
sec_rxdesc_ring = &ehw->sec_rxdesc_ring[i];
sec_rxdesc_ring->count = EDMA_RING_SIZE;
sec_rxdesc_ring->id = ehw->sec_rxdesc_ring_start + i;
sec_rxdesc_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_RX_SEC_DESC_SIZE * sec_rxdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (sec_rxdesc_ring->desc == NULL) {
pr_info("%s: sec_rxdesc_ring->desc alloc error\n", __func__);
return -ENOMEM;
}
sec_rxdesc_ring->dma = virt_to_phys(sec_rxdesc_ring->desc);
pr_debug("sec rxdesc ring id = %d, sec rxdesc ring ptr = %p, sec rxdesc ring dma = %u\n",
sec_rxdesc_ring->id, sec_rxdesc_ring->desc, (unsigned int)
sec_rxdesc_ring->dma);
}
/*
* Allocate RxDesc ring descriptors
*/
@ -685,7 +675,6 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
* Number of fill rings are lesser than the descriptor rings
* Share the fill rings across descriptor rings.
*/
index = ehw->rxfill_ring_start + (i % ehw->rxfill_rings);
rxdesc_ring->rxfill =
&ehw->rxfill_ring[index - ehw->rxfill_ring_start];
@ -694,7 +683,6 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
rxdesc_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_RXDESC_DESC_SIZE * rxdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (rxdesc_ring->desc == NULL) {
pr_info("%s: rxdesc_ring->desc alloc error\n", __func__);
return -ENOMEM;
@ -703,47 +691,21 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
pr_debug("rxdesc ring id = %d, rxdesc ring ptr = %p, rxdesc ring dma = %u\n",
rxdesc_ring->id, rxdesc_ring->desc, (unsigned int)
rxdesc_ring->dma);
}
/*
* Allocate TxCmpl ring descriptors
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
txcmpl_ring->count = EDMA_RING_SIZE;
txcmpl_ring->id = ehw->txcmpl_ring_start + i;
txcmpl_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_TXCMPL_DESC_SIZE * txcmpl_ring->count,
/*
* Allocate secondary Rx ring descriptors
*/
rxdesc_ring->sdesc = (void *)noncached_alloc(
IPQ9574_EDMA_RX_SEC_DESC_SIZE * rxdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (txcmpl_ring->desc == NULL) {
pr_info("%s: txcmpl_ring->desc alloc error\n", __func__);
if (rxdesc_ring->sdesc == NULL) {
pr_info("%s: rxdesc_ring->sdesc alloc error\n", __func__);
return -ENOMEM;
}
txcmpl_ring->dma = virt_to_phys(txcmpl_ring->desc);
pr_debug("txcmpl ring id = %d, txcmpl ring ptr = %p, txcmpl ring dma = %u\n",
txcmpl_ring->id, txcmpl_ring->desc, (unsigned int)
txcmpl_ring->dma);
}
/*
* Allocate secondary TxDesc ring descriptors
*/
for (i = 0; i < ehw->sec_txdesc_rings; i++) {
sec_txdesc_ring = &ehw->sec_txdesc_ring[i];
sec_txdesc_ring->count = EDMA_RING_SIZE;
sec_txdesc_ring->id = ehw->sec_txdesc_ring_start + i;
sec_txdesc_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_TX_SEC_DESC_SIZE * sec_txdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (sec_txdesc_ring->desc == NULL) {
pr_info("%s: sec_txdesc_ring->desc alloc error\n", __func__);
return -ENOMEM;
}
sec_txdesc_ring->dma = virt_to_phys(sec_txdesc_ring->desc);
pr_debug("sec txdesc ring id = %d, sec txdesc ring ptr = %p, sec txdesc ring dma = %u\n",
sec_txdesc_ring->id, sec_txdesc_ring->desc, (unsigned int)
sec_txdesc_ring->dma);
rxdesc_ring->sdma = virt_to_phys(rxdesc_ring->sdesc);
pr_debug("sec rxdesc ring id = %d, sec rxdesc ring ptr = %p, sec rxdesc ring dma = %u\n",
rxdesc_ring->id, rxdesc_ring->sdesc, (unsigned int)
rxdesc_ring->sdma);
}
/*
@ -774,21 +736,60 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
return -ENOMEM;
}
/*
* Allocate buffers for each of the desc
*/
for (j = 0; j < txdesc_ring->count; j++) {
tx_desc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, j);
tx_desc->tdes0 = virt_to_phys(tx_buf);
tx_desc->tdes1 = 0;
tx_desc->tdes2 = 0;
tx_desc->tdes3 = 0;
tx_desc->tdes4 = 0;
tx_desc->tdes5 = 0;
tx_desc->tdes6 = 0;
tx_desc->tdes7 = 0;
txdesc_desc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, j);
txdesc_desc->tdes0 = virt_to_phys(tx_buf);
txdesc_desc->tdes1 = 0;
txdesc_desc->tdes2 = 0;
txdesc_desc->tdes3 = 0;
txdesc_desc->tdes4 = 0;
txdesc_desc->tdes5 = 0;
txdesc_desc->tdes6 = 0;
txdesc_desc->tdes7 = 0;
tx_buf += IPQ9574_EDMA_TX_BUFF_SIZE;
pr_debug("Ring %d: txdesc ring dis0 ptr = %p, txdesc ring dis0 dma = %u\n",
j, tx_desc, (unsigned int)tx_desc->tdes0);
j, txdesc_desc, (unsigned int)txdesc_desc->tdes0);
}
/*
* Allocate secondary Tx ring descriptors
*/
txdesc_ring->sdesc = (void *)noncached_alloc(
IPQ9574_EDMA_TX_SEC_DESC_SIZE * txdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (txdesc_ring->sdesc == NULL) {
pr_info("%s: txdesc_ring->sdesc alloc error\n", __func__);
return -ENOMEM;
}
txdesc_ring->sdma = virt_to_phys(txdesc_ring->sdesc);
pr_debug("txdesc sec desc ring id = %d, txdesc ring ptr = %p, txdesc ring dma = %u\n",
txdesc_ring->id, txdesc_ring->sdesc, (unsigned int)
txdesc_ring->sdma);
}
/*
* Allocate TxCmpl ring descriptors
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
txcmpl_ring->count = EDMA_RING_SIZE;
txcmpl_ring->id = ehw->txcmpl_ring_start + i;
txcmpl_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_TXCMPL_DESC_SIZE * txcmpl_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (txcmpl_ring->desc == NULL) {
pr_info("%s: txcmpl_ring->desc alloc error\n", __func__);
return -ENOMEM;
}
txcmpl_ring->dma = virt_to_phys(txcmpl_ring->desc);
pr_debug("txcmpl ring id = %d, txcmpl ring ptr = %p, txcmpl ring dma = %u\n",
txcmpl_ring->id, txcmpl_ring->desc, (unsigned int)
txcmpl_ring->dma);
}
pr_info("%s: successfull\n", __func__);
@ -807,13 +808,18 @@ static void ipq9574_edma_free_desc(struct ipq9574_edma_common_info *c_info)
struct ipq9574_edma_txdesc_ring *txdesc_ring;
struct ipq9574_edma_rxfill_ring *rxfill_ring;
struct ipq9574_edma_rxdesc_ring *rxdesc_ring;
struct ipq9574_edma_txdesc_desc *tx_desc;
struct ipq9574_edma_txdesc_desc *txdesc_desc;
struct ipq9574_edma_rxfill_desc *rxfill_desc;
int i;
for (i = 0; i < ehw->rxfill_rings; i++) {
rxfill_ring = &ehw->rxfill_ring[i];
if (rxfill_ring->desc)
if (rxfill_ring->desc) {
rxfill_desc = IPQ9574_EDMA_RXFILL_DESC(rxfill_ring, 0);
if (rxfill_desc->rdes0)
ipq9574_free_mem((void *)rxfill_desc->rdes0);
ipq9574_free_mem(rxfill_ring->desc);
}
}
for (i = 0; i < ehw->rxdesc_rings; i++) {
@ -832,9 +838,9 @@ static void ipq9574_edma_free_desc(struct ipq9574_edma_common_info *c_info)
for (i = 0; i < ehw->txdesc_rings; i++) {
txdesc_ring = &ehw->txdesc_ring[i];
if (txdesc_ring->desc) {
tx_desc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, 0);
if (tx_desc->tdes0)
ipq9574_free_mem((void *)tx_desc->tdes0);
txdesc_desc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, 0);
if (txdesc_desc->tdes0)
ipq9574_free_mem((void *)txdesc_desc->tdes0);
ipq9574_free_mem(txdesc_ring->desc);
}
}
@ -898,15 +904,15 @@ static void ipq9574_edma_disable_intr(struct ipq9574_edma_hw *ehw)
/*
* Disable interrupts
*/
for (i = 0; i < IPQ9574_EDMA_MAX_TXCMPL_RINGS; i++)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TX_INT_MASK(i), 0);
for (i = 0; i < IPQ9574_EDMA_MAX_RXFILL_RINGS; i++)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXFILL_INT_MASK(i), 0);
for (i = 0; i < IPQ9574_EDMA_MAX_RXDESC_RINGS; i++)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RX_INT_CTRL(i), 0);
for (i = 0; i < IPQ9574_EDMA_MAX_TXCMPL_RINGS; i++)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TX_INT_MASK(i), 0);
/*
* Clear MISC interrupt mask
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_MISC_INT_MASK,
IPQ9574_EDMA_MASK_INT_DISABLE);
}
@ -1305,6 +1311,37 @@ static int ipq9574_edma_wr_macaddr(struct eth_device *dev)
static void ipq9574_eth_halt(struct eth_device *dev)
{
pr_debug("\n\n*****GMAC0 info*****\n");
pr_debug("GMAC0 RXPAUSE(0x3a001044):%x\n", readl(0x3a001044));
pr_debug("GMAC0 TXPAUSE(0x3a0010A4):%x\n", readl(0x3a0010A4));
pr_debug("GMAC0 RXGOODBYTE_L(0x3a001084):%x\n", readl(0x3a001084));
pr_debug("GMAC0 RXGOODBYTE_H(0x3a001088):%x\n", readl(0x3a001088));
pr_debug("GMAC0 RXBADBYTE_L(0x3a00108c):%x\n", readl(0x3a00108c));
pr_debug("GMAC0 RXBADBYTE_H(0x3a001090):%x\n", readl(0x3a001090));
pr_debug("\n\n*****GMAC1 info*****\n");
pr_debug("GMAC1 RXPAUSE(0x3a001244):%x\n", readl(0x3a001244));
pr_debug("GMAC1 TXPAUSE(0x3a0012A4):%x\n", readl(0x3a0012A4));
pr_debug("GMAC1 RXGOODBYTE_L(0x3a001284):%x\n", readl(0x3a001284));
pr_debug("GMAC1 RXGOODBYTE_H(0x3a001288):%x\n", readl(0x3a001288));
pr_debug("GMAC1 RXBADBYTE_L(0x3a00128c):%x\n", readl(0x3a00128c));
pr_debug("GMAC1 RXBADBYTE_H(0x3a001290):%x\n", readl(0x3a001290));
pr_debug("\n\n*****GMAC2 info*****\n");
pr_debug("GMAC2 RXPAUSE(0x3a001444):%x\n", readl(0x3a001444));
pr_debug("GMAC2 TXPAUSE(0x3a0014A4):%x\n", readl(0x3a0014A4));
pr_debug("GMAC2 RXGOODBYTE_L(0x3a001484):%x\n", readl(0x3a001484));
pr_debug("GMAC2 RXGOODBYTE_H(0x3a001488):%x\n", readl(0x3a001488));
pr_debug("GMAC2 RXBADBYTE_L(0x3a00148c):%x\n", readl(0x3a00148c));
pr_debug("GMAC2 RXBADBYTE_H(0x3a001490):%x\n", readl(0x3a001490));
pr_debug("\n\n*****GMAC3 info*****\n");
pr_debug("GMAC3 RXPAUSE(0x3a001644):%x\n", readl(0x3a001644));
pr_debug("GMAC3 TXPAUSE(0x3a0016A4):%x\n", readl(0x3a0016A4));
pr_debug("GMAC3 RXGOODBYTE_L(0x3a001684):%x\n", readl(0x3a001684));
pr_debug("GMAC3 RXGOODBYTE_H(0x3a001688):%x\n", readl(0x3a001688));
pr_debug("GMAC3 RXBADBYTE_L(0x3a00168c):%x\n", readl(0x3a00168c));
pr_debug("GMAC3 RXBADBYTE_H(0x3a001690):%x\n", readl(0x3a001690));
pr_info("%s: done\n", __func__);
}
@ -1314,10 +1351,6 @@ static void ipq9574_edma_set_ring_values(struct ipq9574_edma_hw *edma_hw)
edma_hw->txdesc_rings = IPQ9574_EDMA_TX_DESC_RING_NOS;
edma_hw->txdesc_ring_end = IPQ9574_EDMA_TX_DESC_RING_SIZE;
edma_hw->sec_txdesc_ring_start = IPQ9574_EDMA_SEC_TX_DESC_RING_START;
edma_hw->sec_txdesc_rings = IPQ9574_EDMA_SEC_TX_DESC_RING_NOS;
edma_hw->sec_txdesc_ring_end = IPQ9574_EDMA_SEC_TX_DESC_RING_SIZE;
edma_hw->txcmpl_ring_start = IPQ9574_EDMA_TX_CMPL_RING_START;
edma_hw->txcmpl_rings = IPQ9574_EDMA_RX_FILL_RING_NOS;
edma_hw->txcmpl_ring_end = IPQ9574_EDMA_TX_CMPL_RING_SIZE;
@ -1330,10 +1363,6 @@ static void ipq9574_edma_set_ring_values(struct ipq9574_edma_hw *edma_hw)
edma_hw->rxdesc_rings = IPQ9574_EDMA_RX_DESC_RING_NOS;
edma_hw->rxdesc_ring_end = IPQ9574_EDMA_RX_DESC_RING_SIZE;
edma_hw->sec_rxdesc_ring_start = IPQ9574_EDMA_SEC_RX_DESC_RING_START;
edma_hw->sec_rxdesc_rings = IPQ9574_EDMA_SEC_RX_DESC_RING_NOS;
edma_hw->sec_rxdesc_ring_end = IPQ9574_EDMA_SEC_RX_DESC_RING_SIZE;
pr_info("Num rings - TxDesc:%u (%u-%u) TxCmpl:%u (%u-%u)\n",
edma_hw->txdesc_rings, edma_hw->txdesc_ring_start,
(edma_hw->txdesc_ring_start + edma_hw->txdesc_rings - 1),
@ -1371,15 +1400,6 @@ static int ipq9574_edma_alloc_rings(struct ipq9574_edma_hw *ehw)
return -ENOMEM;
}
ehw->sec_rxdesc_ring = (void *)noncached_alloc((sizeof(
struct ipq9574_edma_sec_rxdesc_ring) *
ehw->sec_rxdesc_rings),
CONFIG_SYS_CACHELINE_SIZE);
if (!ehw->sec_rxdesc_ring) {
pr_info("%s: sec_rxdesc_ring alloc error\n", __func__);
return -ENOMEM;
}
ehw->txdesc_ring = (void *)noncached_alloc((sizeof(
struct ipq9574_edma_txdesc_ring) *
ehw->txdesc_rings),
@ -1389,15 +1409,6 @@ static int ipq9574_edma_alloc_rings(struct ipq9574_edma_hw *ehw)
return -ENOMEM;
}
ehw->sec_txdesc_ring = (void *)noncached_alloc((sizeof(
struct ipq9574_edma_sec_txdesc_ring) *
ehw->sec_txdesc_rings),
CONFIG_SYS_CACHELINE_SIZE);
if (!ehw->sec_txdesc_ring) {
pr_info("%s: txdesc_ring alloc error\n", __func__);
return -ENOMEM;
}
ehw->txcmpl_ring = (void *)noncached_alloc((sizeof(
struct ipq9574_edma_txcmpl_ring) *
ehw->txcmpl_rings),
@ -1444,17 +1455,6 @@ static int ipq9574_edma_init_rings(struct ipq9574_edma_hw *ehw)
return 0;
}
/*
* ipq9574_edma_configure_sec_txdesc_ring()
* Configure one secondary TxDesc ring
*/
static void ipq9574_edma_configure_sec_txdesc_ring(struct ipq9574_edma_hw *ehw,
struct ipq9574_edma_sec_txdesc_ring *sec_txdesc_ring)
{
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_BA2(sec_txdesc_ring->id),
(uint32_t)(sec_txdesc_ring->dma & 0xffffffff));
}
/*
* ipq9574_edma_configure_txdesc_ring()
* Configure one TxDesc ring
@ -1462,9 +1462,6 @@ static void ipq9574_edma_configure_sec_txdesc_ring(struct ipq9574_edma_hw *ehw,
static void ipq9574_edma_configure_txdesc_ring(struct ipq9574_edma_hw *ehw,
struct ipq9574_edma_txdesc_ring *txdesc_ring)
{
uint32_t data;
uint16_t hw_cons_idx;
/*
* Configure TXDESC ring
*/
@ -1472,24 +1469,17 @@ static void ipq9574_edma_configure_txdesc_ring(struct ipq9574_edma_hw *ehw,
(uint32_t)(txdesc_ring->dma &
IPQ9574_EDMA_RING_DMA_MASK));
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_BA2(txdesc_ring->id),
(uint32_t)(txdesc_ring->sdma &
IPQ9574_EDMA_RING_DMA_MASK));
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_RING_SIZE(
txdesc_ring->id), (uint32_t)(txdesc_ring->count &
IPQ9574_EDMA_TXDESC_RING_SIZE_MASK));
data = ipq9574_edma_reg_read(IPQ9574_EDMA_REG_TXDESC_CONS_IDX(
txdesc_ring->id));
data &= ~(IPQ9574_EDMA_TXDESC_CONS_IDX_MASK);
hw_cons_idx = data;
data = ipq9574_edma_reg_read(IPQ9574_EDMA_REG_TXDESC_PROD_IDX(
txdesc_ring->id));
data &= ~(IPQ9574_EDMA_TXDESC_PROD_IDX_MASK);
data |= hw_cons_idx & IPQ9574_EDMA_TXDESC_PROD_IDX_MASK;
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_PROD_IDX(
txdesc_ring->id), data);
txdesc_ring->id),
IPQ9574_EDMA_TX_INITIAL_PROD_IDX);
}
/*
@ -1499,6 +1489,8 @@ static void ipq9574_edma_configure_txdesc_ring(struct ipq9574_edma_hw *ehw,
static void ipq9574_edma_configure_txcmpl_ring(struct ipq9574_edma_hw *ehw,
struct ipq9574_edma_txcmpl_ring *txcmpl_ring)
{
uint32_t tx_mod_timer;
/*
* Configure TxCmpl ring base address
*/
@ -1516,20 +1508,20 @@ static void ipq9574_edma_configure_txcmpl_ring(struct ipq9574_edma_hw *ehw,
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id),
IPQ9574_EDMA_TXCMPL_RETMODE_OPAQUE);
/*
* Configure the default timer mitigation value
*/
tx_mod_timer = (IPQ9574_EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id) &
IPQ9574_EDMA_TX_MOD_TIMER_INIT_MASK)
<< IPQ9574_EDMA_TX_MOD_TIMER_INIT_SHIFT;
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id),
tx_mod_timer);
/*
* Enable ring. Set ret mode to 'opaque'.
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TX_INT_CTRL(txcmpl_ring->id),
0x2);
}
/*
* ipq9574_edma_configure_sec_rxdesc_ring()
* Configure one secondary RxDesc ring
*/
static void ipq9574_edma_configure_sec_rxdesc_ring(struct ipq9574_edma_hw *ehw,
struct ipq9574_edma_sec_rxdesc_ring *sec_rxdesc_ring)
{
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC_BA2(sec_rxdesc_ring->id),
(uint32_t)(sec_rxdesc_ring->dma & 0xffffffff));
IPQ9574_EDMA_TX_NE_INT_EN);
}
/*
@ -1542,7 +1534,10 @@ static void ipq9574_edma_configure_rxdesc_ring(struct ipq9574_edma_hw *ehw,
uint32_t data;
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC_BA(rxdesc_ring->id),
(uint32_t)(rxdesc_ring->dma & 0xffffffff));
(uint32_t)(rxdesc_ring->dma & IPQ9574_EDMA_RING_DMA_MASK));
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC_BA2(rxdesc_ring->id),
(uint32_t)(rxdesc_ring->sdma & IPQ9574_EDMA_RING_DMA_MASK));
data = rxdesc_ring->count & IPQ9574_EDMA_RXDESC_RING_SIZE_MASK;
data |= (ehw->rx_payload_offset & IPQ9574_EDMA_RXDESC_PL_OFFSET_MASK) <<
@ -1551,11 +1546,20 @@ static void ipq9574_edma_configure_rxdesc_ring(struct ipq9574_edma_hw *ehw,
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC_RING_SIZE(
rxdesc_ring->id), data);
/*
* Configure the default timer mitigation value
*/
data = (IPQ9574_EDMA_REG_RX_MOD_TIMER(rxdesc_ring->id) &
IPQ9574_EDMA_RX_MOD_TIMER_INIT_MASK)
<< IPQ9574_EDMA_RX_MOD_TIMER_INIT_SHIFT;
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RX_MOD_TIMER(rxdesc_ring->id),
data);
/*
* Enable ring. Set ret mode to 'opaque'.
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RX_INT_CTRL(rxdesc_ring->id),
0x2);
IPQ9574_EDMA_RX_NE_INT_EN);
}
/*
@ -1590,12 +1594,6 @@ static void ipq9574_edma_configure_rings(struct ipq9574_edma_hw *ehw)
for (i = 0; i < ehw->txdesc_rings; i++)
ipq9574_edma_configure_txdesc_ring(ehw, &ehw->txdesc_ring[i]);
/*
* Configure secondary TXDESC ring
*/
for (i = 0; i < ehw->sec_txdesc_rings; i++)
ipq9574_edma_configure_sec_txdesc_ring(ehw, &ehw->sec_txdesc_ring[i]);
/*
* Configure TXCMPL ring
*/
@ -1614,12 +1612,6 @@ static void ipq9574_edma_configure_rings(struct ipq9574_edma_hw *ehw)
for (i = 0; i < ehw->rxdesc_rings; i++)
ipq9574_edma_configure_rxdesc_ring(ehw, &ehw->rxdesc_ring[i]);
/*
* Configure secondary RXDESC ring
*/
for (i = 0; i < ehw->rxdesc_rings; i++)
ipq9574_edma_configure_sec_rxdesc_ring(ehw, &ehw->sec_rxdesc_ring[i]);
pr_info("%s: successfull\n", __func__);
}
@ -1642,7 +1634,7 @@ void ipq9574_edma_hw_reset(void)
int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
{
int ret, desc_index;
uint32_t i, reg;
uint32_t i, reg, ring_id;
volatile uint32_t data;
struct ipq9574_edma_rxdesc_ring *rxdesc_ring = NULL;
@ -1657,8 +1649,7 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
*/
ehw->rxfill_intr_mask = IPQ9574_EDMA_RXFILL_INT_MASK;
ehw->rxdesc_intr_mask = IPQ9574_EDMA_RXDESC_INT_MASK_PKT_INT;
ehw->txcmpl_intr_mask = IPQ9574_EDMA_TX_INT_MASK_PKT_INT |
IPQ9574_EDMA_TX_INT_MASK_UGT_INT;
ehw->txcmpl_intr_mask = IPQ9574_EDMA_TX_INT_MASK_PKT_INT;
ehw->misc_intr_mask = 0xff;
ehw->rx_payload_offset = 0x0;
@ -1753,17 +1744,19 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
/*
* Set PPE QID to EDMA Rx ring mapping.
* When coming up use only queue 0.
* HOST EDMA rings.
* Each entry can hold mapping for 8 PPE queues and entry size is
* Each entry can hold mapping for 4 PPE queues and entry size is
* 4 bytes
*/
desc_index = ehw->rxdesc_ring_start;
desc_index = (ehw->rxdesc_ring_start & 0x1F);
reg = IPQ9574_EDMA_QID2RID_TABLE_MEM(0);
data = 0;
data |= (desc_index & 0xF);
data = ((desc_index << 0) & 0xFF) |
(((desc_index + 1) << 8) & 0xff00) |
(((desc_index + 2) << 16) & 0xff0000) |
(((desc_index + 3) << 24) & 0xff000000);
ipq9574_edma_reg_write(reg, data);
pr_debug("Configure QID2RID reg:0x%x to 0x%x\n", reg, data);
pr_debug("Configure QID2RID(0) reg:0x%x to 0x%x\n", reg, data);
/*
* Set RXDESC2FILL_MAP_xx reg.
@ -1775,31 +1768,27 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1, 0);
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC2FILL_MAP_2, 0);
for (i = ehw->rxdesc_ring_start;
i < ehw->rxdesc_ring_end; i++) {
if ((i >= 0) && (i <= 9))
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
ring_id = rxdesc_ring->id;
if ((ring_id >= 0) && (ring_id <= 9))
reg = IPQ9574_EDMA_REG_RXDESC2FILL_MAP_0;
else if ((i >= 10) && (i <= 19))
else if ((ring_id >= 10) && (ring_id <= 19))
reg = IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1;
else
reg = IPQ9574_EDMA_REG_RXDESC2FILL_MAP_2;
rxdesc_ring = &ehw->rxdesc_ring[i - ehw->rxdesc_ring_start];
pr_debug("Configure RXDESC:%u to use RXFILL:%u\n",
rxdesc_ring->id, rxdesc_ring->rxfill->id);
ring_id, rxdesc_ring->rxfill->id);
/*
* Set the Rx fill descriptor ring number in the mapping
* register.
* E.g. If (rxfill ring)rxdesc_ring->rxfill->id = 7, (rxdesc ring)i = 13.
* reg = IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1
* data |= (rxdesc_ring->rxfill->id & 0x7) << ((i % 10) * 3);
* data |= (0x7 << 9); - This sets 111 at 9th bit of
* register IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1
*/
data = ipq9574_edma_reg_read(reg);
data |= (rxdesc_ring->rxfill->id & 0x7) << ((i % 10) * 3);
data |= (rxdesc_ring->rxfill->id & 0x7) << ((ring_id % 10) * 3);
ipq9574_edma_reg_write(reg, data);
}
@ -1810,6 +1799,23 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
pr_debug("EDMA_REG_RXDESC2FILL_MAP_2: 0x%x\n",
ipq9574_edma_reg_read(IPQ9574_EDMA_REG_RXDESC2FILL_MAP_2));
/*
* Configure DMA request priority, DMA read burst length,
* and AXI write size.
*/
data = IPQ9574_EDMA_DMAR_BURST_LEN_SET(IPQ9574_EDMA_BURST_LEN_ENABLE)
| IPQ9574_EDMA_DMAR_REQ_PRI_SET(0)
| IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(31)
| IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(7)
| IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(7);
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_DMAR_CTRL, data);
/*
* Enable MISC interrupt mask
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_MISC_INT_MASK,
ehw->misc_intr_mask);
/*
* Enable EDMA
*/
@ -1840,12 +1846,6 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_CTRL(i), data);
}
/*
* Enable MISC interrupt
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_MISC_INT_MASK,
ehw->misc_intr_mask);
pr_info("%s: successfull\n", __func__);
return 0;
}

View file

@ -38,21 +38,20 @@
#define IPQ9574_EDMA_START_GMACS IPQ9574_NSS_DP_START_PHY_PORT
#define IPQ9574_EDMA_MAX_GMACS IPQ9574_NSS_DP_MAX_PHY_PORTS
#define IPQ9574_EDMA_TX_BUFF_SIZE 1572
#define IPQ9574_EDMA_TX_BUFF_SIZE 2048
#define IPQ9574_EDMA_RX_BUFF_SIZE 2048
/* Max number of rings of each type is defined with below macro */
#define IPQ9574_EDMA_MAX_TXCMPL_RINGS 32 /* Max TxCmpl rings */
#define IPQ9574_EDMA_MAX_TXDESC_RINGS 32 /* Max TxDesc rings */
#define IPQ9574_EDMA_MAX_RXDESC_RINGS 24 /* Max RxDesc rings */
#define IPQ9574_EDMA_MAX_RXFILL_RINGS 8 /* Max RxFill rings */
#define IPQ9574_EDMA_MAX_TXDESC_RINGS 32 /* Max TxDesc rings */
#define IPQ9574_EDMA_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
#define IPQ9574_EDMA_RXFILL_DESC(R, i) IPQ9574_EDMA_GET_DESC(R, i, struct ipq9574_edma_rxfill_desc)
#define IPQ9574_EDMA_RXDESC_DESC(R, i) IPQ9574_EDMA_GET_DESC(R, i, struct ipq9574_edma_rxdesc_desc)
#define IPQ9574_EDMA_TXDESC_DESC(R, i) IPQ9574_EDMA_GET_DESC(R, i, struct ipq9574_edma_txdesc_desc)
#define IPQ9574_EDMA_TXCMPL_DESC(R, i) IPQ9574_EDMA_GET_DESC(R, i, struct ipq9574_edma_txcmpl_desc)
#define IPQ9574_EDMA_RXPH_SRC_INFO_TYPE_GET(rxph) (((rxph & 0xffff) >> 8) & 0xf0)
#define IPQ9574_EDMA_DEV 1
#define IPQ9574_EDMA_TX_QUEUE 1
@ -65,26 +64,16 @@
#define IPQ9574_EDMA_TX_DESC_RING_SIZE \
(IPQ9574_EDMA_TX_DESC_RING_START + IPQ9574_EDMA_TX_DESC_RING_NOS)
#define IPQ9574_EDMA_SEC_TX_DESC_RING_START 31
#define IPQ9574_EDMA_SEC_TX_DESC_RING_NOS 1
#define IPQ9574_EDMA_SEC_TX_DESC_RING_SIZE \
(IPQ9574_EDMA_SEC_TX_DESC_RING_START + IPQ9574_EDMA_SEC_TX_DESC_RING_NOS)
#define IPQ9574_EDMA_TX_CMPL_RING_START 31
#define IPQ9574_EDMA_TX_CMPL_RING_NOS 1
#define IPQ9574_EDMA_TX_CMPL_RING_SIZE \
(IPQ9574_EDMA_TX_CMPL_RING_START + IPQ9574_EDMA_TX_CMPL_RING_NOS)
#define IPQ9574_EDMA_RX_DESC_RING_START 15
#define IPQ9574_EDMA_RX_DESC_RING_START 23
#define IPQ9574_EDMA_RX_DESC_RING_NOS 1
#define IPQ9574_EDMA_RX_DESC_RING_SIZE \
(IPQ9574_EDMA_RX_DESC_RING_START + IPQ9574_EDMA_RX_DESC_RING_NOS)
#define IPQ9574_EDMA_SEC_RX_DESC_RING_START 15
#define IPQ9574_EDMA_SEC_RX_DESC_RING_NOS 1
#define IPQ9574_EDMA_SEC_RX_DESC_RING_SIZE \
(IPQ9574_EDMA_SEC_RX_DESC_RING_START + IPQ9574_EDMA_SEC_RX_DESC_RING_NOS)
#define IPQ9574_EDMA_RX_FILL_RING_START 7
#define IPQ9574_EDMA_RX_FILL_RING_NOS 1
#define IPQ9574_EDMA_RX_FILL_RING_SIZE \
@ -96,129 +85,89 @@
* RxDesc descriptor
*/
struct ipq9574_edma_rxdesc_desc {
uint32_t rdes0;
/* buffer_address_lo */
uint32_t rdes1;
/* valid toggle, more, int_pri, drop_prec, reserved x 3,
* tunnel_type, tunnel_term_ind, cpu_code_valid, known_ind,
* wifi_qos_flag, wifi_qos, buffer_address_hi */
uint32_t rdes2;
/* opaque_lo */
uint32_t rdes3;
/* opaque_hi */
uint32_t rdes4;
/* dst_info, src_info */
uint32_t rdes5;
/* dspcp, pool_id, data_lengh */
uint32_t rdes6;
/* hash_value, hash_flag, l3_csum_status, l4_csum_status,
* data_offset */
uint32_t rdes7;
/* l4_offset, l3_offset, pid, CVLAN flag, SVLAN flag, PPPOE flag
* service_code */
};
/*
* RxFill descriptor
*/
struct ipq9574_edma_rxfill_desc {
uint32_t rdes0;
/* buffer_address_lo */
uint32_t rdes1;
/* buffer_size, reserved x 1, buffer_address_hi */
uint32_t rdes2;
/* opaque_lo */
uint32_t rdes3;
/* opaque_hu */
};
/*
* TxDesc descriptor
*/
struct ipq9574_edma_txdesc_desc {
uint32_t tdes0;
/* buffer_address_lo */
uint32_t tdes1;
/* reserved x 1, more, int_pri, drop_prec, reserved x 4,
* buff_recycling, fake_mac_header,ptp_tag_flag, pri_valid,
* buffer_address_high_bits_tbi, buffer_address_hi */
uint32_t tdes2;
/* opaque_lo */
uint32_t tdes3;
/* opaque_hi */
uint32_t tdes4;
/* dst_info, src_info */
uint32_t tdes5;
/* adv_offload_en, vlan_offload_en, frm_fmt_indication_en,
* edit_offload_en, csum_mode, ip_csum_en, tso_en, pool_id,
* data_lengh */
uint32_t tdes6;
/* mss/hash_value/pip_tag, hash_flag, reserved x 2,
* data_offset */
uint32_t tdes7;
/* l4_offset, l3_offset, reserved, prot_type, l2_type,
* CVLAN flag, SVLAN flag, PPPOE flag, service_code */
};
/*
* TxCmpl descriptor
*/
struct ipq9574_edma_txcmpl_desc {
uint32_t tdes0;
/* buffer_address_lo */
uint32_t tdes1;
/* buffer_size, reserved x 1, buffer_address_hi */
uint32_t tdes2;
/* opaque_lo */
uint32_t tdes3;
/* opaque_hu */
uint32_t rdes0; /* Contains buffer address */
uint32_t rdes1; /* Contains more bit, priority bit, service code */
uint32_t rdes2; /* Contains opaque */
uint32_t rdes3; /* Contains opaque high bits */
uint32_t rdes4; /* Contains destination and source information */
uint32_t rdes5; /* Contains WiFi QoS, data length */
uint32_t rdes6; /* Contains hash value, check sum status */
uint32_t rdes7; /* Contains DSCP, packet offsets */
};
/*
* EDMA Rx Secondary Descriptor
*/
struct ipq9574_edma_rx_sec_desc {
uint32_t rx_sec0;
uint32_t rx_sec1;
uint32_t rx_sec2;
uint32_t rx_sec3;
uint32_t rx_sec4;
uint32_t rx_sec5;
uint32_t rx_sec6;
uint32_t rx_sec7;
uint32_t rx_sec0; /* Contains timestamp */
uint32_t rx_sec1; /* Contains secondary checksum status */
uint32_t rx_sec2; /* Contains QoS tag */
uint32_t rx_sec3; /* Contains flow index details */
uint32_t rx_sec4; /* Contains secondary packet offsets */
uint32_t rx_sec5; /* Contains multicast bit, checksum */
uint32_t rx_sec6; /* Contains SVLAN, CVLAN */
uint32_t rx_sec7; /* Contains secondary SVLAN, CVLAN */
};
/*
* RxFill descriptor
*/
struct ipq9574_edma_rxfill_desc {
uint32_t rdes0; /* Contains buffer address */
uint32_t rdes1; /* Contains buffer size */
uint32_t rdes2; /* Contains opaque */
uint32_t rdes3; /* Contains opaque high bits */
};
/*
* TxDesc descriptor
*/
struct ipq9574_edma_txdesc_desc {
uint32_t tdes0; /* Low 32-bit of buffer address */
uint32_t tdes1; /* Buffer recycling, PTP tag flag, PRI valid flag */
uint32_t tdes2; /* Low 32-bit of opaque value */
uint32_t tdes3; /* High 32-bit of opaque value */
uint32_t tdes4; /* Source/Destination port info */
uint32_t tdes5; /* VLAN offload, csum_mode, ip_csum_en, tso_en, data length */
uint32_t tdes6; /* MSS/hash_value/PTP tag, data offset */
uint32_t tdes7; /* L4/L3 offset, PROT type, L2 type, CVLAN/SVLAN tag, service code */
};
/*
* EDMA Tx Secondary Descriptor
*/
struct ipq9574_edma_tx_sec_desc {
uint32_t tx_sec0;
uint32_t tx_sec1;
uint32_t rx_sec2;
uint32_t rx_sec3;
uint32_t rx_sec4;
uint32_t rx_sec5;
uint32_t rx_sec6;
uint32_t rx_sec7;
uint32_t tx_sec0; /* Reserved */
uint32_t tx_sec1; /* Custom csum offset, payload offset, TTL/NAT action */
uint32_t rx_sec2; /* NAPT translated port, DSCP value, TTL value */
uint32_t rx_sec3; /* Flow index value and valid flag */
uint32_t rx_sec4; /* Reserved */
uint32_t rx_sec5; /* Reserved */
uint32_t rx_sec6; /* CVLAN/SVLAN command */
uint32_t rx_sec7; /* CVLAN/SVLAN tag value */
};
/*
* secondary Tx descriptor ring
* TxCmpl descriptor
*/
struct ipq9574_edma_sec_txdesc_ring {
uint32_t id; /* TXDESC ring number */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
uint16_t count; /* number of descriptors */
struct ipq9574_edma_txcmpl_desc {
uint32_t tdes0; /* Low 32-bit opaque value */
uint32_t tdes1; /* High 32-bit opaque value */
uint32_t tdes2; /* More fragment, transmit ring id, pool id */
uint32_t tdes3; /* Error indications */
};
/*
* Tx descriptor ring
*/
struct ipq9574_edma_txdesc_ring {
uint32_t prod_idx; /* Producer index */
uint32_t avail_desc; /* Number of available descriptor to process */
uint32_t id; /* TXDESC ring number */
void *desc; /* descriptor ring virtual address */
struct ipq9574_edma_txdesc_desc *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
struct ipq9574_edma_tx_sec_desc *sdesc; /* Secondary descriptor ring virtual addr */
dma_addr_t sdma; /* Secondary descriptor ring physical address */
uint16_t count; /* number of descriptors */
};
@ -226,10 +175,12 @@ struct ipq9574_edma_txdesc_ring {
* TxCmpl ring
*/
struct ipq9574_edma_txcmpl_ring {
uint32_t cons_idx; /* Consumer index */
uint32_t avail_pkt; /* Number of available packets to process */
struct ipq9574_edma_txcmpl_desc *desc; /* descriptor ring virtual address */
uint32_t id; /* TXCMPL ring number */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
uint16_t count; /* number of descriptors in the ring */
uint32_t count; /* Number of descriptors in the ring */
};
/*
@ -237,19 +188,10 @@ struct ipq9574_edma_txcmpl_ring {
*/
struct ipq9574_edma_rxfill_ring {
uint32_t id; /* RXFILL ring number */
void *desc; /* descriptor ring virtual address */
uint32_t count; /* number of descriptors in the ring */
uint32_t prod_idx; /* Ring producer index */
struct ipq9574_edma_rxfill_desc *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
uint16_t count; /* number of descriptors in the ring */
};
/*
* secondary RxDesc ring
*/
struct ipq9574_edma_sec_rxdesc_ring {
uint32_t id; /* RXDESC ring number */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
uint16_t count; /* number of descriptors in the ring */
};
/*
@ -257,10 +199,13 @@ struct ipq9574_edma_sec_rxdesc_ring {
*/
struct ipq9574_edma_rxdesc_ring {
uint32_t id; /* RXDESC ring number */
uint32_t count; /* number of descriptors in the ring */
uint32_t cons_idx; /* Ring consumer index */
struct ipq9574_edma_rxdesc_desc *desc; /* Primary descriptor ring virtual addr */
struct ipq9574_edma_sec_rxdesc_ring *sdesc; /* Secondary desc ring VA */
struct ipq9574_edma_rxfill_ring *rxfill; /* RXFILL ring used */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
uint16_t count; /* number of descriptors in the ring */
dma_addr_t dma; /* Primary descriptor ring physical address */
dma_addr_t sdma; /* Secondary descriptor ring physical address */
};
enum ipq9574_edma_tx {
@ -292,17 +237,12 @@ struct ipq9574_edma_hw {
uint32_t flags; /* internal flags */
int active; /* status */
struct ipq9574_edma_txdesc_ring *txdesc_ring; /* Tx Descriptor Ring, SW is producer */
struct ipq9574_edma_sec_txdesc_ring *sec_txdesc_ring; /* secondary Tx Descriptor Ring, SW is producer */
struct ipq9574_edma_txcmpl_ring *txcmpl_ring; /* Tx Completion Ring, SW is consumer */
struct ipq9574_edma_rxdesc_ring *rxdesc_ring; /* Rx Descriptor Ring, SW is consumer */
struct ipq9574_edma_sec_rxdesc_ring *sec_rxdesc_ring; /* secondary Rx Descriptor Ring, SW is consumer */
struct ipq9574_edma_rxfill_ring *rxfill_ring; /* Rx Fill Ring, SW is producer */
uint32_t txdesc_rings; /* Number of TxDesc rings */
uint32_t txdesc_ring_start; /* Id of first TXDESC ring */
uint32_t txdesc_ring_end; /* Id of the last TXDESC ring */
uint32_t sec_txdesc_rings; /* Number of secondary TxDesc rings */
uint32_t sec_txdesc_ring_start; /* Id of first secondary TxDesc ring */
uint32_t sec_txdesc_ring_end; /* Id of last secondary TxDesc ring */
uint32_t txcmpl_rings; /* Number of TxCmpl rings */
uint32_t txcmpl_ring_start; /* Id of first TXCMPL ring */
uint32_t txcmpl_ring_end; /* Id of last TXCMPL ring */
@ -312,9 +252,6 @@ struct ipq9574_edma_hw {
uint32_t rxdesc_rings; /* Number of RxDesc rings */
uint32_t rxdesc_ring_start; /* Id of first RxDesc ring */
uint32_t rxdesc_ring_end; /* Id of last RxDesc ring */
uint32_t sec_rxdesc_rings; /* Number of secondary RxDesc rings */
uint32_t sec_rxdesc_ring_start; /* Id of first secondary RxDesc ring */
uint32_t sec_rxdesc_ring_end; /* Id of last secondary RxDesc ring */
uint32_t tx_intr_mask; /* tx interrupt mask */
uint32_t rx_intr_mask; /* rx interrupt mask */
uint32_t rxfill_intr_mask; /* Rx fill ring interrupt mask */

View file

@ -110,8 +110,8 @@ void ipq9574_ppe_acl_set(int rule_id, int rule_type, int pkt_type, int l4_port_n
hw_reg.bf.pri = 0x0;
}
hw_reg.bf.src_0 = 0x6;
hw_reg.bf.src_1 = 0x7;
hw_reg.bf.src_0 = 0x0;
hw_reg.bf.src_1 = 0x3f;
ppe_ipo_rule_reg_set(&hw_reg, rule_id);
ppe_ipo_mask_reg_set(&hw_mask, rule_id);
ppe_ipo_action_set(&hw_act, rule_id);
@ -153,7 +153,7 @@ static void ipq9574_ppe_ucast_queue_map_tbl_queue_id_set(int queue, int port)
*/
static void ipq9574_vsi_setup(int vsi, uint8_t group_mask)
{
uint32_t val = (group_mask << 24 | group_mask << 16 | group_mask << 8
uint32_t val = (group_mask << 24 | group_mask << 16 | 0x1
| group_mask);
/* Set mask */
@ -473,7 +473,7 @@ void ipq9574_pqsgmii_speed_set(int port, int speed, int status)
ppe_port_bridge_txmac_set(port + 1, status);
ipq9574_ppe_reg_write(IPQ9574_PPE_MAC_SPEED + (0x200 * port), speed);
ipq9574_ppe_reg_write(IPQ9574_PPE_MAC_ENABLE + (0x200 * port), 0x73);
ipq9574_ppe_reg_write(IPQ9574_PPE_MAC_MIB_CTL + (0x200 * port), 0x5);
ipq9574_ppe_reg_write(IPQ9574_PPE_MAC_MIB_CTL + (0x200 * port), 0x1);
}
/*
@ -895,6 +895,8 @@ void ipq9574_ppe_provision_init(void)
ipq9574_ppe_vp_port_tbl_set(2, 3);
ipq9574_ppe_vp_port_tbl_set(3, 4);
ipq9574_ppe_vp_port_tbl_set(4, 5);
ipq9574_ppe_vp_port_tbl_set(5, 6);
ipq9574_ppe_vp_port_tbl_set(6, 7);
#endif
/* Unicast priority map */
@ -914,10 +916,6 @@ void ipq9574_ppe_provision_init(void)
ipq9574_ppe_e_sp_cfg_tbl_drr_id_set(i);
}
/* sp_cfg_l0 and sp_cfg_l1 configuration */
ipq9574_ppe_reg_write(IPQ9574_PPE_TM_SHP_CFG_L0, 0x12b);
ipq9574_ppe_reg_write(IPQ9574_PPE_TM_SHP_CFG_L1, 0x3f);
/* Port0 multicast queue */
ipq9574_ppe_reg_write(0x409000, 0x00000000);
ipq9574_ppe_reg_write(0x403000, 0x00401000);
@ -956,6 +954,8 @@ void ipq9574_ppe_provision_init(void)
ipq9574_vsi_setup(3, 0x05);
ipq9574_vsi_setup(4, 0x09);
ipq9574_vsi_setup(5, 0x11);
ipq9574_vsi_setup(6, 0x21);
ipq9574_vsi_setup(7, 0x41);
#endif
/* Port 0-7 STP */

View file

@ -77,14 +77,14 @@ struct ipo_rule_reg {
uint32_t fake_mac_header:1;
uint32_t range_en:1;
uint32_t inverse_en:1;
uint32_t rule_type:4;
uint32_t src_type:2;
uint32_t src_0:3;
uint32_t src_1:5;
uint32_t rule_type:5;
uint32_t src_type:3;
uint32_t src_0:1;
uint32_t src_1:7;
uint32_t pri:9;
uint32_t res_chain:1;
uint32_t post_routing_en:1;
uint32_t _reserved0:16;
uint32_t _reserved0:14;
};
union ipo_rule_reg_u {

View file

@ -291,6 +291,19 @@ extern loff_t board_env_size;
#ifdef CONFIG_OF_BOARD_SETUP
#define DLOAD_DISABLE 0x1
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
#define CONFIG_HW_WATCHDOG
#define IPQ_FS_NONHLOS_BIT (1 << 9)
#define IPQ_FS_HLOS_BIT (1 << 10)
#endif
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE_DEBUG
#define fs_debug(fmt, args...) printf(fmt, ##args);
#else
#define fs_debug(fmt, args...)
#endif
/*
* Below Configs need to be updated after enabling reset_crashdump
* Included now to avoid build failure

View file

@ -285,6 +285,23 @@ extern loff_t board_env_size;
#ifdef CONFIG_OF_BOARD_SETUP
#define DLOAD_DISABLE 0x1
#define CONFIG_IPQ_RUNTIME_FAILSAFE
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE
#define CONFIG_HW_WATCHDOG
#define IPQ_FS_NONHLOS_BIT (1 << 9)
#define IPQ_FS_HLOS_BIT (1 << 10)
#define IPQ_RUNTIME_FAILSAFE_ENABLED (1 << 0)
#define IPQ_RUNTIME_FS_BOOTCONFIG_UPDATED (1 << 1)
#endif
#ifdef CONFIG_IPQ_RUNTIME_FAILSAFE_DEBUG
#define fs_debug(fmt, args...) printf(fmt, ##args);
#else
#define fs_debug(fmt, args...)
#endif
/*
* Below Configs need to be updated after enabling reset_crashdump
* Included now to avoid build failure

View file

@ -89,8 +89,8 @@ memory_size = "default"
lk = "false"
skip_4k_nand = "false"
atf = "false"
qcn6122 = "false"
tiny_16m = "false"
multi_wifi_fw = "false"
# Note: ipq806x didn't expose any relevant version */
soc_hw_version_ipq40xx = { 0x20050100 };
@ -1275,7 +1275,7 @@ class Pack(object):
try:
diff_soc_ver_files = section.attrib['diff_soc_ver_files']
except KeyError, e:
if (qcn6122 == "true" or tiny_16m == "true") and 'wififw_type_min' in section.attrib:
if (multi_wifi_fw == "true" or tiny_16m == "true") and 'wififw_type_min' in section.attrib:
wifi_fw_type_min = section.attrib['wififw_type_min']
wifi_fw_type_max = section.attrib['wififw_type_max']
else:
@ -1341,7 +1341,7 @@ class Pack(object):
if flinfo.type != "emmc":
img = section.find('img_name')
if img != None and 'wififw_type' in img.attrib and (qcn6122 == "true" or tiny_16m == "true"):
if img != None and 'wififw_type' in img.attrib and (multi_wifi_fw == "true" or tiny_16m == "true"):
imgs = section.findall('img_name')
try:
for img in imgs:
@ -1438,7 +1438,7 @@ class Pack(object):
if ret == 0:
return 0
if self.flash_type in [ "nand", "nand-4k", "norplusnand", "norplusnand-4k" ] and partition == "rootfs" and qcn6122 == "true":
if self.flash_type in [ "nand", "nand-4k", "norplusnand", "norplusnand-4k" ] and partition == "rootfs" and multi_wifi_fw == "true":
fw_imgs = section.findall('img_name')
for fw_img in fw_imgs:
@ -1717,7 +1717,7 @@ class Pack(object):
diff_soc_ver_files = section.attrib['diff_soc_ver_files']
partition = section.attrib['label']
except KeyError, e:
if (qcn6122 == "true" or tiny_16m == "true") and 'wififw_type_min' in section.attrib:
if (multi_wifi_fw == "true" or tiny_16m == "true") and 'wififw_type_min' in section.attrib:
wifi_fw_type_min = section.attrib['wififw_type_min']
wifi_fw_type_max = section.attrib['wififw_type_max']
partition = section.attrib['label']
@ -1783,7 +1783,7 @@ class Pack(object):
img = section.find('img_name')
if img != None and 'wififw_type' in img.attrib and (qcn6122 == "true" or tiny_16m == "true"):
if img != None and 'wififw_type' in img.attrib and (multi_wifi_fw == "true" or tiny_16m == "true"):
imgs = section.findall('img_name')
try:
for img in imgs:
@ -1888,7 +1888,7 @@ class Pack(object):
if filename != "":
self.__gen_script_append_images(filename, soc_version, wifi_fw_type, images, flinfo, root, section_conf, partition)
if self.flash_type in [ "nand", "nand-4k", "norplusnand", "norplusnand-4k" ] and section_conf == "rootfs" and qcn6122 == "true":
if self.flash_type in [ "nand", "nand-4k", "norplusnand", "norplusnand-4k" ] and section_conf == "rootfs" and multi_wifi_fw == "true":
fw_imgs = section.findall('img_name')
try:
@ -2241,7 +2241,7 @@ class ArgParser(object):
global lk
global atf
global skip_4k_nand
global qcn6122
global multi_wifi_fw
"""Start the parsing process, and populate members with parsed value.
@ -2251,7 +2251,7 @@ class ArgParser(object):
cdir = os.path.abspath(os.path.dirname(""))
if len(sys.argv) > 1:
try:
opts, args = getopt(sys.argv[1:], "", ["arch=", "fltype=", "srcPath=", "inImage=", "outImage=", "image_type=", "memory=", "lk", "skip_4k_nand", "atf", "qcn6122"])
opts, args = getopt(sys.argv[1:], "", ["arch=", "fltype=", "srcPath=", "inImage=", "outImage=", "image_type=", "memory=", "lk", "skip_4k_nand", "atf", "qcn6122", "multi_wifi_fw"])
except GetoptError, e:
raise UsageError(e.msg)
@ -2287,7 +2287,10 @@ class ArgParser(object):
skip_4k_nand = "true"
elif option == "--qcn6122":
qcn6122 = "true"
multi_wifi_fw = "true"
elif option == "--multi_wifi_fw":
multi_wifi_fw = "true"
#Verify Arguments passed by user