diff --git a/target/linux/airoha/an7581/config-6.6 b/target/linux/airoha/an7581/config-6.6
new file mode 100644
index 0000000000..600f1e45b4
--- /dev/null
+++ b/target/linux/airoha/an7581/config-6.6
@@ -0,0 +1,370 @@
+CONFIG_64BIT=y
+CONFIG_AIROHA_CPU_PM_DOMAIN=y
+CONFIG_AIROHA_THERMAL=y
+CONFIG_AIROHA_WATCHDOG=y
+CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y
+CONFIG_ARCH_AIROHA=y
+CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y
+CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y
+CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FORCE_MAX_ORDER=10
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=24
+CONFIG_ARCH_MMAP_RND_BITS_MIN=18
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
+CONFIG_ARCH_PROC_KCORE_TEXT=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_WANTS_NO_INSTR=y
+CONFIG_ARCH_WANTS_THP_SWAP=y
+CONFIG_ARM64=y
+CONFIG_ARM64_4K_PAGES=y
+CONFIG_ARM64_ERRATUM_843419=y
+CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y
+CONFIG_ARM64_PAGE_SHIFT=12
+CONFIG_ARM64_PA_BITS=48
+CONFIG_ARM64_PA_BITS_48=y
+CONFIG_ARM64_TAGGED_ADDR_ABI=y
+CONFIG_ARM64_VA_BITS=39
+CONFIG_ARM64_VA_BITS_39=y
+CONFIG_ARM_AIROHA_SOC_CPUFREQ=y
+CONFIG_ARM_AMBA=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+CONFIG_ARM_GIC=y
+CONFIG_ARM_GIC_V2M=y
+CONFIG_ARM_GIC_V3=y
+CONFIG_ARM_GIC_V3_ITS=y
+CONFIG_ARM_GIC_V3_ITS_PCI=y
+CONFIG_ARM_PMU=y
+CONFIG_ARM_PMUV3=y
+CONFIG_ARM_PSCI_FW=y
+CONFIG_ARM_SMCCC_SOC_ID=y
+# CONFIG_ARM_SMMU is not set
+# CONFIG_ARM_SMMU_V3 is not set
+CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_PM=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y
+CONFIG_CC_HAVE_SHADOW_CALL_STACK=y
+CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMMON_CLK_EN7523=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+# CONFIG_COMPAT_32BIT_TIME is not set
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_RMAP=y
+CONFIG_CRC16=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_DEV_EIP93=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_SHA256=y
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA3=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_DEBUG_MISC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y
+CONFIG_DMA_DIRECT_REMAP=y
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_OF=y
+CONFIG_DTC=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EXT4_FS=y
+CONFIG_FIXED_PHY=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_FRAME_POINTER=y
+CONFIG_FS_IOMAP=y
+CONFIG_FS_MBCACHE=y
+CONFIG_FUNCTION_ALIGNMENT=4
+CONFIG_FUNCTION_ALIGNMENT_4B=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_FW_CACHE=y
+# CONFIG_FW_LOADER_USER_HELPER is not set
+CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_ARCH_TOPOLOGY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IOREMAP=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_PINCONF=y
+CONFIG_GENERIC_PINCTRL_GROUPS=y
+CONFIG_GENERIC_PINMUX_FUNCTIONS=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GLOB=y
+CONFIG_GPIOLIB_IRQCHIP=y
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_EN7523=y
+CONFIG_GPIO_GENERIC=y
+CONFIG_GRO_CELLS=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_AIROHA=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_ESP_OFFLOAD is not set
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_IO_URING=y
+CONFIG_IPC_NS=y
+CONFIG_IPV6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_IPV6_SUBTREES is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_COMMON=y
+# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IRQCHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_WORK=y
+CONFIG_JBD2=y
+CONFIG_LIBFDT=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_MDIO_BUS=y
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_DEVRES=y
+# CONFIG_MEDIATEK_GE_SOC_PHY is not set
+# CONFIG_MEMCG is not set
+CONFIG_MFD_SYSCON=y
+CONFIG_MIGRATION=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_CQHCI=y
+CONFIG_MMC_MTK=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_MTD_NAND_CORE=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_MTK_BMT=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_SPI_NAND=y
+CONFIG_MTD_SPLIT_FIRMWARE=y
+CONFIG_MTD_SPLIT_FIT_FW=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NET_AIROHA=y
+CONFIG_NET_DEVLINK=y
+CONFIG_NET_DSA=y
+CONFIG_NET_DSA_MT7530=y
+CONFIG_NET_DSA_MT7530_MDIO=y
+CONFIG_NET_DSA_MT7530_MMIO=y
+CONFIG_NET_DSA_TAG_MTK=y
+CONFIG_NET_FLOW_LIMIT=y
+# CONFIG_NET_MEDIATEK_SOC is not set
+CONFIG_NET_SELFTESTS=y
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_NET_VENDOR_AIROHA=y
+# CONFIG_NET_VENDOR_MEDIATEK is not set
+CONFIG_NLS=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=4
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_MDIO=y
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PARTITION_PERCPU=y
+CONFIG_PCI=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEFAULT is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_MEDIATEK=y
+CONFIG_PCIE_MEDIATEK_GEN3=y
+CONFIG_PCIE_PME=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_MSI=y
+CONFIG_PCS_MTK_LYNXI=y
+CONFIG_PERF_EVENTS=y
+CONFIG_PER_VMA_LOCK=y
+CONFIG_PGTABLE_LEVELS=3
+CONFIG_PHYLIB=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_PHYLINK=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_PHY_AIROHA_PCIE=y
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_AIROHA=y
+# CONFIG_PINCTRL_MT2712 is not set
+# CONFIG_PINCTRL_MT6765 is not set
+# CONFIG_PINCTRL_MT6795 is not set
+# CONFIG_PINCTRL_MT6797 is not set
+# CONFIG_PINCTRL_MT7622 is not set
+# CONFIG_PINCTRL_MT7981 is not set
+# CONFIG_PINCTRL_MT7986 is not set
+# CONFIG_PINCTRL_MT8173 is not set
+# CONFIG_PINCTRL_MT8183 is not set
+# CONFIG_PINCTRL_MT8186 is not set
+# CONFIG_PINCTRL_MT8188 is not set
+# CONFIG_PINCTRL_MT8516 is not set
+CONFIG_PM=y
+CONFIG_PM_CLK=y
+CONFIG_PM_OPP=y
+CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PREEMPT_NONE_BUILD=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_RANDSTRUCT_NONE=y
+CONFIG_RAS=y
+CONFIG_RATIONAL=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RELOCATABLE=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RFS_ACCEL=y
+CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
+CONFIG_RPS=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_FSL=y
+CONFIG_SERIAL_8250_NR_UARTS=5
+CONFIG_SERIAL_8250_RUNTIME_UARTS=5
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SGL_ALLOC=y
+CONFIG_SKB_EXTENSIONS=y
+CONFIG_SMP=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_SOC_BUS=y
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+# CONFIG_SPI_AIROHA_EN7523 is not set
+CONFIG_SPI_AIROHA_SNFI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SWIOTLB=y
+CONFIG_SWPHY=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_OF=y
+CONFIG_THREAD_INFO_IN_TASK=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_UBIFS_FS=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_VMAP_STACK=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WLAN is not set
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+CONFIG_XFRM_AH=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_ESP=y
+CONFIG_XFRM_IPCOMP=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XPS=y
+CONFIG_XXHASH=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZONE_DMA32=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
diff --git a/target/linux/airoha/en7523/config-6.6 b/target/linux/airoha/en7523/config-6.6
new file mode 100644
index 0000000000..a40b61696a
--- /dev/null
+++ b/target/linux/airoha/en7523/config-6.6
@@ -0,0 +1,300 @@
+CONFIG_ALIGNMENT_TRAP=y
+CONFIG_ARCH_32BIT_OFF_T=y
+CONFIG_ARCH_AIROHA=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+CONFIG_ARCH_MULTIPLATFORM=y
+CONFIG_ARCH_MULTI_V6_V7=y
+CONFIG_ARCH_MULTI_V7=y
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARM=y
+CONFIG_ARM_AMBA=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+CONFIG_ARM_CPU_SUSPEND=y
+CONFIG_ARM_GIC=y
+CONFIG_ARM_GIC_V3=y
+CONFIG_ARM_GIC_V3_ITS=y
+CONFIG_ARM_GIC_V3_ITS_PCI=y
+CONFIG_ARM_HAS_GROUP_RELOCS=y
+CONFIG_ARM_HEAVY_MB=y
+# CONFIG_ARM_HIGHBANK_CPUIDLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+CONFIG_ARM_PATCH_IDIV=y
+CONFIG_ARM_PATCH_PHYS_VIRT=y
+CONFIG_ARM_PSCI=y
+CONFIG_ARM_PSCI_FW=y
+# CONFIG_ARM_SMMU is not set
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_UNWIND=y
+CONFIG_ARM_VIRT_EXT=y
+CONFIG_ATAGS=y
+CONFIG_AUTO_ZRELADDR=y
+CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_PM=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_CACHE_L2X0=y
+CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMMON_CLK_EN7523=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MITIGATIONS=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_PM=y
+CONFIG_CPU_RMAP=y
+CONFIG_CPU_SPECTRE=y
+CONFIG_CPU_THUMB_CAPABLE=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_V7=y
+CONFIG_CRC16=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CURRENT_POINTER_IN_TPIDRURO=y
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S"
+CONFIG_DEBUG_MISC=y
+CONFIG_DMA_OPS=y
+CONFIG_DTC=y
+CONFIG_EDAC_ATOMIC_SCRUB=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_FIXED_PHY=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_FS_IOMAP=y
+CONFIG_FUNCTION_ALIGNMENT=0
+CONFIG_FWNODE_MDIO=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_ARCH_TOPOLOGY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
+CONFIG_GENERIC_IRQ_MULTI_HANDLER=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_PINCONF=y
+CONFIG_GENERIC_PINCTRL_GROUPS=y
+CONFIG_GENERIC_PINMUX_FUNCTIONS=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_VDSO_32=y
+CONFIG_GPIOLIB_IRQCHIP=y
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_EN7523=y
+CONFIG_GPIO_GENERIC=y
+# CONFIG_HARDEN_BRANCH_HISTORY is not set
+# CONFIG_HARDEN_BRANCH_PREDICTOR is not set
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAVE_SMP=y
+CONFIG_HOTPLUG_CORE_SYNC=y
+CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_HW_RANDOM=y
+CONFIG_HZ_FIXED=0
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_IOMMUFD is not set
+# CONFIG_IOMMU_DEBUGFS is not set
+# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
+# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set
+CONFIG_IOMMU_SUPPORT=y
+CONFIG_IRQCHIP=y
+CONFIG_IRQSTACKS=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_IRQ_WORK=y
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+CONFIG_LIBFDT=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_MDIO_BUS=y
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_DEVRES=y
+CONFIG_MFD_SYSCON=y
+CONFIG_MIGHT_HAVE_CACHE_L2X0=y
+CONFIG_MIGRATION=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_MTD_NAND_CORE=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING=y
+CONFIG_MTD_SPI_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPLIT_FIRMWARE=y
+CONFIG_MTD_SPLIT_FIT_FW=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SRCU_NMI_SAFE=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_XGRESS=y
+CONFIG_NLS=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=2
+CONFIG_NVMEM=y
+CONFIG_NVMEM_LAYOUTS=y
+CONFIG_NVMEM_SYSFS=y
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_MDIO=y
+CONFIG_OLD_SIGACTION=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
+CONFIG_PADATA=y
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PARTITION_PERCPU=y
+CONFIG_PCI=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_MEDIATEK=y
+CONFIG_PCIE_PME=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_MSI=y
+CONFIG_PERF_USE_VMALLOC=y
+CONFIG_PGTABLE_LEVELS=2
+CONFIG_PHYLIB=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_PINCTRL=y
+CONFIG_PM=y
+CONFIG_PM_CLK=y
+CONFIG_PREEMPT_NONE_BUILD=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+CONFIG_RANDSTRUCT_NONE=y
+CONFIG_RAS=y
+CONFIG_RATIONAL=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RFS_ACCEL=y
+CONFIG_RPS=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_SCSI=y
+CONFIG_SCSI_COMMON=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_FSL=y
+# CONFIG_SERIAL_8250_SHARE_IRQ is not set
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SGL_ALLOC=y
+CONFIG_SG_POOL=y
+CONFIG_SMP=y
+CONFIG_SMP_ON_UP=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+CONFIG_SPI_AIROHA_EN7523=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_STACKTRACE=y
+# CONFIG_SWAP is not set
+CONFIG_SWPHY=y
+CONFIG_SWP_EMULATE=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_THREAD_INFO_IN_TASK=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_UBIFS_FS=y
+CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h"
+CONFIG_UNWINDER_ARM=y
+CONFIG_USB=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_XHCI_HCD=y
+# CONFIG_USB_XHCI_PLATFORM is not set
+CONFIG_USE_OF=y
+# CONFIG_VFP is not set
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+CONFIG_XPS=y
+CONFIG_XXHASH=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_BCJ=y
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
diff --git a/target/linux/airoha/patches-6.6/001-v6.10-arm64-add-Airoha-EN7581-platform.patch b/target/linux/airoha/patches-6.6/001-v6.10-arm64-add-Airoha-EN7581-platform.patch
new file mode 100644
index 0000000000..a77ed8c778
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/001-v6.10-arm64-add-Airoha-EN7581-platform.patch
@@ -0,0 +1,34 @@
+From 428ae88ef519f2009fac37563de76ffa6f93046f Mon Sep 17 00:00:00 2001
+From: Daniel Danzberger
+Date: Sat, 9 Mar 2024 10:32:16 +0100
+Subject: [PATCH] arm64: add Airoha EN7581 platform
+
+Introduce the Kconfig entry for the Airoha EN7581 multicore architecture
+available in the Airoha EN7581 evaluation board.
+
+Signed-off-by: Daniel Danzberger
+Co-developed-by: Lorenzo Bianconi
+Signed-off-by: Lorenzo Bianconi
+Link: https://lore.kernel.org/r/d52d95db313e6a58ba997ba2181faf78a1014bcc.1709975956.git.lorenzo@kernel.org
+Signed-off-by: AngeloGioacchino Del Regno
+Signed-off-by: Arnd Bergmann
+---
+ arch/arm64/Kconfig.platforms | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/arm64/Kconfig.platforms
++++ b/arch/arm64/Kconfig.platforms
+@@ -8,6 +8,13 @@ config ARCH_ACTIONS
+ help
+ This enables support for the Actions Semiconductor S900 SoC family.
+
++config ARCH_AIROHA
++ bool "Airoha SoC Support"
++ select ARM_PSCI
++ select HAVE_ARM_ARCH_TIMER
++ help
++ This enables support for the ARM64 based Airoha SoCs.
++
+ config ARCH_SUNXI
+ bool "Allwinner sunxi 64-bit SoC Family"
+ select ARCH_HAS_RESET_CONTROLLER
diff --git a/target/linux/airoha/patches-6.6/002-v6.11-i2c-mt7621-Add-Airoha-EN7581-i2c-support.patch b/target/linux/airoha/patches-6.6/002-v6.11-i2c-mt7621-Add-Airoha-EN7581-i2c-support.patch
new file mode 100644
index 0000000000..46c376e343
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/002-v6.11-i2c-mt7621-Add-Airoha-EN7581-i2c-support.patch
@@ -0,0 +1,27 @@
+From fd6acb0d21b8683fd8804129beeb4fe629488aff Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Tue, 9 Jul 2024 00:42:38 +0200
+Subject: [PATCH] i2c: mt7621: Add Airoha EN7581 i2c support
+
+Introduce i2c support to Airoha EN7581 SoC through the i2c-mt7621
+driver.
+
+Reviewed-by: AngeloGioacchino Del Regno
+Tested-by: Ray Liu
+Signed-off-by: Lorenzo Bianconi
+Signed-off-by: Andi Shyti
+---
+ drivers/i2c/busses/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -841,7 +841,7 @@ config I2C_MT65XX
+
+ config I2C_MT7621
+ tristate "MT7621/MT7628 I2C Controller"
+- depends on (RALINK && (SOC_MT7620 || SOC_MT7621)) || COMPILE_TEST
++ depends on (RALINK && (SOC_MT7620 || SOC_MT7621)) || ARCH_AIROHA || COMPILE_TEST
+ help
+ Say Y here to include support for I2C controller in the
+ MediaTek MT7621/MT7628 SoCs.
diff --git a/target/linux/airoha/patches-6.6/006-v6.11-net-airoha-Introduce-ethernet-support-for-EN7581-SoC.patch b/target/linux/airoha/patches-6.6/006-v6.11-net-airoha-Introduce-ethernet-support-for-EN7581-SoC.patch
new file mode 100644
index 0000000000..253b6fd7ab
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/006-v6.11-net-airoha-Introduce-ethernet-support-for-EN7581-SoC.patch
@@ -0,0 +1,2835 @@
+From 23020f04932701d5c8363e60756f12b43b8ed752 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Fri, 12 Jul 2024 23:27:58 +0200
+Subject: [PATCH] net: airoha: Introduce ethernet support for EN7581 SoC
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add airoha_eth driver in order to introduce ethernet support for
+Airoha EN7581 SoC available on EN7581 development board (en7581-evb).
+EN7581 mac controller is mainly composed by the Frame Engine (PSE+PPE)
+and QoS-DMA (QDMA) modules. FE is used for traffic offloading (just
+basic functionalities are currently supported) while QDMA is used for
+DMA operations and QOS functionalities between the mac layer and the
+external modules conncted to the FE GDM ports (e.g MT7530 DSA switch
+or external phys).
+A general overview of airoha_eth architecture is reported below:
+
+ ┌───────┐ ┌───────┐
+ │ QDMA2 │ │ QDMA1 │
+ └───┬───┘ └───┬───┘
+ │ │
+ ┌───────▼─────────────────────────────────────────────▼────────┐
+ │ │
+ │ P5 P0 │
+ │ │
+ │ │
+ │ │ ┌──────┐
+ │ P3 ├────► GDM3 │
+ │ │ └──────┘
+ │ │
+ │ │
+┌─────┐ │ │
+│ PPE ◄────┤ P4 PSE │
+└─────┘ │ │
+ │ │
+ │ │
+ │ │ ┌──────┐
+ │ P9 ├────► GDM4 │
+ │ │ └──────┘
+ │ │
+ │ │
+ │ │
+ │ P2 P1 │
+ └─────────┬───────────────────────────────────────────┬────────┘
+ │ │
+ ┌───▼──┐ ┌──▼───┐
+ │ GDM2 │ │ GDM1 │
+ └──────┘ └──┬───┘
+ │
+ ┌────▼─────┐
+ │ MT7530 │
+ └──────────┘
+
+Currently only hw LAN features (QDMA1+GDM1) are available while hw WAN
+(QDMA2+GDM{2,3,4}) ones will be added with subsequent patches introducing
+traffic offloading support.
+
+Tested-by: Benjamin Larsson
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/274945d2391c195098ab180a46d0617b18b9e42c.1720818878.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ MAINTAINERS | 9 +
+ drivers/net/ethernet/mediatek/Kconfig | 10 +-
+ drivers/net/ethernet/mediatek/Makefile | 1 +
+ drivers/net/ethernet/mediatek/airoha_eth.c | 2730 ++++++++++++++++++++
+ 4 files changed, 2749 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/mediatek/airoha_eth.c
+
+--- a/drivers/net/ethernet/mediatek/Kconfig
++++ b/drivers/net/ethernet/mediatek/Kconfig
+@@ -1,12 +1,20 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config NET_VENDOR_MEDIATEK
+ bool "MediaTek devices"
+- depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
++ depends on ARCH_MEDIATEK || ARCH_AIROHA || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
+ help
+ If you have a Mediatek SoC with ethernet, say Y.
+
+ if NET_VENDOR_MEDIATEK
+
++config NET_AIROHA
++ tristate "Airoha SoC Gigabit Ethernet support"
++ depends on NET_DSA || !NET_DSA
++ select PAGE_POOL
++ help
++ This driver supports the gigabit ethernet MACs in the
++ Airoha SoC family.
++
+ config NET_MEDIATEK_SOC_WED
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ def_bool NET_MEDIATEK_SOC != n
+--- a/drivers/net/ethernet/mediatek/Makefile
++++ b/drivers/net/ethernet/mediatek/Makefile
+@@ -11,3 +11,4 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) +
+ endif
+ obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
+ obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
++obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
+--- /dev/null
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -0,0 +1,2731 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 AIROHA Inc
++ * Author: Lorenzo Bianconi
++ */
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#define AIROHA_MAX_NUM_GDM_PORTS 1
++#define AIROHA_MAX_NUM_RSTS 3
++#define AIROHA_MAX_NUM_XSI_RSTS 5
++#define AIROHA_MAX_MTU 2000
++#define AIROHA_MAX_PACKET_SIZE 2048
++#define AIROHA_NUM_TX_RING 32
++#define AIROHA_NUM_RX_RING 32
++#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
++#define AIROHA_FE_MC_MAX_VLAN_PORT 16
++#define AIROHA_NUM_TX_IRQ 2
++#define HW_DSCP_NUM 2048
++#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
++#define TX_DSCP_NUM 1024
++#define RX_DSCP_NUM(_n) \
++ ((_n) == 2 ? 128 : \
++ (_n) == 11 ? 128 : \
++ (_n) == 15 ? 128 : \
++ (_n) == 0 ? 1024 : 16)
++
++#define PSE_RSV_PAGES 128
++#define PSE_QUEUE_RSV_PAGES 64
++
++/* FE */
++#define PSE_BASE 0x0100
++#define CSR_IFC_BASE 0x0200
++#define CDM1_BASE 0x0400
++#define GDM1_BASE 0x0500
++#define PPE1_BASE 0x0c00
++
++#define CDM2_BASE 0x1400
++#define GDM2_BASE 0x1500
++
++#define GDM3_BASE 0x1100
++#define GDM4_BASE 0x2500
++
++#define GDM_BASE(_n) \
++ ((_n) == 4 ? GDM4_BASE : \
++ (_n) == 3 ? GDM3_BASE : \
++ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
++
++#define REG_FE_DMA_GLO_CFG 0x0000
++#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
++#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
++
++#define REG_FE_RST_GLO_CFG 0x0004
++#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
++#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
++#define FE_RST_CORE_MASK BIT(0)
++
++#define REG_FE_LAN_MAC_H 0x0040
++#define REG_FE_LAN_MAC_LMIN 0x0044
++#define REG_FE_LAN_MAC_LMAX 0x0048
++
++#define REG_FE_CDM1_OQ_MAP0 0x0050
++#define REG_FE_CDM1_OQ_MAP1 0x0054
++#define REG_FE_CDM1_OQ_MAP2 0x0058
++#define REG_FE_CDM1_OQ_MAP3 0x005c
++
++#define REG_FE_PCE_CFG 0x0070
++#define PCE_DPI_EN_MASK BIT(2)
++#define PCE_KA_EN_MASK BIT(1)
++#define PCE_MC_EN_MASK BIT(0)
++
++#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
++#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
++#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
++#define PSE_CFG_WR_EN_MASK BIT(8)
++#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
++
++#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
++#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
++
++#define PSE_FQ_CFG 0x008c
++#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
++
++#define REG_FE_PSE_BUF_SET 0x0090
++#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
++#define PSE_ALLRSV_MASK GENMASK(14, 0)
++
++#define REG_PSE_SHARE_USED_THD 0x0094
++#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
++#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
++
++#define REG_GDM_MISC_CFG 0x0148
++#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
++#define GDM2_CHN_VLD_MODE_MASK BIT(5)
++
++#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
++#define FE_IFC_EN_MASK BIT(0)
++
++#define REG_FE_VIP_PORT_EN 0x01f0
++#define REG_FE_IFC_PORT_EN 0x01f4
++
++#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
++#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
++
++#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
++#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
++#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
++
++#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
++#define PATN_FCPU_EN_MASK BIT(7)
++#define PATN_SWP_EN_MASK BIT(6)
++#define PATN_DP_EN_MASK BIT(5)
++#define PATN_SP_EN_MASK BIT(4)
++#define PATN_TYPE_MASK GENMASK(3, 1)
++#define PATN_EN_MASK BIT(0)
++
++#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
++#define PATN_DP_MASK GENMASK(31, 16)
++#define PATN_SP_MASK GENMASK(15, 0)
++
++#define REG_CDM1_VLAN_CTRL CDM1_BASE
++#define CDM1_VLAN_MASK GENMASK(31, 16)
++
++#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
++#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
++
++#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
++#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
++ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
++
++#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
++#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
++#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
++
++#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
++#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
++ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
++
++#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
++#define GDM_DROP_CRC_ERR BIT(23)
++#define GDM_IP4_CKSUM BIT(22)
++#define GDM_TCP_CKSUM BIT(21)
++#define GDM_UDP_CKSUM BIT(20)
++#define GDM_UCFQ_MASK GENMASK(15, 12)
++#define GDM_BCFQ_MASK GENMASK(11, 8)
++#define GDM_MCFQ_MASK GENMASK(7, 4)
++#define GDM_OCFQ_MASK GENMASK(3, 0)
++
++#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
++#define GDM_INGRESS_FC_EN_MASK BIT(1)
++#define GDM_STAG_EN_MASK BIT(0)
++
++#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
++#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
++#define GDM_LONG_LEN_MASK GENMASK(29, 16)
++
++#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
++#define FE_CPORT_PAD BIT(26)
++#define FE_CPORT_PORT_XFC_MASK BIT(25)
++#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
++
++#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
++#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
++#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
++
++#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
++#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
++#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
++#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
++#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
++#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
++
++#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
++#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
++#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
++#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
++#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
++#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
++#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
++#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
++#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
++#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
++#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
++#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
++#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
++#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
++#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
++
++#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
++#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
++#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
++#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
++#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
++#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
++#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
++#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
++#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
++#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
++#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
++#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
++#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
++#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
++#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
++#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
++#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
++#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
++#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
++#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
++#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
++#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
++
++#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
++#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
++#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
++#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
++#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
++
++#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
++#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
++#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
++#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
++
++#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
++#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
++#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
++#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
++#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
++#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
++#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
++#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
++#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
++#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
++#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
++#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
++#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
++#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
++#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
++#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
++
++#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
++/* Fix: RX and TX age-select fields cannot occupy the same bits; the RX
++ * selector lives in bits 26:25 (the original defined both as 18:17).
++ */
++#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
++#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
++
++#define REG_GDM3_FWD_CFG GDM3_BASE
++#define GDM3_PAD_EN_MASK BIT(28)
++
++#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100)
++#define GDM4_PAD_EN_MASK BIT(28)
++#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
++
++#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c)
++#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
++#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
++#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
++
++#define REG_IP_FRAG_FP 0x2010
++#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
++#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
++#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
++#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
++
++#define REG_MC_VLAN_EN 0x2100
++#define MC_VLAN_EN_MASK BIT(0)
++
++#define REG_MC_VLAN_CFG 0x2104
++#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
++#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
++#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
++#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
++#define MC_VLAN_CFG_RW_MASK BIT(0)
++
++#define REG_MC_VLAN_DATA 0x2108
++
++#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
++
++/* QDMA */
++#define REG_QDMA_GLOBAL_CFG 0x0004
++#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
++#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
++#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
++#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
++#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
++#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
++#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
++#define GLOBAL_CFG_RESET_MASK BIT(23)
++#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
++#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
++#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
++#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
++#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
++#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
++#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
++#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
++#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
++#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
++#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
++#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
++#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
++#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
++#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
++
++#define REG_FWD_DSCP_BASE 0x0010
++#define REG_FWD_BUF_BASE 0x0014
++
++#define REG_HW_FWD_DSCP_CFG 0x0018
++#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
++#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
++#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
++
++#define REG_INT_STATUS(_n) \
++ (((_n) == 4) ? 0x0730 : \
++ ((_n) == 3) ? 0x0724 : \
++ ((_n) == 2) ? 0x0720 : \
++ ((_n) == 1) ? 0x0024 : 0x0020)
++
++#define REG_INT_ENABLE(_n) \
++ (((_n) == 4) ? 0x0750 : \
++ ((_n) == 3) ? 0x0744 : \
++ ((_n) == 2) ? 0x0740 : \
++ ((_n) == 1) ? 0x002c : 0x0028)
++
++/* QDMA_CSR_INT_ENABLE1 */
++#define RX15_COHERENT_INT_MASK BIT(31)
++#define RX14_COHERENT_INT_MASK BIT(30)
++#define RX13_COHERENT_INT_MASK BIT(29)
++#define RX12_COHERENT_INT_MASK BIT(28)
++#define RX11_COHERENT_INT_MASK BIT(27)
++#define RX10_COHERENT_INT_MASK BIT(26)
++#define RX9_COHERENT_INT_MASK BIT(25)
++#define RX8_COHERENT_INT_MASK BIT(24)
++#define RX7_COHERENT_INT_MASK BIT(23)
++#define RX6_COHERENT_INT_MASK BIT(22)
++#define RX5_COHERENT_INT_MASK BIT(21)
++#define RX4_COHERENT_INT_MASK BIT(20)
++#define RX3_COHERENT_INT_MASK BIT(19)
++#define RX2_COHERENT_INT_MASK BIT(18)
++#define RX1_COHERENT_INT_MASK BIT(17)
++#define RX0_COHERENT_INT_MASK BIT(16)
++#define TX7_COHERENT_INT_MASK BIT(15)
++#define TX6_COHERENT_INT_MASK BIT(14)
++#define TX5_COHERENT_INT_MASK BIT(13)
++#define TX4_COHERENT_INT_MASK BIT(12)
++#define TX3_COHERENT_INT_MASK BIT(11)
++#define TX2_COHERENT_INT_MASK BIT(10)
++#define TX1_COHERENT_INT_MASK BIT(9)
++#define TX0_COHERENT_INT_MASK BIT(8)
++#define CNT_OVER_FLOW_INT_MASK BIT(7)
++#define IRQ1_FULL_INT_MASK BIT(5)
++#define IRQ1_INT_MASK BIT(4)
++#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
++#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
++#define IRQ0_FULL_INT_MASK BIT(1)
++#define IRQ0_INT_MASK BIT(0)
++
++#define TX_DONE_INT_MASK(_n) \
++ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
++ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
++
++#define INT_TX_MASK \
++ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
++ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
++
++#define INT_IDX0_MASK \
++ (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
++ TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
++ TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
++ TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
++ RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
++ RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
++ RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
++ RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
++ RX15_COHERENT_INT_MASK | INT_TX_MASK)
++
++/* QDMA_CSR_INT_ENABLE2 */
++#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
++#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
++#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
++#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
++#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
++#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
++#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
++#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
++#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
++#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
++#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
++#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
++#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
++#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
++#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
++#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
++#define RX15_DONE_INT_MASK BIT(15)
++#define RX14_DONE_INT_MASK BIT(14)
++#define RX13_DONE_INT_MASK BIT(13)
++#define RX12_DONE_INT_MASK BIT(12)
++#define RX11_DONE_INT_MASK BIT(11)
++#define RX10_DONE_INT_MASK BIT(10)
++#define RX9_DONE_INT_MASK BIT(9)
++#define RX8_DONE_INT_MASK BIT(8)
++#define RX7_DONE_INT_MASK BIT(7)
++#define RX6_DONE_INT_MASK BIT(6)
++#define RX5_DONE_INT_MASK BIT(5)
++#define RX4_DONE_INT_MASK BIT(4)
++#define RX3_DONE_INT_MASK BIT(3)
++#define RX2_DONE_INT_MASK BIT(2)
++#define RX1_DONE_INT_MASK BIT(1)
++#define RX0_DONE_INT_MASK BIT(0)
++
++#define RX_DONE_INT_MASK \
++ (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
++ RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
++ RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
++ RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
++ RX15_DONE_INT_MASK)
++#define INT_IDX1_MASK \
++ (RX_DONE_INT_MASK | \
++ RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
++ RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
++ RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
++ RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
++ RX15_NO_CPU_DSCP_INT_MASK)
++
++/* QDMA_CSR_INT_ENABLE5 */
++#define TX31_COHERENT_INT_MASK BIT(31)
++#define TX30_COHERENT_INT_MASK BIT(30)
++#define TX29_COHERENT_INT_MASK BIT(29)
++#define TX28_COHERENT_INT_MASK BIT(28)
++#define TX27_COHERENT_INT_MASK BIT(27)
++#define TX26_COHERENT_INT_MASK BIT(26)
++#define TX25_COHERENT_INT_MASK BIT(25)
++#define TX24_COHERENT_INT_MASK BIT(24)
++#define TX23_COHERENT_INT_MASK BIT(23)
++#define TX22_COHERENT_INT_MASK BIT(22)
++#define TX21_COHERENT_INT_MASK BIT(21)
++#define TX20_COHERENT_INT_MASK BIT(20)
++#define TX19_COHERENT_INT_MASK BIT(19)
++#define TX18_COHERENT_INT_MASK BIT(18)
++#define TX17_COHERENT_INT_MASK BIT(17)
++#define TX16_COHERENT_INT_MASK BIT(16)
++#define TX15_COHERENT_INT_MASK BIT(15)
++#define TX14_COHERENT_INT_MASK BIT(14)
++#define TX13_COHERENT_INT_MASK BIT(13)
++#define TX12_COHERENT_INT_MASK BIT(12)
++#define TX11_COHERENT_INT_MASK BIT(11)
++#define TX10_COHERENT_INT_MASK BIT(10)
++#define TX9_COHERENT_INT_MASK BIT(9)
++#define TX8_COHERENT_INT_MASK BIT(8)
++
++#define INT_IDX4_MASK \
++ (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
++ TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
++ TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
++ TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
++ TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
++ TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
++ TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
++ TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
++ TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
++ TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
++ TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
++ TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
++
++#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
++
++#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
++#define TX_IRQ_THR_MASK GENMASK(27, 16)
++#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
++
++#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
++#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
++
++#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
++#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
++#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
++
++#define REG_TX_RING_BASE(_n) \
++ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
++
++#define REG_TX_RING_BLOCKING(_n) \
++ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
++
++#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
++#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
++#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
++#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
++#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
++
++#define REG_TX_CPU_IDX(_n) \
++ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
++
++#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
++
++#define REG_TX_DMA_IDX(_n) \
++ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
++
++#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
++
++#define IRQ_RING_IDX_MASK GENMASK(20, 16)
++#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
++
++#define REG_RX_RING_BASE(_n) \
++ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
++
++#define REG_RX_RING_SIZE(_n) \
++ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
++
++#define RX_RING_THR_MASK GENMASK(31, 16)
++#define RX_RING_SIZE_MASK GENMASK(15, 0)
++
++#define REG_RX_CPU_IDX(_n) \
++ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
++
++#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
++
++#define REG_RX_DMA_IDX(_n) \
++ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
++
++#define REG_RX_DELAY_INT_IDX(_n) \
++ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
++
++#define RX_DELAY_INT_MASK GENMASK(15, 0)
++
++#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
++
++#define REG_INGRESS_TRTCM_CFG 0x0070
++#define INGRESS_TRTCM_EN_MASK BIT(31)
++#define INGRESS_TRTCM_MODE_MASK BIT(30)
++#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
++#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
++
++#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
++/* Fix: '+' binds tighter than '<<', so the original expanded to
++ * (base + _m) << 2 and addressed the wrong register for any _m.
++ * The queue-word offset must be shifted before it is added to the base.
++ */
++#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
++
++#define REG_LMGR_INIT_CFG 0x1000
++#define LMGR_INIT_START BIT(31)
++#define LMGR_SRAM_MODE_MASK BIT(30)
++#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
++#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
++
++#define REG_FWD_DSCP_LOW_THR 0x1004
++#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
++
++#define REG_EGRESS_RATE_METER_CFG 0x100c
++#define EGRESS_RATE_METER_EN_MASK BIT(29)
++#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
++#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
++#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
++
++#define REG_EGRESS_TRTCM_CFG 0x1010
++#define EGRESS_TRTCM_EN_MASK BIT(31)
++#define EGRESS_TRTCM_MODE_MASK BIT(30)
++#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
++#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
++
++#define REG_TXWRR_MODE_CFG 0x1020
++#define TWRR_WEIGHT_SCALE_MASK BIT(31)
++#define TWRR_WEIGHT_BASE_MASK BIT(3)
++
++#define REG_PSE_BUF_USAGE_CFG 0x1028
++#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
++
++#define REG_GLB_TRTCM_CFG 0x1080
++#define GLB_TRTCM_EN_MASK BIT(31)
++#define GLB_TRTCM_MODE_MASK BIT(30)
++#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
++#define GLB_FAST_TICK_MASK GENMASK(15, 0)
++
++#define REG_TXQ_CNGST_CFG 0x10a0
++#define TXQ_CNGST_DROP_EN BIT(31)
++#define TXQ_CNGST_DEI_DROP_EN BIT(30)
++
++#define REG_SLA_TRTCM_CFG 0x1150
++#define SLA_TRTCM_EN_MASK BIT(31)
++#define SLA_TRTCM_MODE_MASK BIT(30)
++#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
++#define SLA_FAST_TICK_MASK GENMASK(15, 0)
++
++/* CTRL */
++#define QDMA_DESC_DONE_MASK BIT(31)
++#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
++#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
++#define QDMA_DESC_DEI_MASK BIT(25)
++#define QDMA_DESC_NO_DROP_MASK BIT(24)
++#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
++/* DATA */
++#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
++/* TX MSG0 */
++#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
++#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
++#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
++#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
++#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
++#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
++#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
++#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
++#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
++#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
++/* TX MSG1 */
++#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
++#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
++#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
++#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
++#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
++#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
++#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
++#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
++#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
++
++/* RX MSG1 */
++#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
++#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
++#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
++#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
++#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
++#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
++#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
++#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
++#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
++
++/* QDMA rx/tx hw descriptor. Field layouts for ctrl/data/msg0/msg1 are
++ * given by the QDMA_DESC_* and QDMA_ETH_TXMSG_*/QDMA_ETH_RXMSG_* masks
++ * defined above.
++ */
++struct airoha_qdma_desc {
++	__le32 rsv;
++	__le32 ctrl;	/* QDMA_DESC_* bits: done/drop/more/len... */
++	__le32 addr;	/* DMA address of the data buffer */
++	__le32 data;	/* QDMA_DESC_NEXT_ID_MASK: next descriptor id */
++	__le32 msg0;
++	__le32 msg1;
++	__le32 msg2;
++	__le32 msg3;
++};
++
++/* CTRL0 */
++#define QDMA_FWD_DESC_CTX_MASK BIT(31)
++#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
++#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
++#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
++/* CTRL1 */
++#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
++/* CTRL2 */
++#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
++
++/* QDMA hw-forwarding descriptor; ctrl0..ctrl2 layouts are given by the
++ * QDMA_FWD_DESC_* masks above.
++ */
++struct airoha_qdma_fwd_desc {
++	__le32 addr;	/* DMA address of the forwarded buffer */
++	__le32 ctrl0;
++	__le32 ctrl1;
++	__le32 ctrl2;
++	__le32 msg0;
++	__le32 msg1;
++	__le32 rsv0;
++	__le32 rsv1;
++};
++
++/* Index into airoha_eth::irqmask and the REG_INT_STATUS()/REG_INT_ENABLE()
++ * register banks.
++ */
++enum {
++	QDMA_INT_REG_IDX0,
++	QDMA_INT_REG_IDX1,
++	QDMA_INT_REG_IDX2,
++	QDMA_INT_REG_IDX3,
++	QDMA_INT_REG_IDX4,
++	QDMA_INT_REG_MAX
++};
++
++/* XSI serial-interface port identifiers consumed by airoha_set_gdm_port() */
++enum {
++	XSI_PCIE0_PORT,
++	XSI_PCIE1_PORT,
++	XSI_USB_PORT,
++	XSI_AE_PORT,
++	XSI_ETH_PORT,
++};
++
++/* Per-port bits in REG_FE_VIP_PORT_EN / REG_FE_IFC_PORT_EN.
++ * NOTE(review): USB is BIT(25) while ETH is BIT(24), i.e. not in the same
++ * order as the XSI_*_PORT enum above - assumed intentional, confirm
++ * against the hw datasheet.
++ */
++enum {
++	XSI_PCIE0_VIP_PORT_MASK = BIT(22),
++	XSI_PCIE1_VIP_PORT_MASK = BIT(23),
++	XSI_USB_VIP_PORT_MASK = BIT(25),
++	XSI_ETH_VIP_PORT_MASK = BIT(24),
++};
++
++/* Bit numbers for the airoha_eth::state bitmask */
++enum {
++	DEV_STATE_INITIALIZED,
++};
++
++/* CDM destination queues programmed into the *_CRSN_QSEL registers */
++enum {
++	CDM_CRSN_QSEL_Q1 = 1,
++	CDM_CRSN_QSEL_Q5 = 5,
++	CDM_CRSN_QSEL_Q6 = 6,
++	CDM_CRSN_QSEL_Q15 = 15,
++};
++
++/* PPE CPU-reason (CRSN) codes whose queue mapping is overridden in
++ * airoha_fe_crsn_qsel_init().
++ */
++enum {
++	CRSN_08 = 0x8,
++	CRSN_21 = 0x15, /* KA */
++	CRSN_22 = 0x16, /* hit bind and force route to CPU */
++	CRSN_24 = 0x18,
++	CRSN_25 = 0x19,
++};
++
++/* PSE port identifiers used as forwarding targets; DROP is the 4-bit
++ * sentinel value.
++ */
++enum {
++	FE_PSE_PORT_CDM1,
++	FE_PSE_PORT_GDM1,
++	FE_PSE_PORT_GDM2,
++	FE_PSE_PORT_GDM3,
++	FE_PSE_PORT_PPE1,
++	FE_PSE_PORT_CDM2,
++	FE_PSE_PORT_CDM3,
++	FE_PSE_PORT_CDM4,
++	FE_PSE_PORT_PPE2,
++	FE_PSE_PORT_GDM4,
++	FE_PSE_PORT_CDM5,
++	FE_PSE_PORT_DROP = 0xf,
++};
++
++/* Per-slot sw bookkeeping for a queue descriptor (rx buffer or tx skb) */
++struct airoha_queue_entry {
++	union {
++		void *buf;
++		struct sk_buff *skb;
++	};
++	dma_addr_t dma_addr;	/* mapped DMA address of buf/skb data */
++	u16 dma_len;		/* mapped length in bytes */
++};
++
++/* One QDMA descriptor ring plus its sw state and (for rx) page pool */
++struct airoha_queue {
++	struct airoha_eth *eth;
++
++	/* protect concurrent queue accesses */
++	spinlock_t lock;
++	struct airoha_queue_entry *entry;	/* ndesc sw entries, 1:1 with desc */
++	struct airoha_qdma_desc *desc;		/* hw descriptor ring */
++	u16 head;
++	u16 tail;
++
++	int queued;	/* descriptors currently in use */
++	int ndesc;	/* ring size */
++	int free_thr;	/* free-slot threshold */
++	int buf_size;	/* data buffer size */
++
++	struct napi_struct napi;
++	struct page_pool *page_pool;
++};
++
++/* Tx-completion irq queue: 'q' is the DMA-visible array of completion
++ * entries (base programmed via REG_TX_IRQ_BASE).
++ */
++struct airoha_tx_irq_queue {
++	struct airoha_eth *eth;
++
++	struct napi_struct napi;
++	u32 *q;
++
++	int size;	/* number of entries in q */
++	int queued;	/* entries pending processing */
++	u16 head;	/* next entry to consume */
++};
++
++/* Accumulated GDM hw counters; refreshed from the REG_FE_GDM_* counter
++ * registers.
++ */
++struct airoha_hw_stats {
++	/* protect concurrent hw_stats accesses */
++	spinlock_t lock;
++	struct u64_stats_sync syncp;
++
++	/* get_stats64 */
++	u64 rx_ok_pkts;
++	u64 tx_ok_pkts;
++	u64 rx_ok_bytes;
++	u64 tx_ok_bytes;
++	u64 rx_multicast;
++	u64 rx_errors;
++	u64 rx_drops;
++	u64 tx_drops;
++	u64 rx_crc_error;
++	u64 rx_over_errors;
++	/* ethtool stats */
++	u64 tx_broadcast;
++	u64 tx_multicast;
++	u64 tx_len[7];	/* packet-size histogram buckets */
++	u64 rx_broadcast;
++	u64 rx_fragment;
++	u64 rx_jabber;
++	u64 rx_len[7];	/* packet-size histogram buckets */
++};
++
++/* Per-netdev state for one GDM port */
++struct airoha_gdm_port {
++	struct net_device *dev;
++	struct airoha_eth *eth;
++	int id;		/* GDM port index */
++
++	struct airoha_hw_stats stats;
++};
++
++/* Top-level driver state: register bases, irq bookkeeping, resets,
++ * ports and all QDMA rings.
++ */
++struct airoha_eth {
++	struct device *dev;
++
++	unsigned long state;	/* DEV_STATE_* bitmask */
++
++	void __iomem *qdma_regs;	/* QDMA register window */
++	void __iomem *fe_regs;		/* frame-engine register window */
++
++	/* protect concurrent irqmask accesses */
++	spinlock_t irq_lock;
++	u32 irqmask[QDMA_INT_REG_MAX];	/* sw copy of REG_INT_ENABLE() */
++	int irq;
++
++	struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
++	struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
++
++	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
++
++	struct net_device *napi_dev;
++	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
++	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
++
++	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
++
++	/* descriptor and packet buffers for qdma hw forward */
++	struct {
++		void *desc;
++		void *q;
++	} hfwd;
++};
++
++/* Read a 32-bit register at base + offset */
++static u32 airoha_rr(void __iomem *base, u32 offset)
++{
++	return readl(base + offset);
++}
++
++/* Write a 32-bit register at base + offset */
++static void airoha_wr(void __iomem *base, u32 offset, u32 val)
++{
++	writel(val, base + offset);
++}
++
++/* Read-modify-write: clear the bits in @mask, OR in @val, and return the
++ * value written back.
++ */
++static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
++{
++	val |= (airoha_rr(base, offset) & ~mask);
++	airoha_wr(base, offset, val);
++
++	return val;
++}
++
++/* Frame-engine register accessors (set/clear are rmw with a full mask) */
++#define airoha_fe_rr(eth, offset) \
++ airoha_rr((eth)->fe_regs, (offset))
++#define airoha_fe_wr(eth, offset, val) \
++ airoha_wr((eth)->fe_regs, (offset), (val))
++#define airoha_fe_rmw(eth, offset, mask, val) \
++ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
++#define airoha_fe_set(eth, offset, val) \
++ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
++#define airoha_fe_clear(eth, offset, val) \
++ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
++
++/* QDMA register accessors */
++#define airoha_qdma_rr(eth, offset) \
++ airoha_rr((eth)->qdma_regs, (offset))
++#define airoha_qdma_wr(eth, offset, val) \
++ airoha_wr((eth)->qdma_regs, (offset), (val))
++#define airoha_qdma_rmw(eth, offset, mask, val) \
++ airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
++#define airoha_qdma_set(eth, offset, val) \
++ airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
++#define airoha_qdma_clear(eth, offset, val) \
++ airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
++
++/* Update the cached irq mask for bank @index (clear then set) and push it
++ * to the REG_INT_ENABLE register, under irq_lock.
++ * Fix: 'ð' on the spin_lock/unlock lines was HTML-entity corruption of
++ * '&eth' and did not compile; restored to &eth->irq_lock.
++ */
++static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
++				    u32 clear, u32 set)
++{
++	unsigned long flags;
++
++	if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
++		return;
++
++	spin_lock_irqsave(&eth->irq_lock, flags);
++
++	eth->irqmask[index] &= ~clear;
++	eth->irqmask[index] |= set;
++	airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
++	/* Read irq_enable register in order to guarantee the update above
++	 * completes in the spinlock critical section.
++	 */
++	airoha_qdma_rr(eth, REG_INT_ENABLE(index));
++
++	spin_unlock_irqrestore(&eth->irq_lock, flags);
++}
++
++/* Enable the irqs in @mask for interrupt bank @index */
++static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
++				   u32 mask)
++{
++	airoha_qdma_set_irqmask(eth, index, 0, mask);
++}
++
++/* Disable the irqs in @mask for interrupt bank @index */
++static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
++				    u32 mask)
++{
++	airoha_qdma_set_irqmask(eth, index, mask, 0);
++}
++
++/* Program the LAN MAC address: MAC_H holds the three high octets,
++ * LMIN/LMAX the three low octets. Both are written with the same value,
++ * so the accepted address range is collapsed to a single address.
++ */
++static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
++{
++	u32 val;
++
++	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
++	airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
++
++	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
++	airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
++	airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
++}
++
++/* Set the same PSE forwarding target @val for all four traffic-class
++ * fields of a GDM forward-config register (presumably other/multicast/
++ * broadcast/unicast forward queues - confirm against hw docs).
++ */
++static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
++					u32 val)
++{
++	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
++		      FIELD_PREP(GDM_OCFQ_MASK, val));
++	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
++		      FIELD_PREP(GDM_MCFQ_MASK, val));
++	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
++		      FIELD_PREP(GDM_BCFQ_MASK, val));
++	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
++		      FIELD_PREP(GDM_UCFQ_MASK, val));
++}
++
++/* Enable or disable one XSI port: toggle its VIP/IFC port-enable bits
++ * and point the owning GDM (GDM3 for PCIe, GDM4 for USB/ETH) at PPE1
++ * when enabled, or at the DROP port when disabled.
++ *
++ * Returns 0 on success, -EINVAL for unhandled ports (including
++ * XSI_AE_PORT, which falls through to the default case).
++ */
++static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
++{
++	u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
++	u32 vip_port, cfg_addr;
++
++	switch (port) {
++	case XSI_PCIE0_PORT:
++		vip_port = XSI_PCIE0_VIP_PORT_MASK;
++		cfg_addr = REG_GDM_FWD_CFG(3);
++		break;
++	case XSI_PCIE1_PORT:
++		vip_port = XSI_PCIE1_VIP_PORT_MASK;
++		cfg_addr = REG_GDM_FWD_CFG(3);
++		break;
++	case XSI_USB_PORT:
++		vip_port = XSI_USB_VIP_PORT_MASK;
++		cfg_addr = REG_GDM_FWD_CFG(4);
++		break;
++	case XSI_ETH_PORT:
++		vip_port = XSI_ETH_VIP_PORT_MASK;
++		cfg_addr = REG_GDM_FWD_CFG(4);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	if (enable) {
++		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
++		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
++	} else {
++		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
++		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
++	}
++
++	airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
++
++	return 0;
++}
++
++/* Enable or disable all XSI GDM ports; on failure, disable the ports
++ * that were already enabled before returning the error.
++ */
++static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
++{
++	const int port_list[] = {
++		XSI_PCIE0_PORT,
++		XSI_PCIE1_PORT,
++		XSI_USB_PORT,
++		XSI_ETH_PORT
++	};
++	int i, err;
++
++	for (i = 0; i < ARRAY_SIZE(port_list); i++) {
++		err = airoha_set_gdm_port(eth, port_list[i], enable);
++		if (err)
++			goto error;
++	}
++
++	return 0;
++
++error:
++	/* Fix: the unwind loop used 'i++', walking off the end of
++	 * port_list instead of back down to 0.
++	 */
++	for (i--; i >= 0; i--)
++		airoha_set_gdm_port(eth, port_list[i], false);
++
++	return err;
++}
++
++/* Per-GDM MAC setup: enable checksum offloads and CRC-error drop, point
++ * forwarding at CDM1, set min/max frame lengths (60/4004), program the
++ * CDM1 VLAN TPID (0x8100) and enable CPU-port padding.
++ * GDM register indices are 1-based, hence p in [1, ARRAY_SIZE(ports)].
++ */
++static void airoha_fe_maccr_init(struct airoha_eth *eth)
++{
++	int p;
++
++	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
++		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
++			      GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
++			      GDM_DROP_CRC_ERR);
++		airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
++					    FE_PSE_PORT_CDM1);
++		airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
++			      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
++			      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
++			      FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
++	}
++
++	airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
++		      FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
++
++	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
++}
++
++/* Install VIP patterns that trap control-plane traffic to the CPU:
++ * PPPoE discovery and PPP LCP/IPCP/CHAP/IPV6CP/PAP (TYPE 1 = ethertype/
++ * protocol match - presumably; confirm against hw docs), BOOTP/DHCP,
++ * ISAKMP and DHCPv6 (TYPE 4 = UDP port match, encoded as dport<<16|sport),
++ * plus IEEE 1905 and LLDP ethertypes.
++ */
++static void airoha_fe_vip_setup(struct airoha_eth *eth)
++{
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
++
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
++		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
++		     PATN_EN_MASK);
++
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
++		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
++		     PATN_EN_MASK);
++
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
++		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
++		     PATN_EN_MASK);
++
++	/* BOOTP (0x43) */
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
++		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
++		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
++
++	/* BOOTP (0x44) */
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
++		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
++		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
++
++	/* ISAKMP */
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
++		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
++		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
++
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
++		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
++		     PATN_EN_MASK);
++
++	/* DHCPv6 */
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
++		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
++		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
++
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
++		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
++		     PATN_EN_MASK);
++
++	/* ETH->ETH_P_1905 (0x893a) */
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
++		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
++
++	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
++	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
++		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
++}
++
++/* Read the reserved-pages count of one PSE output queue: select the
++ * port/queue via the WR register, then read the value back from the VAL
++ * register.
++ */
++static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
++					     u32 port, u32 queue)
++{
++	u32 val;
++
++	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
++		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
++		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
++		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
++	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
++
++	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
++}
++
++/* Write @val reserved pages for one PSE output queue: stage the value in
++ * the VAL register, then commit it by writing port/queue with the WR_EN
++ * and OQRSV_SEL bits set.
++ */
++static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
++					      u32 port, u32 queue, u32 val)
++{
++	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
++		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
++	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
++		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
++		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
++		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
++		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
++		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
++}
++
++/* Set the reserved-pages count of one PSE output queue, then rebalance
++ * the global accounting: adjust the all-reserved total by the delta and
++ * recompute the shared-buffer high/mid/low thresholds from the free-queue
++ * limit (high = limit - rsv - 0x20, mid = limit - rsv - 0x100,
++ * low = 3/4 of mid).
++ *
++ * NOTE(review): orig_val is read back *after* the new value was written,
++ * so the delta is computed against the committed value, not the previous
++ * one - presumably relying on the hw clamping the write; confirm.
++ * Always returns 0.
++ */
++static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
++				    u32 port, u32 queue, u32 val)
++{
++	u32 orig_val, tmp, all_rsv, fq_limit;
++
++	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
++
++	/* modify all rsv */
++	orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
++	tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
++	all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
++	all_rsv += (val - orig_val);
++	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
++		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
++
++	/* modify hthd */
++	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
++	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
++	tmp = fq_limit - all_rsv - 0x20;
++	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
++		      PSE_SHARE_USED_HTHD_MASK,
++		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
++
++	tmp = fq_limit - all_rsv - 0x100;
++	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
++		      PSE_SHARE_USED_MTHD_MASK,
++		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
++	tmp = (3 * tmp) >> 2;
++	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
++		      PSE_SHARE_USED_LTHD_MASK,
++		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
++
++	return 0;
++}
++
++/* Configure per-queue reserved pages for every PSE port. Ports whose
++ * loops start above 0 (GDM2 from q=6, CDM4 from q=4) leave the lower
++ * queues at their reset defaults.
++ */
++static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
++{
++	const u32 pse_port_num_queues[] = {
++		[FE_PSE_PORT_CDM1] = 6,
++		[FE_PSE_PORT_GDM1] = 6,
++		[FE_PSE_PORT_GDM2] = 32,
++		[FE_PSE_PORT_GDM3] = 6,
++		[FE_PSE_PORT_PPE1] = 4,
++		[FE_PSE_PORT_CDM2] = 6,
++		[FE_PSE_PORT_CDM3] = 8,
++		[FE_PSE_PORT_CDM4] = 10,
++		[FE_PSE_PORT_PPE2] = 4,
++		[FE_PSE_PORT_GDM4] = 2,
++		[FE_PSE_PORT_CDM5] = 2,
++	};
++	int q;
++
++	/* hw misses PPE2 oq rsv */
++	airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
++		      PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);
++
++	/* CDM1 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
++					 PSE_QUEUE_RSV_PAGES);
++	/* GDM1 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
++					 PSE_QUEUE_RSV_PAGES);
++	/* GDM2 */
++	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
++	/* GDM3 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
++					 PSE_QUEUE_RSV_PAGES);
++	/* PPE1 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
++		/* NOTE(review): this condition is always true (same bound
++		 * as the loop), so the else branch is dead. The PPE2 loop
++		 * below compares against num / 2 - possibly a missing
++		 * '/ 2' here; left as-is pending confirmation.
++		 */
++		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
++			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
++						 PSE_QUEUE_RSV_PAGES);
++		else
++			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
++	}
++	/* CDM2 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
++					 PSE_QUEUE_RSV_PAGES);
++	/* CDM3 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
++	/* CDM4 */
++	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
++					 PSE_QUEUE_RSV_PAGES);
++	/* PPE2 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
++		if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
++			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
++						 PSE_QUEUE_RSV_PAGES);
++		else
++			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
++	}
++	/* GDM4 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
++					 PSE_QUEUE_RSV_PAGES);
++	/* CDM5 */
++	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
++		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
++					 PSE_QUEUE_RSV_PAGES);
++}
++
++/* Zero the whole multicast-VLAN table: for each table entry, write 0 to
++ * the data register and issue a write command (TABLE_SEL pass for the
++ * entry itself, then one pass per port), polling CMD_DONE after each
++ * command with a 5ms timeout.
++ *
++ * Returns 0 on success or the -ETIMEDOUT from read_poll_timeout().
++ */
++static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
++{
++	int i;
++
++	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
++		int err, j;
++		u32 val;
++
++		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
++
++		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
++		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
++		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
++		err = read_poll_timeout(airoha_fe_rr, val,
++					val & MC_VLAN_CFG_CMD_DONE_MASK,
++					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
++					false, eth, REG_MC_VLAN_CFG);
++		if (err)
++			return err;
++
++		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
++			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
++
++			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
++			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
++			      MC_VLAN_CFG_RW_MASK;
++			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
++			err = read_poll_timeout(airoha_fe_rr, val,
++						val & MC_VLAN_CFG_CMD_DONE_MASK,
++						USEC_PER_MSEC,
++						5 * USEC_PER_MSEC, false, eth,
++						REG_MC_VLAN_CFG);
++			if (err)
++				return err;
++		}
++	}
++
++	return 0;
++}
++
++static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
++{
++ /* CDM1_CRSN_QSEL */
++ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
++ CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
++ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
++ CDM_CRSN_QSEL_Q1));
++ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
++ CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
++ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
++ CDM_CRSN_QSEL_Q1));
++ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
++ CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
++ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
++ CDM_CRSN_QSEL_Q1));
++ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
++ CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
++ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
++ CDM_CRSN_QSEL_Q6));
++ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
++ CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
++ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
++ CDM_CRSN_QSEL_Q1));
++ /* CDM2_CRSN_QSEL */
++ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
++ CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
++ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
++ CDM_CRSN_QSEL_Q1));
++ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
++ CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
++ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
++ CDM_CRSN_QSEL_Q1));
++ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
++ CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
++ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
++ CDM_CRSN_QSEL_Q1));
++ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
++ CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
++ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
++ CDM_CRSN_QSEL_Q6));
++ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
++ CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
++ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
++ CDM_CRSN_QSEL_Q1));
++}
++
++static int airoha_fe_init(struct airoha_eth *eth)
++{
++ airoha_fe_maccr_init(eth);
++
++ /* PSE IQ reserve */
++ airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
++ FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
++ airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
++ PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
++ FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
++ FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
++
++ /* enable FE copy engine for MC/KA/DPI */
++ airoha_fe_wr(eth, REG_FE_PCE_CFG,
++ PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
++ /* set vip queue selection to ring 1 */
++ airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
++ FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
++ airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
++ FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
++ /* set GDM4 source interface offset to 8 */
++ airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
++ GDM4_SPORT_OFF2_MASK |
++ GDM4_SPORT_OFF1_MASK |
++ GDM4_SPORT_OFF0_MASK,
++ FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
++ FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
++ FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
++
++ /* set PSE Page as 128B */
++ airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
++ FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
++ FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
++ FE_DMA_GLO_PG_SZ_MASK);
++ airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
++ FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
++ FE_RST_GDM4_MBI_ARB_MASK);
++ usleep_range(1000, 2000);
++
++ /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
++ * connect other rings to PSE Port0 OQ-0
++ */
++ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
++ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
++ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
++ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
++
++ airoha_fe_vip_setup(eth);
++ airoha_fe_pse_ports_init(eth);
++
++ airoha_fe_set(eth, REG_GDM_MISC_CFG,
++ GDM2_RDM_ACK_WAIT_PREF_MASK |
++ GDM2_CHN_VLD_MODE_MASK);
++ airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15);
++
++ /* init fragment and assemble Force Port */
++ /* NPU Core-3, NPU Bridge Channel-3 */
++ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
++ IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
++ FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
++ FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
++ /* QDMA LAN, RX Ring-22 */
++ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
++ IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
++ FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
++ FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
++
++ airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
++ airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
++
++ airoha_fe_crsn_qsel_init(eth);
++
++ airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
++ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
++
++ /* default aging mode for mbi unlock issue */
++ airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
++ MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
++ FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
++ FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
++
++ /* disable IFC by default */
++ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
++
++ /* enable 1:N vlan action, init vlan table */
++ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
++
++ return airoha_fe_mc_vlan_clear(eth);
++}
++
++static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
++{
++ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
++ struct airoha_eth *eth = q->eth;
++ int qid = q - &eth->q_rx[0];
++ int nframes = 0;
++
++ while (q->queued < q->ndesc - 1) {
++ struct airoha_queue_entry *e = &q->entry[q->head];
++ struct airoha_qdma_desc *desc = &q->desc[q->head];
++ struct page *page;
++ int offset;
++ u32 val;
++
++ page = page_pool_dev_alloc_frag(q->page_pool, &offset,
++ q->buf_size);
++ if (!page)
++ break;
++
++ q->head = (q->head + 1) % q->ndesc;
++ q->queued++;
++ nframes++;
++
++ e->buf = page_address(page) + offset;
++ e->dma_addr = page_pool_get_dma_addr(page) + offset;
++ e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
++
++ dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
++ dir);
++
++ val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
++ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
++ WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
++ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
++ WRITE_ONCE(desc->data, cpu_to_le32(val));
++ WRITE_ONCE(desc->msg0, 0);
++ WRITE_ONCE(desc->msg1, 0);
++ WRITE_ONCE(desc->msg2, 0);
++ WRITE_ONCE(desc->msg3, 0);
++
++ airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
++ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
++ }
++
++ return nframes;
++}
++
++static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
++ struct airoha_qdma_desc *desc)
++{
++ u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
++
++ sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
++ switch (sport) {
++ case 0x10 ... 0x13:
++ port = 0;
++ break;
++ case 0x2 ... 0x4:
++ port = sport - 1;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
++}
++
++static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
++{
++ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
++ struct airoha_eth *eth = q->eth;
++ int qid = q - &eth->q_rx[0];
++ int done = 0;
++
++ while (done < budget) {
++ struct airoha_queue_entry *e = &q->entry[q->tail];
++ struct airoha_qdma_desc *desc = &q->desc[q->tail];
++ dma_addr_t dma_addr = le32_to_cpu(desc->addr);
++ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
++ struct sk_buff *skb;
++ int len, p;
++
++ if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
++ break;
++
++ if (!dma_addr)
++ break;
++
++ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
++ if (!len)
++ break;
++
++ q->tail = (q->tail + 1) % q->ndesc;
++ q->queued--;
++
++ dma_sync_single_for_cpu(eth->dev, dma_addr,
++ SKB_WITH_OVERHEAD(q->buf_size), dir);
++
++ p = airoha_qdma_get_gdm_port(eth, desc);
++ if (p < 0 || !eth->ports[p]) {
++ page_pool_put_full_page(q->page_pool,
++ virt_to_head_page(e->buf),
++ true);
++ continue;
++ }
++
++ skb = napi_build_skb(e->buf, q->buf_size);
++ if (!skb) {
++ page_pool_put_full_page(q->page_pool,
++ virt_to_head_page(e->buf),
++ true);
++ break;
++ }
++
++ skb_reserve(skb, 2);
++ __skb_put(skb, len);
++ skb_mark_for_recycle(skb);
++ skb->dev = eth->ports[p]->dev;
++ skb->protocol = eth_type_trans(skb, skb->dev);
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb_record_rx_queue(skb, qid);
++ napi_gro_receive(&q->napi, skb);
++
++ done++;
++ }
++ airoha_qdma_fill_rx_queue(q);
++
++ return done;
++}
++
++static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
++{
++ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
++ struct airoha_eth *eth = q->eth;
++ int cur, done = 0;
++
++ do {
++ cur = airoha_qdma_rx_process(q, budget - done);
++ done += cur;
++ } while (cur && done < budget);
++
++ if (done < budget && napi_complete(napi))
++ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
++ RX_DONE_INT_MASK);
++
++ return done;
++}
++
++static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
++ struct airoha_queue *q, int ndesc)
++{
++ const struct page_pool_params pp_params = {
++ .order = 0,
++ .pool_size = 256,
++ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
++ PP_FLAG_PAGE_FRAG,
++ .dma_dir = DMA_FROM_DEVICE,
++ .max_len = PAGE_SIZE,
++ .nid = NUMA_NO_NODE,
++ .dev = eth->dev,
++ .napi = &q->napi,
++ };
++ int qid = q - &eth->q_rx[0], thr;
++ dma_addr_t dma_addr;
++
++ q->buf_size = PAGE_SIZE / 2;
++ q->ndesc = ndesc;
++ q->eth = eth;
++
++ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
++ GFP_KERNEL);
++ if (!q->entry)
++ return -ENOMEM;
++
++ q->page_pool = page_pool_create(&pp_params);
++ if (IS_ERR(q->page_pool)) {
++ int err = PTR_ERR(q->page_pool);
++
++ q->page_pool = NULL;
++ return err;
++ }
++
++ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
++ &dma_addr, GFP_KERNEL);
++ if (!q->desc)
++ return -ENOMEM;
++
++ netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
++
++ airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
++ airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
++ FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
++
++ thr = clamp(ndesc >> 3, 1, 32);
++ airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
++ FIELD_PREP(RX_RING_THR_MASK, thr));
++ airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
++ FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
++
++ airoha_qdma_fill_rx_queue(q);
++
++ return 0;
++}
++
++static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
++{
++ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
++ struct airoha_eth *eth = q->eth;
++
++ while (q->queued) {
++ struct airoha_queue_entry *e = &q->entry[q->tail];
++ struct page *page = virt_to_head_page(e->buf);
++
++ dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
++ dir);
++ page_pool_put_full_page(q->page_pool, page, false);
++ q->tail = (q->tail + 1) % q->ndesc;
++ q->queued--;
++ }
++}
++
++static int airoha_qdma_init_rx(struct airoha_eth *eth)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
++ int err;
++
++ if (!(RX_DONE_INT_MASK & BIT(i))) {
++ /* rx-queue not binded to irq */
++ continue;
++ }
++
++ err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
++ RX_DSCP_NUM(i));
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
++{
++ struct airoha_tx_irq_queue *irq_q;
++ struct airoha_eth *eth;
++ int id, done = 0;
++
++ irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
++ eth = irq_q->eth;
++ id = irq_q - &eth->q_tx_irq[0];
++
++ while (irq_q->queued > 0 && done < budget) {
++ u32 qid, last, val = irq_q->q[irq_q->head];
++ struct airoha_queue *q;
++
++ if (val == 0xff)
++ break;
++
++ irq_q->q[irq_q->head] = 0xff; /* mark as done */
++ irq_q->head = (irq_q->head + 1) % irq_q->size;
++ irq_q->queued--;
++ done++;
++
++ last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
++ qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
++
++ if (qid >= ARRAY_SIZE(eth->q_tx))
++ continue;
++
++ q = &eth->q_tx[qid];
++ if (!q->ndesc)
++ continue;
++
++ spin_lock_bh(&q->lock);
++
++ while (q->queued > 0) {
++ struct airoha_qdma_desc *desc = &q->desc[q->tail];
++ struct airoha_queue_entry *e = &q->entry[q->tail];
++ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
++ struct sk_buff *skb = e->skb;
++ u16 index = q->tail;
++
++ if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
++ !(desc_ctrl & QDMA_DESC_DROP_MASK))
++ break;
++
++ q->tail = (q->tail + 1) % q->ndesc;
++ q->queued--;
++
++ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
++ DMA_TO_DEVICE);
++
++ WRITE_ONCE(desc->msg0, 0);
++ WRITE_ONCE(desc->msg1, 0);
++
++ if (skb) {
++ struct netdev_queue *txq;
++
++ txq = netdev_get_tx_queue(skb->dev, qid);
++ if (netif_tx_queue_stopped(txq) &&
++ q->ndesc - q->queued >= q->free_thr)
++ netif_tx_wake_queue(txq);
++
++ dev_kfree_skb_any(skb);
++ e->skb = NULL;
++ }
++
++ if (index == last)
++ break;
++ }
++
++ spin_unlock_bh(&q->lock);
++ }
++
++ if (done) {
++ int i, len = done >> 7;
++
++ for (i = 0; i < len; i++)
++ airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
++ IRQ_CLEAR_LEN_MASK, 0x80);
++ airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
++ IRQ_CLEAR_LEN_MASK, (done & 0x7f));
++ }
++
++ if (done < budget && napi_complete(napi))
++ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
++ TX_DONE_INT_MASK(id));
++
++ return done;
++}
++
++static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
++ struct airoha_queue *q, int size)
++{
++ int i, qid = q - &eth->q_tx[0];
++ dma_addr_t dma_addr;
++
++ spin_lock_init(&q->lock);
++ q->ndesc = size;
++ q->eth = eth;
++ q->free_thr = 1 + MAX_SKB_FRAGS;
++
++ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
++ GFP_KERNEL);
++ if (!q->entry)
++ return -ENOMEM;
++
++ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
++ &dma_addr, GFP_KERNEL);
++ if (!q->desc)
++ return -ENOMEM;
++
++ for (i = 0; i < q->ndesc; i++) {
++ u32 val;
++
++ val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
++ WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
++ }
++
++ airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
++ airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
++ FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
++ airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
++ FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
++
++ return 0;
++}
++
++static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
++ struct airoha_tx_irq_queue *irq_q,
++ int size)
++{
++ int id = irq_q - &eth->q_tx_irq[0];
++ dma_addr_t dma_addr;
++
++ netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
++ airoha_qdma_tx_napi_poll);
++ irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
++ &dma_addr, GFP_KERNEL);
++ if (!irq_q->q)
++ return -ENOMEM;
++
++ memset(irq_q->q, 0xff, size * sizeof(u32));
++ irq_q->size = size;
++ irq_q->eth = eth;
++
++ airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
++ airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
++ FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
++ airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
++ FIELD_PREP(TX_IRQ_THR_MASK, 1));
++
++ return 0;
++}
++
++static int airoha_qdma_init_tx(struct airoha_eth *eth)
++{
++ int i, err;
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
++ err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
++ IRQ_QUEUE_LEN(i));
++ if (err)
++ return err;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
++ err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
++ TX_DSCP_NUM);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
++{
++ struct airoha_eth *eth = q->eth;
++
++ spin_lock_bh(&q->lock);
++ while (q->queued) {
++ struct airoha_queue_entry *e = &q->entry[q->tail];
++
++ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
++ DMA_TO_DEVICE);
++ dev_kfree_skb_any(e->skb);
++ e->skb = NULL;
++
++ q->tail = (q->tail + 1) % q->ndesc;
++ q->queued--;
++ }
++ spin_unlock_bh(&q->lock);
++}
++
++static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
++{
++ dma_addr_t dma_addr;
++ u32 status;
++ int size;
++
++ size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
++ eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
++ GFP_KERNEL);
++ if (!eth->hfwd.desc)
++ return -ENOMEM;
++
++ airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
++
++ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
++ eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
++ GFP_KERNEL);
++ if (!eth->hfwd.q)
++ return -ENOMEM;
++
++ airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
++
++ airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
++ HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
++ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
++ airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
++ FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
++ airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
++ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
++ HW_FWD_DESC_NUM_MASK,
++ FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
++ LMGR_INIT_START);
++
++ return read_poll_timeout(airoha_qdma_rr, status,
++ !(status & LMGR_INIT_START), USEC_PER_MSEC,
++ 30 * USEC_PER_MSEC, true, eth,
++ REG_LMGR_INIT_CFG);
++}
++
++static void airoha_qdma_init_qos(struct airoha_eth *eth)
++{
++ airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
++ airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
++
++ airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
++ PSE_BUF_ESTIMATE_EN_MASK);
++
++ airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
++ EGRESS_RATE_METER_EN_MASK |
++ EGRESS_RATE_METER_EQ_RATE_EN_MASK);
++ /* 2047us x 31 = 63.457ms */
++ airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
++ EGRESS_RATE_METER_WINDOW_SZ_MASK,
++ FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
++ airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
++ EGRESS_RATE_METER_TIMESLICE_MASK,
++ FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
++
++ /* ratelimit init */
++ airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
++ /* fast-tick 25us */
++ airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
++ FIELD_PREP(GLB_FAST_TICK_MASK, 25));
++ airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
++ FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
++
++ airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
++ airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
++ FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
++ airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
++ EGRESS_SLOW_TICK_RATIO_MASK,
++ FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
++
++ airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
++ airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
++ INGRESS_TRTCM_MODE_MASK);
++ airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
++ FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
++ airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
++ INGRESS_SLOW_TICK_RATIO_MASK,
++ FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
++
++ airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
++ airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
++ FIELD_PREP(SLA_FAST_TICK_MASK, 25));
++ airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
++ FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
++}
++
++static int airoha_qdma_hw_init(struct airoha_eth *eth)
++{
++ int i;
++
++ /* clear pending irqs */
++ for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
++ airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
++
++ /* setup irqs */
++ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
++ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
++ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
++
++ /* setup irq binding */
++ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
++ if (!eth->q_tx[i].ndesc)
++ continue;
++
++ if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
++ airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
++ TX_RING_IRQ_BLOCKING_CFG_MASK);
++ else
++ airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
++ TX_RING_IRQ_BLOCKING_CFG_MASK);
++ }
++
++ airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
++ GLOBAL_CFG_RX_2B_OFFSET_MASK |
++ FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
++ GLOBAL_CFG_CPU_TXR_RR_MASK |
++ GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
++ GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
++ GLOBAL_CFG_MULTICAST_EN_MASK |
++ GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
++ GLOBAL_CFG_TX_WB_DONE_MASK |
++ FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
++
++ airoha_qdma_init_qos(eth);
++
++ /* disable qdma rx delay interrupt */
++ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
++ if (!eth->q_rx[i].ndesc)
++ continue;
++
++ airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
++ RX_DELAY_INT_MASK);
++ }
++
++ airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
++ TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
++
++ return 0;
++}
++
++static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
++{
++ struct airoha_eth *eth = dev_instance;
++ u32 intr[ARRAY_SIZE(eth->irqmask)];
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
++ intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
++ intr[i] &= eth->irqmask[i];
++ airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
++ }
++
++ if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
++ return IRQ_NONE;
++
++ if (intr[1] & RX_DONE_INT_MASK) {
++ airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
++ RX_DONE_INT_MASK);
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
++ if (!eth->q_rx[i].ndesc)
++ continue;
++
++ if (intr[1] & BIT(i))
++ napi_schedule(&eth->q_rx[i].napi);
++ }
++ }
++
++ if (intr[0] & INT_TX_MASK) {
++ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
++ struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
++ u32 status, head;
++
++ if (!(intr[0] & TX_DONE_INT_MASK(i)))
++ continue;
++
++ airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
++ TX_DONE_INT_MASK(i));
++
++ status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
++ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
++ irq_q->head = head % irq_q->size;
++ irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
++
++ napi_schedule(&eth->q_tx_irq[i].napi);
++ }
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int airoha_qdma_init(struct airoha_eth *eth)
++{
++ int err;
++
++ err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
++ IRQF_SHARED, KBUILD_MODNAME, eth);
++ if (err)
++ return err;
++
++ err = airoha_qdma_init_rx(eth);
++ if (err)
++ return err;
++
++ err = airoha_qdma_init_tx(eth);
++ if (err)
++ return err;
++
++ err = airoha_qdma_init_hfwd_queues(eth);
++ if (err)
++ return err;
++
++ err = airoha_qdma_hw_init(eth);
++ if (err)
++ return err;
++
++ set_bit(DEV_STATE_INITIALIZED, &eth->state);
++
++ return 0;
++}
++
++static int airoha_hw_init(struct airoha_eth *eth)
++{
++ int err;
++
++ /* disable xsi */
++ reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
++
++ reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
++ msleep(20);
++ reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
++ msleep(20);
++
++ err = airoha_fe_init(eth);
++ if (err)
++ return err;
++
++ return airoha_qdma_init(eth);
++}
++
++static void airoha_hw_cleanup(struct airoha_eth *eth)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
++ if (!eth->q_rx[i].ndesc)
++ continue;
++
++ napi_disable(&eth->q_rx[i].napi);
++ netif_napi_del(&eth->q_rx[i].napi);
++ airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
++ if (eth->q_rx[i].page_pool)
++ page_pool_destroy(eth->q_rx[i].page_pool);
++ }
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
++ napi_disable(&eth->q_tx_irq[i].napi);
++ netif_napi_del(&eth->q_tx_irq[i].napi);
++ }
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
++ if (!eth->q_tx[i].ndesc)
++ continue;
++
++ airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
++ }
++}
++
++static void airoha_qdma_start_napi(struct airoha_eth *eth)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
++ napi_enable(&eth->q_tx_irq[i].napi);
++
++ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
++ if (!eth->q_rx[i].ndesc)
++ continue;
++
++ napi_enable(&eth->q_rx[i].napi);
++ }
++}
++
++static void airoha_update_hw_stats(struct airoha_gdm_port *port)
++{
++ struct airoha_eth *eth = port->eth;
++ u32 val, i = 0;
++
++ spin_lock(&port->stats.lock);
++ u64_stats_update_begin(&port->stats.syncp);
++
++ /* TX */
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
++ port->stats.tx_ok_pkts += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
++ port->stats.tx_ok_pkts += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
++ port->stats.tx_ok_bytes += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
++ port->stats.tx_ok_bytes += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
++ port->stats.tx_drops += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
++ port->stats.tx_broadcast += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
++ port->stats.tx_multicast += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
++ port->stats.tx_len[i] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
++ port->stats.tx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
++ port->stats.tx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
++ port->stats.tx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
++ port->stats.tx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
++ port->stats.tx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
++ port->stats.tx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
++ port->stats.tx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
++ port->stats.tx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
++ port->stats.tx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
++ port->stats.tx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
++ port->stats.tx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
++ port->stats.tx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
++ port->stats.tx_len[i++] += val;
++
++ /* RX */
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
++ port->stats.rx_ok_pkts += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
++ port->stats.rx_ok_pkts += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
++ port->stats.rx_ok_bytes += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
++ port->stats.rx_ok_bytes += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
++ port->stats.rx_drops += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
++ port->stats.rx_broadcast += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
++ port->stats.rx_multicast += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
++ port->stats.rx_errors += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
++ port->stats.rx_crc_error += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
++ port->stats.rx_over_errors += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
++ port->stats.rx_fragment += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
++ port->stats.rx_jabber += val;
++
++ i = 0;
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
++ port->stats.rx_len[i] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
++ port->stats.rx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
++ port->stats.rx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
++ port->stats.rx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
++ port->stats.rx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
++ port->stats.rx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
++ port->stats.rx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
++ port->stats.rx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
++ port->stats.rx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
++ port->stats.rx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
++ port->stats.rx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
++ port->stats.rx_len[i] += ((u64)val << 32);
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
++ port->stats.rx_len[i++] += val;
++
++ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
++ port->stats.rx_len[i++] += val;
++
++ /* reset mib counters */
++ airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
++ FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
++
++ u64_stats_update_end(&port->stats.syncp);
++ spin_unlock(&port->stats.lock);
++}
++
++static int airoha_dev_open(struct net_device *dev)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ struct airoha_eth *eth = port->eth;
++ int err;
++
++ netif_tx_start_all_queues(dev);
++ err = airoha_set_gdm_ports(eth, true);
++ if (err)
++ return err;
++
++ if (netdev_uses_dsa(dev))
++ airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
++ GDM_STAG_EN_MASK);
++ else
++ airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
++ GDM_STAG_EN_MASK);
++
++ airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
++ airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
++
++ return 0;
++}
++
++static int airoha_dev_stop(struct net_device *dev)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ struct airoha_eth *eth = port->eth;
++ int err;
++
++ netif_tx_disable(dev);
++ err = airoha_set_gdm_ports(eth, false);
++ if (err)
++ return err;
++
++ airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
++ airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
++
++ return 0;
++}
++
++static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ int err;
++
++ err = eth_mac_addr(dev, p);
++ if (err)
++ return err;
++
++ airoha_set_macaddr(port->eth, dev->dev_addr);
++
++ return 0;
++}
++
++static int airoha_dev_init(struct net_device *dev)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++
++ airoha_set_macaddr(port->eth, dev->dev_addr);
++
++ return 0;
++}
++
++static void airoha_dev_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *storage)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ unsigned int start;
++
++ airoha_update_hw_stats(port);
++ do {
++ start = u64_stats_fetch_begin(&port->stats.syncp);
++ storage->rx_packets = port->stats.rx_ok_pkts;
++ storage->tx_packets = port->stats.tx_ok_pkts;
++ storage->rx_bytes = port->stats.rx_ok_bytes;
++ storage->tx_bytes = port->stats.tx_ok_bytes;
++ storage->multicast = port->stats.rx_multicast;
++ storage->rx_errors = port->stats.rx_errors;
++ storage->rx_dropped = port->stats.rx_drops;
++ storage->tx_dropped = port->stats.tx_drops;
++ storage->rx_crc_errors = port->stats.rx_crc_error;
++ storage->rx_over_errors = port->stats.rx_over_errors;
++ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
++}
++
++static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ struct skb_shared_info *sinfo = skb_shinfo(skb);
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ u32 msg0 = 0, msg1, len = skb_headlen(skb);
++ int i, qid = skb_get_queue_mapping(skb);
++ struct airoha_eth *eth = port->eth;
++ u32 nr_frags = 1 + sinfo->nr_frags;
++ struct netdev_queue *txq;
++ struct airoha_queue *q;
++ void *data = skb->data;
++ u16 index;
++ u8 fport;
++
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
++ FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
++ FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
++
++ /* TSO: fill MSS info in tcp checksum field */
++ if (skb_is_gso(skb)) {
++ if (skb_cow_head(skb, 0))
++ goto error;
++
++ if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
++ __be16 csum = cpu_to_be16(sinfo->gso_size);
++
++ tcp_hdr(skb)->check = (__force __sum16)csum;
++ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
++ }
++ }
++
++ fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
++ msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
++ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
++
++ q = ð->q_tx[qid];
++ if (WARN_ON_ONCE(!q->ndesc))
++ goto error;
++
++ spin_lock_bh(&q->lock);
++
++ txq = netdev_get_tx_queue(dev, qid);
++ if (q->queued + nr_frags > q->ndesc) {
++ /* not enough space in the queue */
++ netif_tx_stop_queue(txq);
++ spin_unlock_bh(&q->lock);
++ return NETDEV_TX_BUSY;
++ }
++
++ index = q->head;
++ for (i = 0; i < nr_frags; i++) {
++ struct airoha_qdma_desc *desc = &q->desc[index];
++ struct airoha_queue_entry *e = &q->entry[index];
++ skb_frag_t *frag = &sinfo->frags[i];
++ dma_addr_t addr;
++ u32 val;
++
++ addr = dma_map_single(dev->dev.parent, data, len,
++ DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
++ goto error_unmap;
++
++ index = (index + 1) % q->ndesc;
++
++ val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
++ if (i < nr_frags - 1)
++ val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
++ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
++ WRITE_ONCE(desc->addr, cpu_to_le32(addr));
++ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
++ WRITE_ONCE(desc->data, cpu_to_le32(val));
++ WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
++ WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
++ WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
++
++ e->skb = i ? NULL : skb;
++ e->dma_addr = addr;
++ e->dma_len = len;
++
++ airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
++ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
++
++ data = skb_frag_address(frag);
++ len = skb_frag_size(frag);
++ }
++
++ q->head = index;
++ q->queued += i;
++
++ skb_tx_timestamp(skb);
++ if (q->ndesc - q->queued < q->free_thr)
++ netif_tx_stop_queue(txq);
++
++ spin_unlock_bh(&q->lock);
++
++ return NETDEV_TX_OK;
++
++error_unmap:
++ for (i--; i >= 0; i++)
++ dma_unmap_single(dev->dev.parent, q->entry[i].dma_addr,
++ q->entry[i].dma_len, DMA_TO_DEVICE);
++
++ spin_unlock_bh(&q->lock);
++error:
++ dev_kfree_skb_any(skb);
++ dev->stats.tx_dropped++;
++
++ return NETDEV_TX_OK;
++}
++
++static void airoha_ethtool_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *info)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ struct airoha_eth *eth = port->eth;
++
++ strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
++ strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
++}
++
++static void airoha_ethtool_get_mac_stats(struct net_device *dev,
++ struct ethtool_eth_mac_stats *stats)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ unsigned int start;
++
++ airoha_update_hw_stats(port);
++ do {
++ start = u64_stats_fetch_begin(&port->stats.syncp);
++ stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
++ stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
++ stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
++ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
++}
++
++static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
++ { 0, 64 },
++ { 65, 127 },
++ { 128, 255 },
++ { 256, 511 },
++ { 512, 1023 },
++ { 1024, 1518 },
++ { 1519, 10239 },
++ {},
++};
++
++static void
++airoha_ethtool_get_rmon_stats(struct net_device *dev,
++ struct ethtool_rmon_stats *stats,
++ const struct ethtool_rmon_hist_range **ranges)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ struct airoha_hw_stats *hw_stats = &port->stats;
++ unsigned int start;
++
++ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
++ ARRAY_SIZE(hw_stats->tx_len) + 1);
++ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
++ ARRAY_SIZE(hw_stats->rx_len) + 1);
++
++ *ranges = airoha_ethtool_rmon_ranges;
++ airoha_update_hw_stats(port);
++ do {
++ int i;
++
++ start = u64_stats_fetch_begin(&port->stats.syncp);
++ stats->fragments = hw_stats->rx_fragment;
++ stats->jabbers = hw_stats->rx_jabber;
++ for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
++ i++) {
++ stats->hist[i] = hw_stats->rx_len[i];
++ stats->hist_tx[i] = hw_stats->tx_len[i];
++ }
++ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
++}
++
++static const struct net_device_ops airoha_netdev_ops = {
++ .ndo_init = airoha_dev_init,
++ .ndo_open = airoha_dev_open,
++ .ndo_stop = airoha_dev_stop,
++ .ndo_start_xmit = airoha_dev_xmit,
++ .ndo_get_stats64 = airoha_dev_get_stats64,
++ .ndo_set_mac_address = airoha_dev_set_macaddr,
++};
++
++static const struct ethtool_ops airoha_ethtool_ops = {
++ .get_drvinfo = airoha_ethtool_get_drvinfo,
++ .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
++ .get_rmon_stats = airoha_ethtool_get_rmon_stats,
++};
++
++static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
++{
++ const __be32 *id_ptr = of_get_property(np, "reg", NULL);
++ struct airoha_gdm_port *port;
++ struct net_device *dev;
++ int err, index;
++ u32 id;
++
++ if (!id_ptr) {
++ dev_err(eth->dev, "missing gdm port id\n");
++ return -EINVAL;
++ }
++
++ id = be32_to_cpup(id_ptr);
++ index = id - 1;
++
++ if (!id || id > ARRAY_SIZE(eth->ports)) {
++ dev_err(eth->dev, "invalid gdm port id: %d\n", id);
++ return -EINVAL;
++ }
++
++ if (eth->ports[index]) {
++ dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
++ return -EINVAL;
++ }
++
++ dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
++ AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
++ if (!dev) {
++ dev_err(eth->dev, "alloc_etherdev failed\n");
++ return -ENOMEM;
++ }
++
++ dev->netdev_ops = &airoha_netdev_ops;
++ dev->ethtool_ops = &airoha_ethtool_ops;
++ dev->max_mtu = AIROHA_MAX_MTU;
++ dev->watchdog_timeo = 5 * HZ;
++ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
++ NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
++ NETIF_F_SG | NETIF_F_TSO;
++ dev->features |= dev->hw_features;
++ dev->dev.of_node = np;
++ SET_NETDEV_DEV(dev, eth->dev);
++
++ err = of_get_ethdev_address(np, dev);
++ if (err) {
++ if (err == -EPROBE_DEFER)
++ return err;
++
++ eth_hw_addr_random(dev);
++ dev_info(eth->dev, "generated random MAC address %pM\n",
++ dev->dev_addr);
++ }
++
++ port = netdev_priv(dev);
++ u64_stats_init(&port->stats.syncp);
++ spin_lock_init(&port->stats.lock);
++ port->dev = dev;
++ port->eth = eth;
++ port->id = id;
++ eth->ports[index] = port;
++
++ return register_netdev(dev);
++}
++
++static int airoha_probe(struct platform_device *pdev)
++{
++ struct device_node *np;
++ struct airoha_eth *eth;
++ int i, err;
++
++ eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
++ if (!eth)
++ return -ENOMEM;
++
++ eth->dev = &pdev->dev;
++
++ err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
++ if (err) {
++ dev_err(eth->dev, "failed configuring DMA mask\n");
++ return err;
++ }
++
++ eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
++ if (IS_ERR(eth->fe_regs))
++ return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
++ "failed to iomap fe regs\n");
++
++ eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
++ if (IS_ERR(eth->qdma_regs))
++ return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
++ "failed to iomap qdma regs\n");
++
++ eth->rsts[0].id = "fe";
++ eth->rsts[1].id = "pdma";
++ eth->rsts[2].id = "qdma";
++ err = devm_reset_control_bulk_get_exclusive(eth->dev,
++ ARRAY_SIZE(eth->rsts),
++ eth->rsts);
++ if (err) {
++ dev_err(eth->dev, "failed to get bulk reset lines\n");
++ return err;
++ }
++
++ eth->xsi_rsts[0].id = "xsi-mac";
++ eth->xsi_rsts[1].id = "hsi0-mac";
++ eth->xsi_rsts[2].id = "hsi1-mac";
++ eth->xsi_rsts[3].id = "hsi-mac";
++ eth->xsi_rsts[4].id = "xfp-mac";
++ err = devm_reset_control_bulk_get_exclusive(eth->dev,
++ ARRAY_SIZE(eth->xsi_rsts),
++ eth->xsi_rsts);
++ if (err) {
++ dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
++ return err;
++ }
++
++ spin_lock_init(ð->irq_lock);
++ eth->irq = platform_get_irq(pdev, 0);
++ if (eth->irq < 0)
++ return eth->irq;
++
++ eth->napi_dev = alloc_netdev_dummy(0);
++ if (!eth->napi_dev)
++ return -ENOMEM;
++
++ /* Enable threaded NAPI by default */
++ eth->napi_dev->threaded = true;
++ strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
++ platform_set_drvdata(pdev, eth);
++
++ err = airoha_hw_init(eth);
++ if (err)
++ goto error;
++
++ airoha_qdma_start_napi(eth);
++ for_each_child_of_node(pdev->dev.of_node, np) {
++ if (!of_device_is_compatible(np, "airoha,eth-mac"))
++ continue;
++
++ if (!of_device_is_available(np))
++ continue;
++
++ err = airoha_alloc_gdm_port(eth, np);
++ if (err) {
++ of_node_put(np);
++ goto error;
++ }
++ }
++
++ return 0;
++
++error:
++ airoha_hw_cleanup(eth);
++ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
++ struct airoha_gdm_port *port = eth->ports[i];
++
++ if (port && port->dev->reg_state == NETREG_REGISTERED)
++ unregister_netdev(port->dev);
++ }
++ free_netdev(eth->napi_dev);
++ platform_set_drvdata(pdev, NULL);
++
++ return err;
++}
++
++static void airoha_remove(struct platform_device *pdev)
++{
++ struct airoha_eth *eth = platform_get_drvdata(pdev);
++ int i;
++
++ airoha_hw_cleanup(eth);
++ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
++ struct airoha_gdm_port *port = eth->ports[i];
++
++ if (!port)
++ continue;
++
++ airoha_dev_stop(port->dev);
++ unregister_netdev(port->dev);
++ }
++ free_netdev(eth->napi_dev);
++
++ platform_set_drvdata(pdev, NULL);
++}
++
++static const struct of_device_id of_airoha_match[] = {
++ { .compatible = "airoha,en7581-eth" },
++ { /* sentinel */ }
++};
++
++static struct platform_driver airoha_driver = {
++ .probe = airoha_probe,
++ .remove_new = airoha_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = of_airoha_match,
++ },
++};
++module_platform_driver(airoha_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Lorenzo Bianconi ");
++MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
diff --git a/target/linux/airoha/patches-6.6/007-v6.11-net-airoha-fix-error-branch-in-airoha_dev_xmit-and-a.patch b/target/linux/airoha/patches-6.6/007-v6.11-net-airoha-fix-error-branch-in-airoha_dev_xmit-and-a.patch
new file mode 100644
index 0000000000..3f2d577403
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/007-v6.11-net-airoha-fix-error-branch-in-airoha_dev_xmit-and-a.patch
@@ -0,0 +1,46 @@
+From 1f038d5897fe6b439039fc28420842abcc0d126b Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Wed, 17 Jul 2024 10:15:46 +0200
+Subject: [PATCH] net: airoha: fix error branch in airoha_dev_xmit and
+ airoha_set_gdm_ports
+
+Fix error case management in airoha_dev_xmit routine since we need to
+DMA unmap pending buffers starting from q->head.
+Moreover fix a typo in error case branch in airoha_set_gdm_ports
+routine.
+
+Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Simon Horman
+Link: https://patch.msgid.link/b628871bc8ae4861b5e2ab4db90aaf373cbb7cee.1721203880.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -977,7 +977,7 @@ static int airoha_set_gdm_ports(struct a
+ return 0;
+
+ error:
+- for (i--; i >= 0; i++)
++ for (i--; i >= 0; i--)
+ airoha_set_gdm_port(eth, port_list[i], false);
+
+ return err;
+@@ -2432,9 +2432,11 @@ static netdev_tx_t airoha_dev_xmit(struc
+ return NETDEV_TX_OK;
+
+ error_unmap:
+- for (i--; i >= 0; i++)
+- dma_unmap_single(dev->dev.parent, q->entry[i].dma_addr,
+- q->entry[i].dma_len, DMA_TO_DEVICE);
++ for (i--; i >= 0; i--) {
++ index = (q->head + i) % q->ndesc;
++ dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
++ q->entry[index].dma_len, DMA_TO_DEVICE);
++ }
+
+ spin_unlock_bh(&q->lock);
+ error:
diff --git a/target/linux/airoha/patches-6.6/008-v6.11-net-airoha-Fix-NULL-pointer-dereference-in-airoha_qd.patch b/target/linux/airoha/patches-6.6/008-v6.11-net-airoha-Fix-NULL-pointer-dereference-in-airoha_qd.patch
new file mode 100644
index 0000000000..4c8b361f3d
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/008-v6.11-net-airoha-Fix-NULL-pointer-dereference-in-airoha_qd.patch
@@ -0,0 +1,39 @@
+From 4e076ff6ad5302c015617da30d877b4cdcbdf613 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Wed, 17 Jul 2024 10:47:19 +0200
+Subject: [PATCH] net: airoha: Fix NULL pointer dereference in
+ airoha_qdma_cleanup_rx_queue()
+
+Move page_pool_get_dma_dir() inside the while loop of
+airoha_qdma_cleanup_rx_queue routine in order to avoid possible NULL
+pointer dereference if airoha_qdma_init_rx_queue() fails before
+properly allocating the page_pool pointer.
+
+Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Simon Horman
+Link: https://patch.msgid.link/7330a41bba720c33abc039955f6172457a3a34f0.1721205981.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -1586,7 +1586,6 @@ static int airoha_qdma_init_rx_queue(str
+
+ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
+ {
+- enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_eth *eth = q->eth;
+
+ while (q->queued) {
+@@ -1594,7 +1593,7 @@ static void airoha_qdma_cleanup_rx_queue
+ struct page *page = virt_to_head_page(e->buf);
+
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
+- dir);
++ page_pool_get_dma_dir(q->page_pool));
+ page_pool_put_full_page(q->page_pool, page, false);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
diff --git a/target/linux/airoha/patches-6.6/009-v6.11-net-airoha-Fix-MBI_RX_AGE_SEL_MASK-definition.patch b/target/linux/airoha/patches-6.6/009-v6.11-net-airoha-Fix-MBI_RX_AGE_SEL_MASK-definition.patch
new file mode 100644
index 0000000000..15385beced
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/009-v6.11-net-airoha-Fix-MBI_RX_AGE_SEL_MASK-definition.patch
@@ -0,0 +1,27 @@
+From 39a9c25bcdfb5e88995841c47439b74cac74a527 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Fri, 19 Jul 2024 22:38:31 +0200
+Subject: [PATCH] net: airoha: Fix MBI_RX_AGE_SEL_MASK definition
+
+Fix copy-paste error in MBI_RX_AGE_SEL_MASK macro definition
+
+Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Simon Horman
+Link: https://patch.msgid.link/d27d0465be1bff3369e886e5f10c4d37fefc4934.1721419930.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -249,7 +249,7 @@
+ #define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+ #define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
+-#define MBI_RX_AGE_SEL_MASK GENMASK(18, 17)
++#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+ #define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+ #define REG_GDM3_FWD_CFG GDM3_BASE
diff --git a/target/linux/airoha/patches-6.6/010-01-v6.12-net-airoha-Introduce-airoha_qdma-struct.patch b/target/linux/airoha/patches-6.6/010-01-v6.12-net-airoha-Introduce-airoha_qdma-struct.patch
new file mode 100644
index 0000000000..3649e1c843
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/010-01-v6.12-net-airoha-Introduce-airoha_qdma-struct.patch
@@ -0,0 +1,553 @@
+From 16874d1cf3818a5804cded8eaff634122b1d6c7c Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 1 Aug 2024 16:35:03 +0200
+Subject: [PATCH 1/8] net: airoha: Introduce airoha_qdma struct
+
+Introduce airoha_qdma struct and move qdma IO register mapping in
+airoha_qdma. This is a preliminary patch to enable both QDMA controllers
+available on EN7581 SoC.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/7df163bdc72ee29c3d27a0cbf54522ffeeafe53c.1722522582.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 197 ++++++++++++---------
+ 1 file changed, 112 insertions(+), 85 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -18,6 +18,7 @@
+ #include
+
+ #define AIROHA_MAX_NUM_GDM_PORTS 1
++#define AIROHA_MAX_NUM_QDMA 1
+ #define AIROHA_MAX_NUM_RSTS 3
+ #define AIROHA_MAX_NUM_XSI_RSTS 5
+ #define AIROHA_MAX_MTU 2000
+@@ -782,6 +783,10 @@ struct airoha_hw_stats {
+ u64 rx_len[7];
+ };
+
++struct airoha_qdma {
++ void __iomem *regs;
++};
++
+ struct airoha_gdm_port {
+ struct net_device *dev;
+ struct airoha_eth *eth;
+@@ -794,8 +799,6 @@ struct airoha_eth {
+ struct device *dev;
+
+ unsigned long state;
+-
+- void __iomem *qdma_regs;
+ void __iomem *fe_regs;
+
+ /* protect concurrent irqmask accesses */
+@@ -806,6 +809,7 @@ struct airoha_eth {
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
++ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+
+ struct net_device *napi_dev;
+@@ -850,16 +854,16 @@ static u32 airoha_rmw(void __iomem *base
+ #define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+-#define airoha_qdma_rr(eth, offset) \
+- airoha_rr((eth)->qdma_regs, (offset))
+-#define airoha_qdma_wr(eth, offset, val) \
+- airoha_wr((eth)->qdma_regs, (offset), (val))
+-#define airoha_qdma_rmw(eth, offset, mask, val) \
+- airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
+-#define airoha_qdma_set(eth, offset, val) \
+- airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
+-#define airoha_qdma_clear(eth, offset, val) \
+- airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
++#define airoha_qdma_rr(qdma, offset) \
++ airoha_rr((qdma)->regs, (offset))
++#define airoha_qdma_wr(qdma, offset, val) \
++ airoha_wr((qdma)->regs, (offset), (val))
++#define airoha_qdma_rmw(qdma, offset, mask, val) \
++ airoha_rmw((qdma)->regs, (offset), (mask), (val))
++#define airoha_qdma_set(qdma, offset, val) \
++ airoha_rmw((qdma)->regs, (offset), 0, (val))
++#define airoha_qdma_clear(qdma, offset, val) \
++ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+ static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
+ u32 clear, u32 set)
+@@ -873,11 +877,12 @@ static void airoha_qdma_set_irqmask(stru
+
+ eth->irqmask[index] &= ~clear;
+ eth->irqmask[index] |= set;
+- airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
++ airoha_qdma_wr(ð->qdma[0], REG_INT_ENABLE(index),
++ eth->irqmask[index]);
+ /* Read irq_enable register in order to guarantee the update above
+ * completes in the spinlock critical section.
+ */
+- airoha_qdma_rr(eth, REG_INT_ENABLE(index));
++ airoha_qdma_rr(ð->qdma[0], REG_INT_ENABLE(index));
+
+ spin_unlock_irqrestore(ð->irq_lock, flags);
+ }
+@@ -1383,6 +1388,7 @@ static int airoha_fe_init(struct airoha_
+ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
+ {
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
++ struct airoha_qdma *qdma = &q->eth->qdma[0];
+ struct airoha_eth *eth = q->eth;
+ int qid = q - ð->q_rx[0];
+ int nframes = 0;
+@@ -1420,7 +1426,8 @@ static int airoha_qdma_fill_rx_queue(str
+ WRITE_ONCE(desc->msg2, 0);
+ WRITE_ONCE(desc->msg3, 0);
+
+- airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
++ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
++ RX_RING_CPU_IDX_MASK,
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+ }
+
+@@ -1529,7 +1536,8 @@ static int airoha_qdma_rx_napi_poll(stru
+ }
+
+ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
+- struct airoha_queue *q, int ndesc)
++ struct airoha_queue *q,
++ struct airoha_qdma *qdma, int ndesc)
+ {
+ const struct page_pool_params pp_params = {
+ .order = 0,
+@@ -1569,14 +1577,15 @@ static int airoha_qdma_init_rx_queue(str
+
+ netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
+
+- airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
+- airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
++ airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
++ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
++ RX_RING_SIZE_MASK,
+ FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
+
+ thr = clamp(ndesc >> 3, 1, 32);
+- airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
++ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ FIELD_PREP(RX_RING_THR_MASK, thr));
+- airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
++ airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+
+ airoha_qdma_fill_rx_queue(q);
+@@ -1600,7 +1609,8 @@ static void airoha_qdma_cleanup_rx_queue
+ }
+ }
+
+-static int airoha_qdma_init_rx(struct airoha_eth *eth)
++static int airoha_qdma_init_rx(struct airoha_eth *eth,
++ struct airoha_qdma *qdma)
+ {
+ int i;
+
+@@ -1613,7 +1623,7 @@ static int airoha_qdma_init_rx(struct ai
+ }
+
+ err = airoha_qdma_init_rx_queue(eth, ð->q_rx[i],
+- RX_DSCP_NUM(i));
++ qdma, RX_DSCP_NUM(i));
+ if (err)
+ return err;
+ }
+@@ -1624,11 +1634,13 @@ static int airoha_qdma_init_rx(struct ai
+ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
+ {
+ struct airoha_tx_irq_queue *irq_q;
++ struct airoha_qdma *qdma;
+ struct airoha_eth *eth;
+ int id, done = 0;
+
+ irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
+ eth = irq_q->eth;
++ qdma = ð->qdma[0];
+ id = irq_q - ð->q_tx_irq[0];
+
+ while (irq_q->queued > 0 && done < budget) {
+@@ -1698,9 +1710,9 @@ static int airoha_qdma_tx_napi_poll(stru
+ int i, len = done >> 7;
+
+ for (i = 0; i < len; i++)
+- airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
++ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, 0x80);
+- airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
++ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, (done & 0x7f));
+ }
+
+@@ -1712,7 +1724,8 @@ static int airoha_qdma_tx_napi_poll(stru
+ }
+
+ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
+- struct airoha_queue *q, int size)
++ struct airoha_queue *q,
++ struct airoha_qdma *qdma, int size)
+ {
+ int i, qid = q - ð->q_tx[0];
+ dma_addr_t dma_addr;
+@@ -1739,10 +1752,10 @@ static int airoha_qdma_init_tx_queue(str
+ WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
+ }
+
+- airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
+- airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
++ airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
++ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+- airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
++ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
+
+ return 0;
+@@ -1750,7 +1763,7 @@ static int airoha_qdma_init_tx_queue(str
+
+ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
+ struct airoha_tx_irq_queue *irq_q,
+- int size)
++ struct airoha_qdma *qdma, int size)
+ {
+ int id = irq_q - ð->q_tx_irq[0];
+ dma_addr_t dma_addr;
+@@ -1766,29 +1779,30 @@ static int airoha_qdma_tx_irq_init(struc
+ irq_q->size = size;
+ irq_q->eth = eth;
+
+- airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
+- airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
++ airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
++ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
+- airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
++ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+ FIELD_PREP(TX_IRQ_THR_MASK, 1));
+
+ return 0;
+ }
+
+-static int airoha_qdma_init_tx(struct airoha_eth *eth)
++static int airoha_qdma_init_tx(struct airoha_eth *eth,
++ struct airoha_qdma *qdma)
+ {
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(eth, ð->q_tx_irq[i],
+- IRQ_QUEUE_LEN(i));
++ qdma, IRQ_QUEUE_LEN(i));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(eth, ð->q_tx[i],
+- TX_DSCP_NUM);
++ qdma, TX_DSCP_NUM);
+ if (err)
+ return err;
+ }
+@@ -1815,7 +1829,8 @@ static void airoha_qdma_cleanup_tx_queue
+ spin_unlock_bh(&q->lock);
+ }
+
+-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
++static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
++ struct airoha_qdma *qdma)
+ {
+ dma_addr_t dma_addr;
+ u32 status;
+@@ -1827,7 +1842,7 @@ static int airoha_qdma_init_hfwd_queues(
+ if (!eth->hfwd.desc)
+ return -ENOMEM;
+
+- airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
++ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+
+ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+ eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+@@ -1835,14 +1850,14 @@ static int airoha_qdma_init_hfwd_queues(
+ if (!eth->hfwd.q)
+ return -ENOMEM;
+
+- airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
++ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+
+- airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
++ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
+ HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+- airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
++ airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
+- airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
++ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
+ HW_FWD_DESC_NUM_MASK,
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+@@ -1850,67 +1865,69 @@ static int airoha_qdma_init_hfwd_queues(
+
+ return read_poll_timeout(airoha_qdma_rr, status,
+ !(status & LMGR_INIT_START), USEC_PER_MSEC,
+- 30 * USEC_PER_MSEC, true, eth,
++ 30 * USEC_PER_MSEC, true, qdma,
+ REG_LMGR_INIT_CFG);
+ }
+
+-static void airoha_qdma_init_qos(struct airoha_eth *eth)
++static void airoha_qdma_init_qos(struct airoha_eth *eth,
++ struct airoha_qdma *qdma)
+ {
+- airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+- airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
++ airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
++ airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+
+- airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
++ airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
+ PSE_BUF_ESTIMATE_EN_MASK);
+
+- airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
++ airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_EN_MASK |
+ EGRESS_RATE_METER_EQ_RATE_EN_MASK);
+ /* 2047us x 31 = 63.457ms */
+- airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
++ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_WINDOW_SZ_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
+- airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
++ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_TIMESLICE_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
+
+ /* ratelimit init */
+- airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
++ airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+ /* fast-tick 25us */
+- airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
++ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+ FIELD_PREP(GLB_FAST_TICK_MASK, 25));
+- airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
++ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
+
+- airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+- airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
++ airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
++ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+ FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
+- airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
++ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
+ EGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
+
+- airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+- airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
++ airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
++ airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
+ INGRESS_TRTCM_MODE_MASK);
+- airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
++ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+ FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
+- airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
++ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
+ INGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
+
+- airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+- airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
++ airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
++ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+ FIELD_PREP(SLA_FAST_TICK_MASK, 25));
+- airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
++ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
+ }
+
+-static int airoha_qdma_hw_init(struct airoha_eth *eth)
++static int airoha_qdma_hw_init(struct airoha_eth *eth,
++ struct airoha_qdma *qdma)
+ {
+ int i;
+
+ /* clear pending irqs */
+ for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
+- airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
++ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
+
+ /* setup irqs */
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+@@ -1923,14 +1940,14 @@ static int airoha_qdma_hw_init(struct ai
+ continue;
+
+ if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
+- airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
++ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ else
+- airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
++ airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ }
+
+- airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
++ airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_RX_2B_OFFSET_MASK |
+ FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
+ GLOBAL_CFG_CPU_TXR_RR_MASK |
+@@ -1941,18 +1958,18 @@ static int airoha_qdma_hw_init(struct ai
+ GLOBAL_CFG_TX_WB_DONE_MASK |
+ FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
+
+- airoha_qdma_init_qos(eth);
++ airoha_qdma_init_qos(eth, qdma);
+
+ /* disable qdma rx delay interrupt */
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+- airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
++ airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
+ RX_DELAY_INT_MASK);
+ }
+
+- airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
++ airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
+ TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+
+ return 0;
+@@ -1962,12 +1979,14 @@ static irqreturn_t airoha_irq_handler(in
+ {
+ struct airoha_eth *eth = dev_instance;
+ u32 intr[ARRAY_SIZE(eth->irqmask)];
++ struct airoha_qdma *qdma;
+ int i;
+
++ qdma = ð->qdma[0];
+ for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
+- intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
++ intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+ intr[i] &= eth->irqmask[i];
+- airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
++ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
+ }
+
+ if (!test_bit(DEV_STATE_INITIALIZED, ð->state))
+@@ -1997,7 +2016,7 @@ static irqreturn_t airoha_irq_handler(in
+ airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(i));
+
+- status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
++ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
+ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+ irq_q->head = head % irq_q->size;
+ irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+@@ -2011,6 +2030,7 @@ static irqreturn_t airoha_irq_handler(in
+
+ static int airoha_qdma_init(struct airoha_eth *eth)
+ {
++ struct airoha_qdma *qdma = ð->qdma[0];
+ int err;
+
+ err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
+@@ -2018,19 +2038,19 @@ static int airoha_qdma_init(struct airoh
+ if (err)
+ return err;
+
+- err = airoha_qdma_init_rx(eth);
++ err = airoha_qdma_init_rx(eth, qdma);
+ if (err)
+ return err;
+
+- err = airoha_qdma_init_tx(eth);
++ err = airoha_qdma_init_tx(eth, qdma);
+ if (err)
+ return err;
+
+- err = airoha_qdma_init_hfwd_queues(eth);
++ err = airoha_qdma_init_hfwd_queues(eth, qdma);
+ if (err)
+ return err;
+
+- err = airoha_qdma_hw_init(eth);
++ err = airoha_qdma_hw_init(eth, qdma);
+ if (err)
+ return err;
+
+@@ -2263,8 +2283,9 @@ static int airoha_dev_open(struct net_de
+ airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+
+- airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
+- airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
++ airoha_qdma_set(ð->qdma[0], REG_QDMA_GLOBAL_CFG,
++ GLOBAL_CFG_TX_DMA_EN_MASK |
++ GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ return 0;
+ }
+@@ -2280,8 +2301,9 @@ static int airoha_dev_stop(struct net_de
+ if (err)
+ return err;
+
+- airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
+- airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
++ airoha_qdma_clear(ð->qdma[0], REG_QDMA_GLOBAL_CFG,
++ GLOBAL_CFG_TX_DMA_EN_MASK |
++ GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ return 0;
+ }
+@@ -2341,6 +2363,7 @@ static netdev_tx_t airoha_dev_xmit(struc
+ struct airoha_eth *eth = port->eth;
+ u32 nr_frags = 1 + sinfo->nr_frags;
+ struct netdev_queue *txq;
++ struct airoha_qdma *qdma;
+ struct airoha_queue *q;
+ void *data = skb->data;
+ u16 index;
+@@ -2368,6 +2391,7 @@ static netdev_tx_t airoha_dev_xmit(struc
+ msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
+ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
+
++ qdma = ð->qdma[0];
+ q = ð->q_tx[qid];
+ if (WARN_ON_ONCE(!q->ndesc))
+ goto error;
+@@ -2412,7 +2436,8 @@ static netdev_tx_t airoha_dev_xmit(struc
+ e->dma_addr = addr;
+ e->dma_len = len;
+
+- airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
++ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
++ TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+
+ data = skb_frag_address(frag);
+@@ -2614,9 +2639,11 @@ static int airoha_probe(struct platform_
+ return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
+ "failed to iomap fe regs\n");
+
+- eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
+- if (IS_ERR(eth->qdma_regs))
+- return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
++ eth->qdma[0].regs = devm_platform_ioremap_resource_byname(pdev,
++ "qdma0");
++ if (IS_ERR(eth->qdma[0].regs))
++ return dev_err_probe(eth->dev,
++ PTR_ERR(eth->qdma[0].regs),
+ "failed to iomap qdma regs\n");
+
+ eth->rsts[0].id = "fe";
diff --git a/target/linux/airoha/patches-6.6/010-02-v6.12-net-airoha-Move-airoha_queues-in-airoha_qdma.patch b/target/linux/airoha/patches-6.6/010-02-v6.12-net-airoha-Move-airoha_queues-in-airoha_qdma.patch
new file mode 100644
index 0000000000..853a785180
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/010-02-v6.12-net-airoha-Move-airoha_queues-in-airoha_qdma.patch
@@ -0,0 +1,318 @@
+From 245c7bc86b198e5ec227eba6b582da73cb0721c8 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 1 Aug 2024 16:35:04 +0200
+Subject: [PATCH 2/8] net: airoha: Move airoha_queues in airoha_qdma
+
+QDMA controllers available in EN7581 SoC have independent tx/rx hw queues
+so move them in airoha_queues structure.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/795fc4797bffbf7f0a1351308aa9bf0e65b5126e.1722522582.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 126 +++++++++++----------
+ 1 file changed, 65 insertions(+), 61 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -785,6 +785,17 @@ struct airoha_hw_stats {
+
+ struct airoha_qdma {
+ void __iomem *regs;
++
++ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
++
++ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
++ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
++
++ /* descriptor and packet buffers for qdma hw forward */
++ struct {
++ void *desc;
++ void *q;
++ } hfwd;
+ };
+
+ struct airoha_gdm_port {
+@@ -809,20 +820,10 @@ struct airoha_eth {
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
+- struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+-
+ struct net_device *napi_dev;
+- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+-
+- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+- /* descriptor and packet buffers for qdma hw forward */
+- struct {
+- void *desc;
+- void *q;
+- } hfwd;
++ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
++ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+ };
+
+ static u32 airoha_rr(void __iomem *base, u32 offset)
+@@ -1390,7 +1391,7 @@ static int airoha_qdma_fill_rx_queue(str
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_qdma *qdma = &q->eth->qdma[0];
+ struct airoha_eth *eth = q->eth;
+- int qid = q - ð->q_rx[0];
++ int qid = q - &qdma->q_rx[0];
+ int nframes = 0;
+
+ while (q->queued < q->ndesc - 1) {
+@@ -1457,8 +1458,9 @@ static int airoha_qdma_get_gdm_port(stru
+ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
+ {
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
++ struct airoha_qdma *qdma = &q->eth->qdma[0];
+ struct airoha_eth *eth = q->eth;
+- int qid = q - ð->q_rx[0];
++ int qid = q - &qdma->q_rx[0];
+ int done = 0;
+
+ while (done < budget) {
+@@ -1550,7 +1552,7 @@ static int airoha_qdma_init_rx_queue(str
+ .dev = eth->dev,
+ .napi = &q->napi,
+ };
+- int qid = q - ð->q_rx[0], thr;
++ int qid = q - &qdma->q_rx[0], thr;
+ dma_addr_t dma_addr;
+
+ q->buf_size = PAGE_SIZE / 2;
+@@ -1614,7 +1616,7 @@ static int airoha_qdma_init_rx(struct ai
+ {
+ int i;
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
++ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!(RX_DONE_INT_MASK & BIT(i))) {
+@@ -1622,7 +1624,7 @@ static int airoha_qdma_init_rx(struct ai
+ continue;
+ }
+
+- err = airoha_qdma_init_rx_queue(eth, ð->q_rx[i],
++ err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
+ qdma, RX_DSCP_NUM(i));
+ if (err)
+ return err;
+@@ -1641,7 +1643,7 @@ static int airoha_qdma_tx_napi_poll(stru
+ irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
+ eth = irq_q->eth;
+ qdma = ð->qdma[0];
+- id = irq_q - ð->q_tx_irq[0];
++ id = irq_q - &qdma->q_tx_irq[0];
+
+ while (irq_q->queued > 0 && done < budget) {
+ u32 qid, last, val = irq_q->q[irq_q->head];
+@@ -1658,10 +1660,10 @@ static int airoha_qdma_tx_napi_poll(stru
+ last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
+ qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
+
+- if (qid >= ARRAY_SIZE(eth->q_tx))
++ if (qid >= ARRAY_SIZE(qdma->q_tx))
+ continue;
+
+- q = ð->q_tx[qid];
++ q = &qdma->q_tx[qid];
+ if (!q->ndesc)
+ continue;
+
+@@ -1727,7 +1729,7 @@ static int airoha_qdma_init_tx_queue(str
+ struct airoha_queue *q,
+ struct airoha_qdma *qdma, int size)
+ {
+- int i, qid = q - ð->q_tx[0];
++ int i, qid = q - &qdma->q_tx[0];
+ dma_addr_t dma_addr;
+
+ spin_lock_init(&q->lock);
+@@ -1765,7 +1767,7 @@ static int airoha_qdma_tx_irq_init(struc
+ struct airoha_tx_irq_queue *irq_q,
+ struct airoha_qdma *qdma, int size)
+ {
+- int id = irq_q - ð->q_tx_irq[0];
++ int id = irq_q - &qdma->q_tx_irq[0];
+ dma_addr_t dma_addr;
+
+ netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
+@@ -1793,15 +1795,15 @@ static int airoha_qdma_init_tx(struct ai
+ {
+ int i, err;
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+- err = airoha_qdma_tx_irq_init(eth, ð->q_tx_irq[i],
++ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
++ err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
+ qdma, IRQ_QUEUE_LEN(i));
+ if (err)
+ return err;
+ }
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+- err = airoha_qdma_init_tx_queue(eth, ð->q_tx[i],
++ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
++ err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
+ qdma, TX_DSCP_NUM);
+ if (err)
+ return err;
+@@ -1837,17 +1839,17 @@ static int airoha_qdma_init_hfwd_queues(
+ int size;
+
+ size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
+- eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+- GFP_KERNEL);
+- if (!eth->hfwd.desc)
++ qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
++ GFP_KERNEL);
++ if (!qdma->hfwd.desc)
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+
+ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+- eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+- GFP_KERNEL);
+- if (!eth->hfwd.q)
++ qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
++ GFP_KERNEL);
++ if (!qdma->hfwd.q)
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+@@ -1935,8 +1937,8 @@ static int airoha_qdma_hw_init(struct ai
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+
+ /* setup irq binding */
+- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+- if (!eth->q_tx[i].ndesc)
++ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
++ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
+@@ -1961,8 +1963,8 @@ static int airoha_qdma_hw_init(struct ai
+ airoha_qdma_init_qos(eth, qdma);
+
+ /* disable qdma rx delay interrupt */
+- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+- if (!eth->q_rx[i].ndesc)
++ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
++ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
+@@ -1996,18 +1998,18 @@ static irqreturn_t airoha_irq_handler(in
+ airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+- if (!eth->q_rx[i].ndesc)
++ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
++ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ if (intr[1] & BIT(i))
+- napi_schedule(ð->q_rx[i].napi);
++ napi_schedule(&qdma->q_rx[i].napi);
+ }
+ }
+
+ if (intr[0] & INT_TX_MASK) {
+- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+- struct airoha_tx_irq_queue *irq_q = ð->q_tx_irq[i];
++ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
++ struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
+ u32 status, head;
+
+ if (!(intr[0] & TX_DONE_INT_MASK(i)))
+@@ -2021,7 +2023,7 @@ static irqreturn_t airoha_irq_handler(in
+ irq_q->head = head % irq_q->size;
+ irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
+- napi_schedule(ð->q_tx_irq[i].napi);
++ napi_schedule(&qdma->q_tx_irq[i].napi);
+ }
+ }
+
+@@ -2080,44 +2082,46 @@ static int airoha_hw_init(struct airoha_
+
+ static void airoha_hw_cleanup(struct airoha_eth *eth)
+ {
++ struct airoha_qdma *qdma = ð->qdma[0];
+ int i;
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+- if (!eth->q_rx[i].ndesc)
++ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
++ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+- napi_disable(ð->q_rx[i].napi);
+- netif_napi_del(ð->q_rx[i].napi);
+- airoha_qdma_cleanup_rx_queue(ð->q_rx[i]);
+- if (eth->q_rx[i].page_pool)
+- page_pool_destroy(eth->q_rx[i].page_pool);
++ napi_disable(&qdma->q_rx[i].napi);
++ netif_napi_del(&qdma->q_rx[i].napi);
++ airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
++ if (qdma->q_rx[i].page_pool)
++ page_pool_destroy(qdma->q_rx[i].page_pool);
+ }
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+- napi_disable(ð->q_tx_irq[i].napi);
+- netif_napi_del(ð->q_tx_irq[i].napi);
++ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
++ napi_disable(&qdma->q_tx_irq[i].napi);
++ netif_napi_del(&qdma->q_tx_irq[i].napi);
+ }
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+- if (!eth->q_tx[i].ndesc)
++ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
++ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+- airoha_qdma_cleanup_tx_queue(ð->q_tx[i]);
++ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
+ }
+
+ static void airoha_qdma_start_napi(struct airoha_eth *eth)
+ {
++ struct airoha_qdma *qdma = ð->qdma[0];
+ int i;
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
+- napi_enable(ð->q_tx_irq[i].napi);
++ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
++ napi_enable(&qdma->q_tx_irq[i].napi);
+
+- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+- if (!eth->q_rx[i].ndesc)
++ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
++ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+- napi_enable(ð->q_rx[i].napi);
++ napi_enable(&qdma->q_rx[i].napi);
+ }
+ }
+
+@@ -2392,7 +2396,7 @@ static netdev_tx_t airoha_dev_xmit(struc
+ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
+
+ qdma = ð->qdma[0];
+- q = ð->q_tx[qid];
++ q = &qdma->q_tx[qid];
+ if (WARN_ON_ONCE(!q->ndesc))
+ goto error;
+
diff --git a/target/linux/airoha/patches-6.6/010-03-v6.12-net-airoha-Move-irq_mask-in-airoha_qdma-structure.patch b/target/linux/airoha/patches-6.6/010-03-v6.12-net-airoha-Move-irq_mask-in-airoha_qdma-structure.patch
new file mode 100644
index 0000000000..9f05ad4057
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/010-03-v6.12-net-airoha-Move-irq_mask-in-airoha_qdma-structure.patch
@@ -0,0 +1,236 @@
+From 19e47fc2aeda3a657c4f64144ffd6e65f7a66601 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 1 Aug 2024 16:35:05 +0200
+Subject: [PATCH 3/8] net: airoha: Move irq_mask in airoha_qdma structure
+
+QDMA controllers have independent irq lines, so move irqmask in
+airoha_qdma structure. This is a preliminary patch to support multiple
+QDMA controllers.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/1c8a06e8be605278a7b2f3cd8ac06e74bf5ebf2b.1722522582.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
+ 1 file changed, 42 insertions(+), 42 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -786,6 +786,11 @@ struct airoha_hw_stats {
+ struct airoha_qdma {
+ void __iomem *regs;
+
++ /* protect concurrent irqmask accesses */
++ spinlock_t irq_lock;
++ u32 irqmask[QDMA_INT_REG_MAX];
++ int irq;
++
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+@@ -812,11 +817,6 @@ struct airoha_eth {
+ unsigned long state;
+ void __iomem *fe_regs;
+
+- /* protect concurrent irqmask accesses */
+- spinlock_t irq_lock;
+- u32 irqmask[QDMA_INT_REG_MAX];
+- int irq;
+-
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
+@@ -866,38 +866,37 @@ static u32 airoha_rmw(void __iomem *base
+ #define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+-static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
++static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
+ u32 clear, u32 set)
+ {
+ unsigned long flags;
+
+- if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
++ if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
+ return;
+
+- spin_lock_irqsave(ð->irq_lock, flags);
++ spin_lock_irqsave(&qdma->irq_lock, flags);
+
+- eth->irqmask[index] &= ~clear;
+- eth->irqmask[index] |= set;
+- airoha_qdma_wr(ð->qdma[0], REG_INT_ENABLE(index),
+- eth->irqmask[index]);
++ qdma->irqmask[index] &= ~clear;
++ qdma->irqmask[index] |= set;
++ airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
+ /* Read irq_enable register in order to guarantee the update above
+ * completes in the spinlock critical section.
+ */
+- airoha_qdma_rr(ð->qdma[0], REG_INT_ENABLE(index));
++ airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
+
+- spin_unlock_irqrestore(ð->irq_lock, flags);
++ spin_unlock_irqrestore(&qdma->irq_lock, flags);
+ }
+
+-static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
++static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
+ u32 mask)
+ {
+- airoha_qdma_set_irqmask(eth, index, 0, mask);
++ airoha_qdma_set_irqmask(qdma, index, 0, mask);
+ }
+
+-static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
++static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
+ u32 mask)
+ {
+- airoha_qdma_set_irqmask(eth, index, mask, 0);
++ airoha_qdma_set_irqmask(qdma, index, mask, 0);
+ }
+
+ static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
+@@ -1522,7 +1521,7 @@ static int airoha_qdma_rx_process(struct
+ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+ {
+ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+- struct airoha_eth *eth = q->eth;
++ struct airoha_qdma *qdma = &q->eth->qdma[0];
+ int cur, done = 0;
+
+ do {
+@@ -1531,7 +1530,7 @@ static int airoha_qdma_rx_napi_poll(stru
+ } while (cur && done < budget);
+
+ if (done < budget && napi_complete(napi))
+- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
++ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ return done;
+@@ -1719,7 +1718,7 @@ static int airoha_qdma_tx_napi_poll(stru
+ }
+
+ if (done < budget && napi_complete(napi))
+- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
++ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(id));
+
+ return done;
+@@ -1928,13 +1927,13 @@ static int airoha_qdma_hw_init(struct ai
+ int i;
+
+ /* clear pending irqs */
+- for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
++ for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
+
+ /* setup irqs */
+- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
++ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
++ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
++ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+
+ /* setup irq binding */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+@@ -1980,14 +1979,13 @@ static int airoha_qdma_hw_init(struct ai
+ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
+ {
+ struct airoha_eth *eth = dev_instance;
+- u32 intr[ARRAY_SIZE(eth->irqmask)];
+- struct airoha_qdma *qdma;
++ struct airoha_qdma *qdma = ð->qdma[0];
++ u32 intr[ARRAY_SIZE(qdma->irqmask)];
+ int i;
+
+- qdma = ð->qdma[0];
+- for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
++ for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+ intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+- intr[i] &= eth->irqmask[i];
++ intr[i] &= qdma->irqmask[i];
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
+ }
+
+@@ -1995,7 +1993,7 @@ static irqreturn_t airoha_irq_handler(in
+ return IRQ_NONE;
+
+ if (intr[1] & RX_DONE_INT_MASK) {
+- airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
++ airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+@@ -2015,7 +2013,7 @@ static irqreturn_t airoha_irq_handler(in
+ if (!(intr[0] & TX_DONE_INT_MASK(i)))
+ continue;
+
+- airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
++ airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(i));
+
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
+@@ -2030,12 +2028,18 @@ static irqreturn_t airoha_irq_handler(in
+ return IRQ_HANDLED;
+ }
+
+-static int airoha_qdma_init(struct airoha_eth *eth)
++static int airoha_qdma_init(struct platform_device *pdev,
++ struct airoha_eth *eth)
+ {
+ struct airoha_qdma *qdma = ð->qdma[0];
+ int err;
+
+- err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
++ spin_lock_init(&qdma->irq_lock);
++ qdma->irq = platform_get_irq(pdev, 0);
++ if (qdma->irq < 0)
++ return qdma->irq;
++
++ err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, eth);
+ if (err)
+ return err;
+@@ -2061,7 +2065,8 @@ static int airoha_qdma_init(struct airoh
+ return 0;
+ }
+
+-static int airoha_hw_init(struct airoha_eth *eth)
++static int airoha_hw_init(struct platform_device *pdev,
++ struct airoha_eth *eth)
+ {
+ int err;
+
+@@ -2077,7 +2082,7 @@ static int airoha_hw_init(struct airoha_
+ if (err)
+ return err;
+
+- return airoha_qdma_init(eth);
++ return airoha_qdma_init(pdev, eth);
+ }
+
+ static void airoha_hw_cleanup(struct airoha_eth *eth)
+@@ -2674,11 +2679,6 @@ static int airoha_probe(struct platform_
+ return err;
+ }
+
+- spin_lock_init(ð->irq_lock);
+- eth->irq = platform_get_irq(pdev, 0);
+- if (eth->irq < 0)
+- return eth->irq;
+-
+ eth->napi_dev = alloc_netdev_dummy(0);
+ if (!eth->napi_dev)
+ return -ENOMEM;
+@@ -2688,7 +2688,7 @@ static int airoha_probe(struct platform_
+ strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
+ platform_set_drvdata(pdev, eth);
+
+- err = airoha_hw_init(eth);
++ err = airoha_hw_init(pdev, eth);
+ if (err)
+ goto error;
+
diff --git a/target/linux/airoha/patches-6.6/010-04-v6.12-net-airoha-Add-airoha_qdma-pointer-in-airoha_tx_irq_.patch b/target/linux/airoha/patches-6.6/010-04-v6.12-net-airoha-Add-airoha_qdma-pointer-in-airoha_tx_irq_.patch
new file mode 100644
index 0000000000..b73fc34ef1
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/010-04-v6.12-net-airoha-Add-airoha_qdma-pointer-in-airoha_tx_irq_.patch
@@ -0,0 +1,306 @@
+From 9a2500ab22f059e596942172a8e4a60ae8243ce4 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 1 Aug 2024 16:35:06 +0200
+Subject: [PATCH 4/8] net: airoha: Add airoha_qdma pointer in
+ airoha_tx_irq_queue/airoha_queue structures
+
+Move airoha_eth pointer in airoha_qdma structure from
+airoha_tx_irq_queue/airoha_queue ones. This is a preliminary patch to
+introduce support for multi-QDMA controllers available on EN7581.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/074565b82fd0ceefe66e186f21133d825dbd48eb.1722522582.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
+ 1 file changed, 41 insertions(+), 43 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -728,7 +728,7 @@ struct airoha_queue_entry {
+ };
+
+ struct airoha_queue {
+- struct airoha_eth *eth;
++ struct airoha_qdma *qdma;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+@@ -747,7 +747,7 @@ struct airoha_queue {
+ };
+
+ struct airoha_tx_irq_queue {
+- struct airoha_eth *eth;
++ struct airoha_qdma *qdma;
+
+ struct napi_struct napi;
+ u32 *q;
+@@ -784,6 +784,7 @@ struct airoha_hw_stats {
+ };
+
+ struct airoha_qdma {
++ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ /* protect concurrent irqmask accesses */
+@@ -1388,8 +1389,8 @@ static int airoha_fe_init(struct airoha_
+ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
+ {
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+- struct airoha_qdma *qdma = &q->eth->qdma[0];
+- struct airoha_eth *eth = q->eth;
++ struct airoha_qdma *qdma = q->qdma;
++ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
+ int nframes = 0;
+
+@@ -1457,8 +1458,8 @@ static int airoha_qdma_get_gdm_port(stru
+ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
+ {
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+- struct airoha_qdma *qdma = &q->eth->qdma[0];
+- struct airoha_eth *eth = q->eth;
++ struct airoha_qdma *qdma = q->qdma;
++ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
+ int done = 0;
+
+@@ -1521,7 +1522,6 @@ static int airoha_qdma_rx_process(struct
+ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+ {
+ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+- struct airoha_qdma *qdma = &q->eth->qdma[0];
+ int cur, done = 0;
+
+ do {
+@@ -1530,14 +1530,13 @@ static int airoha_qdma_rx_napi_poll(stru
+ } while (cur && done < budget);
+
+ if (done < budget && napi_complete(napi))
+- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
++ airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ return done;
+ }
+
+-static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
+- struct airoha_queue *q,
++static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int ndesc)
+ {
+ const struct page_pool_params pp_params = {
+@@ -1548,15 +1547,16 @@ static int airoha_qdma_init_rx_queue(str
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .nid = NUMA_NO_NODE,
+- .dev = eth->dev,
++ .dev = qdma->eth->dev,
+ .napi = &q->napi,
+ };
++ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0], thr;
+ dma_addr_t dma_addr;
+
+ q->buf_size = PAGE_SIZE / 2;
+ q->ndesc = ndesc;
+- q->eth = eth;
++ q->qdma = qdma;
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+@@ -1596,7 +1596,7 @@ static int airoha_qdma_init_rx_queue(str
+
+ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
+ {
+- struct airoha_eth *eth = q->eth;
++ struct airoha_eth *eth = q->qdma->eth;
+
+ while (q->queued) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+@@ -1610,8 +1610,7 @@ static void airoha_qdma_cleanup_rx_queue
+ }
+ }
+
+-static int airoha_qdma_init_rx(struct airoha_eth *eth,
+- struct airoha_qdma *qdma)
++static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
+ {
+ int i;
+
+@@ -1623,8 +1622,8 @@ static int airoha_qdma_init_rx(struct ai
+ continue;
+ }
+
+- err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
+- qdma, RX_DSCP_NUM(i));
++ err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
++ RX_DSCP_NUM(i));
+ if (err)
+ return err;
+ }
+@@ -1640,9 +1639,9 @@ static int airoha_qdma_tx_napi_poll(stru
+ int id, done = 0;
+
+ irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
+- eth = irq_q->eth;
+- qdma = ð->qdma[0];
++ qdma = irq_q->qdma;
+ id = irq_q - &qdma->q_tx_irq[0];
++ eth = qdma->eth;
+
+ while (irq_q->queued > 0 && done < budget) {
+ u32 qid, last, val = irq_q->q[irq_q->head];
+@@ -1724,16 +1723,16 @@ static int airoha_qdma_tx_napi_poll(stru
+ return done;
+ }
+
+-static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
+- struct airoha_queue *q,
++static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int size)
+ {
++ struct airoha_eth *eth = qdma->eth;
+ int i, qid = q - &qdma->q_tx[0];
+ dma_addr_t dma_addr;
+
+ spin_lock_init(&q->lock);
+ q->ndesc = size;
+- q->eth = eth;
++ q->qdma = qdma;
+ q->free_thr = 1 + MAX_SKB_FRAGS;
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+@@ -1762,11 +1761,11 @@ static int airoha_qdma_init_tx_queue(str
+ return 0;
+ }
+
+-static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
+- struct airoha_tx_irq_queue *irq_q,
++static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+ struct airoha_qdma *qdma, int size)
+ {
+ int id = irq_q - &qdma->q_tx_irq[0];
++ struct airoha_eth *eth = qdma->eth;
+ dma_addr_t dma_addr;
+
+ netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
+@@ -1778,7 +1777,7 @@ static int airoha_qdma_tx_irq_init(struc
+
+ memset(irq_q->q, 0xff, size * sizeof(u32));
+ irq_q->size = size;
+- irq_q->eth = eth;
++ irq_q->qdma = qdma;
+
+ airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+@@ -1789,21 +1788,20 @@ static int airoha_qdma_tx_irq_init(struc
+ return 0;
+ }
+
+-static int airoha_qdma_init_tx(struct airoha_eth *eth,
+- struct airoha_qdma *qdma)
++static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
+ {
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+- err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
+- qdma, IRQ_QUEUE_LEN(i));
++ err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
++ IRQ_QUEUE_LEN(i));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+- err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
+- qdma, TX_DSCP_NUM);
++ err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
++ TX_DSCP_NUM);
+ if (err)
+ return err;
+ }
+@@ -1813,7 +1811,7 @@ static int airoha_qdma_init_tx(struct ai
+
+ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
+ {
+- struct airoha_eth *eth = q->eth;
++ struct airoha_eth *eth = q->qdma->eth;
+
+ spin_lock_bh(&q->lock);
+ while (q->queued) {
+@@ -1830,9 +1828,9 @@ static void airoha_qdma_cleanup_tx_queue
+ spin_unlock_bh(&q->lock);
+ }
+
+-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
+- struct airoha_qdma *qdma)
++static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
+ {
++ struct airoha_eth *eth = qdma->eth;
+ dma_addr_t dma_addr;
+ u32 status;
+ int size;
+@@ -1870,8 +1868,7 @@ static int airoha_qdma_init_hfwd_queues(
+ REG_LMGR_INIT_CFG);
+ }
+
+-static void airoha_qdma_init_qos(struct airoha_eth *eth,
+- struct airoha_qdma *qdma)
++static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
+ {
+ airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+ airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+@@ -1921,8 +1918,7 @@ static void airoha_qdma_init_qos(struct
+ FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
+ }
+
+-static int airoha_qdma_hw_init(struct airoha_eth *eth,
+- struct airoha_qdma *qdma)
++static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
+ {
+ int i;
+
+@@ -1959,7 +1955,7 @@ static int airoha_qdma_hw_init(struct ai
+ GLOBAL_CFG_TX_WB_DONE_MASK |
+ FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
+
+- airoha_qdma_init_qos(eth, qdma);
++ airoha_qdma_init_qos(qdma);
+
+ /* disable qdma rx delay interrupt */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+@@ -2035,6 +2031,8 @@ static int airoha_qdma_init(struct platf
+ int err;
+
+ spin_lock_init(&qdma->irq_lock);
++ qdma->eth = eth;
++
+ qdma->irq = platform_get_irq(pdev, 0);
+ if (qdma->irq < 0)
+ return qdma->irq;
+@@ -2044,19 +2042,19 @@ static int airoha_qdma_init(struct platf
+ if (err)
+ return err;
+
+- err = airoha_qdma_init_rx(eth, qdma);
++ err = airoha_qdma_init_rx(qdma);
+ if (err)
+ return err;
+
+- err = airoha_qdma_init_tx(eth, qdma);
++ err = airoha_qdma_init_tx(qdma);
+ if (err)
+ return err;
+
+- err = airoha_qdma_init_hfwd_queues(eth, qdma);
++ err = airoha_qdma_init_hfwd_queues(qdma);
+ if (err)
+ return err;
+
+- err = airoha_qdma_hw_init(eth, qdma);
++ err = airoha_qdma_hw_init(qdma);
+ if (err)
+ return err;
+
diff --git a/target/linux/airoha/patches-6.6/010-05-v6.12-net-airoha-Use-qdma-pointer-as-private-structure-in-.patch b/target/linux/airoha/patches-6.6/010-05-v6.12-net-airoha-Use-qdma-pointer-as-private-structure-in-.patch
new file mode 100644
index 0000000000..9cabd10b58
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/010-05-v6.12-net-airoha-Use-qdma-pointer-as-private-structure-in-.patch
@@ -0,0 +1,45 @@
+From e3d6bfdfc0aeb8c1d7965413b1050ec07f9761e5 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 1 Aug 2024 16:35:07 +0200
+Subject: [PATCH 5/8] net: airoha: Use qdma pointer as private structure in
+ airoha_irq_handler routine
+
+This is a preliminary patch to support multi-QDMA controllers.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/1e40c3cb973881c0eb3c3c247c78550da62054ab.1722522582.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -1974,8 +1974,7 @@ static int airoha_qdma_hw_init(struct ai
+
+ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
+ {
+- struct airoha_eth *eth = dev_instance;
+- struct airoha_qdma *qdma = ð->qdma[0];
++ struct airoha_qdma *qdma = dev_instance;
+ u32 intr[ARRAY_SIZE(qdma->irqmask)];
+ int i;
+
+@@ -1985,7 +1984,7 @@ static irqreturn_t airoha_irq_handler(in
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
+ }
+
+- if (!test_bit(DEV_STATE_INITIALIZED, ð->state))
++ if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
+ return IRQ_NONE;
+
+ if (intr[1] & RX_DONE_INT_MASK) {
+@@ -2038,7 +2037,7 @@ static int airoha_qdma_init(struct platf
+ return qdma->irq;
+
+ err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
+- IRQF_SHARED, KBUILD_MODNAME, eth);
++ IRQF_SHARED, KBUILD_MODNAME, qdma);
+ if (err)
+ return err;
+
diff --git a/target/linux/airoha/patches-6.6/010-06-v6.12-net-airoha-Allow-mapping-IO-region-for-multiple-qdma.patch b/target/linux/airoha/patches-6.6/010-06-v6.12-net-airoha-Allow-mapping-IO-region-for-multiple-qdma.patch
new file mode 100644
index 0000000000..ebc7318578
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/010-06-v6.12-net-airoha-Allow-mapping-IO-region-for-multiple-qdma.patch
@@ -0,0 +1,131 @@
+From e618447cf492d04415007336eec025fae6e9a2ea Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 1 Aug 2024 16:35:08 +0200
+Subject: [PATCH 6/8] net: airoha: Allow mapping IO region for multiple qdma
+ controllers
+
+Map MMIO regions of both qdma controllers available on EN7581 SoC.
+Run airoha_hw_cleanup routine for both QDMA controllers available on
+EN7581 SoC removing airoha_eth module or in airoha_probe error path.
+This is a preliminary patch to support multi-QDMA controllers.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/a734ae608da14b67ae749b375d880dbbc70868ea.1722522582.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 56 ++++++++++++----------
+ 1 file changed, 32 insertions(+), 24 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -2024,15 +2024,25 @@ static irqreturn_t airoha_irq_handler(in
+ }
+
+ static int airoha_qdma_init(struct platform_device *pdev,
+- struct airoha_eth *eth)
++ struct airoha_eth *eth,
++ struct airoha_qdma *qdma)
+ {
+- struct airoha_qdma *qdma = ð->qdma[0];
+- int err;
++ int err, id = qdma - ð->qdma[0];
++ const char *res;
+
+ spin_lock_init(&qdma->irq_lock);
+ qdma->eth = eth;
+
+- qdma->irq = platform_get_irq(pdev, 0);
++ res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
++ if (!res)
++ return -ENOMEM;
++
++ qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
++ if (IS_ERR(qdma->regs))
++ return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
++ "failed to iomap qdma%d regs\n", id);
++
++ qdma->irq = platform_get_irq(pdev, 4 * id);
+ if (qdma->irq < 0)
+ return qdma->irq;
+
+@@ -2053,19 +2063,13 @@ static int airoha_qdma_init(struct platf
+ if (err)
+ return err;
+
+- err = airoha_qdma_hw_init(qdma);
+- if (err)
+- return err;
+-
+- set_bit(DEV_STATE_INITIALIZED, ð->state);
+-
+- return 0;
++ return airoha_qdma_hw_init(qdma);
+ }
+
+ static int airoha_hw_init(struct platform_device *pdev,
+ struct airoha_eth *eth)
+ {
+- int err;
++ int err, i;
+
+ /* disable xsi */
+ reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
+@@ -2079,12 +2083,19 @@ static int airoha_hw_init(struct platfor
+ if (err)
+ return err;
+
+- return airoha_qdma_init(pdev, eth);
++ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
++ err = airoha_qdma_init(pdev, eth, ð->qdma[i]);
++ if (err)
++ return err;
++ }
++
++ set_bit(DEV_STATE_INITIALIZED, ð->state);
++
++ return 0;
+ }
+
+-static void airoha_hw_cleanup(struct airoha_eth *eth)
++static void airoha_hw_cleanup(struct airoha_qdma *qdma)
+ {
+- struct airoha_qdma *qdma = ð->qdma[0];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+@@ -2645,13 +2656,6 @@ static int airoha_probe(struct platform_
+ return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
+ "failed to iomap fe regs\n");
+
+- eth->qdma[0].regs = devm_platform_ioremap_resource_byname(pdev,
+- "qdma0");
+- if (IS_ERR(eth->qdma[0].regs))
+- return dev_err_probe(eth->dev,
+- PTR_ERR(eth->qdma[0].regs),
+- "failed to iomap qdma regs\n");
+-
+ eth->rsts[0].id = "fe";
+ eth->rsts[1].id = "pdma";
+ eth->rsts[2].id = "qdma";
+@@ -2707,7 +2711,9 @@ static int airoha_probe(struct platform_
+ return 0;
+
+ error:
+- airoha_hw_cleanup(eth);
++ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
++ airoha_hw_cleanup(ð->qdma[i]);
++
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+@@ -2725,7 +2731,9 @@ static void airoha_remove(struct platfor
+ struct airoha_eth *eth = platform_get_drvdata(pdev);
+ int i;
+
+- airoha_hw_cleanup(eth);
++ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
++ airoha_hw_cleanup(ð->qdma[i]);
++
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
diff --git a/target/linux/airoha/patches-6.6/010-07-v6.12-net-airoha-Start-all-qdma-NAPIs-in-airoha_probe.patch b/target/linux/airoha/patches-6.6/010-07-v6.12-net-airoha-Start-all-qdma-NAPIs-in-airoha_probe.patch
new file mode 100644
index 0000000000..c9a99f1e79
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/010-07-v6.12-net-airoha-Start-all-qdma-NAPIs-in-airoha_probe.patch
@@ -0,0 +1,38 @@
+From 160231e34b8e9512ba20530f3e68fb0ac499af87 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 1 Aug 2024 16:35:09 +0200
+Subject: [PATCH 7/8] net: airoha: Start all qdma NAPIs in airoha_probe()
+
+This is a preliminary patch to support multi-QDMA controllers.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/b51cf69c94d8cbc81e0a0b35587f024d01e6d9c0.1722522582.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -2122,9 +2122,8 @@ static void airoha_hw_cleanup(struct air
+ }
+ }
+
+-static void airoha_qdma_start_napi(struct airoha_eth *eth)
++static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
+ {
+- struct airoha_qdma *qdma = ð->qdma[0];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+@@ -2693,7 +2692,9 @@ static int airoha_probe(struct platform_
+ if (err)
+ goto error;
+
+- airoha_qdma_start_napi(eth);
++ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
++ airoha_qdma_start_napi(ð->qdma[i]);
++
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_device_is_compatible(np, "airoha,eth-mac"))
+ continue;
diff --git a/target/linux/airoha/patches-6.6/010-08-v6.12-net-airoha-Link-the-gdm-port-to-the-selected-qdma-co.patch b/target/linux/airoha/patches-6.6/010-08-v6.12-net-airoha-Link-the-gdm-port-to-the-selected-qdma-co.patch
new file mode 100644
index 0000000000..1e89cf15aa
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/010-08-v6.12-net-airoha-Link-the-gdm-port-to-the-selected-qdma-co.patch
@@ -0,0 +1,174 @@
+From 9304640f2f78147dddf97a5ea01502ae175e41d9 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 1 Aug 2024 16:35:10 +0200
+Subject: [PATCH 8/8] net: airoha: Link the gdm port to the selected qdma
+ controller
+
+Link the running gdm port to the qdma controller used to connect with
+the CPU. Moreover, load all QDMA controllers available on EN7581 SoC.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/95b515df34ba4727f7ae5b14a1d0462cceec84ff.1722522582.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 37 +++++++++++-----------
+ 1 file changed, 19 insertions(+), 18 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -18,7 +18,7 @@
+ #include
+
+ #define AIROHA_MAX_NUM_GDM_PORTS 1
+-#define AIROHA_MAX_NUM_QDMA 1
++#define AIROHA_MAX_NUM_QDMA 2
+ #define AIROHA_MAX_NUM_RSTS 3
+ #define AIROHA_MAX_NUM_XSI_RSTS 5
+ #define AIROHA_MAX_MTU 2000
+@@ -805,8 +805,8 @@ struct airoha_qdma {
+ };
+
+ struct airoha_gdm_port {
++ struct airoha_qdma *qdma;
+ struct net_device *dev;
+- struct airoha_eth *eth;
+ int id;
+
+ struct airoha_hw_stats stats;
+@@ -2139,7 +2139,7 @@ static void airoha_qdma_start_napi(struc
+
+ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
+ {
+- struct airoha_eth *eth = port->eth;
++ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, i = 0;
+
+ spin_lock(&port->stats.lock);
+@@ -2284,22 +2284,22 @@ static void airoha_update_hw_stats(struc
+ static int airoha_dev_open(struct net_device *dev)
+ {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+- struct airoha_eth *eth = port->eth;
++ struct airoha_qdma *qdma = port->qdma;
+ int err;
+
+ netif_tx_start_all_queues(dev);
+- err = airoha_set_gdm_ports(eth, true);
++ err = airoha_set_gdm_ports(qdma->eth, true);
+ if (err)
+ return err;
+
+ if (netdev_uses_dsa(dev))
+- airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
++ airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+ else
+- airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
++ airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+
+- airoha_qdma_set(ð->qdma[0], REG_QDMA_GLOBAL_CFG,
++ airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
+
+@@ -2309,15 +2309,15 @@ static int airoha_dev_open(struct net_de
+ static int airoha_dev_stop(struct net_device *dev)
+ {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+- struct airoha_eth *eth = port->eth;
++ struct airoha_qdma *qdma = port->qdma;
+ int err;
+
+ netif_tx_disable(dev);
+- err = airoha_set_gdm_ports(eth, false);
++ err = airoha_set_gdm_ports(qdma->eth, false);
+ if (err)
+ return err;
+
+- airoha_qdma_clear(ð->qdma[0], REG_QDMA_GLOBAL_CFG,
++ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
+
+@@ -2333,7 +2333,7 @@ static int airoha_dev_set_macaddr(struct
+ if (err)
+ return err;
+
+- airoha_set_macaddr(port->eth, dev->dev_addr);
++ airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
+
+ return 0;
+ }
+@@ -2342,7 +2342,7 @@ static int airoha_dev_init(struct net_de
+ {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+- airoha_set_macaddr(port->eth, dev->dev_addr);
++ airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
+
+ return 0;
+ }
+@@ -2376,10 +2376,9 @@ static netdev_tx_t airoha_dev_xmit(struc
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 msg0 = 0, msg1, len = skb_headlen(skb);
+ int i, qid = skb_get_queue_mapping(skb);
+- struct airoha_eth *eth = port->eth;
++ struct airoha_qdma *qdma = port->qdma;
+ u32 nr_frags = 1 + sinfo->nr_frags;
+ struct netdev_queue *txq;
+- struct airoha_qdma *qdma;
+ struct airoha_queue *q;
+ void *data = skb->data;
+ u16 index;
+@@ -2407,7 +2406,6 @@ static netdev_tx_t airoha_dev_xmit(struc
+ msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
+ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
+
+- qdma = ð->qdma[0];
+ q = &qdma->q_tx[qid];
+ if (WARN_ON_ONCE(!q->ndesc))
+ goto error;
+@@ -2490,7 +2488,7 @@ static void airoha_ethtool_get_drvinfo(s
+ struct ethtool_drvinfo *info)
+ {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+- struct airoha_eth *eth = port->eth;
++ struct airoha_eth *eth = port->qdma->eth;
+
+ strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
+@@ -2571,6 +2569,7 @@ static int airoha_alloc_gdm_port(struct
+ {
+ const __be32 *id_ptr = of_get_property(np, "reg", NULL);
+ struct airoha_gdm_port *port;
++ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int err, index;
+ u32 id;
+@@ -2600,6 +2599,7 @@ static int airoha_alloc_gdm_port(struct
+ return -ENOMEM;
+ }
+
++ qdma = ð->qdma[index % AIROHA_MAX_NUM_QDMA];
+ dev->netdev_ops = &airoha_netdev_ops;
+ dev->ethtool_ops = &airoha_ethtool_ops;
+ dev->max_mtu = AIROHA_MAX_MTU;
+@@ -2609,6 +2609,7 @@ static int airoha_alloc_gdm_port(struct
+ NETIF_F_SG | NETIF_F_TSO;
+ dev->features |= dev->hw_features;
+ dev->dev.of_node = np;
++ dev->irq = qdma->irq;
+ SET_NETDEV_DEV(dev, eth->dev);
+
+ err = of_get_ethdev_address(np, dev);
+@@ -2624,8 +2625,8 @@ static int airoha_alloc_gdm_port(struct
+ port = netdev_priv(dev);
+ u64_stats_init(&port->stats.syncp);
+ spin_lock_init(&port->stats.lock);
++ port->qdma = qdma;
+ port->dev = dev;
+- port->eth = eth;
+ port->id = id;
+ eth->ports[index] = port;
+
diff --git a/target/linux/airoha/patches-6.6/011-v6.12-net-airoha-honor-reset-return-value-in-airoha_hw_ini.patch b/target/linux/airoha/patches-6.6/011-v6.12-net-airoha-honor-reset-return-value-in-airoha_hw_ini.patch
new file mode 100644
index 0000000000..ed25ccb89d
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/011-v6.12-net-airoha-honor-reset-return-value-in-airoha_hw_ini.patch
@@ -0,0 +1,44 @@
+From 63a796b4988c3dca83176a534890b510d44f105a Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Sat, 3 Aug 2024 17:50:50 +0200
+Subject: [PATCH] net: airoha: honor reset return value in airoha_hw_init()
+
+Take into account return value from reset_control_bulk_assert and
+reset_control_bulk_deassert routines in airoha_hw_init().
+
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Simon Horman
+Link: https://patch.msgid.link/f49dc04a87653e0155f4fab3e3eb584785c8ad6a.1722699555.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -2072,13 +2072,21 @@ static int airoha_hw_init(struct platfor
+ int err, i;
+
+ /* disable xsi */
+- reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
++ err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
++ eth->xsi_rsts);
++ if (err)
++ return err;
++
++ err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
++ if (err)
++ return err;
+
+- reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
+- msleep(20);
+- reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ msleep(20);
++ err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
++ if (err)
++ return err;
+
++ msleep(20);
+ err = airoha_fe_init(eth);
+ if (err)
+ return err;
diff --git a/target/linux/airoha/patches-6.6/012-v6.12-net-airoha-configure-hw-mac-address-according-to-the.patch b/target/linux/airoha/patches-6.6/012-v6.12-net-airoha-configure-hw-mac-address-according-to-the.patch
new file mode 100644
index 0000000000..da23955501
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/012-v6.12-net-airoha-configure-hw-mac-address-according-to-the.patch
@@ -0,0 +1,85 @@
+From 812a2751e827fa1eb01f3bd268b4d74c23f4226a Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Wed, 21 Aug 2024 09:30:14 +0200
+Subject: [PATCH] net: airoha: configure hw mac address according to the port
+ id
+
+GDM1 port on EN7581 SoC is connected to the lan dsa switch.
+GDM{2,3,4} can be used as wan port connected to an external
+phy module. Configure hw mac address registers according to the port id.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/20240821-airoha-eth-wan-mac-addr-v2-1-8706d0cd6cd5@kernel.org
+Signed-off-by: Paolo Abeni
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 32 ++++++++++++++++------
+ 1 file changed, 23 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -67,9 +67,11 @@
+ #define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+ #define FE_RST_CORE_MASK BIT(0)
+
++#define REG_FE_WAN_MAC_H 0x0030
+ #define REG_FE_LAN_MAC_H 0x0040
+-#define REG_FE_LAN_MAC_LMIN 0x0044
+-#define REG_FE_LAN_MAC_LMAX 0x0048
++
++#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
++#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
+
+ #define REG_FE_CDM1_OQ_MAP0 0x0050
+ #define REG_FE_CDM1_OQ_MAP1 0x0054
+@@ -900,16 +902,28 @@ static void airoha_qdma_irq_disable(stru
+ airoha_qdma_set_irqmask(qdma, index, mask, 0);
+ }
+
+-static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
++static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+ {
+- u32 val;
++ /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
++ * GDM{2,3,4} can be used as wan port connected to an external
++ * phy module.
++ */
++ return port->id == 1;
++}
++
++static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
++{
++ struct airoha_eth *eth = port->qdma->eth;
++ u32 val, reg;
+
++ reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
++ : REG_FE_WAN_MAC_H;
+ val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
+- airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
++ airoha_fe_wr(eth, reg, val);
+
+ val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
+- airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
+- airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
++ airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
++ airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+ }
+
+ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
+@@ -2341,7 +2355,7 @@ static int airoha_dev_set_macaddr(struct
+ if (err)
+ return err;
+
+- airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
++ airoha_set_macaddr(port, dev->dev_addr);
+
+ return 0;
+ }
+@@ -2350,7 +2364,7 @@ static int airoha_dev_init(struct net_de
+ {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+- airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
++ airoha_set_macaddr(port, dev->dev_addr);
+
+ return 0;
+ }
diff --git a/target/linux/airoha/patches-6.6/013-v6.12-net-airoha-fix-module-autoloading.patch b/target/linux/airoha/patches-6.6/013-v6.12-net-airoha-fix-module-autoloading.patch
new file mode 100644
index 0000000000..63c6162770
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/013-v6.12-net-airoha-fix-module-autoloading.patch
@@ -0,0 +1,26 @@
+From 7d2bd8ac9d2494cf9b16c4b00df9424ad24ed18c Mon Sep 17 00:00:00 2001
+From: Liao Chen
+Date: Mon, 26 Aug 2024 09:18:58 +0000
+Subject: [PATCH] net: airoha: fix module autoloading
+
+Add MODULE_DEVICE_TABLE(), so modules could be properly autoloaded
+based on the alias from of_device_id table.
+
+Signed-off-by: Liao Chen
+Acked-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/20240826091858.369910-4-liaochen4@huawei.com
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -2776,6 +2776,7 @@ static const struct of_device_id of_airo
+ { .compatible = "airoha,en7581-eth" },
+ { /* sentinel */ }
+ };
++MODULE_DEVICE_TABLE(of, of_airoha_match);
+
+ static struct platform_driver airoha_driver = {
+ .probe = airoha_probe,
diff --git a/target/linux/airoha/patches-6.6/014-01-v6.13-net-airoha-fix-PSE-memory-configuration-in-airoha_fe.patch b/target/linux/airoha/patches-6.6/014-01-v6.13-net-airoha-fix-PSE-memory-configuration-in-airoha_fe.patch
new file mode 100644
index 0000000000..fb86423733
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/014-01-v6.13-net-airoha-fix-PSE-memory-configuration-in-airoha_fe.patch
@@ -0,0 +1,40 @@
+From 8e38e08f2c560328a873c35aff1a0dbea6a7d084 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Tue, 1 Oct 2024 12:10:25 +0200
+Subject: [PATCH 2/2] net: airoha: fix PSE memory configuration in
+ airoha_fe_pse_ports_init()
+
+Align PSE memory configuration to vendor SDK. In particular, increase
+initial value of PSE reserved memory in airoha_fe_pse_ports_init()
+routine by the value used for the second Packet Processor Engine (PPE2)
+and do not overwrite the default value.
+
+Introduced by commit 23020f049327 ("net: airoha: Introduce ethernet support
+for EN7581 SoC")
+
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Simon Horman
+Link: https://patch.msgid.link/20241001-airoha-eth-pse-fix-v2-2-9a56cdffd074@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -1166,11 +1166,13 @@ static void airoha_fe_pse_ports_init(str
+ [FE_PSE_PORT_GDM4] = 2,
+ [FE_PSE_PORT_CDM5] = 2,
+ };
++ u32 all_rsv;
+ int q;
+
++ all_rsv = airoha_fe_get_pse_all_rsv(eth);
+ /* hw misses PPE2 oq rsv */
+- airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
+- PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);
++ all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
++ airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
+
+ /* CMD1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
diff --git a/target/linux/airoha/patches-6.6/014-02-v6.13-net-airoha-read-default-PSE-reserved-pages-value-bef.patch b/target/linux/airoha/patches-6.6/014-02-v6.13-net-airoha-read-default-PSE-reserved-pages-value-bef.patch
new file mode 100644
index 0000000000..a2e5c4fdd9
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/014-02-v6.13-net-airoha-read-default-PSE-reserved-pages-value-bef.patch
@@ -0,0 +1,52 @@
+From 1f3e7ff4f296af1f4350f457d5bd82bc825e645a Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Tue, 1 Oct 2024 12:10:24 +0200
+Subject: [PATCH 1/2] net: airoha: read default PSE reserved pages value before
+ updating
+
+Store the default value for the number of PSE reserved pages in orig_val
+at the beginning of airoha_fe_set_pse_oq_rsv routine, before updating it
+with airoha_fe_set_pse_queue_rsv_pages().
+Introduce airoha_fe_get_pse_all_rsv utility routine.
+
+Introduced by commit 23020f049327 ("net: airoha: Introduce ethernet support
+for EN7581 SoC")
+
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Simon Horman
+Link: https://patch.msgid.link/20241001-airoha-eth-pse-fix-v2-1-9a56cdffd074@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -1116,17 +1116,23 @@ static void airoha_fe_set_pse_queue_rsv_
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
+ }
+
++static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
++{
++ u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
++
++ return FIELD_GET(PSE_ALLRSV_MASK, val);
++}
++
+ static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+ {
+- u32 orig_val, tmp, all_rsv, fq_limit;
++ u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
++ u32 tmp, all_rsv, fq_limit;
+
+ airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
+
+ /* modify all rsv */
+- orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
+- tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+- all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
++ all_rsv = airoha_fe_get_pse_all_rsv(eth);
+ all_rsv += (val - orig_val);
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
+ FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
diff --git a/target/linux/airoha/patches-6.6/015-v6.12-net-airoha-Update-tx-cpu-dma-ring-idx-at-the-end-of-.patch b/target/linux/airoha/patches-6.6/015-v6.12-net-airoha-Update-tx-cpu-dma-ring-idx-at-the-end-of-.patch
new file mode 100644
index 0000000000..db6cc9caee
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/015-v6.12-net-airoha-Update-tx-cpu-dma-ring-idx-at-the-end-of-.patch
@@ -0,0 +1,45 @@
+From 3dc6e998d18bfba6e0dc979d3cc68eba98dfeef7 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Fri, 4 Oct 2024 15:51:26 +0200
+Subject: [PATCH] net: airoha: Update tx cpu dma ring idx at the end of xmit
+ loop
+
+Move the tx cpu dma ring index update out of transmit loop of
+airoha_dev_xmit routine in order to not start transmitting the packet
+before it is fully DMA mapped (e.g. fragmented skbs).
+
+Fixes: 23020f049327 ("net: airoha: Introduce ethernet support for EN7581 SoC")
+Reported-by: Felix Fietkau
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Simon Horman
+Link: https://patch.msgid.link/20241004-airoha-eth-7581-mapping-fix-v1-1-8e4279ab1812@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -2480,10 +2480,6 @@ static netdev_tx_t airoha_dev_xmit(struc
+ e->dma_addr = addr;
+ e->dma_len = len;
+
+- airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+- TX_RING_CPU_IDX_MASK,
+- FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+-
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+ }
+@@ -2492,6 +2488,11 @@ static netdev_tx_t airoha_dev_xmit(struc
+ q->queued += i;
+
+ skb_tx_timestamp(skb);
++ if (!netdev_xmit_more())
++ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
++ TX_RING_CPU_IDX_MASK,
++ FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
++
+ if (q->ndesc - q->queued < q->free_thr)
+ netif_tx_stop_queue(txq);
+
diff --git a/target/linux/airoha/patches-6.6/016-v6.13-net-airoha-Fix-EGRESS_RATE_METER_EN_MASK-definition.patch b/target/linux/airoha/patches-6.6/016-v6.13-net-airoha-Fix-EGRESS_RATE_METER_EN_MASK-definition.patch
new file mode 100644
index 0000000000..d70cadf9d9
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/016-v6.13-net-airoha-Fix-EGRESS_RATE_METER_EN_MASK-definition.patch
@@ -0,0 +1,33 @@
+From 2518b119639162251b6cc7195aec394930c1d867 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Wed, 9 Oct 2024 00:21:47 +0200
+Subject: [PATCH] net: airoha: Fix EGRESS_RATE_METER_EN_MASK definition
+
+Fix typo in EGRESS_RATE_METER_EN_MASK mask definition. This bug is not
+introducing any user visible problem since, even if we are setting
+EGRESS_RATE_METER_EN_MASK bit in REG_EGRESS_RATE_METER_CFG register,
+egress QoS metering is not supported yet since we are missing some other
+hw configurations (e.g. token bucket rate, token bucket size).
+
+Introduced by commit 23020f049327 ("net: airoha: Introduce ethernet support
+for EN7581 SoC")
+
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Simon Horman
+Link: https://patch.msgid.link/20241009-airoha-fixes-v2-1-18af63ec19bf@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -554,7 +554,7 @@
+ #define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+ #define REG_EGRESS_RATE_METER_CFG 0x100c
+-#define EGRESS_RATE_METER_EN_MASK BIT(29)
++#define EGRESS_RATE_METER_EN_MASK BIT(31)
+ #define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+ #define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+ #define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
diff --git a/target/linux/airoha/patches-6.6/017-v6.13-net-airoha-Implement-BQL-support.patch b/target/linux/airoha/patches-6.6/017-v6.13-net-airoha-Implement-BQL-support.patch
new file mode 100644
index 0000000000..b6bb9f647d
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/017-v6.13-net-airoha-Implement-BQL-support.patch
@@ -0,0 +1,42 @@
+From 1d304174106c93ce05f6088813ad7203b3eb381a Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Sat, 12 Oct 2024 11:01:11 +0200
+Subject: [PATCH] net: airoha: Implement BQL support
+
+Introduce BQL support in the airoha_eth driver reporting to the kernel
+info about tx hw DMA queues in order to avoid bufferbloat and keep the
+latency small.
+
+Signed-off-by: Lorenzo Bianconi
+Link: https://patch.msgid.link/20241012-en7581-bql-v2-1-4deb4efdb60b@kernel.org
+Signed-off-by: Jakub Kicinski
+---
+ drivers/net/ethernet/mediatek/airoha_eth.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -1710,9 +1710,11 @@ static int airoha_qdma_tx_napi_poll(stru
+ WRITE_ONCE(desc->msg1, 0);
+
+ if (skb) {
++ u16 queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq;
+
+- txq = netdev_get_tx_queue(skb->dev, qid);
++ txq = netdev_get_tx_queue(skb->dev, queue);
++ netdev_tx_completed_queue(txq, 1, skb->len);
+ if (netif_tx_queue_stopped(txq) &&
+ q->ndesc - q->queued >= q->free_thr)
+ netif_tx_wake_queue(txq);
+@@ -2488,7 +2490,9 @@ static netdev_tx_t airoha_dev_xmit(struc
+ q->queued += i;
+
+ skb_tx_timestamp(skb);
+- if (!netdev_xmit_more())
++ netdev_tx_sent_queue(txq, skb->len);
++
++ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+ TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
diff --git a/target/linux/airoha/patches-6.6/018-01-v6.10-clk-en7523-Add-en_clk_soc_data-data-structure.patch b/target/linux/airoha/patches-6.6/018-01-v6.10-clk-en7523-Add-en_clk_soc_data-data-structure.patch
new file mode 100644
index 0000000000..1e19356762
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/018-01-v6.10-clk-en7523-Add-en_clk_soc_data-data-structure.patch
@@ -0,0 +1,98 @@
+From 457e74667f452d7f071ad2b2d9313ec62ebc4b02 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Sat, 6 Apr 2024 12:43:43 +0200
+Subject: [PATCH 1/2] clk: en7523: Add en_clk_soc_data data structure
+
+Introduce en_clk_soc_data data structure in order to define multiple
+clk_ops for each supported SoC. This is a preliminary patch to
+introduce EN7581 clock support.
+
+Tested-by: Zhengping Zhang
+Signed-off-by: Lorenzo Bianconi
+Link: https://lore.kernel.org/r/562a0da8d7874a02a324687c152c87a1549924bd.1712399981.git.lorenzo@kernel.org
+Signed-off-by: Stephen Boyd
+---
+ drivers/clk/clk-en7523.c | 34 +++++++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 13 deletions(-)
+
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -3,8 +3,8 @@
+ #include
+ #include
+ #include
+-#include
+ #include
++#include
+ #include
+
+ #define REG_PCI_CONTROL 0x88
+@@ -48,6 +48,10 @@ struct en_clk_gate {
+ struct clk_hw hw;
+ };
+
++struct en_clk_soc_data {
++ const struct clk_ops pcie_ops;
++};
++
+ static const u32 gsw_base[] = { 400000000, 500000000 };
+ static const u32 emi_base[] = { 333000000, 400000000 };
+ static const u32 bus_base[] = { 500000000, 540000000 };
+@@ -150,11 +154,6 @@ static const struct en_clk_desc en7523_b
+ }
+ };
+
+-static const struct of_device_id of_match_clk_en7523[] = {
+- { .compatible = "airoha,en7523-scu", },
+- { /* sentinel */ }
+-};
+-
+ static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
+ {
+ const struct en_clk_desc *desc = &en7523_base_clks[i];
+@@ -252,14 +251,10 @@ static void en7523_pci_unprepare(struct
+ static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
+ void __iomem *np_base)
+ {
+- static const struct clk_ops pcie_gate_ops = {
+- .is_enabled = en7523_pci_is_enabled,
+- .prepare = en7523_pci_prepare,
+- .unprepare = en7523_pci_unprepare,
+- };
++ const struct en_clk_soc_data *soc_data = device_get_match_data(dev);
+ struct clk_init_data init = {
+ .name = "pcie",
+- .ops = &pcie_gate_ops,
++ .ops = &soc_data->pcie_ops,
+ };
+ struct en_clk_gate *cg;
+
+@@ -269,7 +264,7 @@ static struct clk_hw *en7523_register_pc
+
+ cg->base = np_base;
+ cg->hw.init = &init;
+- en7523_pci_unprepare(&cg->hw);
++ init.ops->unprepare(&cg->hw);
+
+ if (clk_hw_register(dev, &cg->hw))
+ return NULL;
+@@ -338,6 +333,19 @@ static int en7523_clk_probe(struct platf
+ return r;
+ }
+
++static const struct en_clk_soc_data en7523_data = {
++ .pcie_ops = {
++ .is_enabled = en7523_pci_is_enabled,
++ .prepare = en7523_pci_prepare,
++ .unprepare = en7523_pci_unprepare,
++ },
++};
++
++static const struct of_device_id of_match_clk_en7523[] = {
++ { .compatible = "airoha,en7523-scu", .data = &en7523_data },
++ { /* sentinel */ }
++};
++
+ static struct platform_driver clk_en7523_drv = {
+ .probe = en7523_clk_probe,
+ .driver = {
diff --git a/target/linux/airoha/patches-6.6/018-02-v6.10-clk-en7523-Add-EN7581-support.patch b/target/linux/airoha/patches-6.6/018-02-v6.10-clk-en7523-Add-EN7581-support.patch
new file mode 100644
index 0000000000..c27b79cf55
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/018-02-v6.10-clk-en7523-Add-EN7581-support.patch
@@ -0,0 +1,248 @@
+From 66bc47326ce2a319add7e933d9340215711236ac Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Sat, 6 Apr 2024 12:43:44 +0200
+Subject: [PATCH 2/2] clk: en7523: Add EN7581 support
+
+Introduce EN7581 clock support to clk-en7523 driver.
+Add hw_init callback to en_clk_soc_data data structure.
+
+Tested-by: Zhengping Zhang
+Signed-off-by: Lorenzo Bianconi
+Link: https://lore.kernel.org/r/57b6e53ed4d2b2e38abff6a3ea56841bad6be8a9.1712399981.git.lorenzo@kernel.org
+Signed-off-by: Stephen Boyd
+---
+ drivers/clk/clk-en7523.c | 157 +++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 152 insertions(+), 5 deletions(-)
+
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -10,7 +10,9 @@
+ #define REG_PCI_CONTROL 0x88
+ #define REG_PCI_CONTROL_PERSTOUT BIT(29)
+ #define REG_PCI_CONTROL_PERSTOUT1 BIT(26)
++#define REG_PCI_CONTROL_REFCLK_EN0 BIT(23)
+ #define REG_PCI_CONTROL_REFCLK_EN1 BIT(22)
++#define REG_PCI_CONTROL_PERSTOUT2 BIT(16)
+ #define REG_GSW_CLK_DIV_SEL 0x1b4
+ #define REG_EMI_CLK_DIV_SEL 0x1b8
+ #define REG_BUS_CLK_DIV_SEL 0x1bc
+@@ -18,10 +20,25 @@
+ #define REG_SPI_CLK_FREQ_SEL 0x1c8
+ #define REG_NPU_CLK_DIV_SEL 0x1fc
+ #define REG_CRYPTO_CLKSRC 0x200
+-#define REG_RESET_CONTROL 0x834
++#define REG_RESET_CONTROL2 0x830
++#define REG_RESET2_CONTROL_PCIE2 BIT(27)
++#define REG_RESET_CONTROL1 0x834
+ #define REG_RESET_CONTROL_PCIEHB BIT(29)
+ #define REG_RESET_CONTROL_PCIE1 BIT(27)
+ #define REG_RESET_CONTROL_PCIE2 BIT(26)
++/* EN7581 */
++#define REG_PCIE0_MEM 0x00
++#define REG_PCIE0_MEM_MASK 0x04
++#define REG_PCIE1_MEM 0x08
++#define REG_PCIE1_MEM_MASK 0x0c
++#define REG_PCIE2_MEM 0x10
++#define REG_PCIE2_MEM_MASK 0x14
++#define REG_PCIE_RESET_OPEN_DRAIN 0x018c
++#define REG_PCIE_RESET_OPEN_DRAIN_MASK GENMASK(2, 0)
++#define REG_NP_SCU_PCIC 0x88
++#define REG_NP_SCU_SSTR 0x9c
++#define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13)
++#define REG_PCIE_XSI1_SEL_MASK GENMASK(12, 11)
+
+ struct en_clk_desc {
+ int id;
+@@ -50,6 +67,8 @@ struct en_clk_gate {
+
+ struct en_clk_soc_data {
+ const struct clk_ops pcie_ops;
++ int (*hw_init)(struct platform_device *pdev, void __iomem *base,
++ void __iomem *np_base);
+ };
+
+ static const u32 gsw_base[] = { 400000000, 500000000 };
+@@ -216,14 +235,14 @@ static int en7523_pci_prepare(struct clk
+ usleep_range(1000, 2000);
+
+ /* Reset to default */
+- val = readl(np_base + REG_RESET_CONTROL);
++ val = readl(np_base + REG_RESET_CONTROL1);
+ mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
+ REG_RESET_CONTROL_PCIEHB;
+- writel(val & ~mask, np_base + REG_RESET_CONTROL);
++ writel(val & ~mask, np_base + REG_RESET_CONTROL1);
+ usleep_range(1000, 2000);
+- writel(val | mask, np_base + REG_RESET_CONTROL);
++ writel(val | mask, np_base + REG_RESET_CONTROL1);
+ msleep(100);
+- writel(val & ~mask, np_base + REG_RESET_CONTROL);
++ writel(val & ~mask, np_base + REG_RESET_CONTROL1);
+ usleep_range(5000, 10000);
+
+ /* Release device */
+@@ -264,6 +283,9 @@ static struct clk_hw *en7523_register_pc
+
+ cg->base = np_base;
+ cg->hw.init = &init;
++
++ if (init.ops->disable)
++ init.ops->disable(&cg->hw);
+ init.ops->unprepare(&cg->hw);
+
+ if (clk_hw_register(dev, &cg->hw))
+@@ -272,6 +294,111 @@ static struct clk_hw *en7523_register_pc
+ return &cg->hw;
+ }
+
++static int en7581_pci_is_enabled(struct clk_hw *hw)
++{
++ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
++ u32 val, mask;
++
++ mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1;
++ val = readl(cg->base + REG_PCI_CONTROL);
++ return (val & mask) == mask;
++}
++
++static int en7581_pci_prepare(struct clk_hw *hw)
++{
++ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
++ void __iomem *np_base = cg->base;
++ u32 val, mask;
++
++ mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
++ REG_RESET_CONTROL_PCIEHB;
++ val = readl(np_base + REG_RESET_CONTROL1);
++ writel(val & ~mask, np_base + REG_RESET_CONTROL1);
++ val = readl(np_base + REG_RESET_CONTROL2);
++ writel(val & ~REG_RESET2_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
++ usleep_range(5000, 10000);
++
++ return 0;
++}
++
++static int en7581_pci_enable(struct clk_hw *hw)
++{
++ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
++ void __iomem *np_base = cg->base;
++ u32 val, mask;
++
++ mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 |
++ REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 |
++ REG_PCI_CONTROL_PERSTOUT;
++ val = readl(np_base + REG_PCI_CONTROL);
++ writel(val | mask, np_base + REG_PCI_CONTROL);
++ msleep(250);
++
++ return 0;
++}
++
++static void en7581_pci_unprepare(struct clk_hw *hw)
++{
++ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
++ void __iomem *np_base = cg->base;
++ u32 val, mask;
++
++ mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
++ REG_RESET_CONTROL_PCIEHB;
++ val = readl(np_base + REG_RESET_CONTROL1);
++ writel(val | mask, np_base + REG_RESET_CONTROL1);
++ mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2;
++ writel(val | mask, np_base + REG_RESET_CONTROL1);
++ val = readl(np_base + REG_RESET_CONTROL2);
++ writel(val | REG_RESET_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
++ msleep(100);
++}
++
++static void en7581_pci_disable(struct clk_hw *hw)
++{
++ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
++ void __iomem *np_base = cg->base;
++ u32 val, mask;
++
++ mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 |
++ REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 |
++ REG_PCI_CONTROL_PERSTOUT;
++ val = readl(np_base + REG_PCI_CONTROL);
++ writel(val & ~mask, np_base + REG_PCI_CONTROL);
++ usleep_range(1000, 2000);
++}
++
++static int en7581_clk_hw_init(struct platform_device *pdev,
++ void __iomem *base,
++ void __iomem *np_base)
++{
++ void __iomem *pb_base;
++ u32 val;
++
++ pb_base = devm_platform_ioremap_resource(pdev, 2);
++ if (IS_ERR(pb_base))
++ return PTR_ERR(pb_base);
++
++ val = readl(np_base + REG_NP_SCU_SSTR);
++ val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
++ writel(val, np_base + REG_NP_SCU_SSTR);
++ val = readl(np_base + REG_NP_SCU_PCIC);
++ writel(val | 3, np_base + REG_NP_SCU_PCIC);
++
++ writel(0x20000000, pb_base + REG_PCIE0_MEM);
++ writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
++ writel(0x24000000, pb_base + REG_PCIE1_MEM);
++ writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
++ writel(0x28000000, pb_base + REG_PCIE2_MEM);
++ writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
++
++ val = readl(base + REG_PCIE_RESET_OPEN_DRAIN);
++ writel(val | REG_PCIE_RESET_OPEN_DRAIN_MASK,
++ base + REG_PCIE_RESET_OPEN_DRAIN);
++
++ return 0;
++}
++
+ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, void __iomem *np_base)
+ {
+@@ -304,6 +431,7 @@ static void en7523_register_clocks(struc
+ static int en7523_clk_probe(struct platform_device *pdev)
+ {
+ struct device_node *node = pdev->dev.of_node;
++ const struct en_clk_soc_data *soc_data;
+ struct clk_hw_onecell_data *clk_data;
+ void __iomem *base, *np_base;
+ int r;
+@@ -316,6 +444,13 @@ static int en7523_clk_probe(struct platf
+ if (IS_ERR(np_base))
+ return PTR_ERR(np_base);
+
++ soc_data = device_get_match_data(&pdev->dev);
++ if (soc_data->hw_init) {
++ r = soc_data->hw_init(pdev, base, np_base);
++ if (r)
++ return r;
++ }
++
+ clk_data = devm_kzalloc(&pdev->dev,
+ struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
+ GFP_KERNEL);
+@@ -341,8 +476,20 @@ static const struct en_clk_soc_data en75
+ },
+ };
+
++static const struct en_clk_soc_data en7581_data = {
++ .pcie_ops = {
++ .is_enabled = en7581_pci_is_enabled,
++ .prepare = en7581_pci_prepare,
++ .enable = en7581_pci_enable,
++ .unprepare = en7581_pci_unprepare,
++ .disable = en7581_pci_disable,
++ },
++ .hw_init = en7581_clk_hw_init,
++};
++
+ static const struct of_device_id of_match_clk_en7523[] = {
+ { .compatible = "airoha,en7523-scu", .data = &en7523_data },
++ { .compatible = "airoha,en7581-scu", .data = &en7581_data },
+ { /* sentinel */ }
+ };
+
diff --git a/target/linux/airoha/patches-6.6/019-01-v6.11-clk-en7523-Add-reset-controller-support-for-EN7581-S.patch b/target/linux/airoha/patches-6.6/019-01-v6.11-clk-en7523-Add-reset-controller-support-for-EN7581-S.patch
new file mode 100644
index 0000000000..4d9ff9ef9d
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/019-01-v6.11-clk-en7523-Add-reset-controller-support-for-EN7581-S.patch
@@ -0,0 +1,270 @@
+From e0d8ea4ed5fa70fd085a54d0b574a044b9407c39 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 27 Jun 2024 13:04:23 +0200
+Subject: [PATCH 1/4] clk: en7523: Add reset-controller support for EN7581 SoC
+
+Introduce reset API support to EN7581 clock driver.
+
+Reviewed-by: AngeloGioacchino Del Regno
+Tested-by: Zhengping Zhang
+Signed-off-by: Lorenzo Bianconi
+Link: https://lore.kernel.org/r/4f735d17e549ea53769bf5a3f50406debb879a44.1719485847.git.lorenzo@kernel.org
+Signed-off-by: Stephen Boyd
+---
+ drivers/clk/clk-en7523.c | 192 ++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 187 insertions(+), 5 deletions(-)
+
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -5,7 +5,11 @@
+ #include
+ #include
+ #include
++#include
+ #include
++#include
++
++#define RST_NR_PER_BANK 32
+
+ #define REG_PCI_CONTROL 0x88
+ #define REG_PCI_CONTROL_PERSTOUT BIT(29)
+@@ -40,6 +44,9 @@
+ #define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13)
+ #define REG_PCIE_XSI1_SEL_MASK GENMASK(12, 11)
+
++#define REG_RST_CTRL2 0x00
++#define REG_RST_CTRL1 0x04
++
+ struct en_clk_desc {
+ int id;
+ const char *name;
+@@ -65,8 +72,20 @@ struct en_clk_gate {
+ struct clk_hw hw;
+ };
+
++struct en_rst_data {
++ const u16 *bank_ofs;
++ const u16 *idx_map;
++ void __iomem *base;
++ struct reset_controller_dev rcdev;
++};
++
+ struct en_clk_soc_data {
+ const struct clk_ops pcie_ops;
++ struct {
++ const u16 *bank_ofs;
++ const u16 *idx_map;
++ u16 idx_map_nr;
++ } reset;
+ int (*hw_init)(struct platform_device *pdev, void __iomem *base,
+ void __iomem *np_base);
+ };
+@@ -173,6 +192,69 @@ static const struct en_clk_desc en7523_b
+ }
+ };
+
++static const u16 en7581_rst_ofs[] = {
++ REG_RST_CTRL2,
++ REG_RST_CTRL1,
++};
++
++static const u16 en7581_rst_map[] = {
++ /* RST_CTRL2 */
++ [EN7581_XPON_PHY_RST] = 0,
++ [EN7581_CPU_TIMER2_RST] = 2,
++ [EN7581_HSUART_RST] = 3,
++ [EN7581_UART4_RST] = 4,
++ [EN7581_UART5_RST] = 5,
++ [EN7581_I2C2_RST] = 6,
++ [EN7581_XSI_MAC_RST] = 7,
++ [EN7581_XSI_PHY_RST] = 8,
++ [EN7581_NPU_RST] = 9,
++ [EN7581_I2S_RST] = 10,
++ [EN7581_TRNG_RST] = 11,
++ [EN7581_TRNG_MSTART_RST] = 12,
++ [EN7581_DUAL_HSI0_RST] = 13,
++ [EN7581_DUAL_HSI1_RST] = 14,
++ [EN7581_HSI_RST] = 15,
++ [EN7581_DUAL_HSI0_MAC_RST] = 16,
++ [EN7581_DUAL_HSI1_MAC_RST] = 17,
++ [EN7581_HSI_MAC_RST] = 18,
++ [EN7581_WDMA_RST] = 19,
++ [EN7581_WOE0_RST] = 20,
++ [EN7581_WOE1_RST] = 21,
++ [EN7581_HSDMA_RST] = 22,
++ [EN7581_TDMA_RST] = 24,
++ [EN7581_EMMC_RST] = 25,
++ [EN7581_SOE_RST] = 26,
++ [EN7581_PCIE2_RST] = 27,
++ [EN7581_XFP_MAC_RST] = 28,
++ [EN7581_USB_HOST_P1_RST] = 29,
++ [EN7581_USB_HOST_P1_U3_PHY_RST] = 30,
++ /* RST_CTRL1 */
++ [EN7581_PCM1_ZSI_ISI_RST] = RST_NR_PER_BANK + 0,
++ [EN7581_FE_PDMA_RST] = RST_NR_PER_BANK + 1,
++ [EN7581_FE_QDMA_RST] = RST_NR_PER_BANK + 2,
++ [EN7581_PCM_SPIWP_RST] = RST_NR_PER_BANK + 4,
++ [EN7581_CRYPTO_RST] = RST_NR_PER_BANK + 6,
++ [EN7581_TIMER_RST] = RST_NR_PER_BANK + 8,
++ [EN7581_PCM1_RST] = RST_NR_PER_BANK + 11,
++ [EN7581_UART_RST] = RST_NR_PER_BANK + 12,
++ [EN7581_GPIO_RST] = RST_NR_PER_BANK + 13,
++ [EN7581_GDMA_RST] = RST_NR_PER_BANK + 14,
++ [EN7581_I2C_MASTER_RST] = RST_NR_PER_BANK + 16,
++ [EN7581_PCM2_ZSI_ISI_RST] = RST_NR_PER_BANK + 17,
++ [EN7581_SFC_RST] = RST_NR_PER_BANK + 18,
++ [EN7581_UART2_RST] = RST_NR_PER_BANK + 19,
++ [EN7581_GDMP_RST] = RST_NR_PER_BANK + 20,
++ [EN7581_FE_RST] = RST_NR_PER_BANK + 21,
++ [EN7581_USB_HOST_P0_RST] = RST_NR_PER_BANK + 22,
++ [EN7581_GSW_RST] = RST_NR_PER_BANK + 23,
++ [EN7581_SFC2_PCM_RST] = RST_NR_PER_BANK + 25,
++ [EN7581_PCIE0_RST] = RST_NR_PER_BANK + 26,
++ [EN7581_PCIE1_RST] = RST_NR_PER_BANK + 27,
++ [EN7581_CPU_TIMER_RST] = RST_NR_PER_BANK + 28,
++ [EN7581_PCIE_HB_RST] = RST_NR_PER_BANK + 29,
++ [EN7581_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
++};
++
+ static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
+ {
+ const struct en_clk_desc *desc = &en7523_base_clks[i];
+@@ -375,7 +457,7 @@ static int en7581_clk_hw_init(struct pla
+ void __iomem *pb_base;
+ u32 val;
+
+- pb_base = devm_platform_ioremap_resource(pdev, 2);
++ pb_base = devm_platform_ioremap_resource(pdev, 3);
+ if (IS_ERR(pb_base))
+ return PTR_ERR(pb_base);
+
+@@ -428,6 +510,95 @@ static void en7523_register_clocks(struc
+ clk_data->hws[EN7523_CLK_PCIE] = hw;
+ }
+
++static int en7523_reset_update(struct reset_controller_dev *rcdev,
++ unsigned long id, bool assert)
++{
++ struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
++ void __iomem *addr = rst_data->base + rst_data->bank_ofs[id / RST_NR_PER_BANK];
++ u32 val;
++
++ val = readl(addr);
++ if (assert)
++ val |= BIT(id % RST_NR_PER_BANK);
++ else
++ val &= ~BIT(id % RST_NR_PER_BANK);
++ writel(val, addr);
++
++ return 0;
++}
++
++static int en7523_reset_assert(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ return en7523_reset_update(rcdev, id, true);
++}
++
++static int en7523_reset_deassert(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ return en7523_reset_update(rcdev, id, false);
++}
++
++static int en7523_reset_status(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
++ void __iomem *addr = rst_data->base + rst_data->bank_ofs[id / RST_NR_PER_BANK];
++
++ return !!(readl(addr) & BIT(id % RST_NR_PER_BANK));
++}
++
++static int en7523_reset_xlate(struct reset_controller_dev *rcdev,
++ const struct of_phandle_args *reset_spec)
++{
++ struct en_rst_data *rst_data = container_of(rcdev, struct en_rst_data, rcdev);
++
++ if (reset_spec->args[0] >= rcdev->nr_resets)
++ return -EINVAL;
++
++ return rst_data->idx_map[reset_spec->args[0]];
++}
++
++static const struct reset_control_ops en7523_reset_ops = {
++ .assert = en7523_reset_assert,
++ .deassert = en7523_reset_deassert,
++ .status = en7523_reset_status,
++};
++
++static int en7523_reset_register(struct platform_device *pdev,
++ const struct en_clk_soc_data *soc_data)
++{
++ struct device *dev = &pdev->dev;
++ struct en_rst_data *rst_data;
++ void __iomem *base;
++
++ /* no reset lines available */
++ if (!soc_data->reset.idx_map_nr)
++ return 0;
++
++ base = devm_platform_ioremap_resource(pdev, 2);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ rst_data = devm_kzalloc(dev, sizeof(*rst_data), GFP_KERNEL);
++ if (!rst_data)
++ return -ENOMEM;
++
++ rst_data->bank_ofs = soc_data->reset.bank_ofs;
++ rst_data->idx_map = soc_data->reset.idx_map;
++ rst_data->base = base;
++
++ rst_data->rcdev.nr_resets = soc_data->reset.idx_map_nr;
++ rst_data->rcdev.of_xlate = en7523_reset_xlate;
++ rst_data->rcdev.ops = &en7523_reset_ops;
++ rst_data->rcdev.of_node = dev->of_node;
++ rst_data->rcdev.of_reset_n_cells = 1;
++ rst_data->rcdev.owner = THIS_MODULE;
++ rst_data->rcdev.dev = dev;
++
++ return devm_reset_controller_register(dev, &rst_data->rcdev);
++}
++
+ static int en7523_clk_probe(struct platform_device *pdev)
+ {
+ struct device_node *node = pdev->dev.of_node;
+@@ -461,11 +632,17 @@ static int en7523_clk_probe(struct platf
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+- dev_err(&pdev->dev,
+- "could not register clock provider: %s: %d\n",
+- pdev->name, r);
++ return dev_err_probe(&pdev->dev, r, "Could not register clock provider: %s\n",
++ pdev->name);
++
++ r = en7523_reset_register(pdev, soc_data);
++ if (r) {
++ of_clk_del_provider(node);
++ return dev_err_probe(&pdev->dev, r, "Could not register reset controller: %s\n",
++ pdev->name);
++ }
+
+- return r;
++ return 0;
+ }
+
+ static const struct en_clk_soc_data en7523_data = {
+@@ -484,6 +661,11 @@ static const struct en_clk_soc_data en75
+ .unprepare = en7581_pci_unprepare,
+ .disable = en7581_pci_disable,
+ },
++ .reset = {
++ .bank_ofs = en7581_rst_ofs,
++ .idx_map = en7581_rst_map,
++ .idx_map_nr = ARRAY_SIZE(en7581_rst_map),
++ },
+ .hw_init = en7581_clk_hw_init,
+ };
+
diff --git a/target/linux/airoha/patches-6.6/019-02-v6.11-clk-en7523-Remove-pcie-prepare-unpreare-callbacks-fo.patch b/target/linux/airoha/patches-6.6/019-02-v6.11-clk-en7523-Remove-pcie-prepare-unpreare-callbacks-fo.patch
new file mode 100644
index 0000000000..2a32ad0cff
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/019-02-v6.11-clk-en7523-Remove-pcie-prepare-unpreare-callbacks-fo.patch
@@ -0,0 +1,91 @@
+From db7a4a11e8be375b0a9c159f688e0cea49eacc5d Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 27 Jun 2024 13:04:24 +0200
+Subject: [PATCH 2/4] clk: en7523: Remove pcie prepare/unpreare callbacks for
+ EN7581 SoC
+
+Get rid of prepare and unprepare callbacks for PCIe clock since they can
+be modeled as a reset line consumed by the PCIe driver
+(pcie-mediatek-gen3)
+
+Reviewed-by: AngeloGioacchino Del Regno
+Tested-by: Zhengping Zhang
+Signed-off-by: Lorenzo Bianconi
+Link: https://lore.kernel.org/r/16df149975514d3030499c48fc1c64f090093595.1719485847.git.lorenzo@kernel.org
+Signed-off-by: Stephen Boyd
+---
+ drivers/clk/clk-en7523.c | 41 ++--------------------------------------
+ 1 file changed, 2 insertions(+), 39 deletions(-)
+
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -366,9 +366,8 @@ static struct clk_hw *en7523_register_pc
+ cg->base = np_base;
+ cg->hw.init = &init;
+
+- if (init.ops->disable)
+- init.ops->disable(&cg->hw);
+- init.ops->unprepare(&cg->hw);
++ if (init.ops->unprepare)
++ init.ops->unprepare(&cg->hw);
+
+ if (clk_hw_register(dev, &cg->hw))
+ return NULL;
+@@ -386,23 +385,6 @@ static int en7581_pci_is_enabled(struct
+ return (val & mask) == mask;
+ }
+
+-static int en7581_pci_prepare(struct clk_hw *hw)
+-{
+- struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+- void __iomem *np_base = cg->base;
+- u32 val, mask;
+-
+- mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
+- REG_RESET_CONTROL_PCIEHB;
+- val = readl(np_base + REG_RESET_CONTROL1);
+- writel(val & ~mask, np_base + REG_RESET_CONTROL1);
+- val = readl(np_base + REG_RESET_CONTROL2);
+- writel(val & ~REG_RESET2_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
+- usleep_range(5000, 10000);
+-
+- return 0;
+-}
+-
+ static int en7581_pci_enable(struct clk_hw *hw)
+ {
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+@@ -419,23 +401,6 @@ static int en7581_pci_enable(struct clk_
+ return 0;
+ }
+
+-static void en7581_pci_unprepare(struct clk_hw *hw)
+-{
+- struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+- void __iomem *np_base = cg->base;
+- u32 val, mask;
+-
+- mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
+- REG_RESET_CONTROL_PCIEHB;
+- val = readl(np_base + REG_RESET_CONTROL1);
+- writel(val | mask, np_base + REG_RESET_CONTROL1);
+- mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2;
+- writel(val | mask, np_base + REG_RESET_CONTROL1);
+- val = readl(np_base + REG_RESET_CONTROL2);
+- writel(val | REG_RESET_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
+- msleep(100);
+-}
+-
+ static void en7581_pci_disable(struct clk_hw *hw)
+ {
+ struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
+@@ -656,9 +621,7 @@ static const struct en_clk_soc_data en75
+ static const struct en_clk_soc_data en7581_data = {
+ .pcie_ops = {
+ .is_enabled = en7581_pci_is_enabled,
+- .prepare = en7581_pci_prepare,
+ .enable = en7581_pci_enable,
+- .unprepare = en7581_pci_unprepare,
+ .disable = en7581_pci_disable,
+ },
+ .reset = {
diff --git a/target/linux/airoha/patches-6.6/019-03-v6.11-clk-en7523-Remove-PCIe-reset-open-drain-configuratio.patch b/target/linux/airoha/patches-6.6/019-03-v6.11-clk-en7523-Remove-PCIe-reset-open-drain-configuratio.patch
new file mode 100644
index 0000000000..8a4b9c7340
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/019-03-v6.11-clk-en7523-Remove-PCIe-reset-open-drain-configuratio.patch
@@ -0,0 +1,65 @@
+From bf288bd25d6232310abb81db417376ce460eb032 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 27 Jun 2024 13:04:25 +0200
+Subject: [PATCH 3/4] clk: en7523: Remove PCIe reset open drain configuration
+ for EN7581
+
+PCIe reset open drain configuration will be managed by pinctrl driver.
+
+Reviewed-by: AngeloGioacchino Del Regno
+Signed-off-by: Lorenzo Bianconi
+Link: https://lore.kernel.org/r/43276af5f08a554b4ab2e52e8d437fff5c06a732.1719485847.git.lorenzo@kernel.org
+Signed-off-by: Stephen Boyd
+---
+ drivers/clk/clk-en7523.c | 12 ++----------
+ 1 file changed, 2 insertions(+), 10 deletions(-)
+
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -37,8 +37,6 @@
+ #define REG_PCIE1_MEM_MASK 0x0c
+ #define REG_PCIE2_MEM 0x10
+ #define REG_PCIE2_MEM_MASK 0x14
+-#define REG_PCIE_RESET_OPEN_DRAIN 0x018c
+-#define REG_PCIE_RESET_OPEN_DRAIN_MASK GENMASK(2, 0)
+ #define REG_NP_SCU_PCIC 0x88
+ #define REG_NP_SCU_SSTR 0x9c
+ #define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13)
+@@ -86,8 +84,7 @@ struct en_clk_soc_data {
+ const u16 *idx_map;
+ u16 idx_map_nr;
+ } reset;
+- int (*hw_init)(struct platform_device *pdev, void __iomem *base,
+- void __iomem *np_base);
++ int (*hw_init)(struct platform_device *pdev, void __iomem *np_base);
+ };
+
+ static const u32 gsw_base[] = { 400000000, 500000000 };
+@@ -416,7 +413,6 @@ static void en7581_pci_disable(struct cl
+ }
+
+ static int en7581_clk_hw_init(struct platform_device *pdev,
+- void __iomem *base,
+ void __iomem *np_base)
+ {
+ void __iomem *pb_base;
+@@ -439,10 +435,6 @@ static int en7581_clk_hw_init(struct pla
+ writel(0x28000000, pb_base + REG_PCIE2_MEM);
+ writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
+
+- val = readl(base + REG_PCIE_RESET_OPEN_DRAIN);
+- writel(val | REG_PCIE_RESET_OPEN_DRAIN_MASK,
+- base + REG_PCIE_RESET_OPEN_DRAIN);
+-
+ return 0;
+ }
+
+@@ -582,7 +574,7 @@ static int en7523_clk_probe(struct platf
+
+ soc_data = device_get_match_data(&pdev->dev);
+ if (soc_data->hw_init) {
+- r = soc_data->hw_init(pdev, base, np_base);
++ r = soc_data->hw_init(pdev, np_base);
+ if (r)
+ return r;
+ }
diff --git a/target/linux/airoha/patches-6.6/020-v6.11-dt-bindings-clock-airoha-Add-reset-support-to-EN7581.patch b/target/linux/airoha/patches-6.6/020-v6.11-dt-bindings-clock-airoha-Add-reset-support-to-EN7581.patch
new file mode 100644
index 0000000000..49ab4e9786
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/020-v6.11-dt-bindings-clock-airoha-Add-reset-support-to-EN7581.patch
@@ -0,0 +1,93 @@
+From 7aa291962f4c3b7afb9a12fa60b406b95e5eacb4 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Thu, 27 Jun 2024 13:04:22 +0200
+Subject: [PATCH] dt-bindings: clock: airoha: Add reset support to EN7581 clock
+ binding
+
+Introduce reset capability to EN7581 device-tree clock binding
+documentation. Add reset register mapping between misc scu and pb scu
+ones in order to follow the memory order. This change is not
+introducing any backward compatibility issue since the EN7581 dts is not
+upstream yet.
+
+Fixes: 0a382be005cf ("dt-bindings: clock: airoha: add EN7581 binding")
+Reviewed-by: AngeloGioacchino Del Regno
+Reviewed-by: Rob Herring (Arm)
+Signed-off-by: Lorenzo Bianconi
+Link: https://lore.kernel.org/r/28fef3e83062d5d71e7b4be4b47583f851a15bf8.1719485847.git.lorenzo@kernel.org
+Signed-off-by: Stephen Boyd
+---
+ .../bindings/clock/airoha,en7523-scu.yaml | 25 ++++++-
+ .../dt-bindings/reset/airoha,en7581-reset.h | 66 +++++++++++++++++++
+ 2 files changed, 90 insertions(+), 1 deletion(-)
+ create mode 100644 include/dt-bindings/reset/airoha,en7581-reset.h
+
+--- /dev/null
++++ b/include/dt-bindings/reset/airoha,en7581-reset.h
+@@ -0,0 +1,66 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 AIROHA Inc
++ * Author: Lorenzo Bianconi
++ */
++
++#ifndef __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_
++#define __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_
++
++/* RST_CTRL2 */
++#define EN7581_XPON_PHY_RST 0
++#define EN7581_CPU_TIMER2_RST 1
++#define EN7581_HSUART_RST 2
++#define EN7581_UART4_RST 3
++#define EN7581_UART5_RST 4
++#define EN7581_I2C2_RST 5
++#define EN7581_XSI_MAC_RST 6
++#define EN7581_XSI_PHY_RST 7
++#define EN7581_NPU_RST 8
++#define EN7581_I2S_RST 9
++#define EN7581_TRNG_RST 10
++#define EN7581_TRNG_MSTART_RST 11
++#define EN7581_DUAL_HSI0_RST 12
++#define EN7581_DUAL_HSI1_RST 13
++#define EN7581_HSI_RST 14
++#define EN7581_DUAL_HSI0_MAC_RST 15
++#define EN7581_DUAL_HSI1_MAC_RST 16
++#define EN7581_HSI_MAC_RST 17
++#define EN7581_WDMA_RST 18
++#define EN7581_WOE0_RST 19
++#define EN7581_WOE1_RST 20
++#define EN7581_HSDMA_RST 21
++#define EN7581_TDMA_RST 22
++#define EN7581_EMMC_RST 23
++#define EN7581_SOE_RST 24
++#define EN7581_PCIE2_RST 25
++#define EN7581_XFP_MAC_RST 26
++#define EN7581_USB_HOST_P1_RST 27
++#define EN7581_USB_HOST_P1_U3_PHY_RST 28
++/* RST_CTRL1 */
++#define EN7581_PCM1_ZSI_ISI_RST 29
++#define EN7581_FE_PDMA_RST 30
++#define EN7581_FE_QDMA_RST 31
++#define EN7581_PCM_SPIWP_RST 32
++#define EN7581_CRYPTO_RST 33
++#define EN7581_TIMER_RST 34
++#define EN7581_PCM1_RST 35
++#define EN7581_UART_RST 36
++#define EN7581_GPIO_RST 37
++#define EN7581_GDMA_RST 38
++#define EN7581_I2C_MASTER_RST 39
++#define EN7581_PCM2_ZSI_ISI_RST 40
++#define EN7581_SFC_RST 41
++#define EN7581_UART2_RST 42
++#define EN7581_GDMP_RST 43
++#define EN7581_FE_RST 44
++#define EN7581_USB_HOST_P0_RST 45
++#define EN7581_GSW_RST 46
++#define EN7581_SFC2_PCM_RST 47
++#define EN7581_PCIE0_RST 48
++#define EN7581_PCIE1_RST 49
++#define EN7581_CPU_TIMER_RST 50
++#define EN7581_PCIE_HB_RST 51
++#define EN7581_XPON_MAC_RST 52
++
++#endif /* __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_ */
diff --git a/target/linux/airoha/patches-6.6/021-01-v6.12-PCI-mediatek-gen3-Add-mtk_gen3_pcie_pdata-data-struc.patch b/target/linux/airoha/patches-6.6/021-01-v6.12-PCI-mediatek-gen3-Add-mtk_gen3_pcie_pdata-data-struc.patch
new file mode 100644
index 0000000000..f09e69dc9a
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/021-01-v6.12-PCI-mediatek-gen3-Add-mtk_gen3_pcie_pdata-data-struc.patch
@@ -0,0 +1,100 @@
+From dc869a40d73ee6e9f47d683690ae507e30e56044 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Wed, 3 Jul 2024 18:12:42 +0200
+Subject: [PATCH 1/3] PCI: mediatek-gen3: Add mtk_gen3_pcie_pdata data
+ structure
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Introduce mtk_gen3_pcie_pdata data structure in order to define
+multiple callbacks for each supported SoC.
+
+This is a preliminary patch to introduce EN7581 PCIe support.
+
+Link: https://lore.kernel.org/linux-pci/c193d1a87505d045e2e0ef33317bce17012ee095.1720022580.git.lorenzo@kernel.org
+Signed-off-by: Lorenzo Bianconi
+Signed-off-by: Krzysztof Wilczyński
+Tested-by: Zhengping Zhang
+Reviewed-by: AngeloGioacchino Del Regno
+Acked-by: Jianjun Wang
+---
+ drivers/pci/controller/pcie-mediatek-gen3.c | 24 ++++++++++++++++++---
+ 1 file changed, 21 insertions(+), 3 deletions(-)
+
+--- a/drivers/pci/controller/pcie-mediatek-gen3.c
++++ b/drivers/pci/controller/pcie-mediatek-gen3.c
+@@ -100,6 +100,16 @@
+ #define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
+ #define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+
++struct mtk_gen3_pcie;
++
++/**
++ * struct mtk_gen3_pcie_pdata - differentiate between host generations
++ * @power_up: pcie power_up callback
++ */
++struct mtk_gen3_pcie_pdata {
++ int (*power_up)(struct mtk_gen3_pcie *pcie);
++};
++
+ /**
+ * struct mtk_msi_set - MSI information for each set
+ * @base: IO mapped register base
+@@ -131,6 +141,7 @@ struct mtk_msi_set {
+ * @msi_sets: MSI sets information
+ * @lock: lock protecting IRQ bit map
+ * @msi_irq_in_use: bit map for assigned MSI IRQ
++ * @soc: pointer to SoC-dependent operations
+ */
+ struct mtk_gen3_pcie {
+ struct device *dev;
+@@ -151,6 +162,8 @@ struct mtk_gen3_pcie {
+ struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
+ struct mutex lock;
+ DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
++
++ const struct mtk_gen3_pcie_pdata *soc;
+ };
+
+ /* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
+@@ -904,7 +917,7 @@ static int mtk_pcie_setup(struct mtk_gen
+ usleep_range(10, 20);
+
+ /* Don't touch the hardware registers before power up */
+- err = mtk_pcie_power_up(pcie);
++ err = pcie->soc->power_up(pcie);
+ if (err)
+ return err;
+
+@@ -939,6 +952,7 @@ static int mtk_pcie_probe(struct platfor
+ pcie = pci_host_bridge_priv(host);
+
+ pcie->dev = dev;
++ pcie->soc = device_get_match_data(dev);
+ platform_set_drvdata(pdev, pcie);
+
+ err = mtk_pcie_setup(pcie);
+@@ -1054,7 +1068,7 @@ static int mtk_pcie_resume_noirq(struct
+ struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
+ int err;
+
+- err = mtk_pcie_power_up(pcie);
++ err = pcie->soc->power_up(pcie);
+ if (err)
+ return err;
+
+@@ -1074,8 +1088,12 @@ static const struct dev_pm_ops mtk_pcie_
+ mtk_pcie_resume_noirq)
+ };
+
++static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
++ .power_up = mtk_pcie_power_up,
++};
++
+ static const struct of_device_id mtk_pcie_of_match[] = {
+- { .compatible = "mediatek,mt8192-pcie" },
++ { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
diff --git a/target/linux/airoha/patches-6.6/021-02-v6.12-PCI-mediatek-gen3-Rely-on-reset_bulk-APIs-for-PHY-re.patch b/target/linux/airoha/patches-6.6/021-02-v6.12-PCI-mediatek-gen3-Rely-on-reset_bulk-APIs-for-PHY-re.patch
new file mode 100644
index 0000000000..5fbbc832d4
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/021-02-v6.12-PCI-mediatek-gen3-Rely-on-reset_bulk-APIs-for-PHY-re.patch
@@ -0,0 +1,155 @@
+From ee9eabbe3f0f0c7458d89840add97e54d4e0bccf Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Wed, 3 Jul 2024 18:12:43 +0200
+Subject: [PATCH 2/3] PCI: mediatek-gen3: Rely on reset_bulk APIs for PHY reset
+ lines
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use reset_bulk APIs to manage PHY reset lines.
+
+This is a preliminary patch in order to add Airoha EN7581 PCIe support.
+
+Link: https://lore.kernel.org/linux-pci/3ceb83bc0defbcf868521f8df4b9100e55ec2614.1720022580.git.lorenzo@kernel.org
+Signed-off-by: Lorenzo Bianconi
+Signed-off-by: Krzysztof Wilczyński
+Tested-by: Zhengping Zhang
+Reviewed-by: AngeloGioacchino Del Regno
+Acked-by: Jianjun Wang
+---
+ drivers/pci/controller/pcie-mediatek-gen3.c | 45 +++++++++++++++------
+ 1 file changed, 33 insertions(+), 12 deletions(-)
+
+--- a/drivers/pci/controller/pcie-mediatek-gen3.c
++++ b/drivers/pci/controller/pcie-mediatek-gen3.c
+@@ -100,14 +100,21 @@
+ #define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
+ #define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+
++#define MAX_NUM_PHY_RESETS 1
++
+ struct mtk_gen3_pcie;
+
+ /**
+ * struct mtk_gen3_pcie_pdata - differentiate between host generations
+ * @power_up: pcie power_up callback
++ * @phy_resets: phy reset lines SoC data.
+ */
+ struct mtk_gen3_pcie_pdata {
+ int (*power_up)(struct mtk_gen3_pcie *pcie);
++ struct {
++ const char *id[MAX_NUM_PHY_RESETS];
++ int num_resets;
++ } phy_resets;
+ };
+
+ /**
+@@ -128,7 +135,7 @@ struct mtk_msi_set {
+ * @base: IO mapped register base
+ * @reg_base: physical register base
+ * @mac_reset: MAC reset control
+- * @phy_reset: PHY reset control
++ * @phy_resets: PHY reset controllers
+ * @phy: PHY controller block
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count for this port
+@@ -148,7 +155,7 @@ struct mtk_gen3_pcie {
+ void __iomem *base;
+ phys_addr_t reg_base;
+ struct reset_control *mac_reset;
+- struct reset_control *phy_reset;
++ struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ int num_clks;
+@@ -788,10 +795,10 @@ static int mtk_pcie_setup_irq(struct mtk
+
+ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
+ {
++ int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *regs;
+- int ret;
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
+ if (!regs)
+@@ -804,12 +811,12 @@ static int mtk_pcie_parse_port(struct mt
+
+ pcie->reg_base = regs->start;
+
+- pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
+- if (IS_ERR(pcie->phy_reset)) {
+- ret = PTR_ERR(pcie->phy_reset);
+- if (ret != -EPROBE_DEFER)
+- dev_err(dev, "failed to get PHY reset\n");
++ for (i = 0; i < num_resets; i++)
++ pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
+
++ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
++ if (ret) {
++ dev_err(dev, "failed to get PHY bulk reset\n");
+ return ret;
+ }
+
+@@ -846,7 +853,11 @@ static int mtk_pcie_power_up(struct mtk_
+ int err;
+
+ /* PHY power on and enable pipe clock */
+- reset_control_deassert(pcie->phy_reset);
++ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
++ if (err) {
++ dev_err(dev, "failed to deassert PHYs\n");
++ return err;
++ }
+
+ err = phy_init(pcie->phy);
+ if (err) {
+@@ -882,7 +893,7 @@ err_clk_init:
+ err_phy_on:
+ phy_exit(pcie->phy);
+ err_phy_init:
+- reset_control_assert(pcie->phy_reset);
++ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+
+ return err;
+ }
+@@ -897,7 +908,7 @@ static void mtk_pcie_power_down(struct m
+
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+- reset_control_assert(pcie->phy_reset);
++ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ }
+
+ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
+@@ -909,10 +920,16 @@ static int mtk_pcie_setup(struct mtk_gen
+ return err;
+
+ /*
++ * Deassert the line in order to avoid unbalance in deassert_count
++ * counter since the bulk is shared.
++ */
++ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
++ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+- reset_control_assert(pcie->phy_reset);
++ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
++
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(10, 20);
+
+@@ -1090,6 +1107,10 @@ static const struct dev_pm_ops mtk_pcie_
+
+ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
+ .power_up = mtk_pcie_power_up,
++ .phy_resets = {
++ .id[0] = "phy",
++ .num_resets = 1,
++ },
+ };
+
+ static const struct of_device_id mtk_pcie_of_match[] = {
diff --git a/target/linux/airoha/patches-6.6/021-03-v6.12-PCI-mediatek-gen3-Add-Airoha-EN7581-support.patch b/target/linux/airoha/patches-6.6/021-03-v6.12-PCI-mediatek-gen3-Add-Airoha-EN7581-support.patch
new file mode 100644
index 0000000000..19b003d892
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/021-03-v6.12-PCI-mediatek-gen3-Add-Airoha-EN7581-support.patch
@@ -0,0 +1,199 @@
+From f6ab898356dd70f267c49045a79d28ea5cf5e43e Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Wed, 3 Jul 2024 18:12:44 +0200
+Subject: [PATCH 3/3] PCI: mediatek-gen3: Add Airoha EN7581 support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Introduce support for Airoha EN7581 PCIe controller to mediatek-gen3
+PCIe controller driver.
+
+Link: https://lore.kernel.org/linux-pci/aca00bd672ee576ad96d279414fc0835ff31f637.1720022580.git.lorenzo@kernel.org
+Signed-off-by: Lorenzo Bianconi
+Signed-off-by: Krzysztof Wilczyński
+Tested-by: Zhengping Zhang
+Reviewed-by: AngeloGioacchino Del Regno
+Acked-by: Jianjun Wang
+---
+ drivers/pci/controller/Kconfig | 2 +-
+ drivers/pci/controller/pcie-mediatek-gen3.c | 113 +++++++++++++++++++-
+ 2 files changed, 113 insertions(+), 2 deletions(-)
+
+--- a/drivers/pci/controller/Kconfig
++++ b/drivers/pci/controller/Kconfig
+@@ -196,7 +196,7 @@ config PCIE_MEDIATEK
+
+ config PCIE_MEDIATEK_GEN3
+ tristate "MediaTek Gen3 PCIe controller"
+- depends on ARCH_MEDIATEK || COMPILE_TEST
++ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
+ depends on PCI_MSI
+ help
+ Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
+--- a/drivers/pci/controller/pcie-mediatek-gen3.c
++++ b/drivers/pci/controller/pcie-mediatek-gen3.c
+@@ -6,7 +6,9 @@
+ * Author: Jianjun Wang
+ */
+
++#include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -15,6 +17,8 @@
+ #include
+ #include
+ #include
++#include
++#include
+ #include
+ #include
+ #include
+@@ -29,6 +33,12 @@
+ #define PCI_CLASS(class) (class << 8)
+ #define PCIE_RC_MODE BIT(0)
+
++#define PCIE_EQ_PRESET_01_REG 0x100
++#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
++#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8)
++#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16)
++#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24)
++
+ #define PCIE_CFGNUM_REG 0x140
+ #define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
+ #define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
+@@ -68,6 +78,14 @@
+ #define PCIE_MSI_SET_ENABLE_REG 0x190
+ #define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+
++#define PCIE_PIPE4_PIE8_REG 0x338
++#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
++#define PCIE_K_FINETUNE_ERR GENMASK(7, 6)
++#define PCIE_K_PRESET_TO_USE GENMASK(18, 8)
++#define PCIE_K_PHYPARAM_QUERY BIT(19)
++#define PCIE_K_QUERY_TIMEOUT BIT(20)
++#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21)
++
+ #define PCIE_MSI_SET_BASE_REG 0xc00
+ #define PCIE_MSI_SET_OFFSET 0x10
+ #define PCIE_MSI_SET_STATUS_OFFSET 0x04
+@@ -100,7 +118,10 @@
+ #define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
+ #define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+
+-#define MAX_NUM_PHY_RESETS 1
++#define MAX_NUM_PHY_RESETS 3
++
++/* Time in ms needed to complete PCIe reset on EN7581 SoC */
++#define PCIE_EN7581_RESET_TIME_MS 100
+
+ struct mtk_gen3_pcie;
+
+@@ -847,6 +868,85 @@ static int mtk_pcie_parse_port(struct mt
+ return 0;
+ }
+
++static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
++{
++ struct device *dev = pcie->dev;
++ int err;
++ u32 val;
++
++ /*
++ * Wait for the time needed to complete the bulk assert in
++ * mtk_pcie_setup for EN7581 SoC.
++ */
++ mdelay(PCIE_EN7581_RESET_TIME_MS);
++
++ err = phy_init(pcie->phy);
++ if (err) {
++ dev_err(dev, "failed to initialize PHY\n");
++ return err;
++ }
++
++ err = phy_power_on(pcie->phy);
++ if (err) {
++ dev_err(dev, "failed to power on PHY\n");
++ goto err_phy_on;
++ }
++
++ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
++ if (err) {
++ dev_err(dev, "failed to deassert PHYs\n");
++ goto err_phy_deassert;
++ }
++
++ /*
++ * Wait for the time needed to complete the bulk de-assert above.
++ * This time is specific for EN7581 SoC.
++ */
++ mdelay(PCIE_EN7581_RESET_TIME_MS);
++
++ pm_runtime_enable(dev);
++ pm_runtime_get_sync(dev);
++
++ err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
++ if (err) {
++ dev_err(dev, "failed to prepare clock\n");
++ goto err_clk_prepare;
++ }
++
++ val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
++ FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
++ FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
++ FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
++ writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
++
++ val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
++ FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
++ FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
++ FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
++ writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
++
++ err = clk_bulk_enable(pcie->num_clks, pcie->clks);
++ if (err) {
++ dev_err(dev, "failed to prepare clock\n");
++ goto err_clk_enable;
++ }
++
++ return 0;
++
++err_clk_enable:
++ clk_bulk_unprepare(pcie->num_clks, pcie->clks);
++err_clk_prepare:
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
++err_phy_deassert:
++ phy_power_off(pcie->phy);
++err_phy_on:
++ phy_exit(pcie->phy);
++
++ return err;
++}
++
+ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
+ {
+ struct device *dev = pcie->dev;
+@@ -1113,7 +1213,18 @@ static const struct mtk_gen3_pcie_pdata
+ },
+ };
+
++static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
++ .power_up = mtk_pcie_en7581_power_up,
++ .phy_resets = {
++ .id[0] = "phy-lane0",
++ .id[1] = "phy-lane1",
++ .id[2] = "phy-lane2",
++ .num_resets = 3,
++ },
++};
++
+ static const struct of_device_id mtk_pcie_of_match[] = {
++ { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
+ { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
+ {},
+ };
diff --git a/target/linux/airoha/patches-6.6/022-v6.11-phy-airoha-Add-PCIe-PHY-driver-for-EN7581-SoC.patch b/target/linux/airoha/patches-6.6/022-v6.11-phy-airoha-Add-PCIe-PHY-driver-for-EN7581-SoC.patch
new file mode 100644
index 0000000000..3f9443e4d0
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/022-v6.11-phy-airoha-Add-PCIe-PHY-driver-for-EN7581-SoC.patch
@@ -0,0 +1,1783 @@
+From d7d2818b93837def4a33f92da2e64c3a2752c47e Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Sat, 15 Jun 2024 23:15:42 +0200
+Subject: [PATCH] phy: airoha: Add PCIe PHY driver for EN7581 SoC.
+
+Introduce support for Airoha PCIe PHY controller available in EN7581
+SoC.
+
+Reviewed-by: AngeloGioacchino Del Regno
+Tested-by: Zhengping Zhang
+Signed-off-by: Lorenzo Bianconi
+Link: https://lore.kernel.org/r/20ac99aa8628d97778594f606681db7f868f24fe.1718485860.git.lorenzo@kernel.org
+Signed-off-by: Vinod Koul
+---
+ MAINTAINERS | 8 +
+ drivers/phy/Kconfig | 10 +
+ drivers/phy/Makefile | 1 +
+ drivers/phy/phy-airoha-pcie-regs.h | 477 +++++++++++
+ drivers/phy/phy-airoha-pcie.c | 1248 ++++++++++++++++++++++++++++
+ 5 files changed, 1744 insertions(+)
+ create mode 100644 drivers/phy/phy-airoha-pcie-regs.h
+ create mode 100644 drivers/phy/phy-airoha-pcie.c
+
+--- a/drivers/phy/Kconfig
++++ b/drivers/phy/Kconfig
+@@ -72,6 +72,16 @@ config PHY_CAN_TRANSCEIVER
+ functional modes using gpios and sets the attribute max link
+ rate, for CAN drivers.
+
++config PHY_AIROHA_PCIE
++ tristate "Airoha PCIe-PHY Driver"
++ depends on ARCH_AIROHA || COMPILE_TEST
++ depends on OF
++ select GENERIC_PHY
++ help
++ Say Y here to add support for Airoha PCIe PHY driver.
++ This driver create the basic PHY instance and provides initialize
++ callback for PCIe GEN3 port.
++
+ source "drivers/phy/allwinner/Kconfig"
+ source "drivers/phy/amlogic/Kconfig"
+ source "drivers/phy/broadcom/Kconfig"
+--- a/drivers/phy/Makefile
++++ b/drivers/phy/Makefile
+@@ -10,6 +10,7 @@ obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy
+ obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
+ obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
+ obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o
++obj-$(CONFIG_PHY_AIROHA_PCIE) += phy-airoha-pcie.o
+ obj-y += allwinner/ \
+ amlogic/ \
+ broadcom/ \
+--- /dev/null
++++ b/drivers/phy/phy-airoha-pcie-regs.h
+@@ -0,0 +1,477 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 AIROHA Inc
++ * Author: Lorenzo Bianconi
++ */
++
++#ifndef _PHY_AIROHA_PCIE_H
++#define _PHY_AIROHA_PCIE_H
++
++/* CSR_2L */
++#define REG_CSR_2L_CMN 0x0000
++#define CSR_2L_PXP_CMN_LANE_EN BIT(0)
++#define CSR_2L_PXP_CMN_TRIM_MASK GENMASK(28, 24)
++
++#define REG_CSR_2L_JCPLL_IB_EXT 0x0004
++#define REG_CSR_2L_JCPLL_LPF_SHCK_EN BIT(8)
++#define CSR_2L_PXP_JCPLL_CHP_IBIAS GENMASK(21, 16)
++#define CSR_2L_PXP_JCPLL_CHP_IOFST GENMASK(29, 24)
++
++#define REG_CSR_2L_JCPLL_LPF_BR 0x0008
++#define CSR_2L_PXP_JCPLL_LPF_BR GENMASK(4, 0)
++#define CSR_2L_PXP_JCPLL_LPF_BC GENMASK(12, 8)
++#define CSR_2L_PXP_JCPLL_LPF_BP GENMASK(20, 16)
++#define CSR_2L_PXP_JCPLL_LPF_BWR GENMASK(28, 24)
++
++#define REG_CSR_2L_JCPLL_LPF_BWC 0x000c
++#define CSR_2L_PXP_JCPLL_LPF_BWC GENMASK(4, 0)
++#define CSR_2L_PXP_JCPLL_KBAND_CODE GENMASK(23, 16)
++#define CSR_2L_PXP_JCPLL_KBAND_DIV GENMASK(26, 24)
++
++#define REG_CSR_2L_JCPLL_KBAND_KFC 0x0010
++#define CSR_2L_PXP_JCPLL_KBAND_KFC GENMASK(1, 0)
++#define CSR_2L_PXP_JCPLL_KBAND_KF GENMASK(9, 8)
++#define CSR_2L_PXP_JCPLL_KBAND_KS GENMASK(17, 16)
++#define CSR_2L_PXP_JCPLL_POSTDIV_EN BIT(24)
++
++#define REG_CSR_2L_JCPLL_MMD_PREDIV_MODE 0x0014
++#define CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE GENMASK(1, 0)
++#define CSR_2L_PXP_JCPLL_POSTDIV_D2 BIT(16)
++#define CSR_2L_PXP_JCPLL_POSTDIV_D5 BIT(24)
++
++#define CSR_2L_PXP_JCPLL_MONCK 0x0018
++#define CSR_2L_PXP_JCPLL_REFIN_DIV GENMASK(25, 24)
++
++#define REG_CSR_2L_JCPLL_RST_DLY 0x001c
++#define CSR_2L_PXP_JCPLL_RST_DLY GENMASK(2, 0)
++#define CSR_2L_PXP_JCPLL_RST BIT(8)
++#define CSR_2L_PXP_JCPLL_SDM_DI_EN BIT(16)
++#define CSR_2L_PXP_JCPLL_SDM_DI_LS GENMASK(25, 24)
++
++#define REG_CSR_2L_JCPLL_SDM_IFM 0x0020
++#define CSR_2L_PXP_JCPLL_SDM_IFM BIT(0)
++
++#define REG_CSR_2L_JCPLL_SDM_HREN 0x0024
++#define CSR_2L_PXP_JCPLL_SDM_HREN BIT(0)
++#define CSR_2L_PXP_JCPLL_TCL_AMP_EN BIT(8)
++#define CSR_2L_PXP_JCPLL_TCL_AMP_GAIN GENMASK(18, 16)
++#define CSR_2L_PXP_JCPLL_TCL_AMP_VREF GENMASK(28, 24)
++
++#define REG_CSR_2L_JCPLL_TCL_CMP 0x0028
++#define CSR_2L_PXP_JCPLL_TCL_LPF_EN BIT(16)
++#define CSR_2L_PXP_JCPLL_TCL_LPF_BW GENMASK(26, 24)
++
++#define REG_CSR_2L_JCPLL_VCODIV 0x002c
++#define CSR_2L_PXP_JCPLL_VCO_CFIX GENMASK(9, 8)
++#define CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN BIT(16)
++#define CSR_2L_PXP_JCPLL_VCO_SCAPWR GENMASK(26, 24)
++
++#define REG_CSR_2L_JCPLL_VCO_TCLVAR 0x0030
++#define CSR_2L_PXP_JCPLL_VCO_TCLVAR GENMASK(2, 0)
++
++#define REG_CSR_2L_JCPLL_SSC 0x0038
++#define CSR_2L_PXP_JCPLL_SSC_EN BIT(0)
++#define CSR_2L_PXP_JCPLL_SSC_PHASE_INI BIT(8)
++#define CSR_2L_PXP_JCPLL_SSC_TRI_EN BIT(16)
++
++#define REG_CSR_2L_JCPLL_SSC_DELTA1 0x003c
++#define CSR_2L_PXP_JCPLL_SSC_DELTA1 GENMASK(15, 0)
++#define CSR_2L_PXP_JCPLL_SSC_DELTA GENMASK(31, 16)
++
++#define REG_CSR_2L_JCPLL_SSC_PERIOD 0x0040
++#define CSR_2L_PXP_JCPLL_SSC_PERIOD GENMASK(15, 0)
++
++#define REG_CSR_2L_JCPLL_TCL_VTP_EN 0x004c
++#define CSR_2L_PXP_JCPLL_SPARE_LOW GENMASK(31, 24)
++
++#define REG_CSR_2L_JCPLL_TCL_KBAND_VREF 0x0050
++#define CSR_2L_PXP_JCPLL_TCL_KBAND_VREF GENMASK(4, 0)
++#define CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN BIT(24)
++
++#define REG_CSR_2L_750M_SYS_CK 0x0054
++#define CSR_2L_PXP_TXPLL_LPF_SHCK_EN BIT(16)
++#define CSR_2L_PXP_TXPLL_CHP_IBIAS GENMASK(29, 24)
++
++#define REG_CSR_2L_TXPLL_CHP_IOFST 0x0058
++#define CSR_2L_PXP_TXPLL_CHP_IOFST GENMASK(5, 0)
++#define CSR_2L_PXP_TXPLL_LPF_BR GENMASK(12, 8)
++#define CSR_2L_PXP_TXPLL_LPF_BC GENMASK(20, 16)
++#define CSR_2L_PXP_TXPLL_LPF_BP GENMASK(28, 24)
++
++#define REG_CSR_2L_TXPLL_LPF_BWR 0x005c
++#define CSR_2L_PXP_TXPLL_LPF_BWR GENMASK(4, 0)
++#define CSR_2L_PXP_TXPLL_LPF_BWC GENMASK(12, 8)
++#define CSR_2L_PXP_TXPLL_KBAND_CODE GENMASK(31, 24)
++
++#define REG_CSR_2L_TXPLL_KBAND_DIV 0x0060
++#define CSR_2L_PXP_TXPLL_KBAND_DIV GENMASK(2, 0)
++#define CSR_2L_PXP_TXPLL_KBAND_KFC GENMASK(9, 8)
++#define CSR_2L_PXP_TXPLL_KBAND_KF GENMASK(17, 16)
++#define CSR_2L_PXP_txpll_KBAND_KS GENMASK(25, 24)
++
++#define REG_CSR_2L_TXPLL_POSTDIV 0x0064
++#define CSR_2L_PXP_TXPLL_POSTDIV_EN BIT(0)
++#define CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE GENMASK(9, 8)
++#define CSR_2L_PXP_TXPLL_PHY_CK1_EN BIT(24)
++
++#define REG_CSR_2L_TXPLL_PHY_CK2 0x0068
++#define CSR_2L_PXP_TXPLL_REFIN_INTERNAL BIT(24)
++
++#define REG_CSR_2L_TXPLL_REFIN_DIV 0x006c
++#define CSR_2L_PXP_TXPLL_REFIN_DIV GENMASK(1, 0)
++#define CSR_2L_PXP_TXPLL_RST_DLY GENMASK(10, 8)
++#define CSR_2L_PXP_TXPLL_PLL_RSTB BIT(16)
++
++#define REG_CSR_2L_TXPLL_SDM_DI_LS 0x0070
++#define CSR_2L_PXP_TXPLL_SDM_DI_LS GENMASK(1, 0)
++#define CSR_2L_PXP_TXPLL_SDM_IFM BIT(8)
++#define CSR_2L_PXP_TXPLL_SDM_ORD GENMASK(25, 24)
++
++#define REG_CSR_2L_TXPLL_SDM_OUT 0x0074
++#define CSR_2L_PXP_TXPLL_TCL_AMP_EN BIT(16)
++#define CSR_2L_PXP_TXPLL_TCL_AMP_GAIN GENMASK(26, 24)
++
++#define REG_CSR_2L_TXPLL_TCL_AMP_VREF 0x0078
++#define CSR_2L_PXP_TXPLL_TCL_AMP_VREF GENMASK(4, 0)
++#define CSR_2L_PXP_TXPLL_TCL_LPF_EN BIT(24)
++
++#define REG_CSR_2L_TXPLL_TCL_LPF_BW 0x007c
++#define CSR_2L_PXP_TXPLL_TCL_LPF_BW GENMASK(2, 0)
++#define CSR_2L_PXP_TXPLL_VCO_CFIX GENMASK(17, 16)
++#define CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN BIT(24)
++
++#define REG_CSR_2L_TXPLL_VCO_SCAPWR 0x0080
++#define CSR_2L_PXP_TXPLL_VCO_SCAPWR GENMASK(2, 0)
++
++#define REG_CSR_2L_TXPLL_SSC 0x0084
++#define CSR_2L_PXP_TXPLL_SSC_EN BIT(0)
++#define CSR_2L_PXP_TXPLL_SSC_PHASE_INI BIT(8)
++
++#define REG_CSR_2L_TXPLL_SSC_DELTA1 0x0088
++#define CSR_2L_PXP_TXPLL_SSC_DELTA1 GENMASK(15, 0)
++#define CSR_2L_PXP_TXPLL_SSC_DELTA GENMASK(31, 16)
++
++#define REG_CSR_2L_TXPLL_SSC_PERIOD 0x008c
++#define CSR_2L_PXP_txpll_SSC_PERIOD GENMASK(15, 0)
++
++#define REG_CSR_2L_TXPLL_VTP 0x0090
++#define CSR_2L_PXP_TXPLL_VTP_EN BIT(0)
++
++#define REG_CSR_2L_TXPLL_TCL_VTP 0x0098
++#define CSR_2L_PXP_TXPLL_SPARE_L GENMASK(31, 24)
++
++#define REG_CSR_2L_TXPLL_TCL_KBAND_VREF 0x009c
++#define CSR_2L_PXP_TXPLL_TCL_KBAND_VREF GENMASK(4, 0)
++#define CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN BIT(24)
++
++#define REG_CSR_2L_TXPLL_POSTDIV_D256 0x00a0
++#define CSR_2L_PXP_CLKTX0_AMP GENMASK(10, 8)
++#define CSR_2L_PXP_CLKTX0_OFFSET GENMASK(17, 16)
++#define CSR_2L_PXP_CLKTX0_SR GENMASK(25, 24)
++
++#define REG_CSR_2L_CLKTX0_FORCE_OUT1 0x00a4
++#define CSR_2L_PXP_CLKTX0_HZ BIT(8)
++#define CSR_2L_PXP_CLKTX0_IMP_SEL GENMASK(20, 16)
++#define CSR_2L_PXP_CLKTX1_AMP GENMASK(26, 24)
++
++#define REG_CSR_2L_CLKTX1_OFFSET 0x00a8
++#define CSR_2L_PXP_CLKTX1_OFFSET GENMASK(1, 0)
++#define CSR_2L_PXP_CLKTX1_SR GENMASK(9, 8)
++#define CSR_2L_PXP_CLKTX1_HZ BIT(24)
++
++#define REG_CSR_2L_CLKTX1_IMP_SEL 0x00ac
++#define CSR_2L_PXP_CLKTX1_IMP_SEL GENMASK(4, 0)
++
++#define REG_CSR_2L_PLL_CMN_RESERVE0 0x00b0
++#define CSR_2L_PXP_PLL_RESERVE_MASK GENMASK(15, 0)
++
++#define REG_CSR_2L_TX0_CKLDO 0x00cc
++#define CSR_2L_PXP_TX0_CKLDO_EN BIT(0)
++#define CSR_2L_PXP_TX0_DMEDGEGEN_EN BIT(24)
++
++#define REG_CSR_2L_TX1_CKLDO 0x00e8
++#define CSR_2L_PXP_TX1_CKLDO_EN BIT(0)
++#define CSR_2L_PXP_TX1_DMEDGEGEN_EN BIT(24)
++
++#define REG_CSR_2L_TX1_MULTLANE 0x00ec
++#define CSR_2L_PXP_TX1_MULTLANE_EN BIT(0)
++
++#define REG_CSR_2L_RX0_REV0 0x00fc
++#define CSR_2L_PXP_VOS_PNINV GENMASK(3, 2)
++#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(6, 4)
++#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(10, 8)
++
++#define REG_CSR_2L_RX0_PHYCK_DIV 0x0100
++#define CSR_2L_PXP_RX0_PHYCK_SEL GENMASK(9, 8)
++#define CSR_2L_PXP_RX0_PHYCK_RSTB BIT(16)
++#define CSR_2L_PXP_RX0_TDC_CK_SEL BIT(24)
++
++#define REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV 0x0104
++#define CSR_2L_PXP_CDR0_PD_EDGE_DISABLE BIT(8)
++
++#define REG_CSR_2L_CDR0_LPF_RATIO 0x0110
++#define CSR_2L_PXP_CDR0_LPF_TOP_LIM GENMASK(26, 8)
++
++#define REG_CSR_2L_CDR0_PR_INJ_MODE 0x011c
++#define CSR_2L_PXP_CDR0_INJ_FORCE_OFF BIT(24)
++
++#define REG_CSR_2L_CDR0_PR_BETA_DAC 0x0120
++#define CSR_2L_PXP_CDR0_PR_BETA_SEL GENMASK(19, 16)
++#define CSR_2L_PXP_CDR0_PR_KBAND_DIV GENMASK(26, 24)
++
++#define REG_CSR_2L_CDR0_PR_VREG_IBAND 0x0124
++#define CSR_2L_PXP_CDR0_PR_VREG_IBAND GENMASK(2, 0)
++#define CSR_2L_PXP_CDR0_PR_VREG_CKBUF GENMASK(10, 8)
++
++#define REG_CSR_2L_CDR0_PR_CKREF_DIV 0x0128
++#define CSR_2L_PXP_CDR0_PR_CKREF_DIV GENMASK(1, 0)
++
++#define REG_CSR_2L_CDR0_PR_MONCK 0x012c
++#define CSR_2L_PXP_CDR0_PR_MONCK_ENABLE BIT(0)
++#define CSR_2L_PXP_CDR0_PR_RESERVE0 GENMASK(19, 16)
++
++#define REG_CSR_2L_CDR0_PR_COR_HBW 0x0130
++#define CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON BIT(8)
++#define CSR_2L_PXP_CDR0_PR_CKREF_DIV1 GENMASK(17, 16)
++
++#define REG_CSR_2L_CDR0_PR_MONPI 0x0134
++#define CSR_2L_PXP_CDR0_PR_XFICK_EN BIT(8)
++
++#define REG_CSR_2L_RX0_SIGDET_DCTEST 0x0140
++#define CSR_2L_PXP_RX0_SIGDET_LPF_CTRL GENMASK(9, 8)
++#define CSR_2L_PXP_RX0_SIGDET_PEAK GENMASK(25, 24)
++
++#define REG_CSR_2L_RX0_SIGDET_VTH_SEL 0x0144
++#define CSR_2L_PXP_RX0_SIGDET_VTH_SEL GENMASK(4, 0)
++#define CSR_2L_PXP_RX0_FE_VB_EQ1_EN BIT(24)
++
++#define REG_CSR_2L_PXP_RX0_FE_VB_EQ2 0x0148
++#define CSR_2L_PXP_RX0_FE_VB_EQ2_EN BIT(0)
++#define CSR_2L_PXP_RX0_FE_VB_EQ3_EN BIT(8)
++#define CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB BIT(16)
++
++#define REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS 0x0158
++#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS GENMASK(29, 24)
++
++#define REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS 0x015c
++#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS GENMASK(5, 0)
++#define CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS GENMASK(13, 8)
++
++#define REG_CSR_2L_RX1_REV0 0x01b4
++
++#define REG_CSR_2L_RX1_PHYCK_DIV 0x01b8
++#define CSR_2L_PXP_RX1_PHYCK_SEL GENMASK(9, 8)
++#define CSR_2L_PXP_RX1_PHYCK_RSTB BIT(16)
++#define CSR_2L_PXP_RX1_TDC_CK_SEL BIT(24)
++
++#define REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV 0x01bc
++#define CSR_2L_PXP_CDR1_PD_EDGE_DISABLE BIT(8)
++
++#define REG_CSR_2L_CDR1_PR_BETA_DAC 0x01d8
++#define CSR_2L_PXP_CDR1_PR_BETA_SEL GENMASK(19, 16)
++#define CSR_2L_PXP_CDR1_PR_KBAND_DIV GENMASK(26, 24)
++
++#define REG_CSR_2L_CDR1_PR_MONCK 0x01e4
++#define CSR_2L_PXP_CDR1_PR_MONCK_ENABLE BIT(0)
++#define CSR_2L_PXP_CDR1_PR_RESERVE0 GENMASK(19, 16)
++
++#define REG_CSR_2L_CDR1_LPF_RATIO 0x01c8
++#define CSR_2L_PXP_CDR1_LPF_TOP_LIM GENMASK(26, 8)
++
++#define REG_CSR_2L_CDR1_PR_INJ_MODE 0x01d4
++#define CSR_2L_PXP_CDR1_INJ_FORCE_OFF BIT(24)
++
++#define REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL 0x01dc
++#define CSR_2L_PXP_CDR1_PR_VREG_IBAND GENMASK(2, 0)
++#define CSR_2L_PXP_CDR1_PR_VREG_CKBUF GENMASK(10, 8)
++
++#define REG_CSR_2L_CDR1_PR_CKREF_DIV 0x01e0
++#define CSR_2L_PXP_CDR1_PR_CKREF_DIV GENMASK(1, 0)
++
++#define REG_CSR_2L_CDR1_PR_COR_HBW 0x01e8
++#define CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON BIT(8)
++#define CSR_2L_PXP_CDR1_PR_CKREF_DIV1 GENMASK(17, 16)
++
++#define REG_CSR_2L_CDR1_PR_MONPI 0x01ec
++#define CSR_2L_PXP_CDR1_PR_XFICK_EN BIT(8)
++
++#define REG_CSR_2L_RX1_DAC_RANGE_EYE 0x01f4
++#define CSR_2L_PXP_RX1_SIGDET_LPF_CTRL GENMASK(25, 24)
++
++#define REG_CSR_2L_RX1_SIGDET_NOVTH 0x01f8
++#define CSR_2L_PXP_RX1_SIGDET_PEAK GENMASK(9, 8)
++#define CSR_2L_PXP_RX1_SIGDET_VTH_SEL GENMASK(20, 16)
++
++#define REG_CSR_2L_RX1_FE_VB_EQ1 0x0200
++#define CSR_2L_PXP_RX1_FE_VB_EQ1_EN BIT(0)
++#define CSR_2L_PXP_RX1_FE_VB_EQ2_EN BIT(8)
++#define CSR_2L_PXP_RX1_FE_VB_EQ3_EN BIT(16)
++#define CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB BIT(24)
++
++#define REG_CSR_2L_RX1_OSCAL_VGA1IOS 0x0214
++#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS GENMASK(5, 0)
++#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS GENMASK(13, 8)
++#define CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS GENMASK(21, 16)
++
++/* PMA */
++#define REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1 0x0004
++#define PCIE_LCPLL_MAN_PWDB BIT(0)
++
++#define REG_PCIE_PMA_SEQUENCE_DISB_CTRL1 0x010c
++#define PCIE_DISB_RX_SDCAL_EN BIT(0)
++
++#define REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1 0x0114
++#define PCIE_FORCE_RX_SDCAL_EN BIT(0)
++
++#define REG_PCIE_PMA_SS_RX_FREQ_DET1 0x014c
++#define PCIE_PLL_FT_LOCK_CYCLECNT GENMASK(15, 0)
++#define PCIE_PLL_FT_UNLOCK_CYCLECNT GENMASK(31, 16)
++
++#define REG_PCIE_PMA_SS_RX_FREQ_DET2 0x0150
++#define PCIE_LOCK_TARGET_BEG GENMASK(15, 0)
++#define PCIE_LOCK_TARGET_END GENMASK(31, 16)
++
++#define REG_PCIE_PMA_SS_RX_FREQ_DET3 0x0154
++#define PCIE_UNLOCK_TARGET_BEG GENMASK(15, 0)
++#define PCIE_UNLOCK_TARGET_END GENMASK(31, 16)
++
++#define REG_PCIE_PMA_SS_RX_FREQ_DET4 0x0158
++#define PCIE_FREQLOCK_DET_EN GENMASK(2, 0)
++#define PCIE_LOCK_LOCKTH GENMASK(11, 8)
++#define PCIE_UNLOCK_LOCKTH GENMASK(15, 12)
++
++#define REG_PCIE_PMA_SS_RX_CAL1 0x0160
++#define REG_PCIE_PMA_SS_RX_CAL2 0x0164
++#define PCIE_CAL_OUT_OS GENMASK(11, 8)
++
++#define REG_PCIE_PMA_SS_RX_SIGDET0 0x0168
++#define PCIE_SIGDET_WIN_NONVLD_TIMES GENMASK(28, 24)
++
++#define REG_PCIE_PMA_TX_RESET 0x0260
++#define PCIE_TX_TOP_RST BIT(0)
++#define PCIE_TX_CAL_RST BIT(8)
++
++#define REG_PCIE_PMA_RX_FORCE_MODE0 0x0294
++#define PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL GENMASK(1, 0)
++
++#define REG_PCIE_PMA_SS_DA_XPON_PWDB0 0x034c
++#define PCIE_DA_XPON_CDR_PR_PWDB BIT(8)
++
++#define REG_PCIE_PMA_SW_RESET 0x0460
++#define PCIE_SW_RX_FIFO_RST BIT(0)
++#define PCIE_SW_RX_RST BIT(1)
++#define PCIE_SW_TX_RST BIT(2)
++#define PCIE_SW_PMA_RST BIT(3)
++#define PCIE_SW_ALLPCS_RST BIT(4)
++#define PCIE_SW_REF_RST BIT(5)
++#define PCIE_SW_TX_FIFO_RST BIT(6)
++#define PCIE_SW_XFI_TXPCS_RST BIT(7)
++#define PCIE_SW_XFI_RXPCS_RST BIT(8)
++#define PCIE_SW_XFI_RXPCS_BIST_RST BIT(9)
++#define PCIE_SW_HSG_TXPCS_RST BIT(10)
++#define PCIE_SW_HSG_RXPCS_RST BIT(11)
++#define PCIE_PMA_SW_RST (PCIE_SW_RX_FIFO_RST | \
++ PCIE_SW_RX_RST | \
++ PCIE_SW_TX_RST | \
++ PCIE_SW_PMA_RST | \
++ PCIE_SW_ALLPCS_RST | \
++ PCIE_SW_REF_RST | \
++ PCIE_SW_TX_FIFO_RST | \
++ PCIE_SW_XFI_TXPCS_RST | \
++ PCIE_SW_XFI_RXPCS_RST | \
++ PCIE_SW_XFI_RXPCS_BIST_RST | \
++ PCIE_SW_HSG_TXPCS_RST | \
++ PCIE_SW_HSG_RXPCS_RST)
++
++#define REG_PCIE_PMA_RO_RX_FREQDET 0x0530
++#define PCIE_RO_FBCK_LOCK BIT(0)
++#define PCIE_RO_FL_OUT GENMASK(31, 16)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC 0x0794
++#define PCIE_FORCE_DA_PXP_CDR_PR_IDAC GENMASK(10, 0)
++#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC BIT(16)
++#define PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW BIT(24)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW 0x0798
++#define PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW GENMASK(30, 0)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS 0x079c
++#define PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW BIT(16)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW 0x0800
++#define PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW GENMASK(30, 0)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB 0x081c
++#define PCIE_FORCE_DA_PXP_CDR_PD_PWDB BIT(0)
++#define PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB BIT(8)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C 0x0820
++#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN BIT(0)
++#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN BIT(8)
++#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN BIT(16)
++#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN BIT(24)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB 0x0824
++#define PCIE_FORCE_DA_PXP_CDR_PR_PWDB BIT(16)
++#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB BIT(24)
++
++#define REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT 0x0828
++#define PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN BIT(0)
++#define PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN BIT(8)
++#define PCIE_FORCE_DA_PXP_JCPLL_EN BIT(16)
++#define PCIE_FORCE_SEL_DA_PXP_JCPLL_EN BIT(24)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST 0x0084c
++#define PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB BIT(16)
++#define PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB BIT(24)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT 0x0854
++#define PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN BIT(0)
++#define PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN BIT(8)
++#define PCIE_FORCE_DA_PXP_TXPLL_EN BIT(16)
++#define PCIE_FORCE_SEL_DA_PXP_TXPLL_EN BIT(24)
++
++#define REG_PCIE_PMA_SCAN_MODE 0x0884
++#define PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN BIT(0)
++#define PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN BIT(8)
++
++#define REG_PCIE_PMA_DIG_RESERVE_13 0x08bc
++#define PCIE_FLL_IDAC_PCIEG1 GENMASK(10, 0)
++#define PCIE_FLL_IDAC_PCIEG2 GENMASK(26, 16)
++
++#define REG_PCIE_PMA_DIG_RESERVE_14 0x08c0
++#define PCIE_FLL_IDAC_PCIEG3 GENMASK(10, 0)
++#define PCIE_FLL_LOAD_EN BIT(16)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL 0x088c
++#define PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL GENMASK(1, 0)
++#define PCIE_FORCE_SEL_DA_PXP_RX_FE_GAIN_CTRL BIT(8)
++
++#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB 0x0894
++#define PCIE_FORCE_DA_PXP_RX_FE_PWDB BIT(0)
++#define PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB BIT(8)
++
++#define REG_PCIE_PMA_DIG_RESERVE_12 0x08b8
++#define PCIE_FORCE_PMA_RX_SPEED GENMASK(7, 4)
++#define PCIE_FORCE_SEL_PMA_RX_SPEED BIT(7)
++
++#define REG_PCIE_PMA_DIG_RESERVE_17 0x08e0
++
++#define REG_PCIE_PMA_DIG_RESERVE_18 0x08e4
++#define PCIE_PXP_RX_VTH_SEL_PCIE_G1 GENMASK(4, 0)
++#define PCIE_PXP_RX_VTH_SEL_PCIE_G2 GENMASK(12, 8)
++#define PCIE_PXP_RX_VTH_SEL_PCIE_G3 GENMASK(20, 16)
++
++#define REG_PCIE_PMA_DIG_RESERVE_19 0x08e8
++#define PCIE_PCP_RX_REV0_PCIE_GEN1 GENMASK(31, 16)
++
++#define REG_PCIE_PMA_DIG_RESERVE_20 0x08ec
++#define PCIE_PCP_RX_REV0_PCIE_GEN2 GENMASK(15, 0)
++#define PCIE_PCP_RX_REV0_PCIE_GEN3 GENMASK(31, 16)
++
++#define REG_PCIE_PMA_DIG_RESERVE_21 0x08f0
++#define REG_PCIE_PMA_DIG_RESERVE_22 0x08f4
++#define REG_PCIE_PMA_DIG_RESERVE_27 0x0908
++#define REG_PCIE_PMA_DIG_RESERVE_30 0x0914
++
++#endif /* _PHY_AIROHA_PCIE_H */
+--- /dev/null
++++ b/drivers/phy/phy-airoha-pcie.c
+@@ -0,0 +1,1248 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 AIROHA Inc
++ * Author: Lorenzo Bianconi
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "phy-airoha-pcie-regs.h"
++
++#define LEQ_LEN_CTRL_MAX_VAL 7
++#define FREQ_LOCK_MAX_ATTEMPT 10
++
++enum airoha_pcie_port_gen {
++ PCIE_PORT_GEN1 = 1,
++ PCIE_PORT_GEN2,
++ PCIE_PORT_GEN3,
++};
++
++/**
++ * struct airoha_pcie_phy - PCIe phy driver main structure
++ * @dev: pointer to device
++ * @phy: pointer to generic phy
++ * @csr_2l: Analogic lane IO mapped register base address
++ * @pma0: IO mapped register base address of PMA0-PCIe
++ * @pma1: IO mapped register base address of PMA1-PCIe
++ */
++struct airoha_pcie_phy {
++ struct device *dev;
++ struct phy *phy;
++ void __iomem *csr_2l;
++ void __iomem *pma0;
++ void __iomem *pma1;
++};
++
++static void airoha_phy_clear_bits(void __iomem *reg, u32 mask)
++{
++ u32 val = readl(reg) & ~mask;
++
++ writel(val, reg);
++}
++
++static void airoha_phy_set_bits(void __iomem *reg, u32 mask)
++{
++ u32 val = readl(reg) | mask;
++
++ writel(val, reg);
++}
++
++static void airoha_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
++{
++ u32 tmp = readl(reg);
++
++ tmp &= ~mask;
++ tmp |= val & mask;
++ writel(tmp, reg);
++}
++
++#define airoha_phy_update_field(reg, mask, val) \
++ do { \
++ BUILD_BUG_ON_MSG(!__builtin_constant_p((mask)), \
++ "mask is not constant"); \
++ airoha_phy_update_bits((reg), (mask), \
++ FIELD_PREP((mask), (val))); \
++ } while (0)
++
++#define airoha_phy_csr_2l_clear_bits(pcie_phy, reg, mask) \
++ airoha_phy_clear_bits((pcie_phy)->csr_2l + (reg), (mask))
++#define airoha_phy_csr_2l_set_bits(pcie_phy, reg, mask) \
++ airoha_phy_set_bits((pcie_phy)->csr_2l + (reg), (mask))
++#define airoha_phy_csr_2l_update_field(pcie_phy, reg, mask, val) \
++ airoha_phy_update_field((pcie_phy)->csr_2l + (reg), (mask), (val))
++#define airoha_phy_pma0_clear_bits(pcie_phy, reg, mask) \
++ airoha_phy_clear_bits((pcie_phy)->pma0 + (reg), (mask))
++#define airoha_phy_pma1_clear_bits(pcie_phy, reg, mask) \
++ airoha_phy_clear_bits((pcie_phy)->pma1 + (reg), (mask))
++#define airoha_phy_pma0_set_bits(pcie_phy, reg, mask) \
++ airoha_phy_set_bits((pcie_phy)->pma0 + (reg), (mask))
++#define airoha_phy_pma1_set_bits(pcie_phy, reg, mask) \
++ airoha_phy_set_bits((pcie_phy)->pma1 + (reg), (mask))
++#define airoha_phy_pma0_update_field(pcie_phy, reg, mask, val) \
++ airoha_phy_update_field((pcie_phy)->pma0 + (reg), (mask), (val))
++#define airoha_phy_pma1_update_field(pcie_phy, reg, mask, val) \
++ airoha_phy_update_field((pcie_phy)->pma1 + (reg), (mask), (val))
++
++static void
++airoha_phy_init_lane0_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy,
++ enum airoha_pcie_port_gen gen)
++{
++ u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941;
++ u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 26000 : 32767;
++ u32 pr_idac, val, cdr_pr_idac_tmp = 0;
++ int i;
++
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1,
++ PCIE_LCPLL_MAN_PWDB);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
++ PCIE_LOCK_TARGET_BEG,
++ fl_out_target - 100);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
++ PCIE_LOCK_TARGET_END,
++ fl_out_target + 100);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
++ PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_LOCK_LOCKTH, 0x3);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
++ PCIE_UNLOCK_TARGET_BEG,
++ fl_out_target - 100);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
++ PCIE_UNLOCK_TARGET_END,
++ fl_out_target + 100);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
++ PCIE_PLL_FT_UNLOCK_CYCLECNT,
++ lock_cyclecnt);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_UNLOCK_LOCKTH, 0x3);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE,
++ CSR_2L_PXP_CDR0_INJ_FORCE_OFF);
++
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN);
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN);
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
++
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
++ PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
++ PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
++
++ for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) {
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN);
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN, 0x3);
++
++ usleep_range(10000, 15000);
++
++ val = FIELD_GET(PCIE_RO_FL_OUT,
++ readl(pcie_phy->pma0 +
++ REG_PCIE_PMA_RO_RX_FREQDET));
++ if (val > fl_out_target)
++ cdr_pr_idac_tmp = i << 8;
++ }
++
++ for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) {
++ pr_idac = cdr_pr_idac_tmp | (0x1 << i);
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN);
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN, 0x3);
++
++ usleep_range(10000, 15000);
++
++ val = FIELD_GET(PCIE_RO_FL_OUT,
++ readl(pcie_phy->pma0 +
++ REG_PCIE_PMA_RO_RX_FREQDET));
++ if (val < fl_out_target)
++ pr_idac &= ~(0x1 << i);
++
++ cdr_pr_idac_tmp = pr_idac;
++ }
++
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_DA_PXP_CDR_PR_IDAC,
++ cdr_pr_idac_tmp);
++
++ for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) {
++ u32 val;
++
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN);
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN, 0x3);
++
++ usleep_range(10000, 15000);
++
++ val = readl(pcie_phy->pma0 + REG_PCIE_PMA_RO_RX_FREQDET);
++ if (val & PCIE_RO_FBCK_LOCK)
++ break;
++ }
++
++ /* turn off force mode and update band values */
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE,
++ CSR_2L_PXP_CDR0_INJ_FORCE_OFF);
++
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
++ if (gen == PCIE_PORT_GEN3) {
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_DIG_RESERVE_14,
++ PCIE_FLL_IDAC_PCIEG3,
++ cdr_pr_idac_tmp);
++ } else {
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_DIG_RESERVE_13,
++ PCIE_FLL_IDAC_PCIEG1,
++ cdr_pr_idac_tmp);
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_DIG_RESERVE_13,
++ PCIE_FLL_IDAC_PCIEG2,
++ cdr_pr_idac_tmp);
++ }
++}
++
++static void
++airoha_phy_init_lane1_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy,
++ enum airoha_pcie_port_gen gen)
++{
++ u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941;
++ u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 26000 : 32767;
++ u32 pr_idac, val, cdr_pr_idac_tmp = 0;
++ int i;
++
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1,
++ PCIE_LCPLL_MAN_PWDB);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
++ PCIE_LOCK_TARGET_BEG,
++ fl_out_target - 100);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2,
++ PCIE_LOCK_TARGET_END,
++ fl_out_target + 100);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
++ PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_LOCK_LOCKTH, 0x3);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
++ PCIE_UNLOCK_TARGET_BEG,
++ fl_out_target - 100);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3,
++ PCIE_UNLOCK_TARGET_END,
++ fl_out_target + 100);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1,
++ PCIE_PLL_FT_UNLOCK_CYCLECNT,
++ lock_cyclecnt);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_UNLOCK_LOCKTH, 0x3);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE,
++ CSR_2L_PXP_CDR1_INJ_FORCE_OFF);
++
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
++ PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
++ PCIE_FORCE_DA_PXP_CDR_PR_PWDB);
++
++ for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) {
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN);
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN, 0x3);
++
++ usleep_range(10000, 15000);
++
++ val = FIELD_GET(PCIE_RO_FL_OUT,
++ readl(pcie_phy->pma1 +
++ REG_PCIE_PMA_RO_RX_FREQDET));
++ if (val > fl_out_target)
++ cdr_pr_idac_tmp = i << 8;
++ }
++
++ for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) {
++ pr_idac = cdr_pr_idac_tmp | (0x1 << i);
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN);
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN, 0x3);
++
++ usleep_range(10000, 15000);
++
++ val = FIELD_GET(PCIE_RO_FL_OUT,
++ readl(pcie_phy->pma1 +
++ REG_PCIE_PMA_RO_RX_FREQDET));
++ if (val < fl_out_target)
++ pr_idac &= ~(0x1 << i);
++
++ cdr_pr_idac_tmp = pr_idac;
++ }
++
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_DA_PXP_CDR_PR_IDAC,
++ cdr_pr_idac_tmp);
++
++ for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) {
++ u32 val;
++
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN);
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_SS_RX_FREQ_DET4,
++ PCIE_FREQLOCK_DET_EN, 0x3);
++
++ usleep_range(10000, 15000);
++
++ val = readl(pcie_phy->pma1 + REG_PCIE_PMA_RO_RX_FREQDET);
++ if (val & PCIE_RO_FBCK_LOCK)
++ break;
++ }
++
++ /* turn off force mode and update band values */
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE,
++ CSR_2L_PXP_CDR1_INJ_FORCE_OFF);
++
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC);
++ if (gen == PCIE_PORT_GEN3) {
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_DIG_RESERVE_14,
++ PCIE_FLL_IDAC_PCIEG3,
++ cdr_pr_idac_tmp);
++ } else {
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_DIG_RESERVE_13,
++ PCIE_FLL_IDAC_PCIEG1,
++ cdr_pr_idac_tmp);
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_DIG_RESERVE_13,
++ PCIE_FLL_IDAC_PCIEG2,
++ cdr_pr_idac_tmp);
++ }
++}
++
++static void airoha_pcie_phy_init_default(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CMN,
++ CSR_2L_PXP_CMN_TRIM_MASK, 0x10);
++ writel(0xcccbcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_21);
++ writel(0xcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_22);
++ writel(0xcccbcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_21);
++ writel(0xcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_22);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CMN,
++ CSR_2L_PXP_CMN_LANE_EN);
++}
++
++static void airoha_pcie_phy_init_clk_out(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_TXPLL_POSTDIV_D256,
++ CSR_2L_PXP_CLKTX0_AMP, 0x5);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_CLKTX0_FORCE_OUT1,
++ CSR_2L_PXP_CLKTX1_AMP, 0x5);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_TXPLL_POSTDIV_D256,
++ CSR_2L_PXP_CLKTX0_OFFSET, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
++ CSR_2L_PXP_CLKTX1_OFFSET, 0x2);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX0_FORCE_OUT1,
++ CSR_2L_PXP_CLKTX0_HZ);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
++ CSR_2L_PXP_CLKTX1_HZ);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_CLKTX0_FORCE_OUT1,
++ CSR_2L_PXP_CLKTX0_IMP_SEL, 0x12);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_IMP_SEL,
++ CSR_2L_PXP_CLKTX1_IMP_SEL, 0x12);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV_D256,
++ CSR_2L_PXP_CLKTX0_SR);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
++ CSR_2L_PXP_CLKTX1_SR);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0,
++ CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd);
++}
++
++static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
++ PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
++ PCIE_SW_RX_RST);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
++ PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
++ PCIE_SW_RX_RST);
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
++ PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
++ PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
++}
++
++static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy)
++{
++ writel(0x2a00090b, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_17);
++ writel(0x2a00090b, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_17);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONPI,
++ CSR_2L_PXP_CDR0_PR_XFICK_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONPI,
++ CSR_2L_PXP_CDR1_PR_XFICK_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy,
++ REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV,
++ CSR_2L_PXP_CDR0_PD_EDGE_DISABLE);
++ airoha_phy_csr_2l_clear_bits(pcie_phy,
++ REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV,
++ CSR_2L_PXP_CDR1_PD_EDGE_DISABLE);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV,
++ CSR_2L_PXP_RX0_PHYCK_SEL, 0x1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV,
++ CSR_2L_PXP_RX1_PHYCK_SEL, 0x1);
++}
++
++static void airoha_pcie_phy_init_jcpll(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_JCPLL_EN);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_JCPLL_EN);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_VTP_EN,
++ CSR_2L_PXP_JCPLL_SPARE_LOW, 0x20);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
++ CSR_2L_PXP_JCPLL_RST);
++ writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_JCPLL_SSC_DELTA1);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD,
++ CSR_2L_PXP_JCPLL_SSC_PERIOD);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
++ CSR_2L_PXP_JCPLL_SSC_PHASE_INI);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
++ CSR_2L_PXP_JCPLL_SSC_TRI_EN);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
++ CSR_2L_PXP_JCPLL_LPF_BR, 0xa);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
++ CSR_2L_PXP_JCPLL_LPF_BP, 0xc);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
++ CSR_2L_PXP_JCPLL_LPF_BC, 0x1f);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
++ CSR_2L_PXP_JCPLL_LPF_BWC, 0x1e);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
++ CSR_2L_PXP_JCPLL_LPF_BWR, 0xa);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
++ CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE,
++ 0x1);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, CSR_2L_PXP_JCPLL_MONCK,
++ CSR_2L_PXP_JCPLL_REFIN_DIV);
++
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW);
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW,
++ PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW,
++ 0x50000000);
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW,
++ PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW,
++ 0x50000000);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy,
++ REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
++ CSR_2L_PXP_JCPLL_POSTDIV_D5);
++ airoha_phy_csr_2l_set_bits(pcie_phy,
++ REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
++ CSR_2L_PXP_JCPLL_POSTDIV_D2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
++ CSR_2L_PXP_JCPLL_RST_DLY, 0x4);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
++ CSR_2L_PXP_JCPLL_SDM_DI_LS);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_KBAND_VREF,
++ CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
++ CSR_2L_PXP_JCPLL_CHP_IOFST);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
++ CSR_2L_PXP_JCPLL_CHP_IBIAS, 0xc);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
++ CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE,
++ 0x1);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
++ CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
++ CSR_2L_PXP_JCPLL_VCO_CFIX, 0x1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
++ CSR_2L_PXP_JCPLL_VCO_SCAPWR, 0x4);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
++ REG_CSR_2L_JCPLL_LPF_SHCK_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
++ CSR_2L_PXP_JCPLL_POSTDIV_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
++ CSR_2L_PXP_JCPLL_KBAND_KFC);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
++ CSR_2L_PXP_JCPLL_KBAND_KF, 0x3);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
++ CSR_2L_PXP_JCPLL_KBAND_KS);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
++ CSR_2L_PXP_JCPLL_KBAND_DIV, 0x1);
++
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN);
++ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE,
++ PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN);
++
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
++ CSR_2L_PXP_JCPLL_KBAND_CODE, 0xe4);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
++ CSR_2L_PXP_JCPLL_TCL_AMP_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP,
++ CSR_2L_PXP_JCPLL_TCL_LPF_EN);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_JCPLL_TCL_KBAND_VREF,
++ CSR_2L_PXP_JCPLL_TCL_KBAND_VREF, 0xf);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
++ CSR_2L_PXP_JCPLL_TCL_AMP_GAIN, 0x1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
++ CSR_2L_PXP_JCPLL_TCL_AMP_VREF, 0x5);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP,
++ CSR_2L_PXP_JCPLL_TCL_LPF_BW, 0x1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCO_TCLVAR,
++ CSR_2L_PXP_JCPLL_VCO_TCLVAR, 0x3);
++
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN);
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN);
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_JCPLL_EN);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_JCPLL_EN);
++}
++
++static void airoha_pcie_phy_txpll(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_TXPLL_EN);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_TXPLL_EN);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
++ CSR_2L_PXP_TXPLL_PLL_RSTB);
++ writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC_PERIOD,
++ CSR_2L_PXP_txpll_SSC_PERIOD);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
++ CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK,
++ CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
++ CSR_2L_PXP_TXPLL_REFIN_DIV);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
++ CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3);
++
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW,
++ PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW,
++ 0xc800000);
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW,
++ PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW,
++ 0xc800000);
++
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
++ CSR_2L_PXP_TXPLL_SDM_IFM);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
++ CSR_2L_PXP_TXPLL_SSC_PHASE_INI);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
++ CSR_2L_PXP_TXPLL_RST_DLY, 0x4);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
++ CSR_2L_PXP_TXPLL_SDM_DI_LS);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
++ CSR_2L_PXP_TXPLL_SDM_ORD, 0x3);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
++ CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN);
++ writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
++ CSR_2L_PXP_TXPLL_LPF_BP, 0x1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
++ CSR_2L_PXP_TXPLL_LPF_BC, 0x18);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
++ CSR_2L_PXP_TXPLL_LPF_BR, 0x5);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
++ CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK,
++ CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_VTP,
++ CSR_2L_PXP_TXPLL_SPARE_L, 0x1);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
++ CSR_2L_PXP_TXPLL_LPF_BWC);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
++ CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
++ CSR_2L_PXP_TXPLL_REFIN_DIV);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
++ CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_VCO_SCAPWR,
++ CSR_2L_PXP_TXPLL_VCO_SCAPWR, 0x7);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
++ CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3);
++
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
++
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
++ CSR_2L_PXP_TXPLL_SSC_PHASE_INI);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
++ CSR_2L_PXP_TXPLL_LPF_BWR);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2,
++ CSR_2L_PXP_TXPLL_REFIN_INTERNAL);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
++ CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_VTP,
++ CSR_2L_PXP_TXPLL_VTP_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
++ CSR_2L_PXP_TXPLL_PHY_CK1_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2,
++ CSR_2L_PXP_TXPLL_REFIN_INTERNAL);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
++ CSR_2L_PXP_TXPLL_SSC_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_750M_SYS_CK,
++ CSR_2L_PXP_TXPLL_LPF_SHCK_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
++ CSR_2L_PXP_TXPLL_POSTDIV_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
++ CSR_2L_PXP_TXPLL_KBAND_KFC);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
++ CSR_2L_PXP_TXPLL_KBAND_KF, 0x3);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
++ CSR_2L_PXP_txpll_KBAND_KS, 0x1);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
++ CSR_2L_PXP_TXPLL_KBAND_DIV, 0x4);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
++ CSR_2L_PXP_TXPLL_KBAND_CODE, 0xe4);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT,
++ CSR_2L_PXP_TXPLL_TCL_AMP_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_AMP_VREF,
++ CSR_2L_PXP_TXPLL_TCL_LPF_EN);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
++ CSR_2L_PXP_TXPLL_TCL_KBAND_VREF, 0xf);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT,
++ CSR_2L_PXP_TXPLL_TCL_AMP_GAIN, 0x3);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_TXPLL_TCL_AMP_VREF,
++ CSR_2L_PXP_TXPLL_TCL_AMP_VREF, 0xb);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
++ CSR_2L_PXP_TXPLL_TCL_LPF_BW, 0x3);
++
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN);
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN);
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_TXPLL_EN);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
++ PCIE_FORCE_DA_PXP_TXPLL_EN);
++}
++
++static void airoha_pcie_phy_init_ssc_jcpll(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1,
++ CSR_2L_PXP_JCPLL_SSC_DELTA1, 0x106);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1,
++ CSR_2L_PXP_JCPLL_SSC_DELTA, 0x106);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD,
++ CSR_2L_PXP_JCPLL_SSC_PERIOD, 0x31b);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
++ CSR_2L_PXP_JCPLL_SSC_PHASE_INI);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
++ CSR_2L_PXP_JCPLL_SSC_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM,
++ CSR_2L_PXP_JCPLL_SDM_IFM);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
++ REG_CSR_2L_JCPLL_SDM_HREN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
++ CSR_2L_PXP_JCPLL_SDM_DI_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
++ CSR_2L_PXP_JCPLL_SSC_TRI_EN);
++}
++
++static void
++airoha_pcie_phy_set_rxlan0_signal_detect(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW,
++ CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON);
++
++ usleep_range(100, 200);
++
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19,
++ PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
++ PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
++ PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030);
++
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST,
++ CSR_2L_PXP_RX0_SIGDET_PEAK, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL,
++ CSR_2L_PXP_RX0_SIGDET_VTH_SEL, 0x5);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
++ CSR_2L_PXP_VOS_PNINV, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST,
++ CSR_2L_PXP_RX0_SIGDET_LPF_CTRL, 0x1);
++
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2,
++ PCIE_CAL_OUT_OS, 0x0);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2,
++ CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB);
++
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
++ PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
++ airoha_phy_pma0_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
++ PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0,
++ PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0,
++ PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3);
++ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1,
++ PCIE_DISB_RX_SDCAL_EN);
++
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
++ PCIE_FORCE_RX_SDCAL_EN);
++ usleep_range(150, 200);
++ airoha_phy_pma0_clear_bits(pcie_phy,
++ REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
++ PCIE_FORCE_RX_SDCAL_EN);
++}
++
++static void
++airoha_pcie_phy_set_rxlan1_signal_detect(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW,
++ CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON);
++
++ usleep_range(100, 200);
++
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19,
++ PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
++ PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
++ PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030);
++
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH,
++ CSR_2L_PXP_RX1_SIGDET_PEAK, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH,
++ CSR_2L_PXP_RX1_SIGDET_VTH_SEL, 0x5);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
++ CSR_2L_PXP_VOS_PNINV, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_DAC_RANGE_EYE,
++ CSR_2L_PXP_RX1_SIGDET_LPF_CTRL, 0x1);
++
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2,
++ PCIE_CAL_OUT_OS, 0x0);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1,
++ CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB);
++
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
++ PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
++ airoha_phy_pma1_update_field(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
++ PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0,
++ PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0,
++ PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3);
++ airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1,
++ PCIE_DISB_RX_SDCAL_EN);
++
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
++ PCIE_FORCE_RX_SDCAL_EN);
++ usleep_range(150, 200);
++ airoha_phy_pma1_clear_bits(pcie_phy,
++ REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
++ PCIE_FORCE_RX_SDCAL_EN);
++}
++
++static void airoha_pcie_phy_set_rxflow(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST,
++ PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB |
++ PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST,
++ PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB |
++ PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB);
++
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB,
++ PCIE_FORCE_DA_PXP_CDR_PD_PWDB |
++ PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB);
++ airoha_phy_pma0_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB,
++ PCIE_FORCE_DA_PXP_RX_FE_PWDB |
++ PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB,
++ PCIE_FORCE_DA_PXP_CDR_PD_PWDB |
++ PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB);
++ airoha_phy_pma1_set_bits(pcie_phy,
++ REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB,
++ PCIE_FORCE_DA_PXP_RX_FE_PWDB |
++ PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV,
++ CSR_2L_PXP_RX0_PHYCK_RSTB |
++ CSR_2L_PXP_RX0_TDC_CK_SEL);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV,
++ CSR_2L_PXP_RX1_PHYCK_RSTB |
++ CSR_2L_PXP_RX1_TDC_CK_SEL);
++
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
++ PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST |
++ PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST |
++ PCIE_SW_TX_FIFO_RST);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
++ PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST |
++ PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST |
++ PCIE_SW_TX_FIFO_RST);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2,
++ CSR_2L_PXP_RX0_FE_VB_EQ2_EN |
++ CSR_2L_PXP_RX0_FE_VB_EQ3_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL,
++ CSR_2L_PXP_RX0_FE_VB_EQ1_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1,
++ CSR_2L_PXP_RX1_FE_VB_EQ1_EN |
++ CSR_2L_PXP_RX1_FE_VB_EQ2_EN |
++ CSR_2L_PXP_RX1_FE_VB_EQ3_EN);
++
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
++ CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
++ CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
++ CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
++ CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4);
++}
++
++static void airoha_pcie_phy_set_pr(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND,
++ CSR_2L_PXP_CDR0_PR_VREG_IBAND, 0x5);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND,
++ CSR_2L_PXP_CDR0_PR_VREG_CKBUF, 0x5);
++
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_CKREF_DIV,
++ CSR_2L_PXP_CDR0_PR_CKREF_DIV);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW,
++ CSR_2L_PXP_CDR0_PR_CKREF_DIV1);
++
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL,
++ CSR_2L_PXP_CDR1_PR_VREG_IBAND, 0x5);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL,
++ CSR_2L_PXP_CDR1_PR_VREG_CKBUF, 0x5);
++
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_CKREF_DIV,
++ CSR_2L_PXP_CDR1_PR_CKREF_DIV);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW,
++ CSR_2L_PXP_CDR1_PR_CKREF_DIV1);
++
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_LPF_RATIO,
++ CSR_2L_PXP_CDR0_LPF_TOP_LIM, 0x20000);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_LPF_RATIO,
++ CSR_2L_PXP_CDR1_LPF_TOP_LIM, 0x20000);
++
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC,
++ CSR_2L_PXP_CDR0_PR_BETA_SEL, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC,
++ CSR_2L_PXP_CDR1_PR_BETA_SEL, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC,
++ CSR_2L_PXP_CDR0_PR_KBAND_DIV, 0x4);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC,
++ CSR_2L_PXP_CDR1_PR_KBAND_DIV, 0x4);
++}
++
++static void airoha_pcie_phy_set_txflow(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO,
++ CSR_2L_PXP_TX0_CKLDO_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO,
++ CSR_2L_PXP_TX1_CKLDO_EN);
++
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO,
++ CSR_2L_PXP_TX0_DMEDGEGEN_EN);
++ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO,
++ CSR_2L_PXP_TX1_DMEDGEGEN_EN);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TX1_MULTLANE,
++ CSR_2L_PXP_TX1_MULTLANE_EN);
++}
++
++static void airoha_pcie_phy_set_rx_mode(struct airoha_pcie_phy *pcie_phy)
++{
++ writel(0x804000, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_27);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
++ PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
++ PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5);
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
++ PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5);
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30,
++ 0x77700);
++
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK,
++ CSR_2L_PXP_CDR0_PR_MONCK_ENABLE);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK,
++ CSR_2L_PXP_CDR0_PR_RESERVE0, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS,
++ CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS, 0x19);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS,
++ CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS, 0x19);
++ airoha_phy_csr_2l_update_field(pcie_phy,
++ REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS,
++ CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS, 0x14);
++
++ writel(0x804000, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_27);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
++ PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
++ PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
++ PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5);
++
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30,
++ 0x77700);
++
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK,
++ CSR_2L_PXP_CDR1_PR_MONCK_ENABLE);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK,
++ CSR_2L_PXP_CDR1_PR_RESERVE0, 0x2);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
++ CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS, 0x19);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
++ CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS, 0x19);
++ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
++ CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS, 0x14);
++}
++
++static void airoha_pcie_phy_load_kflow(struct airoha_pcie_phy *pcie_phy)
++{
++ airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
++ PCIE_FORCE_PMA_RX_SPEED, 0xa);
++ airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
++ PCIE_FORCE_PMA_RX_SPEED, 0xa);
++ airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3);
++ airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3);
++
++ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
++ PCIE_FORCE_PMA_RX_SPEED);
++ airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
++ PCIE_FORCE_PMA_RX_SPEED);
++ usleep_range(100, 200);
++
++ airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2);
++ airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2);
++}
++
++/**
++ * airoha_pcie_phy_init() - Initialize the phy
++ * @phy: the phy to be initialized
++ *
++ * Initialize the phy registers.
++ * The hardware settings will be reset during suspend, it should be
++ * reinitialized when the consumer calls phy_init() again on resume.
++ */
++static int airoha_pcie_phy_init(struct phy *phy)
++{
++ struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
++
++ /* enable load FLL-K flow */
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
++ PCIE_FLL_LOAD_EN);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
++ PCIE_FLL_LOAD_EN);
++
++ airoha_pcie_phy_init_default(pcie_phy);
++ airoha_pcie_phy_init_clk_out(pcie_phy);
++ airoha_pcie_phy_init_csr_2l(pcie_phy);
++
++ usleep_range(100, 200);
++
++ airoha_pcie_phy_init_rx(pcie_phy);
++ /* phase 1, no ssc for K TXPLL */
++ airoha_pcie_phy_init_jcpll(pcie_phy);
++
++ usleep_range(500, 600);
++
++ /* TX PLL settings */
++ airoha_pcie_phy_txpll(pcie_phy);
++
++ usleep_range(200, 300);
++
++ /* SSC JCPLL setting */
++ airoha_pcie_phy_init_ssc_jcpll(pcie_phy);
++
++ usleep_range(100, 200);
++
++ /* Rx lan0 signal detect */
++ airoha_pcie_phy_set_rxlan0_signal_detect(pcie_phy);
++ /* Rx lan1 signal detect */
++ airoha_pcie_phy_set_rxlan1_signal_detect(pcie_phy);
++ /* RX FLOW */
++ airoha_pcie_phy_set_rxflow(pcie_phy);
++
++ usleep_range(100, 200);
++
++ airoha_pcie_phy_set_pr(pcie_phy);
++ /* TX FLOW */
++ airoha_pcie_phy_set_txflow(pcie_phy);
++
++ usleep_range(100, 200);
++ /* RX mode setting */
++ airoha_pcie_phy_set_rx_mode(pcie_phy);
++ /* Load K-Flow */
++ airoha_pcie_phy_load_kflow(pcie_phy);
++ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
++ PCIE_DA_XPON_CDR_PR_PWDB);
++ airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
++ PCIE_DA_XPON_CDR_PR_PWDB);
++
++ usleep_range(100, 200);
++
++ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
++ PCIE_DA_XPON_CDR_PR_PWDB);
++ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
++ PCIE_DA_XPON_CDR_PR_PWDB);
++
++ usleep_range(100, 200);
++
++ return 0;
++}
++
++static int airoha_pcie_phy_exit(struct phy *phy)
++{
++ struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
++
++ airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
++ PCIE_PMA_SW_RST);
++ airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
++ PCIE_PMA_SW_RST);
++ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
++ CSR_2L_PXP_JCPLL_SSC_PHASE_INI |
++ CSR_2L_PXP_JCPLL_SSC_TRI_EN |
++ CSR_2L_PXP_JCPLL_SSC_EN);
++
++ return 0;
++}
++
++static const struct phy_ops airoha_pcie_phy_ops = {
++ .init = airoha_pcie_phy_init,
++ .exit = airoha_pcie_phy_exit,
++ .owner = THIS_MODULE,
++};
++
++static int airoha_pcie_phy_probe(struct platform_device *pdev)
++{
++ struct airoha_pcie_phy *pcie_phy;
++ struct device *dev = &pdev->dev;
++ struct phy_provider *provider;
++
++ pcie_phy = devm_kzalloc(dev, sizeof(*pcie_phy), GFP_KERNEL);
++ if (!pcie_phy)
++ return -ENOMEM;
++
++ pcie_phy->csr_2l = devm_platform_ioremap_resource_byname(pdev, "csr-2l");
++ if (IS_ERR(pcie_phy->csr_2l))
++ return dev_err_probe(dev, PTR_ERR(pcie_phy->csr_2l),
++ "Failed to map phy-csr-2l base\n");
++
++ pcie_phy->pma0 = devm_platform_ioremap_resource_byname(pdev, "pma0");
++ if (IS_ERR(pcie_phy->pma0))
++ return dev_err_probe(dev, PTR_ERR(pcie_phy->pma0),
++ "Failed to map phy-pma0 base\n");
++
++ pcie_phy->pma1 = devm_platform_ioremap_resource_byname(pdev, "pma1");
++ if (IS_ERR(pcie_phy->pma1))
++ return dev_err_probe(dev, PTR_ERR(pcie_phy->pma1),
++ "Failed to map phy-pma1 base\n");
++
++ pcie_phy->phy = devm_phy_create(dev, dev->of_node, &airoha_pcie_phy_ops);
++ if (IS_ERR(pcie_phy->phy))
++ return dev_err_probe(dev, PTR_ERR(pcie_phy->phy),
++ "Failed to create PCIe phy\n");
++
++ pcie_phy->dev = dev;
++ phy_set_drvdata(pcie_phy->phy, pcie_phy);
++
++ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
++ if (IS_ERR(provider))
++ return dev_err_probe(dev, PTR_ERR(provider),
++ "PCIe phy probe failed\n");
++
++ return 0;
++}
++
++static const struct of_device_id airoha_pcie_phy_of_match[] = {
++ { .compatible = "airoha,en7581-pcie-phy" },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, airoha_pcie_phy_of_match);
++
++static struct platform_driver airoha_pcie_phy_driver = {
++ .probe = airoha_pcie_phy_probe,
++ .driver = {
++ .name = "airoha-pcie-phy",
++ .of_match_table = airoha_pcie_phy_of_match,
++ },
++};
++module_platform_driver(airoha_pcie_phy_driver);
++
++MODULE_DESCRIPTION("Airoha PCIe PHY driver");
++MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
++MODULE_LICENSE("GPL");
diff --git a/target/linux/airoha/patches-6.6/023-v6.11-phy-airoha-Add-dtime-and-Rx-AEQ-IO-registers.patch b/target/linux/airoha/patches-6.6/023-v6.11-phy-airoha-Add-dtime-and-Rx-AEQ-IO-registers.patch
new file mode 100644
index 0000000000..51be7664b4
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/023-v6.11-phy-airoha-Add-dtime-and-Rx-AEQ-IO-registers.patch
@@ -0,0 +1,112 @@
+From 2a011c3c12e8de461fb1fdce85fa38d308c4eb8b Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sat, 29 Jun 2024 19:51:49 +0200
+Subject: [PATCH] phy: airoha: Add dtime and Rx AEQ IO registers
+
+Introduce Tx-Rx detection Time and Rx AEQ training mappings to
+phy-airoha-pcie driver. This is a preliminary patch to introduce PCIe
+support to En7581 SoC through the mediatek-gen3 PCIe driver.
+This change is not introducing any backward compatibility issue since
+the EN7581 dts is not upstream yet.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://lore.kernel.org/r/edf3b28926177166c65256604d69f2f576cb6fb3.1719682943.git.lorenzo@kernel.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+---
+ drivers/phy/phy-airoha-pcie-regs.h | 17 +++++++++++++
+ drivers/phy/phy-airoha-pcie.c | 38 ++++++++++++++++++++++++++++++
+ 2 files changed, 55 insertions(+)
+
+--- a/drivers/phy/phy-airoha-pcie-regs.h
++++ b/drivers/phy/phy-airoha-pcie-regs.h
+@@ -474,4 +474,21 @@
+ #define REG_PCIE_PMA_DIG_RESERVE_27 0x0908
+ #define REG_PCIE_PMA_DIG_RESERVE_30 0x0914
+
++/* DTIME */
++#define REG_PCIE_PEXTP_DIG_GLB44 0x00
++#define PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL GENMASK(7, 0)
++#define PCIE_XTP_RXDET_EN_STB_T_SEL GENMASK(15, 8)
++#define PCIE_XTP_RXDET_FINISH_STB_T_SEL GENMASK(23, 16)
++#define PCIE_XTP_TXPD_TX_DATA_EN_DLY GENMASK(27, 24)
++#define PCIE_XTP_TXPD_RXDET_DONE_CDT BIT(28)
++#define PCIE_XTP_RXDET_LATCH_STB_T_SEL GENMASK(31, 29)
++
++/* RX AEQ */
++#define REG_PCIE_PEXTP_DIG_LN_RX30_P0 0x0000
++#define PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT GENMASK(7, 0)
++#define PCIE_XTP_LN_RX_PDOWN_T2RLB_DIG_EN BIT(8)
++#define PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT GENMASK(31, 16)
++
++#define REG_PCIE_PEXTP_DIG_LN_RX30_P1 0x0100
++
+ #endif /* _PHY_AIROHA_PCIE_H */
+--- a/drivers/phy/phy-airoha-pcie.c
++++ b/drivers/phy/phy-airoha-pcie.c
+@@ -31,6 +31,9 @@ enum airoha_pcie_port_gen {
+ * @csr_2l: Analogic lane IO mapped register base address
+ * @pma0: IO mapped register base address of PMA0-PCIe
+ * @pma1: IO mapped register base address of PMA1-PCIe
++ * @p0_xr_dtime: IO mapped register base address of port0 Tx-Rx detection time
++ * @p1_xr_dtime: IO mapped register base address of port1 Tx-Rx detection time
++ * @rx_aeq: IO mapped register base address of Rx AEQ training
+ */
+ struct airoha_pcie_phy {
+ struct device *dev;
+@@ -38,6 +41,9 @@ struct airoha_pcie_phy {
+ void __iomem *csr_2l;
+ void __iomem *pma0;
+ void __iomem *pma1;
++ void __iomem *p0_xr_dtime;
++ void __iomem *p1_xr_dtime;
++ void __iomem *rx_aeq;
+ };
+
+ static void airoha_phy_clear_bits(void __iomem *reg, u32 mask)
+@@ -1101,6 +1107,21 @@ static void airoha_pcie_phy_load_kflow(s
+ static int airoha_pcie_phy_init(struct phy *phy)
+ {
+ struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
++ u32 val;
++
++ /* Setup Tx-Rx detection time */
++ val = FIELD_PREP(PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL, 0x33) |
++ FIELD_PREP(PCIE_XTP_RXDET_EN_STB_T_SEL, 0x1) |
++ FIELD_PREP(PCIE_XTP_RXDET_FINISH_STB_T_SEL, 0x2) |
++ FIELD_PREP(PCIE_XTP_TXPD_TX_DATA_EN_DLY, 0x3) |
++ FIELD_PREP(PCIE_XTP_RXDET_LATCH_STB_T_SEL, 0x1);
++ writel(val, pcie_phy->p0_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44);
++ writel(val, pcie_phy->p1_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44);
++ /* Setup Rx AEQ training time */
++ val = FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT, 0x32) |
++ FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT, 0x5050);
++ writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P0);
++ writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P1);
+
+ /* enable load FLL-K flow */
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
+@@ -1217,6 +1238,23 @@ static int airoha_pcie_phy_probe(struct
+ return dev_err_probe(dev, PTR_ERR(pcie_phy->phy),
+ "Failed to create PCIe phy\n");
+
++ pcie_phy->p0_xr_dtime =
++ devm_platform_ioremap_resource_byname(pdev, "p0-xr-dtime");
++ if (IS_ERR(pcie_phy->p0_xr_dtime))
++ return dev_err_probe(dev, PTR_ERR(pcie_phy->p0_xr_dtime),
++ "Failed to map P0 Tx-Rx dtime base\n");
++
++ pcie_phy->p1_xr_dtime =
++ devm_platform_ioremap_resource_byname(pdev, "p1-xr-dtime");
++ if (IS_ERR(pcie_phy->p1_xr_dtime))
++ return dev_err_probe(dev, PTR_ERR(pcie_phy->p1_xr_dtime),
++ "Failed to map P1 Tx-Rx dtime base\n");
++
++ pcie_phy->rx_aeq = devm_platform_ioremap_resource_byname(pdev, "rx-aeq");
++ if (IS_ERR(pcie_phy->rx_aeq))
++ return dev_err_probe(dev, PTR_ERR(pcie_phy->rx_aeq),
++ "Failed to map Rx AEQ base\n");
++
+ pcie_phy->dev = dev;
+ phy_set_drvdata(pcie_phy->phy, pcie_phy);
+
diff --git a/target/linux/airoha/patches-6.6/024-v6.12-phy-airoha-adjust-initialization-delay-in-airoha_pci.patch b/target/linux/airoha/patches-6.6/024-v6.12-phy-airoha-adjust-initialization-delay-in-airoha_pci.patch
new file mode 100644
index 0000000000..ff31b23800
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/024-v6.12-phy-airoha-adjust-initialization-delay-in-airoha_pci.patch
@@ -0,0 +1,40 @@
+From 7f7315db3d262298ab33d198d3f0b09cabfa7b6b Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 6 Aug 2024 17:55:48 +0200
+Subject: [PATCH] phy: airoha: adjust initialization delay in
+ airoha_pcie_phy_init()
+
+Align phy-pcie initialization delay to the vendor sdk in
+airoha_pcie_phy_init routine and allow the hw to complete required
+configuration before proceeding
+
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/8af6f27857619f1e0dd227f08b8584ae8fb22fb2.1722959625.git.lorenzo@kernel.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+---
+ drivers/phy/phy-airoha-pcie.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/phy/phy-airoha-pcie.c
++++ b/drivers/phy/phy-airoha-pcie.c
+@@ -18,6 +18,9 @@
+ #define LEQ_LEN_CTRL_MAX_VAL 7
+ #define FREQ_LOCK_MAX_ATTEMPT 10
+
++/* PCIe-PHY initialization time in ms needed by the hw to complete */
++#define PHY_HW_INIT_TIME_MS 30
++
+ enum airoha_pcie_port_gen {
+ PCIE_PORT_GEN1 = 1,
+ PCIE_PORT_GEN2,
+@@ -1181,7 +1184,8 @@ static int airoha_pcie_phy_init(struct p
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
+ PCIE_DA_XPON_CDR_PR_PWDB);
+
+- usleep_range(100, 200);
++ /* Wait for the PCIe PHY to complete initialization before returning */
++ msleep(PHY_HW_INIT_TIME_MS);
+
+ return 0;
+ }
diff --git a/target/linux/airoha/patches-6.6/025-01-v6.13-phy-airoha-Fix-REG_CSR_2L_PLL_CMN_RESERVE0-config-in.patch b/target/linux/airoha/patches-6.6/025-01-v6.13-phy-airoha-Fix-REG_CSR_2L_PLL_CMN_RESERVE0-config-in.patch
new file mode 100644
index 0000000000..271ef01ed3
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/025-01-v6.13-phy-airoha-Fix-REG_CSR_2L_PLL_CMN_RESERVE0-config-in.patch
@@ -0,0 +1,26 @@
+From ca9afde0563a80200eab856a53d7eab28c8fdd90 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 18 Sep 2024 15:32:52 +0200
+Subject: [PATCH 1/4] phy: airoha: Fix REG_CSR_2L_PLL_CMN_RESERVE0 config in
+ airoha_pcie_phy_init_clk_out()
+
+Fix typo configuring REG_CSR_2L_PLL_CMN_RESERVE0 register in
+airoha_pcie_phy_init_clk_out routine.
+
+Fixes: d7d2818b9383 ("phy: airoha: Add PCIe PHY driver for EN7581 SoC.")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/phy/phy-airoha-pcie.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/phy/phy-airoha-pcie.c
++++ b/drivers/phy/phy-airoha-pcie.c
+@@ -459,7 +459,7 @@ static void airoha_pcie_phy_init_clk_out
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
+ CSR_2L_PXP_CLKTX1_SR);
+ airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0,
+- CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd);
++ CSR_2L_PXP_PLL_RESERVE_MASK, 0xd0d);
+ }
+
+ static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy)
diff --git a/target/linux/airoha/patches-6.6/025-02-v6.13-phy-airoha-Fix-REG_PCIE_PMA_TX_RESET-config-in-airoh.patch b/target/linux/airoha/patches-6.6/025-02-v6.13-phy-airoha-Fix-REG_PCIE_PMA_TX_RESET-config-in-airoh.patch
new file mode 100644
index 0000000000..5c909596f1
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/025-02-v6.13-phy-airoha-Fix-REG_PCIE_PMA_TX_RESET-config-in-airoh.patch
@@ -0,0 +1,29 @@
+From 2c2313c84ad7c0e5e39fbd98559d40f6b9ec1f83 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 18 Sep 2024 15:32:53 +0200
+Subject: [PATCH 2/4] phy: airoha: Fix REG_PCIE_PMA_TX_RESET config in
+ airoha_pcie_phy_init_csr_2l()
+
+Fix typos configuring REG_PCIE_PMA_TX_RESET register in
+airoha_pcie_phy_init_csr_2l routine for lane0 and lane1
+
+Fixes: d7d2818b9383 ("phy: airoha: Add PCIe PHY driver for EN7581 SoC.")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/phy/phy-airoha-pcie.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/phy/phy-airoha-pcie.c
++++ b/drivers/phy/phy-airoha-pcie.c
+@@ -471,9 +471,9 @@ static void airoha_pcie_phy_init_csr_2l(
+ PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
+ PCIE_SW_RX_RST);
+ airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
+- PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
++ PCIE_TX_TOP_RST | PCIE_TX_CAL_RST);
+ airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
+- PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
++ PCIE_TX_TOP_RST | PCIE_TX_CAL_RST);
+ }
+
+ static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy)
diff --git a/target/linux/airoha/patches-6.6/025-03-v6.13-phy-airoha-Fix-REG_CSR_2L_JCPLL_SDM_HREN-config-in-a.patch b/target/linux/airoha/patches-6.6/025-03-v6.13-phy-airoha-Fix-REG_CSR_2L_JCPLL_SDM_HREN-config-in-a.patch
new file mode 100644
index 0000000000..8cde5f1cf7
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/025-03-v6.13-phy-airoha-Fix-REG_CSR_2L_JCPLL_SDM_HREN-config-in-a.patch
@@ -0,0 +1,26 @@
+From 6e0c349a8a59959c3d3571b5f6776bc2d2ca62bc Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 18 Sep 2024 15:32:54 +0200
+Subject: [PATCH 3/4] phy: airoha: Fix REG_CSR_2L_JCPLL_SDM_HREN config in
+ airoha_pcie_phy_init_ssc_jcpll()
+
+Fix typo configuring REG_CSR_2L_JCPLL_SDM_HREN register in
+airoha_pcie_phy_init_ssc_jcpll routine.
+
+Fixes: d7d2818b9383 ("phy: airoha: Add PCIe PHY driver for EN7581 SoC.")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/phy/phy-airoha-pcie.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/phy/phy-airoha-pcie.c
++++ b/drivers/phy/phy-airoha-pcie.c
+@@ -802,7 +802,7 @@ static void airoha_pcie_phy_init_ssc_jcp
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM,
+ CSR_2L_PXP_JCPLL_SDM_IFM);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
+- REG_CSR_2L_JCPLL_SDM_HREN);
++ CSR_2L_PXP_JCPLL_SDM_HREN);
+ airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
+ CSR_2L_PXP_JCPLL_SDM_DI_EN);
+ airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
diff --git a/target/linux/airoha/patches-6.6/025-04-v6.13-phy-airoha-Fix-REG_CSR_2L_RX-0-1-_REV0-definitions.patch b/target/linux/airoha/patches-6.6/025-04-v6.13-phy-airoha-Fix-REG_CSR_2L_RX-0-1-_REV0-definitions.patch
new file mode 100644
index 0000000000..163aebcbdb
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/025-04-v6.13-phy-airoha-Fix-REG_CSR_2L_RX-0-1-_REV0-definitions.patch
@@ -0,0 +1,32 @@
+From bc1bb265f504ea19ce611a1aec1a40dec409cd15 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 18 Sep 2024 15:32:55 +0200
+Subject: [PATCH 4/4] phy: airoha: Fix REG_CSR_2L_RX{0,1}_REV0 definitions
+
+Fix the following register definitions for REG_CSR_2L_RX{0,1}_REV0
+registers:
+- CSR_2L_PXP_VOS_PNINV
+- CSR_2L_PXP_FE_GAIN_NORMAL_MODE
+- CSR_2L_PXP_FE_GAIN_TRAIN_MODE
+
+Fixes: d7d2818b9383 ("phy: airoha: Add PCIe PHY driver for EN7581 SoC.")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/phy/phy-airoha-pcie-regs.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/phy/phy-airoha-pcie-regs.h
++++ b/drivers/phy/phy-airoha-pcie-regs.h
+@@ -197,9 +197,9 @@
+ #define CSR_2L_PXP_TX1_MULTLANE_EN BIT(0)
+
+ #define REG_CSR_2L_RX0_REV0 0x00fc
+-#define CSR_2L_PXP_VOS_PNINV GENMASK(3, 2)
+-#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(6, 4)
+-#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(10, 8)
++#define CSR_2L_PXP_VOS_PNINV GENMASK(19, 18)
++#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(22, 20)
++#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(26, 24)
+
+ #define REG_CSR_2L_RX0_PHYCK_DIV 0x0100
+ #define CSR_2L_PXP_RX0_PHYCK_SEL GENMASK(9, 8)
diff --git a/target/linux/airoha/patches-6.6/025-v6.10-spi-airoha-add-SPI-NAND-Flash-controller-driver.patch b/target/linux/airoha/patches-6.6/025-v6.10-spi-airoha-add-SPI-NAND-Flash-controller-driver.patch
new file mode 100644
index 0000000000..417dcc06d8
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/025-v6.10-spi-airoha-add-SPI-NAND-Flash-controller-driver.patch
@@ -0,0 +1,1203 @@
+From a403997c12019d0f82a9480207bf85985b8de5e7 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi
+Date: Mon, 29 Apr 2024 10:13:10 +0200
+Subject: [PATCH] spi: airoha: add SPI-NAND Flash controller driver
+
+Introduce support for SPI-NAND driver of the Airoha NAND Flash Interface
+found on Airoha ARM SoCs.
+
+Tested-by: Rajeev Kumar
+Signed-off-by: Lorenzo Bianconi
+Reviewed-by: Andy Shevchenko
+Reviewed-by: AngeloGioacchino Del Regno
+Link: https://lore.kernel.org/r/6c9db20505b01a66807995374f2af475a23ce5b2.1714377864.git.lorenzo@kernel.org
+Signed-off-by: Mark Brown
+---
+ MAINTAINERS | 9 +
+ drivers/spi/Kconfig | 10 +
+ drivers/spi/Makefile | 1 +
+ drivers/spi/spi-airoha-snfi.c | 1129 +++++++++++++++++++++++++++++++++
+ 4 files changed, 1149 insertions(+)
+ create mode 100644 drivers/spi/spi-airoha-snfi.c
+
+# diff --git a/MAINTAINERS b/MAINTAINERS
+# index 2b63ed114532..dde7dd956156 100644
+# --- a/MAINTAINERS
+# +++ b/MAINTAINERS
+# @@ -653,6 +653,15 @@ S: Supported
+# F: fs/aio.c
+# F: include/linux/*aio*.h
+
+# +AIROHA SPI SNFI DRIVER
+# +M: Lorenzo Bianconi
+# +M: Ray Liu
+# +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+# +L: linux-spi@vger.kernel.org
+# +S: Maintained
+# +F: Documentation/devicetree/bindings/spi/airoha,en7581-snand.yaml
+# +F: drivers/spi/spi-airoha.c
+# +
+# AIRSPY MEDIA DRIVER
+# L: linux-media@vger.kernel.org
+# S: Orphan
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -57,6 +57,16 @@ config SPI_MEM
+
+ comment "SPI Master Controller Drivers"
+
++config SPI_AIROHA_SNFI
++ tristate "Airoha SPI NAND Flash Interface"
++ depends on ARCH_AIROHA || COMPILE_TEST
++ depends on SPI_MASTER
++ select REGMAP_MMIO
++ help
++ This enables support for SPI-NAND mode on the Airoha NAND
++ Flash Interface found on Airoha ARM SoCs. This controller
++ is implemented as a SPI-MEM controller.
++
+ config SPI_ALTERA
+ tristate "Altera SPI Controller platform driver"
+ select SPI_ALTERA_CORE
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -14,6 +14,7 @@ obj-$(CONFIG_SPI_SPIDEV) += spidev.o
+ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
+
+ # SPI master controller drivers (bus)
++obj-$(CONFIG_SPI_AIROHA_SNFI) += spi-airoha-snfi.o
+ obj-$(CONFIG_SPI_ALTERA) += spi-altera-platform.o
+ obj-$(CONFIG_SPI_ALTERA_CORE) += spi-altera-core.o
+ obj-$(CONFIG_SPI_ALTERA_DFL) += spi-altera-dfl.o
+--- /dev/null
++++ b/drivers/spi/spi-airoha-snfi.c
+@@ -0,0 +1,1129 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 AIROHA Inc
++ * Author: Lorenzo Bianconi
++ * Author: Ray Liu
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++/* SPI */
++#define REG_SPI_CTRL_BASE 0x1FA10000
++
++#define REG_SPI_CTRL_READ_MODE 0x0000
++#define REG_SPI_CTRL_READ_IDLE_EN 0x0004
++#define REG_SPI_CTRL_SIDLY 0x0008
++#define REG_SPI_CTRL_CSHEXT 0x000c
++#define REG_SPI_CTRL_CSLEXT 0x0010
++
++#define REG_SPI_CTRL_MTX_MODE_TOG 0x0014
++#define SPI_CTRL_MTX_MODE_TOG GENMASK(3, 0)
++
++#define REG_SPI_CTRL_RDCTL_FSM 0x0018
++#define SPI_CTRL_RDCTL_FSM GENMASK(3, 0)
++
++#define REG_SPI_CTRL_MACMUX_SEL 0x001c
++
++#define REG_SPI_CTRL_MANUAL_EN 0x0020
++#define SPI_CTRL_MANUAL_EN BIT(0)
++
++#define REG_SPI_CTRL_OPFIFO_EMPTY 0x0024
++#define SPI_CTRL_OPFIFO_EMPTY BIT(0)
++
++#define REG_SPI_CTRL_OPFIFO_WDATA 0x0028
++#define SPI_CTRL_OPFIFO_LEN GENMASK(8, 0)
++#define SPI_CTRL_OPFIFO_OP GENMASK(13, 9)
++
++#define REG_SPI_CTRL_OPFIFO_FULL 0x002c
++#define SPI_CTRL_OPFIFO_FULL BIT(0)
++
++#define REG_SPI_CTRL_OPFIFO_WR 0x0030
++#define SPI_CTRL_OPFIFO_WR BIT(0)
++
++#define REG_SPI_CTRL_DFIFO_FULL 0x0034
++#define SPI_CTRL_DFIFO_FULL BIT(0)
++
++#define REG_SPI_CTRL_DFIFO_WDATA 0x0038
++#define SPI_CTRL_DFIFO_WDATA GENMASK(7, 0)
++
++#define REG_SPI_CTRL_DFIFO_EMPTY 0x003c
++#define SPI_CTRL_DFIFO_EMPTY BIT(0)
++
++#define REG_SPI_CTRL_DFIFO_RD 0x0040
++#define SPI_CTRL_DFIFO_RD BIT(0)
++
++#define REG_SPI_CTRL_DFIFO_RDATA 0x0044
++#define SPI_CTRL_DFIFO_RDATA GENMASK(7, 0)
++
++#define REG_SPI_CTRL_DUMMY 0x0080
++#define SPI_CTRL_CTRL_DUMMY GENMASK(3, 0)
++
++#define REG_SPI_CTRL_PROBE_SEL 0x0088
++#define REG_SPI_CTRL_INTERRUPT 0x0090
++#define REG_SPI_CTRL_INTERRUPT_EN 0x0094
++#define REG_SPI_CTRL_SI_CK_SEL 0x009c
++#define REG_SPI_CTRL_SW_CFGNANDADDR_VAL 0x010c
++#define REG_SPI_CTRL_SW_CFGNANDADDR_EN 0x0110
++#define REG_SPI_CTRL_SFC_STRAP 0x0114
++
++#define REG_SPI_CTRL_NFI2SPI_EN 0x0130
++#define SPI_CTRL_NFI2SPI_EN BIT(0)
++
++/* NFI2SPI */
++#define REG_SPI_NFI_CNFG 0x0000
++#define SPI_NFI_DMA_MODE BIT(0)
++#define SPI_NFI_READ_MODE BIT(1)
++#define SPI_NFI_DMA_BURST_EN BIT(2)
++#define SPI_NFI_HW_ECC_EN BIT(8)
++#define SPI_NFI_AUTO_FDM_EN BIT(9)
++#define SPI_NFI_OPMODE GENMASK(14, 12)
++
++#define REG_SPI_NFI_PAGEFMT 0x0004
++#define SPI_NFI_PAGE_SIZE GENMASK(1, 0)
++#define SPI_NFI_SPARE_SIZE GENMASK(5, 4)
++
++#define REG_SPI_NFI_CON 0x0008
++#define SPI_NFI_FIFO_FLUSH BIT(0)
++#define SPI_NFI_RST BIT(1)
++#define SPI_NFI_RD_TRIG BIT(8)
++#define SPI_NFI_WR_TRIG BIT(9)
++#define SPI_NFI_SEC_NUM GENMASK(15, 12)
++
++#define REG_SPI_NFI_INTR_EN 0x0010
++#define SPI_NFI_RD_DONE_EN BIT(0)
++#define SPI_NFI_WR_DONE_EN BIT(1)
++#define SPI_NFI_RST_DONE_EN BIT(2)
++#define SPI_NFI_ERASE_DONE_EN BIT(3)
++#define SPI_NFI_BUSY_RETURN_EN BIT(4)
++#define SPI_NFI_ACCESS_LOCK_EN BIT(5)
++#define SPI_NFI_AHB_DONE_EN BIT(6)
++#define SPI_NFI_ALL_IRQ_EN \
++ (SPI_NFI_RD_DONE_EN | SPI_NFI_WR_DONE_EN | \
++ SPI_NFI_RST_DONE_EN | SPI_NFI_ERASE_DONE_EN | \
++ SPI_NFI_BUSY_RETURN_EN | SPI_NFI_ACCESS_LOCK_EN | \
++ SPI_NFI_AHB_DONE_EN)
++
++#define REG_SPI_NFI_INTR 0x0014
++#define SPI_NFI_AHB_DONE BIT(6)
++
++#define REG_SPI_NFI_CMD 0x0020
++
++#define REG_SPI_NFI_ADDR_NOB 0x0030
++#define SPI_NFI_ROW_ADDR_NOB GENMASK(6, 4)
++
++#define REG_SPI_NFI_STA 0x0060
++#define REG_SPI_NFI_FIFOSTA 0x0064
++#define REG_SPI_NFI_STRADDR 0x0080
++#define REG_SPI_NFI_FDM0L 0x00a0
++#define REG_SPI_NFI_FDM0M 0x00a4
++#define REG_SPI_NFI_FDM7L 0x00d8
++#define REG_SPI_NFI_FDM7M 0x00dc
++#define REG_SPI_NFI_FIFODATA0 0x0190
++#define REG_SPI_NFI_FIFODATA1 0x0194
++#define REG_SPI_NFI_FIFODATA2 0x0198
++#define REG_SPI_NFI_FIFODATA3 0x019c
++#define REG_SPI_NFI_MASTERSTA 0x0224
++
++#define REG_SPI_NFI_SECCUS_SIZE 0x022c
++#define SPI_NFI_CUS_SEC_SIZE GENMASK(12, 0)
++#define SPI_NFI_CUS_SEC_SIZE_EN BIT(16)
++
++#define REG_SPI_NFI_RD_CTL2 0x0510
++#define REG_SPI_NFI_RD_CTL3 0x0514
++
++#define REG_SPI_NFI_PG_CTL1 0x0524
++#define SPI_NFI_PG_LOAD_CMD GENMASK(15, 8)
++
++#define REG_SPI_NFI_PG_CTL2 0x0528
++#define REG_SPI_NFI_NOR_PROG_ADDR 0x052c
++#define REG_SPI_NFI_NOR_RD_ADDR 0x0534
++
++#define REG_SPI_NFI_SNF_MISC_CTL 0x0538
++#define SPI_NFI_DATA_READ_WR_MODE GENMASK(18, 16)
++
++#define REG_SPI_NFI_SNF_MISC_CTL2 0x053c
++#define SPI_NFI_READ_DATA_BYTE_NUM GENMASK(12, 0)
++#define SPI_NFI_PROG_LOAD_BYTE_NUM GENMASK(28, 16)
++
++#define REG_SPI_NFI_SNF_STA_CTL1 0x0550
++#define SPI_NFI_READ_FROM_CACHE_DONE BIT(25)
++#define SPI_NFI_LOAD_TO_CACHE_DONE BIT(26)
++
++#define REG_SPI_NFI_SNF_STA_CTL2 0x0554
++
++#define REG_SPI_NFI_SNF_NFI_CNFG 0x055c
++#define SPI_NFI_SPI_MODE BIT(0)
++
++/* SPI NAND Protocol OP */
++#define SPI_NAND_OP_GET_FEATURE 0x0f
++#define SPI_NAND_OP_SET_FEATURE 0x1f
++#define SPI_NAND_OP_PAGE_READ 0x13
++#define SPI_NAND_OP_READ_FROM_CACHE_SINGLE 0x03
++#define SPI_NAND_OP_READ_FROM_CACHE_SINGLE_FAST 0x0b
++#define SPI_NAND_OP_READ_FROM_CACHE_DUAL 0x3b
++#define SPI_NAND_OP_READ_FROM_CACHE_QUAD 0x6b
++#define SPI_NAND_OP_WRITE_ENABLE 0x06
++#define SPI_NAND_OP_WRITE_DISABLE 0x04
++#define SPI_NAND_OP_PROGRAM_LOAD_SINGLE 0x02
++#define SPI_NAND_OP_PROGRAM_LOAD_QUAD 0x32
++#define SPI_NAND_OP_PROGRAM_LOAD_RAMDOM_SINGLE 0x84
++#define SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD 0x34
++#define SPI_NAND_OP_PROGRAM_EXECUTE 0x10
++#define SPI_NAND_OP_READ_ID 0x9f
++#define SPI_NAND_OP_BLOCK_ERASE 0xd8
++#define SPI_NAND_OP_RESET 0xff
++#define SPI_NAND_OP_DIE_SELECT 0xc2
++
++#define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256)
++#define SPI_MAX_TRANSFER_SIZE 511
++
++enum airoha_snand_mode {
++ SPI_MODE_AUTO,
++ SPI_MODE_MANUAL,
++ SPI_MODE_DMA,
++};
++
++enum airoha_snand_cs {
++ SPI_CHIP_SEL_HIGH,
++ SPI_CHIP_SEL_LOW,
++};
++
++struct airoha_snand_dev {
++ size_t buf_len;
++
++ u8 *txrx_buf;
++ dma_addr_t dma_addr;
++
++ u64 cur_page_num;
++ bool data_need_update;
++};
++
++struct airoha_snand_ctrl {
++ struct device *dev;
++ struct regmap *regmap_ctrl;
++ struct regmap *regmap_nfi;
++ struct clk *spi_clk;
++
++ struct {
++ size_t page_size;
++ size_t sec_size;
++ u8 sec_num;
++ u8 spare_size;
++ } nfi_cfg;
++};
++
++static int airoha_snand_set_fifo_op(struct airoha_snand_ctrl *as_ctrl,
++ u8 op_cmd, int op_len)
++{
++ int err;
++ u32 val;
++
++ err = regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_OPFIFO_WDATA,
++ FIELD_PREP(SPI_CTRL_OPFIFO_LEN, op_len) |
++ FIELD_PREP(SPI_CTRL_OPFIFO_OP, op_cmd));
++ if (err)
++ return err;
++
++ err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_OPFIFO_FULL,
++ val, !(val & SPI_CTRL_OPFIFO_FULL),
++ 0, 250 * USEC_PER_MSEC);
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_OPFIFO_WR,
++ SPI_CTRL_OPFIFO_WR);
++ if (err)
++ return err;
++
++ return regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_OPFIFO_EMPTY,
++ val, (val & SPI_CTRL_OPFIFO_EMPTY),
++ 0, 250 * USEC_PER_MSEC);
++}
++
++static int airoha_snand_set_cs(struct airoha_snand_ctrl *as_ctrl, u8 cs)
++{
++ return airoha_snand_set_fifo_op(as_ctrl, cs, sizeof(cs));
++}
++
++static int airoha_snand_write_data_to_fifo(struct airoha_snand_ctrl *as_ctrl,
++ const u8 *data, int len)
++{
++ int i;
++
++ for (i = 0; i < len; i++) {
++ int err;
++ u32 val;
++
++ /* 1. Wait until dfifo is not full */
++ err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_DFIFO_FULL, val,
++ !(val & SPI_CTRL_DFIFO_FULL),
++ 0, 250 * USEC_PER_MSEC);
++ if (err)
++ return err;
++
++ /* 2. Write data to register DFIFO_WDATA */
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_DFIFO_WDATA,
++ FIELD_PREP(SPI_CTRL_DFIFO_WDATA, data[i]));
++ if (err)
++ return err;
++
++ /* 3. Wait until dfifo is not full */
++ err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_DFIFO_FULL, val,
++ !(val & SPI_CTRL_DFIFO_FULL),
++ 0, 250 * USEC_PER_MSEC);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int airoha_snand_read_data_from_fifo(struct airoha_snand_ctrl *as_ctrl,
++ u8 *ptr, int len)
++{
++ int i;
++
++ for (i = 0; i < len; i++) {
++ int err;
++ u32 val;
++
++ /* 1. wait until dfifo is not empty */
++ err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_DFIFO_EMPTY, val,
++ !(val & SPI_CTRL_DFIFO_EMPTY),
++ 0, 250 * USEC_PER_MSEC);
++ if (err)
++ return err;
++
++ /* 2. read from dfifo to register DFIFO_RDATA */
++ err = regmap_read(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_DFIFO_RDATA, &val);
++ if (err)
++ return err;
++
++ ptr[i] = FIELD_GET(SPI_CTRL_DFIFO_RDATA, val);
++ /* 3. enable register DFIFO_RD to read next byte */
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_DFIFO_RD, SPI_CTRL_DFIFO_RD);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int airoha_snand_set_mode(struct airoha_snand_ctrl *as_ctrl,
++ enum airoha_snand_mode mode)
++{
++ int err;
++
++ switch (mode) {
++ case SPI_MODE_MANUAL: {
++ u32 val;
++
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_NFI2SPI_EN, 0);
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_READ_IDLE_EN, 0);
++ if (err)
++ return err;
++
++ err = regmap_read_poll_timeout(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_RDCTL_FSM, val,
++ !(val & SPI_CTRL_RDCTL_FSM),
++ 0, 250 * USEC_PER_MSEC);
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_MTX_MODE_TOG, 9);
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_MANUAL_EN, SPI_CTRL_MANUAL_EN);
++ if (err)
++ return err;
++ break;
++ }
++ case SPI_MODE_DMA:
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_NFI2SPI_EN,
++ SPI_CTRL_MANUAL_EN);
++ if (err < 0)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_MTX_MODE_TOG, 0x0);
++ if (err < 0)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_ctrl,
++ REG_SPI_CTRL_MANUAL_EN, 0x0);
++ if (err < 0)
++ return err;
++ break;
++ case SPI_MODE_AUTO:
++ default:
++ break;
++ }
++
++ return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0);
++}
++
++static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
++ const u8 *data, int len)
++{
++ int i, data_len;
++
++ for (i = 0; i < len; i += data_len) {
++ int err;
++
++ data_len = min(len, SPI_MAX_TRANSFER_SIZE);
++ err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
++ if (err)
++ return err;
++
++ err = airoha_snand_write_data_to_fifo(as_ctrl, &data[i],
++ data_len);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
++
++static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data,
++ int len)
++{
++ int i, data_len;
++
++ for (i = 0; i < len; i += data_len) {
++ int err;
++
++ data_len = min(len, SPI_MAX_TRANSFER_SIZE);
++ err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
++ if (err)
++ return err;
++
++ err = airoha_snand_read_data_from_fifo(as_ctrl, &data[i],
++ data_len);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
++
++static int airoha_snand_nfi_init(struct airoha_snand_ctrl *as_ctrl)
++{
++ int err;
++
++ /* switch to SNFI mode */
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_NFI_CNFG,
++ SPI_NFI_SPI_MODE);
++ if (err)
++ return err;
++
++ /* Enable DMA */
++ return regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR_EN,
++ SPI_NFI_ALL_IRQ_EN, SPI_NFI_AHB_DONE_EN);
++}
++
++static int airoha_snand_nfi_config(struct airoha_snand_ctrl *as_ctrl)
++{
++ int err;
++ u32 val;
++
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_FIFO_FLUSH | SPI_NFI_RST);
++ if (err)
++ return err;
++
++ /* auto FDM */
++ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_AUTO_FDM_EN);
++ if (err)
++ return err;
++
++ /* HW ECC */
++ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_HW_ECC_EN);
++ if (err)
++ return err;
++
++ /* DMA Burst */
++ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_DMA_BURST_EN);
++ if (err)
++ return err;
++
++ /* page format */
++ switch (as_ctrl->nfi_cfg.spare_size) {
++ case 26:
++ val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x1);
++ break;
++ case 27:
++ val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x2);
++ break;
++ case 28:
++ val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x3);
++ break;
++ default:
++ val = FIELD_PREP(SPI_NFI_SPARE_SIZE, 0x0);
++ break;
++ }
++
++ err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_PAGEFMT,
++ SPI_NFI_SPARE_SIZE, val);
++ if (err)
++ return err;
++
++ switch (as_ctrl->nfi_cfg.page_size) {
++ case 2048:
++ val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x1);
++ break;
++ case 4096:
++ val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x2);
++ break;
++ default:
++ val = FIELD_PREP(SPI_NFI_PAGE_SIZE, 0x0);
++ break;
++ }
++
++ err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_PAGEFMT,
++ SPI_NFI_PAGE_SIZE, val);
++ if (err)
++ return err;
++
++ /* sec num */
++ val = FIELD_PREP(SPI_NFI_SEC_NUM, as_ctrl->nfi_cfg.sec_num);
++ err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_SEC_NUM, val);
++ if (err)
++ return err;
++
++ /* enable cust sec size */
++ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SECCUS_SIZE,
++ SPI_NFI_CUS_SEC_SIZE_EN);
++ if (err)
++ return err;
++
++ /* set cust sec size */
++ val = FIELD_PREP(SPI_NFI_CUS_SEC_SIZE, as_ctrl->nfi_cfg.sec_size);
++ return regmap_update_bits(as_ctrl->regmap_nfi,
++ REG_SPI_NFI_SECCUS_SIZE,
++ SPI_NFI_CUS_SEC_SIZE, val);
++}
++
++static bool airoha_snand_is_page_ops(const struct spi_mem_op *op)
++{
++ if (op->addr.nbytes != 2)
++ return false;
++
++ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
++ op->addr.buswidth != 4)
++ return false;
++
++ switch (op->data.dir) {
++ case SPI_MEM_DATA_IN:
++ if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth > 0xf)
++ return false;
++
++ /* quad in / quad out */
++ if (op->addr.buswidth == 4)
++ return op->data.buswidth == 4;
++
++ if (op->addr.buswidth == 2)
++ return op->data.buswidth == 2;
++
++ /* standard spi */
++ return op->data.buswidth == 4 || op->data.buswidth == 2 ||
++ op->data.buswidth == 1;
++ case SPI_MEM_DATA_OUT:
++ return !op->dummy.nbytes && op->addr.buswidth == 1 &&
++ (op->data.buswidth == 4 || op->data.buswidth == 1);
++ default:
++ return false;
++ }
++}
++
++static int airoha_snand_adjust_op_size(struct spi_mem *mem,
++ struct spi_mem_op *op)
++{
++ size_t max_len;
++
++ if (airoha_snand_is_page_ops(op)) {
++ struct airoha_snand_ctrl *as_ctrl;
++
++ as_ctrl = spi_controller_get_devdata(mem->spi->controller);
++ max_len = as_ctrl->nfi_cfg.sec_size;
++ max_len += as_ctrl->nfi_cfg.spare_size;
++ max_len *= as_ctrl->nfi_cfg.sec_num;
++
++ if (op->data.nbytes > max_len)
++ op->data.nbytes = max_len;
++ } else {
++ max_len = 1 + op->addr.nbytes + op->dummy.nbytes;
++ if (max_len >= 160)
++ return -EOPNOTSUPP;
++
++ if (op->data.nbytes > 160 - max_len)
++ op->data.nbytes = 160 - max_len;
++ }
++
++ return 0;
++}
++
++static bool airoha_snand_supports_op(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ if (!spi_mem_default_supports_op(mem, op))
++ return false;
++
++ if (op->cmd.buswidth != 1)
++ return false;
++
++ if (airoha_snand_is_page_ops(op))
++ return true;
++
++ return (!op->addr.nbytes || op->addr.buswidth == 1) &&
++ (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
++ (!op->data.nbytes || op->data.buswidth == 1);
++}
++
++static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
++{
++ struct airoha_snand_dev *as_dev = spi_get_ctldata(desc->mem->spi);
++
++ if (!as_dev->txrx_buf)
++ return -EINVAL;
++
++ if (desc->info.offset + desc->info.length > U32_MAX)
++ return -EINVAL;
++
++ if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl))
++ return -EOPNOTSUPP;
++
++ return 0;
++}
++
++static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
++ u64 offs, size_t len, void *buf)
++{
++ struct spi_device *spi = desc->mem->spi;
++ struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
++ struct spi_mem_op *op = &desc->info.op_tmpl;
++ struct airoha_snand_ctrl *as_ctrl;
++ u32 val, rd_mode;
++ int err;
++
++ if (!as_dev->data_need_update)
++ return len;
++
++ as_dev->data_need_update = false;
++
++ switch (op->cmd.opcode) {
++ case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
++ rd_mode = 1;
++ break;
++ case SPI_NAND_OP_READ_FROM_CACHE_QUAD:
++ rd_mode = 2;
++ break;
++ default:
++ rd_mode = 0;
++ break;
++ }
++
++ as_ctrl = spi_controller_get_devdata(spi->controller);
++ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA);
++ if (err < 0)
++ return err;
++
++ err = airoha_snand_nfi_config(as_ctrl);
++ if (err)
++ return err;
++
++ dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr,
++ as_dev->buf_len, DMA_BIDIRECTIONAL);
++
++ /* set dma addr */
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
++ as_dev->dma_addr);
++ if (err)
++ return err;
++
++ /* set cust sec size */
++ val = as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num;
++ val = FIELD_PREP(SPI_NFI_READ_DATA_BYTE_NUM, val);
++ err = regmap_update_bits(as_ctrl->regmap_nfi,
++ REG_SPI_NFI_SNF_MISC_CTL2,
++ SPI_NFI_READ_DATA_BYTE_NUM, val);
++ if (err)
++ return err;
++
++ /* set read command */
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL2,
++ op->cmd.opcode);
++ if (err)
++ return err;
++
++ /* set read mode */
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
++ FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, rd_mode));
++ if (err)
++ return err;
++
++ /* set read addr */
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0);
++ if (err)
++ return err;
++
++ /* set nfi read */
++ err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_OPMODE,
++ FIELD_PREP(SPI_NFI_OPMODE, 6));
++ if (err)
++ return err;
++
++ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_READ_MODE | SPI_NFI_DMA_MODE);
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x0);
++ if (err)
++ return err;
++
++ /* trigger dma start read */
++ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_RD_TRIG);
++ if (err)
++ return err;
++
++ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_RD_TRIG);
++ if (err)
++ return err;
++
++ err = regmap_read_poll_timeout(as_ctrl->regmap_nfi,
++ REG_SPI_NFI_SNF_STA_CTL1, val,
++ (val & SPI_NFI_READ_FROM_CACHE_DONE),
++ 0, 1 * USEC_PER_SEC);
++ if (err)
++ return err;
++
++ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
++ SPI_NFI_READ_FROM_CACHE_DONE);
++ if (err)
++ return err;
++
++ err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR,
++ val, (val & SPI_NFI_AHB_DONE), 0,
++ 1 * USEC_PER_SEC);
++ if (err)
++ return err;
++
++ /* DMA read need delay for data ready from controller to DRAM */
++ udelay(1);
++
++ dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr,
++ as_dev->buf_len, DMA_BIDIRECTIONAL);
++ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
++ if (err < 0)
++ return err;
++
++ memcpy(buf, as_dev->txrx_buf + offs, len);
++
++ return len;
++}
++
++static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
++ u64 offs, size_t len, const void *buf)
++{
++ struct spi_device *spi = desc->mem->spi;
++ struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
++ struct spi_mem_op *op = &desc->info.op_tmpl;
++ struct airoha_snand_ctrl *as_ctrl;
++ u32 wr_mode, val;
++ int err;
++
++ as_ctrl = spi_controller_get_devdata(spi->controller);
++ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
++ if (err < 0)
++ return err;
++
++ dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr,
++ as_dev->buf_len, DMA_BIDIRECTIONAL);
++ memcpy(as_dev->txrx_buf + offs, buf, len);
++ dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr,
++ as_dev->buf_len, DMA_BIDIRECTIONAL);
++
++ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA);
++ if (err < 0)
++ return err;
++
++ err = airoha_snand_nfi_config(as_ctrl);
++ if (err)
++ return err;
++
++ if (op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_QUAD ||
++ op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD)
++ wr_mode = BIT(1);
++ else
++ wr_mode = 0;
++
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
++ as_dev->dma_addr);
++ if (err)
++ return err;
++
++ val = FIELD_PREP(SPI_NFI_PROG_LOAD_BYTE_NUM,
++ as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num);
++ err = regmap_update_bits(as_ctrl->regmap_nfi,
++ REG_SPI_NFI_SNF_MISC_CTL2,
++ SPI_NFI_PROG_LOAD_BYTE_NUM, val);
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL1,
++ FIELD_PREP(SPI_NFI_PG_LOAD_CMD,
++ op->cmd.opcode));
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
++ FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, wr_mode));
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0);
++ if (err)
++ return err;
++
++ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_READ_MODE);
++ if (err)
++ return err;
++
++ err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_OPMODE,
++ FIELD_PREP(SPI_NFI_OPMODE, 3));
++ if (err)
++ return err;
++
++ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_DMA_MODE);
++ if (err)
++ return err;
++
++ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x80);
++ if (err)
++ return err;
++
++ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_WR_TRIG);
++ if (err)
++ return err;
++
++ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_WR_TRIG);
++ if (err)
++ return err;
++
++ err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR,
++ val, (val & SPI_NFI_AHB_DONE), 0,
++ 1 * USEC_PER_SEC);
++ if (err)
++ return err;
++
++ err = regmap_read_poll_timeout(as_ctrl->regmap_nfi,
++ REG_SPI_NFI_SNF_STA_CTL1, val,
++ (val & SPI_NFI_LOAD_TO_CACHE_DONE),
++ 0, 1 * USEC_PER_SEC);
++ if (err)
++ return err;
++
++ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
++ SPI_NFI_LOAD_TO_CACHE_DONE);
++ if (err)
++ return err;
++
++ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
++ if (err < 0)
++ return err;
++
++ return len;
++}
++
++static int airoha_snand_exec_op(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi);
++ u8 data[8], cmd, opcode = op->cmd.opcode;
++ struct airoha_snand_ctrl *as_ctrl;
++ int i, err;
++
++ as_ctrl = spi_controller_get_devdata(mem->spi->controller);
++ if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE &&
++ op->addr.val == as_dev->cur_page_num) {
++ as_dev->data_need_update = true;
++ } else if (opcode == SPI_NAND_OP_PAGE_READ) {
++ if (!as_dev->data_need_update &&
++ op->addr.val == as_dev->cur_page_num)
++ return 0;
++
++ as_dev->data_need_update = true;
++ as_dev->cur_page_num = op->addr.val;
++ }
++
++ /* switch to manual mode */
++ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
++ if (err < 0)
++ return err;
++
++ err = airoha_snand_set_cs(as_ctrl, SPI_CHIP_SEL_LOW);
++ if (err < 0)
++ return err;
++
++ /* opcode */
++ err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode));
++ if (err)
++ return err;
++
++ /* addr part */
++ cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 0x11 : 0x8;
++ put_unaligned_be64(op->addr.val, data);
++
++ for (i = ARRAY_SIZE(data) - op->addr.nbytes;
++ i < ARRAY_SIZE(data); i++) {
++ err = airoha_snand_write_data(as_ctrl, cmd, &data[i],
++ sizeof(data[0]));
++ if (err)
++ return err;
++ }
++
++ /* dummy */
++ data[0] = 0xff;
++ for (i = 0; i < op->dummy.nbytes; i++) {
++ err = airoha_snand_write_data(as_ctrl, 0x8, &data[0],
++ sizeof(data[0]));
++ if (err)
++ return err;
++ }
++
++ /* data */
++ if (op->data.dir == SPI_MEM_DATA_IN) {
++ err = airoha_snand_read_data(as_ctrl, op->data.buf.in,
++ op->data.nbytes);
++ if (err)
++ return err;
++ } else {
++ err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out,
++ op->data.nbytes);
++ if (err)
++ return err;
++ }
++
++ return airoha_snand_set_cs(as_ctrl, SPI_CHIP_SEL_HIGH);
++}
++
++static const struct spi_controller_mem_ops airoha_snand_mem_ops = {
++ .adjust_op_size = airoha_snand_adjust_op_size,
++ .supports_op = airoha_snand_supports_op,
++ .exec_op = airoha_snand_exec_op,
++ .dirmap_create = airoha_snand_dirmap_create,
++ .dirmap_read = airoha_snand_dirmap_read,
++ .dirmap_write = airoha_snand_dirmap_write,
++};
++
++static int airoha_snand_setup(struct spi_device *spi)
++{
++ struct airoha_snand_ctrl *as_ctrl;
++ struct airoha_snand_dev *as_dev;
++
++ as_ctrl = spi_controller_get_devdata(spi->controller);
++
++ as_dev = devm_kzalloc(as_ctrl->dev, sizeof(*as_dev), GFP_KERNEL);
++ if (!as_dev)
++ return -ENOMEM;
++
++ /* prepare device buffer */
++ as_dev->buf_len = SPI_NAND_CACHE_SIZE;
++ as_dev->txrx_buf = devm_kzalloc(as_ctrl->dev, as_dev->buf_len,
++ GFP_KERNEL);
++ if (!as_dev->txrx_buf)
++ return -ENOMEM;
++
++ as_dev->dma_addr = dma_map_single(as_ctrl->dev, as_dev->txrx_buf,
++ as_dev->buf_len, DMA_BIDIRECTIONAL);
++ if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
++ return -ENOMEM;
++
++ as_dev->data_need_update = true;
++ spi_set_ctldata(spi, as_dev);
++
++ return 0;
++}
++
++static void airoha_snand_cleanup(struct spi_device *spi)
++{
++ struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
++ struct airoha_snand_ctrl *as_ctrl;
++
++ as_ctrl = spi_controller_get_devdata(spi->controller);
++ dma_unmap_single(as_ctrl->dev, as_dev->dma_addr,
++ as_dev->buf_len, DMA_BIDIRECTIONAL);
++ spi_set_ctldata(spi, NULL);
++}
++
++static int airoha_snand_nfi_setup(struct airoha_snand_ctrl *as_ctrl)
++{
++ u32 val, sec_size, sec_num;
++ int err;
++
++ err = regmap_read(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, &val);
++ if (err)
++ return err;
++
++ sec_num = FIELD_GET(SPI_NFI_SEC_NUM, val);
++
++ err = regmap_read(as_ctrl->regmap_nfi, REG_SPI_NFI_SECCUS_SIZE, &val);
++ if (err)
++ return err;
++
++ sec_size = FIELD_GET(SPI_NFI_CUS_SEC_SIZE, val);
++
++ /* init default value */
++ as_ctrl->nfi_cfg.sec_size = sec_size;
++ as_ctrl->nfi_cfg.sec_num = sec_num;
++ as_ctrl->nfi_cfg.page_size = round_down(sec_size * sec_num, 1024);
++ as_ctrl->nfi_cfg.spare_size = 16;
++
++ err = airoha_snand_nfi_init(as_ctrl);
++ if (err)
++ return err;
++
++ return airoha_snand_nfi_config(as_ctrl);
++}
++
++static const struct regmap_config spi_ctrl_regmap_config = {
++ .name = "ctrl",
++ .reg_bits = 32,
++ .val_bits = 32,
++ .reg_stride = 4,
++ .max_register = REG_SPI_CTRL_NFI2SPI_EN,
++};
++
++static const struct regmap_config spi_nfi_regmap_config = {
++ .name = "nfi",
++ .reg_bits = 32,
++ .val_bits = 32,
++ .reg_stride = 4,
++ .max_register = REG_SPI_NFI_SNF_NFI_CNFG,
++};
++
++static const struct of_device_id airoha_snand_ids[] = {
++ { .compatible = "airoha,en7581-snand" },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, airoha_snand_ids);
++
++static int airoha_snand_probe(struct platform_device *pdev)
++{
++ struct airoha_snand_ctrl *as_ctrl;
++ struct device *dev = &pdev->dev;
++ struct spi_controller *ctrl;
++ void __iomem *base;
++ int err;
++
++ ctrl = devm_spi_alloc_host(dev, sizeof(*as_ctrl));
++ if (!ctrl)
++ return -ENOMEM;
++
++ as_ctrl = spi_controller_get_devdata(ctrl);
++ as_ctrl->dev = dev;
++
++ base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ as_ctrl->regmap_ctrl = devm_regmap_init_mmio(dev, base,
++ &spi_ctrl_regmap_config);
++ if (IS_ERR(as_ctrl->regmap_ctrl))
++ return dev_err_probe(dev, PTR_ERR(as_ctrl->regmap_ctrl),
++ "failed to init spi ctrl regmap\n");
++
++ base = devm_platform_ioremap_resource(pdev, 1);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ as_ctrl->regmap_nfi = devm_regmap_init_mmio(dev, base,
++ &spi_nfi_regmap_config);
++ if (IS_ERR(as_ctrl->regmap_nfi))
++ return dev_err_probe(dev, PTR_ERR(as_ctrl->regmap_nfi),
++ "failed to init spi nfi regmap\n");
++
++ as_ctrl->spi_clk = devm_clk_get_enabled(dev, "spi");
++ if (IS_ERR(as_ctrl->spi_clk))
++ return dev_err_probe(dev, PTR_ERR(as_ctrl->spi_clk),
++ "unable to get spi clk\n");
++
++ err = dma_set_mask(as_ctrl->dev, DMA_BIT_MASK(32));
++ if (err)
++ return err;
++
++ ctrl->num_chipselect = 2;
++ ctrl->mem_ops = &airoha_snand_mem_ops;
++ ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
++ ctrl->mode_bits = SPI_RX_DUAL;
++ ctrl->setup = airoha_snand_setup;
++ ctrl->cleanup = airoha_snand_cleanup;
++ device_set_node(&ctrl->dev, dev_fwnode(dev));
++
++ err = airoha_snand_nfi_setup(as_ctrl);
++ if (err)
++ return err;
++
++ return devm_spi_register_controller(dev, ctrl);
++}
++
++static struct platform_driver airoha_snand_driver = {
++ .driver = {
++ .name = "airoha-spi",
++ .of_match_table = airoha_snand_ids,
++ },
++ .probe = airoha_snand_probe,
++};
++module_platform_driver(airoha_snand_driver);
++
++MODULE_DESCRIPTION("Airoha SPI-NAND Flash Controller Driver");
++MODULE_AUTHOR("Lorenzo Bianconi ");
++MODULE_AUTHOR("Ray Liu ");
++MODULE_LICENSE("GPL");
diff --git a/target/linux/airoha/patches-6.6/026-01-v6.12-spi-airoha-fix-dirmap_-read-write-operations.patch b/target/linux/airoha/patches-6.6/026-01-v6.12-spi-airoha-fix-dirmap_-read-write-operations.patch
new file mode 100644
index 0000000000..dce013acd3
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/026-01-v6.12-spi-airoha-fix-dirmap_-read-write-operations.patch
@@ -0,0 +1,55 @@
+From 2e6bbfe7b0c0607001b784082c2685b134174fac Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 13 Sep 2024 23:07:13 +0200
+Subject: [PATCH 1/2] spi: airoha: fix dirmap_{read,write} operations
+
+SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end of
+dirmap_read operation even if it is already set.
+In the same way, SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the
+end of dirmap_write operation even if it is already set.
+For this reason use regmap_write_bits() instead of regmap_set_bits().
+This patch fixes mtd_pagetest kernel module test.
+
+Fixes: a403997c1201 ("spi: airoha: add SPI-NAND Flash controller driver")
+Tested-by: Christian Marangi <ansuelsmth@gmail.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20240913-airoha-spi-fixes-v1-1-de2e74ed4664@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+ drivers/spi/spi-airoha-snfi.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/drivers/spi/spi-airoha-snfi.c
++++ b/drivers/spi/spi-airoha-snfi.c
+@@ -739,8 +739,13 @@ static ssize_t airoha_snand_dirmap_read(
+ if (err)
+ return err;
+
+- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
+- SPI_NFI_READ_FROM_CACHE_DONE);
++ /*
++ * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end
++ * of dirmap_read operation even if it is already set.
++ */
++ err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
++ SPI_NFI_READ_FROM_CACHE_DONE,
++ SPI_NFI_READ_FROM_CACHE_DONE);
+ if (err)
+ return err;
+
+@@ -870,8 +875,13 @@ static ssize_t airoha_snand_dirmap_write
+ if (err)
+ return err;
+
+- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
+- SPI_NFI_LOAD_TO_CACHE_DONE);
++ /*
++ * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end
++ * of dirmap_write operation even if it is already set.
++ */
++ err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
++ SPI_NFI_LOAD_TO_CACHE_DONE,
++ SPI_NFI_LOAD_TO_CACHE_DONE);
+ if (err)
+ return err;
+
diff --git a/target/linux/airoha/patches-6.6/026-02-v6.12-spi-airoha-fix-airoha_snand_-write-read-_data-data_l.patch b/target/linux/airoha/patches-6.6/026-02-v6.12-spi-airoha-fix-airoha_snand_-write-read-_data-data_l.patch
new file mode 100644
index 0000000000..738cb0c9cb
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/026-02-v6.12-spi-airoha-fix-airoha_snand_-write-read-_data-data_l.patch
@@ -0,0 +1,39 @@
+From 0e58637eb968c636725dcd6c7055249b4e5326fb Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 13 Sep 2024 23:07:14 +0200
+Subject: [PATCH 2/2] spi: airoha: fix airoha_snand_{write,read}_data data_len
+ estimation
+
+Fix data length written and read in airoha_snand_write_data and
+airoha_snand_read_data routines respectively if it is bigger than
+SPI_MAX_TRANSFER_SIZE.
+
+Fixes: a403997c1201 ("spi: airoha: add SPI-NAND Flash controller driver")
+Tested-by: Christian Marangi <ansuelsmth@gmail.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20240913-airoha-spi-fixes-v1-2-de2e74ed4664@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+ drivers/spi/spi-airoha-snfi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/spi/spi-airoha-snfi.c
++++ b/drivers/spi/spi-airoha-snfi.c
+@@ -405,7 +405,7 @@ static int airoha_snand_write_data(struc
+ for (i = 0; i < len; i += data_len) {
+ int err;
+
+- data_len = min(len, SPI_MAX_TRANSFER_SIZE);
++ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
+ err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
+ if (err)
+ return err;
+@@ -427,7 +427,7 @@ static int airoha_snand_read_data(struct
+ for (i = 0; i < len; i += data_len) {
+ int err;
+
+- data_len = min(len, SPI_MAX_TRANSFER_SIZE);
++ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
+ err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
+ if (err)
+ return err;
diff --git a/target/linux/airoha/patches-6.6/027-v6.12-spi-airoha-remove-read-cache-in-airoha_snand_dirmap_.patch b/target/linux/airoha/patches-6.6/027-v6.12-spi-airoha-remove-read-cache-in-airoha_snand_dirmap_.patch
new file mode 100644
index 0000000000..d2d2b54d30
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/027-v6.12-spi-airoha-remove-read-cache-in-airoha_snand_dirmap_.patch
@@ -0,0 +1,116 @@
+From fffca269e4f31c3633c6d810833ba1b184407915 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Thu, 19 Sep 2024 18:57:16 +0200
+Subject: [PATCH] spi: airoha: remove read cache in airoha_snand_dirmap_read()
+
+Current upstream driver reports errors running mtd_oobtest kernel module
+test:
+
+root@OpenWrt:/# insmod mtd_test.ko
+root@OpenWrt:/# insmod mtd_oobtest.ko dev=5
+[ 7023.730584] =================================================
+[ 7023.736399] mtd_oobtest: MTD device: 5
+[ 7023.740160] mtd_oobtest: MTD device size 3670016, eraseblock size 131072, page size 2048, count of eraseblocks 28, pages per eraseblock 64, OOB size 128
+[ 7023.753837] mtd_test: scanning for bad eraseblocks
+[ 7023.758636] mtd_test: scanned 28 eraseblocks, 0 are bad
+[ 7023.763861] mtd_oobtest: test 1 of 5
+[ 7024.042076] mtd_oobtest: writing OOBs of whole device
+[ 7024.682069] mtd_oobtest: written up to eraseblock 0
+[ 7041.962077] mtd_oobtest: written 28 eraseblocks
+[ 7041.966626] mtd_oobtest: verifying all eraseblocks
+[ 7041.972276] mtd_oobtest: error @addr[0x0:0x0] 0xff -> 0xe diff 0xf1
+[ 7041.978550] mtd_oobtest: error @addr[0x0:0x1] 0xff -> 0x10 diff 0xef
+[ 7041.984932] mtd_oobtest: error @addr[0x0:0x2] 0xff -> 0x82 diff 0x7d
+[ 7041.991293] mtd_oobtest: error @addr[0x0:0x3] 0xff -> 0x10 diff 0xef
+[ 7041.997659] mtd_oobtest: error @addr[0x0:0x4] 0xff -> 0x0 diff 0xff
+[ 7042.003942] mtd_oobtest: error @addr[0x0:0x5] 0xff -> 0x8a diff 0x75
+[ 7042.010294] mtd_oobtest: error @addr[0x0:0x6] 0xff -> 0x20 diff 0xdf
+[ 7042.016659] mtd_oobtest: error @addr[0x0:0x7] 0xff -> 0x1 diff 0xfe
+[ 7042.022935] mtd_oobtest: error @addr[0x0:0x8] 0xff -> 0x2e diff 0xd1
+[ 7042.029295] mtd_oobtest: error @addr[0x0:0x9] 0xff -> 0x40 diff 0xbf
+[ 7042.035661] mtd_oobtest: error @addr[0x0:0xa] 0xff -> 0x0 diff 0xff
+[ 7042.041935] mtd_oobtest: error @addr[0x0:0xb] 0xff -> 0x89 diff 0x76
+[ 7042.048300] mtd_oobtest: error @addr[0x0:0xc] 0xff -> 0x82 diff 0x7d
+[ 7042.054662] mtd_oobtest: error @addr[0x0:0xd] 0xff -> 0x15 diff 0xea
+[ 7042.061014] mtd_oobtest: error @addr[0x0:0xe] 0xff -> 0x90 diff 0x6f
+[ 7042.067380] mtd_oobtest: error @addr[0x0:0xf] 0xff -> 0x0 diff 0xff
+....
+[ 7432.421369] mtd_oobtest: error @addr[0x237800:0x36] 0xff -> 0x5f diff 0xa0
+[ 7432.428242] mtd_oobtest: error @addr[0x237800:0x37] 0xff -> 0x21 diff 0xde
+[ 7432.435118] mtd_oobtest: error: verify failed at 0x237800
+[ 7432.440510] mtd_oobtest: error: too many errors
+[ 7432.445053] mtd_oobtest: error -1 occurred
+
+The above errors are due to the buggy logic in the 'read cache' available
+in airoha_snand_dirmap_read() routine since there are some corner cases
+where we are missing data updates. Since we do not get any read/write speed
+improvement using the cache (according to the mtd_speedtest kernel
+module test), in order to fix the mtd_oobtest test, remove the 'read cache'
+in airoha_snand_dirmap_read routine. Now the driver is passing all the
+tests available in mtd_test suite.
+
+Fixes: a403997c1201 ("spi: airoha: add SPI-NAND Flash controller driver")
+Tested-by: Christian Marangi <ansuelsmth@gmail.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20240919-airoha-spi-fixes-v2-1-cb0f0ed9920a@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+ drivers/spi/spi-airoha-snfi.c | 21 ---------------------
+ 1 file changed, 21 deletions(-)
+
+--- a/drivers/spi/spi-airoha-snfi.c
++++ b/drivers/spi/spi-airoha-snfi.c
+@@ -211,9 +211,6 @@ struct airoha_snand_dev {
+
+ u8 *txrx_buf;
+ dma_addr_t dma_addr;
+-
+- u64 cur_page_num;
+- bool data_need_update;
+ };
+
+ struct airoha_snand_ctrl {
+@@ -644,11 +641,6 @@ static ssize_t airoha_snand_dirmap_read(
+ u32 val, rd_mode;
+ int err;
+
+- if (!as_dev->data_need_update)
+- return len;
+-
+- as_dev->data_need_update = false;
+-
+ switch (op->cmd.opcode) {
+ case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
+ rd_mode = 1;
+@@ -895,23 +887,11 @@ static ssize_t airoha_snand_dirmap_write
+ static int airoha_snand_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+ {
+- struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi);
+ u8 data[8], cmd, opcode = op->cmd.opcode;
+ struct airoha_snand_ctrl *as_ctrl;
+ int i, err;
+
+ as_ctrl = spi_controller_get_devdata(mem->spi->controller);
+- if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE &&
+- op->addr.val == as_dev->cur_page_num) {
+- as_dev->data_need_update = true;
+- } else if (opcode == SPI_NAND_OP_PAGE_READ) {
+- if (!as_dev->data_need_update &&
+- op->addr.val == as_dev->cur_page_num)
+- return 0;
+-
+- as_dev->data_need_update = true;
+- as_dev->cur_page_num = op->addr.val;
+- }
+
+ /* switch to manual mode */
+ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
+@@ -996,7 +976,6 @@ static int airoha_snand_setup(struct spi
+ if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
+ return -ENOMEM;
+
+- as_dev->data_need_update = true;
+ spi_set_ctldata(spi, as_dev);
+
+ return 0;
diff --git a/target/linux/airoha/patches-6.6/028-v6.13-spi-airoha-do-not-keep-tx-rx-dma-buffer-always-mappe.patch b/target/linux/airoha/patches-6.6/028-v6.13-spi-airoha-do-not-keep-tx-rx-dma-buffer-always-mappe.patch
new file mode 100644
index 0000000000..71e920cd0f
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/028-v6.13-spi-airoha-do-not-keep-tx-rx-dma-buffer-always-mappe.patch
@@ -0,0 +1,435 @@
+From 7a4b3ebf1d60349587fee21872536e7bd6a4cf39 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sun, 22 Sep 2024 19:38:30 +0200
+Subject: [PATCH] spi: airoha: do not keep {tx,rx} dma buffer always mapped
+
+DMA map txrx_buf on demand in airoha_snand_dirmap_read and
+airoha_snand_dirmap_write routines and do not keep it always mapped.
+This patch is not fixing any bug or introducing any functional change
+to the driver, it just simplifies the code and improve code readability
+without introducing any performance degradation according to the results
+obtained from the mtd_speedtest kernel module test.
+
+root@OpenWrt:# insmod mtd_test.ko
+root@OpenWrt:# insmod mtd_speedtest.ko dev=5
+[ 49.849869] =================================================
+[ 49.855659] mtd_speedtest: MTD device: 5
+[ 49.859583] mtd_speedtest: MTD device size 8388608, eraseblock size 131072, page size 2048, count of eraseblocks 64, pages per eraseblock 64, OOB size 128
+[ 49.874622] mtd_test: scanning for bad eraseblocks
+[ 49.879433] mtd_test: scanned 64 eraseblocks, 0 are bad
+[ 50.106372] mtd_speedtest: testing eraseblock write speed
+[ 53.083380] mtd_speedtest: eraseblock write speed is 2756 KiB/s
+[ 53.089322] mtd_speedtest: testing eraseblock read speed
+[ 54.143360] mtd_speedtest: eraseblock read speed is 7811 KiB/s
+[ 54.370365] mtd_speedtest: testing page write speed
+[ 57.349480] mtd_speedtest: page write speed is 2754 KiB/s
+[ 57.354895] mtd_speedtest: testing page read speed
+[ 58.410431] mtd_speedtest: page read speed is 7796 KiB/s
+[ 58.636805] mtd_speedtest: testing 2 page write speed
+[ 61.612427] mtd_speedtest: 2 page write speed is 2757 KiB/s
+[ 61.618021] mtd_speedtest: testing 2 page read speed
+[ 62.672653] mtd_speedtest: 2 page read speed is 7804 KiB/s
+[ 62.678159] mtd_speedtest: Testing erase speed
+[ 62.903617] mtd_speedtest: erase speed is 37063 KiB/s
+[ 62.908678] mtd_speedtest: Testing 2x multi-block erase speed
+[ 63.134083] mtd_speedtest: 2x multi-block erase speed is 37292 KiB/s
+[ 63.140442] mtd_speedtest: Testing 4x multi-block erase speed
+[ 63.364262] mtd_speedtest: 4x multi-block erase speed is 37566 KiB/s
+[ 63.370632] mtd_speedtest: Testing 8x multi-block erase speed
+[ 63.595740] mtd_speedtest: 8x multi-block erase speed is 37344 KiB/s
+[ 63.602089] mtd_speedtest: Testing 16x multi-block erase speed
+[ 63.827426] mtd_speedtest: 16x multi-block erase speed is 37320 KiB/s
+[ 63.833860] mtd_speedtest: Testing 32x multi-block erase speed
+[ 64.059389] mtd_speedtest: 32x multi-block erase speed is 37288 KiB/s
+[ 64.065833] mtd_speedtest: Testing 64x multi-block erase speed
+[ 64.290609] mtd_speedtest: 64x multi-block erase speed is 37415 KiB/s
+[ 64.297063] mtd_speedtest: finished
+[ 64.300555] =================================================
+
+Tested-by: Christian Marangi <ansuelsmth@gmail.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20240922-airoha-spi-fixes-v3-1-f958802b3d68@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+ drivers/spi/spi-airoha-snfi.c | 154 ++++++++++++++++------------------
+ 1 file changed, 71 insertions(+), 83 deletions(-)
+
+--- a/drivers/spi/spi-airoha-snfi.c
++++ b/drivers/spi/spi-airoha-snfi.c
+@@ -206,13 +206,6 @@ enum airoha_snand_cs {
+ SPI_CHIP_SEL_LOW,
+ };
+
+-struct airoha_snand_dev {
+- size_t buf_len;
+-
+- u8 *txrx_buf;
+- dma_addr_t dma_addr;
+-};
+-
+ struct airoha_snand_ctrl {
+ struct device *dev;
+ struct regmap *regmap_ctrl;
+@@ -617,9 +610,9 @@ static bool airoha_snand_supports_op(str
+
+ static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
+ {
+- struct airoha_snand_dev *as_dev = spi_get_ctldata(desc->mem->spi);
++ u8 *txrx_buf = spi_get_ctldata(desc->mem->spi);
+
+- if (!as_dev->txrx_buf)
++ if (!txrx_buf)
+ return -EINVAL;
+
+ if (desc->info.offset + desc->info.length > U32_MAX)
+@@ -634,10 +627,11 @@ static int airoha_snand_dirmap_create(st
+ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+ {
+- struct spi_device *spi = desc->mem->spi;
+- struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
+ struct spi_mem_op *op = &desc->info.op_tmpl;
++ struct spi_device *spi = desc->mem->spi;
+ struct airoha_snand_ctrl *as_ctrl;
++ u8 *txrx_buf = spi_get_ctldata(spi);
++ dma_addr_t dma_addr;
+ u32 val, rd_mode;
+ int err;
+
+@@ -662,14 +656,17 @@ static ssize_t airoha_snand_dirmap_read(
+ if (err)
+ return err;
+
+- dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr,
+- as_dev->buf_len, DMA_BIDIRECTIONAL);
++ dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE,
++ DMA_FROM_DEVICE);
++ err = dma_mapping_error(as_ctrl->dev, dma_addr);
++ if (err)
++ return err;
+
+ /* set dma addr */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
+- as_dev->dma_addr);
++ dma_addr);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /* set cust sec size */
+ val = as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num;
+@@ -678,58 +675,58 @@ static ssize_t airoha_snand_dirmap_read(
+ REG_SPI_NFI_SNF_MISC_CTL2,
+ SPI_NFI_READ_DATA_BYTE_NUM, val);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /* set read command */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL2,
+ op->cmd.opcode);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /* set read mode */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
+ FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, rd_mode));
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /* set read addr */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /* set nfi read */
+ err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
+ SPI_NFI_OPMODE,
+ FIELD_PREP(SPI_NFI_OPMODE, 6));
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
+ SPI_NFI_READ_MODE | SPI_NFI_DMA_MODE);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x0);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /* trigger dma start read */
+ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
+ SPI_NFI_RD_TRIG);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
+ SPI_NFI_RD_TRIG);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_read_poll_timeout(as_ctrl->regmap_nfi,
+ REG_SPI_NFI_SNF_STA_CTL1, val,
+ (val & SPI_NFI_READ_FROM_CACHE_DONE),
+ 0, 1 * USEC_PER_SEC);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /*
+ * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end
+@@ -739,35 +736,41 @@ static ssize_t airoha_snand_dirmap_read(
+ SPI_NFI_READ_FROM_CACHE_DONE,
+ SPI_NFI_READ_FROM_CACHE_DONE);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR,
+ val, (val & SPI_NFI_AHB_DONE), 0,
+ 1 * USEC_PER_SEC);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /* DMA read need delay for data ready from controller to DRAM */
+ udelay(1);
+
+- dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr,
+- as_dev->buf_len, DMA_BIDIRECTIONAL);
++ dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
++ DMA_FROM_DEVICE);
+ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
+ if (err < 0)
+ return err;
+
+- memcpy(buf, as_dev->txrx_buf + offs, len);
++ memcpy(buf, txrx_buf + offs, len);
+
+ return len;
++
++error_dma_unmap:
++ dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
++ DMA_FROM_DEVICE);
++ return err;
+ }
+
+ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+ {
+- struct spi_device *spi = desc->mem->spi;
+- struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
+ struct spi_mem_op *op = &desc->info.op_tmpl;
++ struct spi_device *spi = desc->mem->spi;
++ u8 *txrx_buf = spi_get_ctldata(spi);
+ struct airoha_snand_ctrl *as_ctrl;
++ dma_addr_t dma_addr;
+ u32 wr_mode, val;
+ int err;
+
+@@ -776,19 +779,20 @@ static ssize_t airoha_snand_dirmap_write
+ if (err < 0)
+ return err;
+
+- dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr,
+- as_dev->buf_len, DMA_BIDIRECTIONAL);
+- memcpy(as_dev->txrx_buf + offs, buf, len);
+- dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr,
+- as_dev->buf_len, DMA_BIDIRECTIONAL);
++ memcpy(txrx_buf + offs, buf, len);
++ dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE,
++ DMA_TO_DEVICE);
++ err = dma_mapping_error(as_ctrl->dev, dma_addr);
++ if (err)
++ return err;
+
+ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA);
+ if (err < 0)
+- return err;
++ goto error_dma_unmap;
+
+ err = airoha_snand_nfi_config(as_ctrl);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ if (op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_QUAD ||
+ op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD)
+@@ -797,9 +801,9 @@ static ssize_t airoha_snand_dirmap_write
+ wr_mode = 0;
+
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
+- as_dev->dma_addr);
++ dma_addr);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ val = FIELD_PREP(SPI_NFI_PROG_LOAD_BYTE_NUM,
+ as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num);
+@@ -807,65 +811,65 @@ static ssize_t airoha_snand_dirmap_write
+ REG_SPI_NFI_SNF_MISC_CTL2,
+ SPI_NFI_PROG_LOAD_BYTE_NUM, val);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL1,
+ FIELD_PREP(SPI_NFI_PG_LOAD_CMD,
+ op->cmd.opcode));
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
+ FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, wr_mode));
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
+ SPI_NFI_READ_MODE);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
+ SPI_NFI_OPMODE,
+ FIELD_PREP(SPI_NFI_OPMODE, 3));
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG,
+ SPI_NFI_DMA_MODE);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x80);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
+ SPI_NFI_WR_TRIG);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON,
+ SPI_NFI_WR_TRIG);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR,
+ val, (val & SPI_NFI_AHB_DONE), 0,
+ 1 * USEC_PER_SEC);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ err = regmap_read_poll_timeout(as_ctrl->regmap_nfi,
+ REG_SPI_NFI_SNF_STA_CTL1, val,
+ (val & SPI_NFI_LOAD_TO_CACHE_DONE),
+ 0, 1 * USEC_PER_SEC);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
+ /*
+ * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end
+@@ -875,13 +879,20 @@ static ssize_t airoha_snand_dirmap_write
+ SPI_NFI_LOAD_TO_CACHE_DONE,
+ SPI_NFI_LOAD_TO_CACHE_DONE);
+ if (err)
+- return err;
++ goto error_dma_unmap;
+
++ dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
++ DMA_TO_DEVICE);
+ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
+ if (err < 0)
+ return err;
+
+ return len;
++
++error_dma_unmap:
++ dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
++ DMA_TO_DEVICE);
++ return err;
+ }
+
+ static int airoha_snand_exec_op(struct spi_mem *mem,
+@@ -956,42 +967,20 @@ static const struct spi_controller_mem_o
+ static int airoha_snand_setup(struct spi_device *spi)
+ {
+ struct airoha_snand_ctrl *as_ctrl;
+- struct airoha_snand_dev *as_dev;
+-
+- as_ctrl = spi_controller_get_devdata(spi->controller);
+-
+- as_dev = devm_kzalloc(as_ctrl->dev, sizeof(*as_dev), GFP_KERNEL);
+- if (!as_dev)
+- return -ENOMEM;
++ u8 *txrx_buf;
+
+ /* prepare device buffer */
+- as_dev->buf_len = SPI_NAND_CACHE_SIZE;
+- as_dev->txrx_buf = devm_kzalloc(as_ctrl->dev, as_dev->buf_len,
+- GFP_KERNEL);
+- if (!as_dev->txrx_buf)
+- return -ENOMEM;
+-
+- as_dev->dma_addr = dma_map_single(as_ctrl->dev, as_dev->txrx_buf,
+- as_dev->buf_len, DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
++ as_ctrl = spi_controller_get_devdata(spi->controller);
++ txrx_buf = devm_kzalloc(as_ctrl->dev, SPI_NAND_CACHE_SIZE,
++ GFP_KERNEL);
++ if (!txrx_buf)
+ return -ENOMEM;
+
+- spi_set_ctldata(spi, as_dev);
++ spi_set_ctldata(spi, txrx_buf);
+
+ return 0;
+ }
+
+-static void airoha_snand_cleanup(struct spi_device *spi)
+-{
+- struct airoha_snand_dev *as_dev = spi_get_ctldata(spi);
+- struct airoha_snand_ctrl *as_ctrl;
+-
+- as_ctrl = spi_controller_get_devdata(spi->controller);
+- dma_unmap_single(as_ctrl->dev, as_dev->dma_addr,
+- as_dev->buf_len, DMA_BIDIRECTIONAL);
+- spi_set_ctldata(spi, NULL);
+-}
+-
+ static int airoha_snand_nfi_setup(struct airoha_snand_ctrl *as_ctrl)
+ {
+ u32 val, sec_size, sec_num;
+@@ -1093,7 +1082,6 @@ static int airoha_snand_probe(struct pla
+ ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctrl->mode_bits = SPI_RX_DUAL;
+ ctrl->setup = airoha_snand_setup;
+- ctrl->cleanup = airoha_snand_cleanup;
+ device_set_node(&ctrl->dev, dev_fwnode(dev));
+
+ err = airoha_snand_nfi_setup(as_ctrl);
diff --git a/target/linux/airoha/patches-6.6/029-v6.12-net-dsa-mt7530-Add-EN7581-support.patch b/target/linux/airoha/patches-6.6/029-v6.12-net-dsa-mt7530-Add-EN7581-support.patch
new file mode 100644
index 0000000000..d07163902d
--- /dev/null
+++ b/target/linux/airoha/patches-6.6/029-v6.12-net-dsa-mt7530-Add-EN7581-support.patch
@@ -0,0 +1,184 @@
+From 2b0229f67932e4b9e2f458bf286903582bd30740 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Thu, 1 Aug 2024 09:35:12 +0200
+Subject: [PATCH] net: dsa: mt7530: Add EN7581 support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Introduce support for the DSA built-in switch available on the EN7581
+development board. EN7581 support is similar to MT7988 one except
+it requires to set MT7530_FORCE_MODE bit in MT753X_PMCR_P register
+for on cpu port.
+
+Tested-by: Benjamin Larsson <benjamin.larsson@genexis.eu>